Diffstat (limited to 'drivers')
259 files changed, 9232 insertions, 4096 deletions
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index 73863d86f022..76dc02f15574 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ -126,6 +126,12 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_copy_dsdt_locally, FALSE); */ u8 ACPI_INIT_GLOBAL(acpi_gbl_truncate_io_addresses, FALSE); +/* + * Disable runtime checking and repair of values returned by control methods. + * Use only if the repair is causing a problem on a particular machine. + */ +u8 ACPI_INIT_GLOBAL(acpi_gbl_disable_auto_repair, FALSE); + /* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */ struct acpi_table_fadt acpi_gbl_FADT; diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index c7f743ca395b..5552125d8340 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -357,6 +357,7 @@ struct acpi_predefined_data { char *pathname; const union acpi_predefined_info *predefined; union acpi_operand_object *parent_package; + struct acpi_namespace_node *node; u32 flags; u8 node_flags; }; diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h index 94e73c97cf85..c445cca490ea 100644 --- a/drivers/acpi/acpica/acpredef.h +++ b/drivers/acpi/acpica/acpredef.h @@ -468,6 +468,7 @@ static const union acpi_predefined_info predefined_names[] = {{"_SWS", 0, ACPI_RTYPE_INTEGER}}, {{"_TC1", 0, ACPI_RTYPE_INTEGER}}, {{"_TC2", 0, ACPI_RTYPE_INTEGER}}, + {{"_TDL", 0, ACPI_RTYPE_INTEGER}}, {{"_TIP", 1, ACPI_RTYPE_INTEGER}}, {{"_TIV", 1, ACPI_RTYPE_INTEGER}}, {{"_TMP", 0, ACPI_RTYPE_INTEGER}}, diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c index 9fb03fa8ffde..c845c8089f39 100644 --- a/drivers/acpi/acpica/nspredef.c +++ b/drivers/acpi/acpica/nspredef.c @@ -193,14 +193,20 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node, } /* - * 1) We have a return value, but if one wasn't expected, just exit, this is - * not a problem. For example, if the "Implicit Return" feature is - * enabled, methods will always return a value. + * Return value validation and possible repair. * - * 2) If the return value can be of any type, then we cannot perform any - * validation, exit. + * 1) Don't perform return value validation/repair if this feature + * has been disabled via a global option. + * + * 2) We have a return value, but if one wasn't expected, just exit, + * this is not a problem. For example, if the "Implicit Return" + * feature is enabled, methods will always return a value. + * + * 3) If the return value can be of any type, then we cannot perform + * any validation, just exit. */ - if ((!predefined->info.expected_btypes) || + if (acpi_gbl_disable_auto_repair || + (!predefined->info.expected_btypes) || (predefined->info.expected_btypes == ACPI_RTYPE_ALL)) { goto cleanup; } @@ -212,6 +218,7 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node, goto cleanup; } data->predefined = predefined; + data->node = node; data->node_flags = node->flags; data->pathname = pathname; diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c index 973883babee1..024c4f263f87 100644 --- a/drivers/acpi/acpica/nsrepair2.c +++ b/drivers/acpi/acpica/nsrepair2.c @@ -503,6 +503,21 @@ acpi_ns_repair_TSS(struct acpi_predefined_data *data, { union acpi_operand_object *return_object = *return_object_ptr; acpi_status status; + struct acpi_namespace_node *node; + + /* + * We can only sort the _TSS return package if there is no _PSS in the + * same scope. 
This is because if _PSS is present, the ACPI specification + * dictates that the _TSS Power Dissipation field is to be ignored, and + * therefore some BIOSs leave garbage values in the _TSS Power field(s). + * In this case, it is best to just return the _TSS package as-is. + * (May, 2011) + */ + status = + acpi_ns_get_node(data->node, "^_PSS", ACPI_NS_NO_UPSEARCH, &node); + if (ACPI_SUCCESS(status)) { + return (AE_OK); + } status = acpi_ns_check_sorted_list(data, return_object, 5, 1, ACPI_SORT_DESCENDING, diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c index 48db0944ce4a..62365f6075dd 100644 --- a/drivers/acpi/acpica/tbinstal.c +++ b/drivers/acpi/acpica/tbinstal.c @@ -126,12 +126,29 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index) } /* - * Originally, we checked the table signature for "SSDT" or "PSDT" here. - * Next, we added support for OEMx tables, signature "OEM". - * Valid tables were encountered with a null signature, so we've just - * given up on validating the signature, since it seems to be a waste - * of code. The original code was removed (05/2008). + * Validate the incoming table signature. + * + * 1) Originally, we checked the table signature for "SSDT" or "PSDT". + * 2) We added support for OEMx tables, signature "OEM". + * 3) Valid tables were encountered with a null signature, so we just + * gave up on validating the signature, (05/2008). + * 4) We encountered non-AML tables such as the MADT, which caused + * interpreter errors and kernel faults. So now, we once again allow + * only "SSDT", "OEMx", and now, also a null signature. (05/2011). */ + if ((table_desc->pointer->signature[0] != 0x00) && + (!ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_SSDT)) + && (ACPI_STRNCMP(table_desc->pointer->signature, "OEM", 3))) { + ACPI_ERROR((AE_INFO, + "Table has invalid signature [%4.4s] (0x%8.8X), must be SSDT or OEMx", + acpi_ut_valid_acpi_name(*(u32 *)table_desc-> + pointer-> + signature) ? table_desc-> + pointer->signature : "????", + *(u32 *)table_desc->pointer->signature)); + + return_ACPI_STATUS(AE_BAD_SIGNATURE); + } (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig index f739a70b1c70..c34aa51af4ee 100644 --- a/drivers/acpi/apei/Kconfig +++ b/drivers/acpi/apei/Kconfig @@ -10,9 +10,11 @@ config ACPI_APEI error injection. config ACPI_APEI_GHES - tristate "APEI Generic Hardware Error Source" + bool "APEI Generic Hardware Error Source" depends on ACPI_APEI && X86 select ACPI_HED + select LLIST + select GENERIC_ALLOCATOR help Generic Hardware Error Source provides a way to report platform hardware errors (such as that from chipset). It @@ -30,6 +32,13 @@ config ACPI_APEI_PCIEAER PCIe AER errors may be reported via APEI firmware first mode. Turn on this option to enable the corresponding support. +config ACPI_APEI_MEMORY_FAILURE + bool "APEI memory error recovering support" + depends on ACPI_APEI && MEMORY_FAILURE + help + Memory errors may be reported via APEI firmware first mode. + Turn on this option to enable the memory recovering support. + config ACPI_APEI_EINJ tristate "APEI Error INJection (EINJ)" depends on ACPI_APEI && DEBUG_FS diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c index 4a904a4bf05f..8041248fce9b 100644 --- a/drivers/acpi/apei/apei-base.c +++ b/drivers/acpi/apei/apei-base.c @@ -157,9 +157,10 @@ EXPORT_SYMBOL_GPL(apei_exec_noop); * Interpret the specified action. 
Go through whole action table, * execute all instructions belong to the action. */ -int apei_exec_run(struct apei_exec_context *ctx, u8 action) +int __apei_exec_run(struct apei_exec_context *ctx, u8 action, + bool optional) { - int rc; + int rc = -ENOENT; u32 i, ip; struct acpi_whea_header *entry; apei_exec_ins_func_t run; @@ -198,9 +199,9 @@ rewind: goto rewind; } - return 0; + return !optional && rc < 0 ? rc : 0; } -EXPORT_SYMBOL_GPL(apei_exec_run); +EXPORT_SYMBOL_GPL(__apei_exec_run); typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx, struct acpi_whea_header *entry, @@ -603,3 +604,29 @@ struct dentry *apei_get_debugfs_dir(void) return dapei; } EXPORT_SYMBOL_GPL(apei_get_debugfs_dir); + +int apei_osc_setup(void) +{ + static u8 whea_uuid_str[] = "ed855e0c-6c90-47bf-a62a-26de0fc5ad5c"; + acpi_handle handle; + u32 capbuf[3]; + struct acpi_osc_context context = { + .uuid_str = whea_uuid_str, + .rev = 1, + .cap.length = sizeof(capbuf), + .cap.pointer = capbuf, + }; + + capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; + capbuf[OSC_SUPPORT_TYPE] = 0; + capbuf[OSC_CONTROL_TYPE] = 0; + + if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)) + || ACPI_FAILURE(acpi_run_osc(handle, &context))) + return -EIO; + else { + kfree(context.ret.pointer); + return 0; + } +} +EXPORT_SYMBOL_GPL(apei_osc_setup); diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h index ef0581f2094d..f57050e7a5e7 100644 --- a/drivers/acpi/apei/apei-internal.h +++ b/drivers/acpi/apei/apei-internal.h @@ -50,7 +50,18 @@ static inline u64 apei_exec_ctx_get_output(struct apei_exec_context *ctx) return ctx->value; } -int apei_exec_run(struct apei_exec_context *ctx, u8 action); +int __apei_exec_run(struct apei_exec_context *ctx, u8 action, bool optional); + +static inline int apei_exec_run(struct apei_exec_context *ctx, u8 action) +{ + return __apei_exec_run(ctx, action, 0); +} + +/* It is optional whether the firmware provides the action */ +static inline int apei_exec_run_optional(struct apei_exec_context *ctx, u8 action) +{ + return __apei_exec_run(ctx, action, 1); +} /* Common instruction implementation */ @@ -113,4 +124,6 @@ void apei_estatus_print(const char *pfx, const struct acpi_hest_generic_status *estatus); int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus); int apei_estatus_check(const struct acpi_hest_generic_status *estatus); + +int apei_osc_setup(void); #endif diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c index f74b2ea11f21..589b96c38704 100644 --- a/drivers/acpi/apei/einj.c +++ b/drivers/acpi/apei/einj.c @@ -46,7 +46,8 @@ * Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the * EINJ table through an unpublished extension. Use with caution as * most will ignore the parameter and make their own choice of address - * for error injection. + * for error injection. This extension is used only if + * param_extension module parameter is specified. 
*/ struct einj_parameter { u64 type; @@ -65,6 +66,9 @@ struct einj_parameter { ((struct acpi_whea_header *)((char *)(tab) + \ sizeof(struct acpi_table_einj))) +static bool param_extension; +module_param(param_extension, bool, 0); + static struct acpi_table_einj *einj_tab; static struct apei_resources einj_resources; @@ -285,7 +289,7 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2) einj_exec_ctx_init(&ctx); - rc = apei_exec_run(&ctx, ACPI_EINJ_BEGIN_OPERATION); + rc = apei_exec_run_optional(&ctx, ACPI_EINJ_BEGIN_OPERATION); if (rc) return rc; apei_exec_ctx_set_input(&ctx, type); @@ -323,7 +327,7 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2) rc = __einj_error_trigger(trigger_paddr); if (rc) return rc; - rc = apei_exec_run(&ctx, ACPI_EINJ_END_OPERATION); + rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION); return rc; } @@ -489,14 +493,6 @@ static int __init einj_init(void) einj_debug_dir, NULL, &error_type_fops); if (!fentry) goto err_cleanup; - fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR, - einj_debug_dir, &error_param1); - if (!fentry) - goto err_cleanup; - fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR, - einj_debug_dir, &error_param2); - if (!fentry) - goto err_cleanup; fentry = debugfs_create_file("error_inject", S_IWUSR, einj_debug_dir, NULL, &error_inject_fops); if (!fentry) @@ -513,12 +509,23 @@ static int __init einj_init(void) rc = apei_exec_pre_map_gars(&ctx); if (rc) goto err_release; - param_paddr = einj_get_parameter_address(); - if (param_paddr) { - einj_param = ioremap(param_paddr, sizeof(*einj_param)); - rc = -ENOMEM; - if (!einj_param) - goto err_unmap; + if (param_extension) { + param_paddr = einj_get_parameter_address(); + if (param_paddr) { + einj_param = ioremap(param_paddr, sizeof(*einj_param)); + rc = -ENOMEM; + if (!einj_param) + goto err_unmap; + fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR, + einj_debug_dir, &error_param1); + if (!fentry) + goto err_unmap; + fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR, + einj_debug_dir, &error_param2); + if (!fentry) + goto err_unmap; + } else + pr_warn(EINJ_PFX "Parameter extension is not supported.\n"); } pr_info(EINJ_PFX "Error INJection is initialized.\n"); @@ -526,6 +533,8 @@ static int __init einj_init(void) return 0; err_unmap: + if (einj_param) + iounmap(einj_param); apei_exec_post_unmap_gars(&ctx); err_release: apei_resources_release(&einj_resources); diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c index a4cfb64c86a1..903549df809b 100644 --- a/drivers/acpi/apei/erst-dbg.c +++ b/drivers/acpi/apei/erst-dbg.c @@ -33,7 +33,7 @@ #define ERST_DBG_PFX "ERST DBG: " -#define ERST_DBG_RECORD_LEN_MAX 4096 +#define ERST_DBG_RECORD_LEN_MAX 0x4000 static void *erst_dbg_buf; static unsigned int erst_dbg_buf_len; @@ -213,6 +213,10 @@ static struct miscdevice erst_dbg_dev = { static __init int erst_dbg_init(void) { + if (erst_disable) { + pr_info(ERST_DBG_PFX "ERST support is disabled.\n"); + return -ENODEV; + } return misc_register(&erst_dbg_dev); } diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index e6cef8e1b534..2ca59dc69f7f 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -642,7 +642,7 @@ static int __erst_write_to_storage(u64 offset) int rc; erst_exec_ctx_init(&ctx); - rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_WRITE); + rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_WRITE); if (rc) return rc; apei_exec_ctx_set_input(&ctx, offset); @@ -666,7 +666,7 @@ static int 
__erst_write_to_storage(u64 offset) if (rc) return rc; val = apei_exec_ctx_get_output(&ctx); - rc = apei_exec_run(&ctx, ACPI_ERST_END); + rc = apei_exec_run_optional(&ctx, ACPI_ERST_END); if (rc) return rc; @@ -681,7 +681,7 @@ static int __erst_read_from_storage(u64 record_id, u64 offset) int rc; erst_exec_ctx_init(&ctx); - rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_READ); + rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_READ); if (rc) return rc; apei_exec_ctx_set_input(&ctx, offset); @@ -709,7 +709,7 @@ static int __erst_read_from_storage(u64 record_id, u64 offset) if (rc) return rc; val = apei_exec_ctx_get_output(&ctx); - rc = apei_exec_run(&ctx, ACPI_ERST_END); + rc = apei_exec_run_optional(&ctx, ACPI_ERST_END); if (rc) return rc; @@ -724,7 +724,7 @@ static int __erst_clear_from_storage(u64 record_id) int rc; erst_exec_ctx_init(&ctx); - rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_CLEAR); + rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_CLEAR); if (rc) return rc; apei_exec_ctx_set_input(&ctx, record_id); @@ -748,7 +748,7 @@ static int __erst_clear_from_storage(u64 record_id) if (rc) return rc; val = apei_exec_ctx_get_output(&ctx); - rc = apei_exec_run(&ctx, ACPI_ERST_END); + rc = apei_exec_run_optional(&ctx, ACPI_ERST_END); if (rc) return rc; @@ -932,8 +932,11 @@ static int erst_check_table(struct acpi_table_erst *erst_tab) static int erst_open_pstore(struct pstore_info *psi); static int erst_close_pstore(struct pstore_info *psi); static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, - struct timespec *time); -static u64 erst_writer(enum pstore_type_id type, size_t size); + struct timespec *time, struct pstore_info *psi); +static u64 erst_writer(enum pstore_type_id type, unsigned int part, + size_t size, struct pstore_info *psi); +static int erst_clearer(enum pstore_type_id type, u64 id, + struct pstore_info *psi); static struct pstore_info erst_info = { .owner = THIS_MODULE, @@ -942,7 +945,7 @@ static struct pstore_info erst_info = { .close = erst_close_pstore, .read = erst_reader, .write = erst_writer, - .erase = erst_clear + .erase = erst_clearer }; #define CPER_CREATOR_PSTORE \ @@ -983,7 +986,7 @@ static int erst_close_pstore(struct pstore_info *psi) } static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, - struct timespec *time) + struct timespec *time, struct pstore_info *psi) { int rc; ssize_t len = 0; @@ -1037,7 +1040,8 @@ out: return (rc < 0) ? rc : (len - sizeof(*rcd)); } -static u64 erst_writer(enum pstore_type_id type, size_t size) +static u64 erst_writer(enum pstore_type_id type, unsigned int part, + size_t size, struct pstore_info *psi) { struct cper_pstore_record *rcd = (struct cper_pstore_record *) (erst_info.buf - sizeof(*rcd)); @@ -1080,6 +1084,12 @@ static u64 erst_writer(enum pstore_type_id type, size_t size) return rcd->hdr.record_id; } +static int erst_clearer(enum pstore_type_id type, u64 id, + struct pstore_info *psi) +{ + return erst_clear(id); +} + static int __init erst_init(void) { int rc = 0; diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index f703b2881153..0784f99a4665 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -12,7 +12,7 @@ * For more information about Generic Hardware Error Source, please * refer to ACPI Specification version 4.0, section 17.3.2.6 * - * Copyright 2010 Intel Corp. + * Copyright 2010,2011 Intel Corp. 
* Author: Huang Ying <ying.huang@intel.com> * * This program is free software; you can redistribute it and/or @@ -42,6 +42,9 @@ #include <linux/mutex.h> #include <linux/ratelimit.h> #include <linux/vmalloc.h> +#include <linux/irq_work.h> +#include <linux/llist.h> +#include <linux/genalloc.h> #include <acpi/apei.h> #include <acpi/atomicio.h> #include <acpi/hed.h> @@ -53,6 +56,30 @@ #define GHES_PFX "GHES: " #define GHES_ESTATUS_MAX_SIZE 65536 +#define GHES_ESOURCE_PREALLOC_MAX_SIZE 65536 + +#define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3 + +/* This is just an estimation for memory pool allocation */ +#define GHES_ESTATUS_CACHE_AVG_SIZE 512 + +#define GHES_ESTATUS_CACHES_SIZE 4 + +#define GHES_ESTATUS_IN_CACHE_MAX_NSEC 10000000000ULL +/* Prevent too many caches are allocated because of RCU */ +#define GHES_ESTATUS_CACHE_ALLOCED_MAX (GHES_ESTATUS_CACHES_SIZE * 3 / 2) + +#define GHES_ESTATUS_CACHE_LEN(estatus_len) \ + (sizeof(struct ghes_estatus_cache) + (estatus_len)) +#define GHES_ESTATUS_FROM_CACHE(estatus_cache) \ + ((struct acpi_hest_generic_status *) \ + ((struct ghes_estatus_cache *)(estatus_cache) + 1)) + +#define GHES_ESTATUS_NODE_LEN(estatus_len) \ + (sizeof(struct ghes_estatus_node) + (estatus_len)) +#define GHES_ESTATUS_FROM_NODE(estatus_node) \ + ((struct acpi_hest_generic_status *) \ + ((struct ghes_estatus_node *)(estatus_node) + 1)) /* * One struct ghes is created for each generic hardware error source. @@ -77,6 +104,22 @@ struct ghes { }; }; +struct ghes_estatus_node { + struct llist_node llnode; + struct acpi_hest_generic *generic; +}; + +struct ghes_estatus_cache { + u32 estatus_len; + atomic_t count; + struct acpi_hest_generic *generic; + unsigned long long time_in; + struct rcu_head rcu; +}; + +int ghes_disable; +module_param_named(disable, ghes_disable, bool, 0); + static int ghes_panic_timeout __read_mostly = 30; /* @@ -121,6 +164,22 @@ static struct vm_struct *ghes_ioremap_area; static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi); static DEFINE_SPINLOCK(ghes_ioremap_lock_irq); +/* + * printk is not safe in NMI context. So in NMI handler, we allocate + * required memory from lock-less memory allocator + * (ghes_estatus_pool), save estatus into it, put them into lock-less + * list (ghes_estatus_llist), then delay printk into IRQ context via + * irq_work (ghes_proc_irq_work). ghes_estatus_size_request record + * required pool size by all NMI error source. 
+ */ +static struct gen_pool *ghes_estatus_pool; +static unsigned long ghes_estatus_pool_size_request; +static struct llist_head ghes_estatus_llist; +static struct irq_work ghes_proc_irq_work; + +struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE]; +static atomic_t ghes_estatus_cache_alloced; + static int ghes_ioremap_init(void) { ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES, @@ -180,6 +239,55 @@ static void ghes_iounmap_irq(void __iomem *vaddr_ptr) __flush_tlb_one(vaddr); } +static int ghes_estatus_pool_init(void) +{ + ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1); + if (!ghes_estatus_pool) + return -ENOMEM; + return 0; +} + +static void ghes_estatus_pool_free_chunk_page(struct gen_pool *pool, + struct gen_pool_chunk *chunk, + void *data) +{ + free_page(chunk->start_addr); +} + +static void ghes_estatus_pool_exit(void) +{ + gen_pool_for_each_chunk(ghes_estatus_pool, + ghes_estatus_pool_free_chunk_page, NULL); + gen_pool_destroy(ghes_estatus_pool); +} + +static int ghes_estatus_pool_expand(unsigned long len) +{ + unsigned long i, pages, size, addr; + int ret; + + ghes_estatus_pool_size_request += PAGE_ALIGN(len); + size = gen_pool_size(ghes_estatus_pool); + if (size >= ghes_estatus_pool_size_request) + return 0; + pages = (ghes_estatus_pool_size_request - size) / PAGE_SIZE; + for (i = 0; i < pages; i++) { + addr = __get_free_page(GFP_KERNEL); + if (!addr) + return -ENOMEM; + ret = gen_pool_add(ghes_estatus_pool, addr, PAGE_SIZE, -1); + if (ret) + return ret; + } + + return 0; +} + +static void ghes_estatus_pool_shrink(unsigned long len) +{ + ghes_estatus_pool_size_request -= PAGE_ALIGN(len); +} + static struct ghes *ghes_new(struct acpi_hest_generic *generic) { struct ghes *ghes; @@ -341,43 +449,196 @@ static void ghes_clear_estatus(struct ghes *ghes) ghes->flags &= ~GHES_TO_CLEAR; } -static void ghes_do_proc(struct ghes *ghes) +static void ghes_do_proc(const struct acpi_hest_generic_status *estatus) { - int sev, processed = 0; + int sev, sec_sev; struct acpi_hest_generic_data *gdata; - sev = ghes_severity(ghes->estatus->error_severity); - apei_estatus_for_each_section(ghes->estatus, gdata) { -#ifdef CONFIG_X86_MCE + sev = ghes_severity(estatus->error_severity); + apei_estatus_for_each_section(estatus, gdata) { + sec_sev = ghes_severity(gdata->error_severity); if (!uuid_le_cmp(*(uuid_le *)gdata->section_type, CPER_SEC_PLATFORM_MEM)) { - apei_mce_report_mem_error( - sev == GHES_SEV_CORRECTED, - (struct cper_sec_mem_err *)(gdata+1)); - processed = 1; - } + struct cper_sec_mem_err *mem_err; + mem_err = (struct cper_sec_mem_err *)(gdata+1); +#ifdef CONFIG_X86_MCE + apei_mce_report_mem_error(sev == GHES_SEV_CORRECTED, + mem_err); #endif +#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE + if (sev == GHES_SEV_RECOVERABLE && + sec_sev == GHES_SEV_RECOVERABLE && + mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS) { + unsigned long pfn; + pfn = mem_err->physical_addr >> PAGE_SHIFT; + memory_failure_queue(pfn, 0, 0); + } +#endif + } } } -static void ghes_print_estatus(const char *pfx, struct ghes *ghes) +static void __ghes_print_estatus(const char *pfx, + const struct acpi_hest_generic *generic, + const struct acpi_hest_generic_status *estatus) { - /* Not more than 2 messages every 5 seconds */ - static DEFINE_RATELIMIT_STATE(ratelimit, 5*HZ, 2); - if (pfx == NULL) { - if (ghes_severity(ghes->estatus->error_severity) <= + if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED) pfx = KERN_WARNING HW_ERR; else pfx = 
KERN_ERR HW_ERR; } - if (__ratelimit(&ratelimit)) { - printk( - "%s""Hardware error from APEI Generic Hardware Error Source: %d\n", - pfx, ghes->generic->header.source_id); - apei_estatus_print(pfx, ghes->estatus); + printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n", + pfx, generic->header.source_id); + apei_estatus_print(pfx, estatus); +} + +static int ghes_print_estatus(const char *pfx, + const struct acpi_hest_generic *generic, + const struct acpi_hest_generic_status *estatus) +{ + /* Not more than 2 messages every 5 seconds */ + static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2); + static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2); + struct ratelimit_state *ratelimit; + + if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED) + ratelimit = &ratelimit_corrected; + else + ratelimit = &ratelimit_uncorrected; + if (__ratelimit(ratelimit)) { + __ghes_print_estatus(pfx, generic, estatus); + return 1; } + return 0; +} + +/* + * GHES error status reporting throttle, to report more kinds of + * errors, instead of just most frequently occurred errors. + */ +static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus) +{ + u32 len; + int i, cached = 0; + unsigned long long now; + struct ghes_estatus_cache *cache; + struct acpi_hest_generic_status *cache_estatus; + + len = apei_estatus_len(estatus); + rcu_read_lock(); + for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) { + cache = rcu_dereference(ghes_estatus_caches[i]); + if (cache == NULL) + continue; + if (len != cache->estatus_len) + continue; + cache_estatus = GHES_ESTATUS_FROM_CACHE(cache); + if (memcmp(estatus, cache_estatus, len)) + continue; + atomic_inc(&cache->count); + now = sched_clock(); + if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC) + cached = 1; + break; + } + rcu_read_unlock(); + return cached; +} + +static struct ghes_estatus_cache *ghes_estatus_cache_alloc( + struct acpi_hest_generic *generic, + struct acpi_hest_generic_status *estatus) +{ + int alloced; + u32 len, cache_len; + struct ghes_estatus_cache *cache; + struct acpi_hest_generic_status *cache_estatus; + + alloced = atomic_add_return(1, &ghes_estatus_cache_alloced); + if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) { + atomic_dec(&ghes_estatus_cache_alloced); + return NULL; + } + len = apei_estatus_len(estatus); + cache_len = GHES_ESTATUS_CACHE_LEN(len); + cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len); + if (!cache) { + atomic_dec(&ghes_estatus_cache_alloced); + return NULL; + } + cache_estatus = GHES_ESTATUS_FROM_CACHE(cache); + memcpy(cache_estatus, estatus, len); + cache->estatus_len = len; + atomic_set(&cache->count, 0); + cache->generic = generic; + cache->time_in = sched_clock(); + return cache; +} + +static void ghes_estatus_cache_free(struct ghes_estatus_cache *cache) +{ + u32 len; + + len = apei_estatus_len(GHES_ESTATUS_FROM_CACHE(cache)); + len = GHES_ESTATUS_CACHE_LEN(len); + gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len); + atomic_dec(&ghes_estatus_cache_alloced); +} + +static void ghes_estatus_cache_rcu_free(struct rcu_head *head) +{ + struct ghes_estatus_cache *cache; + + cache = container_of(head, struct ghes_estatus_cache, rcu); + ghes_estatus_cache_free(cache); +} + +static void ghes_estatus_cache_add( + struct acpi_hest_generic *generic, + struct acpi_hest_generic_status *estatus) +{ + int i, slot = -1, count; + unsigned long long now, duration, period, max_period = 0; + struct ghes_estatus_cache *cache, *slot_cache = NULL, *new_cache; + + new_cache 
= ghes_estatus_cache_alloc(generic, estatus); + if (new_cache == NULL) + return; + rcu_read_lock(); + now = sched_clock(); + for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) { + cache = rcu_dereference(ghes_estatus_caches[i]); + if (cache == NULL) { + slot = i; + slot_cache = NULL; + break; + } + duration = now - cache->time_in; + if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) { + slot = i; + slot_cache = cache; + break; + } + count = atomic_read(&cache->count); + period = duration; + do_div(period, (count + 1)); + if (period > max_period) { + max_period = period; + slot = i; + slot_cache = cache; + } + } + /* new_cache must be put into array after its contents are written */ + smp_wmb(); + if (slot != -1 && cmpxchg(ghes_estatus_caches + slot, + slot_cache, new_cache) == slot_cache) { + if (slot_cache) + call_rcu(&slot_cache->rcu, ghes_estatus_cache_rcu_free); + } else + ghes_estatus_cache_free(new_cache); + rcu_read_unlock(); } static int ghes_proc(struct ghes *ghes) @@ -387,9 +648,11 @@ static int ghes_proc(struct ghes *ghes) rc = ghes_read_estatus(ghes, 0); if (rc) goto out; - ghes_print_estatus(NULL, ghes); - ghes_do_proc(ghes); - + if (!ghes_estatus_cached(ghes->estatus)) { + if (ghes_print_estatus(NULL, ghes->generic, ghes->estatus)) + ghes_estatus_cache_add(ghes->generic, ghes->estatus); + } + ghes_do_proc(ghes->estatus); out: ghes_clear_estatus(ghes); return 0; @@ -447,6 +710,45 @@ static int ghes_notify_sci(struct notifier_block *this, return ret; } +static void ghes_proc_in_irq(struct irq_work *irq_work) +{ + struct llist_node *llnode, *next, *tail = NULL; + struct ghes_estatus_node *estatus_node; + struct acpi_hest_generic *generic; + struct acpi_hest_generic_status *estatus; + u32 len, node_len; + + /* + * Because the time order of estatus in list is reversed, + * revert it back to proper order. + */ + llnode = llist_del_all(&ghes_estatus_llist); + while (llnode) { + next = llnode->next; + llnode->next = tail; + tail = llnode; + llnode = next; + } + llnode = tail; + while (llnode) { + next = llnode->next; + estatus_node = llist_entry(llnode, struct ghes_estatus_node, + llnode); + estatus = GHES_ESTATUS_FROM_NODE(estatus_node); + len = apei_estatus_len(estatus); + node_len = GHES_ESTATUS_NODE_LEN(len); + ghes_do_proc(estatus); + if (!ghes_estatus_cached(estatus)) { + generic = estatus_node->generic; + if (ghes_print_estatus(NULL, generic, estatus)) + ghes_estatus_cache_add(generic, estatus); + } + gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, + node_len); + llnode = next; + } +} + static int ghes_notify_nmi(struct notifier_block *this, unsigned long cmd, void *data) { @@ -476,7 +778,8 @@ static int ghes_notify_nmi(struct notifier_block *this, if (sev_global >= GHES_SEV_PANIC) { oops_begin(); - ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global); + __ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global->generic, + ghes_global->estatus); /* reboot to log the error! 
*/ if (panic_timeout == 0) panic_timeout = ghes_panic_timeout; @@ -484,12 +787,34 @@ static int ghes_notify_nmi(struct notifier_block *this, } list_for_each_entry_rcu(ghes, &ghes_nmi, list) { +#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG + u32 len, node_len; + struct ghes_estatus_node *estatus_node; + struct acpi_hest_generic_status *estatus; +#endif if (!(ghes->flags & GHES_TO_CLEAR)) continue; - /* Do not print estatus because printk is not NMI safe */ - ghes_do_proc(ghes); +#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG + if (ghes_estatus_cached(ghes->estatus)) + goto next; + /* Save estatus for further processing in IRQ context */ + len = apei_estatus_len(ghes->estatus); + node_len = GHES_ESTATUS_NODE_LEN(len); + estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool, + node_len); + if (estatus_node) { + estatus_node->generic = ghes->generic; + estatus = GHES_ESTATUS_FROM_NODE(estatus_node); + memcpy(estatus, ghes->estatus, len); + llist_add(&estatus_node->llnode, &ghes_estatus_llist); + } +next: +#endif ghes_clear_estatus(ghes); } +#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG + irq_work_queue(&ghes_proc_irq_work); +#endif out: raw_spin_unlock(&ghes_nmi_lock); @@ -504,10 +829,26 @@ static struct notifier_block ghes_notifier_nmi = { .notifier_call = ghes_notify_nmi, }; +static unsigned long ghes_esource_prealloc_size( + const struct acpi_hest_generic *generic) +{ + unsigned long block_length, prealloc_records, prealloc_size; + + block_length = min_t(unsigned long, generic->error_block_length, + GHES_ESTATUS_MAX_SIZE); + prealloc_records = max_t(unsigned long, + generic->records_to_preallocate, 1); + prealloc_size = min_t(unsigned long, block_length * prealloc_records, + GHES_ESOURCE_PREALLOC_MAX_SIZE); + + return prealloc_size; +} + static int __devinit ghes_probe(struct platform_device *ghes_dev) { struct acpi_hest_generic *generic; struct ghes *ghes = NULL; + unsigned long len; int rc = -EINVAL; generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data; @@ -573,6 +914,8 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev) mutex_unlock(&ghes_list_mutex); break; case ACPI_HEST_NOTIFY_NMI: + len = ghes_esource_prealloc_size(generic); + ghes_estatus_pool_expand(len); mutex_lock(&ghes_list_mutex); if (list_empty(&ghes_nmi)) register_die_notifier(&ghes_notifier_nmi); @@ -597,6 +940,7 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev) { struct ghes *ghes; struct acpi_hest_generic *generic; + unsigned long len; ghes = platform_get_drvdata(ghes_dev); generic = ghes->generic; @@ -627,6 +971,8 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev) * freed after NMI handler finishes. 
*/ synchronize_rcu(); + len = ghes_esource_prealloc_size(generic); + ghes_estatus_pool_shrink(len); break; default: BUG(); @@ -662,15 +1008,43 @@ static int __init ghes_init(void) return -EINVAL; } + if (ghes_disable) { + pr_info(GHES_PFX "GHES is not enabled!\n"); + return -EINVAL; + } + + init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq); + rc = ghes_ioremap_init(); if (rc) goto err; - rc = platform_driver_register(&ghes_platform_driver); + rc = ghes_estatus_pool_init(); if (rc) goto err_ioremap_exit; + rc = ghes_estatus_pool_expand(GHES_ESTATUS_CACHE_AVG_SIZE * + GHES_ESTATUS_CACHE_ALLOCED_MAX); + if (rc) + goto err_pool_exit; + + rc = platform_driver_register(&ghes_platform_driver); + if (rc) + goto err_pool_exit; + + rc = apei_osc_setup(); + if (rc == 0 && osc_sb_apei_support_acked) + pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n"); + else if (rc == 0 && !osc_sb_apei_support_acked) + pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n"); + else if (rc && osc_sb_apei_support_acked) + pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n"); + else + pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n"); + return 0; +err_pool_exit: + ghes_estatus_pool_exit(); err_ioremap_exit: ghes_ioremap_exit(); err: @@ -680,6 +1054,7 @@ err: static void __exit ghes_exit(void) { platform_driver_unregister(&ghes_platform_driver); + ghes_estatus_pool_exit(); ghes_ioremap_exit(); } diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c index 181bc2f7bb74..05fee06f4d6e 100644 --- a/drivers/acpi/apei/hest.c +++ b/drivers/acpi/apei/hest.c @@ -231,16 +231,17 @@ void __init acpi_hest_init(void) goto err; } - rc = apei_hest_parse(hest_parse_ghes_count, &ghes_count); - if (rc) - goto err; - - rc = hest_ghes_dev_register(ghes_count); - if (!rc) { - pr_info(HEST_PFX "Table parsing has been initialized.\n"); - return; + if (!ghes_disable) { + rc = apei_hest_parse(hest_parse_ghes_count, &ghes_count); + if (rc) + goto err; + rc = hest_ghes_dev_register(ghes_count); + if (rc) + goto err; } + pr_info(HEST_PFX "Table parsing has been initialized.\n"); + return; err: hest_disable = 1; } diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 2c661353e8f2..7711d94a0409 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -55,6 +55,9 @@ #define ACPI_BATTERY_NOTIFY_INFO 0x81 #define ACPI_BATTERY_NOTIFY_THRESHOLD 0x82 +/* Battery power unit: 0 means mW, 1 means mA */ +#define ACPI_BATTERY_POWER_UNIT_MA 1 + #define _COMPONENT ACPI_BATTERY_COMPONENT ACPI_MODULE_NAME("battery"); @@ -91,16 +94,12 @@ MODULE_DEVICE_TABLE(acpi, battery_device_ids); enum { ACPI_BATTERY_ALARM_PRESENT, ACPI_BATTERY_XINFO_PRESENT, - /* For buggy DSDTs that report negative 16-bit values for either - * charging or discharging current and/or report 0 as 65536 - * due to bad math. - */ - ACPI_BATTERY_QUIRK_SIGNED16_CURRENT, ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, }; struct acpi_battery { struct mutex lock; + struct mutex sysfs_lock; struct power_supply bat; struct acpi_device *device; struct notifier_block pm_nb; @@ -301,7 +300,8 @@ static enum power_supply_property energy_battery_props[] = { #ifdef CONFIG_ACPI_PROCFS_POWER inline char *acpi_battery_units(struct acpi_battery *battery) { - return (battery->power_unit)?"mA":"mW"; + return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ? 
+ "mA" : "mW"; } #endif @@ -461,9 +461,17 @@ static int acpi_battery_get_state(struct acpi_battery *battery) battery->update_time = jiffies; kfree(buffer.pointer); - if (test_bit(ACPI_BATTERY_QUIRK_SIGNED16_CURRENT, &battery->flags) && - battery->rate_now != -1) + /* For buggy DSDTs that report negative 16-bit values for either + * charging or discharging current and/or report 0 as 65536 + * due to bad math. + */ + if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA && + battery->rate_now != ACPI_BATTERY_VALUE_UNKNOWN && + (s16)(battery->rate_now) < 0) { battery->rate_now = abs((s16)battery->rate_now); + printk_once(KERN_WARNING FW_BUG "battery: (dis)charge rate" + " invalid.\n"); + } if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags) && battery->capacity_now >= 0 && battery->capacity_now <= 100) @@ -544,7 +552,7 @@ static int sysfs_add_battery(struct acpi_battery *battery) { int result; - if (battery->power_unit) { + if (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) { battery->bat.properties = charge_battery_props; battery->bat.num_properties = ARRAY_SIZE(charge_battery_props); @@ -566,18 +574,16 @@ static int sysfs_add_battery(struct acpi_battery *battery) static void sysfs_remove_battery(struct acpi_battery *battery) { - if (!battery->bat.dev) + mutex_lock(&battery->sysfs_lock); + if (!battery->bat.dev) { + mutex_unlock(&battery->sysfs_lock); return; + } + device_remove_file(battery->bat.dev, &alarm_attr); power_supply_unregister(&battery->bat); battery->bat.dev = NULL; -} - -static void acpi_battery_quirks(struct acpi_battery *battery) -{ - if (dmi_name_in_vendors("Acer") && battery->power_unit) { - set_bit(ACPI_BATTERY_QUIRK_SIGNED16_CURRENT, &battery->flags); - } + mutex_unlock(&battery->sysfs_lock); } /* @@ -592,7 +598,7 @@ static void acpi_battery_quirks(struct acpi_battery *battery) * * Handle this correctly so that they won't break userspace. 
*/ -static void acpi_battery_quirks2(struct acpi_battery *battery) +static void acpi_battery_quirks(struct acpi_battery *battery) { if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)) return ; @@ -623,13 +629,15 @@ static int acpi_battery_update(struct acpi_battery *battery) result = acpi_battery_get_info(battery); if (result) return result; - acpi_battery_quirks(battery); acpi_battery_init_alarm(battery); } - if (!battery->bat.dev) - sysfs_add_battery(battery); + if (!battery->bat.dev) { + result = sysfs_add_battery(battery); + if (result) + return result; + } result = acpi_battery_get_state(battery); - acpi_battery_quirks2(battery); + acpi_battery_quirks(battery); return result; } @@ -863,7 +871,7 @@ DECLARE_FILE_FUNCTIONS(alarm); }, \ } -static struct battery_file { +static const struct battery_file { struct file_operations ops; mode_t mode; const char *name; @@ -948,9 +956,12 @@ static int battery_notify(struct notifier_block *nb, struct acpi_battery *battery = container_of(nb, struct acpi_battery, pm_nb); switch (mode) { + case PM_POST_HIBERNATION: case PM_POST_SUSPEND: - sysfs_remove_battery(battery); - sysfs_add_battery(battery); + if (battery->bat.dev) { + sysfs_remove_battery(battery); + sysfs_add_battery(battery); + } break; } @@ -972,28 +983,38 @@ static int acpi_battery_add(struct acpi_device *device) strcpy(acpi_device_class(device), ACPI_BATTERY_CLASS); device->driver_data = battery; mutex_init(&battery->lock); + mutex_init(&battery->sysfs_lock); if (ACPI_SUCCESS(acpi_get_handle(battery->device->handle, "_BIX", &handle))) set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags); - acpi_battery_update(battery); + result = acpi_battery_update(battery); + if (result) + goto fail; #ifdef CONFIG_ACPI_PROCFS_POWER result = acpi_battery_add_fs(device); #endif - if (!result) { - printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n", - ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device), - device->status.battery_present ? "present" : "absent"); - } else { + if (result) { #ifdef CONFIG_ACPI_PROCFS_POWER acpi_battery_remove_fs(device); #endif - kfree(battery); + goto fail; } + printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n", + ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device), + device->status.battery_present ? 
"present" : "absent"); + battery->pm_nb.notifier_call = battery_notify; register_pm_notifier(&battery->pm_nb); return result; + +fail: + sysfs_remove_battery(battery); + mutex_destroy(&battery->lock); + mutex_destroy(&battery->sysfs_lock); + kfree(battery); + return result; } static int acpi_battery_remove(struct acpi_device *device, int type) @@ -1009,6 +1030,7 @@ static int acpi_battery_remove(struct acpi_device *device, int type) #endif sysfs_remove_battery(battery); mutex_destroy(&battery->lock); + mutex_destroy(&battery->sysfs_lock); kfree(battery); return 0; } diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index d1e06c182cdb..437ddbf0c49a 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -39,6 +39,7 @@ #include <linux/pci.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> +#include <acpi/apei.h> #include <linux/dmi.h> #include <linux/suspend.h> @@ -519,6 +520,7 @@ out_kfree: } EXPORT_SYMBOL(acpi_run_osc); +bool osc_sb_apei_support_acked; static u8 sb_uuid_str[] = "0811B06E-4A27-44F9-8D60-3CBBC22E7B48"; static void acpi_bus_osc_support(void) { @@ -541,11 +543,19 @@ static void acpi_bus_osc_support(void) #if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE) capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT; #endif + + if (!ghes_disable) + capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_APEI_SUPPORT; if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))) return; - if (ACPI_SUCCESS(acpi_run_osc(handle, &context))) + if (ACPI_SUCCESS(acpi_run_osc(handle, &context))) { + u32 *capbuf_ret = context.ret.pointer; + if (context.ret.length > OSC_SUPPORT_TYPE) + osc_sb_apei_support_acked = + capbuf_ret[OSC_SUPPORT_TYPE] & OSC_SB_APEI_SUPPORT; kfree(context.ret.pointer); - /* do we need to check the returned cap? Sounds no */ + } + /* do we need to check other returned cap? Sounds no */ } /* -------------------------------------------------------------------------- diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index 1864ad3cf895..19a61136d848 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c @@ -77,7 +77,7 @@ struct dock_dependent_device { struct list_head list; struct list_head hotplug_list; acpi_handle handle; - struct acpi_dock_ops *ops; + const struct acpi_dock_ops *ops; void *context; }; @@ -589,7 +589,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier); * the dock driver after _DCK is executed. 
*/ int -register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops, +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops, void *context) { struct dock_dependent_device *dd; diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c index 05b44201a614..22f918bacd35 100644 --- a/drivers/acpi/ec_sys.c +++ b/drivers/acpi/ec_sys.c @@ -92,7 +92,7 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf, return count; } -static struct file_operations acpi_ec_io_ops = { +static const struct file_operations acpi_ec_io_ops = { .owner = THIS_MODULE, .open = acpi_ec_open_io, .read = acpi_ec_read_io, diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 467479f07c1f..0f0356ca1a9e 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c @@ -110,7 +110,7 @@ fan_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state) return result; } -static struct thermal_cooling_device_ops fan_cooling_ops = { +static const struct thermal_cooling_device_ops fan_cooling_ops = { .get_max_state = fan_get_max_state, .get_cur_state = fan_get_cur_state, .set_cur_state = fan_set_cur_state, diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 372f9b70f7f4..fa32f584229f 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -155,7 +155,7 @@ static u32 acpi_osi_handler(acpi_string interface, u32 supported) { if (!strcmp("Linux", interface)) { - printk(KERN_NOTICE FW_BUG PREFIX + printk_once(KERN_NOTICE FW_BUG PREFIX "BIOS _OSI(Linux) query %s%s\n", osi_linux.enable ? "honored" : "ignored", osi_linux.cmdline ? " via cmdline" : @@ -237,8 +237,23 @@ void acpi_os_vprintf(const char *fmt, va_list args) #endif } +#ifdef CONFIG_KEXEC +static unsigned long acpi_rsdp; +static int __init setup_acpi_rsdp(char *arg) +{ + acpi_rsdp = simple_strtoul(arg, NULL, 16); + return 0; +} +early_param("acpi_rsdp", setup_acpi_rsdp); +#endif + acpi_physical_address __init acpi_os_get_root_pointer(void) { +#ifdef CONFIG_KEXEC + if (acpi_rsdp) + return acpi_rsdp; +#endif + if (efi_enabled) { if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) return efi.acpi20; @@ -1083,7 +1098,13 @@ struct osi_setup_entry { bool enable; }; -static struct osi_setup_entry __initdata osi_setup_entries[OSI_STRING_ENTRIES_MAX]; +static struct osi_setup_entry __initdata + osi_setup_entries[OSI_STRING_ENTRIES_MAX] = { + {"Module Device", true}, + {"Processor Device", true}, + {"3.0 _SCP Extensions", true}, + {"Processor Aggregator Device", true}, +}; void __init acpi_osi_setup(char *str) { diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index f907cfbfa13c..7f9eba9a0b02 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -303,6 +303,61 @@ void acpi_pci_irq_del_prt(struct pci_bus *bus) /* -------------------------------------------------------------------------- PCI Interrupt Routing Support -------------------------------------------------------------------------- */ +#ifdef CONFIG_X86_IO_APIC +extern int noioapicquirk; +extern int noioapicreroute; + +static int bridge_has_boot_interrupt_variant(struct pci_bus *bus) +{ + struct pci_bus *bus_it; + + for (bus_it = bus ; bus_it ; bus_it = bus_it->parent) { + if (!bus_it->self) + return 0; + if (bus_it->self->irq_reroute_variant) + return bus_it->self->irq_reroute_variant; + } + return 0; +} + +/* + * Some chipsets (e.g. Intel 6700PXH) generate a legacy INTx when the IRQ + * entry in the chipset's IO-APIC is masked (as, e.g. the RT kernel does + * during interrupt handling). 
When this INTx generation cannot be disabled, + * we reroute these interrupts to their legacy equivalent to get rid of + * spurious interrupts. + */ +static int acpi_reroute_boot_interrupt(struct pci_dev *dev, + struct acpi_prt_entry *entry) +{ + if (noioapicquirk || noioapicreroute) { + return 0; + } else { + switch (bridge_has_boot_interrupt_variant(dev->bus)) { + case 0: + /* no rerouting necessary */ + return 0; + case INTEL_IRQ_REROUTE_VARIANT: + /* + * Remap according to INTx routing table in 6700PXH + * specs, intel order number 302628-002, section + * 2.15.2. Other chipsets (80332, ...) have the same + * mapping and are handled here as well. + */ + dev_info(&dev->dev, "PCI IRQ %d -> rerouted to legacy " + "IRQ %d\n", entry->index, + (entry->index % 4) + 16); + entry->index = (entry->index % 4) + 16; + return 1; + default: + dev_warn(&dev->dev, "Cannot reroute IRQ %d to legacy " + "IRQ: unknown mapping\n", entry->index); + return -1; + } + } +} +#endif /* CONFIG_X86_IO_APIC */ + static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin) { struct acpi_prt_entry *entry; @@ -311,6 +366,9 @@ static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin) entry = acpi_pci_irq_find_prt_entry(dev, pin); if (entry) { +#ifdef CONFIG_X86_IO_APIC + acpi_reroute_boot_interrupt(dev, entry); +#endif /* CONFIG_X86_IO_APIC */ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %s[%c] _PRT entry\n", pci_name(dev), pin_name(pin))); return entry; diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index d06078d660ad..2672c798272f 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -485,7 +485,8 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) root->secondary.end = 0xFF; printk(KERN_WARNING FW_BUG PREFIX "no secondary bus range in _CRS\n"); - status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, &bus); + status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, + NULL, &bus); if (ACPI_SUCCESS(status)) root->secondary.start = bus; else if (status == AE_NOT_FOUND) diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c index 79cb65332894..870550d6a4bf 100644 --- a/drivers/acpi/processor_thermal.c +++ b/drivers/acpi/processor_thermal.c @@ -244,7 +244,7 @@ processor_set_cur_state(struct thermal_cooling_device *cdev, return result; } -struct thermal_cooling_device_ops processor_cooling_ops = { +const struct thermal_cooling_device_ops processor_cooling_ops = { .get_max_state = processor_get_max_state, .get_cur_state = processor_get_cur_state, .set_cur_state = processor_set_cur_state, diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c index 50658ff887d9..6e36d0c0057c 100644 --- a/drivers/acpi/sbs.c +++ b/drivers/acpi/sbs.c @@ -130,6 +130,9 @@ struct acpi_sbs { #define to_acpi_sbs(x) container_of(x, struct acpi_sbs, charger) +static int acpi_sbs_remove(struct acpi_device *device, int type); +static int acpi_battery_get_state(struct acpi_battery *battery); + static inline int battery_scale(int log) { int scale = 1; @@ -195,6 +198,8 @@ static int acpi_sbs_battery_get_property(struct power_supply *psy, if ((!battery->present) && psp != POWER_SUPPLY_PROP_PRESENT) return -ENODEV; + + acpi_battery_get_state(battery); switch (psp) { case POWER_SUPPLY_PROP_STATUS: if (battery->rate_now < 0) @@ -225,11 +230,17 @@ static int acpi_sbs_battery_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_POWER_NOW: val->intval = abs(battery->rate_now) * acpi_battery_ipscale(battery) * 1000; + 
val->intval *= (acpi_battery_mode(battery)) ? + (battery->voltage_now * + acpi_battery_vscale(battery) / 1000) : 1; break; case POWER_SUPPLY_PROP_CURRENT_AVG: case POWER_SUPPLY_PROP_POWER_AVG: val->intval = abs(battery->rate_avg) * acpi_battery_ipscale(battery) * 1000; + val->intval *= (acpi_battery_mode(battery)) ? + (battery->voltage_now * + acpi_battery_vscale(battery) / 1000) : 1; break; case POWER_SUPPLY_PROP_CAPACITY: val->intval = battery->state_of_charge; @@ -903,8 +914,6 @@ static void acpi_sbs_callback(void *context) } } -static int acpi_sbs_remove(struct acpi_device *device, int type); - static int acpi_sbs_add(struct acpi_device *device) { struct acpi_sbs *sbs; diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 6c949602cbd1..3ed80b2ca907 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -428,6 +428,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"), }, }, + { + .callback = init_old_suspend_ordering, + .ident = "Asus A8N-SLI DELUXE", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"), + }, + }, + { + .callback = init_old_suspend_ordering, + .ident = "Asus A8N-SLI Premium", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"), + }, + }, {}, }; #endif /* CONFIG_SUSPEND */ diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 77255f250dbb..c538d0ef10ff 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c @@ -149,12 +149,12 @@ static int param_get_debug_level(char *buffer, const struct kernel_param *kp) return result; } -static struct kernel_param_ops param_ops_debug_layer = { +static const struct kernel_param_ops param_ops_debug_layer = { .set = param_set_uint, .get = param_get_debug_layer, }; -static struct kernel_param_ops param_ops_debug_level = { +static const struct kernel_param_ops param_ops_debug_level = { .set = param_set_uint, .get = param_get_debug_level, }; diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index 2607e17b520f..48fbc647b178 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -812,7 +812,7 @@ acpi_thermal_unbind_cooling_device(struct thermal_zone_device *thermal, thermal_zone_unbind_cooling_device); } -static struct thermal_zone_device_ops acpi_thermal_zone_ops = { +static const struct thermal_zone_device_ops acpi_thermal_zone_ops = { .bind = acpi_thermal_bind_cooling_device, .unbind = acpi_thermal_unbind_cooling_device, .get_temp = thermal_get_temp, diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index ada4b4d9bdc8..08a44b532f7c 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -307,7 +307,7 @@ video_set_cur_state(struct thermal_cooling_device *cooling_dev, unsigned long st return acpi_video_device_lcd_set_level(video, level); } -static struct thermal_cooling_device_ops video_cooling_ops = { +static const struct thermal_cooling_device_ops video_cooling_ops = { .get_max_state = video_get_max_state, .get_cur_state = video_get_cur_state, .set_cur_state = video_set_cur_state, diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index e0a5b555cee1..bb7c5f1085cc 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c @@ -218,12 +218,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data) ata_acpi_uevent(dev->link->ap, dev, event); } -static struct acpi_dock_ops ata_acpi_dev_dock_ops = { +static const struct acpi_dock_ops 
ata_acpi_dev_dock_ops = { .handler = ata_acpi_dev_notify_dock, .uevent = ata_acpi_dev_uevent, }; -static struct acpi_dock_ops ata_acpi_ap_dock_ops = { +static const struct acpi_dock_ops ata_acpi_ap_dock_ops = { .handler = ata_acpi_ap_notify_dock, .uevent = ata_acpi_ap_uevent, }; diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index b89fffc1d777..33e1bed68fdd 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -166,7 +166,7 @@ static int create_path(const char *nodepath) { char *path; char *s; - int err; + int err = 0; /* parent directories do not exist, create them */ path = kstrdup(nodepath, GFP_KERNEL); diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index be8714aa9dd6..e18566a0fedd 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -80,7 +80,6 @@ static void genpd_set_active(struct generic_pm_domain *genpd) int pm_genpd_poweron(struct generic_pm_domain *genpd) { struct generic_pm_domain *parent = genpd->parent; - DEFINE_WAIT(wait); int ret = 0; start: @@ -112,7 +111,7 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd) } if (genpd->power_on) { - int ret = genpd->power_on(genpd); + ret = genpd->power_on(genpd); if (ret) goto out; } diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 8dc247c974af..acb3f83b8079 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -226,11 +226,17 @@ static int rpm_idle(struct device *dev, int rpmflags) callback = NULL; if (callback) { - spin_unlock_irq(&dev->power.lock); + if (dev->power.irq_safe) + spin_unlock(&dev->power.lock); + else + spin_unlock_irq(&dev->power.lock); callback(dev); - spin_lock_irq(&dev->power.lock); + if (dev->power.irq_safe) + spin_lock(&dev->power.lock); + else + spin_lock_irq(&dev->power.lock); } dev->power.idle_notification = false; diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 49502bc5360a..423fd56bf612 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -616,5 +616,16 @@ config MSM_SMD_PKT Enables userspace clients to read and write to some packet SMD ports via device interface for MSM chipset. +config TILE_SROM + bool "Character-device access via hypervisor to the Tilera SPI ROM" + depends on TILE + default y + ---help--- + This device provides character-level read-write access + to the SROM, typically via the "0", "1", and "2" devices + in /dev/srom/. The Tilera hypervisor makes the flash + device appear much like a simple EEPROM, and knows + how to partition a single ROM for multiple purposes. 
+ endmenu diff --git a/drivers/char/Makefile b/drivers/char/Makefile index 7a00672bd85d..32762ba769c2 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -63,3 +63,5 @@ obj-$(CONFIG_RAMOOPS) += ramoops.o obj-$(CONFIG_JS_RTC) += js-rtc.o js-rtc-y = rtc.o + +obj-$(CONFIG_TILE_SROM) += tile-srom.o diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c index fca0c51bbc90..810aff9e750f 100644 --- a/drivers/char/ramoops.c +++ b/drivers/char/ramoops.c @@ -147,6 +147,14 @@ static int __init ramoops_probe(struct platform_device *pdev) cxt->phys_addr = pdata->mem_address; cxt->record_size = pdata->record_size; cxt->dump_oops = pdata->dump_oops; + /* + * Update the module parameter variables as well so they are visible + * through /sys/module/ramoops/parameters/ + */ + mem_size = pdata->mem_size; + mem_address = pdata->mem_address; + record_size = pdata->record_size; + dump_oops = pdata->dump_oops; if (!request_mem_region(cxt->phys_addr, cxt->size, "ramoops")) { pr_err("request mem region failed\n"); diff --git a/drivers/char/random.c b/drivers/char/random.c index 729281961f22..c35a785005b0 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1300,345 +1300,14 @@ ctl_table random_table[] = { }; #endif /* CONFIG_SYSCTL */ -/******************************************************************** - * - * Random functions for networking - * - ********************************************************************/ - -/* - * TCP initial sequence number picking. This uses the random number - * generator to pick an initial secret value. This value is hashed - * along with the TCP endpoint information to provide a unique - * starting point for each pair of TCP endpoints. This defeats - * attacks which rely on guessing the initial TCP sequence number. - * This algorithm was suggested by Steve Bellovin. - * - * Using a very strong hash was taking an appreciable amount of the total - * TCP connection establishment time, so this is a weaker hash, - * compensated for by changing the secret periodically. - */ - -/* F, G and H are basic MD4 functions: selection, majority, parity */ -#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z)))) -#define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z))) -#define H(x, y, z) ((x) ^ (y) ^ (z)) - -/* - * The generic round function. The application is so specific that - * we don't bother protecting all the arguments with parens, as is generally - * good macro practice, in favor of extra legibility. 
- * Rotation is separate from addition to prevent recomputation - */ -#define ROUND(f, a, b, c, d, x, s) \ - (a += f(b, c, d) + x, a = (a << s) | (a >> (32 - s))) -#define K1 0 -#define K2 013240474631UL -#define K3 015666365641UL - -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) - -static __u32 twothirdsMD4Transform(__u32 const buf[4], __u32 const in[12]) -{ - __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3]; - - /* Round 1 */ - ROUND(F, a, b, c, d, in[ 0] + K1, 3); - ROUND(F, d, a, b, c, in[ 1] + K1, 7); - ROUND(F, c, d, a, b, in[ 2] + K1, 11); - ROUND(F, b, c, d, a, in[ 3] + K1, 19); - ROUND(F, a, b, c, d, in[ 4] + K1, 3); - ROUND(F, d, a, b, c, in[ 5] + K1, 7); - ROUND(F, c, d, a, b, in[ 6] + K1, 11); - ROUND(F, b, c, d, a, in[ 7] + K1, 19); - ROUND(F, a, b, c, d, in[ 8] + K1, 3); - ROUND(F, d, a, b, c, in[ 9] + K1, 7); - ROUND(F, c, d, a, b, in[10] + K1, 11); - ROUND(F, b, c, d, a, in[11] + K1, 19); - - /* Round 2 */ - ROUND(G, a, b, c, d, in[ 1] + K2, 3); - ROUND(G, d, a, b, c, in[ 3] + K2, 5); - ROUND(G, c, d, a, b, in[ 5] + K2, 9); - ROUND(G, b, c, d, a, in[ 7] + K2, 13); - ROUND(G, a, b, c, d, in[ 9] + K2, 3); - ROUND(G, d, a, b, c, in[11] + K2, 5); - ROUND(G, c, d, a, b, in[ 0] + K2, 9); - ROUND(G, b, c, d, a, in[ 2] + K2, 13); - ROUND(G, a, b, c, d, in[ 4] + K2, 3); - ROUND(G, d, a, b, c, in[ 6] + K2, 5); - ROUND(G, c, d, a, b, in[ 8] + K2, 9); - ROUND(G, b, c, d, a, in[10] + K2, 13); - - /* Round 3 */ - ROUND(H, a, b, c, d, in[ 3] + K3, 3); - ROUND(H, d, a, b, c, in[ 7] + K3, 9); - ROUND(H, c, d, a, b, in[11] + K3, 11); - ROUND(H, b, c, d, a, in[ 2] + K3, 15); - ROUND(H, a, b, c, d, in[ 6] + K3, 3); - ROUND(H, d, a, b, c, in[10] + K3, 9); - ROUND(H, c, d, a, b, in[ 1] + K3, 11); - ROUND(H, b, c, d, a, in[ 5] + K3, 15); - ROUND(H, a, b, c, d, in[ 9] + K3, 3); - ROUND(H, d, a, b, c, in[ 0] + K3, 9); - ROUND(H, c, d, a, b, in[ 4] + K3, 11); - ROUND(H, b, c, d, a, in[ 8] + K3, 15); - - return buf[1] + b; /* "most hashed" word */ - /* Alternative: return sum of all words? */ -} -#endif - -#undef ROUND -#undef F -#undef G -#undef H -#undef K1 -#undef K2 -#undef K3 - -/* This should not be decreased so low that ISNs wrap too fast. */ -#define REKEY_INTERVAL (300 * HZ) -/* - * Bit layout of the tcp sequence numbers (before adding current time): - * bit 24-31: increased after every key exchange - * bit 0-23: hash(source,dest) - * - * The implementation is similar to the algorithm described - * in the Appendix of RFC 1185, except that - * - it uses a 1 MHz clock instead of a 250 kHz clock - * - it performs a rekey every 5 minutes, which is equivalent - * to a (source,dest) tulple dependent forward jump of the - * clock by 0..2^(HASH_BITS+1) - * - * Thus the average ISN wraparound time is 68 minutes instead of - * 4.55 hours. - * - * SMP cleanup and lock avoidance with poor man's RCU. - * Manfred Spraul <manfred@colorfullife.com> - * - */ -#define COUNT_BITS 8 -#define COUNT_MASK ((1 << COUNT_BITS) - 1) -#define HASH_BITS 24 -#define HASH_MASK ((1 << HASH_BITS) - 1) +static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned; -static struct keydata { - __u32 count; /* already shifted to the final position */ - __u32 secret[12]; -} ____cacheline_aligned ip_keydata[2]; - -static unsigned int ip_cnt; - -static void rekey_seq_generator(struct work_struct *work); - -static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator); - -/* - * Lock avoidance: - * The ISN generation runs lockless - it's just a hash over random data. 
- * State changes happen every 5 minutes when the random key is replaced. - * Synchronization is performed by having two copies of the hash function - * state and rekey_seq_generator always updates the inactive copy. - * The copy is then activated by updating ip_cnt. - * The implementation breaks down if someone blocks the thread - * that processes SYN requests for more than 5 minutes. Should never - * happen, and even if that happens only a not perfectly compliant - * ISN is generated, nothing fatal. - */ -static void rekey_seq_generator(struct work_struct *work) +static int __init random_int_secret_init(void) { - struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)]; - - get_random_bytes(keyptr->secret, sizeof(keyptr->secret)); - keyptr->count = (ip_cnt & COUNT_MASK) << HASH_BITS; - smp_wmb(); - ip_cnt++; - schedule_delayed_work(&rekey_work, - round_jiffies_relative(REKEY_INTERVAL)); -} - -static inline struct keydata *get_keyptr(void) -{ - struct keydata *keyptr = &ip_keydata[ip_cnt & 1]; - - smp_rmb(); - - return keyptr; -} - -static __init int seqgen_init(void) -{ - rekey_seq_generator(NULL); + get_random_bytes(random_int_secret, sizeof(random_int_secret)); return 0; } -late_initcall(seqgen_init); - -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) -__u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr, - __be16 sport, __be16 dport) -{ - __u32 seq; - __u32 hash[12]; - struct keydata *keyptr = get_keyptr(); - - /* The procedure is the same as for IPv4, but addresses are longer. - * Thus we must use twothirdsMD4Transform. - */ - - memcpy(hash, saddr, 16); - hash[4] = ((__force u16)sport << 16) + (__force u16)dport; - memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7); - - seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK; - seq += keyptr->count; - - seq += ktime_to_ns(ktime_get_real()); - - return seq; -} -EXPORT_SYMBOL(secure_tcpv6_sequence_number); -#endif - -/* The code below is shamelessly stolen from secure_tcp_sequence_number(). - * All blames to Andrey V. Savochkin <saw@msu.ru>. - */ -__u32 secure_ip_id(__be32 daddr) -{ - struct keydata *keyptr; - __u32 hash[4]; - - keyptr = get_keyptr(); - - /* - * Pick a unique starting offset for each IP destination. - * The dest ip address is placed in the starting vector, - * which is then hashed with random data. - */ - hash[0] = (__force __u32)daddr; - hash[1] = keyptr->secret[9]; - hash[2] = keyptr->secret[10]; - hash[3] = keyptr->secret[11]; - - return half_md4_transform(hash, keyptr->secret); -} - -__u32 secure_ipv6_id(const __be32 daddr[4]) -{ - const struct keydata *keyptr; - __u32 hash[4]; - - keyptr = get_keyptr(); - - hash[0] = (__force __u32)daddr[0]; - hash[1] = (__force __u32)daddr[1]; - hash[2] = (__force __u32)daddr[2]; - hash[3] = (__force __u32)daddr[3]; - - return half_md4_transform(hash, keyptr->secret); -} - -#ifdef CONFIG_INET - -__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, - __be16 sport, __be16 dport) -{ - __u32 seq; - __u32 hash[4]; - struct keydata *keyptr = get_keyptr(); - - /* - * Pick a unique starting offset for each TCP connection endpoints - * (saddr, daddr, sport, dport). - * Note that the words are placed into the starting vector, which is - * then mixed with a partial MD4 over random data. 
- */ - hash[0] = (__force u32)saddr; - hash[1] = (__force u32)daddr; - hash[2] = ((__force u16)sport << 16) + (__force u16)dport; - hash[3] = keyptr->secret[11]; - - seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK; - seq += keyptr->count; - /* - * As close as possible to RFC 793, which - * suggests using a 250 kHz clock. - * Further reading shows this assumes 2 Mb/s networks. - * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate. - * For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but - * we also need to limit the resolution so that the u32 seq - * overlaps less than one time per MSL (2 minutes). - * Choosing a clock of 64 ns period is OK. (period of 274 s) - */ - seq += ktime_to_ns(ktime_get_real()) >> 6; - - return seq; -} - -/* Generate secure starting point for ephemeral IPV4 transport port search */ -u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport) -{ - struct keydata *keyptr = get_keyptr(); - u32 hash[4]; - - /* - * Pick a unique starting offset for each ephemeral port search - * (saddr, daddr, dport) and 48bits of random data. - */ - hash[0] = (__force u32)saddr; - hash[1] = (__force u32)daddr; - hash[2] = (__force u32)dport ^ keyptr->secret[10]; - hash[3] = keyptr->secret[11]; - - return half_md4_transform(hash, keyptr->secret); -} -EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral); - -#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) -u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, - __be16 dport) -{ - struct keydata *keyptr = get_keyptr(); - u32 hash[12]; - - memcpy(hash, saddr, 16); - hash[4] = (__force u32)dport; - memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7); - - return twothirdsMD4Transform((const __u32 *)daddr, hash); -} -#endif - -#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE) -/* Similar to secure_tcp_sequence_number but generate a 48 bit value - * bit's 32-47 increase every key exchange - * 0-31 hash(source, dest) - */ -u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr, - __be16 sport, __be16 dport) -{ - u64 seq; - __u32 hash[4]; - struct keydata *keyptr = get_keyptr(); - - hash[0] = (__force u32)saddr; - hash[1] = (__force u32)daddr; - hash[2] = ((__force u16)sport << 16) + (__force u16)dport; - hash[3] = keyptr->secret[11]; - - seq = half_md4_transform(hash, keyptr->secret); - seq |= ((u64)keyptr->count) << (32 - HASH_BITS); - - seq += ktime_to_ns(ktime_get_real()); - seq &= (1ull << 48) - 1; - - return seq; -} -EXPORT_SYMBOL(secure_dccp_sequence_number); -#endif - -#endif /* CONFIG_INET */ - +late_initcall(random_int_secret_init); /* * Get a random word for internal kernel use only. Similar to urandom but @@ -1646,17 +1315,15 @@ EXPORT_SYMBOL(secure_dccp_sequence_number); * value is not cryptographically secure but for several uses the cost of * depleting entropy is too high */ -DEFINE_PER_CPU(__u32 [4], get_random_int_hash); +DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash); unsigned int get_random_int(void) { - struct keydata *keyptr; __u32 *hash = get_cpu_var(get_random_int_hash); - int ret; + unsigned int ret; - keyptr = get_keyptr(); hash[0] += current->pid + jiffies + get_cycles(); - - ret = half_md4_transform(hash, keyptr->secret); + md5_transform(hash, random_int_secret); + ret = hash[0]; put_cpu_var(get_random_int_hash); return ret; diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c new file mode 100644 index 000000000000..cf3ee008dca2 --- /dev/null +++ b/drivers/char/tile-srom.c @@ -0,0 +1,481 @@ +/* + * Copyright 2011 Tilera Corporation. 
All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for + * more details. + * + * SPI Flash ROM driver + * + * This source code is derived from code provided in "Linux Device + * Drivers, Third Edition", by Jonathan Corbet, Alessandro Rubini, and + * Greg Kroah-Hartman, published by O'Reilly Media, Inc. + */ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/init.h> +#include <linux/kernel.h> /* printk() */ +#include <linux/slab.h> /* kmalloc() */ +#include <linux/fs.h> /* everything... */ +#include <linux/errno.h> /* error codes */ +#include <linux/types.h> /* size_t */ +#include <linux/proc_fs.h> +#include <linux/fcntl.h> /* O_ACCMODE */ +#include <linux/aio.h> +#include <linux/pagemap.h> +#include <linux/hugetlb.h> +#include <linux/uaccess.h> +#include <linux/platform_device.h> +#include <hv/hypervisor.h> +#include <linux/ioctl.h> +#include <linux/cdev.h> +#include <linux/delay.h> +#include <hv/drv_srom_intf.h> + +/* + * Size of our hypervisor I/O requests. We break up large transfers + * so that we don't spend large uninterrupted spans of time in the + * hypervisor. Erasing an SROM sector takes a significant fraction of + * a second, so if we allowed the user to, say, do one I/O to write the + * entire ROM, we'd get soft lockup timeouts, or worse. + */ +#define SROM_CHUNK_SIZE ((size_t)4096) + +/* + * When hypervisor is busy (e.g. erasing), poll the status periodically. + */ + +/* + * Interval to poll the state in msec + */ +#define SROM_WAIT_TRY_INTERVAL 20 + +/* + * Maximum times to poll the state + */ +#define SROM_MAX_WAIT_TRY_TIMES 1000 + +struct srom_dev { + int hv_devhdl; /* Handle for hypervisor device */ + u32 total_size; /* Size of this device */ + u32 sector_size; /* Size of a sector */ + u32 page_size; /* Size of a page */ + struct mutex lock; /* Allow only one accessor at a time */ +}; + +static int srom_major; /* Dynamic major by default */ +module_param(srom_major, int, 0); +MODULE_AUTHOR("Tilera Corporation"); +MODULE_LICENSE("GPL"); + +static int srom_devs; /* Number of SROM partitions */ +static struct cdev srom_cdev; +static struct class *srom_class; +static struct srom_dev *srom_devices; + +/* + * Handle calling the hypervisor and managing EAGAIN/EBUSY. 
+ */ + +static ssize_t _srom_read(int hv_devhdl, void *buf, + loff_t off, size_t count) +{ + int retval, retries = SROM_MAX_WAIT_TRY_TIMES; + for (;;) { + retval = hv_dev_pread(hv_devhdl, 0, (HV_VirtAddr)buf, + count, off); + if (retval >= 0) + return retval; + if (retval == HV_EAGAIN) + continue; + if (retval == HV_EBUSY && --retries > 0) { + msleep(SROM_WAIT_TRY_INTERVAL); + continue; + } + pr_err("_srom_read: error %d\n", retval); + return -EIO; + } +} + +static ssize_t _srom_write(int hv_devhdl, const void *buf, + loff_t off, size_t count) +{ + int retval, retries = SROM_MAX_WAIT_TRY_TIMES; + for (;;) { + retval = hv_dev_pwrite(hv_devhdl, 0, (HV_VirtAddr)buf, + count, off); + if (retval >= 0) + return retval; + if (retval == HV_EAGAIN) + continue; + if (retval == HV_EBUSY && --retries > 0) { + msleep(SROM_WAIT_TRY_INTERVAL); + continue; + } + pr_err("_srom_write: error %d\n", retval); + return -EIO; + } +} + +/** + * srom_open() - Device open routine. + * @inode: Inode for this device. + * @filp: File for this specific open of the device. + * + * Returns zero, or an error code. + */ +static int srom_open(struct inode *inode, struct file *filp) +{ + filp->private_data = &srom_devices[iminor(inode)]; + return 0; +} + + +/** + * srom_release() - Device release routine. + * @inode: Inode for this device. + * @filp: File for this specific open of the device. + * + * Returns zero, or an error code. + */ +static int srom_release(struct inode *inode, struct file *filp) +{ + struct srom_dev *srom = filp->private_data; + char dummy; + + /* Make sure we've flushed anything written to the ROM. */ + mutex_lock(&srom->lock); + if (srom->hv_devhdl >= 0) + _srom_write(srom->hv_devhdl, &dummy, SROM_FLUSH_OFF, 1); + mutex_unlock(&srom->lock); + + filp->private_data = NULL; + + return 0; +} + + +/** + * srom_read() - Read data from the device. + * @filp: File for this specific open of the device. + * @buf: User's data buffer. + * @count: Number of bytes requested. + * @f_pos: File position. + * + * Returns number of bytes read, or an error code. + */ +static ssize_t srom_read(struct file *filp, char __user *buf, + size_t count, loff_t *f_pos) +{ + int retval = 0; + void *kernbuf; + struct srom_dev *srom = filp->private_data; + + kernbuf = kmalloc(SROM_CHUNK_SIZE, GFP_KERNEL); + if (!kernbuf) + return -ENOMEM; + + if (mutex_lock_interruptible(&srom->lock)) { + retval = -ERESTARTSYS; + kfree(kernbuf); + return retval; + } + + while (count) { + int hv_retval; + int bytes_this_pass = min(count, SROM_CHUNK_SIZE); + + hv_retval = _srom_read(srom->hv_devhdl, kernbuf, + *f_pos, bytes_this_pass); + if (hv_retval > 0) { + if (copy_to_user(buf, kernbuf, hv_retval) != 0) { + retval = -EFAULT; + break; + } + } else if (hv_retval <= 0) { + if (retval == 0) + retval = hv_retval; + break; + } + + retval += hv_retval; + *f_pos += hv_retval; + buf += hv_retval; + count -= hv_retval; + } + + mutex_unlock(&srom->lock); + kfree(kernbuf); + + return retval; +} + +/** + * srom_write() - Write data to the device. + * @filp: File for this specific open of the device. + * @buf: User's data buffer. + * @count: Number of bytes requested. + * @f_pos: File position. + * + * Returns number of bytes written, or an error code. 
+ */ +static ssize_t srom_write(struct file *filp, const char __user *buf, + size_t count, loff_t *f_pos) +{ + int retval = 0; + void *kernbuf; + struct srom_dev *srom = filp->private_data; + + kernbuf = kmalloc(SROM_CHUNK_SIZE, GFP_KERNEL); + if (!kernbuf) + return -ENOMEM; + + if (mutex_lock_interruptible(&srom->lock)) { + retval = -ERESTARTSYS; + kfree(kernbuf); + return retval; + } + + while (count) { + int hv_retval; + int bytes_this_pass = min(count, SROM_CHUNK_SIZE); + + if (copy_from_user(kernbuf, buf, bytes_this_pass) != 0) { + retval = -EFAULT; + break; + } + + hv_retval = _srom_write(srom->hv_devhdl, kernbuf, + *f_pos, bytes_this_pass); + if (hv_retval <= 0) { + if (retval == 0) + retval = hv_retval; + break; + } + + retval += hv_retval; + *f_pos += hv_retval; + buf += hv_retval; + count -= hv_retval; + } + + mutex_unlock(&srom->lock); + kfree(kernbuf); + + return retval; +} + +/* Provide our own implementation so we can use srom->total_size. */ +loff_t srom_llseek(struct file *filp, loff_t offset, int origin) +{ + struct srom_dev *srom = filp->private_data; + + if (mutex_lock_interruptible(&srom->lock)) + return -ERESTARTSYS; + + switch (origin) { + case SEEK_END: + offset += srom->total_size; + break; + case SEEK_CUR: + offset += filp->f_pos; + break; + } + + if (offset < 0 || offset > srom->total_size) { + offset = -EINVAL; + } else { + filp->f_pos = offset; + filp->f_version = 0; + } + + mutex_unlock(&srom->lock); + + return offset; +} + +static ssize_t total_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srom_dev *srom = dev_get_drvdata(dev); + return sprintf(buf, "%u\n", srom->total_size); +} + +static ssize_t sector_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srom_dev *srom = dev_get_drvdata(dev); + return sprintf(buf, "%u\n", srom->sector_size); +} + +static ssize_t page_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct srom_dev *srom = dev_get_drvdata(dev); + return sprintf(buf, "%u\n", srom->page_size); +} + +static struct device_attribute srom_dev_attrs[] = { + __ATTR(total_size, S_IRUGO, total_show, NULL), + __ATTR(sector_size, S_IRUGO, sector_show, NULL), + __ATTR(page_size, S_IRUGO, page_show, NULL), + __ATTR_NULL +}; + +static char *srom_devnode(struct device *dev, mode_t *mode) +{ + *mode = S_IRUGO | S_IWUSR; + return kasprintf(GFP_KERNEL, "srom/%s", dev_name(dev)); +} + +/* + * The fops + */ +static const struct file_operations srom_fops = { + .owner = THIS_MODULE, + .llseek = srom_llseek, + .read = srom_read, + .write = srom_write, + .open = srom_open, + .release = srom_release, +}; + +/** + * srom_setup_minor() - Initialize per-minor information. + * @srom: Per-device SROM state. + * @index: Device to set up. + */ +static int srom_setup_minor(struct srom_dev *srom, int index) +{ + struct device *dev; + int devhdl = srom->hv_devhdl; + + mutex_init(&srom->lock); + + if (_srom_read(devhdl, &srom->total_size, + SROM_TOTAL_SIZE_OFF, sizeof(srom->total_size)) < 0) + return -EIO; + if (_srom_read(devhdl, &srom->sector_size, + SROM_SECTOR_SIZE_OFF, sizeof(srom->sector_size)) < 0) + return -EIO; + if (_srom_read(devhdl, &srom->page_size, + SROM_PAGE_SIZE_OFF, sizeof(srom->page_size)) < 0) + return -EIO; + + dev = device_create(srom_class, &platform_bus, + MKDEV(srom_major, index), srom, "%d", index); + return IS_ERR(dev) ? PTR_ERR(dev) : 0; +} + +/** srom_init() - Initialize the driver's module. 
*/ +static int srom_init(void) +{ + int result, i; + dev_t dev = MKDEV(srom_major, 0); + + /* + * Start with a plausible number of partitions; the krealloc() call + * below will yield about log(srom_devs) additional allocations. + */ + srom_devices = kzalloc(4 * sizeof(struct srom_dev), GFP_KERNEL); + + /* Discover the number of srom partitions. */ + for (i = 0; ; i++) { + int devhdl; + char buf[20]; + struct srom_dev *new_srom_devices = + krealloc(srom_devices, (i+1) * sizeof(struct srom_dev), + GFP_KERNEL | __GFP_ZERO); + if (!new_srom_devices) { + result = -ENOMEM; + goto fail_mem; + } + srom_devices = new_srom_devices; + sprintf(buf, "srom/0/%d", i); + devhdl = hv_dev_open((HV_VirtAddr)buf, 0); + if (devhdl < 0) { + if (devhdl != HV_ENODEV) + pr_notice("srom/%d: hv_dev_open failed: %d.\n", + i, devhdl); + break; + } + srom_devices[i].hv_devhdl = devhdl; + } + srom_devs = i; + + /* Bail out early if we have no partitions at all. */ + if (srom_devs == 0) { + result = -ENODEV; + goto fail_mem; + } + + /* Register our major, and accept a dynamic number. */ + if (srom_major) + result = register_chrdev_region(dev, srom_devs, "srom"); + else { + result = alloc_chrdev_region(&dev, 0, srom_devs, "srom"); + srom_major = MAJOR(dev); + } + if (result < 0) + goto fail_mem; + + /* Register a character device. */ + cdev_init(&srom_cdev, &srom_fops); + srom_cdev.owner = THIS_MODULE; + srom_cdev.ops = &srom_fops; + result = cdev_add(&srom_cdev, dev, srom_devs); + if (result < 0) + goto fail_chrdev; + + /* Create a sysfs class. */ + srom_class = class_create(THIS_MODULE, "srom"); + if (IS_ERR(srom_class)) { + result = PTR_ERR(srom_class); + goto fail_cdev; + } + srom_class->dev_attrs = srom_dev_attrs; + srom_class->devnode = srom_devnode; + + /* Do per-partition initialization */ + for (i = 0; i < srom_devs; i++) { + result = srom_setup_minor(srom_devices + i, i); + if (result < 0) + goto fail_class; + } + + return 0; + +fail_class: + for (i = 0; i < srom_devs; i++) + device_destroy(srom_class, MKDEV(srom_major, i)); + class_destroy(srom_class); +fail_cdev: + cdev_del(&srom_cdev); +fail_chrdev: + unregister_chrdev_region(dev, srom_devs); +fail_mem: + kfree(srom_devices); + return result; +} + +/** srom_cleanup() - Clean up the driver's module. 
*/ +static void srom_cleanup(void) +{ + int i; + for (i = 0; i < srom_devs; i++) + device_destroy(srom_class, MKDEV(srom_major, i)); + class_destroy(srom_class); + cdev_del(&srom_cdev); + unregister_chrdev_region(MKDEV(srom_major, 0), srom_devs); + kfree(srom_devices); +} + +module_init(srom_init); +module_exit(srom_cleanup); diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 7fc2f108f490..3f4051a7c5a7 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -80,7 +80,7 @@ enum tis_defaults { static LIST_HEAD(tis_chips); static DEFINE_SPINLOCK(tis_lock); -#ifdef CONFIG_PNP +#if defined(CONFIG_PNP) && defined(CONFIG_ACPI) static int is_itpm(struct pnp_dev *dev) { struct acpi_device *acpi = pnp_acpi_device(dev); @@ -93,6 +93,11 @@ static int is_itpm(struct pnp_dev *dev) return 0; } +#else +static inline int is_itpm(struct pnp_dev *dev) +{ + return 0; +} #endif static int check_locality(struct tpm_chip *chip, int l) diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index bf5092455a8f..d4c542372886 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -25,9 +25,19 @@ DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices); DEFINE_MUTEX(cpuidle_lock); LIST_HEAD(cpuidle_detected_devices); -static void (*pm_idle_old)(void); static int enabled_devices; +static int off __read_mostly; +static int initialized __read_mostly; + +int cpuidle_disabled(void) +{ + return off; +} +void disable_cpuidle(void) +{ + off = 1; +} #if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT) static void cpuidle_kick_cpus(void) @@ -46,25 +56,23 @@ static int __cpuidle_register_device(struct cpuidle_device *dev); * cpuidle_idle_call - the main idle loop * * NOTE: no locks or semaphores should be used here + * return non-zero on failure */ -static void cpuidle_idle_call(void) +int cpuidle_idle_call(void) { struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); struct cpuidle_state *target_state; int next_state; + if (off) + return -ENODEV; + + if (!initialized) + return -ENODEV; + /* check if the device is ready */ - if (!dev || !dev->enabled) { - if (pm_idle_old) - pm_idle_old(); - else -#if defined(CONFIG_ARCH_HAS_DEFAULT_IDLE) - default_idle(); -#else - local_irq_enable(); -#endif - return; - } + if (!dev || !dev->enabled) + return -EBUSY; #if 0 /* shows regressions, re-enable for 2.6.29 */ @@ -89,7 +97,7 @@ static void cpuidle_idle_call(void) next_state = cpuidle_curr_governor->select(dev); if (need_resched()) { local_irq_enable(); - return; + return 0; } target_state = &dev->states[next_state]; @@ -114,6 +122,8 @@ static void cpuidle_idle_call(void) /* give the governor an opportunity to reflect on the outcome */ if (cpuidle_curr_governor->reflect) cpuidle_curr_governor->reflect(dev); + + return 0; } /** @@ -121,10 +131,10 @@ static void cpuidle_idle_call(void) */ void cpuidle_install_idle_handler(void) { - if (enabled_devices && (pm_idle != cpuidle_idle_call)) { + if (enabled_devices) { /* Make sure all changes finished before we switch to new idle */ smp_wmb(); - pm_idle = cpuidle_idle_call; + initialized = 1; } } @@ -133,8 +143,8 @@ void cpuidle_install_idle_handler(void) */ void cpuidle_uninstall_idle_handler(void) { - if (enabled_devices && pm_idle_old && (pm_idle != pm_idle_old)) { - pm_idle = pm_idle_old; + if (enabled_devices) { + initialized = 0; cpuidle_kick_cpus(); } } @@ -427,7 +437,8 @@ static int __init cpuidle_init(void) { int ret; - pm_idle_old = pm_idle; + if (cpuidle_disabled()) + return -ENODEV; ret = 
cpuidle_add_class_sysfs(&cpu_sysdev_class); if (ret) @@ -438,4 +449,5 @@ static int __init cpuidle_init(void) return 0; } +module_param(off, int, 0444); core_initcall(cpuidle_init); diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h index 33e50d556f17..38c3fd8b9d76 100644 --- a/drivers/cpuidle/cpuidle.h +++ b/drivers/cpuidle/cpuidle.h @@ -13,6 +13,7 @@ extern struct list_head cpuidle_governors; extern struct list_head cpuidle_detected_devices; extern struct mutex cpuidle_lock; extern spinlock_t cpuidle_driver_lock; +extern int cpuidle_disabled(void); /* idle loop */ extern void cpuidle_install_idle_handler(void); diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index fd1601e3d125..3f7e3cedd133 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c @@ -26,6 +26,9 @@ int cpuidle_register_driver(struct cpuidle_driver *drv) if (!drv) return -EINVAL; + if (cpuidle_disabled()) + return -ENODEV; + spin_lock(&cpuidle_driver_lock); if (cpuidle_curr_driver) { spin_unlock(&cpuidle_driver_lock); diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c index 724c164d31c9..ea2f8e7aa24a 100644 --- a/drivers/cpuidle/governor.c +++ b/drivers/cpuidle/governor.c @@ -81,6 +81,9 @@ int cpuidle_register_governor(struct cpuidle_governor *gov) if (!gov || !gov->select) return -EINVAL; + if (cpuidle_disabled()) + return -ENODEV; + mutex_lock(&cpuidle_lock); if (__cpuidle_find_governor(gov->name) == NULL) { ret = 0; diff --git a/drivers/dma/TODO b/drivers/dma/TODO index a4af8589330c..734ed0206cd5 100644 --- a/drivers/dma/TODO +++ b/drivers/dma/TODO @@ -9,6 +9,5 @@ TODO for slave dma - mxs-dma.c - dw_dmac - intel_mid_dma - - ste_dma40 4. Check other subsystems for dma drivers and merge/move to dmaengine 5. Remove dma_slave_config's dma direction. diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index e6d7228b1479..196a7378d332 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -156,14 +156,10 @@ struct pl08x_driver_data { #define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */ #define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT) -/* Minimum period between work queue runs */ -#define PL08X_WQ_PERIODMIN 20 - /* Size (bytes) of each LLI buffer allocated for one transfer */ # define PL08X_LLI_TSFR_SIZE 0x2000 /* Maximum times we call dma_pool_alloc on this pool without freeing */ -#define PL08X_MAX_ALLOCS 0x40 #define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli)) #define PL08X_ALIGN 8 @@ -495,10 +491,10 @@ static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth, struct pl08x_lli_build_data { struct pl08x_txd *txd; - struct pl08x_driver_data *pl08x; struct pl08x_bus_data srcbus; struct pl08x_bus_data dstbus; size_t remainder; + u32 lli_bus; }; /* @@ -551,8 +547,7 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd, llis_va[num_llis].src = bd->srcbus.addr; llis_va[num_llis].dst = bd->dstbus.addr; llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli); - if (bd->pl08x->lli_buses & PL08X_AHB2) - llis_va[num_llis].lli |= PL080_LLI_LM_AHB2; + llis_va[num_llis].lli |= bd->lli_bus; if (cctl & PL080_CONTROL_SRC_INCR) bd->srcbus.addr += len; @@ -605,9 +600,9 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, cctl = txd->cctl; bd.txd = txd; - bd.pl08x = pl08x; bd.srcbus.addr = txd->src_addr; bd.dstbus.addr = txd->dst_addr; + bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? 
PL080_LLI_LM_AHB2 : 0; /* Find maximum width of the source bus */ bd.srcbus.maxwidth = @@ -622,25 +617,15 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, /* Set up the bus widths to the maximum */ bd.srcbus.buswidth = bd.srcbus.maxwidth; bd.dstbus.buswidth = bd.dstbus.maxwidth; - dev_vdbg(&pl08x->adev->dev, - "%s source bus is %d bytes wide, dest bus is %d bytes wide\n", - __func__, bd.srcbus.buswidth, bd.dstbus.buswidth); - /* * Bytes transferred == tsize * MIN(buswidths), not max(buswidths) */ max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) * PL080_CONTROL_TRANSFER_SIZE_MASK; - dev_vdbg(&pl08x->adev->dev, - "%s max bytes per lli = %zu\n", - __func__, max_bytes_per_lli); /* We need to count this down to zero */ bd.remainder = txd->len; - dev_vdbg(&pl08x->adev->dev, - "%s remainder = %zu\n", - __func__, bd.remainder); /* * Choose bus to align to @@ -649,6 +634,16 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, */ pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); + dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu llimax=%zu\n", + bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "", + bd.srcbus.buswidth, + bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "", + bd.dstbus.buswidth, + bd.remainder, max_bytes_per_lli); + dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n", + mbus == &bd.srcbus ? "src" : "dst", + sbus == &bd.srcbus ? "src" : "dst"); + if (txd->len < mbus->buswidth) { /* Less than a bus width available - send as single bytes */ while (bd.remainder) { @@ -840,15 +835,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, { int i; + dev_vdbg(&pl08x->adev->dev, + "%-3s %-9s %-10s %-10s %-10s %s\n", + "lli", "", "csrc", "cdst", "clli", "cctl"); for (i = 0; i < num_llis; i++) { dev_vdbg(&pl08x->adev->dev, - "lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n", - i, - &llis_va[i], - llis_va[i].src, - llis_va[i].dst, - llis_va[i].cctl, - llis_va[i].lli + "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n", + i, &llis_va[i], llis_va[i].src, + llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl ); } } @@ -1054,64 +1048,105 @@ pl08x_dma_tx_status(struct dma_chan *chan, /* PrimeCell DMA extension */ struct burst_table { - int burstwords; + u32 burstwords; u32 reg; }; static const struct burst_table burst_sizes[] = { { .burstwords = 256, - .reg = (PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT) | - (PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT), + .reg = PL080_BSIZE_256, }, { .burstwords = 128, - .reg = (PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT) | - (PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT), + .reg = PL080_BSIZE_128, }, { .burstwords = 64, - .reg = (PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT) | - (PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT), + .reg = PL080_BSIZE_64, }, { .burstwords = 32, - .reg = (PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT) | - (PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT), + .reg = PL080_BSIZE_32, }, { .burstwords = 16, - .reg = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) | - (PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT), + .reg = PL080_BSIZE_16, }, { .burstwords = 8, - .reg = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) | - (PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT), + .reg = PL080_BSIZE_8, }, { .burstwords = 4, - .reg = (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT) | - (PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT), + .reg = PL080_BSIZE_4, }, { - .burstwords = 1, - .reg = (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) | - 
(PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT), + .burstwords = 0, + .reg = PL080_BSIZE_1, }, }; +/* + * Given the source and destination available bus masks, select which + * will be routed to each port. We try to have source and destination + * on separate ports, but always respect the allowable settings. + */ +static u32 pl08x_select_bus(u8 src, u8 dst) +{ + u32 cctl = 0; + + if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1))) + cctl |= PL080_CONTROL_DST_AHB2; + if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2))) + cctl |= PL080_CONTROL_SRC_AHB2; + + return cctl; +} + +static u32 pl08x_cctl(u32 cctl) +{ + cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 | + PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR | + PL080_CONTROL_PROT_MASK); + + /* Access the cell in privileged mode, non-bufferable, non-cacheable */ + return cctl | PL080_CONTROL_PROT_SYS; +} + +static u32 pl08x_width(enum dma_slave_buswidth width) +{ + switch (width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + return PL080_WIDTH_8BIT; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + return PL080_WIDTH_16BIT; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + return PL080_WIDTH_32BIT; + default: + return ~0; + } +} + +static u32 pl08x_burst(u32 maxburst) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(burst_sizes); i++) + if (burst_sizes[i].burstwords <= maxburst) + break; + + return burst_sizes[i].reg; +} + static int dma_set_runtime_config(struct dma_chan *chan, struct dma_slave_config *config) { struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); struct pl08x_driver_data *pl08x = plchan->host; - struct pl08x_channel_data *cd = plchan->cd; enum dma_slave_buswidth addr_width; - dma_addr_t addr; - u32 maxburst; + u32 width, burst, maxburst; u32 cctl = 0; - int i; if (!plchan->slave) return -EINVAL; @@ -1119,11 +1154,9 @@ static int dma_set_runtime_config(struct dma_chan *chan, /* Transfer direction */ plchan->runtime_direction = config->direction; if (config->direction == DMA_TO_DEVICE) { - addr = config->dst_addr; addr_width = config->dst_addr_width; maxburst = config->dst_maxburst; } else if (config->direction == DMA_FROM_DEVICE) { - addr = config->src_addr; addr_width = config->src_addr_width; maxburst = config->src_maxburst; } else { @@ -1132,46 +1165,40 @@ static int dma_set_runtime_config(struct dma_chan *chan, return -EINVAL; } - switch (addr_width) { - case DMA_SLAVE_BUSWIDTH_1_BYTE: - cctl |= (PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT) | - (PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT); - break; - case DMA_SLAVE_BUSWIDTH_2_BYTES: - cctl |= (PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT) | - (PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT); - break; - case DMA_SLAVE_BUSWIDTH_4_BYTES: - cctl |= (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) | - (PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT); - break; - default: + width = pl08x_width(addr_width); + if (width == ~0) { dev_err(&pl08x->adev->dev, "bad runtime_config: alien address width\n"); return -EINVAL; } + cctl |= width << PL080_CONTROL_SWIDTH_SHIFT; + cctl |= width << PL080_CONTROL_DWIDTH_SHIFT; + /* - * Now decide on a maxburst: * If this channel will only request single transfers, set this * down to ONE element. Also select one element if no maxburst * is specified. 
*/ - if (plchan->cd->single || maxburst == 0) { - cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) | - (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT); + if (plchan->cd->single) + maxburst = 1; + + burst = pl08x_burst(maxburst); + cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT; + cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT; + + if (plchan->runtime_direction == DMA_FROM_DEVICE) { + plchan->src_addr = config->src_addr; + plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR | + pl08x_select_bus(plchan->cd->periph_buses, + pl08x->mem_buses); } else { - for (i = 0; i < ARRAY_SIZE(burst_sizes); i++) - if (burst_sizes[i].burstwords <= maxburst) - break; - cctl |= burst_sizes[i].reg; + plchan->dst_addr = config->dst_addr; + plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR | + pl08x_select_bus(pl08x->mem_buses, + plchan->cd->periph_buses); } - plchan->runtime_addr = addr; - - /* Modify the default channel data to fit PrimeCell request */ - cd->cctl = cctl; - dev_dbg(&pl08x->adev->dev, "configured channel %s (%s) for %s, data width %d, " "maxburst %d words, LE, CCTL=0x%08x\n", @@ -1270,23 +1297,6 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, return 0; } -/* - * Given the source and destination available bus masks, select which - * will be routed to each port. We try to have source and destination - * on separate ports, but always respect the allowable settings. - */ -static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst) -{ - u32 cctl = 0; - - if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1))) - cctl |= PL080_CONTROL_DST_AHB2; - if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2))) - cctl |= PL080_CONTROL_SRC_AHB2; - - return cctl; -} - static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, unsigned long flags) { @@ -1338,8 +1348,8 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; if (pl08x->vd->dualmaster) - txd->cctl |= pl08x_select_bus(pl08x, - pl08x->mem_buses, pl08x->mem_buses); + txd->cctl |= pl08x_select_bus(pl08x->mem_buses, + pl08x->mem_buses); ret = pl08x_prep_channel_resources(plchan, txd); if (ret) @@ -1356,7 +1366,6 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); struct pl08x_driver_data *pl08x = plchan->host; struct pl08x_txd *txd; - u8 src_buses, dst_buses; int ret; /* @@ -1390,42 +1399,22 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( txd->direction = direction; txd->len = sgl->length; - txd->cctl = plchan->cd->cctl & - ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 | - PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR | - PL080_CONTROL_PROT_MASK); - - /* Access the cell in privileged mode, non-bufferable, non-cacheable */ - txd->cctl |= PL080_CONTROL_PROT_SYS; - if (direction == DMA_TO_DEVICE) { txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT; - txd->cctl |= PL080_CONTROL_SRC_INCR; + txd->cctl = plchan->dst_cctl; txd->src_addr = sgl->dma_address; - if (plchan->runtime_addr) - txd->dst_addr = plchan->runtime_addr; - else - txd->dst_addr = plchan->cd->addr; - src_buses = pl08x->mem_buses; - dst_buses = plchan->cd->periph_buses; + txd->dst_addr = plchan->dst_addr; } else if (direction == DMA_FROM_DEVICE) { txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; - txd->cctl |= PL080_CONTROL_DST_INCR; - if (plchan->runtime_addr) - txd->src_addr = plchan->runtime_addr; - 
else - txd->src_addr = plchan->cd->addr; + txd->cctl = plchan->src_cctl; + txd->src_addr = plchan->src_addr; txd->dst_addr = sgl->dma_address; - src_buses = plchan->cd->periph_buses; - dst_buses = pl08x->mem_buses; } else { dev_err(&pl08x->adev->dev, "%s direction unsupported\n", __func__); return NULL; } - txd->cctl |= pl08x_select_bus(pl08x, src_buses, dst_buses); - ret = pl08x_prep_channel_resources(plchan, txd); if (ret) return NULL; @@ -1676,6 +1665,20 @@ static irqreturn_t pl08x_irq(int irq, void *dev) return mask ? IRQ_HANDLED : IRQ_NONE; } +static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan) +{ + u32 cctl = pl08x_cctl(chan->cd->cctl); + + chan->slave = true; + chan->name = chan->cd->bus_id; + chan->src_addr = chan->cd->addr; + chan->dst_addr = chan->cd->addr; + chan->src_cctl = cctl | PL080_CONTROL_DST_INCR | + pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses); + chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR | + pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses); +} + /* * Initialise the DMAC memcpy/slave channels. * Make a local wrapper to hold required data @@ -1707,9 +1710,8 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, chan->state = PL08X_CHAN_IDLE; if (slave) { - chan->slave = true; - chan->name = pl08x->pd->slave_channels[i].bus_id; chan->cd = &pl08x->pd->slave_channels[i]; + pl08x_dma_slave_init(chan); } else { chan->cd = &pl08x->pd->memcpy_channel; chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 36144f88d718..6a483eac7b3f 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -1216,7 +1216,7 @@ static int __init at_dma_probe(struct platform_device *pdev) atdma->dma_common.cap_mask = pdata->cap_mask; atdma->all_chan_mask = (1 << pdata->nr_channels) - 1; - size = io->end - io->start + 1; + size = resource_size(io); if (!request_mem_region(io->start, size, pdev->dev.driver->name)) { err = -EBUSY; goto err_kfree; @@ -1362,7 +1362,7 @@ static int __exit at_dma_remove(struct platform_device *pdev) atdma->regs = NULL; io = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(io->start, io->end - io->start + 1); + release_mem_region(io->start, resource_size(io)); kfree(atdma); diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index a92d95eac86b..4234f416ef11 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -41,6 +41,8 @@ struct coh901318_desc { struct coh901318_lli *lli; enum dma_data_direction dir; unsigned long flags; + u32 head_config; + u32 head_ctrl; }; struct coh901318_base { @@ -661,6 +663,9 @@ static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc) coh901318_desc_submit(cohc, cohd); + /* Program the transaction head */ + coh901318_set_conf(cohc, cohd->head_config); + coh901318_set_ctrl(cohc, cohd->head_ctrl); coh901318_prep_linked_list(cohc, cohd->lli); /* start dma job on this channel */ @@ -1091,8 +1096,6 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, } else goto err_direction; - coh901318_set_conf(cohc, config); - /* The dma only supports transmitting packages up to * MAX_DMA_PACKET_SIZE. Calculate to total number of * dma elemts required to send the entire sg list @@ -1129,16 +1132,18 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, if (ret) goto err_lli_fill; - /* - * Set the default ctrl for the channel to the one from the lli, - * things may have changed due to odd buffer alignment etc. 
- */ - coh901318_set_ctrl(cohc, lli->control); COH_DBG(coh901318_list_print(cohc, lli)); /* Pick a descriptor to handle this transfer */ cohd = coh901318_desc_get(cohc); + cohd->head_config = config; + /* + * Set the default head ctrl for the channel to the one from the + * lli, things may have changed due to odd buffer alignment + * etc. + */ + cohd->head_ctrl = lli->control; cohd->dir = direction; cohd->flags = flags; cohd->desc.tx_submit = coh901318_tx_submit; diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 48694c34d96b..b48967b499da 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -62,9 +62,9 @@ #include <linux/slab.h> static DEFINE_MUTEX(dma_list_mutex); +static DEFINE_IDR(dma_idr); static LIST_HEAD(dma_device_list); static long dmaengine_ref_count; -static struct idr dma_idr; /* --- sysfs implementation --- */ @@ -510,8 +510,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v dma_chan_name(chan)); list_del_rcu(&device->global_node); } else if (err) - pr_err("dmaengine: failed to get %s: (%d)\n", - dma_chan_name(chan), err); + pr_debug("dmaengine: failed to get %s: (%d)\n", + dma_chan_name(chan), err); else break; if (--device->privatecnt == 0) @@ -1050,8 +1050,6 @@ EXPORT_SYMBOL_GPL(dma_run_dependencies); static int __init dma_bus_init(void) { - idr_init(&dma_idr); - mutex_init(&dma_list_mutex); return class_register(&dma_devclass); } arch_initcall(dma_bus_init); diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index 0766c1e53b1d..5d7a49bd7c26 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -902,7 +902,7 @@ static void ep93xx_dma_free_chan_resources(struct dma_chan *chan) * * Returns a valid DMA descriptor or %NULL in case of failure. 
*/ -struct dma_async_tx_descriptor * +static struct dma_async_tx_descriptor * ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags) { diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 1eb60ded2f0d..7bd7e98548cd 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1305,8 +1305,10 @@ static int __init sdma_probe(struct platform_device *pdev) goto err_request_irq; sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL); - if (!sdma->script_addrs) + if (!sdma->script_addrs) { + ret = -ENOMEM; goto err_alloc; + } if (of_id) pdev->id_entry = of_id->data; diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index f653517ef744..8a3fdd87db97 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c @@ -1351,7 +1351,6 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state) return -EAGAIN; } device->state = SUSPENDED; - pci_set_drvdata(pci, device); pci_save_state(pci); pci_disable_device(pci); pci_set_power_state(pci, PCI_D3hot); @@ -1380,7 +1379,6 @@ int dma_resume(struct pci_dev *pci) } device->state = RUNNING; iowrite32(REG_BIT0, device->dma_base + DMA_CFG); - pci_set_drvdata(pci, device); return 0; } diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index d845dc4b7103..f519c93a61e7 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -73,10 +73,10 @@ /* provide a lookup table for setting the source address in the base or * extended descriptor of an xor or pq descriptor */ -static const u8 xor_idx_to_desc __read_mostly = 0xd0; -static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 }; -static const u8 pq_idx_to_desc __read_mostly = 0xf8; -static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 }; +static const u8 xor_idx_to_desc = 0xe0; +static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 }; +static const u8 pq_idx_to_desc = 0xf8; +static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 }; static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx) { diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index fab37d1cf48d..5e3a40f79945 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c @@ -72,6 +72,17 @@ static struct pci_device_id ioat_pci_tbl[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) }, + { 0, } }; MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index fd7d2b308cf2..6815905a772f 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -1706,16 +1706,14 @@ static int __init ipu_probe(struct platform_device *pdev) ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base); /* Remap IPU common registers */ - ipu_data.reg_ipu = ioremap(mem_ipu->start, - mem_ipu->end - mem_ipu->start + 1); + ipu_data.reg_ipu = 
ioremap(mem_ipu->start, resource_size(mem_ipu)); if (!ipu_data.reg_ipu) { ret = -ENOMEM; goto err_ioremap_ipu; } /* Remap Image Converter and Image DMA Controller registers */ - ipu_data.reg_ic = ioremap(mem_ic->start, - mem_ic->end - mem_ic->start + 1); + ipu_data.reg_ic = ioremap(mem_ic->start, resource_size(mem_ic)); if (!ipu_data.reg_ic) { ret = -ENOMEM; goto err_ioremap_ic; diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 06f9f27dbe7c..9a353c2216d0 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -1304,7 +1304,8 @@ static int mv_xor_shared_probe(struct platform_device *pdev) if (!res) return -ENODEV; - msp->xor_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + msp->xor_base = devm_ioremap(&pdev->dev, res->start, + resource_size(res)); if (!msp->xor_base) return -EBUSY; diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index 88aad4f54002..be641cbd36fc 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -327,10 +327,12 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) memset(mxs_chan->ccw, 0, PAGE_SIZE); - ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, - 0, "mxs-dma", mxs_dma); - if (ret) - goto err_irq; + if (mxs_chan->chan_irq != NO_IRQ) { + ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, + 0, "mxs-dma", mxs_dma); + if (ret) + goto err_irq; + } ret = clk_enable(mxs_dma->clk); if (ret) @@ -535,6 +537,7 @@ static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, switch (cmd) { case DMA_TERMINATE_ALL: mxs_dma_disable_chan(mxs_chan); + mxs_dma_reset_chan(mxs_chan); break; case DMA_PAUSE: mxs_dma_pause_chan(mxs_chan); @@ -707,6 +710,8 @@ static struct platform_device_id mxs_dma_type[] = { }, { .name = "mxs-dma-apbx", .driver_data = MXS_DMA_APBX, + }, { + /* end of list */ } }; diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index ff5b38f9d45b..1ac8d4b580b7 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -45,7 +45,8 @@ #define DMA_STATUS_MASK_BITS 0x3 #define DMA_STATUS_SHIFT_BITS 16 #define DMA_STATUS_IRQ(x) (0x1 << (x)) -#define DMA_STATUS_ERR(x) (0x1 << ((x) + 8)) +#define DMA_STATUS0_ERR(x) (0x1 << ((x) + 8)) +#define DMA_STATUS2_ERR(x) (0x1 << (x)) #define DMA_DESC_WIDTH_SHIFT_BITS 12 #define DMA_DESC_WIDTH_1_BYTE (0x3 << DMA_DESC_WIDTH_SHIFT_BITS) @@ -61,6 +62,9 @@ #define MAX_CHAN_NR 8 +#define DMA_MASK_CTL0_MODE 0x33333333 +#define DMA_MASK_CTL2_MODE 0x00003333 + static unsigned int init_nr_desc_per_channel = 64; module_param(init_nr_desc_per_channel, uint, 0644); MODULE_PARM_DESC(init_nr_desc_per_channel, @@ -133,6 +137,7 @@ struct pch_dma { #define PCH_DMA_CTL3 0x0C #define PCH_DMA_STS0 0x10 #define PCH_DMA_STS1 0x14 +#define PCH_DMA_STS2 0x18 #define dma_readl(pd, name) \ readl((pd)->membase + PCH_DMA_##name) @@ -183,13 +188,19 @@ static void pdc_enable_irq(struct dma_chan *chan, int enable) { struct pch_dma *pd = to_pd(chan->device); u32 val; + int pos; + + if (chan->chan_id < 8) + pos = chan->chan_id; + else + pos = chan->chan_id + 8; val = dma_readl(pd, CTL2); if (enable) - val |= 0x1 << chan->chan_id; + val |= 0x1 << pos; else - val &= ~(0x1 << chan->chan_id); + val &= ~(0x1 << pos); dma_writel(pd, CTL2, val); @@ -202,10 +213,17 @@ static void pdc_set_dir(struct dma_chan *chan) struct pch_dma_chan *pd_chan = to_pd_chan(chan); struct pch_dma *pd = to_pd(chan->device); u32 val; + u32 mask_mode; + u32 mask_ctl; if (chan->chan_id < 8) { val = dma_readl(pd, CTL0); + mask_mode = DMA_CTL0_MODE_MASK_BITS << + (DMA_CTL0_BITS_PER_CH * 
chan->chan_id); + mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS << + (DMA_CTL0_BITS_PER_CH * chan->chan_id)); + val &= mask_mode; if (pd_chan->dir == DMA_TO_DEVICE) val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + DMA_CTL0_DIR_SHIFT_BITS); @@ -213,18 +231,24 @@ static void pdc_set_dir(struct dma_chan *chan) val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + DMA_CTL0_DIR_SHIFT_BITS)); + val |= mask_ctl; dma_writel(pd, CTL0, val); } else { int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */ val = dma_readl(pd, CTL3); + mask_mode = DMA_CTL0_MODE_MASK_BITS << + (DMA_CTL0_BITS_PER_CH * ch); + mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS << + (DMA_CTL0_BITS_PER_CH * ch)); + val &= mask_mode; if (pd_chan->dir == DMA_TO_DEVICE) val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch + DMA_CTL0_DIR_SHIFT_BITS); else val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch + DMA_CTL0_DIR_SHIFT_BITS)); - + val |= mask_ctl; dma_writel(pd, CTL3, val); } @@ -236,33 +260,37 @@ static void pdc_set_mode(struct dma_chan *chan, u32 mode) { struct pch_dma *pd = to_pd(chan->device); u32 val; + u32 mask_ctl; + u32 mask_dir; if (chan->chan_id < 8) { + mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS << + (DMA_CTL0_BITS_PER_CH * chan->chan_id)); + mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +\ + DMA_CTL0_DIR_SHIFT_BITS); val = dma_readl(pd, CTL0); - - val &= ~(DMA_CTL0_MODE_MASK_BITS << - (DMA_CTL0_BITS_PER_CH * chan->chan_id)); + val &= mask_dir; val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id); - + val |= mask_ctl; dma_writel(pd, CTL0, val); } else { int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */ - + mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS << + (DMA_CTL0_BITS_PER_CH * ch)); + mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +\ + DMA_CTL0_DIR_SHIFT_BITS); val = dma_readl(pd, CTL3); - - val &= ~(DMA_CTL0_MODE_MASK_BITS << - (DMA_CTL0_BITS_PER_CH * ch)); + val &= mask_dir; val |= mode << (DMA_CTL0_BITS_PER_CH * ch); - + val |= mask_ctl; dma_writel(pd, CTL3, val); - } dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n", chan->chan_id, val); } -static u32 pdc_get_status(struct pch_dma_chan *pd_chan) +static u32 pdc_get_status0(struct pch_dma_chan *pd_chan) { struct pch_dma *pd = to_pd(pd_chan->chan.device); u32 val; @@ -272,9 +300,27 @@ static u32 pdc_get_status(struct pch_dma_chan *pd_chan) DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id)); } +static u32 pdc_get_status2(struct pch_dma_chan *pd_chan) +{ + struct pch_dma *pd = to_pd(pd_chan->chan.device); + u32 val; + + val = dma_readl(pd, STS2); + return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS + + DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8))); +} + static bool pdc_is_idle(struct pch_dma_chan *pd_chan) { - if (pdc_get_status(pd_chan) == DMA_STATUS_IDLE) + u32 sts; + + if (pd_chan->chan.chan_id < 8) + sts = pdc_get_status0(pd_chan); + else + sts = pdc_get_status2(pd_chan); + + + if (sts == DMA_STATUS_IDLE) return true; else return false; @@ -495,11 +541,11 @@ static int pd_alloc_chan_resources(struct dma_chan *chan) list_add_tail(&desc->desc_node, &tmp_list); } - spin_lock_bh(&pd_chan->lock); + spin_lock_irq(&pd_chan->lock); list_splice(&tmp_list, &pd_chan->free_list); pd_chan->descs_allocated = i; pd_chan->completed_cookie = chan->cookie = 1; - spin_unlock_bh(&pd_chan->lock); + spin_unlock_irq(&pd_chan->lock); pdc_enable_irq(chan, 1); @@ -517,10 +563,10 @@ static void pd_free_chan_resources(struct dma_chan *chan) BUG_ON(!list_empty(&pd_chan->active_list)); 
BUG_ON(!list_empty(&pd_chan->queue)); - spin_lock_bh(&pd_chan->lock); + spin_lock_irq(&pd_chan->lock); list_splice_init(&pd_chan->free_list, &tmp_list); pd_chan->descs_allocated = 0; - spin_unlock_bh(&pd_chan->lock); + spin_unlock_irq(&pd_chan->lock); list_for_each_entry_safe(desc, _d, &tmp_list, desc_node) pci_pool_free(pd->pool, desc, desc->txd.phys); @@ -536,10 +582,10 @@ static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie, dma_cookie_t last_completed; int ret; - spin_lock_bh(&pd_chan->lock); + spin_lock_irq(&pd_chan->lock); last_completed = pd_chan->completed_cookie; last_used = chan->cookie; - spin_unlock_bh(&pd_chan->lock); + spin_unlock_irq(&pd_chan->lock); ret = dma_async_is_complete(cookie, last_completed, last_used); @@ -654,7 +700,7 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, if (cmd != DMA_TERMINATE_ALL) return -ENXIO; - spin_lock_bh(&pd_chan->lock); + spin_lock_irq(&pd_chan->lock); pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE); @@ -664,7 +710,7 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, list_for_each_entry_safe(desc, _d, &list, desc_node) pdc_chain_complete(pd_chan, desc); - spin_unlock_bh(&pd_chan->lock); + spin_unlock_irq(&pd_chan->lock); return 0; } @@ -693,30 +739,45 @@ static irqreturn_t pd_irq(int irq, void *devid) struct pch_dma *pd = (struct pch_dma *)devid; struct pch_dma_chan *pd_chan; u32 sts0; + u32 sts2; int i; - int ret = IRQ_NONE; + int ret0 = IRQ_NONE; + int ret2 = IRQ_NONE; sts0 = dma_readl(pd, STS0); + sts2 = dma_readl(pd, STS2); dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0); for (i = 0; i < pd->dma.chancnt; i++) { pd_chan = &pd->channels[i]; - if (sts0 & DMA_STATUS_IRQ(i)) { - if (sts0 & DMA_STATUS_ERR(i)) - set_bit(0, &pd_chan->err_status); + if (i < 8) { + if (sts0 & DMA_STATUS_IRQ(i)) { + if (sts0 & DMA_STATUS0_ERR(i)) + set_bit(0, &pd_chan->err_status); - tasklet_schedule(&pd_chan->tasklet); - ret = IRQ_HANDLED; - } + tasklet_schedule(&pd_chan->tasklet); + ret0 = IRQ_HANDLED; + } + } else { + if (sts2 & DMA_STATUS_IRQ(i - 8)) { + if (sts2 & DMA_STATUS2_ERR(i)) + set_bit(0, &pd_chan->err_status); + tasklet_schedule(&pd_chan->tasklet); + ret2 = IRQ_HANDLED; + } + } } /* clear interrupt bits in status register */ - dma_writel(pd, STS0, sts0); + if (ret0) + dma_writel(pd, STS0, sts0); + if (ret2) + dma_writel(pd, STS2, sts2); - return ret; + return ret0 | ret2; } #ifdef CONFIG_PM diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 6abe1ec1f2ce..00eee59e8b33 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -82,7 +82,7 @@ struct dma_pl330_dmac { spinlock_t pool_lock; /* Peripheral channels connected to this DMAC */ - struct dma_pl330_chan peripherals[0]; /* keep at end */ + struct dma_pl330_chan *peripherals; /* keep at end */ }; struct dma_pl330_desc { @@ -451,8 +451,13 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) desc->txd.cookie = 0; async_tx_ack(&desc->txd); - desc->req.rqtype = peri->rqtype; - desc->req.peri = peri->peri_id; + if (peri) { + desc->req.rqtype = peri->rqtype; + desc->req.peri = peri->peri_id; + } else { + desc->req.rqtype = MEMTOMEM; + desc->req.peri = 0; + } dma_async_tx_descriptor_init(&desc->txd, &pch->chan); @@ -529,10 +534,10 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, struct pl330_info *pi; int burst; - if (unlikely(!pch || !len || !peri)) + if (unlikely(!pch || !len)) return NULL; - if (peri->rqtype != MEMTOMEM) + if (peri && peri->rqtype != MEMTOMEM) return NULL; 
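An aside on the pl330 change above: because a memcpy descriptor no longer requires per-channel platform data (peri may now be NULL), a client can obtain a memcpy channel through the generic dmaengine API alone. A hedged sketch follows, using only core dmaengine calls and assuming dst/src are DMA addresses the caller has already mapped; it is not code from this patch.

/*
 * Sketch only: generic dmaengine memcpy client.  The dst/src handles are
 * assumed to have been mapped by the caller (e.g. via dma_map_single());
 * nothing here is pl330-specific beyond relying on DMA_MEMCPY being
 * advertised, which the hunk above now permits without platform data.
 */
#include <linux/dmaengine.h>

static int demo_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* Any controller advertising DMA_MEMCPY will do; no filter needed. */
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						   DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = tx->tx_submit(tx);		/* or dmaengine_submit(tx) */
	dma_async_issue_pending(chan);
	/* ... wait for completion via a callback or dma_sync_wait() ... */
	dma_release_channel(chan);
	return dma_submit_error(cookie) ? -EIO : 0;
}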
pi = &pch->dmac->pif; @@ -577,7 +582,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, int i, burst_size; dma_addr_t addr; - if (unlikely(!pch || !sgl || !sg_len)) + if (unlikely(!pch || !sgl || !sg_len || !peri)) return NULL; /* Make sure the direction is consistent */ @@ -666,17 +671,12 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) struct dma_device *pd; struct resource *res; int i, ret, irq; + int num_chan; pdat = adev->dev.platform_data; - if (!pdat || !pdat->nr_valid_peri) { - dev_err(&adev->dev, "platform data missing\n"); - return -ENODEV; - } - /* Allocate a new DMAC and its Channels */ - pdmac = kzalloc(pdat->nr_valid_peri * sizeof(*pch) - + sizeof(*pdmac), GFP_KERNEL); + pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL); if (!pdmac) { dev_err(&adev->dev, "unable to allocate mem\n"); return -ENOMEM; @@ -685,7 +685,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) pi = &pdmac->pif; pi->dev = &adev->dev; pi->pl330_data = NULL; - pi->mcbufsz = pdat->mcbuf_sz; + pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0; res = &adev->res; request_mem_region(res->start, resource_size(res), "dma-pl330"); @@ -717,27 +717,35 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) INIT_LIST_HEAD(&pd->channels); /* Initialize channel parameters */ - for (i = 0; i < pdat->nr_valid_peri; i++) { - struct dma_pl330_peri *peri = &pdat->peri[i]; - pch = &pdmac->peripherals[i]; + num_chan = max(pdat ? pdat->nr_valid_peri : 0, (u8)pi->pcfg.num_chan); + pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); - switch (peri->rqtype) { - case MEMTOMEM: + for (i = 0; i < num_chan; i++) { + pch = &pdmac->peripherals[i]; + if (pdat) { + struct dma_pl330_peri *peri = &pdat->peri[i]; + + switch (peri->rqtype) { + case MEMTOMEM: + dma_cap_set(DMA_MEMCPY, pd->cap_mask); + break; + case MEMTODEV: + case DEVTOMEM: + dma_cap_set(DMA_SLAVE, pd->cap_mask); + break; + default: + dev_err(&adev->dev, "DEVTODEV Not Supported\n"); + continue; + } + pch->chan.private = peri; + } else { dma_cap_set(DMA_MEMCPY, pd->cap_mask); - break; - case MEMTODEV: - case DEVTOMEM: - dma_cap_set(DMA_SLAVE, pd->cap_mask); - break; - default: - dev_err(&adev->dev, "DEVTODEV Not Supported\n"); - continue; + pch->chan.private = NULL; } INIT_LIST_HEAD(&pch->work_list); spin_lock_init(&pch->lock); pch->pl330_chid = NULL; - pch->chan.private = peri; pch->chan.device = pd; pch->chan.chan_id = i; pch->dmac = pdmac; diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index 028330044201..7f49235d14b9 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c @@ -70,12 +70,36 @@ static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) static u16 dmaor_read(struct sh_dmae_device *shdev) { - return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32)); + u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32); + + if (shdev->pdata->dmaor_is_32bit) + return __raw_readl(addr); + else + return __raw_readw(addr); } static void dmaor_write(struct sh_dmae_device *shdev, u16 data) { - __raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32)); + u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32); + + if (shdev->pdata->dmaor_is_32bit) + __raw_writel(data, addr); + else + __raw_writew(data, addr); +} + +static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data) +{ + struct sh_dmae_device *shdev = to_sh_dev(sh_dc); + + __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32)); +} + +static u32 chcr_read(struct sh_dmae_chan *sh_dc) +{ + struct sh_dmae_device *shdev = 
to_sh_dev(sh_dc); + + return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32)); } /* @@ -120,7 +144,7 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev) static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) { - u32 chcr = sh_dmae_readl(sh_chan, CHCR); + u32 chcr = chcr_read(sh_chan); if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE) return true; /* working */ @@ -130,8 +154,7 @@ static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) { - struct sh_dmae_device *shdev = container_of(sh_chan->common.device, - struct sh_dmae_device, common); + struct sh_dmae_device *shdev = to_sh_dev(sh_chan); struct sh_dmae_pdata *pdata = shdev->pdata; int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) | ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift); @@ -144,8 +167,7 @@ static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size) { - struct sh_dmae_device *shdev = container_of(sh_chan->common.device, - struct sh_dmae_device, common); + struct sh_dmae_device *shdev = to_sh_dev(sh_chan); struct sh_dmae_pdata *pdata = shdev->pdata; int i; @@ -169,18 +191,23 @@ static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) static void dmae_start(struct sh_dmae_chan *sh_chan) { - u32 chcr = sh_dmae_readl(sh_chan, CHCR); + struct sh_dmae_device *shdev = to_sh_dev(sh_chan); + u32 chcr = chcr_read(sh_chan); + + if (shdev->pdata->needs_tend_set) + sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND); - chcr |= CHCR_DE | CHCR_IE; - sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR); + chcr |= CHCR_DE | shdev->chcr_ie_bit; + chcr_write(sh_chan, chcr & ~CHCR_TE); } static void dmae_halt(struct sh_dmae_chan *sh_chan) { - u32 chcr = sh_dmae_readl(sh_chan, CHCR); + struct sh_dmae_device *shdev = to_sh_dev(sh_chan); + u32 chcr = chcr_read(sh_chan); - chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE); - sh_dmae_writel(sh_chan, chcr, CHCR); + chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit); + chcr_write(sh_chan, chcr); } static void dmae_init(struct sh_dmae_chan *sh_chan) @@ -192,7 +219,7 @@ static void dmae_init(struct sh_dmae_chan *sh_chan) u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan, LOG2_DEFAULT_XFER_SIZE); sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr); - sh_dmae_writel(sh_chan, chcr, CHCR); + chcr_write(sh_chan, chcr); } static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) @@ -202,23 +229,25 @@ static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) return -EBUSY; sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val); - sh_dmae_writel(sh_chan, val, CHCR); + chcr_write(sh_chan, val); return 0; } static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) { - struct sh_dmae_device *shdev = container_of(sh_chan->common.device, - struct sh_dmae_device, common); + struct sh_dmae_device *shdev = to_sh_dev(sh_chan); struct sh_dmae_pdata *pdata = shdev->pdata; const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id]; u16 __iomem *addr = shdev->dmars; - int shift = chan_pdata->dmars_bit; + unsigned int shift = chan_pdata->dmars_bit; if (dmae_is_busy(sh_chan)) return -EBUSY; + if (pdata->no_dmars) + return 0; + /* in the case of a missing DMARS resource use first memory window */ if (!addr) addr = (u16 __iomem *)shdev->chan_reg; @@ -296,9 +325,7 @@ static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan) static const struct sh_dmae_slave_config *sh_dmae_find_slave( struct 
sh_dmae_chan *sh_chan, struct sh_dmae_slave *param) { - struct dma_device *dma_dev = sh_chan->common.device; - struct sh_dmae_device *shdev = container_of(dma_dev, - struct sh_dmae_device, common); + struct sh_dmae_device *shdev = to_sh_dev(sh_chan); struct sh_dmae_pdata *pdata = shdev->pdata; int i; @@ -771,10 +798,8 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) spin_lock_bh(&sh_chan->desc_lock); /* DMA work check */ - if (dmae_is_busy(sh_chan)) { - spin_unlock_bh(&sh_chan->desc_lock); - return; - } + if (dmae_is_busy(sh_chan)) + goto sh_chan_xfer_ld_queue_end; /* Find the first not transferred descriptor */ list_for_each_entry(desc, &sh_chan->ld_queue, node) @@ -788,6 +813,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) break; } +sh_chan_xfer_ld_queue_end: spin_unlock_bh(&sh_chan->desc_lock); } @@ -846,7 +872,7 @@ static irqreturn_t sh_dmae_interrupt(int irq, void *data) spin_lock(&sh_chan->desc_lock); - chcr = sh_dmae_readl(sh_chan, CHCR); + chcr = chcr_read(sh_chan); if (chcr & CHCR_TE) { /* DMA stop */ @@ -1144,6 +1170,16 @@ static int __init sh_dmae_probe(struct platform_device *pdev) /* platform data */ shdev->pdata = pdata; + if (pdata->chcr_offset) + shdev->chcr_offset = pdata->chcr_offset; + else + shdev->chcr_offset = CHCR; + + if (pdata->chcr_ie_bit) + shdev->chcr_ie_bit = pdata->chcr_ie_bit; + else + shdev->chcr_ie_bit = CHCR_IE; + platform_set_drvdata(pdev, shdev); pm_runtime_enable(&pdev->dev); diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h index 5ae9fc512180..dc56576f9fdb 100644 --- a/drivers/dma/shdma.h +++ b/drivers/dma/shdma.h @@ -47,10 +47,14 @@ struct sh_dmae_device { struct list_head node; u32 __iomem *chan_reg; u16 __iomem *dmars; + unsigned int chcr_offset; + u32 chcr_ie_bit; }; #define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common) #define to_sh_desc(lh) container_of(lh, struct sh_desc, node) #define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx) +#define to_sh_dev(chan) container_of(chan->common.device,\ + struct sh_dmae_device, common) #endif /* __DMA_SHDMA_H */ diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 29d1addbe0cf..cd3a7c726bf8 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -14,6 +14,7 @@ #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> +#include <linux/amba/bus.h> #include <plat/ste_dma40.h> @@ -45,9 +46,6 @@ #define D40_ALLOC_PHY (1 << 30) #define D40_ALLOC_LOG_FREE 0 -/* Hardware designer of the block */ -#define D40_HW_DESIGNER 0x8 - /** * enum 40_command - The different commands and/or statuses. * @@ -186,6 +184,8 @@ struct d40_base; * @log_def: Default logical channel settings. * @lcla: Space for one dst src pair for logical channel transfers. * @lcpa: Pointer to dst and src lcpa settings. + * @runtime_addr: runtime configured address. + * @runtime_direction: runtime configured direction. * * This struct can either "be" a logical or a physical channel. 
*/ @@ -200,6 +200,7 @@ struct d40_chan { struct dma_chan chan; struct tasklet_struct tasklet; struct list_head client; + struct list_head pending_queue; struct list_head active; struct list_head queue; struct stedma40_chan_cfg dma_cfg; @@ -645,7 +646,20 @@ static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc) { - list_add_tail(&desc->node, &d40c->queue); + list_add_tail(&desc->node, &d40c->pending_queue); +} + +static struct d40_desc *d40_first_pending(struct d40_chan *d40c) +{ + struct d40_desc *d; + + if (list_empty(&d40c->pending_queue)) + return NULL; + + d = list_first_entry(&d40c->pending_queue, + struct d40_desc, + node); + return d; } static struct d40_desc *d40_first_queued(struct d40_chan *d40c) @@ -802,6 +816,11 @@ static void d40_term_all(struct d40_chan *d40c) d40_desc_free(d40c, d40d); } + /* Release pending descriptors */ + while ((d40d = d40_first_pending(d40c))) { + d40_desc_remove(d40d); + d40_desc_free(d40c, d40d); + } d40c->pending_tx = 0; d40c->busy = false; @@ -2092,7 +2111,7 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, struct scatterlist *sg; int i; - sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_KERNEL); + sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT); for (i = 0; i < periods; i++) { sg_dma_address(&sg[i]) = dma_addr; sg_dma_len(&sg[i]) = period_len; @@ -2152,24 +2171,87 @@ static void d40_issue_pending(struct dma_chan *chan) spin_lock_irqsave(&d40c->lock, flags); - /* Busy means that pending jobs are already being processed */ + list_splice_tail_init(&d40c->pending_queue, &d40c->queue); + + /* Busy means that queued jobs are already being processed */ if (!d40c->busy) (void) d40_queue_start(d40c); spin_unlock_irqrestore(&d40c->lock, flags); } +static int +dma40_config_to_halfchannel(struct d40_chan *d40c, + struct stedma40_half_channel_info *info, + enum dma_slave_buswidth width, + u32 maxburst) +{ + enum stedma40_periph_data_width addr_width; + int psize; + + switch (width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + addr_width = STEDMA40_BYTE_WIDTH; + break; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + addr_width = STEDMA40_HALFWORD_WIDTH; + break; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + addr_width = STEDMA40_WORD_WIDTH; + break; + case DMA_SLAVE_BUSWIDTH_8_BYTES: + addr_width = STEDMA40_DOUBLEWORD_WIDTH; + break; + default: + dev_err(d40c->base->dev, + "illegal peripheral address width " + "requested (%d)\n", + width); + return -EINVAL; + } + + if (chan_is_logical(d40c)) { + if (maxburst >= 16) + psize = STEDMA40_PSIZE_LOG_16; + else if (maxburst >= 8) + psize = STEDMA40_PSIZE_LOG_8; + else if (maxburst >= 4) + psize = STEDMA40_PSIZE_LOG_4; + else + psize = STEDMA40_PSIZE_LOG_1; + } else { + if (maxburst >= 16) + psize = STEDMA40_PSIZE_PHY_16; + else if (maxburst >= 8) + psize = STEDMA40_PSIZE_PHY_8; + else if (maxburst >= 4) + psize = STEDMA40_PSIZE_PHY_4; + else + psize = STEDMA40_PSIZE_PHY_1; + } + + info->data_width = addr_width; + info->psize = psize; + info->flow_ctrl = STEDMA40_NO_FLOW_CTRL; + + return 0; +} + /* Runtime reconfiguration extension */ -static void d40_set_runtime_config(struct dma_chan *chan, - struct dma_slave_config *config) +static int d40_set_runtime_config(struct dma_chan *chan, + struct dma_slave_config *config) { struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); struct stedma40_chan_cfg *cfg = &d40c->dma_cfg; - enum dma_slave_buswidth config_addr_width; + enum dma_slave_buswidth 
src_addr_width, dst_addr_width; dma_addr_t config_addr; - u32 config_maxburst; - enum stedma40_periph_data_width addr_width; - int psize; + u32 src_maxburst, dst_maxburst; + int ret; + + src_addr_width = config->src_addr_width; + src_maxburst = config->src_maxburst; + dst_addr_width = config->dst_addr_width; + dst_maxburst = config->dst_maxburst; if (config->direction == DMA_FROM_DEVICE) { dma_addr_t dev_addr_rx = @@ -2188,8 +2270,11 @@ static void d40_set_runtime_config(struct dma_chan *chan, cfg->dir); cfg->dir = STEDMA40_PERIPH_TO_MEM; - config_addr_width = config->src_addr_width; - config_maxburst = config->src_maxburst; + /* Configure the memory side */ + if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) + dst_addr_width = src_addr_width; + if (dst_maxburst == 0) + dst_maxburst = src_maxburst; } else if (config->direction == DMA_TO_DEVICE) { dma_addr_t dev_addr_tx = @@ -2208,68 +2293,39 @@ static void d40_set_runtime_config(struct dma_chan *chan, cfg->dir); cfg->dir = STEDMA40_MEM_TO_PERIPH; - config_addr_width = config->dst_addr_width; - config_maxburst = config->dst_maxburst; - + /* Configure the memory side */ + if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) + src_addr_width = dst_addr_width; + if (src_maxburst == 0) + src_maxburst = dst_maxburst; } else { dev_err(d40c->base->dev, "unrecognized channel direction %d\n", config->direction); - return; + return -EINVAL; } - switch (config_addr_width) { - case DMA_SLAVE_BUSWIDTH_1_BYTE: - addr_width = STEDMA40_BYTE_WIDTH; - break; - case DMA_SLAVE_BUSWIDTH_2_BYTES: - addr_width = STEDMA40_HALFWORD_WIDTH; - break; - case DMA_SLAVE_BUSWIDTH_4_BYTES: - addr_width = STEDMA40_WORD_WIDTH; - break; - case DMA_SLAVE_BUSWIDTH_8_BYTES: - addr_width = STEDMA40_DOUBLEWORD_WIDTH; - break; - default: + if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) { dev_err(d40c->base->dev, - "illegal peripheral address width " - "requested (%d)\n", - config->src_addr_width); - return; + "src/dst width/maxburst mismatch: %d*%d != %d*%d\n", + src_maxburst, + src_addr_width, + dst_maxburst, + dst_addr_width); + return -EINVAL; } - if (chan_is_logical(d40c)) { - if (config_maxburst >= 16) - psize = STEDMA40_PSIZE_LOG_16; - else if (config_maxburst >= 8) - psize = STEDMA40_PSIZE_LOG_8; - else if (config_maxburst >= 4) - psize = STEDMA40_PSIZE_LOG_4; - else - psize = STEDMA40_PSIZE_LOG_1; - } else { - if (config_maxburst >= 16) - psize = STEDMA40_PSIZE_PHY_16; - else if (config_maxburst >= 8) - psize = STEDMA40_PSIZE_PHY_8; - else if (config_maxburst >= 4) - psize = STEDMA40_PSIZE_PHY_4; - else if (config_maxburst >= 2) - psize = STEDMA40_PSIZE_PHY_2; - else - psize = STEDMA40_PSIZE_PHY_1; - } + ret = dma40_config_to_halfchannel(d40c, &cfg->src_info, + src_addr_width, + src_maxburst); + if (ret) + return ret; - /* Set up all the endpoint configs */ - cfg->src_info.data_width = addr_width; - cfg->src_info.psize = psize; - cfg->src_info.big_endian = false; - cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; - cfg->dst_info.data_width = addr_width; - cfg->dst_info.psize = psize; - cfg->dst_info.big_endian = false; - cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; + ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info, + dst_addr_width, + dst_maxburst); + if (ret) + return ret; /* Fill in register values */ if (chan_is_logical(d40c)) @@ -2282,12 +2338,14 @@ static void d40_set_runtime_config(struct dma_chan *chan, d40c->runtime_addr = config_addr; d40c->runtime_direction = config->direction; dev_dbg(d40c->base->dev, - "configured channel %s for 
%s, data width %d, " - "maxburst %d bytes, LE, no flow control\n", + "configured channel %s for %s, data width %d/%d, " + "maxburst %d/%d elements, LE, no flow control\n", dma_chan_name(chan), (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", - config_addr_width, - config_maxburst); + src_addr_width, dst_addr_width, + src_maxburst, dst_maxburst); + + return 0; } static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, @@ -2308,9 +2366,8 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, case DMA_RESUME: return d40_resume(d40c); case DMA_SLAVE_CONFIG: - d40_set_runtime_config(chan, + return d40_set_runtime_config(chan, (struct dma_slave_config *) arg); - return 0; default: break; } @@ -2341,6 +2398,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, INIT_LIST_HEAD(&d40c->active); INIT_LIST_HEAD(&d40c->queue); + INIT_LIST_HEAD(&d40c->pending_queue); INIT_LIST_HEAD(&d40c->client); tasklet_init(&d40c->tasklet, dma_tasklet, @@ -2502,25 +2560,6 @@ static int __init d40_phy_res_init(struct d40_base *base) static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) { - static const struct d40_reg_val dma_id_regs[] = { - /* Peripheral Id */ - { .reg = D40_DREG_PERIPHID0, .val = 0x0040}, - { .reg = D40_DREG_PERIPHID1, .val = 0x0000}, - /* - * D40_DREG_PERIPHID2 Depends on HW revision: - * DB8500ed has 0x0008, - * ? has 0x0018, - * DB8500v1 has 0x0028 - * DB8500v2 has 0x0038 - */ - { .reg = D40_DREG_PERIPHID3, .val = 0x0000}, - - /* PCell Id */ - { .reg = D40_DREG_CELLID0, .val = 0x000d}, - { .reg = D40_DREG_CELLID1, .val = 0x00f0}, - { .reg = D40_DREG_CELLID2, .val = 0x0005}, - { .reg = D40_DREG_CELLID3, .val = 0x00b1} - }; struct stedma40_platform_data *plat_data; struct clk *clk = NULL; void __iomem *virtbase = NULL; @@ -2529,8 +2568,9 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) int num_log_chans = 0; int num_phy_chans; int i; - u32 val; - u32 rev; + u32 pid; + u32 cid; + u8 rev; clk = clk_get(&pdev->dev, NULL); @@ -2554,32 +2594,32 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) if (!virtbase) goto failure; - /* HW version check */ - for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) { - if (dma_id_regs[i].val != - readl(virtbase + dma_id_regs[i].reg)) { - d40_err(&pdev->dev, - "Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n", - dma_id_regs[i].val, - dma_id_regs[i].reg, - readl(virtbase + dma_id_regs[i].reg)); - goto failure; - } - } + /* This is just a regular AMBA PrimeCell ID actually */ + for (pid = 0, i = 0; i < 4; i++) + pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i) + & 255) << (i * 8); + for (cid = 0, i = 0; i < 4; i++) + cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i) + & 255) << (i * 8); - /* Get silicon revision and designer */ - val = readl(virtbase + D40_DREG_PERIPHID2); - - if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) != - D40_HW_DESIGNER) { + if (cid != AMBA_CID) { + d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n"); + goto failure; + } + if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) { d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n", - val & D40_DREG_PERIPHID2_DESIGNER_MASK, - D40_HW_DESIGNER); + AMBA_MANF_BITS(pid), + AMBA_VENDOR_ST); goto failure; } - - rev = (val & D40_DREG_PERIPHID2_REV_MASK) >> - D40_DREG_PERIPHID2_REV_POS; + /* + * HW revision: + * DB8500ed has revision 0 + * ? 
has revision 1 + * DB8500v1 has revision 2 + * DB8500v2 has revision 3 + */ + rev = AMBA_REV_BITS(pid); /* The number of physical channels on this HW */ num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index 195ee65ee7f3..b44c455158de 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h @@ -184,9 +184,6 @@ #define D40_DREG_PERIPHID0 0xFE0 #define D40_DREG_PERIPHID1 0xFE4 #define D40_DREG_PERIPHID2 0xFE8 -#define D40_DREG_PERIPHID2_REV_POS 4 -#define D40_DREG_PERIPHID2_REV_MASK (0xf << D40_DREG_PERIPHID2_REV_POS) -#define D40_DREG_PERIPHID2_DESIGNER_MASK 0xf #define D40_DREG_PERIPHID3 0xFEC #define D40_DREG_CELLID0 0xFF0 #define D40_DREG_CELLID1 0xFF4 diff --git a/drivers/eisa/pci_eisa.c b/drivers/eisa/pci_eisa.c index 30da70d06a6d..cdae207028a7 100644 --- a/drivers/eisa/pci_eisa.c +++ b/drivers/eisa/pci_eisa.c @@ -45,13 +45,13 @@ static int __init pci_eisa_init(struct pci_dev *pdev, return 0; } -static struct pci_device_id __initdata pci_eisa_pci_tbl[] = { +static struct pci_device_id pci_eisa_pci_tbl[] = { { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_BRIDGE_EISA << 8, 0xffff00, 0 }, { 0, } }; -static struct pci_driver __initdata pci_eisa_driver = { +static struct pci_driver __refdata pci_eisa_driver = { .name = "pci_eisa", .id_table = pci_eisa_pci_tbl, .probe = pci_eisa_init, diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c index 5f29aafd4462..eb80b549ed8d 100644 --- a/drivers/firmware/efivars.c +++ b/drivers/firmware/efivars.c @@ -78,6 +78,7 @@ #include <linux/kobject.h> #include <linux/device.h> #include <linux/slab.h> +#include <linux/pstore.h> #include <asm/uaccess.h> @@ -89,6 +90,8 @@ MODULE_DESCRIPTION("sysfs interface to EFI Variables"); MODULE_LICENSE("GPL"); MODULE_VERSION(EFIVARS_VERSION); +#define DUMP_NAME_LEN 52 + /* * The maximum size of VariableName + Data = 1024 * Therefore, it's reasonable to save that much @@ -119,6 +122,10 @@ struct efivar_attribute { ssize_t (*store)(struct efivar_entry *entry, const char *buf, size_t count); }; +#define PSTORE_EFI_ATTRIBUTES \ + (EFI_VARIABLE_NON_VOLATILE | \ + EFI_VARIABLE_BOOTSERVICE_ACCESS | \ + EFI_VARIABLE_RUNTIME_ACCESS) #define EFIVAR_ATTR(_name, _mode, _show, _store) \ struct efivar_attribute efivar_attr_##_name = { \ @@ -141,38 +148,72 @@ efivar_create_sysfs_entry(struct efivars *efivars, /* Return the number of unicode characters in data */ static unsigned long -utf8_strlen(efi_char16_t *data, unsigned long maxlength) +utf16_strnlen(efi_char16_t *s, size_t maxlength) { unsigned long length = 0; - while (*data++ != 0 && length < maxlength) + while (*s++ != 0 && length < maxlength) length++; return length; } +static inline unsigned long +utf16_strlen(efi_char16_t *s) +{ + return utf16_strnlen(s, ~0UL); +} + /* * Return the number of bytes is the length of this string * Note: this is NOT the same as the number of unicode characters */ static inline unsigned long -utf8_strsize(efi_char16_t *data, unsigned long maxlength) +utf16_strsize(efi_char16_t *data, unsigned long maxlength) { - return utf8_strlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t); + return utf16_strnlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t); +} + +static inline int +utf16_strncmp(const efi_char16_t *a, const efi_char16_t *b, size_t len) +{ + while (1) { + if (len == 0) + return 0; + if (*a < *b) + return -1; + if (*a > *b) + return 1; + if (*a == 0) /* implies *b == 0 */ + return 0; + 
a++; + b++; + len--; + } } static efi_status_t -get_var_data(struct efivars *efivars, struct efi_variable *var) +get_var_data_locked(struct efivars *efivars, struct efi_variable *var) { efi_status_t status; - spin_lock(&efivars->lock); var->DataSize = 1024; status = efivars->ops->get_variable(var->VariableName, &var->VendorGuid, &var->Attributes, &var->DataSize, var->Data); + return status; +} + +static efi_status_t +get_var_data(struct efivars *efivars, struct efi_variable *var) +{ + efi_status_t status; + + spin_lock(&efivars->lock); + status = get_var_data_locked(efivars, var); spin_unlock(&efivars->lock); + if (status != EFI_SUCCESS) { printk(KERN_WARNING "efivars: get_variable() failed 0x%lx!\n", status); @@ -387,12 +428,180 @@ static struct kobj_type efivar_ktype = { .default_attrs = def_attrs, }; +static struct pstore_info efi_pstore_info; + static inline void efivar_unregister(struct efivar_entry *var) { kobject_put(&var->kobj); } +#ifdef CONFIG_PSTORE + +static int efi_pstore_open(struct pstore_info *psi) +{ + struct efivars *efivars = psi->data; + + spin_lock(&efivars->lock); + efivars->walk_entry = list_first_entry(&efivars->list, + struct efivar_entry, list); + return 0; +} + +static int efi_pstore_close(struct pstore_info *psi) +{ + struct efivars *efivars = psi->data; + + spin_unlock(&efivars->lock); + return 0; +} + +static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, + struct timespec *timespec, struct pstore_info *psi) +{ + efi_guid_t vendor = LINUX_EFI_CRASH_GUID; + struct efivars *efivars = psi->data; + char name[DUMP_NAME_LEN]; + int i; + unsigned int part, size; + unsigned long time; + + while (&efivars->walk_entry->list != &efivars->list) { + if (!efi_guidcmp(efivars->walk_entry->var.VendorGuid, + vendor)) { + for (i = 0; i < DUMP_NAME_LEN; i++) { + name[i] = efivars->walk_entry->var.VariableName[i]; + } + if (sscanf(name, "dump-type%u-%u-%lu", type, &part, &time) == 3) { + *id = part; + timespec->tv_sec = time; + timespec->tv_nsec = 0; + get_var_data_locked(efivars, &efivars->walk_entry->var); + size = efivars->walk_entry->var.DataSize; + memcpy(psi->buf, efivars->walk_entry->var.Data, size); + efivars->walk_entry = list_entry(efivars->walk_entry->list.next, + struct efivar_entry, list); + return size; + } + } + efivars->walk_entry = list_entry(efivars->walk_entry->list.next, + struct efivar_entry, list); + } + return 0; +} + +static u64 efi_pstore_write(enum pstore_type_id type, unsigned int part, + size_t size, struct pstore_info *psi) +{ + char name[DUMP_NAME_LEN]; + char stub_name[DUMP_NAME_LEN]; + efi_char16_t efi_name[DUMP_NAME_LEN]; + efi_guid_t vendor = LINUX_EFI_CRASH_GUID; + struct efivars *efivars = psi->data; + struct efivar_entry *entry, *found = NULL; + int i; + + sprintf(stub_name, "dump-type%u-%u-", type, part); + sprintf(name, "%s%lu", stub_name, get_seconds()); + + spin_lock(&efivars->lock); + + for (i = 0; i < DUMP_NAME_LEN; i++) + efi_name[i] = stub_name[i]; + + /* + * Clean up any entries with the same name + */ + + list_for_each_entry(entry, &efivars->list, list) { + get_var_data_locked(efivars, &entry->var); + + if (efi_guidcmp(entry->var.VendorGuid, vendor)) + continue; + if (utf16_strncmp(entry->var.VariableName, efi_name, + utf16_strlen(efi_name))) + continue; + /* Needs to be a prefix */ + if (entry->var.VariableName[utf16_strlen(efi_name)] == 0) + continue; + + /* found */ + found = entry; + efivars->ops->set_variable(entry->var.VariableName, + &entry->var.VendorGuid, + PSTORE_EFI_ATTRIBUTES, + 0, NULL); + } + + if (found) + 
list_del(&found->list); + + for (i = 0; i < DUMP_NAME_LEN; i++) + efi_name[i] = name[i]; + + efivars->ops->set_variable(efi_name, &vendor, PSTORE_EFI_ATTRIBUTES, + size, psi->buf); + + spin_unlock(&efivars->lock); + + if (found) + efivar_unregister(found); + + if (size) + efivar_create_sysfs_entry(efivars, + utf16_strsize(efi_name, + DUMP_NAME_LEN * 2), + efi_name, &vendor); + + return part; +}; + +static int efi_pstore_erase(enum pstore_type_id type, u64 id, + struct pstore_info *psi) +{ + efi_pstore_write(type, id, 0, psi); + + return 0; +} +#else +static int efi_pstore_open(struct pstore_info *psi) +{ + return 0; +} + +static int efi_pstore_close(struct pstore_info *psi) +{ + return 0; +} + +static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, + struct timespec *time, struct pstore_info *psi) +{ + return -1; +} + +static u64 efi_pstore_write(enum pstore_type_id type, unsigned int part, + size_t size, struct pstore_info *psi) +{ + return 0; +} + +static int efi_pstore_erase(enum pstore_type_id type, u64 id, + struct pstore_info *psi) +{ + return 0; +} +#endif + +static struct pstore_info efi_pstore_info = { + .owner = THIS_MODULE, + .name = "efi", + .open = efi_pstore_open, + .close = efi_pstore_close, + .read = efi_pstore_read, + .write = efi_pstore_write, + .erase = efi_pstore_erase, +}; static ssize_t efivar_create(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, @@ -414,8 +623,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, * Does this variable already exist? */ list_for_each_entry_safe(search_efivar, n, &efivars->list, list) { - strsize1 = utf8_strsize(search_efivar->var.VariableName, 1024); - strsize2 = utf8_strsize(new_var->VariableName, 1024); + strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024); + strsize2 = utf16_strsize(new_var->VariableName, 1024); if (strsize1 == strsize2 && !memcmp(&(search_efivar->var.VariableName), new_var->VariableName, strsize1) && @@ -447,8 +656,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, /* Create the entry in sysfs. Locking is not required here */ status = efivar_create_sysfs_entry(efivars, - utf8_strsize(new_var->VariableName, - 1024), + utf16_strsize(new_var->VariableName, + 1024), new_var->VariableName, &new_var->VendorGuid); if (status) { @@ -477,8 +686,8 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj, * Does this variable already exist? 
*/ list_for_each_entry_safe(search_efivar, n, &efivars->list, list) { - strsize1 = utf8_strsize(search_efivar->var.VariableName, 1024); - strsize2 = utf8_strsize(del_var->VariableName, 1024); + strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024); + strsize2 = utf16_strsize(del_var->VariableName, 1024); if (strsize1 == strsize2 && !memcmp(&(search_efivar->var.VariableName), del_var->VariableName, strsize1) && @@ -763,6 +972,16 @@ int register_efivars(struct efivars *efivars, if (error) unregister_efivars(efivars); + efivars->efi_pstore_info = efi_pstore_info; + + efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL); + if (efivars->efi_pstore_info.buf) { + efivars->efi_pstore_info.bufsize = 1024; + efivars->efi_pstore_info.data = efivars; + mutex_init(&efivars->efi_pstore_info.buf_mutex); + pstore_register(&efivars->efi_pstore_info); + } + out: kfree(variable_name); diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 6778f56a4c64..d539efd96d4b 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -103,6 +103,22 @@ config GPIO_MPC5200 def_bool y depends on PPC_MPC52xx +config GPIO_MSM_V1 + tristate "Qualcomm MSM GPIO v1" + depends on GPIOLIB && ARCH_MSM + help + Say yes here to support the GPIO interface on ARM v6 based + Qualcomm MSM chips. Most of the pins on the MSM can be + selected for GPIO, and are controlled by this driver. + +config GPIO_MSM_V2 + tristate "Qualcomm MSM GPIO v2" + depends on GPIOLIB && ARCH_MSM + help + Say yes here to support the GPIO interface on ARM v7 based + Qualcomm MSM chips. Most of the pins on the MSM can be + selected for GPIO, and are controlled by this driver. + config GPIO_MXC def_bool y depends on ARCH_MXC diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 4b81d4e1e709..9588948c96f0 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -27,6 +27,8 @@ obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o obj-$(CONFIG_GPIO_MCP23S08) += gpio-mcp23s08.o obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o +obj-$(CONFIG_GPIO_MSM_V1) += gpio-msm-v1.o +obj-$(CONFIG_GPIO_MSM_V2) += gpio-msm-v2.o obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o obj-$(CONFIG_PLAT_NOMADIK) += gpio-nomadik.o diff --git a/drivers/gpio/gpio-ab8500.c b/drivers/gpio/gpio-ab8500.c index ed795e64eea7..050c05d91896 100644 --- a/drivers/gpio/gpio-ab8500.c +++ b/drivers/gpio/gpio-ab8500.c @@ -516,5 +516,5 @@ module_exit(ab8500_gpio_exit); MODULE_AUTHOR("BIBEK BASU <bibek.basu@stericsson.com>"); MODULE_DESCRIPTION("Driver allows to use AB8500 unused pins to be used as GPIO"); -MODULE_ALIAS("AB8500 GPIO driver"); +MODULE_ALIAS("platform:ab8500-gpio"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpio/gpio-msm-v1.c b/drivers/gpio/gpio-msm-v1.c new file mode 100644 index 000000000000..52a4d4286eba --- /dev/null +++ b/drivers/gpio/gpio-msm-v1.c @@ -0,0 +1,636 @@ +/* + * Copyright (C) 2007 Google, Inc. + * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/bitops.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/module.h> +#include <mach/cpu.h> +#include <mach/msm_gpiomux.h> +#include <mach/msm_iomap.h> + +/* see 80-VA736-2 Rev C pp 695-751 +** +** These are actually the *shadow* gpio registers, since the +** real ones (which allow full access) are only available to the +** ARM9 side of the world. +** +** Since the _BASE need to be page-aligned when we're mapping them +** to virtual addresses, adjust for the additional offset in these +** macros. +*/ + +#define MSM_GPIO1_REG(off) (MSM_GPIO1_BASE + (off)) +#define MSM_GPIO2_REG(off) (MSM_GPIO2_BASE + 0x400 + (off)) +#define MSM_GPIO1_SHADOW_REG(off) (MSM_GPIO1_BASE + 0x800 + (off)) +#define MSM_GPIO2_SHADOW_REG(off) (MSM_GPIO2_BASE + 0xC00 + (off)) + +/* + * MSM7X00 registers + */ +/* output value */ +#define MSM7X00_GPIO_OUT_0 MSM_GPIO1_SHADOW_REG(0x00) /* gpio 15-0 */ +#define MSM7X00_GPIO_OUT_1 MSM_GPIO2_SHADOW_REG(0x00) /* gpio 42-16 */ +#define MSM7X00_GPIO_OUT_2 MSM_GPIO1_SHADOW_REG(0x04) /* gpio 67-43 */ +#define MSM7X00_GPIO_OUT_3 MSM_GPIO1_SHADOW_REG(0x08) /* gpio 94-68 */ +#define MSM7X00_GPIO_OUT_4 MSM_GPIO1_SHADOW_REG(0x0C) /* gpio 106-95 */ +#define MSM7X00_GPIO_OUT_5 MSM_GPIO1_SHADOW_REG(0x50) /* gpio 107-121 */ + +/* same pin map as above, output enable */ +#define MSM7X00_GPIO_OE_0 MSM_GPIO1_SHADOW_REG(0x10) +#define MSM7X00_GPIO_OE_1 MSM_GPIO2_SHADOW_REG(0x08) +#define MSM7X00_GPIO_OE_2 MSM_GPIO1_SHADOW_REG(0x14) +#define MSM7X00_GPIO_OE_3 MSM_GPIO1_SHADOW_REG(0x18) +#define MSM7X00_GPIO_OE_4 MSM_GPIO1_SHADOW_REG(0x1C) +#define MSM7X00_GPIO_OE_5 MSM_GPIO1_SHADOW_REG(0x54) + +/* same pin map as above, input read */ +#define MSM7X00_GPIO_IN_0 MSM_GPIO1_SHADOW_REG(0x34) +#define MSM7X00_GPIO_IN_1 MSM_GPIO2_SHADOW_REG(0x20) +#define MSM7X00_GPIO_IN_2 MSM_GPIO1_SHADOW_REG(0x38) +#define MSM7X00_GPIO_IN_3 MSM_GPIO1_SHADOW_REG(0x3C) +#define MSM7X00_GPIO_IN_4 MSM_GPIO1_SHADOW_REG(0x40) +#define MSM7X00_GPIO_IN_5 MSM_GPIO1_SHADOW_REG(0x44) + +/* same pin map as above, 1=edge 0=level interrup */ +#define MSM7X00_GPIO_INT_EDGE_0 MSM_GPIO1_SHADOW_REG(0x60) +#define MSM7X00_GPIO_INT_EDGE_1 MSM_GPIO2_SHADOW_REG(0x50) +#define MSM7X00_GPIO_INT_EDGE_2 MSM_GPIO1_SHADOW_REG(0x64) +#define MSM7X00_GPIO_INT_EDGE_3 MSM_GPIO1_SHADOW_REG(0x68) +#define MSM7X00_GPIO_INT_EDGE_4 MSM_GPIO1_SHADOW_REG(0x6C) +#define MSM7X00_GPIO_INT_EDGE_5 MSM_GPIO1_SHADOW_REG(0xC0) + +/* same pin map as above, 1=positive 0=negative */ +#define MSM7X00_GPIO_INT_POS_0 MSM_GPIO1_SHADOW_REG(0x70) +#define MSM7X00_GPIO_INT_POS_1 MSM_GPIO2_SHADOW_REG(0x58) +#define MSM7X00_GPIO_INT_POS_2 MSM_GPIO1_SHADOW_REG(0x74) +#define MSM7X00_GPIO_INT_POS_3 MSM_GPIO1_SHADOW_REG(0x78) +#define MSM7X00_GPIO_INT_POS_4 MSM_GPIO1_SHADOW_REG(0x7C) +#define MSM7X00_GPIO_INT_POS_5 MSM_GPIO1_SHADOW_REG(0xBC) + +/* same pin map as above, interrupt enable */ +#define MSM7X00_GPIO_INT_EN_0 MSM_GPIO1_SHADOW_REG(0x80) +#define MSM7X00_GPIO_INT_EN_1 MSM_GPIO2_SHADOW_REG(0x60) +#define MSM7X00_GPIO_INT_EN_2 MSM_GPIO1_SHADOW_REG(0x84) +#define MSM7X00_GPIO_INT_EN_3 MSM_GPIO1_SHADOW_REG(0x88) +#define MSM7X00_GPIO_INT_EN_4 MSM_GPIO1_SHADOW_REG(0x8C) +#define MSM7X00_GPIO_INT_EN_5 MSM_GPIO1_SHADOW_REG(0xB8) + +/* same pin map as above, write 1 to clear interrupt */ +#define MSM7X00_GPIO_INT_CLEAR_0 MSM_GPIO1_SHADOW_REG(0x90) +#define MSM7X00_GPIO_INT_CLEAR_1 MSM_GPIO2_SHADOW_REG(0x68) +#define MSM7X00_GPIO_INT_CLEAR_2 MSM_GPIO1_SHADOW_REG(0x94) +#define 
MSM7X00_GPIO_INT_CLEAR_3 MSM_GPIO1_SHADOW_REG(0x98) +#define MSM7X00_GPIO_INT_CLEAR_4 MSM_GPIO1_SHADOW_REG(0x9C) +#define MSM7X00_GPIO_INT_CLEAR_5 MSM_GPIO1_SHADOW_REG(0xB4) + +/* same pin map as above, 1=interrupt pending */ +#define MSM7X00_GPIO_INT_STATUS_0 MSM_GPIO1_SHADOW_REG(0xA0) +#define MSM7X00_GPIO_INT_STATUS_1 MSM_GPIO2_SHADOW_REG(0x70) +#define MSM7X00_GPIO_INT_STATUS_2 MSM_GPIO1_SHADOW_REG(0xA4) +#define MSM7X00_GPIO_INT_STATUS_3 MSM_GPIO1_SHADOW_REG(0xA8) +#define MSM7X00_GPIO_INT_STATUS_4 MSM_GPIO1_SHADOW_REG(0xAC) +#define MSM7X00_GPIO_INT_STATUS_5 MSM_GPIO1_SHADOW_REG(0xB0) + +/* + * QSD8X50 registers + */ +/* output value */ +#define QSD8X50_GPIO_OUT_0 MSM_GPIO1_SHADOW_REG(0x00) /* gpio 15-0 */ +#define QSD8X50_GPIO_OUT_1 MSM_GPIO2_SHADOW_REG(0x00) /* gpio 42-16 */ +#define QSD8X50_GPIO_OUT_2 MSM_GPIO1_SHADOW_REG(0x04) /* gpio 67-43 */ +#define QSD8X50_GPIO_OUT_3 MSM_GPIO1_SHADOW_REG(0x08) /* gpio 94-68 */ +#define QSD8X50_GPIO_OUT_4 MSM_GPIO1_SHADOW_REG(0x0C) /* gpio 103-95 */ +#define QSD8X50_GPIO_OUT_5 MSM_GPIO1_SHADOW_REG(0x10) /* gpio 121-104 */ +#define QSD8X50_GPIO_OUT_6 MSM_GPIO1_SHADOW_REG(0x14) /* gpio 152-122 */ +#define QSD8X50_GPIO_OUT_7 MSM_GPIO1_SHADOW_REG(0x18) /* gpio 164-153 */ + +/* same pin map as above, output enable */ +#define QSD8X50_GPIO_OE_0 MSM_GPIO1_SHADOW_REG(0x20) +#define QSD8X50_GPIO_OE_1 MSM_GPIO2_SHADOW_REG(0x08) +#define QSD8X50_GPIO_OE_2 MSM_GPIO1_SHADOW_REG(0x24) +#define QSD8X50_GPIO_OE_3 MSM_GPIO1_SHADOW_REG(0x28) +#define QSD8X50_GPIO_OE_4 MSM_GPIO1_SHADOW_REG(0x2C) +#define QSD8X50_GPIO_OE_5 MSM_GPIO1_SHADOW_REG(0x30) +#define QSD8X50_GPIO_OE_6 MSM_GPIO1_SHADOW_REG(0x34) +#define QSD8X50_GPIO_OE_7 MSM_GPIO1_SHADOW_REG(0x38) + +/* same pin map as above, input read */ +#define QSD8X50_GPIO_IN_0 MSM_GPIO1_SHADOW_REG(0x50) +#define QSD8X50_GPIO_IN_1 MSM_GPIO2_SHADOW_REG(0x20) +#define QSD8X50_GPIO_IN_2 MSM_GPIO1_SHADOW_REG(0x54) +#define QSD8X50_GPIO_IN_3 MSM_GPIO1_SHADOW_REG(0x58) +#define QSD8X50_GPIO_IN_4 MSM_GPIO1_SHADOW_REG(0x5C) +#define QSD8X50_GPIO_IN_5 MSM_GPIO1_SHADOW_REG(0x60) +#define QSD8X50_GPIO_IN_6 MSM_GPIO1_SHADOW_REG(0x64) +#define QSD8X50_GPIO_IN_7 MSM_GPIO1_SHADOW_REG(0x68) + +/* same pin map as above, 1=edge 0=level interrup */ +#define QSD8X50_GPIO_INT_EDGE_0 MSM_GPIO1_SHADOW_REG(0x70) +#define QSD8X50_GPIO_INT_EDGE_1 MSM_GPIO2_SHADOW_REG(0x50) +#define QSD8X50_GPIO_INT_EDGE_2 MSM_GPIO1_SHADOW_REG(0x74) +#define QSD8X50_GPIO_INT_EDGE_3 MSM_GPIO1_SHADOW_REG(0x78) +#define QSD8X50_GPIO_INT_EDGE_4 MSM_GPIO1_SHADOW_REG(0x7C) +#define QSD8X50_GPIO_INT_EDGE_5 MSM_GPIO1_SHADOW_REG(0x80) +#define QSD8X50_GPIO_INT_EDGE_6 MSM_GPIO1_SHADOW_REG(0x84) +#define QSD8X50_GPIO_INT_EDGE_7 MSM_GPIO1_SHADOW_REG(0x88) + +/* same pin map as above, 1=positive 0=negative */ +#define QSD8X50_GPIO_INT_POS_0 MSM_GPIO1_SHADOW_REG(0x90) +#define QSD8X50_GPIO_INT_POS_1 MSM_GPIO2_SHADOW_REG(0x58) +#define QSD8X50_GPIO_INT_POS_2 MSM_GPIO1_SHADOW_REG(0x94) +#define QSD8X50_GPIO_INT_POS_3 MSM_GPIO1_SHADOW_REG(0x98) +#define QSD8X50_GPIO_INT_POS_4 MSM_GPIO1_SHADOW_REG(0x9C) +#define QSD8X50_GPIO_INT_POS_5 MSM_GPIO1_SHADOW_REG(0xA0) +#define QSD8X50_GPIO_INT_POS_6 MSM_GPIO1_SHADOW_REG(0xA4) +#define QSD8X50_GPIO_INT_POS_7 MSM_GPIO1_SHADOW_REG(0xA8) + +/* same pin map as above, interrupt enable */ +#define QSD8X50_GPIO_INT_EN_0 MSM_GPIO1_SHADOW_REG(0xB0) +#define QSD8X50_GPIO_INT_EN_1 MSM_GPIO2_SHADOW_REG(0x60) +#define QSD8X50_GPIO_INT_EN_2 MSM_GPIO1_SHADOW_REG(0xB4) +#define QSD8X50_GPIO_INT_EN_3 MSM_GPIO1_SHADOW_REG(0xB8) +#define 
QSD8X50_GPIO_INT_EN_4 MSM_GPIO1_SHADOW_REG(0xBC) +#define QSD8X50_GPIO_INT_EN_5 MSM_GPIO1_SHADOW_REG(0xC0) +#define QSD8X50_GPIO_INT_EN_6 MSM_GPIO1_SHADOW_REG(0xC4) +#define QSD8X50_GPIO_INT_EN_7 MSM_GPIO1_SHADOW_REG(0xC8) + +/* same pin map as above, write 1 to clear interrupt */ +#define QSD8X50_GPIO_INT_CLEAR_0 MSM_GPIO1_SHADOW_REG(0xD0) +#define QSD8X50_GPIO_INT_CLEAR_1 MSM_GPIO2_SHADOW_REG(0x68) +#define QSD8X50_GPIO_INT_CLEAR_2 MSM_GPIO1_SHADOW_REG(0xD4) +#define QSD8X50_GPIO_INT_CLEAR_3 MSM_GPIO1_SHADOW_REG(0xD8) +#define QSD8X50_GPIO_INT_CLEAR_4 MSM_GPIO1_SHADOW_REG(0xDC) +#define QSD8X50_GPIO_INT_CLEAR_5 MSM_GPIO1_SHADOW_REG(0xE0) +#define QSD8X50_GPIO_INT_CLEAR_6 MSM_GPIO1_SHADOW_REG(0xE4) +#define QSD8X50_GPIO_INT_CLEAR_7 MSM_GPIO1_SHADOW_REG(0xE8) + +/* same pin map as above, 1=interrupt pending */ +#define QSD8X50_GPIO_INT_STATUS_0 MSM_GPIO1_SHADOW_REG(0xF0) +#define QSD8X50_GPIO_INT_STATUS_1 MSM_GPIO2_SHADOW_REG(0x70) +#define QSD8X50_GPIO_INT_STATUS_2 MSM_GPIO1_SHADOW_REG(0xF4) +#define QSD8X50_GPIO_INT_STATUS_3 MSM_GPIO1_SHADOW_REG(0xF8) +#define QSD8X50_GPIO_INT_STATUS_4 MSM_GPIO1_SHADOW_REG(0xFC) +#define QSD8X50_GPIO_INT_STATUS_5 MSM_GPIO1_SHADOW_REG(0x100) +#define QSD8X50_GPIO_INT_STATUS_6 MSM_GPIO1_SHADOW_REG(0x104) +#define QSD8X50_GPIO_INT_STATUS_7 MSM_GPIO1_SHADOW_REG(0x108) + +/* + * MSM7X30 registers + */ +/* output value */ +#define MSM7X30_GPIO_OUT_0 MSM_GPIO1_REG(0x00) /* gpio 15-0 */ +#define MSM7X30_GPIO_OUT_1 MSM_GPIO2_REG(0x00) /* gpio 43-16 */ +#define MSM7X30_GPIO_OUT_2 MSM_GPIO1_REG(0x04) /* gpio 67-44 */ +#define MSM7X30_GPIO_OUT_3 MSM_GPIO1_REG(0x08) /* gpio 94-68 */ +#define MSM7X30_GPIO_OUT_4 MSM_GPIO1_REG(0x0C) /* gpio 106-95 */ +#define MSM7X30_GPIO_OUT_5 MSM_GPIO1_REG(0x50) /* gpio 133-107 */ +#define MSM7X30_GPIO_OUT_6 MSM_GPIO1_REG(0xC4) /* gpio 150-134 */ +#define MSM7X30_GPIO_OUT_7 MSM_GPIO1_REG(0x214) /* gpio 181-151 */ + +/* same pin map as above, output enable */ +#define MSM7X30_GPIO_OE_0 MSM_GPIO1_REG(0x10) +#define MSM7X30_GPIO_OE_1 MSM_GPIO2_REG(0x08) +#define MSM7X30_GPIO_OE_2 MSM_GPIO1_REG(0x14) +#define MSM7X30_GPIO_OE_3 MSM_GPIO1_REG(0x18) +#define MSM7X30_GPIO_OE_4 MSM_GPIO1_REG(0x1C) +#define MSM7X30_GPIO_OE_5 MSM_GPIO1_REG(0x54) +#define MSM7X30_GPIO_OE_6 MSM_GPIO1_REG(0xC8) +#define MSM7X30_GPIO_OE_7 MSM_GPIO1_REG(0x218) + +/* same pin map as above, input read */ +#define MSM7X30_GPIO_IN_0 MSM_GPIO1_REG(0x34) +#define MSM7X30_GPIO_IN_1 MSM_GPIO2_REG(0x20) +#define MSM7X30_GPIO_IN_2 MSM_GPIO1_REG(0x38) +#define MSM7X30_GPIO_IN_3 MSM_GPIO1_REG(0x3C) +#define MSM7X30_GPIO_IN_4 MSM_GPIO1_REG(0x40) +#define MSM7X30_GPIO_IN_5 MSM_GPIO1_REG(0x44) +#define MSM7X30_GPIO_IN_6 MSM_GPIO1_REG(0xCC) +#define MSM7X30_GPIO_IN_7 MSM_GPIO1_REG(0x21C) + +/* same pin map as above, 1=edge 0=level interrup */ +#define MSM7X30_GPIO_INT_EDGE_0 MSM_GPIO1_REG(0x60) +#define MSM7X30_GPIO_INT_EDGE_1 MSM_GPIO2_REG(0x50) +#define MSM7X30_GPIO_INT_EDGE_2 MSM_GPIO1_REG(0x64) +#define MSM7X30_GPIO_INT_EDGE_3 MSM_GPIO1_REG(0x68) +#define MSM7X30_GPIO_INT_EDGE_4 MSM_GPIO1_REG(0x6C) +#define MSM7X30_GPIO_INT_EDGE_5 MSM_GPIO1_REG(0xC0) +#define MSM7X30_GPIO_INT_EDGE_6 MSM_GPIO1_REG(0xD0) +#define MSM7X30_GPIO_INT_EDGE_7 MSM_GPIO1_REG(0x240) + +/* same pin map as above, 1=positive 0=negative */ +#define MSM7X30_GPIO_INT_POS_0 MSM_GPIO1_REG(0x70) +#define MSM7X30_GPIO_INT_POS_1 MSM_GPIO2_REG(0x58) +#define MSM7X30_GPIO_INT_POS_2 MSM_GPIO1_REG(0x74) +#define MSM7X30_GPIO_INT_POS_3 MSM_GPIO1_REG(0x78) +#define MSM7X30_GPIO_INT_POS_4 MSM_GPIO1_REG(0x7C) +#define 
MSM7X30_GPIO_INT_POS_5 MSM_GPIO1_REG(0xBC) +#define MSM7X30_GPIO_INT_POS_6 MSM_GPIO1_REG(0xD4) +#define MSM7X30_GPIO_INT_POS_7 MSM_GPIO1_REG(0x228) + +/* same pin map as above, interrupt enable */ +#define MSM7X30_GPIO_INT_EN_0 MSM_GPIO1_REG(0x80) +#define MSM7X30_GPIO_INT_EN_1 MSM_GPIO2_REG(0x60) +#define MSM7X30_GPIO_INT_EN_2 MSM_GPIO1_REG(0x84) +#define MSM7X30_GPIO_INT_EN_3 MSM_GPIO1_REG(0x88) +#define MSM7X30_GPIO_INT_EN_4 MSM_GPIO1_REG(0x8C) +#define MSM7X30_GPIO_INT_EN_5 MSM_GPIO1_REG(0xB8) +#define MSM7X30_GPIO_INT_EN_6 MSM_GPIO1_REG(0xD8) +#define MSM7X30_GPIO_INT_EN_7 MSM_GPIO1_REG(0x22C) + +/* same pin map as above, write 1 to clear interrupt */ +#define MSM7X30_GPIO_INT_CLEAR_0 MSM_GPIO1_REG(0x90) +#define MSM7X30_GPIO_INT_CLEAR_1 MSM_GPIO2_REG(0x68) +#define MSM7X30_GPIO_INT_CLEAR_2 MSM_GPIO1_REG(0x94) +#define MSM7X30_GPIO_INT_CLEAR_3 MSM_GPIO1_REG(0x98) +#define MSM7X30_GPIO_INT_CLEAR_4 MSM_GPIO1_REG(0x9C) +#define MSM7X30_GPIO_INT_CLEAR_5 MSM_GPIO1_REG(0xB4) +#define MSM7X30_GPIO_INT_CLEAR_6 MSM_GPIO1_REG(0xDC) +#define MSM7X30_GPIO_INT_CLEAR_7 MSM_GPIO1_REG(0x230) + +/* same pin map as above, 1=interrupt pending */ +#define MSM7X30_GPIO_INT_STATUS_0 MSM_GPIO1_REG(0xA0) +#define MSM7X30_GPIO_INT_STATUS_1 MSM_GPIO2_REG(0x70) +#define MSM7X30_GPIO_INT_STATUS_2 MSM_GPIO1_REG(0xA4) +#define MSM7X30_GPIO_INT_STATUS_3 MSM_GPIO1_REG(0xA8) +#define MSM7X30_GPIO_INT_STATUS_4 MSM_GPIO1_REG(0xAC) +#define MSM7X30_GPIO_INT_STATUS_5 MSM_GPIO1_REG(0xB0) +#define MSM7X30_GPIO_INT_STATUS_6 MSM_GPIO1_REG(0xE0) +#define MSM7X30_GPIO_INT_STATUS_7 MSM_GPIO1_REG(0x234) + +#define FIRST_GPIO_IRQ MSM_GPIO_TO_INT(0) + +#define MSM_GPIO_BANK(soc, bank, first, last) \ + { \ + .regs = { \ + .out = soc##_GPIO_OUT_##bank, \ + .in = soc##_GPIO_IN_##bank, \ + .int_status = soc##_GPIO_INT_STATUS_##bank, \ + .int_clear = soc##_GPIO_INT_CLEAR_##bank, \ + .int_en = soc##_GPIO_INT_EN_##bank, \ + .int_edge = soc##_GPIO_INT_EDGE_##bank, \ + .int_pos = soc##_GPIO_INT_POS_##bank, \ + .oe = soc##_GPIO_OE_##bank, \ + }, \ + .chip = { \ + .base = (first), \ + .ngpio = (last) - (first) + 1, \ + .get = msm_gpio_get, \ + .set = msm_gpio_set, \ + .direction_input = msm_gpio_direction_input, \ + .direction_output = msm_gpio_direction_output, \ + .to_irq = msm_gpio_to_irq, \ + .request = msm_gpio_request, \ + .free = msm_gpio_free, \ + } \ + } + +#define MSM_GPIO_BROKEN_INT_CLEAR 1 + +struct msm_gpio_regs { + void __iomem *out; + void __iomem *in; + void __iomem *int_status; + void __iomem *int_clear; + void __iomem *int_en; + void __iomem *int_edge; + void __iomem *int_pos; + void __iomem *oe; +}; + +struct msm_gpio_chip { + spinlock_t lock; + struct gpio_chip chip; + struct msm_gpio_regs regs; +#if MSM_GPIO_BROKEN_INT_CLEAR + unsigned int_status_copy; +#endif + unsigned int both_edge_detect; + unsigned int int_enable[2]; /* 0: awake, 1: sleep */ +}; + +static int msm_gpio_write(struct msm_gpio_chip *msm_chip, + unsigned offset, unsigned on) +{ + unsigned mask = BIT(offset); + unsigned val; + + val = readl(msm_chip->regs.out); + if (on) + writel(val | mask, msm_chip->regs.out); + else + writel(val & ~mask, msm_chip->regs.out); + return 0; +} + +static void msm_gpio_update_both_edge_detect(struct msm_gpio_chip *msm_chip) +{ + int loop_limit = 100; + unsigned pol, val, val2, intstat; + do { + val = readl(msm_chip->regs.in); + pol = readl(msm_chip->regs.int_pos); + pol = (pol & ~msm_chip->both_edge_detect) | + (~val & msm_chip->both_edge_detect); + writel(pol, msm_chip->regs.int_pos); + intstat = 
readl(msm_chip->regs.int_status); + val2 = readl(msm_chip->regs.in); + if (((val ^ val2) & msm_chip->both_edge_detect & ~intstat) == 0) + return; + } while (loop_limit-- > 0); + printk(KERN_ERR "msm_gpio_update_both_edge_detect, " + "failed to reach stable state %x != %x\n", val, val2); +} + +static int msm_gpio_clear_detect_status(struct msm_gpio_chip *msm_chip, + unsigned offset) +{ + unsigned bit = BIT(offset); + +#if MSM_GPIO_BROKEN_INT_CLEAR + /* Save interrupts that already triggered before we loose them. */ + /* Any interrupt that triggers between the read of int_status */ + /* and the write to int_clear will still be lost though. */ + msm_chip->int_status_copy |= readl(msm_chip->regs.int_status); + msm_chip->int_status_copy &= ~bit; +#endif + writel(bit, msm_chip->regs.int_clear); + msm_gpio_update_both_edge_detect(msm_chip); + return 0; +} + +static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset) +{ + struct msm_gpio_chip *msm_chip; + unsigned long irq_flags; + + msm_chip = container_of(chip, struct msm_gpio_chip, chip); + spin_lock_irqsave(&msm_chip->lock, irq_flags); + writel(readl(msm_chip->regs.oe) & ~BIT(offset), msm_chip->regs.oe); + spin_unlock_irqrestore(&msm_chip->lock, irq_flags); + return 0; +} + +static int +msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value) +{ + struct msm_gpio_chip *msm_chip; + unsigned long irq_flags; + + msm_chip = container_of(chip, struct msm_gpio_chip, chip); + spin_lock_irqsave(&msm_chip->lock, irq_flags); + msm_gpio_write(msm_chip, offset, value); + writel(readl(msm_chip->regs.oe) | BIT(offset), msm_chip->regs.oe); + spin_unlock_irqrestore(&msm_chip->lock, irq_flags); + return 0; +} + +static int msm_gpio_get(struct gpio_chip *chip, unsigned offset) +{ + struct msm_gpio_chip *msm_chip; + + msm_chip = container_of(chip, struct msm_gpio_chip, chip); + return (readl(msm_chip->regs.in) & (1U << offset)) ? 
1 : 0; +} + +static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value) +{ + struct msm_gpio_chip *msm_chip; + unsigned long irq_flags; + + msm_chip = container_of(chip, struct msm_gpio_chip, chip); + spin_lock_irqsave(&msm_chip->lock, irq_flags); + msm_gpio_write(msm_chip, offset, value); + spin_unlock_irqrestore(&msm_chip->lock, irq_flags); +} + +static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset) +{ + return MSM_GPIO_TO_INT(chip->base + offset); +} + +#ifdef CONFIG_MSM_GPIOMUX +static int msm_gpio_request(struct gpio_chip *chip, unsigned offset) +{ + return msm_gpiomux_get(chip->base + offset); +} + +static void msm_gpio_free(struct gpio_chip *chip, unsigned offset) +{ + msm_gpiomux_put(chip->base + offset); +} +#else +#define msm_gpio_request NULL +#define msm_gpio_free NULL +#endif + +static struct msm_gpio_chip *msm_gpio_chips; +static int msm_gpio_count; + +static struct msm_gpio_chip msm_gpio_chips_msm7x01[] = { + MSM_GPIO_BANK(MSM7X00, 0, 0, 15), + MSM_GPIO_BANK(MSM7X00, 1, 16, 42), + MSM_GPIO_BANK(MSM7X00, 2, 43, 67), + MSM_GPIO_BANK(MSM7X00, 3, 68, 94), + MSM_GPIO_BANK(MSM7X00, 4, 95, 106), + MSM_GPIO_BANK(MSM7X00, 5, 107, 121), +}; + +static struct msm_gpio_chip msm_gpio_chips_msm7x30[] = { + MSM_GPIO_BANK(MSM7X30, 0, 0, 15), + MSM_GPIO_BANK(MSM7X30, 1, 16, 43), + MSM_GPIO_BANK(MSM7X30, 2, 44, 67), + MSM_GPIO_BANK(MSM7X30, 3, 68, 94), + MSM_GPIO_BANK(MSM7X30, 4, 95, 106), + MSM_GPIO_BANK(MSM7X30, 5, 107, 133), + MSM_GPIO_BANK(MSM7X30, 6, 134, 150), + MSM_GPIO_BANK(MSM7X30, 7, 151, 181), +}; + +static struct msm_gpio_chip msm_gpio_chips_qsd8x50[] = { + MSM_GPIO_BANK(QSD8X50, 0, 0, 15), + MSM_GPIO_BANK(QSD8X50, 1, 16, 42), + MSM_GPIO_BANK(QSD8X50, 2, 43, 67), + MSM_GPIO_BANK(QSD8X50, 3, 68, 94), + MSM_GPIO_BANK(QSD8X50, 4, 95, 103), + MSM_GPIO_BANK(QSD8X50, 5, 104, 121), + MSM_GPIO_BANK(QSD8X50, 6, 122, 152), + MSM_GPIO_BANK(QSD8X50, 7, 153, 164), +}; + +static void msm_gpio_irq_ack(struct irq_data *d) +{ + unsigned long irq_flags; + struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d); + spin_lock_irqsave(&msm_chip->lock, irq_flags); + msm_gpio_clear_detect_status(msm_chip, + d->irq - gpio_to_irq(msm_chip->chip.base)); + spin_unlock_irqrestore(&msm_chip->lock, irq_flags); +} + +static void msm_gpio_irq_mask(struct irq_data *d) +{ + unsigned long irq_flags; + struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d); + unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base); + + spin_lock_irqsave(&msm_chip->lock, irq_flags); + /* level triggered interrupts are also latched */ + if (!(readl(msm_chip->regs.int_edge) & BIT(offset))) + msm_gpio_clear_detect_status(msm_chip, offset); + msm_chip->int_enable[0] &= ~BIT(offset); + writel(msm_chip->int_enable[0], msm_chip->regs.int_en); + spin_unlock_irqrestore(&msm_chip->lock, irq_flags); +} + +static void msm_gpio_irq_unmask(struct irq_data *d) +{ + unsigned long irq_flags; + struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d); + unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base); + + spin_lock_irqsave(&msm_chip->lock, irq_flags); + /* level triggered interrupts are also latched */ + if (!(readl(msm_chip->regs.int_edge) & BIT(offset))) + msm_gpio_clear_detect_status(msm_chip, offset); + msm_chip->int_enable[0] |= BIT(offset); + writel(msm_chip->int_enable[0], msm_chip->regs.int_en); + spin_unlock_irqrestore(&msm_chip->lock, irq_flags); +} + +static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on) +{ + unsigned long irq_flags; + struct msm_gpio_chip 
*msm_chip = irq_data_get_irq_chip_data(d); + unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base); + + spin_lock_irqsave(&msm_chip->lock, irq_flags); + + if (on) + msm_chip->int_enable[1] |= BIT(offset); + else + msm_chip->int_enable[1] &= ~BIT(offset); + + spin_unlock_irqrestore(&msm_chip->lock, irq_flags); + return 0; +} + +static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type) +{ + unsigned long irq_flags; + struct msm_gpio_chip *msm_chip = irq_data_get_irq_chip_data(d); + unsigned offset = d->irq - gpio_to_irq(msm_chip->chip.base); + unsigned val, mask = BIT(offset); + + spin_lock_irqsave(&msm_chip->lock, irq_flags); + val = readl(msm_chip->regs.int_edge); + if (flow_type & IRQ_TYPE_EDGE_BOTH) { + writel(val | mask, msm_chip->regs.int_edge); + __irq_set_handler_locked(d->irq, handle_edge_irq); + } else { + writel(val & ~mask, msm_chip->regs.int_edge); + __irq_set_handler_locked(d->irq, handle_level_irq); + } + if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) { + msm_chip->both_edge_detect |= mask; + msm_gpio_update_both_edge_detect(msm_chip); + } else { + msm_chip->both_edge_detect &= ~mask; + val = readl(msm_chip->regs.int_pos); + if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_HIGH)) + writel(val | mask, msm_chip->regs.int_pos); + else + writel(val & ~mask, msm_chip->regs.int_pos); + } + spin_unlock_irqrestore(&msm_chip->lock, irq_flags); + return 0; +} + +static void msm_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) +{ + int i, j, mask; + unsigned val; + + for (i = 0; i < msm_gpio_count; i++) { + struct msm_gpio_chip *msm_chip = &msm_gpio_chips[i]; + val = readl(msm_chip->regs.int_status); + val &= msm_chip->int_enable[0]; + while (val) { + mask = val & -val; + j = fls(mask) - 1; + /* printk("%s %08x %08x bit %d gpio %d irq %d\n", + __func__, v, m, j, msm_chip->chip.start + j, + FIRST_GPIO_IRQ + msm_chip->chip.start + j); */ + val &= ~mask; + generic_handle_irq(FIRST_GPIO_IRQ + + msm_chip->chip.base + j); + } + } + desc->irq_data.chip->irq_ack(&desc->irq_data); +} + +static struct irq_chip msm_gpio_irq_chip = { + .name = "msmgpio", + .irq_ack = msm_gpio_irq_ack, + .irq_mask = msm_gpio_irq_mask, + .irq_unmask = msm_gpio_irq_unmask, + .irq_set_wake = msm_gpio_irq_set_wake, + .irq_set_type = msm_gpio_irq_set_type, +}; + +static int __init msm_init_gpio(void) +{ + int i, j = 0; + + if (cpu_is_msm7x01()) { + msm_gpio_chips = msm_gpio_chips_msm7x01; + msm_gpio_count = ARRAY_SIZE(msm_gpio_chips_msm7x01); + } else if (cpu_is_msm7x30()) { + msm_gpio_chips = msm_gpio_chips_msm7x30; + msm_gpio_count = ARRAY_SIZE(msm_gpio_chips_msm7x30); + } else if (cpu_is_qsd8x50()) { + msm_gpio_chips = msm_gpio_chips_qsd8x50; + msm_gpio_count = ARRAY_SIZE(msm_gpio_chips_qsd8x50); + } else { + return 0; + } + + for (i = FIRST_GPIO_IRQ; i < FIRST_GPIO_IRQ + NR_GPIO_IRQS; i++) { + if (i - FIRST_GPIO_IRQ >= + msm_gpio_chips[j].chip.base + + msm_gpio_chips[j].chip.ngpio) + j++; + irq_set_chip_data(i, &msm_gpio_chips[j]); + irq_set_chip_and_handler(i, &msm_gpio_irq_chip, + handle_edge_irq); + set_irq_flags(i, IRQF_VALID); + } + + for (i = 0; i < msm_gpio_count; i++) { + spin_lock_init(&msm_gpio_chips[i].lock); + writel(0, msm_gpio_chips[i].regs.int_en); + gpiochip_add(&msm_gpio_chips[i].chip); + } + + irq_set_chained_handler(INT_GPIO_GROUP1, msm_gpio_irq_handler); + irq_set_chained_handler(INT_GPIO_GROUP2, msm_gpio_irq_handler); + irq_set_irq_wake(INT_GPIO_GROUP1, 1); + irq_set_irq_wake(INT_GPIO_GROUP2, 2); + return 0; +} + +postcore_initcall(msm_init_gpio); 
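[Editor's illustrative sketch, not part of the patch] To show how the v1 msmgpio chip registered by msm_init_gpio() above is consumed, here is a hypothetical board-level user that requests a pin and asks for a both-edge interrupt; IRQ_TYPE_EDGE_BOTH is the case msm_gpio_irq_set_type() emulates by re-arming the polarity register after each event. The pin number, names and init hook are made up, and gpio_request() only reaches msm_gpiomux_get() when CONFIG_MSM_GPIOMUX is enabled.

#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>

#define DEMO_GPIO	83			/* hypothetical pin */

static irqreturn_t demo_gpio_isr(int irq, void *data)
{
	/* Fires on both edges; the driver flips INT_POS after each event. */
	return IRQ_HANDLED;
}

static int __init demo_gpio_init(void)
{
	int irq, err;

	err = gpio_request(DEMO_GPIO, "demo");	/* -> msm_gpio_request() if gpiomux is built in */
	if (err)
		return err;

	gpio_direction_input(DEMO_GPIO);	/* clears the OE bit for this pin */
	irq = gpio_to_irq(DEMO_GPIO);		/* MSM_GPIO_TO_INT(DEMO_GPIO) */

	err = request_irq(irq, demo_gpio_isr,
			  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
			  "demo-gpio", NULL);
	if (err)
		gpio_free(DEMO_GPIO);
	return err;
}
device_initcall(demo_gpio_init);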
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c new file mode 100644 index 000000000000..5cb1227d69cf --- /dev/null +++ b/drivers/gpio/gpio-msm-v2.c @@ -0,0 +1,433 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + */ +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include <linux/bitmap.h> +#include <linux/bitops.h> +#include <linux/gpio.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> + +#include <asm/mach/irq.h> + +#include <mach/msm_gpiomux.h> +#include <mach/msm_iomap.h> + +/* Bits of interest in the GPIO_IN_OUT register. + */ +enum { + GPIO_IN = 0, + GPIO_OUT = 1 +}; + +/* Bits of interest in the GPIO_INTR_STATUS register. + */ +enum { + INTR_STATUS = 0, +}; + +/* Bits of interest in the GPIO_CFG register. + */ +enum { + GPIO_OE = 9, +}; + +/* Bits of interest in the GPIO_INTR_CFG register. + * When a GPIO triggers, two separate decisions are made, controlled + * by two separate flags. + * + * - First, INTR_RAW_STATUS_EN controls whether or not the GPIO_INTR_STATUS + * register for that GPIO will be updated to reflect the triggering of that + * gpio. If this bit is 0, this register will not be updated. + * - Second, INTR_ENABLE controls whether an interrupt is triggered. + * + * If INTR_ENABLE is set and INTR_RAW_STATUS_EN is NOT set, an interrupt + * can be triggered but the status register will not reflect it. + */ +enum { + INTR_ENABLE = 0, + INTR_POL_CTL = 1, + INTR_DECT_CTL = 2, + INTR_RAW_STATUS_EN = 3, +}; + +/* Codes of interest in GPIO_INTR_CFG_SU. + */ +enum { + TARGET_PROC_SCORPION = 4, + TARGET_PROC_NONE = 7, +}; + + +#define GPIO_INTR_CFG_SU(gpio) (MSM_TLMM_BASE + 0x0400 + (0x04 * (gpio))) +#define GPIO_CONFIG(gpio) (MSM_TLMM_BASE + 0x1000 + (0x10 * (gpio))) +#define GPIO_IN_OUT(gpio) (MSM_TLMM_BASE + 0x1004 + (0x10 * (gpio))) +#define GPIO_INTR_CFG(gpio) (MSM_TLMM_BASE + 0x1008 + (0x10 * (gpio))) +#define GPIO_INTR_STATUS(gpio) (MSM_TLMM_BASE + 0x100c + (0x10 * (gpio))) + +/** + * struct msm_gpio_dev: the MSM8660 SoC GPIO device structure + * + * @enabled_irqs: a bitmap used to optimize the summary-irq handler. By + * keeping track of which gpios are unmasked as irq sources, we avoid + * having to do readl calls on hundreds of iomapped registers each time + * the summary interrupt fires in order to locate the active interrupts. + * + * @wake_irqs: a bitmap for tracking which interrupt lines are enabled + * as wakeup sources. When the device is suspended, interrupts which are + * not wakeup sources are disabled. + * + * @dual_edge_irqs: a bitmap used to track which irqs are configured + * as dual-edge, as this is not supported by the hardware and requires + * some special handling in the driver. 
+ */ +struct msm_gpio_dev { + struct gpio_chip gpio_chip; + DECLARE_BITMAP(enabled_irqs, NR_GPIO_IRQS); + DECLARE_BITMAP(wake_irqs, NR_GPIO_IRQS); + DECLARE_BITMAP(dual_edge_irqs, NR_GPIO_IRQS); +}; + +static DEFINE_SPINLOCK(tlmm_lock); + +static inline struct msm_gpio_dev *to_msm_gpio_dev(struct gpio_chip *chip) +{ + return container_of(chip, struct msm_gpio_dev, gpio_chip); +} + +static inline void set_gpio_bits(unsigned n, void __iomem *reg) +{ + writel(readl(reg) | n, reg); +} + +static inline void clear_gpio_bits(unsigned n, void __iomem *reg) +{ + writel(readl(reg) & ~n, reg); +} + +static int msm_gpio_get(struct gpio_chip *chip, unsigned offset) +{ + return readl(GPIO_IN_OUT(offset)) & BIT(GPIO_IN); +} + +static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int val) +{ + writel(val ? BIT(GPIO_OUT) : 0, GPIO_IN_OUT(offset)); +} + +static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset) +{ + unsigned long irq_flags; + + spin_lock_irqsave(&tlmm_lock, irq_flags); + clear_gpio_bits(BIT(GPIO_OE), GPIO_CONFIG(offset)); + spin_unlock_irqrestore(&tlmm_lock, irq_flags); + return 0; +} + +static int msm_gpio_direction_output(struct gpio_chip *chip, + unsigned offset, + int val) +{ + unsigned long irq_flags; + + spin_lock_irqsave(&tlmm_lock, irq_flags); + msm_gpio_set(chip, offset, val); + set_gpio_bits(BIT(GPIO_OE), GPIO_CONFIG(offset)); + spin_unlock_irqrestore(&tlmm_lock, irq_flags); + return 0; +} + +static int msm_gpio_request(struct gpio_chip *chip, unsigned offset) +{ + return msm_gpiomux_get(chip->base + offset); +} + +static void msm_gpio_free(struct gpio_chip *chip, unsigned offset) +{ + msm_gpiomux_put(chip->base + offset); +} + +static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset) +{ + return MSM_GPIO_TO_INT(chip->base + offset); +} + +static inline int msm_irq_to_gpio(struct gpio_chip *chip, unsigned irq) +{ + return irq - MSM_GPIO_TO_INT(chip->base); +} + +static struct msm_gpio_dev msm_gpio = { + .gpio_chip = { + .base = 0, + .ngpio = NR_GPIO_IRQS, + .direction_input = msm_gpio_direction_input, + .direction_output = msm_gpio_direction_output, + .get = msm_gpio_get, + .set = msm_gpio_set, + .to_irq = msm_gpio_to_irq, + .request = msm_gpio_request, + .free = msm_gpio_free, + }, +}; + +/* For dual-edge interrupts in software, since the hardware has no + * such support: + * + * At appropriate moments, this function may be called to flip the polarity + * settings of both-edge irq lines to try and catch the next edge. + * + * The attempt is considered successful if: + * - the status bit goes high, indicating that an edge was caught, or + * - the input value of the gpio doesn't change during the attempt. + * If the value changes twice during the process, that would cause the first + * test to fail but would force the second, as two opposite + * transitions would cause a detection no matter the polarity setting. + * + * The do-loop tries to sledge-hammer closed the timing hole between + * the initial value-read and the polarity-write - if the line value changes + * during that window, an interrupt is lost, the new polarity setting is + * incorrect, and the first success test will fail, causing a retry. + * + * Algorithm comes from Google's msmgpio driver, see mach-msm/gpio.c. 
+ */ +static void msm_gpio_update_dual_edge_pos(unsigned gpio) +{ + int loop_limit = 100; + unsigned val, val2, intstat; + + do { + val = readl(GPIO_IN_OUT(gpio)) & BIT(GPIO_IN); + if (val) + clear_gpio_bits(BIT(INTR_POL_CTL), GPIO_INTR_CFG(gpio)); + else + set_gpio_bits(BIT(INTR_POL_CTL), GPIO_INTR_CFG(gpio)); + val2 = readl(GPIO_IN_OUT(gpio)) & BIT(GPIO_IN); + intstat = readl(GPIO_INTR_STATUS(gpio)) & BIT(INTR_STATUS); + if (intstat || val == val2) + return; + } while (loop_limit-- > 0); + pr_err("dual-edge irq failed to stabilize, " + "interrupts dropped. %#08x != %#08x\n", + val, val2); +} + +static void msm_gpio_irq_ack(struct irq_data *d) +{ + int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); + + writel(BIT(INTR_STATUS), GPIO_INTR_STATUS(gpio)); + if (test_bit(gpio, msm_gpio.dual_edge_irqs)) + msm_gpio_update_dual_edge_pos(gpio); +} + +static void msm_gpio_irq_mask(struct irq_data *d) +{ + int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); + unsigned long irq_flags; + + spin_lock_irqsave(&tlmm_lock, irq_flags); + writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio)); + clear_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio)); + __clear_bit(gpio, msm_gpio.enabled_irqs); + spin_unlock_irqrestore(&tlmm_lock, irq_flags); +} + +static void msm_gpio_irq_unmask(struct irq_data *d) +{ + int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); + unsigned long irq_flags; + + spin_lock_irqsave(&tlmm_lock, irq_flags); + __set_bit(gpio, msm_gpio.enabled_irqs); + set_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio)); + writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio)); + spin_unlock_irqrestore(&tlmm_lock, irq_flags); +} + +static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type) +{ + int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); + unsigned long irq_flags; + uint32_t bits; + + spin_lock_irqsave(&tlmm_lock, irq_flags); + + bits = readl(GPIO_INTR_CFG(gpio)); + + if (flow_type & IRQ_TYPE_EDGE_BOTH) { + bits |= BIT(INTR_DECT_CTL); + __irq_set_handler_locked(d->irq, handle_edge_irq); + if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) + __set_bit(gpio, msm_gpio.dual_edge_irqs); + else + __clear_bit(gpio, msm_gpio.dual_edge_irqs); + } else { + bits &= ~BIT(INTR_DECT_CTL); + __irq_set_handler_locked(d->irq, handle_level_irq); + __clear_bit(gpio, msm_gpio.dual_edge_irqs); + } + + if (flow_type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH)) + bits |= BIT(INTR_POL_CTL); + else + bits &= ~BIT(INTR_POL_CTL); + + writel(bits, GPIO_INTR_CFG(gpio)); + + if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) + msm_gpio_update_dual_edge_pos(gpio); + + spin_unlock_irqrestore(&tlmm_lock, irq_flags); + + return 0; +} + +/* + * When the summary IRQ is raised, any number of GPIO lines may be high. + * It is the job of the summary handler to find all those GPIO lines + * which have been set as summary IRQ lines and which are triggered, + * and to call their interrupt handlers. 
+ */ +static void msm_summary_irq_handler(unsigned int irq, struct irq_desc *desc) +{ + unsigned long i; + struct irq_chip *chip = irq_desc_get_chip(desc); + + chained_irq_enter(chip, desc); + + for (i = find_first_bit(msm_gpio.enabled_irqs, NR_GPIO_IRQS); + i < NR_GPIO_IRQS; + i = find_next_bit(msm_gpio.enabled_irqs, NR_GPIO_IRQS, i + 1)) { + if (readl(GPIO_INTR_STATUS(i)) & BIT(INTR_STATUS)) + generic_handle_irq(msm_gpio_to_irq(&msm_gpio.gpio_chip, + i)); + } + + chained_irq_exit(chip, desc); +} + +static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on) +{ + int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); + + if (on) { + if (bitmap_empty(msm_gpio.wake_irqs, NR_GPIO_IRQS)) + irq_set_irq_wake(TLMM_SCSS_SUMMARY_IRQ, 1); + set_bit(gpio, msm_gpio.wake_irqs); + } else { + clear_bit(gpio, msm_gpio.wake_irqs); + if (bitmap_empty(msm_gpio.wake_irqs, NR_GPIO_IRQS)) + irq_set_irq_wake(TLMM_SCSS_SUMMARY_IRQ, 0); + } + + return 0; +} + +static struct irq_chip msm_gpio_irq_chip = { + .name = "msmgpio", + .irq_mask = msm_gpio_irq_mask, + .irq_unmask = msm_gpio_irq_unmask, + .irq_ack = msm_gpio_irq_ack, + .irq_set_type = msm_gpio_irq_set_type, + .irq_set_wake = msm_gpio_irq_set_wake, +}; + +static int __devinit msm_gpio_probe(struct platform_device *dev) +{ + int i, irq, ret; + + bitmap_zero(msm_gpio.enabled_irqs, NR_GPIO_IRQS); + bitmap_zero(msm_gpio.wake_irqs, NR_GPIO_IRQS); + bitmap_zero(msm_gpio.dual_edge_irqs, NR_GPIO_IRQS); + msm_gpio.gpio_chip.label = dev->name; + ret = gpiochip_add(&msm_gpio.gpio_chip); + if (ret < 0) + return ret; + + for (i = 0; i < msm_gpio.gpio_chip.ngpio; ++i) { + irq = msm_gpio_to_irq(&msm_gpio.gpio_chip, i); + irq_set_chip_and_handler(irq, &msm_gpio_irq_chip, + handle_level_irq); + set_irq_flags(irq, IRQF_VALID); + } + + irq_set_chained_handler(TLMM_SCSS_SUMMARY_IRQ, + msm_summary_irq_handler); + return 0; +} + +static int __devexit msm_gpio_remove(struct platform_device *dev) +{ + int ret = gpiochip_remove(&msm_gpio.gpio_chip); + + if (ret < 0) + return ret; + + irq_set_handler(TLMM_SCSS_SUMMARY_IRQ, NULL); + + return 0; +} + +static struct platform_driver msm_gpio_driver = { + .probe = msm_gpio_probe, + .remove = __devexit_p(msm_gpio_remove), + .driver = { + .name = "msmgpio", + .owner = THIS_MODULE, + }, +}; + +static struct platform_device msm_device_gpio = { + .name = "msmgpio", + .id = -1, +}; + +static int __init msm_gpio_init(void) +{ + int rc; + + rc = platform_driver_register(&msm_gpio_driver); + if (!rc) { + rc = platform_device_register(&msm_device_gpio); + if (rc) + platform_driver_unregister(&msm_gpio_driver); + } + + return rc; +} + +static void __exit msm_gpio_exit(void) +{ + platform_device_unregister(&msm_device_gpio); + platform_driver_unregister(&msm_gpio_driver); +} + +postcore_initcall(msm_gpio_init); +module_exit(msm_gpio_exit); + +MODULE_AUTHOR("Gregory Bean <gbean@codeaurora.org>"); +MODULE_DESCRIPTION("Driver for Qualcomm MSM TLMMv2 SoC GPIOs"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:msmgpio"); diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 9d8c892d07c9..9d2668a50872 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -90,7 +90,6 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count, struct drm_device *dev = minor->dev; struct dentry *ent; struct drm_info_node *tmp; - char name[64]; int i, ret; for (i = 0; i < count; i++) { @@ -108,6 +107,9 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count, ent = 
debugfs_create_file(files[i].name, S_IFREG | S_IRUGO, root, tmp, &drm_debugfs_fops); if (!ent) { + char name[64]; + strncpy(name, root->d_name.name, + min(root->d_name.len, 64U)); DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n", name, files[i].name); kfree(tmp); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 756af4d7ec74..7425e5c9bd75 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -127,6 +127,23 @@ static const u8 edid_header[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; + /* + * Sanity check the header of the base EDID block. Return 8 if the header + * is perfect, down to 0 if it's totally wrong. + */ +int drm_edid_header_is_valid(const u8 *raw_edid) +{ + int i, score = 0; + + for (i = 0; i < sizeof(edid_header); i++) + if (raw_edid[i] == edid_header[i]) + score++; + + return score; +} +EXPORT_SYMBOL(drm_edid_header_is_valid); + + /* * Sanity check the EDID block (base or extension). Return 0 if the block * doesn't check out, or 1 if it's valid. @@ -139,12 +156,7 @@ drm_edid_block_valid(u8 *raw_edid) struct edid *edid = (struct edid *)raw_edid; if (raw_edid[0] == 0x00) { - int score = 0; - - for (i = 0; i < sizeof(edid_header); i++) - if (raw_edid[i] == edid_header[i]) - score++; - + int score = drm_edid_header_is_valid(raw_edid); if (score == 8) ; else if (score >= 6) { DRM_DEBUG("Fixing EDID header, your hardware may be failing\n"); @@ -1439,6 +1451,8 @@ EXPORT_SYMBOL(drm_detect_monitor_audio); static void drm_add_display_info(struct edid *edid, struct drm_display_info *info) { + u8 *edid_ext; + info->width_mm = edid->width_cm * 10; info->height_mm = edid->height_cm * 10; @@ -1483,6 +1497,13 @@ static void drm_add_display_info(struct edid *edid, info->color_formats = DRM_COLOR_FORMAT_YCRCB444; if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB422) info->color_formats = DRM_COLOR_FORMAT_YCRCB422; + + /* Get data from CEA blocks if present */ + edid_ext = drm_find_cea_extension(edid); + if (!edid_ext) + return; + + info->cea_rev = edid_ext[1]; } /** diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 2022a5c966bb..3830e9e478c0 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -291,11 +291,14 @@ static void drm_irq_vgaarb_nokms(void *cookie, bool state) if (!dev->irq_enabled) return; - if (state) - dev->driver->irq_uninstall(dev); - else { - dev->driver->irq_preinstall(dev); - dev->driver->irq_postinstall(dev); + if (state) { + if (dev->driver->irq_uninstall) + dev->driver->irq_uninstall(dev); + } else { + if (dev->driver->irq_preinstall) + dev->driver->irq_preinstall(dev); + if (dev->driver->irq_postinstall) + dev->driver->irq_postinstall(dev); } } @@ -338,7 +341,8 @@ int drm_irq_install(struct drm_device *dev) DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev)); /* Before installing handler */ - dev->driver->irq_preinstall(dev); + if (dev->driver->irq_preinstall) + dev->driver->irq_preinstall(dev); /* Install handler */ if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED)) @@ -363,11 +367,16 @@ int drm_irq_install(struct drm_device *dev) vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL); /* After installing handler */ - ret = dev->driver->irq_postinstall(dev); + if (dev->driver->irq_postinstall) + ret = dev->driver->irq_postinstall(dev); + if (ret < 0) { mutex_lock(&dev->struct_mutex); dev->irq_enabled = 0; mutex_unlock(&dev->struct_mutex); + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + vga_client_register(dev->pdev, NULL, NULL, NULL); + 
free_irq(drm_dev_to_irq(dev), dev); } return ret; @@ -413,7 +422,8 @@ int drm_irq_uninstall(struct drm_device *dev) if (!drm_core_check_feature(dev, DRIVER_MODESET)) vga_client_register(dev->pdev, NULL, NULL, NULL); - dev->driver->irq_uninstall(dev); + if (dev->driver->irq_uninstall) + dev->driver->irq_uninstall(dev); free_irq(drm_dev_to_irq(dev), dev); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index e2662497d50f..a8ab6263e0d7 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1338,6 +1338,155 @@ static const struct file_operations i915_wedged_fops = { .llseek = default_llseek, }; +static int +i915_max_freq_open(struct inode *inode, + struct file *filp) +{ + filp->private_data = inode->i_private; + return 0; +} + +static ssize_t +i915_max_freq_read(struct file *filp, + char __user *ubuf, + size_t max, + loff_t *ppos) +{ + struct drm_device *dev = filp->private_data; + drm_i915_private_t *dev_priv = dev->dev_private; + char buf[80]; + int len; + + len = snprintf(buf, sizeof (buf), + "max freq: %d\n", dev_priv->max_delay * 50); + + if (len > sizeof (buf)) + len = sizeof (buf); + + return simple_read_from_buffer(ubuf, max, ppos, buf, len); +} + +static ssize_t +i915_max_freq_write(struct file *filp, + const char __user *ubuf, + size_t cnt, + loff_t *ppos) +{ + struct drm_device *dev = filp->private_data; + struct drm_i915_private *dev_priv = dev->dev_private; + char buf[20]; + int val = 1; + + if (cnt > 0) { + if (cnt > sizeof (buf) - 1) + return -EINVAL; + + if (copy_from_user(buf, ubuf, cnt)) + return -EFAULT; + buf[cnt] = 0; + + val = simple_strtoul(buf, NULL, 0); + } + + DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val); + + /* + * Turbo will still be enabled, but won't go above the set value. 
+ */ + dev_priv->max_delay = val / 50; + + gen6_set_rps(dev, val / 50); + + return cnt; +} + +static const struct file_operations i915_max_freq_fops = { + .owner = THIS_MODULE, + .open = i915_max_freq_open, + .read = i915_max_freq_read, + .write = i915_max_freq_write, + .llseek = default_llseek, +}; + +static int +i915_cache_sharing_open(struct inode *inode, + struct file *filp) +{ + filp->private_data = inode->i_private; + return 0; +} + +static ssize_t +i915_cache_sharing_read(struct file *filp, + char __user *ubuf, + size_t max, + loff_t *ppos) +{ + struct drm_device *dev = filp->private_data; + drm_i915_private_t *dev_priv = dev->dev_private; + char buf[80]; + u32 snpcr; + int len; + + mutex_lock(&dev_priv->dev->struct_mutex); + snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); + mutex_unlock(&dev_priv->dev->struct_mutex); + + len = snprintf(buf, sizeof (buf), + "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >> + GEN6_MBC_SNPCR_SHIFT); + + if (len > sizeof (buf)) + len = sizeof (buf); + + return simple_read_from_buffer(ubuf, max, ppos, buf, len); +} + +static ssize_t +i915_cache_sharing_write(struct file *filp, + const char __user *ubuf, + size_t cnt, + loff_t *ppos) +{ + struct drm_device *dev = filp->private_data; + struct drm_i915_private *dev_priv = dev->dev_private; + char buf[20]; + u32 snpcr; + int val = 1; + + if (cnt > 0) { + if (cnt > sizeof (buf) - 1) + return -EINVAL; + + if (copy_from_user(buf, ubuf, cnt)) + return -EFAULT; + buf[cnt] = 0; + + val = simple_strtoul(buf, NULL, 0); + } + + if (val < 0 || val > 3) + return -EINVAL; + + DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val); + + /* Update the cache sharing policy here as well */ + snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); + snpcr &= ~GEN6_MBC_SNPCR_MASK; + snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); + I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); + + return cnt; +} + +static const struct file_operations i915_cache_sharing_fops = { + .owner = THIS_MODULE, + .open = i915_cache_sharing_open, + .read = i915_cache_sharing_read, + .write = i915_cache_sharing_write, + .llseek = default_llseek, +}; + /* As the drm_debugfs_init() routines are called before dev->dev_private is * allocated we need to hook into the minor for release. 
*/ static int @@ -1437,6 +1586,36 @@ static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor) return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops); } +static int i915_max_freq_create(struct dentry *root, struct drm_minor *minor) +{ + struct drm_device *dev = minor->dev; + struct dentry *ent; + + ent = debugfs_create_file("i915_max_freq", + S_IRUGO | S_IWUSR, + root, dev, + &i915_max_freq_fops); + if (IS_ERR(ent)) + return PTR_ERR(ent); + + return drm_add_fake_info_node(minor, ent, &i915_max_freq_fops); +} + +static int i915_cache_sharing_create(struct dentry *root, struct drm_minor *minor) +{ + struct drm_device *dev = minor->dev; + struct dentry *ent; + + ent = debugfs_create_file("i915_cache_sharing", + S_IRUGO | S_IWUSR, + root, dev, + &i915_cache_sharing_fops); + if (IS_ERR(ent)) + return PTR_ERR(ent); + + return drm_add_fake_info_node(minor, ent, &i915_cache_sharing_fops); +} + static struct drm_info_list i915_debugfs_list[] = { {"i915_capabilities", i915_capabilities, 0}, {"i915_gem_objects", i915_gem_object_info, 0}, @@ -1490,6 +1669,12 @@ int i915_debugfs_init(struct drm_minor *minor) ret = i915_forcewake_create(minor->debugfs_root, minor); if (ret) return ret; + ret = i915_max_freq_create(minor->debugfs_root, minor); + if (ret) + return ret; + ret = i915_cache_sharing_create(minor->debugfs_root, minor); + if (ret) + return ret; return drm_debugfs_create_files(i915_debugfs_list, I915_DEBUGFS_ENTRIES, @@ -1504,6 +1689,10 @@ void i915_debugfs_cleanup(struct drm_minor *minor) 1, minor); drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops, 1, minor); + drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops, + 1, minor); + drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops, + 1, minor); } #endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 12712824a6d2..8a3942c4f099 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -61,7 +61,6 @@ static void i915_write_hws_pga(struct drm_device *dev) static int i915_init_phys_hws(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring = LP_RING(dev_priv); /* Program Hardware Status Page */ dev_priv->status_page_dmah = @@ -71,10 +70,9 @@ static int i915_init_phys_hws(struct drm_device *dev) DRM_ERROR("Can not allocate hardware status page\n"); return -ENOMEM; } - ring->status_page.page_addr = - (void __force __iomem *)dev_priv->status_page_dmah->vaddr; - memset_io(ring->status_page.page_addr, 0, PAGE_SIZE); + memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr, + 0, PAGE_SIZE); i915_write_hws_pga(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6867e193d85e..feb4f164fd1b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -544,6 +544,7 @@ typedef struct drm_i915_private { u32 savePIPEB_LINK_M1; u32 savePIPEB_LINK_N1; u32 saveMCHBAR_RENDER_STANDBY; + u32 savePCH_PORT_HOTPLUG; struct { /** Bridge to intel-gtt-ko */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index d1cd8b89f47d..a546a71fb060 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3112,7 +3112,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, if (pipelined != obj->ring) { ret = i915_gem_object_wait_rendering(obj); - if (ret) + if (ret == -ERESTARTSYS) return ret; } diff --git 
a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 23d1ae67d279..02f96fd0d52d 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -306,12 +306,15 @@ static void i915_hotplug_work_func(struct work_struct *work) struct drm_mode_config *mode_config = &dev->mode_config; struct intel_encoder *encoder; + mutex_lock(&mode_config->mutex); DRM_DEBUG_KMS("running encoder hotplug functions\n"); list_for_each_entry(encoder, &mode_config->encoder_list, base.head) if (encoder->hot_plug) encoder->hot_plug(encoder); + mutex_unlock(&mode_config->mutex); + /* Just fire off a uevent and let userspace tell us what to do */ drm_helper_hpd_irq_event(dev); } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 02db299f621a..d1331f771e2f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -78,6 +78,14 @@ #define GRDOM_RENDER (1<<2) #define GRDOM_MEDIA (3<<2) +#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */ +#define GEN6_MBC_SNPCR_SHIFT 21 +#define GEN6_MBC_SNPCR_MASK (3<<21) +#define GEN6_MBC_SNPCR_MAX (0<<21) +#define GEN6_MBC_SNPCR_MED (1<<21) +#define GEN6_MBC_SNPCR_LOW (2<<21) +#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */ + #define GEN6_GDRST 0x941c #define GEN6_GRDOM_FULL (1 << 0) #define GEN6_GRDOM_RENDER (1 << 1) @@ -1506,6 +1514,7 @@ #define VIDEO_DIP_SELECT_AVI (0 << 19) #define VIDEO_DIP_SELECT_VENDOR (1 << 19) #define VIDEO_DIP_SELECT_SPD (3 << 19) +#define VIDEO_DIP_SELECT_MASK (3 << 19) #define VIDEO_DIP_FREQ_ONCE (0 << 16) #define VIDEO_DIP_FREQ_VSYNC (1 << 16) #define VIDEO_DIP_FREQ_2VSYNC (2 << 16) @@ -2084,9 +2093,6 @@ #define DP_PIPEB_SELECT (1 << 30) #define DP_PIPE_MASK (1 << 30) -#define DP_PIPE_ENABLED(V, P) \ - (((V) & (DP_PIPE_MASK | DP_PORT_EN)) == ((P) << 30 | DP_PORT_EN)) - /* Link training mode - select a suitable mode for each stage */ #define DP_LINK_TRAIN_PAT_1 (0 << 28) #define DP_LINK_TRAIN_PAT_2 (1 << 28) @@ -3024,6 +3030,20 @@ #define _TRANSA_DP_LINK_M2 0xe0048 #define _TRANSA_DP_LINK_N2 0xe004c +/* Per-transcoder DIP controls */ + +#define _VIDEO_DIP_CTL_A 0xe0200 +#define _VIDEO_DIP_DATA_A 0xe0208 +#define _VIDEO_DIP_GCP_A 0xe0210 + +#define _VIDEO_DIP_CTL_B 0xe1200 +#define _VIDEO_DIP_DATA_B 0xe1208 +#define _VIDEO_DIP_GCP_B 0xe1210 + +#define TVIDEO_DIP_CTL(pipe) _PIPE(pipe, _VIDEO_DIP_CTL_A, _VIDEO_DIP_CTL_B) +#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) +#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) + #define _TRANS_HTOTAL_B 0xe1000 #define _TRANS_HBLANK_B 0xe1004 #define _TRANS_HSYNC_B 0xe1008 @@ -3076,6 +3096,16 @@ #define TRANS_6BPC (2<<5) #define TRANS_12BPC (3<<5) +#define _TRANSA_CHICKEN2 0xf0064 +#define _TRANSB_CHICKEN2 0xf1064 +#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2) +#define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31) + +#define SOUTH_CHICKEN1 0xc2000 +#define FDIA_PHASE_SYNC_SHIFT_OVR 19 +#define FDIA_PHASE_SYNC_SHIFT_EN 18 +#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2))) +#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2))) #define SOUTH_CHICKEN2 0xc2004 #define DPLS_EDP_PPS_FIX_DIS (1<<0) diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 285758603ac8..87677d60d0df 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -812,6 +812,7 @@ int i915_save_state(struct 
drm_device *dev) dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR); dev_priv->saveMCHBAR_RENDER_STANDBY = I915_READ(RSTDBYCTL); + dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG); } else { dev_priv->saveIER = I915_READ(IER); dev_priv->saveIMR = I915_READ(IMR); @@ -863,6 +864,7 @@ int i915_restore_state(struct drm_device *dev) I915_WRITE(GTIMR, dev_priv->saveGTIMR); I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR); I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR); + I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG); } else { I915_WRITE(IER, dev_priv->saveIER); I915_WRITE(IMR, dev_priv->saveIMR); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 393a39922e53..35364e68a091 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -980,11 +980,29 @@ static void assert_transcoder_disabled(struct drm_i915_private *dev_priv, pipe_name(pipe)); } +static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, enum pipe pipe, + int reg, u32 port_sel, u32 val) +{ + if ((val & DP_PORT_EN) == 0) + return false; + + if (HAS_PCH_CPT(dev_priv->dev)) { + u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe); + u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg); + if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) + return false; + } else { + if ((val & DP_PIPE_MASK) != (pipe << 30)) + return false; + } + return true; +} + static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, - enum pipe pipe, int reg) + enum pipe pipe, int reg, u32 port_sel) { u32 val = I915_READ(reg); - WARN(DP_PIPE_ENABLED(val, pipe), + WARN(dp_pipe_enabled(dev_priv, pipe, reg, port_sel, val), "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", reg, pipe_name(pipe)); } @@ -1004,9 +1022,9 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, int reg; u32 val; - assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B); - assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C); - assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D); + assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); + assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); + assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); reg = PCH_ADPA; val = I915_READ(reg); @@ -1276,6 +1294,17 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv, intel_wait_for_pipe_off(dev_priv->dev, pipe); } +/* + * Plane regs are double buffered, going from enabled->disabled needs a + * trigger in order to latch. The display address reg provides this. + */ +static void intel_flush_display_plane(struct drm_i915_private *dev_priv, + enum plane plane) +{ + I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane))); + I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane))); +} + /** * intel_enable_plane - enable a display plane on a given pipe * @dev_priv: i915 private structure @@ -1299,20 +1328,10 @@ static void intel_enable_plane(struct drm_i915_private *dev_priv, return; I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); + intel_flush_display_plane(dev_priv, plane); intel_wait_for_vblank(dev_priv->dev, pipe); } -/* - * Plane regs are double buffered, going from enabled->disabled needs a - * trigger in order to latch. The display address reg provides this. 
- */ -static void intel_flush_display_plane(struct drm_i915_private *dev_priv, - enum plane plane) -{ - u32 reg = DSPADDR(plane); - I915_WRITE(reg, I915_READ(reg)); -} - /** * intel_disable_plane - disable a display plane * @dev_priv: i915 private structure @@ -1338,19 +1357,24 @@ static void intel_disable_plane(struct drm_i915_private *dev_priv, } static void disable_pch_dp(struct drm_i915_private *dev_priv, - enum pipe pipe, int reg) + enum pipe pipe, int reg, u32 port_sel) { u32 val = I915_READ(reg); - if (DP_PIPE_ENABLED(val, pipe)) + if (dp_pipe_enabled(dev_priv, pipe, reg, port_sel, val)) { + DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe); I915_WRITE(reg, val & ~DP_PORT_EN); + } } static void disable_pch_hdmi(struct drm_i915_private *dev_priv, enum pipe pipe, int reg) { u32 val = I915_READ(reg); - if (HDMI_PIPE_ENABLED(val, pipe)) + if (HDMI_PIPE_ENABLED(val, pipe)) { + DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", + reg, pipe); I915_WRITE(reg, val & ~PORT_ENABLE); + } } /* Disable any ports connected to this transcoder */ @@ -1362,9 +1386,9 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv, val = I915_READ(PCH_PP_CONTROL); I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS); - disable_pch_dp(dev_priv, pipe, PCH_DP_B); - disable_pch_dp(dev_priv, pipe, PCH_DP_C); - disable_pch_dp(dev_priv, pipe, PCH_DP_D); + disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); + disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); + disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); reg = PCH_ADPA; val = I915_READ(reg); @@ -2096,7 +2120,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, /* no fb bound */ if (!crtc->fb) { - DRM_DEBUG_KMS("No FB bound\n"); + DRM_ERROR("No FB bound\n"); return 0; } @@ -2105,6 +2129,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, case 1: break; default: + DRM_ERROR("no plane for crtc\n"); return -EINVAL; } @@ -2114,6 +2139,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, NULL); if (ret != 0) { mutex_unlock(&dev->struct_mutex); + DRM_ERROR("pin & fence failed\n"); return ret; } @@ -2142,6 +2168,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, if (ret) { i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); mutex_unlock(&dev->struct_mutex); + DRM_ERROR("failed to update base address\n"); return ret; } @@ -2248,6 +2275,18 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc) FDI_FE_ERRC_ENABLE); } +static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 flags = I915_READ(SOUTH_CHICKEN1); + + flags |= FDI_PHASE_SYNC_OVR(pipe); + I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */ + flags |= FDI_PHASE_SYNC_EN(pipe); + I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */ + POSTING_READ(SOUTH_CHICKEN1); +} + /* The FDI link training functions for ILK/Ibexpeak. 
*/ static void ironlake_fdi_link_train(struct drm_crtc *crtc) { @@ -2398,6 +2437,9 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) POSTING_READ(reg); udelay(150); + if (HAS_PCH_CPT(dev)) + cpt_phase_pointer_enable(dev, pipe); + for (i = 0; i < 4; i++ ) { reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); @@ -2514,6 +2556,9 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) POSTING_READ(reg); udelay(150); + if (HAS_PCH_CPT(dev)) + cpt_phase_pointer_enable(dev, pipe); + for (i = 0; i < 4; i++ ) { reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); @@ -2623,6 +2668,17 @@ static void ironlake_fdi_pll_enable(struct drm_crtc *crtc) } } +static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 flags = I915_READ(SOUTH_CHICKEN1); + + flags &= ~(FDI_PHASE_SYNC_EN(pipe)); + I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */ + flags &= ~(FDI_PHASE_SYNC_OVR(pipe)); + I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */ + POSTING_READ(SOUTH_CHICKEN1); +} static void ironlake_fdi_disable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -2652,6 +2708,8 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc) I915_WRITE(FDI_RX_CHICKEN(pipe), I915_READ(FDI_RX_CHICKEN(pipe) & ~FDI_RX_PHASE_SYNC_POINTER_EN)); + } else if (HAS_PCH_CPT(dev)) { + cpt_phase_pointer_disable(dev, pipe); } /* still set train pattern 1 */ @@ -2862,14 +2920,18 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); } + /* + * On ILK+ LUT must be loaded before the pipe is running but with + * clocks enabled + */ + intel_crtc_load_lut(crtc); + intel_enable_pipe(dev_priv, pipe, is_pch_port); intel_enable_plane(dev_priv, plane, pipe); if (is_pch_port) ironlake_pch_enable(crtc); - intel_crtc_load_lut(crtc); - mutex_lock(&dev->struct_mutex); intel_update_fbc(dev); mutex_unlock(&dev->struct_mutex); @@ -4538,7 +4600,9 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, if (connector->encoder != encoder) continue; - if (connector->display_info.bpc < display_bpc) { + /* Don't use an invalid EDID bpc value */ + if (connector->display_info.bpc && + connector->display_info.bpc < display_bpc) { DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc); display_bpc = connector->display_info.bpc; } @@ -5153,7 +5217,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, temp |= PIPE_12BPC; break; default: - WARN(1, "intel_choose_pipe_bpp returned invalid value\n"); + WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n", + pipe_bpp); temp |= PIPE_8BPC; pipe_bpp = 24; break; @@ -5238,7 +5303,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, } else if (is_sdvo && is_tv) factor = 20; - if (clock.m1 < factor * clock.n) + if (clock.m < factor * clock.n) fp |= FP_CB_TUNE; dpll = 0; @@ -5516,6 +5581,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, drm_vblank_post_modeset(dev, pipe); + intel_crtc->dpms_mode = DRM_MODE_DPMS_ON; + return ret; } @@ -7714,10 +7781,12 @@ static void gen6_init_clock_gating(struct drm_device *dev) ILK_DPARB_CLK_GATE | ILK_DPFD_CLK_GATE); - for_each_pipe(pipe) + for_each_pipe(pipe) { I915_WRITE(DSPCNTR(pipe), I915_READ(DSPCNTR(pipe)) | DISPPLANE_TRICKLE_FEED_DISABLE); + intel_flush_display_plane(dev_priv, pipe); + } } static void ivybridge_init_clock_gating(struct drm_device *dev) @@ -7734,10 +7803,12 @@ static void 
ivybridge_init_clock_gating(struct drm_device *dev) I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); - for_each_pipe(pipe) + for_each_pipe(pipe) { I915_WRITE(DSPCNTR(pipe), I915_READ(DSPCNTR(pipe)) | DISPPLANE_TRICKLE_FEED_DISABLE); + intel_flush_display_plane(dev_priv, pipe); + } } static void g4x_init_clock_gating(struct drm_device *dev) @@ -7820,6 +7891,7 @@ static void ibx_init_clock_gating(struct drm_device *dev) static void cpt_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + int pipe; /* * On Ibex Peak and Cougar Point, we need to disable clock @@ -7829,6 +7901,9 @@ static void cpt_init_clock_gating(struct drm_device *dev) I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | DPLS_EDP_PPS_FIX_DIS); + /* Without this, mode sets may fail silently on FDI */ + for_each_pipe(pipe) + I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS); } static void ironlake_teardown_rc6(struct drm_device *dev) @@ -8178,6 +8253,9 @@ struct intel_quirk intel_quirks[] = { /* Lenovo U160 cannot use SSC on LVDS */ { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, + + /* Sony Vaio Y cannot use SSC on LVDS */ + { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, }; static void intel_init_quirks(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index f797fb58ba9c..0feae908bb37 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -50,9 +50,10 @@ struct intel_dp { bool has_audio; int force_audio; uint32_t color_range; + int dpms_mode; uint8_t link_bw; uint8_t lane_count; - uint8_t dpcd[4]; + uint8_t dpcd[8]; struct i2c_adapter adapter; struct i2c_algo_dp_aux_data algo; bool is_pch_edp; @@ -316,9 +317,17 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, else precharge = 5; - if (I915_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) { - DRM_ERROR("dp_aux_ch not started status 0x%08x\n", - I915_READ(ch_ctl)); + /* Try to wait for any previous AUX channel activity */ + for (try = 0; try < 3; try++) { + status = I915_READ(ch_ctl); + if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) + break; + msleep(1); + } + + if (try == 3) { + WARN(1, "dp_aux_ch not started status 0x%08x\n", + I915_READ(ch_ctl)); return -EBUSY; } @@ -770,6 +779,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); intel_dp->link_configuration[0] = intel_dp->link_bw; intel_dp->link_configuration[1] = intel_dp->lane_count; + intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; /* * Check for DPCD version > 1.1 and enhanced framing support @@ -1011,6 +1021,8 @@ static void intel_dp_commit(struct drm_encoder *encoder) if (is_edp(intel_dp)) ironlake_edp_backlight_on(dev); + + intel_dp->dpms_mode = DRM_MODE_DPMS_ON; } static void @@ -1045,6 +1057,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) if (is_edp(intel_dp)) ironlake_edp_backlight_on(dev); } + intel_dp->dpms_mode = mode; } /* @@ -1334,10 +1347,16 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) u32 reg; uint32_t DP = intel_dp->DP; - /* Enable output, wait for it to become active */ - I915_WRITE(intel_dp->output_reg, intel_dp->DP); - POSTING_READ(intel_dp->output_reg); - intel_wait_for_vblank(dev, intel_crtc->pipe); + /* + * On CPT we have to enable the port in training pattern 1, which + * will happen below in intel_dp_set_link_train. 
Otherwise, enable + * the port and wait for it to become active. + */ + if (!HAS_PCH_CPT(dev)) { + I915_WRITE(intel_dp->output_reg, intel_dp->DP); + POSTING_READ(intel_dp->output_reg); + intel_wait_for_vblank(dev, intel_crtc->pipe); + } /* Write the link configuration data */ intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, @@ -1370,7 +1389,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) reg = DP | DP_LINK_TRAIN_PAT_1; if (!intel_dp_set_link_train(intel_dp, reg, - DP_TRAINING_PATTERN_1)) + DP_TRAINING_PATTERN_1 | + DP_LINK_SCRAMBLING_DISABLE)) break; /* Set training pattern 1 */ @@ -1445,7 +1465,8 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) /* channel eq pattern */ if (!intel_dp_set_link_train(intel_dp, reg, - DP_TRAINING_PATTERN_2)) + DP_TRAINING_PATTERN_2 | + DP_LINK_SCRAMBLING_DISABLE)) break; udelay(400); @@ -1559,6 +1580,18 @@ intel_dp_link_down(struct intel_dp *intel_dp) POSTING_READ(intel_dp->output_reg); } +static bool +intel_dp_get_dpcd(struct intel_dp *intel_dp) +{ + if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, + sizeof (intel_dp->dpcd)) && + (intel_dp->dpcd[DP_DPCD_REV] != 0)) { + return true; + } + + return false; +} + /* * According to DP spec * 5.1.2: @@ -1571,36 +1604,44 @@ intel_dp_link_down(struct intel_dp *intel_dp) static void intel_dp_check_link_status(struct intel_dp *intel_dp) { - int ret; + if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON) + return; if (!intel_dp->base.base.crtc) return; + /* Try to read receiver status if the link appears to be up */ if (!intel_dp_get_link_status(intel_dp)) { intel_dp_link_down(intel_dp); return; } - /* Try to read receiver status if the link appears to be up */ - ret = intel_dp_aux_native_read(intel_dp, - 0x000, intel_dp->dpcd, - sizeof (intel_dp->dpcd)); - if (ret != sizeof(intel_dp->dpcd)) { + /* Now read the DPCD to see if it's actually running */ + if (!intel_dp_get_dpcd(intel_dp)) { intel_dp_link_down(intel_dp); return; } if (!intel_channel_eq_ok(intel_dp)) { + DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", + drm_get_encoder_name(&intel_dp->base.base)); intel_dp_start_link_train(intel_dp); intel_dp_complete_link_train(intel_dp); } } static enum drm_connector_status +intel_dp_detect_dpcd(struct intel_dp *intel_dp) +{ + if (intel_dp_get_dpcd(intel_dp)) + return connector_status_connected; + return connector_status_disconnected; +} + +static enum drm_connector_status ironlake_dp_detect(struct intel_dp *intel_dp) { enum drm_connector_status status; - bool ret; /* Can't disconnect eDP, but you can close the lid... 
*/ if (is_edp(intel_dp)) { @@ -1610,15 +1651,7 @@ ironlake_dp_detect(struct intel_dp *intel_dp) return status; } - status = connector_status_disconnected; - ret = intel_dp_aux_native_read_retry(intel_dp, - 0x000, intel_dp->dpcd, - sizeof (intel_dp->dpcd)); - if (ret && intel_dp->dpcd[DP_DPCD_REV] != 0) - status = connector_status_connected; - DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0], - intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]); - return status; + return intel_dp_detect_dpcd(intel_dp); } static enum drm_connector_status @@ -1626,7 +1659,6 @@ g4x_dp_detect(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - enum drm_connector_status status; uint32_t temp, bit; switch (intel_dp->output_reg) { @@ -1648,15 +1680,7 @@ g4x_dp_detect(struct intel_dp *intel_dp) if ((temp & bit) == 0) return connector_status_disconnected; - status = connector_status_disconnected; - if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd, - sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd)) - { - if (intel_dp->dpcd[DP_DPCD_REV] != 0) - status = connector_status_connected; - } - - return status; + return intel_dp_detect_dpcd(intel_dp); } /** @@ -1679,6 +1703,12 @@ intel_dp_detect(struct drm_connector *connector, bool force) status = ironlake_dp_detect(intel_dp); else status = g4x_dp_detect(intel_dp); + + DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n", + intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2], + intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5], + intel_dp->dpcd[6], intel_dp->dpcd[7]); + if (status != connector_status_connected) return status; @@ -1924,6 +1954,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) return; intel_dp->output_reg = output_reg; + intel_dp->dpms_mode = -1; intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); if (!intel_connector) { @@ -2000,7 +2031,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) /* Cache some DPCD data in the eDP case */ if (is_edp(intel_dp)) { - int ret; + bool ret; u32 pp_on, pp_div; pp_on = I915_READ(PCH_PP_ON_DELAYS); @@ -2013,11 +2044,9 @@ intel_dp_init(struct drm_device *dev, int output_reg) dev_priv->panel_t12 *= 100; /* t12 in 100ms units */ ironlake_edp_panel_vdd_on(intel_dp); - ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV, - intel_dp->dpcd, - sizeof(intel_dp->dpcd)); + ret = intel_dp_get_dpcd(intel_dp); ironlake_edp_panel_vdd_off(intel_dp); - if (ret == sizeof(intel_dp->dpcd)) { + if (ret) { if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] & diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 6e990f9760ef..7b330e76a435 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -178,10 +178,28 @@ struct intel_crtc { #define to_intel_encoder(x) container_of(x, struct intel_encoder, base) #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) +#define DIP_HEADER_SIZE 5 + #define DIP_TYPE_AVI 0x82 #define DIP_VERSION_AVI 0x2 #define DIP_LEN_AVI 13 +#define DIP_TYPE_SPD 0x3 +#define DIP_VERSION_SPD 0x1 +#define DIP_LEN_SPD 25 +#define DIP_SPD_UNKNOWN 0 +#define DIP_SPD_DSTB 0x1 +#define DIP_SPD_DVDP 0x2 +#define DIP_SPD_DVHS 0x3 +#define DIP_SPD_HDDVR 0x4 +#define DIP_SPD_DVC 0x5 +#define DIP_SPD_DSC 0x6 +#define DIP_SPD_VCD 0x7 +#define DIP_SPD_GAME 0x8 +#define DIP_SPD_PC 0x9 +#define DIP_SPD_BD 0xa +#define DIP_SPD_SCD 0xb + 
struct dip_infoframe { uint8_t type; /* HB0 */ uint8_t ver; /* HB1 */ @@ -206,6 +224,11 @@ struct dip_infoframe { uint16_t left_bar_end; uint16_t right_bar_start; } avi; + struct { + uint8_t vn[8]; + uint8_t pd[16]; + uint8_t sdi; + } spd; uint8_t payload[27]; } __attribute__ ((packed)) body; } __attribute__((packed)); diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 1ed8e6903915..226ba830f383 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -45,6 +45,8 @@ struct intel_hdmi { bool has_hdmi_sink; bool has_audio; int force_audio; + void (*write_infoframe)(struct drm_encoder *encoder, + struct dip_infoframe *frame); }; static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) @@ -58,37 +60,70 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector) struct intel_hdmi, base); } -void intel_dip_infoframe_csum(struct dip_infoframe *avi_if) +void intel_dip_infoframe_csum(struct dip_infoframe *frame) { - uint8_t *data = (uint8_t *)avi_if; + uint8_t *data = (uint8_t *)frame; uint8_t sum = 0; unsigned i; - avi_if->checksum = 0; - avi_if->ecc = 0; + frame->checksum = 0; + frame->ecc = 0; - for (i = 0; i < sizeof(*avi_if); i++) + /* Header isn't part of the checksum */ + for (i = 5; i < frame->len; i++) sum += data[i]; - avi_if->checksum = 0x100 - sum; + frame->checksum = 0x100 - sum; } -static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder) +static u32 intel_infoframe_index(struct dip_infoframe *frame) { - struct dip_infoframe avi_if = { - .type = DIP_TYPE_AVI, - .ver = DIP_VERSION_AVI, - .len = DIP_LEN_AVI, - }; - uint32_t *data = (uint32_t *)&avi_if; + u32 flags = 0; + + switch (frame->type) { + case DIP_TYPE_AVI: + flags |= VIDEO_DIP_SELECT_AVI; + break; + case DIP_TYPE_SPD: + flags |= VIDEO_DIP_SELECT_SPD; + break; + default: + DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); + break; + } + + return flags; +} + +static u32 intel_infoframe_flags(struct dip_infoframe *frame) +{ + u32 flags = 0; + + switch (frame->type) { + case DIP_TYPE_AVI: + flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC; + break; + case DIP_TYPE_SPD: + flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_2VSYNC; + break; + default: + DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); + break; + } + + return flags; +} + +static void i9xx_write_infoframe(struct drm_encoder *encoder, + struct dip_infoframe *frame) +{ + uint32_t *data = (uint32_t *)frame; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); - u32 port; - unsigned i; + u32 port, flags, val = I915_READ(VIDEO_DIP_CTL); + unsigned i, len = DIP_HEADER_SIZE + frame->len; - if (!intel_hdmi->has_hdmi_sink) - return; /* XXX first guess at handling video port, is this corrent? 
*/ if (intel_hdmi->sdvox_reg == SDVOB) @@ -98,18 +133,87 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder) else return; - I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port | - VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC); + flags = intel_infoframe_index(frame); + + val &= ~VIDEO_DIP_SELECT_MASK; - intel_dip_infoframe_csum(&avi_if); - for (i = 0; i < sizeof(avi_if); i += 4) { + I915_WRITE(VIDEO_DIP_CTL, val | port | flags); + + for (i = 0; i < len; i += 4) { I915_WRITE(VIDEO_DIP_DATA, *data); data++; } - I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port | - VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC | - VIDEO_DIP_ENABLE_AVI); + flags |= intel_infoframe_flags(frame); + + I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags); +} + +static void ironlake_write_infoframe(struct drm_encoder *encoder, + struct dip_infoframe *frame) +{ + uint32_t *data = (uint32_t *)frame; + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc = encoder->crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); + unsigned i, len = DIP_HEADER_SIZE + frame->len; + u32 flags, val = I915_READ(reg); + + intel_wait_for_vblank(dev, intel_crtc->pipe); + + flags = intel_infoframe_index(frame); + + val &= ~VIDEO_DIP_SELECT_MASK; + + I915_WRITE(reg, val | flags); + + for (i = 0; i < len; i += 4) { + I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); + data++; + } + + flags |= intel_infoframe_flags(frame); + + I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags); +} +static void intel_set_infoframe(struct drm_encoder *encoder, + struct dip_infoframe *frame) +{ + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + + if (!intel_hdmi->has_hdmi_sink) + return; + + intel_dip_infoframe_csum(frame); + intel_hdmi->write_infoframe(encoder, frame); +} + +static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder) +{ + struct dip_infoframe avi_if = { + .type = DIP_TYPE_AVI, + .ver = DIP_VERSION_AVI, + .len = DIP_LEN_AVI, + }; + + intel_set_infoframe(encoder, &avi_if); +} + +static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder) +{ + struct dip_infoframe spd_if; + + memset(&spd_if, 0, sizeof(spd_if)); + spd_if.type = DIP_TYPE_SPD; + spd_if.ver = DIP_VERSION_SPD; + spd_if.len = DIP_LEN_SPD; + strcpy(spd_if.body.spd.vn, "Intel"); + strcpy(spd_if.body.spd.pd, "Integrated gfx"); + spd_if.body.spd.sdi = DIP_SPD_PC; + + intel_set_infoframe(encoder, &spd_if); } static void intel_hdmi_mode_set(struct drm_encoder *encoder, @@ -156,6 +260,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, POSTING_READ(intel_hdmi->sdvox_reg); intel_hdmi_set_avi_infoframe(encoder); + intel_hdmi_set_spd_infoframe(encoder); } static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) @@ -433,6 +538,11 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) intel_hdmi->sdvox_reg = sdvox_reg; + if (!HAS_PCH_SPLIT(dev)) + intel_hdmi->write_infoframe = i9xx_write_infoframe; + else + intel_hdmi->write_infoframe = ironlake_write_infoframe; + drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); intel_hdmi_add_properties(intel_hdmi, connector); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index b28f7bd9f88a..2e8ddfcba40c 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -690,6 +690,14 @@ static const struct dmi_system_id intel_no_lvds[] = { }, { .callback = 
intel_no_lvds_dmi_callback, + .ident = "Dell OptiPlex FX170", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex FX170"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, .ident = "AOpen Mini PC", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "AOpen"), diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index a06ff07a4d3b..05f500cd9c24 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -83,11 +83,15 @@ intel_pch_panel_fitting(struct drm_device *dev, u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; if (scaled_width > scaled_height) { /* pillar */ width = scaled_height / mode->vdisplay; + if (width & 1) + width++; x = (adjusted_mode->hdisplay - width + 1) / 2; y = 0; height = adjusted_mode->vdisplay; } else if (scaled_width < scaled_height) { /* letter */ height = scaled_width / mode->hdisplay; + if (height & 1) + height++; y = (adjusted_mode->vdisplay - height + 1) / 2; x = 0; width = adjusted_mode->hdisplay; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index e9615685a39c..47b9b2777038 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1321,6 +1321,9 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) ring->get_seqno = pc_render_get_seqno; } + if (!I915_NEED_GFX_HWS(dev)) + ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; + ring->dev = dev; INIT_LIST_HEAD(&ring->active_list); INIT_LIST_HEAD(&ring->request_list); diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 3896ef811102..9f363e0c4b60 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -5,6 +5,7 @@ ccflags-y := -Iinclude/drm hostprogs-y := mkregtable +clean-files := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h quiet_cmd_mkregtable = MKREGTABLE $@ cmd_mkregtable = $(obj)/mkregtable $< > $@ diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index ebdb0fdb8348..e88c64417a8a 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -1245,6 +1245,9 @@ struct atom_context *atom_parse(struct card_info *card, void *bios) char name[512]; int i; + if (!ctx) + return NULL; + ctx->card = card; ctx->bios = bios; diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 189e86522b5b..a134790903d3 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c @@ -428,7 +428,7 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3 last_reg = ARRAY_SIZE(evergreen_reg_safe_bm); i = (reg >> 7); - if (i > last_reg) { + if (i >= last_reg) { dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); return -EINVAL; } diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index db8ef1905d5f..cf83aa05a684 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -915,12 +915,11 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx { struct r600_cs_track *track = (struct r600_cs_track *)p->track; struct radeon_cs_reloc *reloc; - u32 last_reg = ARRAY_SIZE(r600_reg_safe_bm); u32 m, i, tmp, *ib; int r; i = (reg >> 7); - if (i > last_reg) { + if (i >= 
ARRAY_SIZE(r600_reg_safe_bm)) { dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); return -EINVAL; } diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index a74217cd192f..e0138b674aca 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -2557,6 +2557,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) u16 offset, misc, misc2 = 0; u8 rev, blocks, tmp; int state_index = 0; + struct radeon_i2c_bus_rec i2c_bus; rdev->pm.default_power_state_index = -1; @@ -2575,7 +2576,6 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE); if (offset) { u8 thermal_controller = 0, gpio = 0, i2c_addr = 0, clk_bit = 0, data_bit = 0; - struct radeon_i2c_bus_rec i2c_bus; rev = RBIOS8(offset); @@ -2617,6 +2617,25 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); } } + } else { + /* boards with a thermal chip, but no overdrive table */ + + /* Asus 9600xt has an f75375 on the monid bus */ + if ((dev->pdev->device == 0x4152) && + (dev->pdev->subsystem_vendor == 0x1043) && + (dev->pdev->subsystem_device == 0xc002)) { + i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); + rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); + if (rdev->pm.i2c_bus) { + struct i2c_board_info info = { }; + const char *name = "f75375"; + info.addr = 0x28; + strlcpy(info.type, name, sizeof(info.type)); + i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); + DRM_INFO("Possible %s thermal controller at 0x%02x\n", + name, info.addr); + } + } } if (rdev->flags & RADEON_IS_MOBILITY) { diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 9792d4ffdc86..6d6b5f16bc09 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -430,6 +430,45 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr return 0; } +/* + * Some integrated ATI Radeon chipset implementations (e. g. + * Asus M2A-VM HDMI) may indicate the availability of a DDC, + * even when there's no monitor connected. For these connectors + * following DDC probe extension will be applied: check also for the + * availability of EDID with at least a correct EDID header. Only then, + * DDC is assumed to be available. This prevents drm_get_edid() and + * drm_edid_block_valid() from periodically dumping data and kernel + * errors into the logs and onto the terminal. + */ +static bool radeon_connector_needs_extended_probe(struct radeon_device *dev, + uint32_t supported_device, + int connector_type) +{ + /* Asus M2A-VM HDMI board sends data to i2c bus even, + * if HDMI add-on card is not plugged in or HDMI is disabled in + * BIOS. 
Valid DDC can only be assumed, if also a valid EDID header + * can be retrieved via i2c bus during DDC probe */ + if ((dev->pdev->device == 0x791e) && + (dev->pdev->subsystem_vendor == 0x1043) && + (dev->pdev->subsystem_device == 0x826d)) { + if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) && + (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) + return true; + } + /* ECS A740GM-M with ATI RADEON 2100 sends data to i2c bus + * for a DVI connector that is not implemented */ + if ((dev->pdev->device == 0x796e) && + (dev->pdev->subsystem_vendor == 0x1019) && + (dev->pdev->subsystem_device == 0x2615)) { + if ((connector_type == DRM_MODE_CONNECTOR_DVID) && + (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) + return true; + } + + /* Default: no EDID header probe required for DDC probing */ + return false; +} + static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder, struct drm_connector *connector) { @@ -661,7 +700,8 @@ radeon_vga_detect(struct drm_connector *connector, bool force) ret = connector_status_disconnected; if (radeon_connector->ddc_bus) - dret = radeon_ddc_probe(radeon_connector); + dret = radeon_ddc_probe(radeon_connector, + radeon_connector->requires_extended_probe); if (dret) { if (radeon_connector->edid) { kfree(radeon_connector->edid); @@ -833,7 +873,8 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) bool dret = false; if (radeon_connector->ddc_bus) - dret = radeon_ddc_probe(radeon_connector); + dret = radeon_ddc_probe(radeon_connector, + radeon_connector->requires_extended_probe); if (dret) { if (radeon_connector->edid) { kfree(radeon_connector->edid); @@ -1251,7 +1292,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force) if (radeon_dp_getdpcd(radeon_connector)) ret = connector_status_connected; } else { - if (radeon_ddc_probe(radeon_connector)) + if (radeon_ddc_probe(radeon_connector, + radeon_connector->requires_extended_probe)) ret = connector_status_connected; } } @@ -1406,6 +1448,9 @@ radeon_add_atom_connector(struct drm_device *dev, radeon_connector->shared_ddc = shared_ddc; radeon_connector->connector_object_id = connector_object_id; radeon_connector->hpd = *hpd; + radeon_connector->requires_extended_probe = + radeon_connector_needs_extended_probe(rdev, supported_device, + connector_type); radeon_connector->router = *router; if (router->ddc_valid || router->cd_valid) { radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info); @@ -1752,6 +1797,9 @@ radeon_add_legacy_connector(struct drm_device *dev, radeon_connector->devices = supported_device; radeon_connector->connector_object_id = connector_object_id; radeon_connector->hpd = *hpd; + radeon_connector->requires_extended_probe = + radeon_connector_needs_extended_probe(rdev, supported_device, + connector_type); switch (connector_type) { case DRM_MODE_CONNECTOR_VGA: drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 7cfaa7e2f3b5..440e6ecccc40 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -704,8 +704,9 @@ int radeon_device_init(struct radeon_device *rdev, rdev->gpu_lockup = false; rdev->accel_working = false; - DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n", - radeon_family_name[rdev->family], pdev->vendor, pdev->device); + DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n", + radeon_family_name[rdev->family], pdev->vendor, 
pdev->device, + pdev->subsystem_vendor, pdev->subsystem_device); /* mutex initialization are all done here so we * can recall function without having locking issues */ diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 28f4655905bc..1a858944e4f3 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -751,8 +751,17 @@ static int radeon_ddc_dump(struct drm_connector *connector) if (!radeon_connector->ddc_bus) return -1; edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter); + /* Log EDID retrieval status here. In particular with regard to + * connectors with requires_extended_probe flag set, that will prevent + * function radeon_dvi_detect() to fetch EDID on this connector, + * as long as there is no valid EDID header found */ if (edid) { + DRM_INFO("Radeon display connector %s: Found valid EDID", + drm_get_connector_name(connector)); kfree(edid); + } else { + DRM_INFO("Radeon display connector %s: No monitor connected or invalid EDID", + drm_get_connector_name(connector)); } return ret; } diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 85f033f19a8a..e71d2ed7fa11 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -50,8 +50,8 @@ * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query - * 2.10.0 - fusion 2D tiling, initial compute support for the CS checker - * 2.11.0 - backend map + * 2.10.0 - fusion 2D tiling + * 2.11.0 - backend map, initial compute support for the CS checker */ #define KMS_DRIVER_MAJOR 2 #define KMS_DRIVER_MINOR 11 diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 781196db792f..6c111c1fa3f9 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c @@ -32,17 +32,17 @@ * radeon_ddc_probe * */ -bool radeon_ddc_probe(struct radeon_connector *radeon_connector) +bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool requires_extended_probe) { - u8 out_buf[] = { 0x0, 0x0}; - u8 buf[2]; + u8 out = 0x0; + u8 buf[8]; int ret; struct i2c_msg msgs[] = { { .addr = 0x50, .flags = 0, .len = 1, - .buf = out_buf, + .buf = &out, }, { .addr = 0x50, @@ -52,15 +52,31 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector) } }; + /* Read 8 bytes from i2c for extended probe of EDID header */ + if (requires_extended_probe) + msgs[1].len = 8; + /* on hw with routers, select right port */ if (radeon_connector->router.ddc_valid) radeon_router_select_ddc_port(radeon_connector); ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); - if (ret == 2) - return true; - - return false; + if (ret != 2) + /* Couldn't find an accessible DDC on this connector */ + return false; + if (requires_extended_probe) { + /* Probe also for valid EDID header + * EDID header starts with: + * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00. 
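The extended probe added here keys off the fixed 8-byte EDID block signature listed just above, and the check a few lines below rejects the connector when drm_edid_header_is_valid(buf) returns fewer than 6 matching bytes. As an illustrative, self-contained stand-in for that DRM helper (a sketch of the idea, not the kernel's actual implementation), a prefix-match count over the signature looks like this:

	#include <stdint.h>

	/* Fixed EDID block signature: 00 FF FF FF FF FF FF 00 */
	static const uint8_t edid_header[8] = {
		0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
	};

	/*
	 * Return how many leading bytes of an 8-byte candidate block match
	 * the signature. The radeon probe treats a score below 6 as "no
	 * usable EDID on this DDC line".
	 */
	static int edid_header_score(const uint8_t buf[8])
	{
		int i, score = 0;

		for (i = 0; i < 8; i++)
			if (buf[i] == edid_header[i])
				score++;

		return score;
	}
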
+ * Only the first 6 bytes must be valid as + * drm_edid_block_valid() can fix the last 2 bytes */ + if (drm_edid_header_is_valid(buf) < 6) { + /* Couldn't find an accessible EDID on this + * connector */ + return false; + } + } + return true; } /* bit banging i2c */ diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 6df4e3cec0c2..d09031c03e26 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -438,6 +438,9 @@ struct radeon_connector { struct radeon_i2c_chan *ddc_bus; /* some systems have an hdmi and vga port with a shared ddc line */ bool shared_ddc; + /* for some Radeon chip families we apply an additional EDID header + check as part of the DDC probe */ + bool requires_extended_probe; bool use_digital; /* we need to mind the EDID between detect and get modes due to analog/digital/tvencoder */ @@ -514,7 +517,8 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c, u8 val); extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector); extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector); -extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); +extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, + bool requires_extended_probe); extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector); diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c index 3be60da52123..67cbcfa35122 100644 --- a/drivers/ide/cy82c693.c +++ b/drivers/ide/cy82c693.c @@ -141,6 +141,8 @@ static void cy82c693_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) pci_write_config_byte(dev, CY82_IDE_SLAVE_IOW, time_16); pci_write_config_byte(dev, CY82_IDE_SLAVE_8BIT, time_8); } + if (hwif->index > 0) + pci_dev_put(dev); } static void __devinit init_iops_cy82c693(ide_hwif_t *hwif) diff --git a/drivers/ide/ide_platform.c b/drivers/ide/ide_platform.c index 542603b394e4..962693b10a1c 100644 --- a/drivers/ide/ide_platform.c +++ b/drivers/ide/ide_platform.c @@ -19,6 +19,7 @@ #include <linux/module.h> #include <linux/ata_platform.h> #include <linux/platform_device.h> +#include <linux/interrupt.h> #include <linux/io.h> static void __devinit plat_ide_setup_ports(struct ide_hw *hw, @@ -95,7 +96,10 @@ static int __devinit plat_ide_probe(struct platform_device *pdev) plat_ide_setup_ports(&hw, base, alt_base, pdata, res_irq->start); hw.dev = &pdev->dev; - d.irq_flags = res_irq->flags; + d.irq_flags = res_irq->flags & IRQF_TRIGGER_MASK; + if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE) + d.irq_flags |= IRQF_SHARED; + if (mmio) d.host_flags |= IDE_HFLAG_MMIO; diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c index ce281d152275..67df91af8424 100644 --- a/drivers/input/keyboard/gpio_keys.c +++ b/drivers/input/keyboard/gpio_keys.c @@ -483,7 +483,7 @@ static int gpio_keys_get_devtree_pdata(struct device *dev, buttons = kzalloc(pdata->nbuttons * (sizeof *buttons), GFP_KERNEL); if (!buttons) - return -ENODEV; + return -ENOMEM; pp = NULL; i = 0; diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c index ab0acaf7fe8f..756348a7f93a 100644 --- a/drivers/input/keyboard/lm8323.c +++ b/drivers/input/keyboard/lm8323.c @@ -754,8 +754,11 @@ fail3: device_remove_file(&client->dev, &dev_attr_disable_kp); fail2: while (--pwm >= 0) - if (lm->pwm[pwm].enabled) + if (lm->pwm[pwm].enabled) { + 
device_remove_file(lm->pwm[pwm].cdev.dev, + &dev_attr_time); led_classdev_unregister(&lm->pwm[pwm].cdev); + } fail1: input_free_device(idev); kfree(lm); @@ -775,8 +778,10 @@ static int __devexit lm8323_remove(struct i2c_client *client) device_remove_file(&lm->client->dev, &dev_attr_disable_kp); for (i = 0; i < 3; i++) - if (lm->pwm[i].enabled) + if (lm->pwm[i].enabled) { + device_remove_file(lm->pwm[i].cdev.dev, &dev_attr_time); led_classdev_unregister(&lm->pwm[i].cdev); + } kfree(lm); diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c index da3828fc2c09..f270447ba951 100644 --- a/drivers/input/keyboard/tegra-kbc.c +++ b/drivers/input/keyboard/tegra-kbc.c @@ -19,6 +19,7 @@ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ +#include <linux/kernel.h> #include <linux/module.h> #include <linux/input.h> #include <linux/platform_device.h> @@ -37,7 +38,7 @@ #define KBC_ROW_SCAN_DLY 5 /* KBC uses a 32KHz clock so a cycle = 1/32Khz */ -#define KBC_CYCLE_USEC 32 +#define KBC_CYCLE_MS 32 /* KBC Registers */ @@ -647,7 +648,7 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev) debounce_cnt = min(pdata->debounce_cnt, KBC_MAX_DEBOUNCE_CNT); scan_time_rows = (KBC_ROW_SCAN_TIME + debounce_cnt) * num_rows; kbc->repoll_dly = KBC_ROW_SCAN_DLY + scan_time_rows + pdata->repeat_cnt; - kbc->repoll_dly = ((kbc->repoll_dly * KBC_CYCLE_USEC) + 999) / 1000; + kbc->repoll_dly = DIV_ROUND_UP(kbc->repoll_dly, KBC_CYCLE_MS); input_dev->name = pdev->name; input_dev->id.bustype = BUS_HOST; diff --git a/drivers/input/misc/kxtj9.c b/drivers/input/misc/kxtj9.c index c456f63b6bae..783597a9a64a 100644 --- a/drivers/input/misc/kxtj9.c +++ b/drivers/input/misc/kxtj9.c @@ -21,6 +21,7 @@ #include <linux/i2c.h> #include <linux/input.h> #include <linux/interrupt.h> +#include <linux/module.h> #include <linux/slab.h> #include <linux/input/kxtj9.h> #include <linux/input-polldev.h> diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c index 20f8f9284f02..6c76cf792991 100644 --- a/drivers/input/misc/mma8450.c +++ b/drivers/input/misc/mma8450.c @@ -24,6 +24,7 @@ #include <linux/delay.h> #include <linux/i2c.h> #include <linux/input-polldev.h> +#include <linux/of_device.h> #define MMA8450_DRV_NAME "mma8450" @@ -229,10 +230,17 @@ static const struct i2c_device_id mma8450_id[] = { }; MODULE_DEVICE_TABLE(i2c, mma8450_id); +static const struct of_device_id mma8450_dt_ids[] = { + { .compatible = "fsl,mma8450", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(i2c, mma8450_dt_ids); + static struct i2c_driver mma8450_driver = { .driver = { .name = MMA8450_DRV_NAME, .owner = THIS_MODULE, + .of_match_table = mma8450_dt_ids, }, .probe = mma8450_probe, .remove = __devexit_p(mma8450_remove), diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c index 95577c15ae56..4d17d9f3320b 100644 --- a/drivers/input/mouse/hgpk.c +++ b/drivers/input/mouse/hgpk.c @@ -32,6 +32,7 @@ #define DEBUG #include <linux/slab.h> #include <linux/input.h> +#include <linux/module.h> #include <linux/serio.h> #include <linux/libps2.h> #include <linux/delay.h> diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c index 80baa53da5b1..d64c5a43aaad 100644 --- a/drivers/input/serio/xilinx_ps2.c +++ b/drivers/input/serio/xilinx_ps2.c @@ -23,7 +23,7 @@ #include <linux/init.h> #include <linux/list.h> #include <linux/io.h> - +#include <linux/of_address.h> #include <linux/of_device.h> #include <linux/of_platform.h> diff --git a/drivers/input/touchscreen/ad7879.c 
b/drivers/input/touchscreen/ad7879.c index bc3b5187f3a3..131f9d1c921b 100644 --- a/drivers/input/touchscreen/ad7879.c +++ b/drivers/input/touchscreen/ad7879.c @@ -249,12 +249,14 @@ static void __ad7879_enable(struct ad7879 *ts) static void __ad7879_disable(struct ad7879 *ts) { + u16 reg = (ts->cmd_crtl2 & ~AD7879_PM(-1)) | + AD7879_PM(AD7879_PM_SHUTDOWN); disable_irq(ts->irq); if (del_timer_sync(&ts->timer)) ad7879_ts_event_release(ts); - ad7879_write(ts, AD7879_REG_CTRL2, AD7879_PM(AD7879_PM_SHUTDOWN)); + ad7879_write(ts, AD7879_REG_CTRL2, reg); } diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 8420129fc5ee..f75a66e7d312 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -241,12 +241,13 @@ config DM_MIRROR needed for live data migration tools such as 'pvmove'. config DM_RAID - tristate "RAID 4/5/6 target (EXPERIMENTAL)" + tristate "RAID 1/4/5/6 target (EXPERIMENTAL)" depends on BLK_DEV_DM && EXPERIMENTAL + select MD_RAID1 select MD_RAID456 select BLK_DEV_MD ---help--- - A dm target that supports RAID4, RAID5 and RAID6 mappings + A dm target that supports RAID1, RAID4, RAID5 and RAID6 mappings A RAID-5 set of N drives with a capacity of C MB per drive provides the capacity of C * (N - 1) MB, and protects against a failure diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index bae6c4e23d3f..49da55c1528a 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -30,7 +30,6 @@ #include <linux/device-mapper.h> #define DM_MSG_PREFIX "crypt" -#define MESG_STR(x) x, sizeof(x) /* * context holding the current state of a multi-part conversion @@ -239,7 +238,7 @@ static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, struct dm_crypt_request *dmreq) { memset(iv, 0, cc->iv_size); - *(u32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff); + *(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff); return 0; } @@ -248,7 +247,7 @@ static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv, struct dm_crypt_request *dmreq) { memset(iv, 0, cc->iv_size); - *(u64 *)iv = cpu_to_le64(dmreq->iv_sector); + *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector); return 0; } @@ -415,7 +414,7 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private; memset(iv, 0, cc->iv_size); - *(u64 *)iv = cpu_to_le64(dmreq->iv_sector); + *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector); crypto_cipher_encrypt_one(essiv_tfm, iv, iv); return 0; @@ -1575,11 +1574,17 @@ bad_mem: static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) { struct crypt_config *cc; - unsigned int key_size; + unsigned int key_size, opt_params; unsigned long long tmpll; int ret; + struct dm_arg_set as; + const char *opt_string; + + static struct dm_arg _args[] = { + {0, 1, "Invalid number of feature args"}, + }; - if (argc != 5) { + if (argc < 5) { ti->error = "Not enough arguments"; return -EINVAL; } @@ -1648,6 +1653,30 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) } cc->start = tmpll; + argv += 5; + argc -= 5; + + /* Optional parameters */ + if (argc) { + as.argc = argc; + as.argv = argv; + + ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error); + if (ret) + goto bad; + + opt_string = dm_shift_arg(&as); + + if (opt_params == 1 && opt_string && + !strcasecmp(opt_string, "allow_discards")) + ti->num_discard_requests = 1; + else if (opt_params) { + ret = -EINVAL; + ti->error = "Invalid feature arguments"; + goto bad; + } + } + ret = -ENOMEM; cc->io_queue = alloc_workqueue("kcryptd_io", 
WQ_NON_REENTRANT| @@ -1682,9 +1711,16 @@ static int crypt_map(struct dm_target *ti, struct bio *bio, struct dm_crypt_io *io; struct crypt_config *cc; - if (bio->bi_rw & REQ_FLUSH) { + /* + * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues. + * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight + * - for REQ_DISCARD caller must use flush if IO ordering matters + */ + if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) { cc = ti->private; bio->bi_bdev = cc->dev->bdev; + if (bio_sectors(bio)) + bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector); return DM_MAPIO_REMAPPED; } @@ -1727,6 +1763,10 @@ static int crypt_status(struct dm_target *ti, status_type_t type, DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset, cc->dev->name, (unsigned long long)cc->start); + + if (ti->num_discard_requests) + DMEMIT(" 1 allow_discards"); + break; } return 0; @@ -1770,12 +1810,12 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv) if (argc < 2) goto error; - if (!strnicmp(argv[0], MESG_STR("key"))) { + if (!strcasecmp(argv[0], "key")) { if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) { DMWARN("not suspended during key manipulation."); return -EINVAL; } - if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) { + if (argc == 3 && !strcasecmp(argv[1], "set")) { ret = crypt_set_key(cc, argv[2]); if (ret) return ret; @@ -1783,7 +1823,7 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv) ret = cc->iv_gen_ops->init(cc); return ret; } - if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) { + if (argc == 2 && !strcasecmp(argv[1], "wipe")) { if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) { ret = cc->iv_gen_ops->wipe(cc); if (ret) @@ -1823,7 +1863,7 @@ static int crypt_iterate_devices(struct dm_target *ti, static struct target_type crypt_target = { .name = "crypt", - .version = {1, 10, 0}, + .version = {1, 11, 0}, .module = THIS_MODULE, .ctr = crypt_ctr, .dtr = crypt_dtr, diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index ea790623c30b..89f73ca22cfa 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -1,6 +1,6 @@ /* * Copyright (C) 2003 Sistina Software (UK) Limited. - * Copyright (C) 2004, 2010 Red Hat, Inc. All rights reserved. + * Copyright (C) 2004, 2010-2011 Red Hat, Inc. All rights reserved. * * This file is released under the GPL. */ @@ -15,6 +15,9 @@ #define DM_MSG_PREFIX "flakey" +#define all_corrupt_bio_flags_match(bio, fc) \ + (((bio)->bi_rw & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags) + /* * Flakey: Used for testing only, simulates intermittent, * catastrophic device failure. @@ -25,60 +28,189 @@ struct flakey_c { sector_t start; unsigned up_interval; unsigned down_interval; + unsigned long flags; + unsigned corrupt_bio_byte; + unsigned corrupt_bio_rw; + unsigned corrupt_bio_value; + unsigned corrupt_bio_flags; +}; + +enum feature_flag_bits { + DROP_WRITES }; +static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, + struct dm_target *ti) +{ + int r; + unsigned argc; + const char *arg_name; + + static struct dm_arg _args[] = { + {0, 6, "Invalid number of feature args"}, + {1, UINT_MAX, "Invalid corrupt bio byte"}, + {0, 255, "Invalid corrupt value to write into bio byte (0-255)"}, + {0, UINT_MAX, "Invalid corrupt bio flags mask"}, + }; + + /* No feature arguments supplied. 
*/ + if (!as->argc) + return 0; + + r = dm_read_arg_group(_args, as, &argc, &ti->error); + if (r) + return r; + + while (argc) { + arg_name = dm_shift_arg(as); + argc--; + + /* + * drop_writes + */ + if (!strcasecmp(arg_name, "drop_writes")) { + if (test_and_set_bit(DROP_WRITES, &fc->flags)) { + ti->error = "Feature drop_writes duplicated"; + return -EINVAL; + } + + continue; + } + + /* + * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags> + */ + if (!strcasecmp(arg_name, "corrupt_bio_byte")) { + if (!argc) + ti->error = "Feature corrupt_bio_byte requires parameters"; + + r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error); + if (r) + return r; + argc--; + + /* + * Direction r or w? + */ + arg_name = dm_shift_arg(as); + if (!strcasecmp(arg_name, "w")) + fc->corrupt_bio_rw = WRITE; + else if (!strcasecmp(arg_name, "r")) + fc->corrupt_bio_rw = READ; + else { + ti->error = "Invalid corrupt bio direction (r or w)"; + return -EINVAL; + } + argc--; + + /* + * Value of byte (0-255) to write in place of correct one. + */ + r = dm_read_arg(_args + 2, as, &fc->corrupt_bio_value, &ti->error); + if (r) + return r; + argc--; + + /* + * Only corrupt bios with these flags set. + */ + r = dm_read_arg(_args + 3, as, &fc->corrupt_bio_flags, &ti->error); + if (r) + return r; + argc--; + + continue; + } + + ti->error = "Unrecognised flakey feature requested"; + return -EINVAL; + } + + if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) { + ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set"; + return -EINVAL; + } + + return 0; +} + /* - * Construct a flakey mapping: <dev_path> <offset> <up interval> <down interval> + * Construct a flakey mapping: + * <dev_path> <offset> <up interval> <down interval> [<#feature args> [<arg>]*] + * + * Feature args: + * [drop_writes] + * [corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>] + * + * Nth_byte starts from 1 for the first byte. + * Direction is r for READ or w for WRITE. + * bio_flags is ignored if 0. 
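To make the new constructor syntax concrete, here is a hypothetical argument vector as flakey_ctr() would receive it for a table line such as "0 409600 flakey /dev/sdb 0 30 15 5 corrupt_bio_byte 32 w 1 0" (the leading start/length and target name are consumed by device-mapper core; the device path and numbers are invented for illustration only):

	/* Hypothetical argv handed to flakey_ctr() for the table line above. */
	static char *example_flakey_argv[] = {
		"/dev/sdb",		/* <dev_path> (made-up device) */
		"0",			/* <offset> in sectors */
		"30",			/* <up interval> in seconds */
		"15",			/* <down interval> in seconds */
		"5",			/* number of feature args that follow */
		"corrupt_bio_byte",	/* feature name */
		"32",			/* Nth_byte: corrupt the 32nd byte of the bio data */
		"w",			/* direction: corrupt writes */
		"1",			/* value (0-255) written into that byte */
		"0",			/* bio_flags: 0 means match any bio */
	};
	static const unsigned example_flakey_argc = 10;

Note that the corrupt_bio_byte feature accounts for five of the feature args (the keyword plus its four parameters), which is why flakey_status() below reports it as 5 when reconstructing the table.
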
*/ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv) { + static struct dm_arg _args[] = { + {0, UINT_MAX, "Invalid up interval"}, + {0, UINT_MAX, "Invalid down interval"}, + }; + + int r; struct flakey_c *fc; - unsigned long long tmp; + unsigned long long tmpll; + struct dm_arg_set as; + const char *devname; - if (argc != 4) { - ti->error = "dm-flakey: Invalid argument count"; + as.argc = argc; + as.argv = argv; + + if (argc < 4) { + ti->error = "Invalid argument count"; return -EINVAL; } - fc = kmalloc(sizeof(*fc), GFP_KERNEL); + fc = kzalloc(sizeof(*fc), GFP_KERNEL); if (!fc) { - ti->error = "dm-flakey: Cannot allocate linear context"; + ti->error = "Cannot allocate linear context"; return -ENOMEM; } fc->start_time = jiffies; - if (sscanf(argv[1], "%llu", &tmp) != 1) { - ti->error = "dm-flakey: Invalid device sector"; + devname = dm_shift_arg(&as); + + if (sscanf(dm_shift_arg(&as), "%llu", &tmpll) != 1) { + ti->error = "Invalid device sector"; goto bad; } - fc->start = tmp; + fc->start = tmpll; - if (sscanf(argv[2], "%u", &fc->up_interval) != 1) { - ti->error = "dm-flakey: Invalid up interval"; + r = dm_read_arg(_args, &as, &fc->up_interval, &ti->error); + if (r) goto bad; - } - if (sscanf(argv[3], "%u", &fc->down_interval) != 1) { - ti->error = "dm-flakey: Invalid down interval"; + r = dm_read_arg(_args, &as, &fc->down_interval, &ti->error); + if (r) goto bad; - } if (!(fc->up_interval + fc->down_interval)) { - ti->error = "dm-flakey: Total (up + down) interval is zero"; + ti->error = "Total (up + down) interval is zero"; goto bad; } if (fc->up_interval + fc->down_interval < fc->up_interval) { - ti->error = "dm-flakey: Interval overflow"; + ti->error = "Interval overflow"; goto bad; } - if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &fc->dev)) { - ti->error = "dm-flakey: Device lookup failed"; + r = parse_features(&as, fc, ti); + if (r) + goto bad; + + if (dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev)) { + ti->error = "Device lookup failed"; goto bad; } ti->num_flush_requests = 1; + ti->num_discard_requests = 1; ti->private = fc; return 0; @@ -99,7 +231,7 @@ static sector_t flakey_map_sector(struct dm_target *ti, sector_t bi_sector) { struct flakey_c *fc = ti->private; - return fc->start + (bi_sector - ti->begin); + return fc->start + dm_target_offset(ti, bi_sector); } static void flakey_map_bio(struct dm_target *ti, struct bio *bio) @@ -111,6 +243,25 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio) bio->bi_sector = flakey_map_sector(ti, bio->bi_sector); } +static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) +{ + unsigned bio_bytes = bio_cur_bytes(bio); + char *data = bio_data(bio); + + /* + * Overwrite the Nth byte of the data returned. + */ + if (data && bio_bytes >= fc->corrupt_bio_byte) { + data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value; + + DMDEBUG("Corrupting data bio=%p by writing %u to byte %u " + "(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n", + bio, fc->corrupt_bio_value, fc->corrupt_bio_byte, + (bio_data_dir(bio) == WRITE) ? 'w' : 'r', + bio->bi_rw, (unsigned long long)bio->bi_sector, bio_bytes); + } +} + static int flakey_map(struct dm_target *ti, struct bio *bio, union map_info *map_context) { @@ -119,18 +270,71 @@ static int flakey_map(struct dm_target *ti, struct bio *bio, /* Are we alive ? 
*/ elapsed = (jiffies - fc->start_time) / HZ; - if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) + if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) { + /* + * Flag this bio as submitted while down. + */ + map_context->ll = 1; + + /* + * Map reads as normal. + */ + if (bio_data_dir(bio) == READ) + goto map_bio; + + /* + * Drop writes? + */ + if (test_bit(DROP_WRITES, &fc->flags)) { + bio_endio(bio, 0); + return DM_MAPIO_SUBMITTED; + } + + /* + * Corrupt matching writes. + */ + if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == WRITE)) { + if (all_corrupt_bio_flags_match(bio, fc)) + corrupt_bio_data(bio, fc); + goto map_bio; + } + + /* + * By default, error all I/O. + */ return -EIO; + } +map_bio: flakey_map_bio(ti, bio); return DM_MAPIO_REMAPPED; } +static int flakey_end_io(struct dm_target *ti, struct bio *bio, + int error, union map_info *map_context) +{ + struct flakey_c *fc = ti->private; + unsigned bio_submitted_while_down = map_context->ll; + + /* + * Corrupt successful READs while in down state. + * If flags were specified, only corrupt those that match. + */ + if (!error && bio_submitted_while_down && + (bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) && + all_corrupt_bio_flags_match(bio, fc)) + corrupt_bio_data(bio, fc); + + return error; +} + static int flakey_status(struct dm_target *ti, status_type_t type, char *result, unsigned int maxlen) { + unsigned sz = 0; struct flakey_c *fc = ti->private; + unsigned drop_writes; switch (type) { case STATUSTYPE_INFO: @@ -138,9 +342,22 @@ static int flakey_status(struct dm_target *ti, status_type_t type, break; case STATUSTYPE_TABLE: - snprintf(result, maxlen, "%s %llu %u %u", fc->dev->name, - (unsigned long long)fc->start, fc->up_interval, - fc->down_interval); + DMEMIT("%s %llu %u %u ", fc->dev->name, + (unsigned long long)fc->start, fc->up_interval, + fc->down_interval); + + drop_writes = test_bit(DROP_WRITES, &fc->flags); + DMEMIT("%u ", drop_writes + (fc->corrupt_bio_byte > 0) * 5); + + if (drop_writes) + DMEMIT("drop_writes "); + + if (fc->corrupt_bio_byte) + DMEMIT("corrupt_bio_byte %u %c %u %u ", + fc->corrupt_bio_byte, + (fc->corrupt_bio_rw == WRITE) ? 
'w' : 'r', + fc->corrupt_bio_value, fc->corrupt_bio_flags); + break; } return 0; @@ -177,11 +394,12 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_ static struct target_type flakey_target = { .name = "flakey", - .version = {1, 1, 0}, + .version = {1, 2, 0}, .module = THIS_MODULE, .ctr = flakey_ctr, .dtr = flakey_dtr, .map = flakey_map, + .end_io = flakey_end_io, .status = flakey_status, .ioctl = flakey_ioctl, .merge = flakey_merge, diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 2067288f61f9..ad2eba40e319 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -38,6 +38,8 @@ struct io { struct dm_io_client *client; io_notify_fn callback; void *context; + void *vma_invalidate_address; + unsigned long vma_invalidate_size; } __attribute__((aligned(DM_IO_MAX_REGIONS))); static struct kmem_cache *_dm_io_cache; @@ -116,6 +118,10 @@ static void dec_count(struct io *io, unsigned int region, int error) set_bit(region, &io->error_bits); if (atomic_dec_and_test(&io->count)) { + if (io->vma_invalidate_size) + invalidate_kernel_vmap_range(io->vma_invalidate_address, + io->vma_invalidate_size); + if (io->sleeper) wake_up_process(io->sleeper); @@ -159,6 +165,9 @@ struct dpages { unsigned context_u; void *context_ptr; + + void *vma_invalidate_address; + unsigned long vma_invalidate_size; }; /* @@ -377,6 +386,9 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, io->sleeper = current; io->client = client; + io->vma_invalidate_address = dp->vma_invalidate_address; + io->vma_invalidate_size = dp->vma_invalidate_size; + dispatch_io(rw, num_regions, where, dp, io, 1); while (1) { @@ -415,13 +427,21 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions, io->callback = fn; io->context = context; + io->vma_invalidate_address = dp->vma_invalidate_address; + io->vma_invalidate_size = dp->vma_invalidate_size; + dispatch_io(rw, num_regions, where, dp, io, 0); return 0; } -static int dp_init(struct dm_io_request *io_req, struct dpages *dp) +static int dp_init(struct dm_io_request *io_req, struct dpages *dp, + unsigned long size) { /* Set up dpages based on memory type */ + + dp->vma_invalidate_address = NULL; + dp->vma_invalidate_size = 0; + switch (io_req->mem.type) { case DM_IO_PAGE_LIST: list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset); @@ -432,6 +452,11 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp) break; case DM_IO_VMA: + flush_kernel_vmap_range(io_req->mem.ptr.vma, size); + if ((io_req->bi_rw & RW_MASK) == READ) { + dp->vma_invalidate_address = io_req->mem.ptr.vma; + dp->vma_invalidate_size = size; + } vm_dp_init(dp, io_req->mem.ptr.vma); break; @@ -460,7 +485,7 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions, int r; struct dpages dp; - r = dp_init(io_req, &dp); + r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT); if (r) return r; diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 4cacdad2270a..2e9a3ca37bdd 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -128,6 +128,24 @@ static struct hash_cell *__get_uuid_cell(const char *str) return NULL; } +static struct hash_cell *__get_dev_cell(uint64_t dev) +{ + struct mapped_device *md; + struct hash_cell *hc; + + md = dm_get_md(huge_decode_dev(dev)); + if (!md) + return NULL; + + hc = dm_get_mdptr(md); + if (!hc) { + dm_put(md); + return NULL; + } + + return hc; +} + /*----------------------------------------------------------------- * Inserting, removing and renaming a 
device. *---------------------------------------------------------------*/ @@ -718,25 +736,45 @@ static int dev_create(struct dm_ioctl *param, size_t param_size) */ static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param) { - struct mapped_device *md; - void *mdptr = NULL; + struct hash_cell *hc = NULL; - if (*param->uuid) - return __get_uuid_cell(param->uuid); + if (*param->uuid) { + if (*param->name || param->dev) + return NULL; - if (*param->name) - return __get_name_cell(param->name); + hc = __get_uuid_cell(param->uuid); + if (!hc) + return NULL; + } else if (*param->name) { + if (param->dev) + return NULL; - md = dm_get_md(huge_decode_dev(param->dev)); - if (!md) - goto out; + hc = __get_name_cell(param->name); + if (!hc) + return NULL; + } else if (param->dev) { + hc = __get_dev_cell(param->dev); + if (!hc) + return NULL; + } else + return NULL; - mdptr = dm_get_mdptr(md); - if (!mdptr) - dm_put(md); + /* + * Sneakily write in both the name and the uuid + * while we have the cell. + */ + strlcpy(param->name, hc->name, sizeof(param->name)); + if (hc->uuid) + strlcpy(param->uuid, hc->uuid, sizeof(param->uuid)); + else + param->uuid[0] = '\0'; -out: - return mdptr; + if (hc->new_map) + param->flags |= DM_INACTIVE_PRESENT_FLAG; + else + param->flags &= ~DM_INACTIVE_PRESENT_FLAG; + + return hc; } static struct mapped_device *find_device(struct dm_ioctl *param) @@ -746,24 +784,8 @@ static struct mapped_device *find_device(struct dm_ioctl *param) down_read(&_hash_lock); hc = __find_device_hash_cell(param); - if (hc) { + if (hc) md = hc->md; - - /* - * Sneakily write in both the name and the uuid - * while we have the cell. - */ - strlcpy(param->name, hc->name, sizeof(param->name)); - if (hc->uuid) - strlcpy(param->uuid, hc->uuid, sizeof(param->uuid)); - else - param->uuid[0] = '\0'; - - if (hc->new_map) - param->flags |= DM_INACTIVE_PRESENT_FLAG; - else - param->flags &= ~DM_INACTIVE_PRESENT_FLAG; - } up_read(&_hash_lock); return md; @@ -1402,6 +1424,11 @@ static int target_message(struct dm_ioctl *param, size_t param_size) goto out; } + if (!argc) { + DMWARN("Empty message received."); + goto out; + } + table = dm_get_live_table(md); if (!table) goto out_argv; diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index 320401dec104..f82147029636 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c @@ -224,8 +224,6 @@ struct kcopyd_job { unsigned int num_dests; struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS]; - sector_t offset; - unsigned int nr_pages; struct page_list *pages; /* @@ -380,7 +378,7 @@ static int run_io_job(struct kcopyd_job *job) .bi_rw = job->rw, .mem.type = DM_IO_PAGE_LIST, .mem.ptr.pl = job->pages, - .mem.offset = job->offset, + .mem.offset = 0, .notify.fn = complete_io, .notify.context = job, .client = job->kc->io_client, @@ -397,10 +395,9 @@ static int run_io_job(struct kcopyd_job *job) static int run_pages_job(struct kcopyd_job *job) { int r; + unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9); - job->nr_pages = dm_div_up(job->dests[0].count + job->offset, - PAGE_SIZE >> 9); - r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages); + r = kcopyd_get_pages(job->kc, nr_pages, &job->pages); if (!r) { /* this job is ready for io */ push(&job->kc->io_jobs, job); @@ -602,8 +599,6 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from, job->num_dests = num_dests; memcpy(&job->dests, dests, sizeof(*dests) * num_dests); - job->offset = 0; - job->nr_pages = 0; job->pages = NULL; job->fn = fn; @@ -622,6 
+617,37 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from, } EXPORT_SYMBOL(dm_kcopyd_copy); +void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc, + dm_kcopyd_notify_fn fn, void *context) +{ + struct kcopyd_job *job; + + job = mempool_alloc(kc->job_pool, GFP_NOIO); + + memset(job, 0, sizeof(struct kcopyd_job)); + job->kc = kc; + job->fn = fn; + job->context = context; + + atomic_inc(&kc->nr_jobs); + + return job; +} +EXPORT_SYMBOL(dm_kcopyd_prepare_callback); + +void dm_kcopyd_do_callback(void *j, int read_err, unsigned long write_err) +{ + struct kcopyd_job *job = j; + struct dm_kcopyd_client *kc = job->kc; + + job->read_err = read_err; + job->write_err = write_err; + + push(&kc->complete_jobs, job); + wake(kc); +} +EXPORT_SYMBOL(dm_kcopyd_do_callback); + /* * Cancels a kcopyd job, eg. someone might be deactivating a * mirror. diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c index aa2e0c374ab3..1021c8986011 100644 --- a/drivers/md/dm-log-userspace-base.c +++ b/drivers/md/dm-log-userspace-base.c @@ -394,8 +394,7 @@ static int flush_by_group(struct log_c *lc, struct list_head *flush_list) group[count] = fe->region; count++; - list_del(&fe->list); - list_add(&fe->list, &tmp_list); + list_move(&fe->list, &tmp_list); type = fe->type; if (count >= MAX_FLUSH_GROUP_COUNT) diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index 948e3f4925bf..3b52bb72bd1f 100644 --- a/drivers/md/dm-log.c +++ b/drivers/md/dm-log.c @@ -197,15 +197,21 @@ EXPORT_SYMBOL(dm_dirty_log_destroy); #define MIRROR_DISK_VERSION 2 #define LOG_OFFSET 2 -struct log_header { - uint32_t magic; +struct log_header_disk { + __le32 magic; /* * Simple, incrementing version. no backward * compatibility. */ + __le32 version; + __le64 nr_regions; +} __packed; + +struct log_header_core { + uint32_t magic; uint32_t version; - sector_t nr_regions; + uint64_t nr_regions; }; struct log_c { @@ -239,10 +245,10 @@ struct log_c { int log_dev_failed; int log_dev_flush_failed; struct dm_dev *log_dev; - struct log_header header; + struct log_header_core header; struct dm_io_region header_location; - struct log_header *disk_header; + struct log_header_disk *disk_header; }; /* @@ -251,34 +257,34 @@ struct log_c { */ static inline int log_test_bit(uint32_t *bs, unsigned bit) { - return test_bit_le(bit, (unsigned long *) bs) ? 1 : 0; + return test_bit_le(bit, bs) ? 
1 : 0; } static inline void log_set_bit(struct log_c *l, uint32_t *bs, unsigned bit) { - __test_and_set_bit_le(bit, (unsigned long *) bs); + __set_bit_le(bit, bs); l->touched_cleaned = 1; } static inline void log_clear_bit(struct log_c *l, uint32_t *bs, unsigned bit) { - __test_and_clear_bit_le(bit, (unsigned long *) bs); + __clear_bit_le(bit, bs); l->touched_dirtied = 1; } /*---------------------------------------------------------------- * Header IO *--------------------------------------------------------------*/ -static void header_to_disk(struct log_header *core, struct log_header *disk) +static void header_to_disk(struct log_header_core *core, struct log_header_disk *disk) { disk->magic = cpu_to_le32(core->magic); disk->version = cpu_to_le32(core->version); disk->nr_regions = cpu_to_le64(core->nr_regions); } -static void header_from_disk(struct log_header *core, struct log_header *disk) +static void header_from_disk(struct log_header_core *core, struct log_header_disk *disk) { core->magic = le32_to_cpu(disk->magic); core->version = le32_to_cpu(disk->version); @@ -486,7 +492,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti, memset(lc->sync_bits, (sync == NOSYNC) ? -1 : 0, bitset_size); lc->sync_count = (sync == NOSYNC) ? region_count : 0; - lc->recovering_bits = vmalloc(bitset_size); + lc->recovering_bits = vzalloc(bitset_size); if (!lc->recovering_bits) { DMWARN("couldn't allocate sync bitset"); vfree(lc->sync_bits); @@ -498,7 +504,6 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti, kfree(lc); return -ENOMEM; } - memset(lc->recovering_bits, 0, bitset_size); lc->sync_search = 0; log->context = lc; @@ -739,8 +744,7 @@ static int core_get_resync_work(struct dm_dirty_log *log, region_t *region) return 0; do { - *region = find_next_zero_bit_le( - (unsigned long *) lc->sync_bits, + *region = find_next_zero_bit_le(lc->sync_bits, lc->region_count, lc->sync_search); lc->sync_search = *region + 1; diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index c3547016f0f1..5e0090ef4182 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -22,7 +22,6 @@ #include <linux/atomic.h> #define DM_MSG_PREFIX "multipath" -#define MESG_STR(x) x, sizeof(x) #define DM_PG_INIT_DELAY_MSECS 2000 #define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1) @@ -505,80 +504,29 @@ static void trigger_event(struct work_struct *work) * <#paths> <#per-path selector args> * [<path> [<arg>]* ]+ ]+ *---------------------------------------------------------------*/ -struct param { - unsigned min; - unsigned max; - char *error; -}; - -static int read_param(struct param *param, char *str, unsigned *v, char **error) -{ - if (!str || - (sscanf(str, "%u", v) != 1) || - (*v < param->min) || - (*v > param->max)) { - *error = param->error; - return -EINVAL; - } - - return 0; -} - -struct arg_set { - unsigned argc; - char **argv; -}; - -static char *shift(struct arg_set *as) -{ - char *r; - - if (as->argc) { - as->argc--; - r = *as->argv; - as->argv++; - return r; - } - - return NULL; -} - -static void consume(struct arg_set *as, unsigned n) -{ - BUG_ON (as->argc < n); - as->argc -= n; - as->argv += n; -} - -static int parse_path_selector(struct arg_set *as, struct priority_group *pg, +static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg, struct dm_target *ti) { int r; struct path_selector_type *pst; unsigned ps_argc; - static struct param _params[] = { + static struct dm_arg _args[] = { {0, 1024, "invalid number of path selector 
args"}, }; - pst = dm_get_path_selector(shift(as)); + pst = dm_get_path_selector(dm_shift_arg(as)); if (!pst) { ti->error = "unknown path selector type"; return -EINVAL; } - r = read_param(_params, shift(as), &ps_argc, &ti->error); + r = dm_read_arg_group(_args, as, &ps_argc, &ti->error); if (r) { dm_put_path_selector(pst); return -EINVAL; } - if (ps_argc > as->argc) { - dm_put_path_selector(pst); - ti->error = "not enough arguments for path selector"; - return -EINVAL; - } - r = pst->create(&pg->ps, ps_argc, as->argv); if (r) { dm_put_path_selector(pst); @@ -587,12 +535,12 @@ static int parse_path_selector(struct arg_set *as, struct priority_group *pg, } pg->ps.type = pst; - consume(as, ps_argc); + dm_consume_args(as, ps_argc); return 0; } -static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps, +static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps, struct dm_target *ti) { int r; @@ -609,7 +557,7 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps, if (!p) return ERR_PTR(-ENOMEM); - r = dm_get_device(ti, shift(as), dm_table_get_mode(ti->table), + r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table), &p->path.dev); if (r) { ti->error = "error getting device"; @@ -660,16 +608,16 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps, return ERR_PTR(r); } -static struct priority_group *parse_priority_group(struct arg_set *as, +static struct priority_group *parse_priority_group(struct dm_arg_set *as, struct multipath *m) { - static struct param _params[] = { + static struct dm_arg _args[] = { {1, 1024, "invalid number of paths"}, {0, 1024, "invalid number of selector args"} }; int r; - unsigned i, nr_selector_args, nr_params; + unsigned i, nr_selector_args, nr_args; struct priority_group *pg; struct dm_target *ti = m->ti; @@ -693,26 +641,26 @@ static struct priority_group *parse_priority_group(struct arg_set *as, /* * read the paths */ - r = read_param(_params, shift(as), &pg->nr_pgpaths, &ti->error); + r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error); if (r) goto bad; - r = read_param(_params + 1, shift(as), &nr_selector_args, &ti->error); + r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error); if (r) goto bad; - nr_params = 1 + nr_selector_args; + nr_args = 1 + nr_selector_args; for (i = 0; i < pg->nr_pgpaths; i++) { struct pgpath *pgpath; - struct arg_set path_args; + struct dm_arg_set path_args; - if (as->argc < nr_params) { + if (as->argc < nr_args) { ti->error = "not enough path parameters"; r = -EINVAL; goto bad; } - path_args.argc = nr_params; + path_args.argc = nr_args; path_args.argv = as->argv; pgpath = parse_path(&path_args, &pg->ps, ti); @@ -723,7 +671,7 @@ static struct priority_group *parse_priority_group(struct arg_set *as, pgpath->pg = pg; list_add_tail(&pgpath->list, &pg->pgpaths); - consume(as, nr_params); + dm_consume_args(as, nr_args); } return pg; @@ -733,28 +681,23 @@ static struct priority_group *parse_priority_group(struct arg_set *as, return ERR_PTR(r); } -static int parse_hw_handler(struct arg_set *as, struct multipath *m) +static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m) { unsigned hw_argc; int ret; struct dm_target *ti = m->ti; - static struct param _params[] = { + static struct dm_arg _args[] = { {0, 1024, "invalid number of hardware handler args"}, }; - if (read_param(_params, shift(as), &hw_argc, &ti->error)) + if (dm_read_arg_group(_args, as, &hw_argc, &ti->error)) return -EINVAL; if (!hw_argc) return 
0; - if (hw_argc > as->argc) { - ti->error = "not enough arguments for hardware handler"; - return -EINVAL; - } - - m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL); + m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL); request_module("scsi_dh_%s", m->hw_handler_name); if (scsi_dh_handler_exist(m->hw_handler_name) == 0) { ti->error = "unknown hardware handler type"; @@ -778,7 +721,7 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m) for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1) j = sprintf(p, "%s", as->argv[i]); } - consume(as, hw_argc - 1); + dm_consume_args(as, hw_argc - 1); return 0; fail: @@ -787,20 +730,20 @@ fail: return ret; } -static int parse_features(struct arg_set *as, struct multipath *m) +static int parse_features(struct dm_arg_set *as, struct multipath *m) { int r; unsigned argc; struct dm_target *ti = m->ti; - const char *param_name; + const char *arg_name; - static struct param _params[] = { + static struct dm_arg _args[] = { {0, 5, "invalid number of feature args"}, {1, 50, "pg_init_retries must be between 1 and 50"}, {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"}, }; - r = read_param(_params, shift(as), &argc, &ti->error); + r = dm_read_arg_group(_args, as, &argc, &ti->error); if (r) return -EINVAL; @@ -808,26 +751,24 @@ static int parse_features(struct arg_set *as, struct multipath *m) return 0; do { - param_name = shift(as); + arg_name = dm_shift_arg(as); argc--; - if (!strnicmp(param_name, MESG_STR("queue_if_no_path"))) { + if (!strcasecmp(arg_name, "queue_if_no_path")) { r = queue_if_no_path(m, 1, 0); continue; } - if (!strnicmp(param_name, MESG_STR("pg_init_retries")) && + if (!strcasecmp(arg_name, "pg_init_retries") && (argc >= 1)) { - r = read_param(_params + 1, shift(as), - &m->pg_init_retries, &ti->error); + r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error); argc--; continue; } - if (!strnicmp(param_name, MESG_STR("pg_init_delay_msecs")) && + if (!strcasecmp(arg_name, "pg_init_delay_msecs") && (argc >= 1)) { - r = read_param(_params + 2, shift(as), - &m->pg_init_delay_msecs, &ti->error); + r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error); argc--; continue; } @@ -842,15 +783,15 @@ static int parse_features(struct arg_set *as, struct multipath *m) static int multipath_ctr(struct dm_target *ti, unsigned int argc, char **argv) { - /* target parameters */ - static struct param _params[] = { + /* target arguments */ + static struct dm_arg _args[] = { {0, 1024, "invalid number of priority groups"}, {0, 1024, "invalid initial priority group number"}, }; int r; struct multipath *m; - struct arg_set as; + struct dm_arg_set as; unsigned pg_count = 0; unsigned next_pg_num; @@ -871,11 +812,11 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc, if (r) goto bad; - r = read_param(_params, shift(&as), &m->nr_priority_groups, &ti->error); + r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error); if (r) goto bad; - r = read_param(_params + 1, shift(&as), &next_pg_num, &ti->error); + r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error); if (r) goto bad; @@ -1505,10 +1446,10 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv) } if (argc == 1) { - if (!strnicmp(argv[0], MESG_STR("queue_if_no_path"))) { + if (!strcasecmp(argv[0], "queue_if_no_path")) { r = queue_if_no_path(m, 1, 0); goto out; - } else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path"))) { + } else if (!strcasecmp(argv[0], "fail_if_no_path")) { r = queue_if_no_path(m, 0, 0); 
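The conversion in this and the surrounding hunks (dm-crypt, dm-flakey, dm-mpath) replaces each target's private param/arg_set helpers with the shared dm_arg_set API. A minimal sketch of the pattern, using a hypothetical "<#args> [arg]*" group rather than code from the patch itself, is:

	#include <linux/device-mapper.h>
	#include <linux/string.h>

	/* Hypothetical optional-argument group parser using the shared helpers. */
	static int parse_example_group(struct dm_arg_set *as, struct dm_target *ti)
	{
		static struct dm_arg _args[] = {
			{0, 4, "invalid number of example args"},
		};
		unsigned argc;
		const char *arg_name;
		int r;

		/*
		 * Reads the count word, validates it against min/max and - judging
		 * by the "not enough arguments" checks this patch removes - also
		 * verifies that enough words remain in the set.
		 */
		r = dm_read_arg_group(_args, as, &argc, &ti->error);
		if (r)
			return r;

		while (argc--) {
			arg_name = dm_shift_arg(as);	/* consume the next word */
			if (!strcasecmp(arg_name, "example_flag"))
				continue;

			ti->error = "Unrecognised example argument";
			return -EINVAL;
		}

		return 0;
	}
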
goto out; } @@ -1519,18 +1460,18 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv) goto out; } - if (!strnicmp(argv[0], MESG_STR("disable_group"))) { + if (!strcasecmp(argv[0], "disable_group")) { r = bypass_pg_num(m, argv[1], 1); goto out; - } else if (!strnicmp(argv[0], MESG_STR("enable_group"))) { + } else if (!strcasecmp(argv[0], "enable_group")) { r = bypass_pg_num(m, argv[1], 0); goto out; - } else if (!strnicmp(argv[0], MESG_STR("switch_group"))) { + } else if (!strcasecmp(argv[0], "switch_group")) { r = switch_pg_num(m, argv[1]); goto out; - } else if (!strnicmp(argv[0], MESG_STR("reinstate_path"))) + } else if (!strcasecmp(argv[0], "reinstate_path")) action = reinstate_path; - else if (!strnicmp(argv[0], MESG_STR("fail_path"))) + else if (!strcasecmp(argv[0], "fail_path")) action = fail_path; else { DMWARN("Unrecognised multipath message received."); diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index e5d8904fc8f6..a002dd85db1e 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -8,19 +8,19 @@ #include <linux/slab.h> #include "md.h" +#include "raid1.h" #include "raid5.h" -#include "dm.h" #include "bitmap.h" +#include <linux/device-mapper.h> + #define DM_MSG_PREFIX "raid" /* - * If the MD doesn't support MD_SYNC_STATE_FORCED yet, then - * make it so the flag doesn't set anything. + * The following flags are used by dm-raid.c to set up the array state. + * They must be cleared before md_run is called. */ -#ifndef MD_SYNC_STATE_FORCED -#define MD_SYNC_STATE_FORCED 0 -#endif +#define FirstUse 10 /* rdev flag */ struct raid_dev { /* @@ -43,14 +43,15 @@ struct raid_dev { /* * Flags for rs->print_flags field. */ -#define DMPF_DAEMON_SLEEP 0x1 -#define DMPF_MAX_WRITE_BEHIND 0x2 -#define DMPF_SYNC 0x4 -#define DMPF_NOSYNC 0x8 -#define DMPF_STRIPE_CACHE 0x10 -#define DMPF_MIN_RECOVERY_RATE 0x20 -#define DMPF_MAX_RECOVERY_RATE 0x40 - +#define DMPF_SYNC 0x1 +#define DMPF_NOSYNC 0x2 +#define DMPF_REBUILD 0x4 +#define DMPF_DAEMON_SLEEP 0x8 +#define DMPF_MIN_RECOVERY_RATE 0x10 +#define DMPF_MAX_RECOVERY_RATE 0x20 +#define DMPF_MAX_WRITE_BEHIND 0x40 +#define DMPF_STRIPE_CACHE 0x80 +#define DMPF_REGION_SIZE 0X100 struct raid_set { struct dm_target *ti; @@ -72,6 +73,7 @@ static struct raid_type { const unsigned level; /* RAID level. */ const unsigned algorithm; /* RAID algorithm. 
*/ } raid_types[] = { + {"raid1", "RAID1 (mirroring)", 0, 2, 1, 0 /* NONE */}, {"raid4", "RAID4 (dedicated parity disk)", 1, 2, 5, ALGORITHM_PARITY_0}, {"raid5_la", "RAID5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC}, {"raid5_ra", "RAID5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC}, @@ -105,7 +107,8 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra } sectors_per_dev = ti->len; - if (sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) { + if ((raid_type->level > 1) && + sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) { ti->error = "Target length not divisible by number of data devices"; return ERR_PTR(-EINVAL); } @@ -147,9 +150,16 @@ static void context_free(struct raid_set *rs) { int i; - for (i = 0; i < rs->md.raid_disks; i++) + for (i = 0; i < rs->md.raid_disks; i++) { + if (rs->dev[i].meta_dev) + dm_put_device(rs->ti, rs->dev[i].meta_dev); + if (rs->dev[i].rdev.sb_page) + put_page(rs->dev[i].rdev.sb_page); + rs->dev[i].rdev.sb_page = NULL; + rs->dev[i].rdev.sb_loaded = 0; if (rs->dev[i].data_dev) dm_put_device(rs->ti, rs->dev[i].data_dev); + } kfree(rs); } @@ -159,7 +169,16 @@ static void context_free(struct raid_set *rs) * <meta_dev>: meta device name or '-' if missing * <data_dev>: data device name or '-' if missing * - * This code parses those words. + * The following are permitted: + * - - + * - <data_dev> + * <meta_dev> <data_dev> + * + * The following is not allowed: + * <meta_dev> - + * + * This code parses those words. If there is a failure, + * the caller must use context_free to unwind the operations. */ static int dev_parms(struct raid_set *rs, char **argv) { @@ -182,8 +201,16 @@ static int dev_parms(struct raid_set *rs, char **argv) rs->dev[i].rdev.mddev = &rs->md; if (strcmp(argv[0], "-")) { - rs->ti->error = "Metadata devices not supported"; - return -EINVAL; + ret = dm_get_device(rs->ti, argv[0], + dm_table_get_mode(rs->ti->table), + &rs->dev[i].meta_dev); + rs->ti->error = "RAID metadata device lookup failure"; + if (ret) + return ret; + + rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL); + if (!rs->dev[i].rdev.sb_page) + return -ENOMEM; } if (!strcmp(argv[1], "-")) { @@ -193,6 +220,10 @@ static int dev_parms(struct raid_set *rs, char **argv) return -EINVAL; } + rs->ti->error = "No data device supplied with metadata device"; + if (rs->dev[i].meta_dev) + return -EINVAL; + continue; } @@ -204,6 +235,10 @@ static int dev_parms(struct raid_set *rs, char **argv) return ret; } + if (rs->dev[i].meta_dev) { + metadata_available = 1; + rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev; + } rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev; list_add(&rs->dev[i].rdev.same_set, &rs->md.disks); if (!test_bit(In_sync, &rs->dev[i].rdev.flags)) @@ -235,33 +270,109 @@ static int dev_parms(struct raid_set *rs, char **argv) } /* + * validate_region_size + * @rs + * @region_size: region size in sectors. If 0, pick a size (4MiB default). + * + * Set rs->md.bitmap_info.chunksize (which really refers to 'region size'). + * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap. + * + * Returns: 0 on success, -EINVAL on failure. + */ +static int validate_region_size(struct raid_set *rs, unsigned long region_size) +{ + unsigned long min_region_size = rs->ti->len / (1 << 21); + + if (!region_size) { + /* + * Choose a reasonable default. All figures in sectors. 
+ */ + if (min_region_size > (1 << 13)) { + DMINFO("Choosing default region size of %lu sectors", + region_size); + region_size = min_region_size; + } else { + DMINFO("Choosing default region size of 4MiB"); + region_size = 1 << 13; /* sectors */ + } + } else { + /* + * Validate user-supplied value. + */ + if (region_size > rs->ti->len) { + rs->ti->error = "Supplied region size is too large"; + return -EINVAL; + } + + if (region_size < min_region_size) { + DMERR("Supplied region_size (%lu sectors) below minimum (%lu)", + region_size, min_region_size); + rs->ti->error = "Supplied region size is too small"; + return -EINVAL; + } + + if (!is_power_of_2(region_size)) { + rs->ti->error = "Region size is not a power of 2"; + return -EINVAL; + } + + if (region_size < rs->md.chunk_sectors) { + rs->ti->error = "Region size is smaller than the chunk size"; + return -EINVAL; + } + } + + /* + * Convert sectors to bytes. + */ + rs->md.bitmap_info.chunksize = (region_size << 9); + + return 0; +} + +/* * Possible arguments are... - * RAID456: * <chunk_size> [optional_args] * - * Optional args: - * [[no]sync] Force or prevent recovery of the entire array + * Argument definitions + * <chunk_size> The number of sectors per disk that + * will form the "stripe" + * [[no]sync] Force or prevent recovery of the + * entire array * [rebuild <idx>] Rebuild the drive indicated by the index - * [daemon_sleep <ms>] Time between bitmap daemon work to clear bits + * [daemon_sleep <ms>] Time between bitmap daemon work to + * clear bits * [min_recovery_rate <kB/sec/disk>] Throttle RAID initialization * [max_recovery_rate <kB/sec/disk>] Throttle RAID initialization + * [write_mostly <idx>] Indicate a write mostly drive via index * [max_write_behind <sectors>] See '-write-behind=' (man mdadm) * [stripe_cache <sectors>] Stripe cache size for higher RAIDs + * [region_size <sectors>] Defines granularity of bitmap */ static int parse_raid_params(struct raid_set *rs, char **argv, unsigned num_raid_params) { unsigned i, rebuild_cnt = 0; - unsigned long value; + unsigned long value, region_size = 0; char *key; /* * First, parse the in-order required arguments + * "chunk_size" is the only argument of this type. */ - if ((strict_strtoul(argv[0], 10, &value) < 0) || - !is_power_of_2(value) || (value < 8)) { + if ((strict_strtoul(argv[0], 10, &value) < 0)) { rs->ti->error = "Bad chunk size"; return -EINVAL; + } else if (rs->raid_type->level == 1) { + if (value) + DMERR("Ignoring chunk size parameter for RAID 1"); + value = 0; + } else if (!is_power_of_2(value)) { + rs->ti->error = "Chunk size must be a power of 2"; + return -EINVAL; + } else if (value < 8) { + rs->ti->error = "Chunk size value is too small"; + return -EINVAL; } rs->md.new_chunk_sectors = rs->md.chunk_sectors = value; @@ -269,22 +380,39 @@ static int parse_raid_params(struct raid_set *rs, char **argv, num_raid_params--; /* - * Second, parse the unordered optional arguments + * We set each individual device as In_sync with a completed + * 'recovery_offset'. If there has been a device failure or + * replacement then one of the following cases applies: + * + * 1) User specifies 'rebuild'. + * - Device is reset when param is read. + * 2) A new device is supplied. + * - No matching superblock found, resets device. + * 3) Device failure was transient and returns on reload. + * - Failure noticed, resets device for bitmap replay. + * 4) Device hadn't completed recovery after previous failure. + * - Superblock is read and overrides recovery_offset. 
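As an aside on the validate_region_size() logic introduced above: the checks are simple enough to exercise in ordinary userspace C. The sketch below mirrors them (the 4MiB default, the 2^21-regions bitmap limit, the power-of-two and chunk-size constraints, and the final sector-to-byte shift); the function names, the is_pow2() helper and the main() driver are illustrative only and not part of the patch.

#include <stdio.h>

static int is_pow2(unsigned long n)
{
    return n && !(n & (n - 1));
}

/*
 * Mirror of the checks in validate_region_size().  All sizes are in
 * 512-byte sectors; returns the accepted region size, or 0 on error.
 * ti_len stands in for rs->ti->len, chunk for rs->md.chunk_sectors.
 */
static unsigned long check_region_size(unsigned long ti_len, unsigned long chunk,
                                       unsigned long region_size)
{
    unsigned long min_region_size = ti_len / (1UL << 21);

    if (!region_size)               /* pick a default: 4MiB, or larger if required */
        return (min_region_size > (1UL << 13)) ? min_region_size : (1UL << 13);

    if (region_size > ti_len)       /* larger than the target itself */
        return 0;
    if (region_size < min_region_size)   /* would need more than 2^21 regions */
        return 0;
    if (!is_pow2(region_size))
        return 0;
    if (region_size < chunk)        /* smaller than the stripe chunk size */
        return 0;

    return region_size;
}

int main(void)
{
    /* 1TiB target (2^31 sectors), 128-sector chunks, no region_size given */
    unsigned long rs = check_region_size(1UL << 31, 128, 0);

    printf("region_size = %lu sectors (%lu bytes)\n", rs, rs << 9);
    return 0;
}

Compiled standalone, this prints the 8192-sector (4MiB) default for a 1TiB target, matching the default described in the kernel comment above.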
+ * + * What is found in the superblocks of the devices is always + * authoritative, unless 'rebuild' or '[no]sync' was specified. */ - for (i = 0; i < rs->md.raid_disks; i++) + for (i = 0; i < rs->md.raid_disks; i++) { set_bit(In_sync, &rs->dev[i].rdev.flags); + rs->dev[i].rdev.recovery_offset = MaxSector; + } + /* + * Second, parse the unordered optional arguments + */ for (i = 0; i < num_raid_params; i++) { - if (!strcmp(argv[i], "nosync")) { + if (!strcasecmp(argv[i], "nosync")) { rs->md.recovery_cp = MaxSector; rs->print_flags |= DMPF_NOSYNC; - rs->md.flags |= MD_SYNC_STATE_FORCED; continue; } - if (!strcmp(argv[i], "sync")) { + if (!strcasecmp(argv[i], "sync")) { rs->md.recovery_cp = 0; rs->print_flags |= DMPF_SYNC; - rs->md.flags |= MD_SYNC_STATE_FORCED; continue; } @@ -300,9 +428,13 @@ static int parse_raid_params(struct raid_set *rs, char **argv, return -EINVAL; } - if (!strcmp(key, "rebuild")) { - if (++rebuild_cnt > rs->raid_type->parity_devs) { - rs->ti->error = "Too many rebuild drives given"; + if (!strcasecmp(key, "rebuild")) { + rebuild_cnt++; + if (((rs->raid_type->level != 1) && + (rebuild_cnt > rs->raid_type->parity_devs)) || + ((rs->raid_type->level == 1) && + (rebuild_cnt > (rs->md.raid_disks - 1)))) { + rs->ti->error = "Too many rebuild devices specified for given RAID type"; return -EINVAL; } if (value > rs->md.raid_disks) { @@ -311,7 +443,22 @@ static int parse_raid_params(struct raid_set *rs, char **argv, } clear_bit(In_sync, &rs->dev[value].rdev.flags); rs->dev[value].rdev.recovery_offset = 0; - } else if (!strcmp(key, "max_write_behind")) { + rs->print_flags |= DMPF_REBUILD; + } else if (!strcasecmp(key, "write_mostly")) { + if (rs->raid_type->level != 1) { + rs->ti->error = "write_mostly option is only valid for RAID1"; + return -EINVAL; + } + if (value > rs->md.raid_disks) { + rs->ti->error = "Invalid write_mostly drive index given"; + return -EINVAL; + } + set_bit(WriteMostly, &rs->dev[value].rdev.flags); + } else if (!strcasecmp(key, "max_write_behind")) { + if (rs->raid_type->level != 1) { + rs->ti->error = "max_write_behind option is only valid for RAID1"; + return -EINVAL; + } rs->print_flags |= DMPF_MAX_WRITE_BEHIND; /* @@ -324,14 +471,14 @@ static int parse_raid_params(struct raid_set *rs, char **argv, return -EINVAL; } rs->md.bitmap_info.max_write_behind = value; - } else if (!strcmp(key, "daemon_sleep")) { + } else if (!strcasecmp(key, "daemon_sleep")) { rs->print_flags |= DMPF_DAEMON_SLEEP; if (!value || (value > MAX_SCHEDULE_TIMEOUT)) { rs->ti->error = "daemon sleep period out of range"; return -EINVAL; } rs->md.bitmap_info.daemon_sleep = value; - } else if (!strcmp(key, "stripe_cache")) { + } else if (!strcasecmp(key, "stripe_cache")) { rs->print_flags |= DMPF_STRIPE_CACHE; /* @@ -348,20 +495,23 @@ static int parse_raid_params(struct raid_set *rs, char **argv, rs->ti->error = "Bad stripe_cache size"; return -EINVAL; } - } else if (!strcmp(key, "min_recovery_rate")) { + } else if (!strcasecmp(key, "min_recovery_rate")) { rs->print_flags |= DMPF_MIN_RECOVERY_RATE; if (value > INT_MAX) { rs->ti->error = "min_recovery_rate out of range"; return -EINVAL; } rs->md.sync_speed_min = (int)value; - } else if (!strcmp(key, "max_recovery_rate")) { + } else if (!strcasecmp(key, "max_recovery_rate")) { rs->print_flags |= DMPF_MAX_RECOVERY_RATE; if (value > INT_MAX) { rs->ti->error = "max_recovery_rate out of range"; return -EINVAL; } rs->md.sync_speed_max = (int)value; + } else if (!strcasecmp(key, "region_size")) { + rs->print_flags |= DMPF_REGION_SIZE; + 
region_size = value; } else { DMERR("Unable to parse RAID parameter: %s", key); rs->ti->error = "Unable to parse RAID parameters"; @@ -369,6 +519,19 @@ static int parse_raid_params(struct raid_set *rs, char **argv, } } + if (validate_region_size(rs, region_size)) + return -EINVAL; + + if (rs->md.chunk_sectors) + rs->ti->split_io = rs->md.chunk_sectors; + else + rs->ti->split_io = region_size; + + if (rs->md.chunk_sectors) + rs->ti->split_io = rs->md.chunk_sectors; + else + rs->ti->split_io = region_size; + /* Assume there are no metadata devices until the drives are parsed */ rs->md.persistent = 0; rs->md.external = 1; @@ -387,17 +550,351 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits) { struct raid_set *rs = container_of(cb, struct raid_set, callbacks); + if (rs->raid_type->level == 1) + return md_raid1_congested(&rs->md, bits); + return md_raid5_congested(&rs->md, bits); } /* + * This structure is never routinely used by userspace, unlike md superblocks. + * Devices with this superblock should only ever be accessed via device-mapper. + */ +#define DM_RAID_MAGIC 0x64526D44 +struct dm_raid_superblock { + __le32 magic; /* "DmRd" */ + __le32 features; /* Used to indicate possible future changes */ + + __le32 num_devices; /* Number of devices in this array. (Max 64) */ + __le32 array_position; /* The position of this drive in the array */ + + __le64 events; /* Incremented by md when superblock updated */ + __le64 failed_devices; /* Bit field of devices to indicate failures */ + + /* + * This offset tracks the progress of the repair or replacement of + * an individual drive. + */ + __le64 disk_recovery_offset; + + /* + * This offset tracks the progress of the initial array + * synchronisation/parity calculation. + */ + __le64 array_resync_offset; + + /* + * RAID characteristics + */ + __le32 level; + __le32 layout; + __le32 stripe_sectors; + + __u8 pad[452]; /* Round struct to 512 bytes. */ + /* Always set to 0 when writing. */ +} __packed; + +static int read_disk_sb(mdk_rdev_t *rdev, int size) +{ + BUG_ON(!rdev->sb_page); + + if (rdev->sb_loaded) + return 0; + + if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) { + DMERR("Failed to read device superblock"); + return -EINVAL; + } + + rdev->sb_loaded = 1; + + return 0; +} + +static void super_sync(mddev_t *mddev, mdk_rdev_t *rdev) +{ + mdk_rdev_t *r, *t; + uint64_t failed_devices; + struct dm_raid_superblock *sb; + + sb = page_address(rdev->sb_page); + failed_devices = le64_to_cpu(sb->failed_devices); + + rdev_for_each(r, t, mddev) + if ((r->raid_disk >= 0) && test_bit(Faulty, &r->flags)) + failed_devices |= (1ULL << r->raid_disk); + + memset(sb, 0, sizeof(*sb)); + + sb->magic = cpu_to_le32(DM_RAID_MAGIC); + sb->features = cpu_to_le32(0); /* No features yet */ + + sb->num_devices = cpu_to_le32(mddev->raid_disks); + sb->array_position = cpu_to_le32(rdev->raid_disk); + + sb->events = cpu_to_le64(mddev->events); + sb->failed_devices = cpu_to_le64(failed_devices); + + sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset); + sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp); + + sb->level = cpu_to_le32(mddev->level); + sb->layout = cpu_to_le32(mddev->layout); + sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors); +} + +/* + * super_load + * + * This function creates a superblock if one is not found on the device + * and will decide which superblock to use if there's a choice. 
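A note on the on-disk format defined above: dm_raid_superblock is padded to exactly one 512-byte sector and every field is stored little-endian. The userspace sketch below (uintN_t types and a GCC-style packed attribute standing in for __leN/__packed, plus a size check that is not in the patch) confirms that the declared fields plus pad[452] add up to 512 bytes.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace mirror of struct dm_raid_superblock; uintN_t stands in for __leN. */
struct dm_raid_sb {
    uint32_t magic;                 /* "DmRd" */
    uint32_t features;
    uint32_t num_devices;
    uint32_t array_position;

    uint64_t events;
    uint64_t failed_devices;
    uint64_t disk_recovery_offset;  /* per-device rebuild progress */
    uint64_t array_resync_offset;   /* whole-array resync progress */

    uint32_t level;
    uint32_t layout;
    uint32_t stripe_sectors;

    uint8_t  pad[452];              /* rounds the struct up to one sector */
} __attribute__((packed));          /* GCC-style stand-in for __packed */

int main(void)
{
    /* 4 * 4 + 4 * 8 + 3 * 4 + 452 = 512 */
    if (sizeof(struct dm_raid_sb) != 512) {
        fprintf(stderr, "unexpected size %zu\n", sizeof(struct dm_raid_sb));
        return 1;
    }

    printf("superblock is %zu bytes, events at offset %zu\n",
           sizeof(struct dm_raid_sb), offsetof(struct dm_raid_sb, events));
    return 0;
}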
+ * + * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise + */ +static int super_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev) +{ + int ret; + struct dm_raid_superblock *sb; + struct dm_raid_superblock *refsb; + uint64_t events_sb, events_refsb; + + rdev->sb_start = 0; + rdev->sb_size = sizeof(*sb); + + ret = read_disk_sb(rdev, rdev->sb_size); + if (ret) + return ret; + + sb = page_address(rdev->sb_page); + if (sb->magic != cpu_to_le32(DM_RAID_MAGIC)) { + super_sync(rdev->mddev, rdev); + + set_bit(FirstUse, &rdev->flags); + + /* Force writing of superblocks to disk */ + set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags); + + /* Any superblock is better than none, choose that if given */ + return refdev ? 0 : 1; + } + + if (!refdev) + return 1; + + events_sb = le64_to_cpu(sb->events); + + refsb = page_address(refdev->sb_page); + events_refsb = le64_to_cpu(refsb->events); + + return (events_sb > events_refsb) ? 1 : 0; +} + +static int super_init_validation(mddev_t *mddev, mdk_rdev_t *rdev) +{ + int role; + struct raid_set *rs = container_of(mddev, struct raid_set, md); + uint64_t events_sb; + uint64_t failed_devices; + struct dm_raid_superblock *sb; + uint32_t new_devs = 0; + uint32_t rebuilds = 0; + mdk_rdev_t *r, *t; + struct dm_raid_superblock *sb2; + + sb = page_address(rdev->sb_page); + events_sb = le64_to_cpu(sb->events); + failed_devices = le64_to_cpu(sb->failed_devices); + + /* + * Initialise to 1 if this is a new superblock. + */ + mddev->events = events_sb ? : 1; + + /* + * Reshaping is not currently allowed + */ + if ((le32_to_cpu(sb->level) != mddev->level) || + (le32_to_cpu(sb->layout) != mddev->layout) || + (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors)) { + DMERR("Reshaping arrays not yet supported."); + return -EINVAL; + } + + /* We can only change the number of devices in RAID1 right now */ + if ((rs->raid_type->level != 1) && + (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) { + DMERR("Reshaping arrays not yet supported."); + return -EINVAL; + } + + if (!(rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC))) + mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset); + + /* + * During load, we set FirstUse if a new superblock was written. + * There are two reasons we might not have a superblock: + * 1) The array is brand new - in which case, all of the + * devices must have their In_sync bit set. Also, + * recovery_cp must be 0, unless forced. + * 2) This is a new device being added to an old array + * and the new device needs to be rebuilt - in which + * case the In_sync bit will /not/ be set and + * recovery_cp must be MaxSector. 
+ */ + rdev_for_each(r, t, mddev) { + if (!test_bit(In_sync, &r->flags)) { + if (!test_bit(FirstUse, &r->flags)) + DMERR("Superblock area of " + "rebuild device %d should have been " + "cleared.", r->raid_disk); + set_bit(FirstUse, &r->flags); + rebuilds++; + } else if (test_bit(FirstUse, &r->flags)) + new_devs++; + } + + if (!rebuilds) { + if (new_devs == mddev->raid_disks) { + DMINFO("Superblocks created for new array"); + set_bit(MD_ARRAY_FIRST_USE, &mddev->flags); + } else if (new_devs) { + DMERR("New device injected " + "into existing array without 'rebuild' " + "parameter specified"); + return -EINVAL; + } + } else if (new_devs) { + DMERR("'rebuild' devices cannot be " + "injected into an array with other first-time devices"); + return -EINVAL; + } else if (mddev->recovery_cp != MaxSector) { + DMERR("'rebuild' specified while array is not in-sync"); + return -EINVAL; + } + + /* + * Now we set the Faulty bit for those devices that are + * recorded in the superblock as failed. + */ + rdev_for_each(r, t, mddev) { + if (!r->sb_page) + continue; + sb2 = page_address(r->sb_page); + sb2->failed_devices = 0; + + /* + * Check for any device re-ordering. + */ + if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) { + role = le32_to_cpu(sb2->array_position); + if (role != r->raid_disk) { + if (rs->raid_type->level != 1) { + rs->ti->error = "Cannot change device " + "positions in RAID array"; + return -EINVAL; + } + DMINFO("RAID1 device #%d now at position #%d", + role, r->raid_disk); + } + + /* + * Partial recovery is performed on + * returning failed devices. + */ + if (failed_devices & (1 << role)) + set_bit(Faulty, &r->flags); + } + } + + return 0; +} + +static int super_validate(mddev_t *mddev, mdk_rdev_t *rdev) +{ + struct dm_raid_superblock *sb = page_address(rdev->sb_page); + + /* + * If mddev->events is not set, we know we have not yet initialized + * the array. + */ + if (!mddev->events && super_init_validation(mddev, rdev)) + return -EINVAL; + + mddev->bitmap_info.offset = 4096 >> 9; /* Enable bitmap creation */ + rdev->mddev->bitmap_info.default_offset = 4096 >> 9; + if (!test_bit(FirstUse, &rdev->flags)) { + rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset); + if (rdev->recovery_offset != MaxSector) + clear_bit(In_sync, &rdev->flags); + } + + /* + * If a device comes back, set it as not In_sync and no longer faulty. + */ + if (test_bit(Faulty, &rdev->flags)) { + clear_bit(Faulty, &rdev->flags); + clear_bit(In_sync, &rdev->flags); + rdev->saved_raid_disk = rdev->raid_disk; + rdev->recovery_offset = 0; + } + + clear_bit(FirstUse, &rdev->flags); + + return 0; +} + +/* + * Analyse superblocks and select the freshest. + */ +static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) +{ + int ret; + mdk_rdev_t *rdev, *freshest, *tmp; + mddev_t *mddev = &rs->md; + + freshest = NULL; + rdev_for_each(rdev, tmp, mddev) { + if (!rdev->meta_bdev) + continue; + + ret = super_load(rdev, freshest); + + switch (ret) { + case 1: + freshest = rdev; + break; + case 0: + break; + default: + ti->error = "Failed to load superblock"; + return ret; + } + } + + if (!freshest) + return 0; + + /* + * Validation of the freshest device provides the source of + * validation for the remaining devices. 
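The superblock analysis above follows a two-pass pattern: first pick the freshest device by event count, then use it to validate the rest. Below is a small standalone sketch of just the selection step; the events[] array and device indices are made up for illustration and are not kernel API.

#include <stdint.h>
#include <stdio.h>

/*
 * Selection step of analyse_superblocks(): keep the device whose event
 * counter is strictly greater than the current reference, so on a tie the
 * earlier device wins, just as super_load() only answers "use rdev" when
 * events_sb > events_refsb.
 */
static int pick_freshest(const uint64_t *events, int nr_devs)
{
    int i, freshest = -1;

    for (i = 0; i < nr_devs; i++)
        if (freshest < 0 || events[i] > events[freshest])
            freshest = i;

    return freshest;
}

int main(void)
{
    uint64_t events[] = { 12, 15, 15, 9 };  /* device 3 missed some updates */

    printf("validate device %d first, then the rest against it\n",
           pick_freshest(events, 4));
    return 0;
}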
+ */ + ti->error = "Unable to assemble array: Invalid superblocks"; + if (super_validate(mddev, freshest)) + return -EINVAL; + + rdev_for_each(rdev, tmp, mddev) + if ((rdev != freshest) && super_validate(mddev, rdev)) + return -EINVAL; + + return 0; +} + +/* * Construct a RAID4/5/6 mapping: * Args: * <raid_type> <#raid_params> <raid_params> \ * <#raid_devs> { <meta_dev1> <dev1> .. <meta_devN> <devN> } * - * ** metadata devices are not supported yet, use '-' instead ** - * * <raid_params> varies by <raid_type>. See 'parse_raid_params' for * details on possible <raid_params>. */ @@ -465,8 +962,12 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv) if (ret) goto bad; + rs->md.sync_super = super_sync; + ret = analyse_superblocks(ti, rs); + if (ret) + goto bad; + INIT_WORK(&rs->md.event_work, do_table_event); - ti->split_io = rs->md.chunk_sectors; ti->private = rs; mutex_lock(&rs->md.reconfig_mutex); @@ -482,6 +983,7 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv) rs->callbacks.congested_fn = raid_is_congested; dm_table_add_target_callbacks(ti->table, &rs->callbacks); + mddev_suspend(&rs->md); return 0; bad: @@ -546,12 +1048,17 @@ static int raid_status(struct dm_target *ti, status_type_t type, break; case STATUSTYPE_TABLE: /* The string you would use to construct this array */ - for (i = 0; i < rs->md.raid_disks; i++) - if (rs->dev[i].data_dev && + for (i = 0; i < rs->md.raid_disks; i++) { + if ((rs->print_flags & DMPF_REBUILD) && + rs->dev[i].data_dev && !test_bit(In_sync, &rs->dev[i].rdev.flags)) - raid_param_cnt++; /* for rebuilds */ + raid_param_cnt += 2; /* for rebuilds */ + if (rs->dev[i].data_dev && + test_bit(WriteMostly, &rs->dev[i].rdev.flags)) + raid_param_cnt += 2; + } - raid_param_cnt += (hweight64(rs->print_flags) * 2); + raid_param_cnt += (hweight64(rs->print_flags & ~DMPF_REBUILD) * 2); if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC)) raid_param_cnt--; @@ -565,7 +1072,8 @@ static int raid_status(struct dm_target *ti, status_type_t type, DMEMIT(" nosync"); for (i = 0; i < rs->md.raid_disks; i++) - if (rs->dev[i].data_dev && + if ((rs->print_flags & DMPF_REBUILD) && + rs->dev[i].data_dev && !test_bit(In_sync, &rs->dev[i].rdev.flags)) DMEMIT(" rebuild %u", i); @@ -579,6 +1087,11 @@ static int raid_status(struct dm_target *ti, status_type_t type, if (rs->print_flags & DMPF_MAX_RECOVERY_RATE) DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max); + for (i = 0; i < rs->md.raid_disks; i++) + if (rs->dev[i].data_dev && + test_bit(WriteMostly, &rs->dev[i].rdev.flags)) + DMEMIT(" write_mostly %u", i); + if (rs->print_flags & DMPF_MAX_WRITE_BEHIND) DMEMIT(" max_write_behind %lu", rs->md.bitmap_info.max_write_behind); @@ -591,9 +1104,16 @@ static int raid_status(struct dm_target *ti, status_type_t type, conf ? 
conf->max_nr_stripes * 2 : 0); } + if (rs->print_flags & DMPF_REGION_SIZE) + DMEMIT(" region_size %lu", + rs->md.bitmap_info.chunksize >> 9); + DMEMIT(" %d", rs->md.raid_disks); for (i = 0; i < rs->md.raid_disks; i++) { - DMEMIT(" -"); /* metadata device */ + if (rs->dev[i].meta_dev) + DMEMIT(" %s", rs->dev[i].meta_dev->name); + else + DMEMIT(" -"); if (rs->dev[i].data_dev) DMEMIT(" %s", rs->dev[i].data_dev->name); @@ -650,12 +1170,13 @@ static void raid_resume(struct dm_target *ti) { struct raid_set *rs = ti->private; + bitmap_load(&rs->md); mddev_resume(&rs->md); } static struct target_type raid_target = { .name = "raid", - .version = {1, 0, 0}, + .version = {1, 1, 0}, .module = THIS_MODULE, .ctr = raid_ctr, .dtr = raid_dtr, diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index 135c2f1fdbfc..d1f1d7017103 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -58,25 +58,30 @@ #define NUM_SNAPSHOT_HDR_CHUNKS 1 struct disk_header { - uint32_t magic; + __le32 magic; /* * Is this snapshot valid. There is no way of recovering * an invalid snapshot. */ - uint32_t valid; + __le32 valid; /* * Simple, incrementing version. no backward * compatibility. */ - uint32_t version; + __le32 version; /* In sectors */ - uint32_t chunk_size; -}; + __le32 chunk_size; +} __packed; struct disk_exception { + __le64 old_chunk; + __le64 new_chunk; +} __packed; + +struct core_exception { uint64_t old_chunk; uint64_t new_chunk; }; @@ -169,10 +174,9 @@ static int alloc_area(struct pstore *ps) if (!ps->area) goto err_area; - ps->zero_area = vmalloc(len); + ps->zero_area = vzalloc(len); if (!ps->zero_area) goto err_zero_area; - memset(ps->zero_area, 0, len); ps->header_area = vmalloc(len); if (!ps->header_area) @@ -396,32 +400,32 @@ static struct disk_exception *get_exception(struct pstore *ps, uint32_t index) } static void read_exception(struct pstore *ps, - uint32_t index, struct disk_exception *result) + uint32_t index, struct core_exception *result) { - struct disk_exception *e = get_exception(ps, index); + struct disk_exception *de = get_exception(ps, index); /* copy it */ - result->old_chunk = le64_to_cpu(e->old_chunk); - result->new_chunk = le64_to_cpu(e->new_chunk); + result->old_chunk = le64_to_cpu(de->old_chunk); + result->new_chunk = le64_to_cpu(de->new_chunk); } static void write_exception(struct pstore *ps, - uint32_t index, struct disk_exception *de) + uint32_t index, struct core_exception *e) { - struct disk_exception *e = get_exception(ps, index); + struct disk_exception *de = get_exception(ps, index); /* copy it */ - e->old_chunk = cpu_to_le64(de->old_chunk); - e->new_chunk = cpu_to_le64(de->new_chunk); + de->old_chunk = cpu_to_le64(e->old_chunk); + de->new_chunk = cpu_to_le64(e->new_chunk); } static void clear_exception(struct pstore *ps, uint32_t index) { - struct disk_exception *e = get_exception(ps, index); + struct disk_exception *de = get_exception(ps, index); /* clear it */ - e->old_chunk = 0; - e->new_chunk = 0; + de->old_chunk = 0; + de->new_chunk = 0; } /* @@ -437,13 +441,13 @@ static int insert_exceptions(struct pstore *ps, { int r; unsigned int i; - struct disk_exception de; + struct core_exception e; /* presume the area is full */ *full = 1; for (i = 0; i < ps->exceptions_per_area; i++) { - read_exception(ps, i, &de); + read_exception(ps, i, &e); /* * If the new_chunk is pointing at the start of @@ -451,7 +455,7 @@ static int insert_exceptions(struct pstore *ps, * is we know that we've hit the end of the * exceptions. 
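The dm-snap-persistent change above splits the exception record into a packed little-endian on-disk struct and a native-endian in-core struct, with read_exception()/write_exception() acting as the conversion boundary. A minimal userspace sketch of the same pattern, assuming glibc's <endian.h> helpers in place of le64_to_cpu()/cpu_to_le64():

#include <endian.h>     /* htole64()/le64toh(): glibc/musl helpers, assumed here */
#include <stdint.h>
#include <stdio.h>

/* On-disk form: always little-endian and packed (as struct disk_exception). */
struct disk_exception {
    uint64_t old_chunk;
    uint64_t new_chunk;
} __attribute__((packed));

/* In-core form: native CPU byte order (as struct core_exception). */
struct core_exception {
    uint64_t old_chunk;
    uint64_t new_chunk;
};

/* Same conversion boundary as write_exception()/read_exception(). */
static void pack_exception(struct disk_exception *de, const struct core_exception *e)
{
    de->old_chunk = htole64(e->old_chunk);
    de->new_chunk = htole64(e->new_chunk);
}

static void unpack_exception(struct core_exception *e, const struct disk_exception *de)
{
    e->old_chunk = le64toh(de->old_chunk);
    e->new_chunk = le64toh(de->new_chunk);
}

int main(void)
{
    struct core_exception e = { 42, 1000 }, back;
    struct disk_exception de;

    pack_exception(&de, &e);
    unpack_exception(&back, &de);
    printf("round trip: old=%llu new=%llu\n",
           (unsigned long long)back.old_chunk,
           (unsigned long long)back.new_chunk);
    return 0;
}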
Therefore the area is not full. */ - if (de.new_chunk == 0LL) { + if (e.new_chunk == 0LL) { ps->current_committed = i; *full = 0; break; @@ -460,13 +464,13 @@ static int insert_exceptions(struct pstore *ps, /* * Keep track of the start of the free chunks. */ - if (ps->next_free <= de.new_chunk) - ps->next_free = de.new_chunk + 1; + if (ps->next_free <= e.new_chunk) + ps->next_free = e.new_chunk + 1; /* * Otherwise we add the exception to the snapshot. */ - r = callback(callback_context, de.old_chunk, de.new_chunk); + r = callback(callback_context, e.old_chunk, e.new_chunk); if (r) return r; } @@ -563,7 +567,7 @@ static int persistent_read_metadata(struct dm_exception_store *store, ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) / sizeof(struct disk_exception); ps->callbacks = dm_vcalloc(ps->exceptions_per_area, - sizeof(*ps->callbacks)); + sizeof(*ps->callbacks)); if (!ps->callbacks) return -ENOMEM; @@ -641,12 +645,12 @@ static void persistent_commit_exception(struct dm_exception_store *store, { unsigned int i; struct pstore *ps = get_info(store); - struct disk_exception de; + struct core_exception ce; struct commit_callback *cb; - de.old_chunk = e->old_chunk; - de.new_chunk = e->new_chunk; - write_exception(ps, ps->current_committed++, &de); + ce.old_chunk = e->old_chunk; + ce.new_chunk = e->new_chunk; + write_exception(ps, ps->current_committed++, &ce); /* * Add the callback to the back of the array. This code @@ -670,7 +674,7 @@ static void persistent_commit_exception(struct dm_exception_store *store, * If we completely filled the current area, then wipe the next one. */ if ((ps->current_committed == ps->exceptions_per_area) && - zero_disk_area(ps, ps->current_area + 1)) + zero_disk_area(ps, ps->current_area + 1)) ps->valid = 0; /* @@ -701,7 +705,7 @@ static int persistent_prepare_merge(struct dm_exception_store *store, chunk_t *last_new_chunk) { struct pstore *ps = get_info(store); - struct disk_exception de; + struct core_exception ce; int nr_consecutive; int r; @@ -722,9 +726,9 @@ static int persistent_prepare_merge(struct dm_exception_store *store, ps->current_committed = ps->exceptions_per_area; } - read_exception(ps, ps->current_committed - 1, &de); - *last_old_chunk = de.old_chunk; - *last_new_chunk = de.new_chunk; + read_exception(ps, ps->current_committed - 1, &ce); + *last_old_chunk = ce.old_chunk; + *last_new_chunk = ce.new_chunk; /* * Find number of consecutive chunks within the current area, @@ -733,9 +737,9 @@ static int persistent_prepare_merge(struct dm_exception_store *store, for (nr_consecutive = 1; nr_consecutive < ps->current_committed; nr_consecutive++) { read_exception(ps, ps->current_committed - 1 - nr_consecutive, - &de); - if (de.old_chunk != *last_old_chunk - nr_consecutive || - de.new_chunk != *last_new_chunk - nr_consecutive) + &ce); + if (ce.old_chunk != *last_old_chunk - nr_consecutive || + ce.new_chunk != *last_new_chunk - nr_consecutive) break; } @@ -753,7 +757,7 @@ static int persistent_commit_merge(struct dm_exception_store *store, for (i = 0; i < nr_merged; i++) clear_exception(ps, ps->current_committed - 1 - i); - r = area_io(ps, WRITE); + r = area_io(ps, WRITE_FLUSH_FUA); if (r < 0) return r; diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 9ecff5f3023a..6f758870fc19 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -30,16 +30,6 @@ static const char dm_snapshot_merge_target_name[] = "snapshot-merge"; ((ti)->type->name == dm_snapshot_merge_target_name) /* - * The percentage increment we will wake up users 
at - */ -#define WAKE_UP_PERCENT 5 - -/* - * kcopyd priority of snapshot operations - */ -#define SNAPSHOT_COPY_PRIORITY 2 - -/* * The size of the mempool used to track chunks in use. */ #define MIN_IOS 256 @@ -180,6 +170,13 @@ struct dm_snap_pending_exception { * kcopyd. */ int started; + + /* + * For writing a complete chunk, bypassing the copy. + */ + struct bio *full_bio; + bio_end_io_t *full_bio_end_io; + void *full_bio_private; }; /* @@ -1055,8 +1052,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) s = kmalloc(sizeof(*s), GFP_KERNEL); if (!s) { - ti->error = "Cannot allocate snapshot context private " - "structure"; + ti->error = "Cannot allocate private snapshot structure"; r = -ENOMEM; goto bad; } @@ -1380,6 +1376,7 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success) struct dm_snapshot *s = pe->snap; struct bio *origin_bios = NULL; struct bio *snapshot_bios = NULL; + struct bio *full_bio = NULL; int error = 0; if (!success) { @@ -1415,10 +1412,15 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success) */ dm_insert_exception(&s->complete, e); - out: +out: dm_remove_exception(&pe->e); snapshot_bios = bio_list_get(&pe->snapshot_bios); origin_bios = bio_list_get(&pe->origin_bios); + full_bio = pe->full_bio; + if (full_bio) { + full_bio->bi_end_io = pe->full_bio_end_io; + full_bio->bi_private = pe->full_bio_private; + } free_pending_exception(pe); increment_pending_exceptions_done_count(); @@ -1426,10 +1428,15 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success) up_write(&s->lock); /* Submit any pending write bios */ - if (error) + if (error) { + if (full_bio) + bio_io_error(full_bio); error_bios(snapshot_bios); - else + } else { + if (full_bio) + bio_endio(full_bio, 0); flush_bios(snapshot_bios); + } retry_origin_bios(s, origin_bios); } @@ -1480,8 +1487,33 @@ static void start_copy(struct dm_snap_pending_exception *pe) dest.count = src.count; /* Hand over to kcopyd */ - dm_kcopyd_copy(s->kcopyd_client, - &src, 1, &dest, 0, copy_callback, pe); + dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe); +} + +static void full_bio_end_io(struct bio *bio, int error) +{ + void *callback_data = bio->bi_private; + + dm_kcopyd_do_callback(callback_data, 0, error ? 
1 : 0); +} + +static void start_full_bio(struct dm_snap_pending_exception *pe, + struct bio *bio) +{ + struct dm_snapshot *s = pe->snap; + void *callback_data; + + pe->full_bio = bio; + pe->full_bio_end_io = bio->bi_end_io; + pe->full_bio_private = bio->bi_private; + + callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client, + copy_callback, pe); + + bio->bi_end_io = full_bio_end_io; + bio->bi_private = callback_data; + + generic_make_request(bio); } static struct dm_snap_pending_exception * @@ -1519,6 +1551,7 @@ __find_pending_exception(struct dm_snapshot *s, bio_list_init(&pe->origin_bios); bio_list_init(&pe->snapshot_bios); pe->started = 0; + pe->full_bio = NULL; if (s->store->type->prepare_exception(s->store, &pe->e)) { free_pending_exception(pe); @@ -1612,10 +1645,19 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio, } remap_exception(s, &pe->e, bio, chunk); - bio_list_add(&pe->snapshot_bios, bio); r = DM_MAPIO_SUBMITTED; + if (!pe->started && + bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) { + pe->started = 1; + up_write(&s->lock); + start_full_bio(pe, bio); + goto out; + } + + bio_list_add(&pe->snapshot_bios, bio); + if (!pe->started) { /* this is protected by snap->lock */ pe->started = 1; @@ -1628,9 +1670,9 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio, map_context->ptr = track_chunk(s, chunk); } - out_unlock: +out_unlock: up_write(&s->lock); - out: +out: return r; } @@ -1974,7 +2016,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector, pe_to_start_now = pe; } - next_snapshot: +next_snapshot: up_write(&snap->lock); if (pe_to_start_now) { diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index bfe9c2333cea..986b8754bb08 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -54,7 +54,6 @@ struct dm_table { sector_t *highs; struct dm_target *targets; - unsigned discards_supported:1; unsigned integrity_supported:1; /* @@ -154,12 +153,11 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size) return NULL; size = nmemb * elem_size; - addr = vmalloc(size); - if (addr) - memset(addr, 0, size); + addr = vzalloc(size); return addr; } +EXPORT_SYMBOL(dm_vcalloc); /* * highs, and targets are managed as dynamic arrays during a @@ -209,7 +207,6 @@ int dm_table_create(struct dm_table **result, fmode_t mode, INIT_LIST_HEAD(&t->devices); INIT_LIST_HEAD(&t->target_callbacks); atomic_set(&t->holders, 0); - t->discards_supported = 1; if (!num_targets) num_targets = KEYS_PER_NODE; @@ -281,6 +278,7 @@ void dm_table_get(struct dm_table *t) { atomic_inc(&t->holders); } +EXPORT_SYMBOL(dm_table_get); void dm_table_put(struct dm_table *t) { @@ -290,6 +288,7 @@ void dm_table_put(struct dm_table *t) smp_mb__before_atomic_dec(); atomic_dec(&t->holders); } +EXPORT_SYMBOL(dm_table_put); /* * Checks to see if we need to extend highs or targets. @@ -455,13 +454,14 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode, * Add a device to the list, or just increment the usage count if * it's already present. 
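Returning to the snapshot changes earlier in this hunk: snapshot_map() can now hand a write straight to the COW device when no copy has been started yet and the bio covers exactly one chunk, which is the case start_full_bio() services. A toy sketch of just that size test, with bio_size and chunk_size standing in for bio->bi_size and s->store->chunk_size (nothing here is kernel API):

#include <stdio.h>

#define SECTOR_SHIFT 9

/*
 * The size test from snapshot_map(): bio_size is in bytes, chunk_size in
 * sectors.  Only an exact whole-chunk write may skip the kcopyd copy.
 */
static int covers_full_chunk(unsigned int bio_size, unsigned int chunk_size)
{
    return bio_size == (chunk_size << SECTOR_SHIFT);
}

int main(void)
{
    printf("8KiB write, 16-sector chunk: %s\n",
           covers_full_chunk(8192, 16) ? "bypass copy" : "copy via kcopyd");
    printf("4KiB write, 16-sector chunk: %s\n",
           covers_full_chunk(4096, 16) ? "bypass copy" : "copy via kcopyd");
    return 0;
}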
*/ -static int __table_get_device(struct dm_table *t, struct dm_target *ti, - const char *path, fmode_t mode, struct dm_dev **result) +int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, + struct dm_dev **result) { int r; dev_t uninitialized_var(dev); struct dm_dev_internal *dd; unsigned int major, minor; + struct dm_table *t = ti->table; BUG_ON(!t); @@ -509,6 +509,7 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti, *result = &dd->dm_dev; return 0; } +EXPORT_SYMBOL(dm_get_device); int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) @@ -539,23 +540,15 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, * If not we'll force DM to use PAGE_SIZE or * smaller I/O, just to be safe. */ - - if (q->merge_bvec_fn && !ti->type->merge) + if (dm_queue_merge_is_compulsory(q) && !ti->type->merge) blk_limits_max_hw_sectors(limits, (unsigned int) (PAGE_SIZE >> 9)); return 0; } EXPORT_SYMBOL_GPL(dm_set_device_limits); -int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, - struct dm_dev **result) -{ - return __table_get_device(ti->table, ti, path, mode, result); -} - - /* - * Decrement a devices use count and remove it if necessary. + * Decrement a device's use count and remove it if necessary. */ void dm_put_device(struct dm_target *ti, struct dm_dev *d) { @@ -568,6 +561,7 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d) kfree(dd); } } +EXPORT_SYMBOL(dm_put_device); /* * Checks to see if the target joins onto the end of the table. @@ -791,8 +785,9 @@ int dm_table_add_target(struct dm_table *t, const char *type, t->highs[t->num_targets++] = tgt->begin + tgt->len - 1; - if (!tgt->num_discard_requests) - t->discards_supported = 0; + if (!tgt->num_discard_requests && tgt->discards_supported) + DMWARN("%s: %s: ignoring discards_supported because num_discard_requests is zero.", + dm_device_name(t->md), type); return 0; @@ -802,6 +797,63 @@ int dm_table_add_target(struct dm_table *t, const char *type, return r; } +/* + * Target argument parsing helpers. 
+ */ +static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set, + unsigned *value, char **error, unsigned grouped) +{ + const char *arg_str = dm_shift_arg(arg_set); + + if (!arg_str || + (sscanf(arg_str, "%u", value) != 1) || + (*value < arg->min) || + (*value > arg->max) || + (grouped && arg_set->argc < *value)) { + *error = arg->error; + return -EINVAL; + } + + return 0; +} + +int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set, + unsigned *value, char **error) +{ + return validate_next_arg(arg, arg_set, value, error, 0); +} +EXPORT_SYMBOL(dm_read_arg); + +int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set, + unsigned *value, char **error) +{ + return validate_next_arg(arg, arg_set, value, error, 1); +} +EXPORT_SYMBOL(dm_read_arg_group); + +const char *dm_shift_arg(struct dm_arg_set *as) +{ + char *r; + + if (as->argc) { + as->argc--; + r = *as->argv; + as->argv++; + return r; + } + + return NULL; +} +EXPORT_SYMBOL(dm_shift_arg); + +void dm_consume_args(struct dm_arg_set *as, unsigned num_args) +{ + BUG_ON(as->argc < num_args); + as->argc -= num_args; + as->argv += num_args; +} +EXPORT_SYMBOL(dm_consume_args); + static int dm_table_set_type(struct dm_table *t) { unsigned i; @@ -1077,11 +1129,13 @@ void dm_table_event(struct dm_table *t) t->event_fn(t->event_context); mutex_unlock(&_event_lock); } +EXPORT_SYMBOL(dm_table_event); sector_t dm_table_get_size(struct dm_table *t) { return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0; } +EXPORT_SYMBOL(dm_table_get_size); struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index) { @@ -1194,9 +1248,45 @@ static void dm_table_set_integrity(struct dm_table *t) blk_get_integrity(template_disk)); } +static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) +{ + unsigned flush = (*(unsigned *)data); + struct request_queue *q = bdev_get_queue(dev->bdev); + + return q && (q->flush_flags & flush); +} + +static bool dm_table_supports_flush(struct dm_table *t, unsigned flush) +{ + struct dm_target *ti; + unsigned i = 0; + + /* + * Require at least one underlying device to support flushes. + * t->devices includes internal dm devices such as mirror logs + * so we need to use iterate_devices here, which targets + * supporting flushes must provide. 
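The argument-parsing helpers added above give targets a common way to consume "<count> <args...>" style parameters. The rough userspace rendering below follows the same flow; the struct and function names are simplified stand-ins rather than the kernel API, and dm_read_arg_group()'s extra check (that the set still holds at least <count> entries) is omitted for brevity.

#include <stdio.h>

/* Simplified stand-ins for struct dm_arg_set / struct dm_arg from the patch. */
struct arg_set {
    unsigned argc;
    char **argv;
};

struct arg {
    unsigned min;
    unsigned max;
    const char *error;
};

static const char *shift_arg(struct arg_set *as)
{
    if (!as->argc)
        return NULL;
    as->argc--;
    return *as->argv++;
}

/* Same contract as dm_read_arg(): consume and range-check one numeric arg. */
static int read_arg(const struct arg *a, struct arg_set *as,
                    unsigned *value, const char **error)
{
    const char *s = shift_arg(as);

    if (!s || sscanf(s, "%u", value) != 1 ||
        *value < a->min || *value > a->max) {
        *error = a->error;
        return -1;
    }
    return 0;
}

int main(void)
{
    char *argv[] = { "3", "feature_x", "feature_y", "feature_z" };
    struct arg_set as = { 4, argv };
    struct arg nr_features = { 0, 8, "invalid number of feature arguments" };
    const char *error = NULL;
    unsigned n;

    if (read_arg(&nr_features, &as, &n, &error)) {
        fprintf(stderr, "%s\n", error);
        return 1;
    }
    printf("parsing %u feature arguments, %u words left in the set\n", n, as.argc);
    return 0;
}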
+ */ + while (i < dm_table_get_num_targets(t)) { + ti = dm_table_get_target(t, i++); + + if (!ti->num_flush_requests) + continue; + + if (ti->type->iterate_devices && + ti->type->iterate_devices(ti, device_flush_capable, &flush)) + return 1; + } + + return 0; +} + void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, struct queue_limits *limits) { + unsigned flush = 0; + /* * Copy table's limits to the DM device's request_queue */ @@ -1207,6 +1297,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, else queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); + if (dm_table_supports_flush(t, REQ_FLUSH)) { + flush |= REQ_FLUSH; + if (dm_table_supports_flush(t, REQ_FUA)) + flush |= REQ_FUA; + } + blk_queue_flush(q, flush); + dm_table_set_integrity(t); /* @@ -1237,6 +1334,7 @@ fmode_t dm_table_get_mode(struct dm_table *t) { return t->mode; } +EXPORT_SYMBOL(dm_table_get_mode); static void suspend_targets(struct dm_table *t, unsigned postsuspend) { @@ -1345,6 +1443,7 @@ struct mapped_device *dm_table_get_md(struct dm_table *t) { return t->md; } +EXPORT_SYMBOL(dm_table_get_md); static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) @@ -1359,19 +1458,19 @@ bool dm_table_supports_discards(struct dm_table *t) struct dm_target *ti; unsigned i = 0; - if (!t->discards_supported) - return 0; - /* * Unless any target used by the table set discards_supported, * require at least one underlying device to support discards. * t->devices includes internal dm devices such as mirror logs * so we need to use iterate_devices here, which targets - * supporting discard must provide. + * supporting discard selectively must provide. */ while (i < dm_table_get_num_targets(t)) { ti = dm_table_get_target(t, i++); + if (!ti->num_discard_requests) + continue; + if (ti->discards_supported) return 1; @@ -1382,13 +1481,3 @@ bool dm_table_supports_discards(struct dm_table *t) return 0; } - -EXPORT_SYMBOL(dm_vcalloc); -EXPORT_SYMBOL(dm_get_device); -EXPORT_SYMBOL(dm_put_device); -EXPORT_SYMBOL(dm_table_event); -EXPORT_SYMBOL(dm_table_get_size); -EXPORT_SYMBOL(dm_table_get_mode); -EXPORT_SYMBOL(dm_table_get_md); -EXPORT_SYMBOL(dm_table_put); -EXPORT_SYMBOL(dm_table_get); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 0cf68b478878..52b39f335bb3 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -37,6 +37,8 @@ static const char *_name = DM_NAME; static unsigned int major = 0; static unsigned int _major = 0; +static DEFINE_IDR(_minor_idr); + static DEFINE_SPINLOCK(_minor_lock); /* * For bio-based dm. @@ -109,6 +111,7 @@ EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo); #define DMF_FREEING 3 #define DMF_DELETING 4 #define DMF_NOFLUSH_SUSPENDING 5 +#define DMF_MERGE_IS_OPTIONAL 6 /* * Work processed by per-device workqueue. @@ -313,6 +316,12 @@ static void __exit dm_exit(void) while (i--) _exits[i](); + + /* + * Should be empty by this point. + */ + idr_remove_all(&_minor_idr); + idr_destroy(&_minor_idr); } /* @@ -1171,7 +1180,8 @@ static int __clone_and_map_discard(struct clone_info *ci) /* * Even though the device advertised discard support, - * reconfiguration might have changed that since the + * that does not mean every target supports it, and + * reconfiguration might also have changed that since the * check was performed. 
*/ if (!ti->num_discard_requests) @@ -1705,8 +1715,6 @@ static int dm_any_congested(void *congested_data, int bdi_bits) /*----------------------------------------------------------------- * An IDR is used to keep track of allocated minor numbers. *---------------------------------------------------------------*/ -static DEFINE_IDR(_minor_idr); - static void free_minor(int minor) { spin_lock(&_minor_lock); @@ -1800,7 +1808,6 @@ static void dm_init_md_queue(struct mapped_device *md) blk_queue_make_request(md->queue, dm_request); blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); blk_queue_merge_bvec(md->queue, dm_merge_bvec); - blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA); } /* @@ -1986,6 +1993,59 @@ static void __set_size(struct mapped_device *md, sector_t size) } /* + * Return 1 if the queue has a compulsory merge_bvec_fn function. + * + * If this function returns 0, then the device is either a non-dm + * device without a merge_bvec_fn, or it is a dm device that is + * able to split any bios it receives that are too big. + */ +int dm_queue_merge_is_compulsory(struct request_queue *q) +{ + struct mapped_device *dev_md; + + if (!q->merge_bvec_fn) + return 0; + + if (q->make_request_fn == dm_request) { + dev_md = q->queuedata; + if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags)) + return 0; + } + + return 1; +} + +static int dm_device_merge_is_compulsory(struct dm_target *ti, + struct dm_dev *dev, sector_t start, + sector_t len, void *data) +{ + struct block_device *bdev = dev->bdev; + struct request_queue *q = bdev_get_queue(bdev); + + return dm_queue_merge_is_compulsory(q); +} + +/* + * Return 1 if it is acceptable to ignore merge_bvec_fn based + * on the properties of the underlying devices. + */ +static int dm_table_merge_is_optional(struct dm_table *table) +{ + unsigned i = 0; + struct dm_target *ti; + + while (i < dm_table_get_num_targets(table)) { + ti = dm_table_get_target(table, i++); + + if (ti->type->iterate_devices && + ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL)) + return 0; + } + + return 1; +} + +/* * Returns old map, which caller must destroy. 
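The merge_bvec_fn handling just above boils down to one question per table load: does any underlying device still insist on its merge function, or can DM split oversized bios itself? A loose sketch of that decision, with a plain struct standing in for the request-queue state the kernel code inspects (the array and field names are illustrative assumptions):

#include <stdio.h>

/*
 * The dm_table_merge_is_optional() decision: merging is optional for the new
 * table only if no underlying device still insists on its merge_bvec_fn.
 */
struct dev_props {
    int has_merge_fn;           /* q->merge_bvec_fn != NULL */
    int dm_merge_is_optional;   /* a dm device with DMF_MERGE_IS_OPTIONAL set */
};

static int device_merge_is_compulsory(const struct dev_props *d)
{
    if (!d->has_merge_fn)
        return 0;
    if (d->dm_merge_is_optional)  /* stacked dm device that splits bios itself */
        return 0;
    return 1;
}

static int table_merge_is_optional(const struct dev_props *devs, int nr)
{
    int i;

    for (i = 0; i < nr; i++)
        if (device_merge_is_compulsory(&devs[i]))
            return 0;
    return 1;
}

int main(void)
{
    struct dev_props devs[] = {
        { 0, 0 },   /* plain disk without a merge_bvec_fn */
        { 1, 1 },   /* dm device able to split oversized bios */
    };

    printf("merge optional: %d\n", table_merge_is_optional(devs, 2));
    return 0;
}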
*/ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, @@ -1995,6 +2055,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, struct request_queue *q = md->queue; sector_t size; unsigned long flags; + int merge_is_optional; size = dm_table_get_size(t); @@ -2020,10 +2081,16 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, __bind_mempools(md, t); + merge_is_optional = dm_table_merge_is_optional(t); + write_lock_irqsave(&md->map_lock, flags); old_map = md->map; md->map = t; dm_table_set_restrictions(t, q, limits); + if (merge_is_optional) + set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags); + else + clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags); write_unlock_irqrestore(&md->map_lock, flags); return old_map; diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 1aaf16746da8..6745dbd278a4 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -66,6 +66,8 @@ int dm_table_alloc_md_mempools(struct dm_table *t); void dm_table_free_md_mempools(struct dm_table *t); struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t); +int dm_queue_merge_is_compulsory(struct request_queue *q); + void dm_lock_md_type(struct mapped_device *md); void dm_unlock_md_type(struct mapped_device *md); void dm_set_md_type(struct mapped_device *md, unsigned type); diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c index 5b0dba6d4efa..d724a18b5285 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.c +++ b/drivers/net/bnx2x/bnx2x_cmn.c @@ -1989,14 +1989,20 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) return -EINVAL; } + /* + * It's important to set the bp->state to the value different from + * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int() + * may restart the Tx from the NAPI context (see bnx2x_tx_int()). + */ + bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; + smp_mb(); + /* Stop Tx */ bnx2x_tx_disable(bp); #ifdef BCM_CNIC bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); #endif - bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; - smp_mb(); bp->rx_mode = BNX2X_RX_MODE_NONE; diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h index 06727f32e505..dc24de40e336 100644 --- a/drivers/net/bnx2x/bnx2x_hsi.h +++ b/drivers/net/bnx2x/bnx2x_hsi.h @@ -1204,6 +1204,8 @@ struct drv_port_mb { #define LINK_STATUS_PFC_ENABLED 0x20000000 + #define LINK_STATUS_PHYSICAL_LINK_FLAG 0x40000000 + u32 port_stx; u32 stat_nig_timer; diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c index bcd8f0038628..d45b1555a602 100644 --- a/drivers/net/bnx2x/bnx2x_link.c +++ b/drivers/net/bnx2x/bnx2x_link.c @@ -1546,6 +1546,12 @@ static void bnx2x_umac_enable(struct link_params *params, vars->line_speed); break; } + if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)) + val |= UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE; + + if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)) + val |= UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE; + REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); udelay(50); @@ -1661,10 +1667,20 @@ static void bnx2x_xmac_disable(struct link_params *params) { u8 port = params->port; struct bnx2x *bp = params->bp; - u32 xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; + u32 pfc_ctrl, xmac_base = (port) ? 
GRCBASE_XMAC1 : GRCBASE_XMAC0; if (REG_RD(bp, MISC_REG_RESET_REG_2) & MISC_REGISTERS_RESET_REG_2_XMAC) { + /* + * Send an indication to change the state in the NIG back to XON + * Clearing this bit enables the next set of this bit to get + * rising edge + */ + pfc_ctrl = REG_RD(bp, xmac_base + XMAC_REG_PFC_CTRL_HI); + REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, + (pfc_ctrl & ~(1<<1))); + REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, + (pfc_ctrl | (1<<1))); DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port); REG_WR(bp, xmac_base + XMAC_REG_CTRL, 0); usleep_range(1000, 1000); @@ -1729,6 +1745,10 @@ static int bnx2x_emac_enable(struct link_params *params, DP(NETIF_MSG_LINK, "enabling EMAC\n"); + /* Disable BMAC */ + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + /* enable emac and not bmac */ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1); @@ -2583,12 +2603,6 @@ static int bnx2x_bmac1_enable(struct link_params *params, REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS, wb_data, 2); - if (vars->phy_flags & PHY_TX_ERROR_CHECK_FLAG) { - REG_RD_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LSS_STATUS, - wb_data, 2); - if (wb_data[0] > 0) - return -ESRCH; - } return 0; } @@ -2654,16 +2668,6 @@ static int bnx2x_bmac2_enable(struct link_params *params, udelay(30); bnx2x_update_pfc_bmac2(params, vars, is_lb); - if (vars->phy_flags & PHY_TX_ERROR_CHECK_FLAG) { - REG_RD_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LSS_STAT, - wb_data, 2); - if (wb_data[0] > 0) { - DP(NETIF_MSG_LINK, "Got bad LSS status 0x%x\n", - wb_data[0]); - return -ESRCH; - } - } - return 0; } @@ -2949,7 +2953,9 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, u32 val; u16 i; int rc = 0; - + if (phy->flags & FLAGS_MDC_MDIO_WA_B0) + bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, + EMAC_MDIO_STATUS_10MB); /* address */ val = ((phy->addr << 21) | (devad << 16) | reg | EMAC_MDIO_COMM_COMMAND_ADDRESS | @@ -3003,6 +3009,9 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, } } + if (phy->flags & FLAGS_MDC_MDIO_WA_B0) + bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, + EMAC_MDIO_STATUS_10MB); return rc; } @@ -3012,6 +3021,9 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, u32 tmp; u8 i; int rc = 0; + if (phy->flags & FLAGS_MDC_MDIO_WA_B0) + bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, + EMAC_MDIO_STATUS_10MB); /* address */ @@ -3065,7 +3077,9 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val); } } - + if (phy->flags & FLAGS_MDC_MDIO_WA_B0) + bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, + EMAC_MDIO_STATUS_10MB); return rc; } @@ -4353,6 +4367,9 @@ void bnx2x_link_status_update(struct link_params *params, vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP); vars->phy_flags = PHY_XGXS_FLAG; + if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG) + vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG; + if (vars->link_up) { DP(NETIF_MSG_LINK, "phy link up\n"); @@ -4444,6 +4461,8 @@ void bnx2x_link_status_update(struct link_params *params, /* indicate no mac active */ vars->mac_type = MAC_TYPE_NONE; + if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG) + vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; } /* Sync media type */ @@ -5903,20 +5922,30 @@ int bnx2x_set_led(struct link_params *params, tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | 
EMAC_LED_OVERRIDE)); - return rc; + /* + * return here without enabling traffic + * LED blink andsetting rate in ON mode. + * In oper mode, enabling LED blink + * and setting rate is needed. + */ + if (mode == LED_MODE_ON) + return rc; } - } else if (SINGLE_MEDIA_DIRECT(params) && - (CHIP_IS_E1x(bp) || - CHIP_IS_E2(bp))) { + } else if (SINGLE_MEDIA_DIRECT(params)) { /* * This is a work-around for HW issue found when link * is up in CL73 */ - REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); - } else { + if (CHIP_IS_E1x(bp) || + CHIP_IS_E2(bp) || + (mode == LED_MODE_ON)) + REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); + else + REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, + hw_led_mode); + } else REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode); - } REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0); /* Set blinking rate to ~15.9Hz */ @@ -6160,6 +6189,7 @@ static int bnx2x_update_link_down(struct link_params *params, /* update shared memory */ vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK | LINK_STATUS_LINK_UP | + LINK_STATUS_PHYSICAL_LINK_FLAG | LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | @@ -6197,7 +6227,8 @@ static int bnx2x_update_link_up(struct link_params *params, u8 port = params->port; int rc = 0; - vars->link_status |= LINK_STATUS_LINK_UP; + vars->link_status |= (LINK_STATUS_LINK_UP | + LINK_STATUS_PHYSICAL_LINK_FLAG); vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG; if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) @@ -7998,6 +8029,9 @@ static void bnx2x_warpcore_set_limiting_mode(struct link_params *params, bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val); + /* Restart microcode to re-read the new mode */ + bnx2x_warpcore_reset_lane(bp, phy, 1); + bnx2x_warpcore_reset_lane(bp, phy, 0); } @@ -8116,7 +8150,6 @@ void bnx2x_handle_module_detect_int(struct link_params *params) offsetof(struct shmem_region, dev_info. port_feature_config[params->port]. 
config)); - bnx2x_set_gpio_int(bp, gpio_num, MISC_REGISTERS_GPIO_INT_OUTPUT_SET, gpio_port); @@ -8125,8 +8158,9 @@ void bnx2x_handle_module_detect_int(struct link_params *params) * Disable transmit for this module */ phy->media_type = ETH_PHY_NOT_PRESENT; - if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == - PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) + if (((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == + PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) || + CHIP_IS_E3(bp)) bnx2x_sfp_set_transmitter(params, phy, 0); } } @@ -8228,9 +8262,6 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy, u16 cnt, val, tmp1; struct bnx2x *bp = params->bp; - /* SPF+ PHY: Set flag to check for Tx error */ - vars->phy_flags = PHY_TX_ERROR_CHECK_FLAG; - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); /* HW reset */ @@ -8414,9 +8445,6 @@ static int bnx2x_8726_config_init(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; DP(NETIF_MSG_LINK, "Initializing BCM8726\n"); - /* SPF+ PHY: Set flag to check for Tx error */ - vars->phy_flags = PHY_TX_ERROR_CHECK_FLAG; - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); bnx2x_wait_reset_complete(bp, phy, params); @@ -8585,9 +8613,6 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */ - /* SPF+ PHY: Set flag to check for Tx error */ - vars->phy_flags = PHY_TX_ERROR_CHECK_FLAG; - bnx2x_wait_reset_complete(bp, phy, params); rx_alarm_ctrl_val = (1<<2) | (1<<5) ; /* Should be 0x6 to enable XS on Tx side. */ @@ -9243,7 +9268,13 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, if (phy->req_duplex == DUPLEX_FULL) autoneg_val |= (1<<8); - bnx2x_cl45_write(bp, phy, + /* + * Always write this if this is not 84833. + * For 84833, write it only when it's a forced speed. + */ + if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || + ((autoneg_val & (1<<12)) == 0)) + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val); @@ -9257,13 +9288,12 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x3200); - } else if (phy->req_line_speed != SPEED_10 && - phy->req_line_speed != SPEED_100) { + } else bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8481_10GBASE_T_AN_CTRL, 1); - } + /* Save spirom version */ bnx2x_save_848xx_spirom_version(phy, params); @@ -9756,11 +9786,9 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy, bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &val16); - /* Put to low power mode on newer FW */ - if ((val16 & 0x303f) > 0x1009) - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_CTRL, 0x800); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_CTRL, 0x800); } } @@ -10191,8 +10219,15 @@ static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy, u32 cfg_pin; u8 port; - /* This works with E3 only, no need to check the chip - before determining the port. */ + /* + * In case of no EPIO routed to reset the GPHY, put it + * in low power mode. + */ + bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800); + /* + * This works with E3 only, no need to check the chip + * before determining the port. 
+ */ port = params->port; cfg_pin = (REG_RD(bp, params->shmem_base + offsetof(struct shmem_region, @@ -10603,7 +10638,8 @@ static struct bnx2x_phy phy_warpcore = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, .addr = 0xff, .def_md_devad = 0, - .flags = FLAGS_HW_LOCK_REQUIRED, + .flags = (FLAGS_HW_LOCK_REQUIRED | + FLAGS_TX_ERROR_CHECK), .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, @@ -10729,7 +10765,8 @@ static struct bnx2x_phy phy_8706 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706, .addr = 0xff, .def_md_devad = 0, - .flags = FLAGS_INIT_XGXS_FIRST, + .flags = (FLAGS_INIT_XGXS_FIRST | + FLAGS_TX_ERROR_CHECK), .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, @@ -10760,7 +10797,8 @@ static struct bnx2x_phy phy_8726 = { .addr = 0xff, .def_md_devad = 0, .flags = (FLAGS_HW_LOCK_REQUIRED | - FLAGS_INIT_XGXS_FIRST), + FLAGS_INIT_XGXS_FIRST | + FLAGS_TX_ERROR_CHECK), .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, @@ -10791,7 +10829,8 @@ static struct bnx2x_phy phy_8727 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, .addr = 0xff, .def_md_devad = 0, - .flags = FLAGS_FAN_FAILURE_DET_REQ, + .flags = (FLAGS_FAN_FAILURE_DET_REQ | + FLAGS_TX_ERROR_CHECK), .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, @@ -11112,6 +11151,8 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, */ if (CHIP_REV(bp) == CHIP_REV_Ax) phy->flags |= FLAGS_MDC_MDIO_WA; + else + phy->flags |= FLAGS_MDC_MDIO_WA_B0; } else { switch (switch_cfg) { case SWITCH_CFG_1G: @@ -11500,13 +11541,12 @@ void bnx2x_init_xmac_loopback(struct link_params *params, * Set WC to loopback mode since link is required to provide clock * to the XMAC in 20G mode */ - if (vars->line_speed == SPEED_20000) { - bnx2x_set_aer_mmd(params, ¶ms->phy[0]); - bnx2x_warpcore_reset_lane(bp, ¶ms->phy[0], 0); - params->phy[INT_PHY].config_loopback( + bnx2x_set_aer_mmd(params, ¶ms->phy[0]); + bnx2x_warpcore_reset_lane(bp, ¶ms->phy[0], 0); + params->phy[INT_PHY].config_loopback( ¶ms->phy[INT_PHY], params); - } + bnx2x_xmac_enable(params, vars, 1); REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); } @@ -11684,12 +11724,16 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, bnx2x_set_led(params, vars, LED_MODE_OFF, 0); if (reset_ext_phy) { + bnx2x_set_mdio_clk(bp, params->chip_id, port); for (phy_index = EXT_PHY1; phy_index < params->num_phys; phy_index++) { - if (params->phy[phy_index].link_reset) + if (params->phy[phy_index].link_reset) { + bnx2x_set_aer_mmd(params, + ¶ms->phy[phy_index]); params->phy[phy_index].link_reset( ¶ms->phy[phy_index], params); + } if (params->phy[phy_index].flags & FLAGS_REARM_LATCH_SIGNAL) clear_latch_ind = 1; @@ -12178,10 +12222,6 @@ static void bnx2x_analyze_link_error(struct link_params *params, u8 led_mode; u32 half_open_conn = (vars->phy_flags & PHY_HALF_OPEN_CONN_FLAG) > 0; - /*DP(NETIF_MSG_LINK, "CHECK LINK: %x half_open:%x-> lss:%x\n", - vars->link_up, - half_open_conn, lss_status);*/ - if ((lss_status ^ half_open_conn) == 0) return; @@ -12194,6 +12234,7 @@ static void bnx2x_analyze_link_error(struct link_params *params, * b. 
Update link_vars->link_up */ if (lss_status) { + DP(NETIF_MSG_LINK, "Remote Fault detected !!!\n"); vars->link_status &= ~LINK_STATUS_LINK_UP; vars->link_up = 0; vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; @@ -12203,6 +12244,7 @@ static void bnx2x_analyze_link_error(struct link_params *params, */ led_mode = LED_MODE_OFF; } else { + DP(NETIF_MSG_LINK, "Remote Fault cleared\n"); vars->link_status |= LINK_STATUS_LINK_UP; vars->link_up = 1; vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; @@ -12219,6 +12261,15 @@ static void bnx2x_analyze_link_error(struct link_params *params, bnx2x_notify_link_changed(bp); } +/****************************************************************************** +* Description: +* This function checks for half opened connection change indication. +* When such change occurs, it calls the bnx2x_analyze_link_error +* to check if Remote Fault is set or cleared. Reception of remote fault +* status message in the MAC indicates that the peer's MAC has detected +* a fault, for example, due to break in the TX side of fiber. +* +******************************************************************************/ static void bnx2x_check_half_open_conn(struct link_params *params, struct link_vars *vars) { @@ -12229,9 +12280,28 @@ static void bnx2x_check_half_open_conn(struct link_params *params, if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) return; - if (!CHIP_IS_E3(bp) && + if (CHIP_IS_E3(bp) && (REG_RD(bp, MISC_REG_RESET_REG_2) & - (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))) { + (MISC_REGISTERS_RESET_REG_2_XMAC))) { + /* Check E3 XMAC */ + /* + * Note that link speed cannot be queried here, since it may be + * zero while link is down. In case UMAC is active, LSS will + * simply not be set + */ + mac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; + + /* Clear stick bits (Requires rising edge) */ + REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0); + REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, + XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS | + XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS); + if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS)) + lss_status = 1; + + bnx2x_analyze_link_error(params, vars, lss_status); + } else if (REG_RD(bp, MISC_REG_RESET_REG_2) & + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) { /* Check E1X / E2 BMAC */ u32 lss_status_reg; u32 wb_data[2]; @@ -12253,14 +12323,20 @@ static void bnx2x_check_half_open_conn(struct link_params *params, void bnx2x_period_func(struct link_params *params, struct link_vars *vars) { struct bnx2x *bp = params->bp; + u16 phy_idx; if (!params) { - DP(NETIF_MSG_LINK, "Ininitliazed params !\n"); + DP(NETIF_MSG_LINK, "Uninitialized params !\n"); return; } - /* DP(NETIF_MSG_LINK, "Periodic called vars->phy_flags 0x%x speed 0x%x - RESET_REG_2 0x%x\n", vars->phy_flags, vars->line_speed, - REG_RD(bp, MISC_REG_RESET_REG_2)); */ - bnx2x_check_half_open_conn(params, vars); + + for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) { + if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) { + bnx2x_set_aer_mmd(params, &params->phy[phy_idx]); + bnx2x_check_half_open_conn(params, vars); + break; + } + } + if (CHIP_IS_E3(bp)) bnx2x_check_over_curr(params, vars); } diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h index 6a7708d5da37..c12db6da213e 100644 --- a/drivers/net/bnx2x/bnx2x_link.h +++ b/drivers/net/bnx2x/bnx2x_link.h @@ -145,6 +145,8 @@ struct bnx2x_phy { #define FLAGS_SFP_NOT_APPROVED (1<<7) #define FLAGS_MDC_MDIO_WA (1<<8) #define FLAGS_DUMMY_READ
(1<<9) +#define FLAGS_MDC_MDIO_WA_B0 (1<<10) +#define FLAGS_TX_ERROR_CHECK (1<<12) /* preemphasis values for the rx side */ u16 rx_preemphasis[4]; @@ -276,7 +278,6 @@ struct link_vars { #define PHY_PHYSICAL_LINK_FLAG (1<<2) #define PHY_HALF_OPEN_CONN_FLAG (1<<3) #define PHY_OVER_CURRENT_FLAG (1<<4) -#define PHY_TX_ERROR_CHECK_FLAG (1<<5) u8 mac_type; #define MAC_TYPE_NONE 0 diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h index 02461fef8751..27b5ecb11830 100644 --- a/drivers/net/bnx2x/bnx2x_reg.h +++ b/drivers/net/bnx2x/bnx2x_reg.h @@ -4771,9 +4771,11 @@ The fields are: [4:0] - tail pointer; 10:5] - Link List size; 15:11] - header pointer. */ #define UCM_REG_XX_TABLE 0xe0300 +#define UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE (0x1<<28) #define UMAC_COMMAND_CONFIG_REG_LOOP_ENA (0x1<<15) #define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK (0x1<<24) #define UMAC_COMMAND_CONFIG_REG_PAD_EN (0x1<<5) +#define UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE (0x1<<8) #define UMAC_COMMAND_CONFIG_REG_PROMIS_EN (0x1<<4) #define UMAC_COMMAND_CONFIG_REG_RX_ENA (0x1<<1) #define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1<<13) @@ -5622,8 +5624,9 @@ #define EMAC_MDIO_COMM_START_BUSY (1L<<29) #define EMAC_MDIO_MODE_AUTO_POLL (1L<<4) #define EMAC_MDIO_MODE_CLAUSE_45 (1L<<31) -#define EMAC_MDIO_MODE_CLOCK_CNT (0x3fL<<16) +#define EMAC_MDIO_MODE_CLOCK_CNT (0x3ffL<<16) #define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT 16 +#define EMAC_MDIO_STATUS_10MB (1L<<1) #define EMAC_MODE_25G_MODE (1L<<5) #define EMAC_MODE_HALF_DUPLEX (1L<<1) #define EMAC_MODE_PORT_GMII (2L<<2) @@ -5634,6 +5637,7 @@ #define EMAC_REG_EMAC_MAC_MATCH 0x10 #define EMAC_REG_EMAC_MDIO_COMM 0xac #define EMAC_REG_EMAC_MDIO_MODE 0xb4 +#define EMAC_REG_EMAC_MDIO_STATUS 0xb0 #define EMAC_REG_EMAC_MODE 0x0 #define EMAC_REG_EMAC_RX_MODE 0xc8 #define EMAC_REG_EMAC_RX_MTU_SIZE 0x9c diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c index c5f0f04219f3..5548d464261a 100644 --- a/drivers/net/e1000/e1000_ethtool.c +++ b/drivers/net/e1000/e1000_ethtool.c @@ -838,6 +838,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) /* Disable all the interrupts */ ew32(IMC, 0xFFFFFFFF); + E1000_WRITE_FLUSH(); msleep(10); /* Test each interrupt */ @@ -856,6 +857,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) adapter->test_icr = 0; ew32(IMC, mask); ew32(ICS, mask); + E1000_WRITE_FLUSH(); msleep(10); if (adapter->test_icr & mask) { @@ -873,6 +875,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) adapter->test_icr = 0; ew32(IMS, mask); ew32(ICS, mask); + E1000_WRITE_FLUSH(); msleep(10); if (!(adapter->test_icr & mask)) { @@ -890,6 +893,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) adapter->test_icr = 0; ew32(IMC, ~mask & 0x00007FFF); ew32(ICS, ~mask & 0x00007FFF); + E1000_WRITE_FLUSH(); msleep(10); if (adapter->test_icr) { @@ -901,6 +905,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) /* Disable all the interrupts */ ew32(IMC, 0xFFFFFFFF); + E1000_WRITE_FLUSH(); msleep(10); /* Unhook test interrupt handler */ @@ -1394,6 +1399,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) if (unlikely(++k == txdr->count)) k = 0; } ew32(TDT, k); + E1000_WRITE_FLUSH(); msleep(200); time = jiffies; /* set the start time for the receive */ good_cnt = 0; diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index 1698622af434..8545c7aa93eb 100644 --- a/drivers/net/e1000/e1000_hw.c +++ 
b/drivers/net/e1000/e1000_hw.c @@ -446,6 +446,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw) /* Must reset the PHY before resetting the MAC */ if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST)); + E1000_WRITE_FLUSH(); msleep(5); } @@ -3752,6 +3753,7 @@ static s32 e1000_acquire_eeprom(struct e1000_hw *hw) /* Clear SK and CS */ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); ew32(EECD, eecd); + E1000_WRITE_FLUSH(); udelay(1); } @@ -3824,6 +3826,7 @@ static void e1000_release_eeprom(struct e1000_hw *hw) eecd &= ~E1000_EECD_SK; /* Lower SCK */ ew32(EECD, eecd); + E1000_WRITE_FLUSH(); udelay(hw->eeprom.delay_usec); } else if (hw->eeprom.type == e1000_eeprom_microwire) { diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c index c0ecb2d9fdb7..e4f42257c24c 100644 --- a/drivers/net/e1000e/es2lan.c +++ b/drivers/net/e1000e/es2lan.c @@ -1313,6 +1313,7 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; ew32(KMRNCTRLSTA, kmrnctrlsta); + e1e_flush(); udelay(2); @@ -1347,6 +1348,7 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & E1000_KMRNCTRLSTA_OFFSET) | data; ew32(KMRNCTRLSTA, kmrnctrlsta); + e1e_flush(); udelay(2); diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index cb1a3623253e..06d88f316dce 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c @@ -28,8 +28,8 @@ /* ethtool support for e1000 */ -#include <linux/interrupt.h> #include <linux/netdevice.h> +#include <linux/interrupt.h> #include <linux/ethtool.h> #include <linux/pci.h> #include <linux/slab.h> @@ -964,6 +964,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) /* Disable all the interrupts */ ew32(IMC, 0xFFFFFFFF); + e1e_flush(); usleep_range(10000, 20000); /* Test each interrupt */ @@ -996,6 +997,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) adapter->test_icr = 0; ew32(IMC, mask); ew32(ICS, mask); + e1e_flush(); usleep_range(10000, 20000); if (adapter->test_icr & mask) { @@ -1014,6 +1016,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) adapter->test_icr = 0; ew32(IMS, mask); ew32(ICS, mask); + e1e_flush(); usleep_range(10000, 20000); if (!(adapter->test_icr & mask)) { @@ -1032,6 +1035,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) adapter->test_icr = 0; ew32(IMC, ~mask & 0x00007FFF); ew32(ICS, ~mask & 0x00007FFF); + e1e_flush(); usleep_range(10000, 20000); if (adapter->test_icr) { @@ -1043,6 +1047,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) /* Disable all the interrupts */ ew32(IMC, 0xFFFFFFFF); + e1e_flush(); usleep_range(10000, 20000); /* Unhook test interrupt handler */ @@ -1276,6 +1281,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) E1000_CTRL_FD); /* Force Duplex to FULL */ ew32(CTRL, ctrl_reg); + e1e_flush(); udelay(500); return 0; @@ -1418,6 +1424,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter) */ #define E1000_SERDES_LB_ON 0x410 ew32(SCTL, E1000_SERDES_LB_ON); + e1e_flush(); usleep_range(10000, 20000); return 0; @@ -1513,6 +1520,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter) hw->phy.media_type == e1000_media_type_internal_serdes) { #define E1000_SERDES_LB_OFF 0x400 ew32(SCTL, 
E1000_SERDES_LB_OFF); + e1e_flush(); usleep_range(10000, 20000); break; } @@ -1592,6 +1600,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter) k = 0; } ew32(TDT, k); + e1e_flush(); msleep(200); time = jiffies; /* set the start time for the receive */ good_cnt = 0; diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index c1752124f3cd..4e36978b8fd8 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c @@ -283,6 +283,7 @@ static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw) ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; ew32(CTRL, ctrl); + e1e_flush(); udelay(10); ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE; ew32(CTRL, ctrl); @@ -1230,9 +1231,11 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) ew32(CTRL, reg); ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS); + e1e_flush(); udelay(20); ew32(CTRL, ctrl_reg); ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); udelay(20); out: @@ -2134,8 +2137,7 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, ret_val = 0; for (i = 0; i < words; i++) { - if ((dev_spec->shadow_ram) && - (dev_spec->shadow_ram[offset+i].modified)) { + if (dev_spec->shadow_ram[offset+i].modified) { data[i] = dev_spec->shadow_ram[offset+i].value; } else { ret_val = e1000_read_flash_word_ich8lan(hw, @@ -3090,6 +3092,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) ret_val = e1000_acquire_swflag_ich8lan(hw); e_dbg("Issuing a global reset to ich8lan\n"); ew32(CTRL, (ctrl | E1000_CTRL_RST)); + /* cannot issue a flush here because it hangs the hardware */ msleep(20); if (!ret_val) diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c index 65580b405942..7898a67d6505 100644 --- a/drivers/net/e1000e/lib.c +++ b/drivers/net/e1000e/lib.c @@ -1986,6 +1986,7 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) /* Clear SK and CS */ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); ew32(EECD, eecd); + e1e_flush(); udelay(1); /* diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 4353ad56cf16..ab4be80f7ab5 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -31,12 +31,12 @@ #include <linux/module.h> #include <linux/types.h> #include <linux/init.h> -#include <linux/interrupt.h> #include <linux/pci.h> #include <linux/vmalloc.h> #include <linux/pagemap.h> #include <linux/delay.h> #include <linux/netdevice.h> +#include <linux/interrupt.h> #include <linux/tcp.h> #include <linux/ipv6.h> #include <linux/slab.h> diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c index 2a6ee13285b1..8666476cb9be 100644 --- a/drivers/net/e1000e/phy.c +++ b/drivers/net/e1000e/phy.c @@ -537,6 +537,7 @@ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; ew32(KMRNCTRLSTA, kmrnctrlsta); + e1e_flush(); udelay(2); @@ -609,6 +610,7 @@ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & E1000_KMRNCTRLSTA_OFFSET) | data; ew32(KMRNCTRLSTA, kmrnctrlsta); + e1e_flush(); udelay(2); diff --git a/drivers/net/gianfar_ptp.c b/drivers/net/gianfar_ptp.c index 1c97861596f0..f67b8aebc89c 100644 --- a/drivers/net/gianfar_ptp.c +++ b/drivers/net/gianfar_ptp.c @@ -193,14 +193,9 @@ static void set_alarm(struct etsects *etsects) /* Caller must hold etsects->lock. 
*/ static void set_fipers(struct etsects *etsects) { - u32 tmr_ctrl = gfar_read(&etsects->regs->tmr_ctrl); - - gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl & (~TE)); - gfar_write(&etsects->regs->tmr_prsc, etsects->tmr_prsc); + set_alarm(etsects); gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); - set_alarm(etsects); - gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|TE); } /* @@ -511,7 +506,7 @@ static int gianfar_ptp_probe(struct platform_device *dev) gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); set_alarm(etsects); - gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FS|RTPE|TE); + gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FS|RTPE|TE|FRD); spin_unlock_irqrestore(&etsects->lock, flags); diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c index 7dcd65cede56..40407124e722 100644 --- a/drivers/net/igb/e1000_nvm.c +++ b/drivers/net/igb/e1000_nvm.c @@ -285,6 +285,7 @@ static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw) /* Clear SK and CS */ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); wr32(E1000_EECD, eecd); + wrfl(); udelay(1); timeout = NVM_MAX_RETRY_SPI; diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c index ff244ce803ce..414b0225be89 100644 --- a/drivers/net/igb/igb_ethtool.c +++ b/drivers/net/igb/igb_ethtool.c @@ -1225,6 +1225,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data) /* Disable all the interrupts */ wr32(E1000_IMC, ~0); + wrfl(); msleep(10); /* Define all writable bits for ICS */ @@ -1268,6 +1269,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data) wr32(E1000_IMC, mask); wr32(E1000_ICS, mask); + wrfl(); msleep(10); if (adapter->test_icr & mask) { @@ -1289,6 +1291,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data) wr32(E1000_IMS, mask); wr32(E1000_ICS, mask); + wrfl(); msleep(10); if (!(adapter->test_icr & mask)) { @@ -1310,6 +1313,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data) wr32(E1000_IMC, ~mask); wr32(E1000_ICS, ~mask); + wrfl(); msleep(10); if (adapter->test_icr & mask) { @@ -1321,6 +1325,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data) /* Disable all the interrupts */ wr32(E1000_IMC, ~0); + wrfl(); msleep(10); /* Unhook test interrupt handler */ diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index dc599059512a..40d4c405fd7e 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -1052,6 +1052,7 @@ msi_only: kfree(adapter->vf_data); adapter->vf_data = NULL; wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); + wrfl(); msleep(100); dev_info(&adapter->pdev->dev, "IOV Disabled\n"); } @@ -2022,7 +2023,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, if (hw->bus.func == 0) hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); - else if (hw->mac.type == e1000_82580) + else if (hw->mac.type >= e1000_82580) hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, &eeprom_data); @@ -2198,6 +2199,7 @@ static void __devexit igb_remove(struct pci_dev *pdev) kfree(adapter->vf_data); adapter->vf_data = NULL; wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); + wrfl(); msleep(100); dev_info(&pdev->dev, "IOV Disabled\n"); } diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c index 1330c8e932da..40ed066e3ef4 100644 --- a/drivers/net/igbvf/netdev.c +++ b/drivers/net/igbvf/netdev.c @@ -1226,6 +1226,7 @@ static 
void igbvf_configure_tx(struct igbvf_adapter *adapter) /* disable transmits */ txdctl = er32(TXDCTL(0)); ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE); + e1e_flush(); msleep(10); /* Setup the HW Tx Head and Tail descriptor pointers */ @@ -1306,6 +1307,7 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter) /* disable receives */ rxdctl = er32(RXDCTL(0)); ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); + e1e_flush(); msleep(10); rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc); diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c index 954f6e938fb7..8b1c3484d271 100644 --- a/drivers/net/irda/smsc-ircc2.c +++ b/drivers/net/irda/smsc-ircc2.c @@ -2405,8 +2405,6 @@ static int __init smsc_superio_lpc(unsigned short cfg_base) * addresses making a subsystem device table necessary. */ #ifdef CONFIG_PCI -#define PCIID_VENDOR_INTEL 0x8086 -#define PCIID_VENDOR_ALI 0x10b9 static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __initdata = { /* * Subsystems needing entries: @@ -2416,7 +2414,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini */ { /* Guessed entry */ - .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */ + .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */ .device = 0x24cc, .subvendor = 0x103c, .subdevice = 0x08bc, @@ -2429,7 +2427,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini .name = "HP nx5000 family", }, { - .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */ + .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */ .device = 0x24cc, .subvendor = 0x103c, .subdevice = 0x088c, @@ -2443,7 +2441,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini .name = "HP nc8000 family", }, { - .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */ + .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */ .device = 0x24cc, .subvendor = 0x103c, .subdevice = 0x0890, @@ -2456,7 +2454,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini .name = "HP nc6000 family", }, { - .vendor = PCIID_VENDOR_INTEL, /* Intel 82801DBM LPC bridge */ + .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801DBM LPC bridge */ .device = 0x24cc, .subvendor = 0x0e11, .subdevice = 0x0860, @@ -2471,7 +2469,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini }, { /* Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge */ - .vendor = PCIID_VENDOR_INTEL, + .vendor = PCI_VENDOR_ID_INTEL, .device = 0x24c0, .subvendor = 0x1179, .subdevice = 0xffff, /* 0xffff is "any" */ @@ -2484,7 +2482,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini .name = "Toshiba laptop with Intel 82801DB/DBL LPC bridge", }, { - .vendor = PCIID_VENDOR_INTEL, /* Intel 82801CAM ISA bridge */ + .vendor = PCI_VENDOR_ID_INTEL, /* Intel 82801CAM ISA bridge */ .device = 0x248c, .subvendor = 0x1179, .subdevice = 0xffff, /* 0xffff is "any" */ @@ -2498,7 +2496,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini }, { /* 82801DBM (ICH4-M) LPC Interface Bridge */ - .vendor = PCIID_VENDOR_INTEL, + .vendor = PCI_VENDOR_ID_INTEL, .device = 0x24cc, .subvendor = 0x1179, .subdevice = 0xffff, /* 0xffff is "any" */ @@ -2512,7 +2510,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini }, { /* ALi M1533/M1535 PCI to ISA Bridge [Aladdin IV/V/V+] */ - .vendor = PCIID_VENDOR_ALI, + .vendor = 
PCI_VENDOR_ID_AL, .device = 0x1533, .subvendor = 0x1179, .subdevice = 0xffff, /* 0xffff is "any" */ diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c index c982ab9f9005..38b362b67857 100644 --- a/drivers/net/ixgb/ixgb_ee.c +++ b/drivers/net/ixgb/ixgb_ee.c @@ -57,6 +57,7 @@ ixgb_raise_clock(struct ixgb_hw *hw, */ *eecd_reg = *eecd_reg | IXGB_EECD_SK; IXGB_WRITE_REG(hw, EECD, *eecd_reg); + IXGB_WRITE_FLUSH(hw); udelay(50); } @@ -75,6 +76,7 @@ ixgb_lower_clock(struct ixgb_hw *hw, */ *eecd_reg = *eecd_reg & ~IXGB_EECD_SK; IXGB_WRITE_REG(hw, EECD, *eecd_reg); + IXGB_WRITE_FLUSH(hw); udelay(50); } @@ -112,6 +114,7 @@ ixgb_shift_out_bits(struct ixgb_hw *hw, eecd_reg |= IXGB_EECD_DI; IXGB_WRITE_REG(hw, EECD, eecd_reg); + IXGB_WRITE_FLUSH(hw); udelay(50); @@ -206,21 +209,25 @@ ixgb_standby_eeprom(struct ixgb_hw *hw) /* Deselect EEPROM */ eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_SK); IXGB_WRITE_REG(hw, EECD, eecd_reg); + IXGB_WRITE_FLUSH(hw); udelay(50); /* Clock high */ eecd_reg |= IXGB_EECD_SK; IXGB_WRITE_REG(hw, EECD, eecd_reg); + IXGB_WRITE_FLUSH(hw); udelay(50); /* Select EEPROM */ eecd_reg |= IXGB_EECD_CS; IXGB_WRITE_REG(hw, EECD, eecd_reg); + IXGB_WRITE_FLUSH(hw); udelay(50); /* Clock low */ eecd_reg &= ~IXGB_EECD_SK; IXGB_WRITE_REG(hw, EECD, eecd_reg); + IXGB_WRITE_FLUSH(hw); udelay(50); } @@ -239,11 +246,13 @@ ixgb_clock_eeprom(struct ixgb_hw *hw) /* Rising edge of clock */ eecd_reg |= IXGB_EECD_SK; IXGB_WRITE_REG(hw, EECD, eecd_reg); + IXGB_WRITE_FLUSH(hw); udelay(50); /* Falling edge of clock */ eecd_reg &= ~IXGB_EECD_SK; IXGB_WRITE_REG(hw, EECD, eecd_reg); + IXGB_WRITE_FLUSH(hw); udelay(50); } diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c index 6cb2e42ff4c1..3d61a9e4faf7 100644 --- a/drivers/net/ixgb/ixgb_hw.c +++ b/drivers/net/ixgb/ixgb_hw.c @@ -149,6 +149,7 @@ ixgb_adapter_stop(struct ixgb_hw *hw) */ IXGB_WRITE_REG(hw, RCTL, IXGB_READ_REG(hw, RCTL) & ~IXGB_RCTL_RXEN); IXGB_WRITE_REG(hw, TCTL, IXGB_READ_REG(hw, TCTL) & ~IXGB_TCTL_TXEN); + IXGB_WRITE_FLUSH(hw); msleep(IXGB_DELAY_BEFORE_RESET); /* Issue a global reset to the MAC. 
This will reset the chip's @@ -1220,6 +1221,7 @@ ixgb_optics_reset_bcm(struct ixgb_hw *hw) ctrl &= ~IXGB_CTRL0_SDP2; ctrl |= IXGB_CTRL0_SDP3; IXGB_WRITE_REG(hw, CTRL0, ctrl); + IXGB_WRITE_FLUSH(hw); /* SerDes needs extra delay */ msleep(IXGB_SUN_PHY_RESET_DELAY); diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index 3b3dd4df4c5c..34f30ec79c2e 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c @@ -213,6 +213,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) switch (hw->phy.type) { case ixgbe_phy_tn: phy->ops.check_link = &ixgbe_check_phy_link_tnx; + phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; phy->ops.get_firmware_version = &ixgbe_get_phy_firmware_version_tnx; break; diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index 777051f54e53..fc1375f26fe5 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c @@ -2632,6 +2632,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) autoc_reg |= IXGBE_AUTOC_AN_RESTART; autoc_reg |= IXGBE_AUTOC_FLU; IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + IXGBE_WRITE_FLUSH(hw); usleep_range(10000, 20000); } diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index dc649553a0a6..82d4244c6e10 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -1378,6 +1378,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) /* Disable all the interrupts */ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); + IXGBE_WRITE_FLUSH(&adapter->hw); usleep_range(10000, 20000); /* Test each interrupt */ @@ -1398,6 +1399,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) ~mask & 0x00007FFF); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, ~mask & 0x00007FFF); + IXGBE_WRITE_FLUSH(&adapter->hw); usleep_range(10000, 20000); if (adapter->test_icr & mask) { @@ -1415,6 +1417,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) adapter->test_icr = 0; IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); + IXGBE_WRITE_FLUSH(&adapter->hw); usleep_range(10000, 20000); if (!(adapter->test_icr &mask)) { @@ -1435,6 +1438,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) ~mask & 0x00007FFF); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, ~mask & 0x00007FFF); + IXGBE_WRITE_FLUSH(&adapter->hw); usleep_range(10000, 20000); if (adapter->test_icr) { @@ -1446,6 +1450,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) /* Disable all the interrupts */ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); + IXGBE_WRITE_FLUSH(&adapter->hw); usleep_range(10000, 20000); /* Unhook test interrupt handler */ diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 1be617545dc9..e86297b32733 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -184,6 +184,7 @@ static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); + IXGBE_WRITE_FLUSH(hw); /* take a breather then clean up driver data */ msleep(100); @@ -1005,7 +1006,7 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) struct ixgbe_adapter *adapter = dev_get_drvdata(dev); unsigned long event = *(unsigned long *)data; - if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) + if (!(adapter->flags & 
IXGBE_FLAG_DCA_CAPABLE)) return 0; switch (event) { diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c index 735f686c3b36..f7ca3511b9fe 100644 --- a/drivers/net/ixgbe/ixgbe_phy.c +++ b/drivers/net/ixgbe/ixgbe_phy.c @@ -1585,6 +1585,7 @@ static s32 ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) *i2cctl |= IXGBE_I2C_CLK_OUT; IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); + IXGBE_WRITE_FLUSH(hw); /* SCL rise time (1000ns) */ udelay(IXGBE_I2C_T_RISE); @@ -1605,6 +1606,7 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) *i2cctl &= ~IXGBE_I2C_CLK_OUT; IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); + IXGBE_WRITE_FLUSH(hw); /* SCL fall time (300ns) */ udelay(IXGBE_I2C_T_FALL); @@ -1628,6 +1630,7 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) *i2cctl &= ~IXGBE_I2C_DATA_OUT; IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); + IXGBE_WRITE_FLUSH(hw); /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c index bec30ed91adc..2696c78e9f46 100644 --- a/drivers/net/ixgbe/ixgbe_x540.c +++ b/drivers/net/ixgbe/ixgbe_x540.c @@ -162,6 +162,7 @@ mac_reset_top: ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); + IXGBE_WRITE_FLUSH(hw); msleep(50); diff --git a/drivers/net/macb.c b/drivers/net/macb.c index 0fcdc25699d8..dc4e305a1087 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c @@ -322,6 +322,9 @@ static void macb_tx(struct macb *bp) for (i = 0; i < TX_RING_SIZE; i++) bp->tx_ring[i].ctrl = MACB_BIT(TX_USED); + /* Add wrap bit */ + bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); + /* free transmit buffer in upper layer*/ for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) { struct ring_info *rp = &bp->tx_skb[tail]; diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c index 5e7109178061..5ada5b469112 100644 --- a/drivers/net/mlx4/en_port.c +++ b/drivers/net/mlx4/en_port.c @@ -128,7 +128,7 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, memset(context, 0, sizeof *context); context->base_qpn = cpu_to_be32(base_qpn); - context->n_mac = 0x7; + context->n_mac = 0x2; context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | base_qpn); context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT | diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index c94b3426d355..f0ee35df4dd7 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c @@ -1117,6 +1117,8 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port) info->port = port; mlx4_init_mac_table(dev, &info->mac_table); mlx4_init_vlan_table(dev, &info->vlan_table); + info->base_qpn = dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] + + (port - 1) * (1 << log_num_mac); sprintf(info->dev_name, "mlx4_port%d", port); info->port_attr.attr.name = info->dev_name; diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c index 1f95afda6841..609e0ec14cee 100644 --- a/drivers/net/mlx4/port.c +++ b/drivers/net/mlx4/port.c @@ -258,9 +258,12 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn) if (validate_index(dev, table, index)) goto out; - table->entries[index] = 0; - mlx4_set_port_mac_table(dev, port, table->entries); - --table->total; + /* Check whether this address has reference count */ + if (!(--table->refs[index])) { + table->entries[index] = 
0; + mlx4_set_port_mac_table(dev, port, table->entries); + --table->total; + } out: mutex_unlock(&table->mutex); } diff --git a/drivers/net/niu.c b/drivers/net/niu.c index cd6c2317e29e..ed47585a6862 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c @@ -9201,7 +9201,7 @@ static int __devinit niu_ldg_init(struct niu *np) first_chan = 0; for (i = 0; i < port; i++) - first_chan += parent->rxchan_per_port[port]; + first_chan += parent->rxchan_per_port[i]; num_chan = parent->rxchan_per_port[port]; for (i = first_chan; i < (first_chan + num_chan); i++) { @@ -9217,7 +9217,7 @@ static int __devinit niu_ldg_init(struct niu *np) first_chan = 0; for (i = 0; i < port; i++) - first_chan += parent->txchan_per_port[port]; + first_chan += parent->txchan_per_port[i]; num_chan = parent->txchan_per_port[port]; for (i = first_chan; i < (first_chan + num_chan); i++) { err = niu_ldg_assign_ldn(np, parent, diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 2cd8dc5847b4..cb6e0b486b1e 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -34,8 +34,7 @@ #define PAGESEL 0x13 #define LAYER4 0x02 #define LAYER2 0x01 -#define MAX_RXTS 4 -#define MAX_TXTS 4 +#define MAX_RXTS 64 #define N_EXT_TS 1 #define PSF_PTPVER 2 #define PSF_EVNT 0x4000 @@ -218,7 +217,7 @@ static void phy2rxts(struct phy_rxts *p, struct rxts *rxts) rxts->seqid = p->seqid; rxts->msgtype = (p->msgtype >> 12) & 0xf; rxts->hash = p->msgtype & 0x0fff; - rxts->tmo = jiffies + HZ; + rxts->tmo = jiffies + 2; } static u64 phy2txts(struct phy_txts *p) diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index c77286edba4d..02339b3352e7 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -1092,6 +1092,21 @@ rtl_w1w0_eri(void __iomem *ioaddr, int addr, u32 mask, u32 p, u32 m, int type) rtl_eri_write(ioaddr, addr, mask, (val & ~m) | p, type); } +struct exgmac_reg { + u16 addr; + u16 mask; + u32 val; +}; + +static void rtl_write_exgmac_batch(void __iomem *ioaddr, + const struct exgmac_reg *r, int len) +{ + while (len-- > 0) { + rtl_eri_write(ioaddr, r->addr, r->mask, r->val, ERIAR_EXGMAC); + r++; + } +} + static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr) { u8 value = 0xff; @@ -3117,6 +3132,18 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) RTL_W32(MAC0, low); RTL_R32(MAC0); + if (tp->mac_version == RTL_GIGA_MAC_VER_34) { + const struct exgmac_reg e[] = { + { .addr = 0xe0, ERIAR_MASK_1111, .val = low }, + { .addr = 0xe4, ERIAR_MASK_1111, .val = high }, + { .addr = 0xf0, ERIAR_MASK_1111, .val = low << 16 }, + { .addr = 0xf4, ERIAR_MASK_1111, .val = high << 16 | + low >> 16 }, + }; + + rtl_write_exgmac_batch(ioaddr, e, ARRAY_SIZE(e)); + } + RTL_W8(Cfg9346, Cfg9346_Lock); spin_unlock_irq(&tp->lock); diff --git a/drivers/net/slip.c b/drivers/net/slip.c index cbe8865e322a..ba08341fb92c 100644 --- a/drivers/net/slip.c +++ b/drivers/net/slip.c @@ -367,7 +367,7 @@ static void sl_bump(struct slip *sl) memcpy(skb_put(skb, count), sl->rbuff, count); skb_reset_mac_header(skb); skb->protocol = htons(ETH_P_IP); - netif_rx(skb); + netif_rx_ni(skb); dev->stats.rx_packets++; } diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index fd622a66ebbf..a03336e086d5 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -53,7 +53,7 @@ #include <linux/usb/usbnet.h> #include <linux/usb/cdc.h> -#define DRIVER_VERSION "01-June-2011" +#define DRIVER_VERSION "04-Aug-2011" /* CDC NCM subclass 3.2.1 */ #define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10 @@ -163,35 +163,8 @@ 
cdc_ncm_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info)); } -static int -cdc_ncm_do_request(struct cdc_ncm_ctx *ctx, struct usb_cdc_notification *req, - void *data, u16 flags, u16 *actlen, u16 timeout) -{ - int err; - - err = usb_control_msg(ctx->udev, (req->bmRequestType & USB_DIR_IN) ? - usb_rcvctrlpipe(ctx->udev, 0) : - usb_sndctrlpipe(ctx->udev, 0), - req->bNotificationType, req->bmRequestType, - req->wValue, - req->wIndex, data, - req->wLength, timeout); - - if (err < 0) { - if (actlen) - *actlen = 0; - return err; - } - - if (actlen) - *actlen = err; - - return 0; -} - static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) { - struct usb_cdc_notification req; u32 val; u8 flags; u8 iface_no; @@ -200,14 +173,14 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber; - req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE; - req.bNotificationType = USB_CDC_GET_NTB_PARAMETERS; - req.wValue = 0; - req.wIndex = cpu_to_le16(iface_no); - req.wLength = cpu_to_le16(sizeof(ctx->ncm_parm)); - - err = cdc_ncm_do_request(ctx, &req, &ctx->ncm_parm, 0, NULL, 1000); - if (err) { + err = usb_control_msg(ctx->udev, + usb_rcvctrlpipe(ctx->udev, 0), + USB_CDC_GET_NTB_PARAMETERS, + USB_TYPE_CLASS | USB_DIR_IN + | USB_RECIP_INTERFACE, + 0, iface_no, &ctx->ncm_parm, + sizeof(ctx->ncm_parm), 10000); + if (err < 0) { pr_debug("failed GET_NTB_PARAMETERS\n"); return 1; } @@ -253,31 +226,26 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) /* inform device about NTB input size changes */ if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) { - req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | - USB_RECIP_INTERFACE; - req.bNotificationType = USB_CDC_SET_NTB_INPUT_SIZE; - req.wValue = 0; - req.wIndex = cpu_to_le16(iface_no); if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) { struct usb_cdc_ncm_ndp_input_size ndp_in_sz; - - req.wLength = 8; - ndp_in_sz.dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); - ndp_in_sz.wNtbInMaxDatagrams = - cpu_to_le16(CDC_NCM_DPT_DATAGRAMS_MAX); - ndp_in_sz.wReserved = 0; - err = cdc_ncm_do_request(ctx, &req, &ndp_in_sz, 0, NULL, - 1000); + err = usb_control_msg(ctx->udev, + usb_sndctrlpipe(ctx->udev, 0), + USB_CDC_SET_NTB_INPUT_SIZE, + USB_TYPE_CLASS | USB_DIR_OUT + | USB_RECIP_INTERFACE, + 0, iface_no, &ndp_in_sz, 8, 1000); } else { __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); - - req.wLength = 4; - err = cdc_ncm_do_request(ctx, &req, &dwNtbInMaxSize, 0, - NULL, 1000); + err = usb_control_msg(ctx->udev, + usb_sndctrlpipe(ctx->udev, 0), + USB_CDC_SET_NTB_INPUT_SIZE, + USB_TYPE_CLASS | USB_DIR_OUT + | USB_RECIP_INTERFACE, + 0, iface_no, &dwNtbInMaxSize, 4, 1000); } - if (err) + if (err < 0) pr_debug("Setting NTB Input Size failed\n"); } @@ -332,29 +300,24 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) /* set CRC Mode */ if (flags & USB_CDC_NCM_NCAP_CRC_MODE) { - req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | - USB_RECIP_INTERFACE; - req.bNotificationType = USB_CDC_SET_CRC_MODE; - req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED); - req.wIndex = cpu_to_le16(iface_no); - req.wLength = 0; - - err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); - if (err) + err = usb_control_msg(ctx->udev, usb_sndctrlpipe(ctx->udev, 0), + USB_CDC_SET_CRC_MODE, + USB_TYPE_CLASS | USB_DIR_OUT + | USB_RECIP_INTERFACE, + USB_CDC_NCM_CRC_NOT_APPENDED, + iface_no, NULL, 0, 1000); + if (err < 0) pr_debug("Setting CRC mode off failed\n"); 
} /* set NTB format, if both formats are supported */ if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) { - req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | - USB_RECIP_INTERFACE; - req.bNotificationType = USB_CDC_SET_NTB_FORMAT; - req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT); - req.wIndex = cpu_to_le16(iface_no); - req.wLength = 0; - - err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000); - if (err) + err = usb_control_msg(ctx->udev, usb_sndctrlpipe(ctx->udev, 0), + USB_CDC_SET_NTB_FORMAT, USB_TYPE_CLASS + | USB_DIR_OUT | USB_RECIP_INTERFACE, + USB_CDC_NCM_NTB16_FORMAT, + iface_no, NULL, 0, 1000); + if (err < 0) pr_debug("Setting NTB format to 16-bit failed\n"); } @@ -364,17 +327,13 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) { __le16 max_datagram_size; u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); - - req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | - USB_RECIP_INTERFACE; - req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE; - req.wValue = 0; - req.wIndex = cpu_to_le16(iface_no); - req.wLength = cpu_to_le16(2); - - err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, - 1000); - if (err) { + err = usb_control_msg(ctx->udev, usb_rcvctrlpipe(ctx->udev, 0), + USB_CDC_GET_MAX_DATAGRAM_SIZE, + USB_TYPE_CLASS | USB_DIR_IN + | USB_RECIP_INTERFACE, + 0, iface_no, &max_datagram_size, + 2, 1000); + if (err < 0) { pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n", CDC_NCM_MIN_DATAGRAM_SIZE); } else { @@ -395,17 +354,15 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) CDC_NCM_MIN_DATAGRAM_SIZE; /* if value changed, update device */ - req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | - USB_RECIP_INTERFACE; - req.bNotificationType = USB_CDC_SET_MAX_DATAGRAM_SIZE; - req.wValue = 0; - req.wIndex = cpu_to_le16(iface_no); - req.wLength = 2; - max_datagram_size = cpu_to_le16(ctx->max_datagram_size); - - err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, - 0, NULL, 1000); - if (err) + err = usb_control_msg(ctx->udev, + usb_sndctrlpipe(ctx->udev, 0), + USB_CDC_SET_MAX_DATAGRAM_SIZE, + USB_TYPE_CLASS | USB_DIR_OUT + | USB_RECIP_INTERFACE, + 0, + iface_no, &max_datagram_size, + 2, 1000); + if (err < 0) pr_debug("SET_MAX_DATAGRAM_SIZE failed\n"); } @@ -671,7 +628,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) u32 rem; u32 offset; u32 last_offset; - u16 n = 0; + u16 n = 0, index; u8 ready2send = 0; /* if there is a remaining skb, it gets priority */ @@ -859,8 +816,8 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) cpu_to_le16(sizeof(ctx->tx_ncm.nth16)); ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq); ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset); - ctx->tx_ncm.nth16.wNdpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16), - ctx->tx_ndp_modulus); + index = ALIGN(sizeof(struct usb_cdc_ncm_nth16), ctx->tx_ndp_modulus); + ctx->tx_ncm.nth16.wNdpIndex = cpu_to_le16(index); memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16)); ctx->tx_seq++; @@ -873,12 +830,11 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb) ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem); ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */ - memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex, + memcpy(((u8 *)skb_out->data) + index, &(ctx->tx_ncm.ndp16), sizeof(ctx->tx_ncm.ndp16)); - memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex + - sizeof(ctx->tx_ncm.ndp16), + memcpy(((u8 *)skb_out->data) + index + 
sizeof(ctx->tx_ncm.ndp16), &(ctx->tx_ncm.dpe16), (ctx->tx_curr_frame_num + 1) * sizeof(struct usb_cdc_ncm_dpe16)); diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 041fb7d43c4f..ef3b236b5145 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -977,7 +977,6 @@ static void rtl8150_disconnect(struct usb_interface *intf) usb_set_intfdata(intf, NULL); if (dev) { set_bit(RTL8150_UNPLUG, &dev->flags); - tasklet_disable(&dev->tl); tasklet_kill(&dev->tl); unregister_netdev(dev->netdev); unlink_all_urbs(dev); diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c index 9ff7c30573b8..44d9d8d56490 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c @@ -309,11 +309,7 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah, u8 i; u32 val; - if (ah->is_pciexpress != true) - return; - - /* Do not touch SerDes registers */ - if (ah->config.pcie_powersave_enable == 2) + if (ah->is_pciexpress != true || ah->aspm_enabled != true) return; /* Nothing to do on restore for 11N */ diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c index 8efdec247c02..ad2bb2bf4e8a 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c @@ -519,11 +519,7 @@ static void ar9003_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off) { - if (ah->is_pciexpress != true) - return; - - /* Do not touch SerDes registers */ - if (ah->config.pcie_powersave_enable == 2) + if (ah->is_pciexpress != true || ah->aspm_enabled != true) return; /* Nothing to do on restore for 11N */ diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 8006ce0c7357..8dcefe74f4c3 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -318,6 +318,14 @@ static void ath9k_hw_disablepcie(struct ath_hw *ah) REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); } +static void ath9k_hw_aspm_init(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + + if (common->bus_ops->aspm_init) + common->bus_ops->aspm_init(common); +} + /* This should work for all families including legacy */ static bool ath9k_hw_chip_test(struct ath_hw *ah) { @@ -378,7 +386,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah) ah->config.additional_swba_backoff = 0; ah->config.ack_6mb = 0x0; ah->config.cwm_ignore_extcca = 0; - ah->config.pcie_powersave_enable = 0; ah->config.pcie_clock_req = 0; ah->config.pcie_waen = 0; ah->config.analog_shiftreg = 1; @@ -598,7 +605,7 @@ static int __ath9k_hw_init(struct ath_hw *ah) if (ah->is_pciexpress) - ath9k_hw_configpcipowersave(ah, 0, 0); + ath9k_hw_aspm_init(ah); else ath9k_hw_disablepcie(ah); diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 6acd0f975ae1..c79889036ec4 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -219,7 +219,6 @@ struct ath9k_ops_config { int additional_swba_backoff; int ack_6mb; u32 cwm_ignore_extcca; - u8 pcie_powersave_enable; bool pcieSerDesWrite; u8 pcie_clock_req; u32 pcie_waen; @@ -673,6 +672,7 @@ struct ath_hw { bool sw_mgmt_crypto; bool is_pciexpress; + bool aspm_enabled; bool is_monitoring; bool need_an_top2_fixup; u16 tx_trig_level; @@ -874,6 +874,7 @@ struct ath_bus_ops { bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data); void (*bt_coex_prep)(struct ath_common *common); void 
(*extn_synch_en)(struct ath_common *common); + void (*aspm_init)(struct ath_common *common); }; static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah) diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index ac5107172f94..aa0ff7e2c922 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -670,8 +670,10 @@ static void ath9k_init_band_txpower(struct ath_softc *sc, int band) static void ath9k_init_txpower_limits(struct ath_softc *sc) { struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath9k_channel *curchan = ah->curchan; + ah->txchainmask = common->tx_chainmask; if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ); if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 3bad0b2cf9a3..be4ea1329813 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -16,6 +16,7 @@ #include <linux/nl80211.h> #include <linux/pci.h> +#include <linux/pci-aspm.h> #include <linux/ath9k_platform.h> #include "ath9k.h" @@ -115,12 +116,38 @@ static void ath_pci_extn_synch_enable(struct ath_common *common) pci_write_config_byte(pdev, sc->sc_ah->caps.pcie_lcr_offset, lnkctl); } +static void ath_pci_aspm_init(struct ath_common *common) +{ + struct ath_softc *sc = (struct ath_softc *) common->priv; + struct ath_hw *ah = sc->sc_ah; + struct pci_dev *pdev = to_pci_dev(sc->dev); + struct pci_dev *parent; + int pos; + u8 aspm; + + if (!pci_is_pcie(pdev)) + return; + + parent = pdev->bus->self; + if (WARN_ON(!parent)) + return; + + pos = pci_pcie_cap(parent); + pci_read_config_byte(parent, pos + PCI_EXP_LNKCTL, &aspm); + if (aspm & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1)) { + ah->aspm_enabled = true; + /* Initialize PCIe PM and SERDES registers. */ + ath9k_hw_configpcipowersave(ah, 0, 0); + } +} + static const struct ath_bus_ops ath_pci_bus_ops = { .ath_bus_type = ATH_PCI, .read_cachesize = ath_pci_read_cachesize, .eeprom_read = ath_pci_eeprom_read, .bt_coex_prep = ath_pci_bt_coex_prep, .extn_synch_en = ath_pci_extn_synch_enable, + .aspm_init = ath_pci_aspm_init, }; static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c index dab67a12d73b..73fe3cdf796b 100644 --- a/drivers/net/wireless/iwlegacy/iwl-3945.c +++ b/drivers/net/wireless/iwlegacy/iwl-3945.c @@ -1746,7 +1746,11 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) } memcpy(active_rxon, staging_rxon, sizeof(*active_rxon)); - + /* + * We do not commit tx power settings while channel changing, + * do it now if tx power changed. + */ + iwl_legacy_set_tx_power(priv, priv->tx_power_next, false); return 0; } diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c index bd4b000733f7..ecdc6e557428 100644 --- a/drivers/net/wireless/iwlegacy/iwl-4965.c +++ b/drivers/net/wireless/iwlegacy/iwl-4965.c @@ -1235,7 +1235,12 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); iwl_legacy_print_rx_config_cmd(priv, ctx); - goto set_tx_power; + /* + * We do not commit tx power settings while channel changing, + * do it now if tx power changed. 
+ */ + iwl_legacy_set_tx_power(priv, priv->tx_power_next, false); + return 0; } /* If we are currently associated and the new config requires @@ -1315,7 +1320,6 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c iwl4965_init_sensitivity(priv); -set_tx_power: /* If we issue a new RXON command which required a tune then we must * send a new TXPOWER command or we won't be able to Tx any frames */ ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true); diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index 3eeb12ebe6e9..c95cefd529dc 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c @@ -365,6 +365,7 @@ static struct iwl_base_params iwl5000_base_params = { .chain_noise_scale = 1000, .wd_timeout = IWL_LONG_WD_TIMEOUT, .max_event_log_size = 512, + .no_idle_support = true, }; static struct iwl_ht_params iwl5000_ht_params = { .ht_greenfield_support = true, diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h index 3e6bb734dcb7..02817a438550 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.h +++ b/drivers/net/wireless/iwlwifi/iwl-core.h @@ -135,6 +135,7 @@ struct iwl_mod_params { * @temperature_kelvin: temperature report by uCode in kelvin * @max_event_log_size: size of event log buffer size for ucode event logging * @shadow_reg_enable: HW shadhow register bit + * @no_idle_support: do not support idle mode */ struct iwl_base_params { int eeprom_size; @@ -156,6 +157,7 @@ struct iwl_base_params { bool temperature_kelvin; u32 max_event_log_size; const bool shadow_reg_enable; + const bool no_idle_support; }; /* * @advanced_bt_coexist: support advanced bt coexist diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c index fb7e436b40c7..69d4ec467dca 100644 --- a/drivers/net/wireless/iwlwifi/iwl-pci.c +++ b/drivers/net/wireless/iwlwifi/iwl-pci.c @@ -134,6 +134,7 @@ static void iwl_pci_apm_config(struct iwl_bus *bus) static void iwl_pci_set_drv_data(struct iwl_bus *bus, void *drv_data) { bus->drv_data = drv_data; + pci_set_drvdata(IWL_BUS_GET_PCI_DEV(bus), drv_data); } static void iwl_pci_get_hw_id(struct iwl_bus *bus, char buf[], @@ -454,8 +455,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); } - pci_set_drvdata(pdev, bus); - bus->dev = &pdev->dev; bus->irq = pdev->irq; bus->ops = &pci_ops; @@ -494,11 +493,12 @@ static void iwl_pci_down(struct iwl_bus *bus) static void __devexit iwl_pci_remove(struct pci_dev *pdev) { - struct iwl_bus *bus = pci_get_drvdata(pdev); + struct iwl_priv *priv = pci_get_drvdata(pdev); + void *bus_specific = priv->bus->bus_specific; - iwl_remove(bus->drv_data); + iwl_remove(priv); - iwl_pci_down(bus); + iwl_pci_down(bus_specific); } #ifdef CONFIG_PM @@ -506,20 +506,20 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev) static int iwl_pci_suspend(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); - struct iwl_bus *bus = pci_get_drvdata(pdev); + struct iwl_priv *priv = pci_get_drvdata(pdev); /* Before you put code here, think about WoWLAN. You cannot check here * whether WoWLAN is enabled or not, and your code will run even if * WoWLAN is enabled - don't kill the NIC, someone may need it in Sx. 
*/ - return iwl_suspend(bus->drv_data); + return iwl_suspend(priv); } static int iwl_pci_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); - struct iwl_bus *bus = pci_get_drvdata(pdev); + struct iwl_priv *priv = pci_get_drvdata(pdev); /* Before you put code here, think about WoWLAN. You cannot check here * whether WoWLAN is enabled or not, and your code will run even if @@ -532,7 +532,7 @@ static int iwl_pci_resume(struct device *device) */ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); - return iwl_resume(bus->drv_data); + return iwl_resume(priv); } static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume); diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c index 3ec619c6881c..cd64df05f9ed 100644 --- a/drivers/net/wireless/iwlwifi/iwl-power.c +++ b/drivers/net/wireless/iwlwifi/iwl-power.c @@ -349,7 +349,8 @@ static void iwl_power_build_cmd(struct iwl_priv *priv, if (priv->wowlan) iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper); - else if (priv->hw->conf.flags & IEEE80211_CONF_IDLE) + else if (!priv->cfg->base_params->no_idle_support && + priv->hw->conf.flags & IEEE80211_CONF_IDLE) iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20); else if (iwl_tt_is_low_power_state(priv)) { /* in thermal throttling low power state */ diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 84ab7d1acb6a..ef67f6786a84 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -703,8 +703,7 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc) /* * Add space for the TXWI in front of the skb. */ - skb_push(entry->skb, TXWI_DESC_SIZE); - memset(entry->skb, 0, TXWI_DESC_SIZE); + memset(skb_push(entry->skb, TXWI_DESC_SIZE), 0, TXWI_DESC_SIZE); /* * Register descriptor details in skb frame descriptor. diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h index 15cdc7e57fc4..4cdf247a870d 100644 --- a/drivers/net/wireless/rt2x00/rt2x00lib.h +++ b/drivers/net/wireless/rt2x00/rt2x00lib.h @@ -355,7 +355,8 @@ static inline enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf * return CIPHER_NONE; } -static inline void rt2x00crypto_create_tx_descriptor(struct queue_entry *entry, +static inline void rt2x00crypto_create_tx_descriptor(struct rt2x00_dev *rt2x00dev, + struct sk_buff *skb, struct txentry_desc *txdesc) { } diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c index 8efab3983528..4ccf23805973 100644 --- a/drivers/net/wireless/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c @@ -113,7 +113,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) * due to possible race conditions in mac80211. */ if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) - goto exit_fail; + goto exit_free_skb; /* * Use the ATIM queue if appropriate and present. 
@@ -127,7 +127,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) ERROR(rt2x00dev, "Attempt to send packet over invalid queue %d.\n" "Please file bug report to %s.\n", qid, DRV_PROJECT); - goto exit_fail; + goto exit_free_skb; } /* @@ -159,6 +159,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) exit_fail: rt2x00queue_pause_queue(queue); + exit_free_skb: dev_kfree_skb_any(skb); } EXPORT_SYMBOL_GPL(rt2x00mac_tx); diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c index 5efd57833489..56f12358389d 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c @@ -1696,15 +1696,17 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev, pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn); pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn); - /*find bridge info */ - pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor; - for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) { - if (bridge_pdev->vendor == pcibridge_vendors[tmp]) { - pcipriv->ndis_adapter.pcibridge_vendor = tmp; - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - ("Pci Bridge Vendor is found index: %d\n", - tmp)); - break; + if (bridge_pdev) { + /*find bridge info if available */ + pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor; + for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) { + if (bridge_pdev->vendor == pcibridge_vendors[tmp]) { + pcipriv->ndis_adapter.pcibridge_vendor = tmp; + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + ("Pci Bridge Vendor is found index:" + " %d\n", tmp)); + break; + } } } diff --git a/drivers/of/address.c b/drivers/of/address.c index da1f4b9605df..72c33fbe451d 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c @@ -610,6 +610,6 @@ void __iomem *of_iomap(struct device_node *np, int index) if (of_address_to_resource(np, index, &res)) return NULL; - return ioremap(res.start, 1 + res.end - res.start); + return ioremap(res.start, resource_size(&res)); } EXPORT_SYMBOL(of_iomap); diff --git a/drivers/of/base.c b/drivers/of/base.c index 02ed36719def..3ff22e32b602 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -610,8 +610,9 @@ EXPORT_SYMBOL(of_find_node_by_phandle); * * The out_value is modified only if a valid u32 value can be decoded. */ -int of_property_read_u32_array(const struct device_node *np, char *propname, - u32 *out_values, size_t sz) +int of_property_read_u32_array(const struct device_node *np, + const char *propname, u32 *out_values, + size_t sz) { struct property *prop = of_find_property(np, propname, NULL); const __be32 *val; @@ -645,7 +646,7 @@ EXPORT_SYMBOL_GPL(of_property_read_u32_array); * * The out_string pointer is modified only if a valid string can be decoded. */ -int of_property_read_string(struct device_node *np, char *propname, +int of_property_read_string(struct device_node *np, const char *propname, const char **out_string) { struct property *prop = of_find_property(np, propname, NULL); diff --git a/drivers/of/gpio.c b/drivers/of/gpio.c index 3007662ac614..ef0105fa52b1 100644 --- a/drivers/of/gpio.c +++ b/drivers/of/gpio.c @@ -127,8 +127,8 @@ EXPORT_SYMBOL(of_gpio_count); * gpio chips. This function performs only one sanity check: whether gpio * is less than ngpios (that is specified in the gpio_chip). 
*/ -static int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np, - const void *gpio_spec, u32 *flags) +int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np, + const void *gpio_spec, u32 *flags) { const __be32 *gpio = gpio_spec; const u32 n = be32_to_cpup(gpio); @@ -152,6 +152,7 @@ static int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np, return n; } +EXPORT_SYMBOL(of_gpio_simple_xlate); /** * of_mm_gpiochip_add - Add memory mapped GPIO chip (bank) diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index a70fa89f76fd..220285760b68 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -110,7 +110,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val, } -static struct acpi_dock_ops acpiphp_dock_ops = { +static const struct acpi_dock_ops acpiphp_dock_ops = { .handler = handle_hotplug_event_func, }; diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 45e0191c35dd..1e88d4785321 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -769,4 +769,12 @@ config INTEL_OAKTRAIL enable/disable the Camera, WiFi, BT etc. devices. If in doubt, say Y here; it will only load on supported platforms. +config SAMSUNG_Q10 + tristate "Samsung Q10 Extras" + depends on SERIO_I8042 + select BACKLIGHT_CLASS_DEVICE + ---help--- + This driver provides support for backlight control on Samsung Q10 + and related laptops, including Dell Latitude X200. + endif # X86_PLATFORM_DEVICES diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index afc1f832aa67..293a320d9faa 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -44,3 +44,4 @@ obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o obj-$(CONFIG_MXM_WMI) += mxm-wmi.o obj-$(CONFIG_INTEL_MID_POWER_BUTTON) += intel_mid_powerbtn.o obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o +obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index e1c4938b301b..af2bb20cb2fb 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -99,6 +99,7 @@ enum acer_wmi_event_ids { static const struct key_entry acer_wmi_keymap[] = { {KE_KEY, 0x01, {KEY_WLAN} }, /* WiFi */ {KE_KEY, 0x03, {KEY_WLAN} }, /* WiFi */ + {KE_KEY, 0x04, {KEY_WLAN} }, /* WiFi */ {KE_KEY, 0x12, {KEY_BLUETOOTH} }, /* BT */ {KE_KEY, 0x21, {KEY_PROG1} }, /* Backup */ {KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */ @@ -304,6 +305,10 @@ static struct quirk_entry quirk_fujitsu_amilo_li_1718 = { .wireless = 2, }; +static struct quirk_entry quirk_lenovo_ideapad_s205 = { + .wireless = 3, +}; + /* The Aspire One has a dummy ACPI-WMI interface - disable it */ static struct dmi_system_id __devinitdata acer_blacklist[] = { { @@ -450,6 +455,15 @@ static struct dmi_system_id acer_quirks[] = { }, .driver_data = &quirk_medion_md_98300, }, + { + .callback = dmi_matched, + .ident = "Lenovo Ideapad S205", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "10382LG"), + }, + .driver_data = &quirk_lenovo_ideapad_s205, + }, {} }; @@ -542,6 +556,12 @@ struct wmi_interface *iface) return AE_ERROR; *value = result & 0x1; return AE_OK; + case 3: + err = ec_read(0x78, &result); + if (err) + return AE_ERROR; + *value = result & 0x1; + return AE_OK; default: err = ec_read(0xA, &result); if (err) @@ -1266,8 +1286,13 @@ static void acer_rfkill_update(struct work_struct *ignored) 
acpi_status status; status = get_u32(&state, ACER_CAP_WIRELESS); - if (ACPI_SUCCESS(status)) - rfkill_set_sw_state(wireless_rfkill, !state); + if (ACPI_SUCCESS(status)) { + if (quirks->wireless == 3) { + rfkill_set_hw_state(wireless_rfkill, !state); + } else { + rfkill_set_sw_state(wireless_rfkill, !state); + } + } if (has_cap(ACER_CAP_BLUETOOTH)) { status = get_u32(&state, ACER_CAP_BLUETOOTH); @@ -1400,6 +1425,9 @@ static ssize_t show_bool_threeg(struct device *dev, { u32 result; \ acpi_status status; + + pr_info("This threeg sysfs will be removed in 2012" + " - used by: %s\n", current->comm); if (wmi_has_guid(WMID_GUID3)) status = wmid3_get_device_status(&result, ACER_WMID3_GDS_THREEG); @@ -1415,8 +1443,10 @@ static ssize_t set_bool_threeg(struct device *dev, { u32 tmp = simple_strtoul(buf, NULL, 10); acpi_status status = set_u32(tmp, ACER_CAP_THREEG); - if (ACPI_FAILURE(status)) - return -EINVAL; + pr_info("This threeg sysfs will be removed in 2012" + " - used by: %s\n", current->comm); + if (ACPI_FAILURE(status)) + return -EINVAL; return count; } static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg, @@ -1425,6 +1455,8 @@ static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg, static ssize_t show_interface(struct device *dev, struct device_attribute *attr, char *buf) { + pr_info("This interface sysfs will be removed in 2012" + " - used by: %s\n", current->comm); switch (interface->type) { case ACER_AMW0: return sprintf(buf, "AMW0\n"); diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c index fca3489218b7..760c6d7624fe 100644 --- a/drivers/platform/x86/acerhdf.c +++ b/drivers/platform/x86/acerhdf.c @@ -182,6 +182,7 @@ static const struct bios_settings_t bios_tbl[] = { {"Acer", "Aspire 1810T", "v1.3308", 0x55, 0x58, {0x9e, 0x00} }, {"Acer", "Aspire 1810TZ", "v1.3310", 0x55, 0x58, {0x9e, 0x00} }, {"Acer", "Aspire 1810T", "v1.3310", 0x55, 0x58, {0x9e, 0x00} }, + {"Acer", "Aspire 1810TZ", "v1.3314", 0x55, 0x58, {0x9e, 0x00} }, /* Acer 531 */ {"Acer", "AO531h", "v0.3201", 0x55, 0x58, {0x20, 0x00} }, /* Gateway */ @@ -703,15 +704,15 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Peter Feuerer"); MODULE_DESCRIPTION("Aspire One temperature and fan driver"); MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:"); -MODULE_ALIAS("dmi:*:*Acer*:pnAspire 1410*:"); -MODULE_ALIAS("dmi:*:*Acer*:pnAspire 1810*:"); +MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1410*:"); +MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1810*:"); MODULE_ALIAS("dmi:*:*Acer*:pnAO531*:"); MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:"); MODULE_ALIAS("dmi:*:*Gateway*:pnLT31*:"); -MODULE_ALIAS("dmi:*:*Packard Bell*:pnAOA*:"); -MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOA*:"); -MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOTMU*:"); -MODULE_ALIAS("dmi:*:*Packard Bell*:pnDOTMA*:"); +MODULE_ALIAS("dmi:*:*Packard*Bell*:pnAOA*:"); +MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOA*:"); +MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMU*:"); +MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMA*:"); module_init(acerhdf_init); module_exit(acerhdf_exit); diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index d65df92e2acc..fa6d7ec68b26 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c @@ -70,11 +70,10 @@ MODULE_LICENSE("GPL"); * WAPF defines the behavior of the Fn+Fx wlan key * The significance of values is yet to be found, but * most of the time: - * 0x0 will do nothing - * 0x1 will allow to control the device with Fn+Fx key. 
- * 0x4 will send an ACPI event (0x88) while pressing the Fn+Fx key - * 0x5 like 0x1 or 0x4 - * So, if something doesn't work as you want, just try other values =) + * Bit | Bluetooth | WLAN + * 0 | Hardware | Hardware + * 1 | Hardware | Software + * 4 | Software | Software */ static uint wapf = 1; module_param(wapf, uint, 0444); diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index 0580d99b0798..b0859d4183e8 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -38,6 +38,24 @@ MODULE_LICENSE("GPL"); MODULE_ALIAS("wmi:"ASUS_NB_WMI_EVENT_GUID); +/* + * WAPF defines the behavior of the Fn+Fx wlan key + * The significance of values is yet to be found, but + * most of the time: + * Bit | Bluetooth | WLAN + * 0 | Hardware | Hardware + * 1 | Hardware | Software + * 4 | Software | Software + */ +static uint wapf; +module_param(wapf, uint, 0444); +MODULE_PARM_DESC(wapf, "WAPF value"); + +static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver) +{ + driver->wapf = wapf; +} + static const struct key_entry asus_nb_wmi_keymap[] = { { KE_KEY, 0x30, { KEY_VOLUMEUP } }, { KE_KEY, 0x31, { KEY_VOLUMEDOWN } }, @@ -53,16 +71,16 @@ static const struct key_entry asus_nb_wmi_keymap[] = { { KE_KEY, 0x51, { KEY_WWW } }, { KE_KEY, 0x55, { KEY_CALC } }, { KE_KEY, 0x5C, { KEY_F15 } }, /* Power Gear key */ - { KE_KEY, 0x5D, { KEY_WLAN } }, - { KE_KEY, 0x5E, { KEY_WLAN } }, - { KE_KEY, 0x5F, { KEY_WLAN } }, + { KE_KEY, 0x5D, { KEY_WLAN } }, /* Wireless console Toggle */ + { KE_KEY, 0x5E, { KEY_WLAN } }, /* Wireless console Enable */ + { KE_KEY, 0x5F, { KEY_WLAN } }, /* Wireless console Disable */ { KE_KEY, 0x60, { KEY_SWITCHVIDEOMODE } }, { KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } }, { KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } }, { KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } }, { KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } }, - { KE_KEY, 0x7E, { KEY_BLUETOOTH } }, { KE_KEY, 0x7D, { KEY_BLUETOOTH } }, + { KE_KEY, 0x7E, { KEY_BLUETOOTH } }, { KE_KEY, 0x82, { KEY_CAMERA } }, { KE_KEY, 0x88, { KEY_RFKILL } }, { KE_KEY, 0x8A, { KEY_PROG1 } }, @@ -81,6 +99,7 @@ static struct asus_wmi_driver asus_nb_wmi_driver = { .keymap = asus_nb_wmi_keymap, .input_name = "Asus WMI hotkeys", .input_phys = ASUS_NB_WMI_FILE "/input0", + .quirks = asus_nb_wmi_quirks, }; diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 65b66aa44c78..95cba9ebf6c0 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -44,6 +44,7 @@ #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/platform_device.h> +#include <linux/thermal.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> @@ -66,6 +67,8 @@ MODULE_LICENSE("GPL"); #define NOTIFY_BRNUP_MAX 0x1f #define NOTIFY_BRNDOWN_MIN 0x20 #define NOTIFY_BRNDOWN_MAX 0x2e +#define NOTIFY_KBD_BRTUP 0xc4 +#define NOTIFY_KBD_BRTDWN 0xc5 /* WMI Methods */ #define ASUS_WMI_METHODID_SPEC 0x43455053 /* BIOS SPECification */ @@ -93,6 +96,7 @@ MODULE_LICENSE("GPL"); /* Wireless */ #define ASUS_WMI_DEVID_HW_SWITCH 0x00010001 #define ASUS_WMI_DEVID_WIRELESS_LED 0x00010002 +#define ASUS_WMI_DEVID_CWAP 0x00010003 #define ASUS_WMI_DEVID_WLAN 0x00010011 #define ASUS_WMI_DEVID_BLUETOOTH 0x00010013 #define ASUS_WMI_DEVID_GPS 0x00010015 @@ -102,6 +106,12 @@ MODULE_LICENSE("GPL"); /* Leds */ /* 0x000200XX and 0x000400XX */ +#define ASUS_WMI_DEVID_LED1 0x00020011 +#define ASUS_WMI_DEVID_LED2 0x00020012 +#define ASUS_WMI_DEVID_LED3 0x00020013 +#define ASUS_WMI_DEVID_LED4 0x00020014 +#define 
ASUS_WMI_DEVID_LED5 0x00020015 +#define ASUS_WMI_DEVID_LED6 0x00020016 /* Backlight and Brightness */ #define ASUS_WMI_DEVID_BACKLIGHT 0x00050011 @@ -174,13 +184,18 @@ struct asus_wmi { struct led_classdev tpd_led; int tpd_led_wk; + struct led_classdev kbd_led; + int kbd_led_wk; struct workqueue_struct *led_workqueue; struct work_struct tpd_led_work; + struct work_struct kbd_led_work; struct asus_rfkill wlan; struct asus_rfkill bluetooth; struct asus_rfkill wimax; struct asus_rfkill wwan3g; + struct asus_rfkill gps; + struct asus_rfkill uwb; struct hotplug_slot *hotplug_slot; struct mutex hotplug_lock; @@ -205,6 +220,7 @@ static int asus_wmi_input_init(struct asus_wmi *asus) asus->inputdev->phys = asus->driver->input_phys; asus->inputdev->id.bustype = BUS_HOST; asus->inputdev->dev.parent = &asus->platform_device->dev; + set_bit(EV_REP, asus->inputdev->evbit); err = sparse_keymap_setup(asus->inputdev, asus->driver->keymap, NULL); if (err) @@ -359,30 +375,80 @@ static enum led_brightness tpd_led_get(struct led_classdev *led_cdev) return read_tpd_led_state(asus); } -static int asus_wmi_led_init(struct asus_wmi *asus) +static void kbd_led_update(struct work_struct *work) { - int rv; + int ctrl_param = 0; + struct asus_wmi *asus; - if (read_tpd_led_state(asus) < 0) - return 0; + asus = container_of(work, struct asus_wmi, kbd_led_work); - asus->led_workqueue = create_singlethread_workqueue("led_workqueue"); - if (!asus->led_workqueue) - return -ENOMEM; - INIT_WORK(&asus->tpd_led_work, tpd_led_update); + /* + * bits 0-2: level + * bit 7: light on/off + */ + if (asus->kbd_led_wk > 0) + ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F); - asus->tpd_led.name = "asus::touchpad"; - asus->tpd_led.brightness_set = tpd_led_set; - asus->tpd_led.brightness_get = tpd_led_get; - asus->tpd_led.max_brightness = 1; + asus_wmi_set_devstate(ASUS_WMI_DEVID_KBD_BACKLIGHT, ctrl_param, NULL); +} - rv = led_classdev_register(&asus->platform_device->dev, &asus->tpd_led); - if (rv) { - destroy_workqueue(asus->led_workqueue); - return rv; +static int kbd_led_read(struct asus_wmi *asus, int *level, int *env) +{ + int retval; + + /* + * bits 0-2: level + * bit 7: light on/off + * bit 8-10: environment (0: dark, 1: normal, 2: light) + * bit 17: status unknown + */ + retval = asus_wmi_get_devstate_bits(asus, ASUS_WMI_DEVID_KBD_BACKLIGHT, + 0xFFFF); + + /* Unknown status is considered as off */ + if (retval == 0x8000) + retval = 0; + + if (retval >= 0) { + if (level) + *level = retval & 0x80 ? 
retval & 0x7F : 0; + if (env) + *env = (retval >> 8) & 0x7F; + retval = 0; } - return 0; + return retval; +} + +static void kbd_led_set(struct led_classdev *led_cdev, + enum led_brightness value) +{ + struct asus_wmi *asus; + + asus = container_of(led_cdev, struct asus_wmi, kbd_led); + + if (value > asus->kbd_led.max_brightness) + value = asus->kbd_led.max_brightness; + else if (value < 0) + value = 0; + + asus->kbd_led_wk = value; + queue_work(asus->led_workqueue, &asus->kbd_led_work); +} + +static enum led_brightness kbd_led_get(struct led_classdev *led_cdev) +{ + struct asus_wmi *asus; + int retval, value; + + asus = container_of(led_cdev, struct asus_wmi, kbd_led); + + retval = kbd_led_read(asus, &value, NULL); + + if (retval < 0) + return retval; + + return value; } static void asus_wmi_led_exit(struct asus_wmi *asus) @@ -393,6 +459,48 @@ static void asus_wmi_led_exit(struct asus_wmi *asus) destroy_workqueue(asus->led_workqueue); } +static int asus_wmi_led_init(struct asus_wmi *asus) +{ + int rv = 0; + + asus->led_workqueue = create_singlethread_workqueue("led_workqueue"); + if (!asus->led_workqueue) + return -ENOMEM; + + if (read_tpd_led_state(asus) >= 0) { + INIT_WORK(&asus->tpd_led_work, tpd_led_update); + + asus->tpd_led.name = "asus::touchpad"; + asus->tpd_led.brightness_set = tpd_led_set; + asus->tpd_led.brightness_get = tpd_led_get; + asus->tpd_led.max_brightness = 1; + + rv = led_classdev_register(&asus->platform_device->dev, + &asus->tpd_led); + if (rv) + goto error; + } + + if (kbd_led_read(asus, NULL, NULL) >= 0) { + INIT_WORK(&asus->kbd_led_work, kbd_led_update); + + asus->kbd_led.name = "asus::kbd_backlight"; + asus->kbd_led.brightness_set = kbd_led_set; + asus->kbd_led.brightness_get = kbd_led_get; + asus->kbd_led.max_brightness = 3; + + rv = led_classdev_register(&asus->platform_device->dev, + &asus->kbd_led); + } + +error: + if (rv) + asus_wmi_led_exit(asus); + + return rv; +} + + /* * PCI hotplug (for wlan rfkill) */ @@ -729,6 +837,16 @@ static void asus_wmi_rfkill_exit(struct asus_wmi *asus) rfkill_destroy(asus->wwan3g.rfkill); asus->wwan3g.rfkill = NULL; } + if (asus->gps.rfkill) { + rfkill_unregister(asus->gps.rfkill); + rfkill_destroy(asus->gps.rfkill); + asus->gps.rfkill = NULL; + } + if (asus->uwb.rfkill) { + rfkill_unregister(asus->uwb.rfkill); + rfkill_destroy(asus->uwb.rfkill); + asus->uwb.rfkill = NULL; + } } static int asus_wmi_rfkill_init(struct asus_wmi *asus) @@ -763,6 +881,18 @@ static int asus_wmi_rfkill_init(struct asus_wmi *asus) if (result && result != -ENODEV) goto exit; + result = asus_new_rfkill(asus, &asus->gps, "asus-gps", + RFKILL_TYPE_GPS, ASUS_WMI_DEVID_GPS); + + if (result && result != -ENODEV) + goto exit; + + result = asus_new_rfkill(asus, &asus->uwb, "asus-uwb", + RFKILL_TYPE_UWB, ASUS_WMI_DEVID_UWB); + + if (result && result != -ENODEV) + goto exit; + if (!asus->driver->hotplug_wireless) goto exit; @@ -797,8 +927,8 @@ exit: * Hwmon device */ static ssize_t asus_hwmon_pwm1(struct device *dev, - struct device_attribute *attr, - char *buf) + struct device_attribute *attr, + char *buf) { struct asus_wmi *asus = dev_get_drvdata(dev); u32 value; @@ -809,7 +939,7 @@ static ssize_t asus_hwmon_pwm1(struct device *dev, if (err < 0) return err; - value |= 0xFF; + value &= 0xFF; if (value == 1) /* Low Speed */ value = 85; @@ -825,7 +955,26 @@ static ssize_t asus_hwmon_pwm1(struct device *dev, return sprintf(buf, "%d\n", value); } +static ssize_t asus_hwmon_temp1(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct asus_wmi 
*asus = dev_get_drvdata(dev); + u32 value; + int err; + + err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_THERMAL_CTRL, &value); + + if (err < 0) + return err; + + value = KELVIN_TO_CELSIUS((value & 0xFFFF)) * 1000; + + return sprintf(buf, "%d\n", value); +} + static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO, asus_hwmon_pwm1, NULL, 0); +static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, asus_hwmon_temp1, NULL, 0); static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) @@ -836,12 +985,13 @@ static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0); static struct attribute *hwmon_attributes[] = { &sensor_dev_attr_pwm1.dev_attr.attr, + &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_name.dev_attr.attr, NULL }; static mode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj, - struct attribute *attr, int idx) + struct attribute *attr, int idx) { struct device *dev = container_of(kobj, struct device, kobj); struct platform_device *pdev = to_platform_device(dev->parent); @@ -852,6 +1002,8 @@ static mode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj, if (attr == &sensor_dev_attr_pwm1.dev_attr.attr) dev_id = ASUS_WMI_DEVID_FAN_CTRL; + else if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr) + dev_id = ASUS_WMI_DEVID_THERMAL_CTRL; if (dev_id != -1) { int err = asus_wmi_get_devstate(asus, dev_id, &value); @@ -869,9 +1021,13 @@ static mode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj, * - reverved bits are non-zero * - sfun and presence bit are not set */ - if (value != ASUS_WMI_UNSUPPORTED_METHOD || value & 0xFFF80000 + if (value == ASUS_WMI_UNSUPPORTED_METHOD || value & 0xFFF80000 || (!asus->sfun && !(value & ASUS_WMI_DSTS_PRESENCE_BIT))) ok = false; + } else if (dev_id == ASUS_WMI_DEVID_THERMAL_CTRL) { + /* If value is zero, something is clearly wrong */ + if (value == 0) + ok = false; } return ok ? 
attr->mode : 0; @@ -904,6 +1060,7 @@ static int asus_wmi_hwmon_init(struct asus_wmi *asus) pr_err("Could not register asus hwmon device\n"); return PTR_ERR(hwmon); } + dev_set_drvdata(hwmon, asus); asus->hwmon_device = hwmon; result = sysfs_create_group(&hwmon->kobj, &hwmon_attribute_group); if (result) @@ -1060,6 +1217,8 @@ static void asus_wmi_notify(u32 value, void *context) acpi_status status; int code; int orig_code; + unsigned int key_value = 1; + bool autorelease = 1; status = wmi_get_event_data(value, &response); if (status != AE_OK) { @@ -1075,6 +1234,13 @@ static void asus_wmi_notify(u32 value, void *context) code = obj->integer.value; orig_code = code; + if (asus->driver->key_filter) { + asus->driver->key_filter(asus->driver, &code, &key_value, + &autorelease); + if (code == ASUS_WMI_KEY_IGNORE) + goto exit; + } + if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX) code = NOTIFY_BRNUP_MIN; else if (code >= NOTIFY_BRNDOWN_MIN && @@ -1084,7 +1250,8 @@ static void asus_wmi_notify(u32 value, void *context) if (code == NOTIFY_BRNUP_MIN || code == NOTIFY_BRNDOWN_MIN) { if (!acpi_video_backlight_support()) asus_wmi_backlight_notify(asus, orig_code); - } else if (!sparse_keymap_report_event(asus->inputdev, code, 1, true)) + } else if (!sparse_keymap_report_event(asus->inputdev, code, + key_value, autorelease)) pr_info("Unknown key %x pressed\n", code); exit: @@ -1164,14 +1331,18 @@ ASUS_WMI_CREATE_DEVICE_ATTR(cardr, 0644, ASUS_WMI_DEVID_CARDREADER); static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - int value; + int value, rv; if (!count || sscanf(buf, "%i", &value) != 1) return -EINVAL; if (value < 0 || value > 2) return -EINVAL; - return asus_wmi_evaluate_method(ASUS_WMI_METHODID_CFVS, value, 0, NULL); + rv = asus_wmi_evaluate_method(ASUS_WMI_METHODID_CFVS, value, 0, NULL); + if (rv < 0) + return rv; + + return count; } static DEVICE_ATTR(cpufv, S_IRUGO | S_IWUSR, NULL, store_cpufv); @@ -1234,7 +1405,7 @@ static int asus_wmi_platform_init(struct asus_wmi *asus) /* We don't know yet what to do with this version... 
*/ if (!asus_wmi_evaluate_method(ASUS_WMI_METHODID_SPEC, 0, 0x9, &rv)) { - pr_info("BIOS WMI version: %d.%d", rv >> 8, rv & 0xFF); + pr_info("BIOS WMI version: %d.%d", rv >> 16, rv & 0xFF); asus->spec = rv; } @@ -1266,6 +1437,12 @@ static int asus_wmi_platform_init(struct asus_wmi *asus) return -ENODEV; } + /* CWAP allow to define the behavior of the Fn+F2 key, + * this method doesn't seems to be present on Eee PCs */ + if (asus->driver->wapf >= 0) + asus_wmi_set_devstate(ASUS_WMI_DEVID_CWAP, + asus->driver->wapf, NULL); + return asus_wmi_sysfs_init(asus->platform_device); } @@ -1568,6 +1745,14 @@ static int asus_hotk_restore(struct device *device) bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WWAN3G); rfkill_set_sw_state(asus->wwan3g.rfkill, bl); } + if (asus->gps.rfkill) { + bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPS); + rfkill_set_sw_state(asus->gps.rfkill, bl); + } + if (asus->uwb.rfkill) { + bl = !asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_UWB); + rfkill_set_sw_state(asus->uwb.rfkill, bl); + } return 0; } @@ -1604,7 +1789,7 @@ static int asus_wmi_probe(struct platform_device *pdev) static bool used; -int asus_wmi_register_driver(struct asus_wmi_driver *driver) +int __init_or_module asus_wmi_register_driver(struct asus_wmi_driver *driver) { struct platform_driver *platform_driver; struct platform_device *platform_device; diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h index c044522c8766..8147c10161cc 100644 --- a/drivers/platform/x86/asus-wmi.h +++ b/drivers/platform/x86/asus-wmi.h @@ -29,12 +29,15 @@ #include <linux/platform_device.h> +#define ASUS_WMI_KEY_IGNORE (-1) + struct module; struct key_entry; struct asus_wmi; struct asus_wmi_driver { bool hotplug_wireless; + int wapf; const char *name; struct module *owner; @@ -44,6 +47,10 @@ struct asus_wmi_driver { const struct key_entry *keymap; const char *input_name; const char *input_phys; + /* Returns new code, value, and autorelease values in arguments. + * Return ASUS_WMI_KEY_IGNORE in code if event should be ignored. 
*/ + void (*key_filter) (struct asus_wmi_driver *driver, int *code, + unsigned int *value, bool *autorelease); int (*probe) (struct platform_device *device); void (*quirks) (struct asus_wmi_driver *driver); diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index e39ab1d3ed87..f31fa4efa725 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -612,7 +612,6 @@ static int __init dell_init(void) if (!bufferpage) goto fail_buffer; buffer = page_address(bufferpage); - mutex_init(&buffer_mutex); ret = dell_setup_rfkill(); diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c index ce790827e199..fa9a2171cc13 100644 --- a/drivers/platform/x86/dell-wmi.c +++ b/drivers/platform/x86/dell-wmi.c @@ -54,6 +54,8 @@ MODULE_ALIAS("wmi:"DELL_EVENT_GUID); */ static const struct key_entry dell_wmi_legacy_keymap[] __initconst = { + { KE_IGNORE, 0x003a, { KEY_CAPSLOCK } }, + { KE_KEY, 0xe045, { KEY_PROG1 } }, { KE_KEY, 0xe009, { KEY_EJECTCD } }, @@ -85,6 +87,11 @@ static const struct key_entry dell_wmi_legacy_keymap[] __initconst = { { KE_IGNORE, 0xe013, { KEY_RESERVED } }, { KE_IGNORE, 0xe020, { KEY_MUTE } }, + + /* Shortcut and audio panel keys */ + { KE_IGNORE, 0xe025, { KEY_RESERVED } }, + { KE_IGNORE, 0xe026, { KEY_RESERVED } }, + { KE_IGNORE, 0xe02e, { KEY_VOLUMEDOWN } }, { KE_IGNORE, 0xe030, { KEY_VOLUMEUP } }, { KE_IGNORE, 0xe033, { KEY_KBDILLUMUP } }, @@ -92,6 +99,9 @@ static const struct key_entry dell_wmi_legacy_keymap[] __initconst = { { KE_IGNORE, 0xe03a, { KEY_CAPSLOCK } }, { KE_IGNORE, 0xe045, { KEY_NUMLOCK } }, { KE_IGNORE, 0xe046, { KEY_SCROLLLOCK } }, + { KE_IGNORE, 0xe0f7, { KEY_MUTE } }, + { KE_IGNORE, 0xe0f8, { KEY_VOLUMEDOWN } }, + { KE_IGNORE, 0xe0f9, { KEY_VOLUMEUP } }, { KE_END, 0 } }; diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c index 4aa867a9b88b..9f6e64302b45 100644 --- a/drivers/platform/x86/eeepc-wmi.c +++ b/drivers/platform/x86/eeepc-wmi.c @@ -56,6 +56,11 @@ MODULE_PARM_DESC(hotplug_wireless, "If your laptop needs that, please report to " "acpi4asus-user@lists.sourceforge.net."); +/* Values for T101MT "Home" key */ +#define HOME_PRESS 0xe4 +#define HOME_HOLD 0xea +#define HOME_RELEASE 0xe5 + static const struct key_entry eeepc_wmi_keymap[] = { /* Sleep already handled via generic ACPI code */ { KE_KEY, 0x30, { KEY_VOLUMEUP } }, @@ -71,6 +76,7 @@ static const struct key_entry eeepc_wmi_keymap[] = { { KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } }, { KE_KEY, 0xe0, { KEY_PROG1 } }, /* Task Manager */ { KE_KEY, 0xe1, { KEY_F14 } }, /* Change Resolution */ + { KE_KEY, HOME_PRESS, { KEY_CONFIG } }, /* Home/Express gate key */ { KE_KEY, 0xe8, { KEY_SCREENLOCK } }, { KE_KEY, 0xe9, { KEY_BRIGHTNESS_ZERO } }, { KE_KEY, 0xeb, { KEY_CAMERA_ZOOMOUT } }, @@ -81,6 +87,25 @@ static const struct key_entry eeepc_wmi_keymap[] = { { KE_END, 0}, }; +static void eeepc_wmi_key_filter(struct asus_wmi_driver *asus_wmi, int *code, + unsigned int *value, bool *autorelease) +{ + switch (*code) { + case HOME_PRESS: + *value = 1; + *autorelease = 0; + break; + case HOME_HOLD: + *code = ASUS_WMI_KEY_IGNORE; + break; + case HOME_RELEASE: + *code = HOME_PRESS; + *value = 0; + *autorelease = 0; + break; + } +} + static acpi_status eeepc_wmi_parse_device(acpi_handle handle, u32 level, void *context, void **retval) { @@ -141,6 +166,7 @@ static void eeepc_dmi_check(struct asus_wmi_driver *driver) static void eeepc_wmi_quirks(struct asus_wmi_driver *driver) { driver->hotplug_wireless = 
hotplug_wireless; + driver->wapf = -1; eeepc_dmi_check(driver); } @@ -151,6 +177,7 @@ static struct asus_wmi_driver asus_wmi_driver = { .keymap = eeepc_wmi_keymap, .input_name = "Eee PC WMI hotkeys", .input_phys = EEEPC_WMI_FILE "/input0", + .key_filter = eeepc_wmi_key_filter, .probe = eeepc_wmi_probe, .quirks = eeepc_wmi_quirks, }; diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index bfdda33feb26..0c595410e788 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -32,13 +32,22 @@ #include <linux/platform_device.h> #include <linux/input.h> #include <linux/input/sparse-keymap.h> +#include <linux/backlight.h> +#include <linux/fb.h> #define IDEAPAD_RFKILL_DEV_NUM (3) +#define CFG_BT_BIT (16) +#define CFG_3G_BIT (17) +#define CFG_WIFI_BIT (18) +#define CFG_CAMERA_BIT (19) + struct ideapad_private { struct rfkill *rfk[IDEAPAD_RFKILL_DEV_NUM]; struct platform_device *platform_device; struct input_dev *inputdev; + struct backlight_device *blightdev; + unsigned long cfg; }; static acpi_handle ideapad_handle; @@ -155,7 +164,7 @@ static int write_ec_cmd(acpi_handle handle, int cmd, unsigned long data) } /* - * camera power + * sysfs */ static ssize_t show_ideapad_cam(struct device *dev, struct device_attribute *attr, @@ -186,6 +195,44 @@ static ssize_t store_ideapad_cam(struct device *dev, static DEVICE_ATTR(camera_power, 0644, show_ideapad_cam, store_ideapad_cam); +static ssize_t show_ideapad_cfg(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ideapad_private *priv = dev_get_drvdata(dev); + + return sprintf(buf, "0x%.8lX\n", priv->cfg); +} + +static DEVICE_ATTR(cfg, 0444, show_ideapad_cfg, NULL); + +static struct attribute *ideapad_attributes[] = { + &dev_attr_camera_power.attr, + &dev_attr_cfg.attr, + NULL +}; + +static mode_t ideapad_is_visible(struct kobject *kobj, + struct attribute *attr, + int idx) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct ideapad_private *priv = dev_get_drvdata(dev); + bool supported; + + if (attr == &dev_attr_camera_power.attr) + supported = test_bit(CFG_CAMERA_BIT, &(priv->cfg)); + else + supported = true; + + return supported ? 
attr->mode : 0; +} + +static struct attribute_group ideapad_attribute_group = { + .is_visible = ideapad_is_visible, + .attrs = ideapad_attributes +}; + /* * Rfkill */ @@ -197,9 +244,9 @@ struct ideapad_rfk_data { }; const struct ideapad_rfk_data ideapad_rfk_data[] = { - { "ideapad_wlan", 18, 0x15, RFKILL_TYPE_WLAN }, - { "ideapad_bluetooth", 16, 0x17, RFKILL_TYPE_BLUETOOTH }, - { "ideapad_3g", 17, 0x20, RFKILL_TYPE_WWAN }, + { "ideapad_wlan", CFG_WIFI_BIT, 0x15, RFKILL_TYPE_WLAN }, + { "ideapad_bluetooth", CFG_BT_BIT, 0x17, RFKILL_TYPE_BLUETOOTH }, + { "ideapad_3g", CFG_3G_BIT, 0x20, RFKILL_TYPE_WWAN }, }; static int ideapad_rfk_set(void *data, bool blocked) @@ -265,8 +312,7 @@ static int __devinit ideapad_register_rfkill(struct acpi_device *adevice, return 0; } -static void __devexit ideapad_unregister_rfkill(struct acpi_device *adevice, - int dev) +static void ideapad_unregister_rfkill(struct acpi_device *adevice, int dev) { struct ideapad_private *priv = dev_get_drvdata(&adevice->dev); @@ -280,15 +326,6 @@ static void __devexit ideapad_unregister_rfkill(struct acpi_device *adevice, /* * Platform device */ -static struct attribute *ideapad_attributes[] = { - &dev_attr_camera_power.attr, - NULL -}; - -static struct attribute_group ideapad_attribute_group = { - .attrs = ideapad_attributes -}; - static int __devinit ideapad_platform_init(struct ideapad_private *priv) { int result; @@ -369,7 +406,7 @@ err_free_dev: return error; } -static void __devexit ideapad_input_exit(struct ideapad_private *priv) +static void ideapad_input_exit(struct ideapad_private *priv) { sparse_keymap_free(priv->inputdev); input_unregister_device(priv->inputdev); @@ -383,6 +420,98 @@ static void ideapad_input_report(struct ideapad_private *priv, } /* + * backlight + */ +static int ideapad_backlight_get_brightness(struct backlight_device *blightdev) +{ + unsigned long now; + + if (read_ec_data(ideapad_handle, 0x12, &now)) + return -EIO; + return now; +} + +static int ideapad_backlight_update_status(struct backlight_device *blightdev) +{ + if (write_ec_cmd(ideapad_handle, 0x13, blightdev->props.brightness)) + return -EIO; + if (write_ec_cmd(ideapad_handle, 0x33, + blightdev->props.power == FB_BLANK_POWERDOWN ? 0 : 1)) + return -EIO; + + return 0; +} + +static const struct backlight_ops ideapad_backlight_ops = { + .get_brightness = ideapad_backlight_get_brightness, + .update_status = ideapad_backlight_update_status, +}; + +static int ideapad_backlight_init(struct ideapad_private *priv) +{ + struct backlight_device *blightdev; + struct backlight_properties props; + unsigned long max, now, power; + + if (read_ec_data(ideapad_handle, 0x11, &max)) + return -EIO; + if (read_ec_data(ideapad_handle, 0x12, &now)) + return -EIO; + if (read_ec_data(ideapad_handle, 0x18, &power)) + return -EIO; + + memset(&props, 0, sizeof(struct backlight_properties)); + props.max_brightness = max; + props.type = BACKLIGHT_PLATFORM; + blightdev = backlight_device_register("ideapad", + &priv->platform_device->dev, + priv, + &ideapad_backlight_ops, + &props); + if (IS_ERR(blightdev)) { + pr_err("Could not register backlight device\n"); + return PTR_ERR(blightdev); + } + + priv->blightdev = blightdev; + blightdev->props.brightness = now; + blightdev->props.power = power ? 
FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN; + backlight_update_status(blightdev); + + return 0; +} + +static void ideapad_backlight_exit(struct ideapad_private *priv) +{ + if (priv->blightdev) + backlight_device_unregister(priv->blightdev); + priv->blightdev = NULL; +} + +static void ideapad_backlight_notify_power(struct ideapad_private *priv) +{ + unsigned long power; + struct backlight_device *blightdev = priv->blightdev; + + if (read_ec_data(ideapad_handle, 0x18, &power)) + return; + blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN; +} + +static void ideapad_backlight_notify_brightness(struct ideapad_private *priv) +{ + unsigned long now; + + /* if we control brightness via acpi video driver */ + if (priv->blightdev == NULL) { + read_ec_data(ideapad_handle, 0x12, &now); + return; + } + + backlight_force_update(priv->blightdev, BACKLIGHT_UPDATE_HOTKEY); +} + +/* * module init/exit */ static const struct acpi_device_id ideapad_device_ids[] = { @@ -393,10 +522,11 @@ MODULE_DEVICE_TABLE(acpi, ideapad_device_ids); static int __devinit ideapad_acpi_add(struct acpi_device *adevice) { - int ret, i, cfg; + int ret, i; + unsigned long cfg; struct ideapad_private *priv; - if (read_method_int(adevice->handle, "_CFG", &cfg)) + if (read_method_int(adevice->handle, "_CFG", (int *)&cfg)) return -ENODEV; priv = kzalloc(sizeof(*priv), GFP_KERNEL); @@ -404,6 +534,7 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice) return -ENOMEM; dev_set_drvdata(&adevice->dev, priv); ideapad_handle = adevice->handle; + priv->cfg = cfg; ret = ideapad_platform_init(priv); if (ret) @@ -414,15 +545,25 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice) goto input_failed; for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) { - if (test_bit(ideapad_rfk_data[i].cfgbit, (unsigned long *)&cfg)) + if (test_bit(ideapad_rfk_data[i].cfgbit, &cfg)) ideapad_register_rfkill(adevice, i); else priv->rfk[i] = NULL; } ideapad_sync_rfk_state(adevice); + if (!acpi_video_backlight_support()) { + ret = ideapad_backlight_init(priv); + if (ret && ret != -ENODEV) + goto backlight_failed; + } + return 0; +backlight_failed: + for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) + ideapad_unregister_rfkill(adevice, i); + ideapad_input_exit(priv); input_failed: ideapad_platform_exit(priv); platform_failed: @@ -435,6 +576,7 @@ static int __devexit ideapad_acpi_remove(struct acpi_device *adevice, int type) struct ideapad_private *priv = dev_get_drvdata(&adevice->dev); int i; + ideapad_backlight_exit(priv); for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) ideapad_unregister_rfkill(adevice, i); ideapad_input_exit(priv); @@ -459,12 +601,19 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event) vpc1 = (vpc2 << 8) | vpc1; for (vpc_bit = 0; vpc_bit < 16; vpc_bit++) { if (test_bit(vpc_bit, &vpc1)) { - if (vpc_bit == 9) + switch (vpc_bit) { + case 9: ideapad_sync_rfk_state(adevice); - else if (vpc_bit == 4) - read_ec_data(handle, 0x12, &vpc2); - else + break; + case 4: + ideapad_backlight_notify_brightness(priv); + break; + case 2: + ideapad_backlight_notify_power(priv); + break; + default: ideapad_input_report(priv, vpc_bit); + } } } } diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c index 5ffe7c398148..809a3ae943c6 100644 --- a/drivers/platform/x86/intel_ips.c +++ b/drivers/platform/x86/intel_ips.c @@ -403,7 +403,7 @@ static void ips_cpu_raise(struct ips_driver *ips) thm_writew(THM_MPCPC, (new_tdp_limit * 10) / 8); - turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDC_OVR_EN; + 
turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN; wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); turbo_override &= ~TURBO_TDP_MASK; @@ -438,7 +438,7 @@ static void ips_cpu_lower(struct ips_driver *ips) thm_writew(THM_MPCPC, (new_limit * 10) / 8); - turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDC_OVR_EN; + turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN; wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); turbo_override &= ~TURBO_TDP_MASK; diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c index 809adea4965f..abddc83e9fd7 100644 --- a/drivers/platform/x86/intel_menlow.c +++ b/drivers/platform/x86/intel_menlow.c @@ -477,6 +477,8 @@ static acpi_status intel_menlow_register_sensor(acpi_handle handle, u32 lvl, return AE_ERROR; } + return AE_OK; + aux1_not_found: if (status == AE_NOT_FOUND) return AE_OK; diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c index 3a578323122b..ccd7b1f83519 100644 --- a/drivers/platform/x86/intel_mid_thermal.c +++ b/drivers/platform/x86/intel_mid_thermal.c @@ -493,20 +493,30 @@ static int mid_thermal_probe(struct platform_device *pdev) /* Register each sensor with the generic thermal framework*/ for (i = 0; i < MSIC_THERMAL_SENSORS; i++) { + struct thermal_device_info *td_info = initialize_sensor(i); + + if (!td_info) { + ret = -ENOMEM; + goto err; + } pinfo->tzd[i] = thermal_zone_device_register(name[i], - 0, initialize_sensor(i), &tzd_ops, 0, 0, 0, 0); - if (IS_ERR(pinfo->tzd[i])) - goto reg_fail; + 0, td_info, &tzd_ops, 0, 0, 0, 0); + if (IS_ERR(pinfo->tzd[i])) { + kfree(td_info); + ret = PTR_ERR(pinfo->tzd[i]); + goto err; + } } pinfo->pdev = pdev; platform_set_drvdata(pdev, pinfo); return 0; -reg_fail: - ret = PTR_ERR(pinfo->tzd[i]); - while (--i >= 0) +err: + while (--i >= 0) { + kfree(pinfo->tzd[i]->devdata); thermal_zone_device_unregister(pinfo->tzd[i]); + } configure_adc(0); kfree(pinfo); return ret; @@ -524,8 +534,10 @@ static int mid_thermal_remove(struct platform_device *pdev) int i; struct platform_info *pinfo = platform_get_drvdata(pdev); - for (i = 0; i < MSIC_THERMAL_SENSORS; i++) + for (i = 0; i < MSIC_THERMAL_SENSORS; i++) { + kfree(pinfo->tzd[i]->devdata); thermal_zone_device_unregister(pinfo->tzd[i]); + } kfree(pinfo); platform_set_drvdata(pdev, NULL); diff --git a/drivers/platform/x86/intel_rar_register.c b/drivers/platform/x86/intel_rar_register.c index bde47e9080cd..c8a6aed45277 100644 --- a/drivers/platform/x86/intel_rar_register.c +++ b/drivers/platform/x86/intel_rar_register.c @@ -637,15 +637,13 @@ end_function: return error; } -const struct pci_device_id rar_pci_id_tbl[] = { +static DEFINE_PCI_DEVICE_TABLE(rar_pci_id_tbl) = { { PCI_VDEVICE(INTEL, 0x4110) }, { 0 } }; MODULE_DEVICE_TABLE(pci, rar_pci_id_tbl); -const struct pci_device_id *my_id_table = rar_pci_id_tbl; - /* field for registering driver to PCI device */ static struct pci_driver rar_pci_driver = { .name = "rar_register_driver", diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c index 940accbe28d3..c86665369a22 100644 --- a/drivers/platform/x86/intel_scu_ipc.c +++ b/drivers/platform/x86/intel_scu_ipc.c @@ -725,7 +725,7 @@ static void ipc_remove(struct pci_dev *pdev) intel_scu_devices_destroy(); } -static const struct pci_device_id pci_ids[] = { +static DEFINE_PCI_DEVICE_TABLE(pci_ids) = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080e)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x082a)}, { 0,} diff --git a/drivers/platform/x86/msi-laptop.c 
b/drivers/platform/x86/msi-laptop.c index 3ff629df9f01..f204643c5052 100644 --- a/drivers/platform/x86/msi-laptop.c +++ b/drivers/platform/x86/msi-laptop.c @@ -538,6 +538,15 @@ static struct dmi_system_id __initdata msi_load_scm_models_dmi_table[] = { }, .callback = dmi_check_cb }, + { + .ident = "MSI U270", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, + "Micro-Star International Co., Ltd."), + DMI_MATCH(DMI_PRODUCT_NAME, "U270 series"), + }, + .callback = dmi_check_cb + }, { } }; @@ -996,3 +1005,4 @@ MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N034:*"); MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N051:*"); MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N014:*"); MODULE_ALIAS("dmi:*:svnMicro-StarInternational*:pnCR620:*"); +MODULE_ALIAS("dmi:*:svnMicro-StarInternational*:pnU270series:*"); diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c index c832e3356cd6..6f40bf202dc7 100644 --- a/drivers/platform/x86/msi-wmi.c +++ b/drivers/platform/x86/msi-wmi.c @@ -272,6 +272,7 @@ static int __init msi_wmi_init(void) err_free_backlight: backlight_device_unregister(backlight); err_free_input: + sparse_keymap_free(msi_wmi_input_dev); input_unregister_device(msi_wmi_input_dev); err_uninstall_notifier: wmi_remove_notify_handler(MSIWMI_EVENT_GUID); diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c index d347116d150e..359163011044 100644 --- a/drivers/platform/x86/samsung-laptop.c +++ b/drivers/platform/x86/samsung-laptop.c @@ -521,6 +521,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = { .callback = dmi_check_cb, }, { + .ident = "N510", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, + "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "N510"), + DMI_MATCH(DMI_BOARD_NAME, "N510"), + }, + .callback = dmi_check_cb, + }, + { .ident = "X125", .matches = { DMI_MATCH(DMI_SYS_VENDOR, @@ -601,6 +611,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = { .callback = dmi_check_cb, }, { + .ident = "N150/N210/N220", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, + "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "N150/N210/N220"), + DMI_MATCH(DMI_BOARD_NAME, "N150/N210/N220"), + }, + .callback = dmi_check_cb, + }, + { .ident = "N150/N210/N220/N230", .matches = { DMI_MATCH(DMI_SYS_VENDOR, diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c new file mode 100644 index 000000000000..1e54ae74274c --- /dev/null +++ b/drivers/platform/x86/samsung-q10.c @@ -0,0 +1,196 @@ +/* + * Driver for Samsung Q10 and related laptops: controls the backlight + * + * Copyright (c) 2011 Frederick van der Wyck <fvanderwyck@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/backlight.h> +#include <linux/i8042.h> +#include <linux/dmi.h> + +#define SAMSUNGQ10_BL_MAX_INTENSITY 255 +#define SAMSUNGQ10_BL_DEFAULT_INTENSITY 185 + +#define SAMSUNGQ10_BL_8042_CMD 0xbe +#define SAMSUNGQ10_BL_8042_DATA { 0x89, 0x91 } + +static int samsungq10_bl_brightness; + +static bool force; +module_param(force, bool, 0); +MODULE_PARM_DESC(force, + "Disable the DMI check and force the driver to be loaded"); + +static int samsungq10_bl_set_intensity(struct backlight_device *bd) +{ + + int brightness = bd->props.brightness; + unsigned char c[3] = SAMSUNGQ10_BL_8042_DATA; + + c[2] = (unsigned char)brightness; + i8042_lock_chip(); + i8042_command(c, (0x30 << 8) | SAMSUNGQ10_BL_8042_CMD); + i8042_unlock_chip(); + samsungq10_bl_brightness = brightness; + + return 0; +} + +static int samsungq10_bl_get_intensity(struct backlight_device *bd) +{ + return samsungq10_bl_brightness; +} + +static const struct backlight_ops samsungq10_bl_ops = { + .get_brightness = samsungq10_bl_get_intensity, + .update_status = samsungq10_bl_set_intensity, +}; + +#ifdef CONFIG_PM_SLEEP +static int samsungq10_suspend(struct device *dev) +{ + return 0; +} + +static int samsungq10_resume(struct device *dev) +{ + + struct backlight_device *bd = dev_get_drvdata(dev); + + samsungq10_bl_set_intensity(bd); + return 0; +} +#else +#define samsungq10_suspend NULL +#define samsungq10_resume NULL +#endif + +static SIMPLE_DEV_PM_OPS(samsungq10_pm_ops, + samsungq10_suspend, samsungq10_resume); + +static int __devinit samsungq10_probe(struct platform_device *pdev) +{ + + struct backlight_properties props; + struct backlight_device *bd; + + memset(&props, 0, sizeof(struct backlight_properties)); + props.type = BACKLIGHT_PLATFORM; + props.max_brightness = SAMSUNGQ10_BL_MAX_INTENSITY; + bd = backlight_device_register("samsung", &pdev->dev, NULL, + &samsungq10_bl_ops, &props); + if (IS_ERR(bd)) + return PTR_ERR(bd); + + platform_set_drvdata(pdev, bd); + + bd->props.brightness = SAMSUNGQ10_BL_DEFAULT_INTENSITY; + samsungq10_bl_set_intensity(bd); + + return 0; +} + +static int __devexit samsungq10_remove(struct platform_device *pdev) +{ + + struct backlight_device *bd = platform_get_drvdata(pdev); + + bd->props.brightness = SAMSUNGQ10_BL_DEFAULT_INTENSITY; + samsungq10_bl_set_intensity(bd); + + backlight_device_unregister(bd); + + return 0; +} + +static struct platform_driver samsungq10_driver = { + .driver = { + .name = KBUILD_MODNAME, + .owner = THIS_MODULE, + .pm = &samsungq10_pm_ops, + }, + .probe = samsungq10_probe, + .remove = __devexit_p(samsungq10_remove), +}; + +static struct platform_device *samsungq10_device; + +static int __init dmi_check_callback(const struct dmi_system_id *id) +{ + printk(KERN_INFO KBUILD_MODNAME ": found model '%s'\n", id->ident); + return 1; +} + +static struct dmi_system_id __initdata samsungq10_dmi_table[] = { + { + .ident = "Samsung Q10", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Samsung"), + DMI_MATCH(DMI_PRODUCT_NAME, "SQ10"), + }, + .callback = dmi_check_callback, + }, + { + .ident = "Samsung Q20", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG Electronics"), + DMI_MATCH(DMI_PRODUCT_NAME, "SENS Q20"), + }, + .callback = dmi_check_callback, + }, + { + .ident = "Samsung Q25", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG Electronics"), + DMI_MATCH(DMI_PRODUCT_NAME, "NQ25"), + }, + .callback = dmi_check_callback, + }, + { + .ident = "Dell 
Latitude X200", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "X200"), + }, + .callback = dmi_check_callback, + }, + { }, +}; +MODULE_DEVICE_TABLE(dmi, samsungq10_dmi_table); + +static int __init samsungq10_init(void) +{ + if (!force && !dmi_check_system(samsungq10_dmi_table)) + return -ENODEV; + + samsungq10_device = platform_create_bundle(&samsungq10_driver, + samsungq10_probe, + NULL, 0, NULL, 0); + + if (IS_ERR(samsungq10_device)) + return PTR_ERR(samsungq10_device); + + return 0; +} + +static void __exit samsungq10_exit(void) +{ + platform_device_unregister(samsungq10_device); + platform_driver_unregister(&samsungq10_driver); +} + +module_init(samsungq10_init); +module_exit(samsungq10_exit); + +MODULE_AUTHOR("Frederick van der Wyck <fvanderwyck@gmail.com>"); +MODULE_DESCRIPTION("Samsung Q10 Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 26c5b117df22..7bd829f247eb 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -3186,8 +3186,17 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) KEY_VENDOR, /* 0x17: Thinkpad/AccessIBM/Lenovo */ /* (assignments unknown, please report if found) */ + KEY_UNKNOWN, KEY_UNKNOWN, + + /* + * The mic mute button only sends 0x1a. It does not + * automatically mute the mic or change the mute light. + */ + KEY_MICMUTE, /* 0x1a: Mic mute (since ?400 or so) */ + + /* (assignments unknown, please report if found) */ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, - KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, + KEY_UNKNOWN, }, }; diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index d3e38790906e..d8e6a429e8ba 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -20,6 +20,7 @@ #include <linux/debugfs.h> #include <linux/device.h> #include <linux/slab.h> +#include <linux/async.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/suspend.h> @@ -33,6 +34,8 @@ #include "dummy.h" +#define rdev_crit(rdev, fmt, ...) \ + pr_crit("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__) #define rdev_err(rdev, fmt, ...) \ pr_err("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__) #define rdev_warn(rdev, fmt, ...) 
\ @@ -78,11 +81,13 @@ struct regulator { char *supply_name; struct device_attribute dev_attr; struct regulator_dev *rdev; +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs; +#endif }; static int _regulator_is_enabled(struct regulator_dev *rdev); -static int _regulator_disable(struct regulator_dev *rdev, - struct regulator_dev **supply_rdev_ptr); +static int _regulator_disable(struct regulator_dev *rdev); static int _regulator_get_voltage(struct regulator_dev *rdev); static int _regulator_get_current_limit(struct regulator_dev *rdev); static unsigned int _regulator_get_mode(struct regulator_dev *rdev); @@ -90,6 +95,9 @@ static void _notifier_call_chain(struct regulator_dev *rdev, unsigned long event, void *data); static int _regulator_do_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV); +static struct regulator *create_regulator(struct regulator_dev *rdev, + struct device *dev, + const char *supply_name); static const char *rdev_get_name(struct regulator_dev *rdev) { @@ -143,8 +151,11 @@ static int regulator_check_voltage(struct regulator_dev *rdev, if (*min_uV < rdev->constraints->min_uV) *min_uV = rdev->constraints->min_uV; - if (*min_uV > *max_uV) + if (*min_uV > *max_uV) { + rdev_err(rdev, "unsupportable voltage range: %d-%duV\n", + *min_uV, *max_uV); return -EINVAL; + } return 0; } @@ -197,8 +208,11 @@ static int regulator_check_current_limit(struct regulator_dev *rdev, if (*min_uA < rdev->constraints->min_uA) *min_uA = rdev->constraints->min_uA; - if (*min_uA > *max_uA) + if (*min_uA > *max_uA) { + rdev_err(rdev, "unsupportable current range: %d-%duA\n", + *min_uA, *max_uA); return -EINVAL; + } return 0; } @@ -213,6 +227,7 @@ static int regulator_mode_constrain(struct regulator_dev *rdev, int *mode) case REGULATOR_MODE_STANDBY: break; default: + rdev_err(rdev, "invalid mode %x specified\n", *mode); return -EINVAL; } @@ -779,7 +794,6 @@ static int machine_constraints_voltage(struct regulator_dev *rdev, if (ret < 0) { rdev_err(rdev, "failed to apply %duV constraint\n", rdev->constraints->min_uV); - rdev->constraints = NULL; return ret; } } @@ -882,7 +896,6 @@ static int set_machine_constraints(struct regulator_dev *rdev, ret = suspend_prepare(rdev, rdev->constraints->initial_state); if (ret < 0) { rdev_err(rdev, "failed to set suspend state\n"); - rdev->constraints = NULL; goto out; } } @@ -909,13 +922,15 @@ static int set_machine_constraints(struct regulator_dev *rdev, ret = ops->enable(rdev); if (ret < 0) { rdev_err(rdev, "failed to enable\n"); - rdev->constraints = NULL; goto out; } } print_constraints(rdev); + return 0; out: + kfree(rdev->constraints); + rdev->constraints = NULL; return ret; } @@ -929,21 +944,20 @@ out: * core if it's child is enabled. 
*/ static int set_supply(struct regulator_dev *rdev, - struct regulator_dev *supply_rdev) + struct regulator_dev *supply_rdev) { int err; - err = sysfs_create_link(&rdev->dev.kobj, &supply_rdev->dev.kobj, - "supply"); - if (err) { - rdev_err(rdev, "could not add device link %s err %d\n", - supply_rdev->dev.kobj.name, err); - goto out; + rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev)); + + rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY"); + if (IS_ERR(rdev->supply)) { + err = PTR_ERR(rdev->supply); + rdev->supply = NULL; + return err; } - rdev->supply = supply_rdev; - list_add(&rdev->slist, &supply_rdev->supply_list); -out: - return err; + + return 0; } /** @@ -1032,7 +1046,7 @@ static void unset_regulator_supplies(struct regulator_dev *rdev) } } -#define REG_STR_SIZE 32 +#define REG_STR_SIZE 64 static struct regulator *create_regulator(struct regulator_dev *rdev, struct device *dev, @@ -1052,8 +1066,9 @@ static struct regulator *create_regulator(struct regulator_dev *rdev, if (dev) { /* create a 'requested_microamps_name' sysfs entry */ - size = scnprintf(buf, REG_STR_SIZE, "microamps_requested_%s", - supply_name); + size = scnprintf(buf, REG_STR_SIZE, + "microamps_requested_%s-%s", + dev_name(dev), supply_name); if (size >= REG_STR_SIZE) goto overflow_err; @@ -1088,7 +1103,28 @@ static struct regulator *create_regulator(struct regulator_dev *rdev, dev->kobj.name, err); goto link_name_err; } + } else { + regulator->supply_name = kstrdup(supply_name, GFP_KERNEL); + if (regulator->supply_name == NULL) + goto attr_err; + } + +#ifdef CONFIG_DEBUG_FS + regulator->debugfs = debugfs_create_dir(regulator->supply_name, + rdev->debugfs); + if (IS_ERR_OR_NULL(regulator->debugfs)) { + rdev_warn(rdev, "Failed to create debugfs directory\n"); + regulator->debugfs = NULL; + } else { + debugfs_create_u32("uA_load", 0444, regulator->debugfs, + ®ulator->uA_load); + debugfs_create_u32("min_uV", 0444, regulator->debugfs, + ®ulator->min_uV); + debugfs_create_u32("max_uV", 0444, regulator->debugfs, + ®ulator->max_uV); } +#endif + mutex_unlock(&rdev->mutex); return regulator; link_name_err: @@ -1267,13 +1303,17 @@ void regulator_put(struct regulator *regulator) mutex_lock(®ulator_list_mutex); rdev = regulator->rdev; +#ifdef CONFIG_DEBUG_FS + debugfs_remove_recursive(regulator->debugfs); +#endif + /* remove any sysfs entries */ if (regulator->dev) { sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name); - kfree(regulator->supply_name); device_remove_file(regulator->dev, ®ulator->dev_attr); kfree(regulator->dev_attr.attr.name); } + kfree(regulator->supply_name); list_del(®ulator->list); kfree(regulator); @@ -1301,19 +1341,6 @@ static int _regulator_enable(struct regulator_dev *rdev) { int ret, delay; - if (rdev->use_count == 0) { - /* do we need to enable the supply regulator first */ - if (rdev->supply) { - mutex_lock(&rdev->supply->mutex); - ret = _regulator_enable(rdev->supply); - mutex_unlock(&rdev->supply->mutex); - if (ret < 0) { - rdev_err(rdev, "failed to enable: %d\n", ret); - return ret; - } - } - } - /* check voltage and requested load before enabling */ if (rdev->constraints && (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) @@ -1388,19 +1415,27 @@ int regulator_enable(struct regulator *regulator) struct regulator_dev *rdev = regulator->rdev; int ret = 0; + if (rdev->supply) { + ret = regulator_enable(rdev->supply); + if (ret != 0) + return ret; + } + mutex_lock(&rdev->mutex); ret = _regulator_enable(rdev); mutex_unlock(&rdev->mutex); + + if (ret != 
0) + regulator_disable(rdev->supply); + return ret; } EXPORT_SYMBOL_GPL(regulator_enable); /* locks held by regulator_disable() */ -static int _regulator_disable(struct regulator_dev *rdev, - struct regulator_dev **supply_rdev_ptr) +static int _regulator_disable(struct regulator_dev *rdev) { int ret = 0; - *supply_rdev_ptr = NULL; if (WARN(rdev->use_count <= 0, "unbalanced disables for %s\n", rdev_get_name(rdev))) @@ -1427,9 +1462,6 @@ static int _regulator_disable(struct regulator_dev *rdev, NULL); } - /* decrease our supplies ref count and disable if required */ - *supply_rdev_ptr = rdev->supply; - rdev->use_count = 0; } else if (rdev->use_count > 1) { @@ -1440,6 +1472,7 @@ static int _regulator_disable(struct regulator_dev *rdev, rdev->use_count--; } + return ret; } @@ -1458,29 +1491,21 @@ static int _regulator_disable(struct regulator_dev *rdev, int regulator_disable(struct regulator *regulator) { struct regulator_dev *rdev = regulator->rdev; - struct regulator_dev *supply_rdev = NULL; int ret = 0; mutex_lock(&rdev->mutex); - ret = _regulator_disable(rdev, &supply_rdev); + ret = _regulator_disable(rdev); mutex_unlock(&rdev->mutex); - /* decrease our supplies ref count and disable if required */ - while (supply_rdev != NULL) { - rdev = supply_rdev; - - mutex_lock(&rdev->mutex); - _regulator_disable(rdev, &supply_rdev); - mutex_unlock(&rdev->mutex); - } + if (ret == 0 && rdev->supply) + regulator_disable(rdev->supply); return ret; } EXPORT_SYMBOL_GPL(regulator_disable); /* locks held by regulator_force_disable() */ -static int _regulator_force_disable(struct regulator_dev *rdev, - struct regulator_dev **supply_rdev_ptr) +static int _regulator_force_disable(struct regulator_dev *rdev) { int ret = 0; @@ -1497,10 +1522,6 @@ static int _regulator_force_disable(struct regulator_dev *rdev, REGULATOR_EVENT_DISABLE, NULL); } - /* decrease our supplies ref count and disable if required */ - *supply_rdev_ptr = rdev->supply; - - rdev->use_count = 0; return ret; } @@ -1516,16 +1537,16 @@ static int _regulator_force_disable(struct regulator_dev *rdev, int regulator_force_disable(struct regulator *regulator) { struct regulator_dev *rdev = regulator->rdev; - struct regulator_dev *supply_rdev = NULL; int ret; mutex_lock(&rdev->mutex); regulator->uA_load = 0; - ret = _regulator_force_disable(rdev, &supply_rdev); + ret = _regulator_force_disable(regulator->rdev); mutex_unlock(&rdev->mutex); - if (supply_rdev) - regulator_disable(get_device_regulator(rdev_get_dev(supply_rdev))); + if (rdev->supply) + while (rdev->open_count--) + regulator_disable(rdev->supply); return ret; } @@ -2136,7 +2157,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load) /* get input voltage */ input_uV = 0; if (rdev->supply) - input_uV = _regulator_get_voltage(rdev->supply); + input_uV = regulator_get_voltage(rdev->supply); if (input_uV <= 0) input_uV = rdev->constraints->input_uV; if (input_uV <= 0) { @@ -2206,17 +2227,8 @@ EXPORT_SYMBOL_GPL(regulator_unregister_notifier); static void _notifier_call_chain(struct regulator_dev *rdev, unsigned long event, void *data) { - struct regulator_dev *_rdev; - /* call rdev chain first */ blocking_notifier_call_chain(&rdev->notifier, event, NULL); - - /* now notify regulator we supply */ - list_for_each_entry(_rdev, &rdev->supply_list, slist) { - mutex_lock(&_rdev->mutex); - _notifier_call_chain(_rdev, event, data); - mutex_unlock(&_rdev->mutex); - } } /** @@ -2264,6 +2276,13 @@ err: } EXPORT_SYMBOL_GPL(regulator_bulk_get); +static void regulator_bulk_enable_async(void 
*data, async_cookie_t cookie) +{ + struct regulator_bulk_data *bulk = data; + + bulk->ret = regulator_enable(bulk->consumer); +} + /** * regulator_bulk_enable - enable multiple regulator consumers * @@ -2279,21 +2298,33 @@ EXPORT_SYMBOL_GPL(regulator_bulk_get); int regulator_bulk_enable(int num_consumers, struct regulator_bulk_data *consumers) { + LIST_HEAD(async_domain); int i; - int ret; + int ret = 0; + + for (i = 0; i < num_consumers; i++) + async_schedule_domain(regulator_bulk_enable_async, + &consumers[i], &async_domain); + + async_synchronize_full_domain(&async_domain); + /* If any consumer failed we need to unwind any that succeeded */ for (i = 0; i < num_consumers; i++) { - ret = regulator_enable(consumers[i].consumer); - if (ret != 0) + if (consumers[i].ret != 0) { + ret = consumers[i].ret; goto err; + } } return 0; err: - pr_err("Failed to enable %s: %d\n", consumers[i].supply, ret); - for (--i; i >= 0; --i) - regulator_disable(consumers[i].consumer); + for (i = 0; i < num_consumers; i++) + if (consumers[i].ret == 0) + regulator_disable(consumers[i].consumer); + else + pr_err("Failed to enable %s: %d\n", + consumers[i].supply, consumers[i].ret); return ret; } @@ -2589,9 +2620,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, rdev->owner = regulator_desc->owner; rdev->desc = regulator_desc; INIT_LIST_HEAD(&rdev->consumer_list); - INIT_LIST_HEAD(&rdev->supply_list); INIT_LIST_HEAD(&rdev->list); - INIT_LIST_HEAD(&rdev->slist); BLOCKING_INIT_NOTIFIER_HEAD(&rdev->notifier); /* preform any regulator specific init */ @@ -2672,6 +2701,7 @@ unset_supplies: unset_regulator_supplies(rdev); scrub: + kfree(rdev->constraints); device_unregister(&rdev->dev); /* device core frees rdev */ rdev = ERR_PTR(ret); @@ -2703,7 +2733,7 @@ void regulator_unregister(struct regulator_dev *rdev) unset_regulator_supplies(rdev); list_del(&rdev->list); if (rdev->supply) - sysfs_remove_link(&rdev->dev.kobj, "supply"); + regulator_put(rdev->supply); device_unregister(&rdev->dev); kfree(rdev->constraints); mutex_unlock(®ulator_list_mutex); diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c index c7410bde7b5d..f6ef6694ab98 100644 --- a/drivers/regulator/dummy.c +++ b/drivers/regulator/dummy.c @@ -36,6 +36,29 @@ static struct regulator_desc dummy_desc = { .ops = &dummy_ops, }; +static int __devinit dummy_regulator_probe(struct platform_device *pdev) +{ + int ret; + + dummy_regulator_rdev = regulator_register(&dummy_desc, NULL, + &dummy_initdata, NULL); + if (IS_ERR(dummy_regulator_rdev)) { + ret = PTR_ERR(dummy_regulator_rdev); + pr_err("Failed to register regulator: %d\n", ret); + return ret; + } + + return 0; +} + +static struct platform_driver dummy_regulator_driver = { + .probe = dummy_regulator_probe, + .driver = { + .name = "reg-dummy", + .owner = THIS_MODULE, + }, +}; + static struct platform_device *dummy_pdev; void __init regulator_dummy_init(void) @@ -55,12 +78,9 @@ void __init regulator_dummy_init(void) return; } - dummy_regulator_rdev = regulator_register(&dummy_desc, NULL, - &dummy_initdata, NULL); - if (IS_ERR(dummy_regulator_rdev)) { - ret = PTR_ERR(dummy_regulator_rdev); - pr_err("Failed to register regulator: %d\n", ret); + ret = platform_driver_register(&dummy_regulator_driver); + if (ret != 0) { + pr_err("Failed to register dummy regulator driver: %d\n", ret); platform_device_unregister(dummy_pdev); - return; } } diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c index 55dd4e6650db..66d2d60b436a 100644 
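/*
 * Illustrative sketch of the regulator_bulk_enable() rework shown in the
 * core.c hunks above: each consumer's enable is queued on a private async
 * domain, the domain is synchronized, and any consumers that did come up
 * are unwound if one of them failed.  This is a condensed example, not
 * kernel tree code; it assumes the 3.1-era async API where a domain is a
 * plain list_head, and the example_* names are made up for illustration.
 */
#include <linux/async.h>
#include <linux/list.h>
#include <linux/regulator/consumer.h>

static void example_enable_async(void *data, async_cookie_t cookie)
{
	struct regulator_bulk_data *bulk = data;

	/* each consumer records its own result for the caller to inspect */
	bulk->ret = regulator_enable(bulk->consumer);
}

static int example_bulk_enable(int num, struct regulator_bulk_data *c)
{
	LIST_HEAD(async_domain);
	int i, ret = 0;

	/* kick off all enables in parallel on a private domain */
	for (i = 0; i < num; i++)
		async_schedule_domain(example_enable_async, &c[i],
				      &async_domain);

	/* wait only for the work queued on this domain */
	async_synchronize_full_domain(&async_domain);

	for (i = 0; i < num; i++)
		if (c[i].ret != 0) {
			ret = c[i].ret;
			goto err;
		}

	return 0;

err:
	/* unwind the consumers that succeeded before reporting failure */
	for (i = 0; i < num; i++)
		if (c[i].ret == 0)
			regulator_disable(c[i].consumer);

	return ret;
}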
--- a/drivers/regulator/tps65910-regulator.c +++ b/drivers/regulator/tps65910-regulator.c @@ -49,7 +49,6 @@ #define TPS65911_REG_LDO7 11 #define TPS65911_REG_LDO8 12 -#define TPS65910_NUM_REGULATOR 13 #define TPS65910_SUPPLY_STATE_ENABLED 0x1 /* supported VIO voltages in milivolts */ @@ -264,11 +263,12 @@ static struct tps_info tps65911_regs[] = { }; struct tps65910_reg { - struct regulator_desc desc[TPS65910_NUM_REGULATOR]; + struct regulator_desc *desc; struct tps65910 *mfd; - struct regulator_dev *rdev[TPS65910_NUM_REGULATOR]; - struct tps_info *info[TPS65910_NUM_REGULATOR]; + struct regulator_dev **rdev; + struct tps_info **info; struct mutex mutex; + int num_regulators; int mode; int (*get_ctrl_reg)(int); }; @@ -759,8 +759,13 @@ static int tps65910_list_voltage_dcdc(struct regulator_dev *dev, mult = (selector / VDD1_2_NUM_VOLTS) + 1; volt = VDD1_2_MIN_VOLT + (selector % VDD1_2_NUM_VOLTS) * VDD1_2_OFFSET; + break; case TPS65911_REG_VDDCTRL: volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET); + break; + default: + BUG(); + return -EINVAL; } return volt * 100 * mult; @@ -897,16 +902,42 @@ static __devinit int tps65910_probe(struct platform_device *pdev) switch(tps65910_chip_id(tps65910)) { case TPS65910: pmic->get_ctrl_reg = &tps65910_get_ctrl_register; + pmic->num_regulators = ARRAY_SIZE(tps65910_regs); info = tps65910_regs; + break; case TPS65911: pmic->get_ctrl_reg = &tps65911_get_ctrl_register; + pmic->num_regulators = ARRAY_SIZE(tps65911_regs); info = tps65911_regs; + break; default: pr_err("Invalid tps chip version\n"); + kfree(pmic); return -ENODEV; } - for (i = 0; i < TPS65910_NUM_REGULATOR; i++, info++, reg_data++) { + pmic->desc = kcalloc(pmic->num_regulators, + sizeof(struct regulator_desc), GFP_KERNEL); + if (!pmic->desc) { + err = -ENOMEM; + goto err_free_pmic; + } + + pmic->info = kcalloc(pmic->num_regulators, + sizeof(struct tps_info *), GFP_KERNEL); + if (!pmic->info) { + err = -ENOMEM; + goto err_free_desc; + } + + pmic->rdev = kcalloc(pmic->num_regulators, + sizeof(struct regulator_dev *), GFP_KERNEL); + if (!pmic->rdev) { + err = -ENOMEM; + goto err_free_info; + } + + for (i = 0; i < pmic->num_regulators; i++, info++, reg_data++) { /* Register the regulators */ pmic->info[i] = info; @@ -938,7 +969,7 @@ static __devinit int tps65910_probe(struct platform_device *pdev) "failed to register %s regulator\n", pdev->name); err = PTR_ERR(rdev); - goto err; + goto err_unregister_regulator; } /* Save regulator for cleanup */ @@ -946,23 +977,31 @@ static __devinit int tps65910_probe(struct platform_device *pdev) } return 0; -err: +err_unregister_regulator: while (--i >= 0) regulator_unregister(pmic->rdev[i]); - + kfree(pmic->rdev); +err_free_info: + kfree(pmic->info); +err_free_desc: + kfree(pmic->desc); +err_free_pmic: kfree(pmic); return err; } static int __devexit tps65910_remove(struct platform_device *pdev) { - struct tps65910_reg *tps65910_reg = platform_get_drvdata(pdev); + struct tps65910_reg *pmic = platform_get_drvdata(pdev); int i; - for (i = 0; i < TPS65910_NUM_REGULATOR; i++) - regulator_unregister(tps65910_reg->rdev[i]); + for (i = 0; i < pmic->num_regulators; i++) + regulator_unregister(pmic->rdev[i]); - kfree(tps65910_reg); + kfree(pmic->rdev); + kfree(pmic->info); + kfree(pmic->desc); + kfree(pmic); return 0; } diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c index 87fe0f75a56e..ee8747f4fa08 100644 --- a/drivers/regulator/twl-regulator.c +++ b/drivers/regulator/twl-regulator.c @@ -835,8 +835,8 @@ static struct regulator_ops 
twlsmps_ops = { remap_conf) \ TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ remap_conf, TWL4030, twl4030fixed_ops) -#define TWL6030_FIXED_LDO(label, offset, mVolts, num, turnon_delay) \ - TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ +#define TWL6030_FIXED_LDO(label, offset, mVolts, turnon_delay) \ + TWL_FIXED_LDO(label, offset, mVolts, 0x0, turnon_delay, \ 0x0, TWL6030, twl6030fixed_ops) #define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) { \ @@ -856,24 +856,22 @@ static struct regulator_ops twlsmps_ops = { }, \ } -#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num) { \ +#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \ .base = offset, \ - .id = num, \ .min_mV = min_mVolts, \ .max_mV = max_mVolts, \ .desc = { \ .name = #label, \ .id = TWL6030_REG_##label, \ - .n_voltages = (max_mVolts - min_mVolts)/100, \ + .n_voltages = (max_mVolts - min_mVolts)/100 + 1, \ .ops = &twl6030ldo_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ }, \ } -#define TWL6025_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num) { \ +#define TWL6025_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \ .base = offset, \ - .id = num, \ .min_mV = min_mVolts, \ .max_mV = max_mVolts, \ .desc = { \ @@ -903,9 +901,8 @@ static struct regulator_ops twlsmps_ops = { }, \ } -#define TWL6030_FIXED_RESOURCE(label, offset, num, turnon_delay) { \ +#define TWL6030_FIXED_RESOURCE(label, offset, turnon_delay) { \ .base = offset, \ - .id = num, \ .delay = turnon_delay, \ .desc = { \ .name = #label, \ @@ -916,9 +913,8 @@ static struct regulator_ops twlsmps_ops = { }, \ } -#define TWL6025_ADJUSTABLE_SMPS(label, offset, num) { \ +#define TWL6025_ADJUSTABLE_SMPS(label, offset) { \ .base = offset, \ - .id = num, \ .min_mV = 600, \ .max_mV = 2100, \ .desc = { \ @@ -961,32 +957,32 @@ static struct twlreg_info twl_regs[] = { /* 6030 REG with base as PMC Slave Misc : 0x0030 */ /* Turnon-delay and remap configuration values for 6030 are not verified since the specification is not public */ - TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300, 1), - TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300, 2), - TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300, 3), - TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300, 4), - TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300, 5), - TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300, 7), - TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0), - TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0), - TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0), - TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0), - TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 48, 0), + TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300), + TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300), + TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300), + TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300), + TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300), + TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300), + TWL6030_FIXED_LDO(VANA, 0x50, 2100, 0), + TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 0), + TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 0), + TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 0), + TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 0), /* 6025 are renamed compared to 6030 versions */ - TWL6025_ADJUSTABLE_LDO(LDO2, 0x54, 1000, 3300, 1), - TWL6025_ADJUSTABLE_LDO(LDO4, 0x58, 1000, 3300, 2), - TWL6025_ADJUSTABLE_LDO(LDO3, 0x5c, 1000, 3300, 3), - TWL6025_ADJUSTABLE_LDO(LDO5, 0x68, 1000, 3300, 4), - TWL6025_ADJUSTABLE_LDO(LDO1, 0x6c, 1000, 3300, 5), - TWL6025_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300, 7), - 
TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300, 16), - TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300, 17), - TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300, 18), - - TWL6025_ADJUSTABLE_SMPS(SMPS3, 0x34, 1), - TWL6025_ADJUSTABLE_SMPS(SMPS4, 0x10, 2), - TWL6025_ADJUSTABLE_SMPS(VIO, 0x16, 3), + TWL6025_ADJUSTABLE_LDO(LDO2, 0x54, 1000, 3300), + TWL6025_ADJUSTABLE_LDO(LDO4, 0x58, 1000, 3300), + TWL6025_ADJUSTABLE_LDO(LDO3, 0x5c, 1000, 3300), + TWL6025_ADJUSTABLE_LDO(LDO5, 0x68, 1000, 3300), + TWL6025_ADJUSTABLE_LDO(LDO1, 0x6c, 1000, 3300), + TWL6025_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300), + TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300), + TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300), + TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300), + + TWL6025_ADJUSTABLE_SMPS(SMPS3, 0x34), + TWL6025_ADJUSTABLE_SMPS(SMPS4, 0x10), + TWL6025_ADJUSTABLE_SMPS(VIO, 0x16), }; static u8 twl_get_smps_offset(void) diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c index a0982e809851..bd3531d8b2ac 100644 --- a/drivers/regulator/wm831x-dcdc.c +++ b/drivers/regulator/wm831x-dcdc.c @@ -267,23 +267,6 @@ static int wm831x_buckv_select_min_voltage(struct regulator_dev *rdev, return vsel; } -static int wm831x_buckv_select_max_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) -{ - u16 vsel; - - if (max_uV < 600000 || max_uV > 1800000) - return -EINVAL; - - vsel = ((max_uV - 600000) / 12500) + 8; - - if (wm831x_buckv_list_voltage(rdev, vsel) < min_uV || - wm831x_buckv_list_voltage(rdev, vsel) < max_uV) - return -EINVAL; - - return vsel; -} - static int wm831x_buckv_set_dvs(struct regulator_dev *rdev, int state) { struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); @@ -338,28 +321,23 @@ static int wm831x_buckv_set_voltage(struct regulator_dev *rdev, if (ret < 0) return ret; - /* Set the high voltage as the DVS voltage. This is optimised - * for CPUfreq usage, most processors will keep the maximum - * voltage constant and lower the minimum with the frequency. */ - vsel = wm831x_buckv_select_max_voltage(rdev, min_uV, max_uV); - if (vsel < 0) { - /* This should never happen - at worst the same vsel - * should be chosen */ - WARN_ON(vsel < 0); - return 0; + /* + * If this VSEL is higher than the last one we've seen then + * remember it as the DVS VSEL. This is optimised for CPUfreq + * usage where we want to get to the highest voltage very + * quickly. 
+ */ + if (vsel > dcdc->dvs_vsel) { + ret = wm831x_set_bits(wm831x, dvs_reg, + WM831X_DC1_DVS_VSEL_MASK, + dcdc->dvs_vsel); + if (ret == 0) + dcdc->dvs_vsel = vsel; + else + dev_warn(wm831x->dev, + "Failed to set DCDC DVS VSEL: %d\n", ret); } - /* Don't bother if it's the same VSEL we're already using */ - if (vsel == dcdc->on_vsel) - return 0; - - ret = wm831x_set_bits(wm831x, dvs_reg, WM831X_DC1_DVS_VSEL_MASK, vsel); - if (ret == 0) - dcdc->dvs_vsel = vsel; - else - dev_warn(wm831x->dev, "Failed to set DCDC DVS VSEL: %d\n", - ret); - return 0; } @@ -456,27 +434,6 @@ static __devinit void wm831x_buckv_dvs_init(struct wm831x_dcdc *dcdc, if (!pdata || !pdata->dvs_gpio) return; - switch (pdata->dvs_control_src) { - case 1: - ctrl = 2 << WM831X_DC1_DVS_SRC_SHIFT; - break; - case 2: - ctrl = 3 << WM831X_DC1_DVS_SRC_SHIFT; - break; - default: - dev_err(wm831x->dev, "Invalid DVS control source %d for %s\n", - pdata->dvs_control_src, dcdc->name); - return; - } - - ret = wm831x_set_bits(wm831x, dcdc->base + WM831X_DCDC_DVS_CONTROL, - WM831X_DC1_DVS_SRC_MASK, ctrl); - if (ret < 0) { - dev_err(wm831x->dev, "Failed to set %s DVS source: %d\n", - dcdc->name, ret); - return; - } - ret = gpio_request(pdata->dvs_gpio, "DCDC DVS"); if (ret < 0) { dev_err(wm831x->dev, "Failed to get %s DVS GPIO: %d\n", @@ -498,17 +455,57 @@ static __devinit void wm831x_buckv_dvs_init(struct wm831x_dcdc *dcdc, } dcdc->dvs_gpio = pdata->dvs_gpio; + + switch (pdata->dvs_control_src) { + case 1: + ctrl = 2 << WM831X_DC1_DVS_SRC_SHIFT; + break; + case 2: + ctrl = 3 << WM831X_DC1_DVS_SRC_SHIFT; + break; + default: + dev_err(wm831x->dev, "Invalid DVS control source %d for %s\n", + pdata->dvs_control_src, dcdc->name); + return; + } + + /* If DVS_VSEL is set to the minimum value then raise it to ON_VSEL + * to make bootstrapping a bit smoother. 
+ */ + if (!dcdc->dvs_vsel) { + ret = wm831x_set_bits(wm831x, + dcdc->base + WM831X_DCDC_DVS_CONTROL, + WM831X_DC1_DVS_VSEL_MASK, dcdc->on_vsel); + if (ret == 0) + dcdc->dvs_vsel = dcdc->on_vsel; + else + dev_warn(wm831x->dev, "Failed to set DVS_VSEL: %d\n", + ret); + } + + ret = wm831x_set_bits(wm831x, dcdc->base + WM831X_DCDC_DVS_CONTROL, + WM831X_DC1_DVS_SRC_MASK, ctrl); + if (ret < 0) { + dev_err(wm831x->dev, "Failed to set %s DVS source: %d\n", + dcdc->name, ret); + } } static __devinit int wm831x_buckv_probe(struct platform_device *pdev) { struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); struct wm831x_pdata *pdata = wm831x->dev->platform_data; - int id = pdev->id % ARRAY_SIZE(pdata->dcdc); + int id; struct wm831x_dcdc *dcdc; struct resource *res; int ret, irq; + if (pdata && pdata->wm831x_num) + id = (pdata->wm831x_num * 10) + 1; + else + id = 0; + id = pdev->id - id; + dev_dbg(&pdev->dev, "Probing DCDC%d\n", id + 1); if (pdata == NULL || pdata->dcdc[id] == NULL) @@ -545,7 +542,7 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev) } dcdc->on_vsel = ret & WM831X_DC1_ON_VSEL_MASK; - ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_ON_CONFIG); + ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_DVS_CONTROL); if (ret < 0) { dev_err(wm831x->dev, "Failed to read DVS VSEL: %d\n", ret); goto err; @@ -709,11 +706,17 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev) { struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); struct wm831x_pdata *pdata = wm831x->dev->platform_data; - int id = pdev->id % ARRAY_SIZE(pdata->dcdc); + int id; struct wm831x_dcdc *dcdc; struct resource *res; int ret, irq; + if (pdata && pdata->wm831x_num) + id = (pdata->wm831x_num * 10) + 1; + else + id = 0; + id = pdev->id - id; + dev_dbg(&pdev->dev, "Probing DCDC%d\n", id + 1); if (pdata == NULL || pdata->dcdc[id] == NULL) @@ -1046,3 +1049,4 @@ MODULE_DESCRIPTION("WM831x DC-DC convertor driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:wm831x-buckv"); MODULE_ALIAS("platform:wm831x-buckp"); +MODULE_ALIAS("platform:wm831x-epe"); diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c index 2220cf8defb1..6709710a059e 100644 --- a/drivers/regulator/wm831x-ldo.c +++ b/drivers/regulator/wm831x-ldo.c @@ -310,11 +310,17 @@ static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev) { struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); struct wm831x_pdata *pdata = wm831x->dev->platform_data; - int id = pdev->id % ARRAY_SIZE(pdata->ldo); + int id; struct wm831x_ldo *ldo; struct resource *res; int ret, irq; + if (pdata && pdata->wm831x_num) + id = (pdata->wm831x_num * 10) + 1; + else + id = 0; + id = pdev->id - id; + dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1); if (pdata == NULL || pdata->ldo[id] == NULL) @@ -574,11 +580,17 @@ static __devinit int wm831x_aldo_probe(struct platform_device *pdev) { struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); struct wm831x_pdata *pdata = wm831x->dev->platform_data; - int id = pdev->id % ARRAY_SIZE(pdata->ldo); + int id; struct wm831x_ldo *ldo; struct resource *res; int ret, irq; + if (pdata && pdata->wm831x_num) + id = (pdata->wm831x_num * 10) + 1; + else + id = 0; + id = pdev->id - id; + dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1); if (pdata == NULL || pdata->ldo[id] == NULL) @@ -764,11 +776,18 @@ static __devinit int wm831x_alive_ldo_probe(struct platform_device *pdev) { struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); struct wm831x_pdata *pdata = 
wm831x->dev->platform_data; - int id = pdev->id % ARRAY_SIZE(pdata->ldo); + int id; struct wm831x_ldo *ldo; struct resource *res; int ret; + if (pdata && pdata->wm831x_num) + id = (pdata->wm831x_num * 10) + 1; + else + id = 0; + id = pdev->id - id; + + dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1); if (pdata == NULL || pdata->ldo[id] == NULL) diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c index 35b2958d5106..1a6a690f24db 100644 --- a/drivers/regulator/wm8994-regulator.c +++ b/drivers/regulator/wm8994-regulator.c @@ -43,7 +43,7 @@ static int wm8994_ldo_enable(struct regulator_dev *rdev) if (!ldo->enable) return 0; - gpio_set_value(ldo->enable, 1); + gpio_set_value_cansleep(ldo->enable, 1); ldo->is_enabled = true; return 0; @@ -57,7 +57,7 @@ static int wm8994_ldo_disable(struct regulator_dev *rdev) if (!ldo->enable) return -EINVAL; - gpio_set_value(ldo->enable, 0); + gpio_set_value_cansleep(ldo->enable, 0); ldo->is_enabled = false; return 0; diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c index bcae8dd41496..7789002bdd5c 100644 --- a/drivers/rtc/rtc-omap.c +++ b/drivers/rtc/rtc-omap.c @@ -368,7 +368,7 @@ static int __init omap_rtc_probe(struct platform_device *pdev) pr_info("%s: already running\n", pdev->name); /* force to 24 hour mode */ - new_ctrl = reg & ~(OMAP_RTC_CTRL_SPLIT|OMAP_RTC_CTRL_AUTO_COMP); + new_ctrl = reg & (OMAP_RTC_CTRL_SPLIT|OMAP_RTC_CTRL_AUTO_COMP); new_ctrl |= OMAP_RTC_CTRL_STOP; /* BOARD-SPECIFIC CUSTOMIZATION CAN GO HERE: diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 432444af7ee4..a1d3ddba99cc 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -24,6 +24,7 @@ #include <linux/mutex.h> #include <linux/debugfs.h> #include <linux/seq_file.h> +#include <linux/vmalloc.h> #include <asm/ccwdev.h> #include <asm/ebcdic.h> @@ -888,11 +889,11 @@ char *dasd_get_user_string(const char __user *user_buf, size_t user_len) { char *buffer; - buffer = kmalloc(user_len + 1, GFP_KERNEL); + buffer = vmalloc(user_len + 1); if (buffer == NULL) return ERR_PTR(-ENOMEM); if (copy_from_user(buffer, user_buf, user_len) != 0) { - kfree(buffer); + vfree(buffer); return ERR_PTR(-EFAULT); } /* got the string, now strip linefeed. 
*/ @@ -930,7 +931,7 @@ static ssize_t dasd_stats_write(struct file *file, dasd_profile_off(prof); } else rc = -EINVAL; - kfree(buffer); + vfree(buffer); return rc; } @@ -1042,7 +1043,7 @@ static ssize_t dasd_stats_global_write(struct file *file, dasd_global_profile_level = DASD_PROFILE_OFF; } else rc = -EINVAL; - kfree(buffer); + vfree(buffer); return rc; } diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 30fb979d684d..6e835c9fdfcb 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -1461,6 +1461,15 @@ dasd_eckd_check_characteristics(struct dasd_device *device) "Read device characteristic failed, rc=%d", rc); goto out_err3; } + + if ((device->features & DASD_FEATURE_USERAW) && + !(private->rdc_data.facilities.RT_in_LR)) { + dev_err(&device->cdev->dev, "The storage server does not " + "support raw-track access\n"); + rc = -EINVAL; + goto out_err3; + } + /* find the valid cylinder size */ if (private->rdc_data.no_cyl == LV_COMPAT_CYL && private->rdc_data.long_no_cyl) diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c index 6c3c5364d082..e12989fff4ff 100644 --- a/drivers/s390/block/dasd_proc.c +++ b/drivers/s390/block/dasd_proc.c @@ -312,14 +312,14 @@ static ssize_t dasd_stats_proc_write(struct file *file, pr_info("The statistics have been reset\n"); } else goto out_parse_error; - kfree(buffer); + vfree(buffer); return user_len; out_parse_error: rc = -EINVAL; pr_warning("%s is not a supported value for /proc/dasd/statistics\n", str); out_error: - kfree(buffer); + vfree(buffer); return rc; #else pr_warning("/proc/dasd/statistics: is not activated in this kernel\n"); diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c index 7ad30e72f868..5f9f929e891c 100644 --- a/drivers/s390/char/sclp_async.c +++ b/drivers/s390/char/sclp_async.c @@ -82,12 +82,9 @@ static int proc_handler_callhome(struct ctl_table *ctl, int write, return -EFAULT; } else { len = *count; - rc = copy_from_user(buf, buffer, sizeof(buf)); - if (rc != 0) - return -EFAULT; - buf[sizeof(buf) - 1] = '\0'; - if (strict_strtoul(buf, 0, &val) != 0) - return -EINVAL; + rc = kstrtoul_from_user(buffer, len, 0, &val); + if (rc) + return rc; if (val != 0 && val != 1) return -EINVAL; callhome_enabled = val; diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index 7bc643f3f5ab..e5c966462c5a 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -14,6 +14,8 @@ #include "chsc.h" #define QDIO_BUSY_BIT_PATIENCE (100 << 12) /* 100 microseconds */ +#define QDIO_BUSY_BIT_RETRY_DELAY 10 /* 10 milliseconds */ +#define QDIO_BUSY_BIT_RETRIES 1000 /* = 10s retry time */ #define QDIO_INPUT_THRESHOLD (500 << 12) /* 500 microseconds */ /* diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c index f8b03a636e49..0e615cb912d0 100644 --- a/drivers/s390/cio/qdio_debug.c +++ b/drivers/s390/cio/qdio_debug.c @@ -188,19 +188,13 @@ static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf, struct qdio_irq *irq_ptr = seq->private; struct qdio_q *q; unsigned long val; - char buf[8]; int ret, i; if (!irq_ptr) return 0; - if (count >= sizeof(buf)) - return -EINVAL; - if (copy_from_user(&buf, ubuf, count)) - return -EFAULT; - buf[count] = 0; - - ret = strict_strtoul(buf, 10, &val); - if (ret < 0) + + ret = kstrtoul_from_user(ubuf, count, 10, &val); + if (ret) return ret; switch (val) { diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index e58169c32474..288c9140290e 100644 
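/*
 * Illustrative sketch of the conversion done in the sclp_async and
 * qdio_debug hunks above: kstrtoul_from_user() replaces the open-coded
 * copy_from_user() + strict_strtoul() sequence, copying, NUL-terminating
 * and parsing the user buffer in one call.  example_write() and the 0/1
 * policy check are made up for illustration, not taken from the diff.
 */
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static ssize_t example_write(struct file *file, const char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	unsigned long val;
	int rc;

	/* copy, terminate and parse the user string in one step */
	rc = kstrtoul_from_user(ubuf, count, 0, &val);
	if (rc)
		return rc;

	/* reject anything other than the boolean values we accept */
	if (val != 0 && val != 1)
		return -EINVAL;

	/* ... apply the new setting here ... */

	return count;
}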
--- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -313,7 +313,7 @@ static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit) unsigned long schid = *((u32 *) &q->irq_ptr->schid); unsigned int fc = QDIO_SIGA_WRITE; u64 start_time = 0; - int cc; + int retries = 0, cc; if (is_qebsm(q)) { schid = q->irq_ptr->sch_token; @@ -325,6 +325,7 @@ again: /* hipersocket busy condition */ if (unlikely(*busy_bit)) { WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2); + retries++; if (!start_time) { start_time = get_clock(); @@ -333,6 +334,11 @@ again: if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE) goto again; } + if (retries) { + DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, + "%4x cc2 BB1:%1d", SCH_NO(q), q->nr); + DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries); + } return cc; } @@ -728,13 +734,14 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q) static int qdio_kick_outbound_q(struct qdio_q *q) { + int retries = 0, cc; unsigned int busy_bit; - int cc; if (!need_siga_out(q)) return 0; DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr); +retry: qperf_inc(q, siga_write); cc = qdio_siga_output(q, &busy_bit); @@ -743,7 +750,11 @@ static int qdio_kick_outbound_q(struct qdio_q *q) break; case 2: if (busy_bit) { - DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr); + while (++retries < QDIO_BUSY_BIT_RETRIES) { + mdelay(QDIO_BUSY_BIT_RETRY_DELAY); + goto retry; + } + DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr); cc |= QDIO_ERROR_SIGA_BUSY; } else DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr); @@ -753,6 +764,10 @@ static int qdio_kick_outbound_q(struct qdio_q *q) DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc); break; } + if (retries) { + DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr); + DBF_ERROR("count:%u", retries); + } return cc; } diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c index d6702e57d428..dc8d022c07a1 100644 --- a/drivers/sh/clk/core.c +++ b/drivers/sh/clk/core.c @@ -34,6 +34,9 @@ static LIST_HEAD(clock_list); static DEFINE_SPINLOCK(clock_lock); static DEFINE_MUTEX(clock_list_sem); +/* clock disable operations are not passed on to hardware during boot */ +static int allow_disable; + void clk_rate_table_build(struct clk *clk, struct cpufreq_frequency_table *freq_table, int nr_freqs, @@ -228,7 +231,7 @@ static void __clk_disable(struct clk *clk) return; if (!(--clk->usecount)) { - if (likely(clk->ops && clk->ops->disable)) + if (likely(allow_disable && clk->ops && clk->ops->disable)) clk->ops->disable(clk); if (likely(clk->parent)) __clk_disable(clk->parent); @@ -393,7 +396,7 @@ int clk_register(struct clk *clk) { int ret; - if (clk == NULL || IS_ERR(clk)) + if (IS_ERR_OR_NULL(clk)) return -EINVAL; /* @@ -744,3 +747,25 @@ err_out: return err; } late_initcall(clk_debugfs_init); + +static int __init clk_late_init(void) +{ + unsigned long flags; + struct clk *clk; + + /* disable all clocks with zero use count */ + mutex_lock(&clock_list_sem); + spin_lock_irqsave(&clock_lock, flags); + + list_for_each_entry(clk, &clock_list, node) + if (!clk->usecount && clk->ops && clk->ops->disable) + clk->ops->disable(clk); + + /* from now on allow clock disable operations */ + allow_disable = 1; + + spin_unlock_irqrestore(&clock_lock, flags); + mutex_unlock(&clock_list_sem); + return 0; +} +late_initcall(clk_late_init); diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index eba88c749fb1..730b4a37b823 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c @@ -2267,17 +2267,13 @@ static int __devexit 
pl022_remove(struct amba_device *adev) { struct pl022 *pl022 = amba_get_drvdata(adev); - int status = 0; + if (!pl022) return 0; /* Remove the queue */ - status = destroy_queue(pl022); - if (status != 0) { - dev_err(&adev->dev, - "queue remove failed (%d)\n", status); - return status; - } + if (destroy_queue(pl022) != 0) + dev_err(&adev->dev, "queue remove failed\n"); load_ssp_default_config(pl022); pl022_dma_remove(pl022); free_irq(adev->irq[0], pl022); @@ -2289,7 +2285,6 @@ pl022_remove(struct amba_device *adev) spi_unregister_master(pl022->master); spi_master_put(pl022->master); amba_set_drvdata(adev, NULL); - dev_dbg(&adev->dev, "remove succeeded\n"); return 0; } diff --git a/drivers/target/iscsi/Kconfig b/drivers/target/iscsi/Kconfig index 564ff4e0dbc4..8345fb457a40 100644 --- a/drivers/target/iscsi/Kconfig +++ b/drivers/target/iscsi/Kconfig @@ -1,5 +1,6 @@ config ISCSI_TARGET tristate "Linux-iSCSI.org iSCSI Target Mode Stack" + depends on NET select CRYPTO select CRYPTO_CRC32C select CRYPTO_CRC32C_INTEL if X86 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 14c81c4265bd..c24fb10de60b 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -120,7 +120,7 @@ struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf) struct iscsi_tiqn *tiqn = NULL; int ret; - if (strlen(buf) > ISCSI_IQN_LEN) { + if (strlen(buf) >= ISCSI_IQN_LEN) { pr_err("Target IQN exceeds %d bytes\n", ISCSI_IQN_LEN); return ERR_PTR(-EINVAL); @@ -1857,7 +1857,7 @@ static int iscsit_handle_text_cmd( char *text_ptr, *text_in; int cmdsn_ret, niov = 0, rx_got, rx_size; u32 checksum = 0, data_crc = 0, payload_length; - u32 padding = 0, text_length = 0; + u32 padding = 0, pad_bytes = 0, text_length = 0; struct iscsi_cmd *cmd; struct kvec iov[3]; struct iscsi_text *hdr; @@ -1896,7 +1896,7 @@ static int iscsit_handle_text_cmd( padding = ((-payload_length) & 3); if (padding != 0) { - iov[niov].iov_base = cmd->pad_bytes; + iov[niov].iov_base = &pad_bytes; iov[niov++].iov_len = padding; rx_size += padding; pr_debug("Receiving %u additional bytes" @@ -1917,7 +1917,7 @@ static int iscsit_handle_text_cmd( if (conn->conn_ops->DataDigest) { iscsit_do_crypto_hash_buf(&conn->conn_rx_hash, text_in, text_length, - padding, cmd->pad_bytes, + padding, (u8 *)&pad_bytes, (u8 *)&data_crc); if (checksum != data_crc) { @@ -3468,7 +3468,12 @@ static inline void iscsit_thread_check_cpumask( } #else -#define iscsit_thread_get_cpumask(X) ({}) + +void iscsit_thread_get_cpumask(struct iscsi_conn *conn) +{ + return; +} + #define iscsit_thread_check_cpumask(X, Y, Z) ({}) #endif /* CONFIG_SMP */ diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index 32bb92c44450..f095e65b1ccf 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -181,7 +181,7 @@ struct se_tpg_np *lio_target_call_addnptotpg( return ERR_PTR(-EOVERFLOW); } memset(buf, 0, MAX_PORTAL_LEN + 1); - snprintf(buf, MAX_PORTAL_LEN, "%s", name); + snprintf(buf, MAX_PORTAL_LEN + 1, "%s", name); memset(&sockaddr, 0, sizeof(struct __kernel_sockaddr_storage)); diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 713a4d23557a..4d087ac11067 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -978,7 +978,7 @@ struct iscsi_login *iscsi_target_init_negotiation( pr_err("Unable to allocate memory for struct 
iscsi_login.\n"); iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); - goto out; + return NULL; } login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index c75a01a1c475..89760329d5d0 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1747,6 +1747,8 @@ int transport_generic_handle_cdb( } EXPORT_SYMBOL(transport_generic_handle_cdb); +static void transport_generic_request_failure(struct se_cmd *, + struct se_device *, int, int); /* * Used by fabric module frontends to queue tasks directly. * Many only be used from process context only @@ -1754,6 +1756,8 @@ EXPORT_SYMBOL(transport_generic_handle_cdb); int transport_handle_cdb_direct( struct se_cmd *cmd) { + int ret; + if (!cmd->se_lun) { dump_stack(); pr_err("cmd->se_lun is NULL\n"); @@ -1765,8 +1769,31 @@ int transport_handle_cdb_direct( " from interrupt context\n"); return -EINVAL; } - - return transport_generic_new_cmd(cmd); + /* + * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following + * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue() + * in existing usage to ensure that outstanding descriptors are handled + * correctly during shutdown via transport_generic_wait_for_tasks() + * + * Also, we don't take cmd->t_state_lock here as we only expect + * this to be called for initial descriptor submission. + */ + cmd->t_state = TRANSPORT_NEW_CMD; + atomic_set(&cmd->t_transport_active, 1); + /* + * transport_generic_new_cmd() is already handling QUEUE_FULL, + * so follow TRANSPORT_NEW_CMD processing thread context usage + * and call transport_generic_request_failure() if necessary.. + */ + ret = transport_generic_new_cmd(cmd); + if (ret == -EAGAIN) + return 0; + else if (ret < 0) { + cmd->transport_error_status = ret; + transport_generic_request_failure(cmd, NULL, 0, + (cmd->data_direction != DMA_TO_DEVICE)); + } + return 0; } EXPORT_SYMBOL(transport_handle_cdb_direct); @@ -3324,7 +3351,7 @@ static int transport_generic_cmd_sequencer( goto out_invalid_cdb_field; } - cmd->t_task_lba = get_unaligned_be16(&cdb[2]); + cmd->t_task_lba = get_unaligned_be64(&cdb[2]); passthrough = (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV); /* diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h index f7fff7ed63c3..bd4fe21a23b8 100644 --- a/drivers/target/tcm_fc/tcm_fc.h +++ b/drivers/target/tcm_fc/tcm_fc.h @@ -187,4 +187,9 @@ void ft_dump_cmd(struct ft_cmd *, const char *caller); ssize_t ft_format_wwn(char *, size_t, u64); +/* + * Underlying HW specific helper function + */ +void ft_invl_hw_context(struct ft_cmd *); + #endif /* __TCM_FC_H__ */ diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 09df38b4610c..5654dc22f7ae 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -320,6 +320,7 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg) default: pr_debug("%s: unhandled frame r_ctl %x\n", __func__, fh->fh_r_ctl); + ft_invl_hw_context(cmd); fc_frame_free(fp); transport_generic_free_cmd(&cmd->se_cmd, 0, 0); break; diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index 8e2a46ddcccb..c37f4cd96452 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c @@ -213,62 +213,49 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF)) 
goto drop; + f_ctl = ntoh24(fh->fh_f_ctl); + ep = fc_seq_exch(seq); + lport = ep->lp; + if (cmd->was_ddp_setup) { + BUG_ON(!ep); + BUG_ON(!lport); + } + /* - * Doesn't expect even single byte of payload. Payload + * Doesn't expect payload if DDP is setup. Payload * is expected to be copied directly to user buffers - * due to DDP (Large Rx offload) feature, hence - * BUG_ON if BUF is non-NULL + * due to DDP (Large Rx offload), */ buf = fc_frame_payload_get(fp, 1); - if (cmd->was_ddp_setup && buf) { - pr_debug("%s: When DDP was setup, not expected to" - "receive frame with payload, Payload shall be" - "copied directly to buffer instead of coming " - "via. legacy receive queues\n", __func__); - BUG_ON(buf); - } + if (buf) + pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, " + "cmd->sg_cnt 0x%x. DDP was setup" + " hence not expected to receive frame with " + "payload, Frame will be dropped if " + "'Sequence Initiative' bit in f_ctl is " + "not set\n", __func__, ep->xid, f_ctl, + cmd->sg, cmd->sg_cnt); + /* + * Invalidate HW DDP context if it was setup for respective + * command. Invalidation of HW DDP context is requited in both + * situation (success and error). + */ + ft_invl_hw_context(cmd); /* - * If ft_cmd indicated 'ddp_setup', in that case only the last frame - * should come with 'TSI bit being set'. If 'TSI bit is not set and if - * data frame appears here, means error condition. In both the cases - * release the DDP context (ddp_put) and in error case, as well - * initiate error recovery mechanism. + * If "Sequence Initiative (TSI)" bit set in f_ctl, means last + * write data frame is received successfully where payload is + * posted directly to user buffer and only the last frame's + * header is posted in receive queue. + * + * If "Sequence Initiative (TSI)" bit is not set, means error + * condition w.r.t. DDP, hence drop the packet and let explict + * ABORTS from other end of exchange timer trigger the recovery. */ - ep = fc_seq_exch(seq); - if (cmd->was_ddp_setup) { - BUG_ON(!ep); - lport = ep->lp; - BUG_ON(!lport); - } - if (cmd->was_ddp_setup && ep->xid != FC_XID_UNKNOWN) { - f_ctl = ntoh24(fh->fh_f_ctl); - /* - * If TSI bit set in f_ctl, means last write data frame is - * received successfully where payload is posted directly - * to user buffer and only the last frame's header is posted - * in legacy receive queue - */ - if (f_ctl & FC_FC_SEQ_INIT) { /* TSI bit set in FC frame */ - cmd->write_data_len = lport->tt.ddp_done(lport, - ep->xid); - goto last_frame; - } else { - /* - * Updating the write_data_len may be meaningless at - * this point, but just in case if required in future - * for debugging or any other purpose - */ - pr_err("%s: Received frame with TSI bit not" - " being SET, dropping the frame, " - "cmd->sg <%p>, cmd->sg_cnt <0x%x>\n", - __func__, cmd->sg, cmd->sg_cnt); - cmd->write_data_len = lport->tt.ddp_done(lport, - ep->xid); - lport->tt.seq_exch_abort(cmd->seq, 0); - goto drop; - } - } + if (f_ctl & FC_FC_SEQ_INIT) + goto last_frame; + else + goto drop; rel_off = ntohl(fh->fh_parm_offset); frame_len = fr_len(fp); @@ -331,3 +318,39 @@ last_frame: drop: fc_frame_free(fp); } + +/* + * Handle and cleanup any HW specific resources if + * received ABORTS, errors, timeouts. 
+ */ +void ft_invl_hw_context(struct ft_cmd *cmd) +{ + struct fc_seq *seq = cmd->seq; + struct fc_exch *ep = NULL; + struct fc_lport *lport = NULL; + + BUG_ON(!cmd); + + /* Cleanup the DDP context in HW if DDP was setup */ + if (cmd->was_ddp_setup && seq) { + ep = fc_seq_exch(seq); + if (ep) { + lport = ep->lp; + if (lport && (ep->xid <= lport->lro_xid)) + /* + * "ddp_done" trigger invalidation of HW + * specific DDP context + */ + cmd->write_data_len = lport->tt.ddp_done(lport, + ep->xid); + + /* + * Resetting same variable to indicate HW's + * DDP context has been invalidated to avoid + * re_invalidation of same context (context is + * identified using ep->xid) + */ + cmd->was_ddp_setup = 0; + } + } +} diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index bf7c687519ef..f7f71b2d3101 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -14,11 +14,7 @@ menuconfig THERMAL If you want this support, you should say Y or M here. config THERMAL_HWMON - bool "Hardware monitoring support" + bool depends on THERMAL depends on HWMON=y || HWMON=THERMAL - help - The generic thermal sysfs driver's hardware monitoring support - requires a 2.10.7/3.0.2 or later lm-sensors userspace. - - Say Y if your user-space is new enough. + default y diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c index 0b1c82ad6805..708f8e92771a 100644 --- a/drivers/thermal/thermal_sys.c +++ b/drivers/thermal/thermal_sys.c @@ -420,6 +420,29 @@ thermal_cooling_device_trip_point_show(struct device *dev, /* hwmon sys I/F */ #include <linux/hwmon.h> + +/* thermal zone devices with the same type share one hwmon device */ +struct thermal_hwmon_device { + char type[THERMAL_NAME_LENGTH]; + struct device *device; + int count; + struct list_head tz_list; + struct list_head node; +}; + +struct thermal_hwmon_attr { + struct device_attribute attr; + char name[16]; +}; + +/* one temperature input for each thermal zone */ +struct thermal_hwmon_temp { + struct list_head hwmon_node; + struct thermal_zone_device *tz; + struct thermal_hwmon_attr temp_input; /* hwmon sys attr */ + struct thermal_hwmon_attr temp_crit; /* hwmon sys attr */ +}; + static LIST_HEAD(thermal_hwmon_list); static ssize_t @@ -437,9 +460,10 @@ temp_input_show(struct device *dev, struct device_attribute *attr, char *buf) int ret; struct thermal_hwmon_attr *hwmon_attr = container_of(attr, struct thermal_hwmon_attr, attr); - struct thermal_zone_device *tz - = container_of(hwmon_attr, struct thermal_zone_device, + struct thermal_hwmon_temp *temp + = container_of(hwmon_attr, struct thermal_hwmon_temp, temp_input); + struct thermal_zone_device *tz = temp->tz; ret = tz->ops->get_temp(tz, &temperature); @@ -455,9 +479,10 @@ temp_crit_show(struct device *dev, struct device_attribute *attr, { struct thermal_hwmon_attr *hwmon_attr = container_of(attr, struct thermal_hwmon_attr, attr); - struct thermal_zone_device *tz - = container_of(hwmon_attr, struct thermal_zone_device, + struct thermal_hwmon_temp *temp + = container_of(hwmon_attr, struct thermal_hwmon_temp, temp_crit); + struct thermal_zone_device *tz = temp->tz; long temperature; int ret; @@ -469,22 +494,54 @@ temp_crit_show(struct device *dev, struct device_attribute *attr, } -static int -thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) +static struct thermal_hwmon_device * +thermal_hwmon_lookup_by_type(const struct thermal_zone_device *tz) { struct thermal_hwmon_device *hwmon; - int new_hwmon_device = 1; - int result; mutex_lock(&thermal_list_lock); 
list_for_each_entry(hwmon, &thermal_hwmon_list, node) if (!strcmp(hwmon->type, tz->type)) { - new_hwmon_device = 0; mutex_unlock(&thermal_list_lock); - goto register_sys_interface; + return hwmon; + } + mutex_unlock(&thermal_list_lock); + + return NULL; +} + +/* Find the temperature input matching a given thermal zone */ +static struct thermal_hwmon_temp * +thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon, + const struct thermal_zone_device *tz) +{ + struct thermal_hwmon_temp *temp; + + mutex_lock(&thermal_list_lock); + list_for_each_entry(temp, &hwmon->tz_list, hwmon_node) + if (temp->tz == tz) { + mutex_unlock(&thermal_list_lock); + return temp; } mutex_unlock(&thermal_list_lock); + return NULL; +} + +static int +thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) +{ + struct thermal_hwmon_device *hwmon; + struct thermal_hwmon_temp *temp; + int new_hwmon_device = 1; + int result; + + hwmon = thermal_hwmon_lookup_by_type(tz); + if (hwmon) { + new_hwmon_device = 0; + goto register_sys_interface; + } + hwmon = kzalloc(sizeof(struct thermal_hwmon_device), GFP_KERNEL); if (!hwmon) return -ENOMEM; @@ -502,30 +559,36 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) goto free_mem; register_sys_interface: - tz->hwmon = hwmon; + temp = kzalloc(sizeof(struct thermal_hwmon_temp), GFP_KERNEL); + if (!temp) { + result = -ENOMEM; + goto unregister_name; + } + + temp->tz = tz; hwmon->count++; - snprintf(tz->temp_input.name, THERMAL_NAME_LENGTH, + snprintf(temp->temp_input.name, THERMAL_NAME_LENGTH, "temp%d_input", hwmon->count); - tz->temp_input.attr.attr.name = tz->temp_input.name; - tz->temp_input.attr.attr.mode = 0444; - tz->temp_input.attr.show = temp_input_show; - sysfs_attr_init(&tz->temp_input.attr.attr); - result = device_create_file(hwmon->device, &tz->temp_input.attr); + temp->temp_input.attr.attr.name = temp->temp_input.name; + temp->temp_input.attr.attr.mode = 0444; + temp->temp_input.attr.show = temp_input_show; + sysfs_attr_init(&temp->temp_input.attr.attr); + result = device_create_file(hwmon->device, &temp->temp_input.attr); if (result) - goto unregister_name; + goto free_temp_mem; if (tz->ops->get_crit_temp) { unsigned long temperature; if (!tz->ops->get_crit_temp(tz, &temperature)) { - snprintf(tz->temp_crit.name, THERMAL_NAME_LENGTH, + snprintf(temp->temp_crit.name, THERMAL_NAME_LENGTH, "temp%d_crit", hwmon->count); - tz->temp_crit.attr.attr.name = tz->temp_crit.name; - tz->temp_crit.attr.attr.mode = 0444; - tz->temp_crit.attr.show = temp_crit_show; - sysfs_attr_init(&tz->temp_crit.attr.attr); + temp->temp_crit.attr.attr.name = temp->temp_crit.name; + temp->temp_crit.attr.attr.mode = 0444; + temp->temp_crit.attr.show = temp_crit_show; + sysfs_attr_init(&temp->temp_crit.attr.attr); result = device_create_file(hwmon->device, - &tz->temp_crit.attr); + &temp->temp_crit.attr); if (result) goto unregister_input; } @@ -534,13 +597,15 @@ thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) mutex_lock(&thermal_list_lock); if (new_hwmon_device) list_add_tail(&hwmon->node, &thermal_hwmon_list); - list_add_tail(&tz->hwmon_node, &hwmon->tz_list); + list_add_tail(&temp->hwmon_node, &hwmon->tz_list); mutex_unlock(&thermal_list_lock); return 0; unregister_input: - device_remove_file(hwmon->device, &tz->temp_input.attr); + device_remove_file(hwmon->device, &temp->temp_input.attr); + free_temp_mem: + kfree(temp); unregister_name: if (new_hwmon_device) { device_remove_file(hwmon->device, &dev_attr_name); @@ -556,15 +621,30 @@ thermal_add_hwmon_sysfs(struct 
thermal_zone_device *tz) static void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz) { - struct thermal_hwmon_device *hwmon = tz->hwmon; + struct thermal_hwmon_device *hwmon; + struct thermal_hwmon_temp *temp; + + hwmon = thermal_hwmon_lookup_by_type(tz); + if (unlikely(!hwmon)) { + /* Should never happen... */ + dev_dbg(&tz->device, "hwmon device lookup failed!\n"); + return; + } + + temp = thermal_hwmon_lookup_temp(hwmon, tz); + if (unlikely(!temp)) { + /* Should never happen... */ + dev_dbg(&tz->device, "temperature input lookup failed!\n"); + return; + } - tz->hwmon = NULL; - device_remove_file(hwmon->device, &tz->temp_input.attr); + device_remove_file(hwmon->device, &temp->temp_input.attr); if (tz->ops->get_crit_temp) - device_remove_file(hwmon->device, &tz->temp_crit.attr); + device_remove_file(hwmon->device, &temp->temp_crit.attr); mutex_lock(&thermal_list_lock); - list_del(&tz->hwmon_node); + list_del(&temp->hwmon_node); + kfree(temp); if (!list_empty(&hwmon->tz_list)) { mutex_unlock(&thermal_list_lock); return; diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index cb40b82daf36..4dcb37bbdf92 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -959,7 +959,7 @@ config SERIAL_IP22_ZILOG_CONSOLE config SERIAL_SH_SCI tristate "SuperH SCI(F) serial port support" - depends on HAVE_CLK && (SUPERH || H8300 || ARCH_SHMOBILE) + depends on HAVE_CLK && (SUPERH || ARCH_SHMOBILE) select SERIAL_CORE config SERIAL_SH_SCI_NR_UARTS diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 827db7654594..7e91b3d368cd 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -1286,22 +1286,17 @@ static int serial_imx_resume(struct platform_device *dev) static int serial_imx_probe_dt(struct imx_port *sport, struct platform_device *pdev) { + static int portnum = 0; struct device_node *np = pdev->dev.of_node; const struct of_device_id *of_id = of_match_device(imx_uart_dt_ids, &pdev->dev); - int ret; if (!np) return -ENODEV; - ret = of_alias_get_id(np, "serial"); - if (ret < 0) { - pr_err("%s: failed to get alias id, errno %d\n", - __func__, ret); - return -ENODEV; - } else { - sport->port.line = ret; - } + sport->port.line = portnum++; + if (sport->port.line >= UART_NR) + return -EINVAL; if (of_get_property(np, "fsl,uart-has-rtscts", NULL)) sport->have_rtscts = 1; diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index ebd8629c108d..2ec57b2fb278 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -54,10 +54,6 @@ #include <asm/sh_bios.h> #endif -#ifdef CONFIG_H8300 -#include <asm/gpio.h> -#endif - #include "sh-sci.h" struct sci_port { @@ -66,12 +62,6 @@ struct sci_port { /* Platform configuration */ struct plat_sci_port *cfg; - /* Port enable callback */ - void (*enable)(struct uart_port *port); - - /* Port disable callback */ - void (*disable)(struct uart_port *port); - /* Break timer */ struct timer_list break_timer; int break_flag; @@ -81,6 +71,8 @@ struct sci_port { /* Function clock */ struct clk *fclk; + char *irqstr[SCIx_NR_IRQS]; + struct dma_chan *chan_tx; struct dma_chan *chan_rx; @@ -121,6 +113,278 @@ to_sci_port(struct uart_port *uart) return container_of(uart, struct sci_port, port); } +struct plat_sci_reg { + u8 offset, size; +}; + +/* Helper for invalidating specific entries of an inherited map. */ +#define sci_reg_invalid { .offset = 0, .size = 0 } + +static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = { + [SCIx_PROBE_REGTYPE] = { + [0 ... 
SCIx_NR_REGS - 1] = sci_reg_invalid, + }, + + /* + * Common SCI definitions, dependent on the port's regshift + * value. + */ + [SCIx_SCI_REGTYPE] = { + [SCSMR] = { 0x00, 8 }, + [SCBRR] = { 0x01, 8 }, + [SCSCR] = { 0x02, 8 }, + [SCxTDR] = { 0x03, 8 }, + [SCxSR] = { 0x04, 8 }, + [SCxRDR] = { 0x05, 8 }, + [SCFCR] = sci_reg_invalid, + [SCFDR] = sci_reg_invalid, + [SCTFDR] = sci_reg_invalid, + [SCRFDR] = sci_reg_invalid, + [SCSPTR] = sci_reg_invalid, + [SCLSR] = sci_reg_invalid, + }, + + /* + * Common definitions for legacy IrDA ports, dependent on + * regshift value. + */ + [SCIx_IRDA_REGTYPE] = { + [SCSMR] = { 0x00, 8 }, + [SCBRR] = { 0x01, 8 }, + [SCSCR] = { 0x02, 8 }, + [SCxTDR] = { 0x03, 8 }, + [SCxSR] = { 0x04, 8 }, + [SCxRDR] = { 0x05, 8 }, + [SCFCR] = { 0x06, 8 }, + [SCFDR] = { 0x07, 16 }, + [SCTFDR] = sci_reg_invalid, + [SCRFDR] = sci_reg_invalid, + [SCSPTR] = sci_reg_invalid, + [SCLSR] = sci_reg_invalid, + }, + + /* + * Common SCIFA definitions. + */ + [SCIx_SCIFA_REGTYPE] = { + [SCSMR] = { 0x00, 16 }, + [SCBRR] = { 0x04, 8 }, + [SCSCR] = { 0x08, 16 }, + [SCxTDR] = { 0x20, 8 }, + [SCxSR] = { 0x14, 16 }, + [SCxRDR] = { 0x24, 8 }, + [SCFCR] = { 0x18, 16 }, + [SCFDR] = { 0x1c, 16 }, + [SCTFDR] = sci_reg_invalid, + [SCRFDR] = sci_reg_invalid, + [SCSPTR] = sci_reg_invalid, + [SCLSR] = sci_reg_invalid, + }, + + /* + * Common SCIFB definitions. + */ + [SCIx_SCIFB_REGTYPE] = { + [SCSMR] = { 0x00, 16 }, + [SCBRR] = { 0x04, 8 }, + [SCSCR] = { 0x08, 16 }, + [SCxTDR] = { 0x40, 8 }, + [SCxSR] = { 0x14, 16 }, + [SCxRDR] = { 0x60, 8 }, + [SCFCR] = { 0x18, 16 }, + [SCFDR] = { 0x1c, 16 }, + [SCTFDR] = sci_reg_invalid, + [SCRFDR] = sci_reg_invalid, + [SCSPTR] = sci_reg_invalid, + [SCLSR] = sci_reg_invalid, + }, + + /* + * Common SH-3 SCIF definitions. + */ + [SCIx_SH3_SCIF_REGTYPE] = { + [SCSMR] = { 0x00, 8 }, + [SCBRR] = { 0x02, 8 }, + [SCSCR] = { 0x04, 8 }, + [SCxTDR] = { 0x06, 8 }, + [SCxSR] = { 0x08, 16 }, + [SCxRDR] = { 0x0a, 8 }, + [SCFCR] = { 0x0c, 8 }, + [SCFDR] = { 0x0e, 16 }, + [SCTFDR] = sci_reg_invalid, + [SCRFDR] = sci_reg_invalid, + [SCSPTR] = sci_reg_invalid, + [SCLSR] = sci_reg_invalid, + }, + + /* + * Common SH-4(A) SCIF(B) definitions. + */ + [SCIx_SH4_SCIF_REGTYPE] = { + [SCSMR] = { 0x00, 16 }, + [SCBRR] = { 0x04, 8 }, + [SCSCR] = { 0x08, 16 }, + [SCxTDR] = { 0x0c, 8 }, + [SCxSR] = { 0x10, 16 }, + [SCxRDR] = { 0x14, 8 }, + [SCFCR] = { 0x18, 16 }, + [SCFDR] = { 0x1c, 16 }, + [SCTFDR] = sci_reg_invalid, + [SCRFDR] = sci_reg_invalid, + [SCSPTR] = { 0x20, 16 }, + [SCLSR] = { 0x24, 16 }, + }, + + /* + * Common SH-4(A) SCIF(B) definitions for ports without an SCSPTR + * register. + */ + [SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE] = { + [SCSMR] = { 0x00, 16 }, + [SCBRR] = { 0x04, 8 }, + [SCSCR] = { 0x08, 16 }, + [SCxTDR] = { 0x0c, 8 }, + [SCxSR] = { 0x10, 16 }, + [SCxRDR] = { 0x14, 8 }, + [SCFCR] = { 0x18, 16 }, + [SCFDR] = { 0x1c, 16 }, + [SCTFDR] = sci_reg_invalid, + [SCRFDR] = sci_reg_invalid, + [SCSPTR] = sci_reg_invalid, + [SCLSR] = { 0x24, 16 }, + }, + + /* + * Common SH-4(A) SCIF(B) definitions for ports with FIFO data + * count registers. 
+ */ + [SCIx_SH4_SCIF_FIFODATA_REGTYPE] = { + [SCSMR] = { 0x00, 16 }, + [SCBRR] = { 0x04, 8 }, + [SCSCR] = { 0x08, 16 }, + [SCxTDR] = { 0x0c, 8 }, + [SCxSR] = { 0x10, 16 }, + [SCxRDR] = { 0x14, 8 }, + [SCFCR] = { 0x18, 16 }, + [SCFDR] = { 0x1c, 16 }, + [SCTFDR] = { 0x1c, 16 }, /* aliased to SCFDR */ + [SCRFDR] = { 0x20, 16 }, + [SCSPTR] = { 0x24, 16 }, + [SCLSR] = { 0x28, 16 }, + }, + + /* + * SH7705-style SCIF(B) ports, lacking both SCSPTR and SCLSR + * registers. + */ + [SCIx_SH7705_SCIF_REGTYPE] = { + [SCSMR] = { 0x00, 16 }, + [SCBRR] = { 0x04, 8 }, + [SCSCR] = { 0x08, 16 }, + [SCxTDR] = { 0x20, 8 }, + [SCxSR] = { 0x14, 16 }, + [SCxRDR] = { 0x24, 8 }, + [SCFCR] = { 0x18, 16 }, + [SCFDR] = { 0x1c, 16 }, + [SCTFDR] = sci_reg_invalid, + [SCRFDR] = sci_reg_invalid, + [SCSPTR] = sci_reg_invalid, + [SCLSR] = sci_reg_invalid, + }, +}; + +#define sci_getreg(up, offset) (sci_regmap[to_sci_port(up)->cfg->regtype] + offset) + +/* + * The "offset" here is rather misleading, in that it refers to an enum + * value relative to the port mapping rather than the fixed offset + * itself, which needs to be manually retrieved from the platform's + * register map for the given port. + */ +static unsigned int sci_serial_in(struct uart_port *p, int offset) +{ + struct plat_sci_reg *reg = sci_getreg(p, offset); + + if (reg->size == 8) + return ioread8(p->membase + (reg->offset << p->regshift)); + else if (reg->size == 16) + return ioread16(p->membase + (reg->offset << p->regshift)); + else + WARN(1, "Invalid register access\n"); + + return 0; +} + +static void sci_serial_out(struct uart_port *p, int offset, int value) +{ + struct plat_sci_reg *reg = sci_getreg(p, offset); + + if (reg->size == 8) + iowrite8(value, p->membase + (reg->offset << p->regshift)); + else if (reg->size == 16) + iowrite16(value, p->membase + (reg->offset << p->regshift)); + else + WARN(1, "Invalid register access\n"); +} + +#define sci_in(up, offset) (up->serial_in(up, offset)) +#define sci_out(up, offset, value) (up->serial_out(up, offset, value)) + +static int sci_probe_regmap(struct plat_sci_port *cfg) +{ + switch (cfg->type) { + case PORT_SCI: + cfg->regtype = SCIx_SCI_REGTYPE; + break; + case PORT_IRDA: + cfg->regtype = SCIx_IRDA_REGTYPE; + break; + case PORT_SCIFA: + cfg->regtype = SCIx_SCIFA_REGTYPE; + break; + case PORT_SCIFB: + cfg->regtype = SCIx_SCIFB_REGTYPE; + break; + case PORT_SCIF: + /* + * The SH-4 is a bit of a misnomer here, although that's + * where this particular port layout originated. This + * configuration (or some slight variation thereof) + * remains the dominant model for all SCIFs. 
+ */ + cfg->regtype = SCIx_SH4_SCIF_REGTYPE; + break; + default: + printk(KERN_ERR "Can't probe register map for given port\n"); + return -EINVAL; + } + + return 0; +} + +static void sci_port_enable(struct sci_port *sci_port) +{ + if (!sci_port->port.dev) + return; + + pm_runtime_get_sync(sci_port->port.dev); + + clk_enable(sci_port->iclk); + sci_port->port.uartclk = clk_get_rate(sci_port->iclk); + clk_enable(sci_port->fclk); +} + +static void sci_port_disable(struct sci_port *sci_port) +{ + if (!sci_port->port.dev) + return; + + clk_disable(sci_port->fclk); + clk_disable(sci_port->iclk); + + pm_runtime_put_sync(sci_port->port.dev); +} + #if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE) #ifdef CONFIG_CONSOLE_POLL @@ -164,223 +428,76 @@ static void sci_poll_put_char(struct uart_port *port, unsigned char c) } #endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */ -#if defined(__H8300H__) || defined(__H8300S__) static void sci_init_pins(struct uart_port *port, unsigned int cflag) { - int ch = (port->mapbase - SMR0) >> 3; - - /* set DDR regs */ - H8300_GPIO_DDR(h8300_sci_pins[ch].port, - h8300_sci_pins[ch].rx, - H8300_GPIO_INPUT); - H8300_GPIO_DDR(h8300_sci_pins[ch].port, - h8300_sci_pins[ch].tx, - H8300_GPIO_OUTPUT); - - /* tx mark output*/ - H8300_SCI_DR(ch) |= h8300_sci_pins[ch].tx; -} -#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) -static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) -{ - if (port->mapbase == 0xA4400000) { - __raw_writew(__raw_readw(PACR) & 0xffc0, PACR); - __raw_writew(__raw_readw(PBCR) & 0x0fff, PBCR); - } else if (port->mapbase == 0xA4410000) - __raw_writew(__raw_readw(PBCR) & 0xf003, PBCR); -} -#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || defined(CONFIG_CPU_SUBTYPE_SH7721) -static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) -{ - unsigned short data; - - if (cflag & CRTSCTS) { - /* enable RTS/CTS */ - if (port->mapbase == 0xa4430000) { /* SCIF0 */ - /* Clear PTCR bit 9-2; enable all scif pins but sck */ - data = __raw_readw(PORT_PTCR); - __raw_writew((data & 0xfc03), PORT_PTCR); - } else if (port->mapbase == 0xa4438000) { /* SCIF1 */ - /* Clear PVCR bit 9-2 */ - data = __raw_readw(PORT_PVCR); - __raw_writew((data & 0xfc03), PORT_PVCR); - } - } else { - if (port->mapbase == 0xa4430000) { /* SCIF0 */ - /* Clear PTCR bit 5-2; enable only tx and rx */ - data = __raw_readw(PORT_PTCR); - __raw_writew((data & 0xffc3), PORT_PTCR); - } else if (port->mapbase == 0xa4438000) { /* SCIF1 */ - /* Clear PVCR bit 5-2 */ - data = __raw_readw(PORT_PVCR); - __raw_writew((data & 0xffc3), PORT_PVCR); - } - } -} -#elif defined(CONFIG_CPU_SH3) -/* For SH7705, SH7706, SH7707, SH7709, SH7709A, SH7729 */ -static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) -{ - unsigned short data; - - /* We need to set SCPCR to enable RTS/CTS */ - data = __raw_readw(SCPCR); - /* Clear out SCP7MD1,0, SCP6MD1,0, SCP4MD1,0*/ - __raw_writew(data & 0x0fcf, SCPCR); - - if (!(cflag & CRTSCTS)) { - /* We need to set SCPCR to enable RTS/CTS */ - data = __raw_readw(SCPCR); - /* Clear out SCP7MD1,0, SCP4MD1,0, - Set SCP6MD1,0 = {01} (output) */ - __raw_writew((data & 0x0fcf) | 0x1000, SCPCR); + struct sci_port *s = to_sci_port(port); + struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + SCSPTR; - data = __raw_readb(SCPDR); - /* Set /RTS2 (bit6) = 0 */ - __raw_writeb(data & 0xbf, SCPDR); + /* + * Use port-specific handler if provided. 
+ */ + if (s->cfg->ops && s->cfg->ops->init_pins) { + s->cfg->ops->init_pins(port, cflag); + return; } -} -#elif defined(CONFIG_CPU_SUBTYPE_SH7722) -static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) -{ - unsigned short data; - if (port->mapbase == 0xffe00000) { - data = __raw_readw(PSCR); - data &= ~0x03cf; - if (!(cflag & CRTSCTS)) - data |= 0x0340; + /* + * For the generic path SCSPTR is necessary. Bail out if that's + * unavailable, too. + */ + if (!reg->size) + return; - __raw_writew(data, PSCR); - } -} -#elif defined(CONFIG_CPU_SUBTYPE_SH7757) || \ - defined(CONFIG_CPU_SUBTYPE_SH7763) || \ - defined(CONFIG_CPU_SUBTYPE_SH7780) || \ - defined(CONFIG_CPU_SUBTYPE_SH7785) || \ - defined(CONFIG_CPU_SUBTYPE_SH7786) || \ - defined(CONFIG_CPU_SUBTYPE_SHX3) -static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) -{ if (!(cflag & CRTSCTS)) - __raw_writew(0x0080, SCSPTR0); /* Set RTS = 1 */ + sci_out(port, SCSPTR, 0x0080); /* Set RTS = 1 */ } -#elif defined(CONFIG_CPU_SH4) && !defined(CONFIG_CPU_SH4A) -static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) -{ - if (!(cflag & CRTSCTS)) - __raw_writew(0x0080, SCSPTR2); /* Set RTS = 1 */ -} -#else -static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) -{ - /* Nothing to do */ -} -#endif -#if defined(CONFIG_CPU_SUBTYPE_SH7760) || \ - defined(CONFIG_CPU_SUBTYPE_SH7780) || \ - defined(CONFIG_CPU_SUBTYPE_SH7785) || \ - defined(CONFIG_CPU_SUBTYPE_SH7786) -static int scif_txfill(struct uart_port *port) -{ - return sci_in(port, SCTFDR) & 0xff; -} - -static int scif_txroom(struct uart_port *port) +static int sci_txfill(struct uart_port *port) { - return SCIF_TXROOM_MAX - scif_txfill(port); -} + struct plat_sci_reg *reg; -static int scif_rxfill(struct uart_port *port) -{ - return sci_in(port, SCRFDR) & 0xff; -} -#elif defined(CONFIG_CPU_SUBTYPE_SH7763) -static int scif_txfill(struct uart_port *port) -{ - if (port->mapbase == 0xffe00000 || - port->mapbase == 0xffe08000) - /* SCIF0/1*/ + reg = sci_getreg(port, SCTFDR); + if (reg->size) return sci_in(port, SCTFDR) & 0xff; - else - /* SCIF2 */ + + reg = sci_getreg(port, SCFDR); + if (reg->size) return sci_in(port, SCFDR) >> 8; -} -static int scif_txroom(struct uart_port *port) -{ - if (port->mapbase == 0xffe00000 || - port->mapbase == 0xffe08000) - /* SCIF0/1*/ - return SCIF_TXROOM_MAX - scif_txfill(port); - else - /* SCIF2 */ - return SCIF2_TXROOM_MAX - scif_txfill(port); + return !(sci_in(port, SCxSR) & SCI_TDRE); } -static int scif_rxfill(struct uart_port *port) -{ - if ((port->mapbase == 0xffe00000) || - (port->mapbase == 0xffe08000)) { - /* SCIF0/1*/ - return sci_in(port, SCRFDR) & 0xff; - } else { - /* SCIF2 */ - return sci_in(port, SCFDR) & SCIF2_RFDC_MASK; - } -} -#elif defined(CONFIG_ARCH_SH7372) -static int scif_txfill(struct uart_port *port) +static int sci_txroom(struct uart_port *port) { - if (port->type == PORT_SCIFA) - return sci_in(port, SCFDR) >> 8; - else - return sci_in(port, SCTFDR); + return port->fifosize - sci_txfill(port); } -static int scif_txroom(struct uart_port *port) +static int sci_rxfill(struct uart_port *port) { - return port->fifosize - scif_txfill(port); -} + struct plat_sci_reg *reg; -static int scif_rxfill(struct uart_port *port) -{ - if (port->type == PORT_SCIFA) - return sci_in(port, SCFDR) & SCIF_RFDC_MASK; - else - return sci_in(port, SCRFDR); -} -#else -static int scif_txfill(struct uart_port *port) -{ - return sci_in(port, SCFDR) >> 8; -} + reg = sci_getreg(port, SCRFDR); + if 
(reg->size) + return sci_in(port, SCRFDR) & 0xff; -static int scif_txroom(struct uart_port *port) -{ - return SCIF_TXROOM_MAX - scif_txfill(port); -} + reg = sci_getreg(port, SCFDR); + if (reg->size) + return sci_in(port, SCFDR) & ((port->fifosize << 1) - 1); -static int scif_rxfill(struct uart_port *port) -{ - return sci_in(port, SCFDR) & SCIF_RFDC_MASK; + return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0; } -#endif -static int sci_txfill(struct uart_port *port) +/* + * SCI helper for checking the state of the muxed port/RXD pins. + */ +static inline int sci_rxd_in(struct uart_port *port) { - return !(sci_in(port, SCxSR) & SCI_TDRE); -} + struct sci_port *s = to_sci_port(port); -static int sci_txroom(struct uart_port *port) -{ - return !sci_txfill(port); -} + if (s->cfg->port_reg <= 0) + return 1; -static int sci_rxfill(struct uart_port *port) -{ - return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0; + return !!__raw_readb(s->cfg->port_reg); } /* ********************************************************************** * @@ -406,10 +523,7 @@ static void sci_transmit_chars(struct uart_port *port) return; } - if (port->type == PORT_SCI) - count = sci_txroom(port); - else - count = scif_txroom(port); + count = sci_txroom(port); do { unsigned char c; @@ -464,13 +578,8 @@ static void sci_receive_chars(struct uart_port *port) return; while (1) { - if (port->type == PORT_SCI) - count = sci_rxfill(port); - else - count = scif_rxfill(port); - /* Don't copy more bytes than there is room for in the buffer */ - count = tty_buffer_request_room(tty, count); + count = tty_buffer_request_room(tty, sci_rxfill(port)); /* If for any reason we can't copy more data, we're done! */ if (count == 0) @@ -561,8 +670,7 @@ static void sci_break_timer(unsigned long data) { struct sci_port *port = (struct sci_port *)data; - if (port->enable) - port->enable(&port->port); + sci_port_enable(port); if (sci_rxd_in(&port->port) == 0) { port->break_flag = 1; @@ -574,8 +682,7 @@ static void sci_break_timer(unsigned long data) } else port->break_flag = 0; - if (port->disable) - port->disable(&port->port); + sci_port_disable(port); } static int sci_handle_errors(struct uart_port *port) @@ -583,13 +690,19 @@ static int sci_handle_errors(struct uart_port *port) int copied = 0; unsigned short status = sci_in(port, SCxSR); struct tty_struct *tty = port->state->port.tty; + struct sci_port *s = to_sci_port(port); - if (status & SCxSR_ORER(port)) { - /* overrun error */ - if (tty_insert_flip_char(tty, 0, TTY_OVERRUN)) - copied++; + /* + * Handle overruns, if supported. 
+ */ + if (s->cfg->overrun_bit != SCIx_NOT_SUPPORTED) { + if (status & (1 << s->cfg->overrun_bit)) { + /* overrun error */ + if (tty_insert_flip_char(tty, 0, TTY_OVERRUN)) + copied++; - dev_notice(port->dev, "overrun error"); + dev_notice(port->dev, "overrun error"); + } } if (status & SCxSR_FER(port)) { @@ -637,12 +750,15 @@ static int sci_handle_errors(struct uart_port *port) static int sci_handle_fifo_overrun(struct uart_port *port) { struct tty_struct *tty = port->state->port.tty; + struct sci_port *s = to_sci_port(port); + struct plat_sci_reg *reg; int copied = 0; - if (port->type != PORT_SCIF) + reg = sci_getreg(port, SCLSR); + if (!reg->size) return 0; - if ((sci_in(port, SCLSR) & SCIF_ORER) != 0) { + if ((sci_in(port, SCLSR) & (1 << s->cfg->overrun_bit))) { sci_out(port, SCLSR, 0); tty_insert_flip_char(tty, 0, TTY_OVERRUN); @@ -840,74 +956,102 @@ static int sci_notifier(struct notifier_block *self, return NOTIFY_OK; } -static void sci_clk_enable(struct uart_port *port) -{ - struct sci_port *sci_port = to_sci_port(port); - - pm_runtime_get_sync(port->dev); +static struct sci_irq_desc { + const char *desc; + irq_handler_t handler; +} sci_irq_desc[] = { + /* + * Split out handlers, the default case. + */ + [SCIx_ERI_IRQ] = { + .desc = "rx err", + .handler = sci_er_interrupt, + }, - clk_enable(sci_port->iclk); - sci_port->port.uartclk = clk_get_rate(sci_port->iclk); - clk_enable(sci_port->fclk); -} + [SCIx_RXI_IRQ] = { + .desc = "rx full", + .handler = sci_rx_interrupt, + }, -static void sci_clk_disable(struct uart_port *port) -{ - struct sci_port *sci_port = to_sci_port(port); + [SCIx_TXI_IRQ] = { + .desc = "tx empty", + .handler = sci_tx_interrupt, + }, - clk_disable(sci_port->fclk); - clk_disable(sci_port->iclk); + [SCIx_BRI_IRQ] = { + .desc = "break", + .handler = sci_br_interrupt, + }, - pm_runtime_put_sync(port->dev); -} + /* + * Special muxed handler. 
+ */ + [SCIx_MUX_IRQ] = { + .desc = "mux", + .handler = sci_mpxed_interrupt, + }, +}; static int sci_request_irq(struct sci_port *port) { - int i; - irqreturn_t (*handlers[4])(int irq, void *ptr) = { - sci_er_interrupt, sci_rx_interrupt, sci_tx_interrupt, - sci_br_interrupt, - }; - const char *desc[] = { "SCI Receive Error", "SCI Receive Data Full", - "SCI Transmit Data Empty", "SCI Break" }; - - if (port->cfg->irqs[0] == port->cfg->irqs[1]) { - if (unlikely(!port->cfg->irqs[0])) - return -ENODEV; - - if (request_irq(port->cfg->irqs[0], sci_mpxed_interrupt, - IRQF_DISABLED, "sci", port)) { - dev_err(port->port.dev, "Can't allocate IRQ\n"); - return -ENODEV; + struct uart_port *up = &port->port; + int i, j, ret = 0; + + for (i = j = 0; i < SCIx_NR_IRQS; i++, j++) { + struct sci_irq_desc *desc; + unsigned int irq; + + if (SCIx_IRQ_IS_MUXED(port)) { + i = SCIx_MUX_IRQ; + irq = up->irq; + } else + irq = port->cfg->irqs[i]; + + desc = sci_irq_desc + i; + port->irqstr[j] = kasprintf(GFP_KERNEL, "%s:%s", + dev_name(up->dev), desc->desc); + if (!port->irqstr[j]) { + dev_err(up->dev, "Failed to allocate %s IRQ string\n", + desc->desc); + goto out_nomem; } - } else { - for (i = 0; i < ARRAY_SIZE(handlers); i++) { - if (unlikely(!port->cfg->irqs[i])) - continue; - - if (request_irq(port->cfg->irqs[i], handlers[i], - IRQF_DISABLED, desc[i], port)) { - dev_err(port->port.dev, "Can't allocate IRQ\n"); - return -ENODEV; - } + + ret = request_irq(irq, desc->handler, up->irqflags, + port->irqstr[j], port); + if (unlikely(ret)) { + dev_err(up->dev, "Can't allocate %s IRQ\n", desc->desc); + goto out_noirq; } } return 0; + +out_noirq: + while (--i >= 0) + free_irq(port->cfg->irqs[i], port); + +out_nomem: + while (--j >= 0) + kfree(port->irqstr[j]); + + return ret; } static void sci_free_irq(struct sci_port *port) { int i; - if (port->cfg->irqs[0] == port->cfg->irqs[1]) - free_irq(port->cfg->irqs[0], port); - else { - for (i = 0; i < ARRAY_SIZE(port->cfg->irqs); i++) { - if (!port->cfg->irqs[i]) - continue; + /* + * Intentionally in reverse order so we iterate over the muxed + * IRQ first. + */ + for (i = 0; i < SCIx_NR_IRQS; i++) { + free_irq(port->cfg->irqs[i], port); + kfree(port->irqstr[i]); - free_irq(port->cfg->irqs[i], port); + if (SCIx_IRQ_IS_MUXED(port)) { + /* If there's only one IRQ, we're done. */ + return; } } } @@ -915,7 +1059,7 @@ static void sci_free_irq(struct sci_port *port) static unsigned int sci_tx_empty(struct uart_port *port) { unsigned short status = sci_in(port, SCxSR); - unsigned short in_tx_fifo = scif_txfill(port); + unsigned short in_tx_fifo = sci_txfill(port); return (status & SCxSR_TEND(port)) && !in_tx_fifo ? 
TIOCSER_TEMT : 0; } @@ -1438,8 +1582,7 @@ static int sci_startup(struct uart_port *port) dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); - if (s->enable) - s->enable(port); + sci_port_enable(s); ret = sci_request_irq(s); if (unlikely(ret < 0)) @@ -1465,8 +1608,7 @@ static void sci_shutdown(struct uart_port *port) sci_free_dma(port); sci_free_irq(s); - if (s->disable) - s->disable(port); + sci_port_disable(s); } static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps, @@ -1513,8 +1655,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, if (likely(baud && port->uartclk)) t = sci_scbrr_calc(s->cfg->scbrr_algo_id, baud, port->uartclk); - if (s->enable) - s->enable(port); + sci_port_enable(s); do { status = sci_in(port, SCxSR); @@ -1584,8 +1725,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, if ((termios->c_cflag & CREAD) != 0) sci_start_rx(port); - if (s->disable) - s->disable(port); + sci_port_disable(s); } static const char *sci_type(struct uart_port *port) @@ -1726,6 +1866,7 @@ static int __devinit sci_init_single(struct platform_device *dev, struct plat_sci_port *p) { struct uart_port *port = &sci_port->port; + int ret; port->ops = &sci_uart_ops; port->iotype = UPIO_MEM; @@ -1746,6 +1887,12 @@ static int __devinit sci_init_single(struct platform_device *dev, break; } + if (p->regtype == SCIx_PROBE_REGTYPE) { + ret = sci_probe_regmap(p); + if (unlikely(ret)) + return ret; + } + if (dev) { sci_port->iclk = clk_get(&dev->dev, "sci_ick"); if (IS_ERR(sci_port->iclk)) { @@ -1764,8 +1911,6 @@ static int __devinit sci_init_single(struct platform_device *dev, if (IS_ERR(sci_port->fclk)) sci_port->fclk = NULL; - sci_port->enable = sci_clk_enable; - sci_port->disable = sci_clk_disable; port->dev = &dev->dev; pm_runtime_enable(&dev->dev); @@ -1775,20 +1920,51 @@ static int __devinit sci_init_single(struct platform_device *dev, sci_port->break_timer.function = sci_break_timer; init_timer(&sci_port->break_timer); + /* + * Establish some sensible defaults for the error detection. + */ + if (!p->error_mask) + p->error_mask = (p->type == PORT_SCI) ? + SCI_DEFAULT_ERROR_MASK : SCIF_DEFAULT_ERROR_MASK; + + /* + * Establish sensible defaults for the overrun detection, unless + * the part has explicitly disabled support for it. + */ + if (p->overrun_bit != SCIx_NOT_SUPPORTED) { + if (p->type == PORT_SCI) + p->overrun_bit = 5; + else if (p->scbrr_algo_id == SCBRR_ALGO_4) + p->overrun_bit = 9; + else + p->overrun_bit = 0; + + /* + * Make the error mask inclusive of overrun detection, if + * supported. + */ + p->error_mask |= (1 << p->overrun_bit); + } + sci_port->cfg = p; port->mapbase = p->mapbase; port->type = p->type; port->flags = p->flags; + port->regshift = p->regshift; /* - * The UART port needs an IRQ value, so we peg this to the TX IRQ + * The UART port needs an IRQ value, so we peg this to the RX IRQ * for the multi-IRQ ports, which is where we are primarily * concerned with the shutdown path synchronization. * * For the muxed case there's nothing more to do. 
*/ port->irq = p->irqs[SCIx_RXI_IRQ]; + port->irqflags = IRQF_DISABLED; + + port->serial_in = sci_serial_in; + port->serial_out = sci_serial_out; if (p->dma_dev) dev_dbg(port->dev, "DMA device %p, tx %d, rx %d\n", @@ -1814,8 +1990,7 @@ static void serial_console_write(struct console *co, const char *s, struct uart_port *port = &sci_port->port; unsigned short bits; - if (sci_port->enable) - sci_port->enable(port); + sci_port_enable(sci_port); uart_console_write(port, s, count, serial_console_putchar); @@ -1824,8 +1999,7 @@ static void serial_console_write(struct console *co, const char *s, while ((sci_in(port, SCxSR) & bits) != bits) cpu_relax(); - if (sci_port->disable) - sci_port->disable(port); + sci_port_disable(sci_port); } static int __devinit serial_console_setup(struct console *co, char *options) @@ -1857,20 +2031,13 @@ static int __devinit serial_console_setup(struct console *co, char *options) if (unlikely(ret != 0)) return ret; - if (sci_port->enable) - sci_port->enable(port); + sci_port_enable(sci_port); if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); - ret = uart_set_options(port, co, baud, parity, bits, flow); -#if defined(__H8300H__) || defined(__H8300S__) - /* disable rx interrupt */ - if (ret == 0) - sci_stop_rx(port); -#endif /* TODO: disable clock */ - return ret; + return uart_set_options(port, co, baud, parity, bits, flow); } static struct console serial_console = { @@ -2081,3 +2248,5 @@ module_exit(sci_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:sh-sci"); +MODULE_AUTHOR("Paul Mundt"); +MODULE_DESCRIPTION("SuperH SCI(F) serial driver"); diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h index b04d937c9110..e9bed038aa1f 100644 --- a/drivers/tty/serial/sh-sci.h +++ b/drivers/tty/serial/sh-sci.h @@ -2,169 +2,14 @@ #include <linux/io.h> #include <linux/gpio.h> -#if defined(CONFIG_H83007) || defined(CONFIG_H83068) -#include <asm/regs306x.h> -#endif -#if defined(CONFIG_H8S2678) -#include <asm/regs267x.h> -#endif - -#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \ - defined(CONFIG_CPU_SUBTYPE_SH7707) || \ - defined(CONFIG_CPU_SUBTYPE_SH7708) || \ - defined(CONFIG_CPU_SUBTYPE_SH7709) -# define SCPCR 0xA4000116 /* 16 bit SCI and SCIF */ -# define SCPDR 0xA4000136 /* 8 bit SCI and SCIF */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7705) -# define SCIF0 0xA4400000 -# define SCIF2 0xA4410000 -# define SCPCR 0xA4000116 -# define SCPDR 0xA4000136 -#elif defined(CONFIG_CPU_SUBTYPE_SH7720) || \ - defined(CONFIG_CPU_SUBTYPE_SH7721) || \ - defined(CONFIG_ARCH_SH73A0) || \ - defined(CONFIG_ARCH_SH7367) || \ - defined(CONFIG_ARCH_SH7377) || \ - defined(CONFIG_ARCH_SH7372) -# define PORT_PTCR 0xA405011EUL -# define PORT_PVCR 0xA4050122UL -# define SCIF_ORER 0x0200 /* overrun error bit */ -#elif defined(CONFIG_SH_RTS7751R2D) -# define SCSPTR1 0xFFE0001C /* 8 bit SCIF */ -# define SCSPTR2 0xFFE80020 /* 16 bit SCIF */ -# define SCIF_ORER 0x0001 /* overrun error bit */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \ - defined(CONFIG_CPU_SUBTYPE_SH7750R) || \ - defined(CONFIG_CPU_SUBTYPE_SH7750S) || \ - defined(CONFIG_CPU_SUBTYPE_SH7091) || \ - defined(CONFIG_CPU_SUBTYPE_SH7751) || \ - defined(CONFIG_CPU_SUBTYPE_SH7751R) -# define SCSPTR1 0xffe0001c /* 8 bit SCI */ -# define SCSPTR2 0xFFE80020 /* 16 bit SCIF */ -# define SCIF_ORER 0x0001 /* overrun error bit */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7760) -# define SCSPTR0 0xfe600024 /* 16 bit SCIF */ -# define SCSPTR1 0xfe610024 /* 16 bit SCIF */ -# define SCSPTR2 0xfe620024 /* 16 bit SCIF */ -# define 
SCIF_ORER 0x0001 /* overrun error bit */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) -# define SCSPTR0 0xA4400000 /* 16 bit SCIF */ -# define SCIF_ORER 0x0001 /* overrun error bit */ -# define PACR 0xa4050100 -# define PBCR 0xa4050102 -#elif defined(CONFIG_CPU_SUBTYPE_SH7343) -# define SCSPTR0 0xffe00010 /* 16 bit SCIF */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7722) -# define PADR 0xA4050120 -# define PSDR 0xA405013e -# define PWDR 0xA4050166 -# define PSCR 0xA405011E -# define SCIF_ORER 0x0001 /* overrun error bit */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7366) -# define SCPDR0 0xA405013E /* 16 bit SCIF0 PSDR */ -# define SCSPTR0 SCPDR0 -# define SCIF_ORER 0x0001 /* overrun error bit */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7723) -# define SCSPTR0 0xa4050160 -# define SCIF_ORER 0x0001 /* overrun error bit */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7724) -# define SCIF_ORER 0x0001 /* overrun error bit */ -#elif defined(CONFIG_CPU_SUBTYPE_SH4_202) -# define SCSPTR2 0xffe80020 /* 16 bit SCIF */ -# define SCIF_ORER 0x0001 /* overrun error bit */ -#elif defined(CONFIG_H83007) || defined(CONFIG_H83068) -# define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port) -#elif defined(CONFIG_H8S2678) -# define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port) -#elif defined(CONFIG_CPU_SUBTYPE_SH7757) -# define SCSPTR0 0xfe4b0020 -# define SCIF_ORER 0x0001 -#elif defined(CONFIG_CPU_SUBTYPE_SH7763) -# define SCSPTR0 0xffe00024 /* 16 bit SCIF */ -# define SCIF_ORER 0x0001 /* overrun error bit */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7770) -# define SCSPTR0 0xff923020 /* 16 bit SCIF */ -# define SCIF_ORER 0x0001 /* overrun error bit */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7780) -# define SCSPTR0 0xffe00024 /* 16 bit SCIF */ -# define SCIF_ORER 0x0001 /* Overrun error bit */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \ - defined(CONFIG_CPU_SUBTYPE_SH7786) -# define SCSPTR0 0xffea0024 /* 16 bit SCIF */ -# define SCIF_ORER 0x0001 /* Overrun error bit */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \ - defined(CONFIG_CPU_SUBTYPE_SH7203) || \ - defined(CONFIG_CPU_SUBTYPE_SH7206) || \ - defined(CONFIG_CPU_SUBTYPE_SH7263) -# define SCSPTR0 0xfffe8020 /* 16 bit SCIF */ -#elif defined(CONFIG_CPU_SUBTYPE_SH7619) -# define SCSPTR0 0xf8400020 /* 16 bit SCIF */ -# define SCIF_ORER 0x0001 /* overrun error bit */ -#elif defined(CONFIG_CPU_SUBTYPE_SHX3) -# define SCSPTR0 0xffc30020 /* 16 bit SCIF */ -# define SCIF_ORER 0x0001 /* Overrun error bit */ -#else -# error CPU subtype not defined -#endif - -/* SCxSR SCI */ -#define SCI_TDRE 0x80 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */ -#define SCI_RDRF 0x40 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */ -#define SCI_ORER 0x20 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */ -#define SCI_FER 0x10 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */ -#define SCI_PER 0x08 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */ -#define SCI_TEND 0x04 /* 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */ -/* SCI_MPB 0x02 * 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */ -/* SCI_MPBT 0x01 * 7707 SCI, 7708 SCI, 7709 SCI, 7750 SCI */ - -#define SCI_ERRORS ( SCI_PER | SCI_FER | SCI_ORER) - -/* SCxSR SCIF */ -#define SCIF_ER 0x0080 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */ -#define SCIF_TEND 0x0040 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */ -#define SCIF_TDFE 0x0020 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */ -#define SCIF_BRK 0x0010 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */ -#define SCIF_FER 0x0008 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 
SCIF */ -#define SCIF_PER 0x0004 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */ -#define SCIF_RDF 0x0002 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */ -#define SCIF_DR 0x0001 /* 7705 SCIF, 7707 SCIF, 7709 SCIF, 7750 SCIF */ - -#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \ - defined(CONFIG_CPU_SUBTYPE_SH7720) || \ - defined(CONFIG_CPU_SUBTYPE_SH7721) || \ - defined(CONFIG_ARCH_SH73A0) || \ - defined(CONFIG_ARCH_SH7367) || \ - defined(CONFIG_ARCH_SH7377) || \ - defined(CONFIG_ARCH_SH7372) -# define SCIF_ORER 0x0200 -# define SCIF_ERRORS ( SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK | SCIF_ORER) -# define SCIF_RFDC_MASK 0x007f -# define SCIF_TXROOM_MAX 64 -#elif defined(CONFIG_CPU_SUBTYPE_SH7763) -# define SCIF_ERRORS ( SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK ) -# define SCIF_RFDC_MASK 0x007f -# define SCIF_TXROOM_MAX 64 -/* SH7763 SCIF2 support */ -# define SCIF2_RFDC_MASK 0x001f -# define SCIF2_TXROOM_MAX 16 -#else -# define SCIF_ERRORS ( SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK) -# define SCIF_RFDC_MASK 0x001f -# define SCIF_TXROOM_MAX 16 -#endif - -#ifndef SCIF_ORER -#define SCIF_ORER 0x0000 -#endif - #define SCxSR_TEND(port) (((port)->type == PORT_SCI) ? SCI_TEND : SCIF_TEND) -#define SCxSR_ERRORS(port) (((port)->type == PORT_SCI) ? SCI_ERRORS : SCIF_ERRORS) #define SCxSR_RDxF(port) (((port)->type == PORT_SCI) ? SCI_RDRF : SCIF_RDF) #define SCxSR_TDxE(port) (((port)->type == PORT_SCI) ? SCI_TDRE : SCIF_TDFE) #define SCxSR_FER(port) (((port)->type == PORT_SCI) ? SCI_FER : SCIF_FER) #define SCxSR_PER(port) (((port)->type == PORT_SCI) ? SCI_PER : SCIF_PER) #define SCxSR_BRK(port) (((port)->type == PORT_SCI) ? 0x00 : SCIF_BRK) -#define SCxSR_ORER(port) (((port)->type == PORT_SCI) ? SCI_ORER : SCIF_ORER) + +#define SCxSR_ERRORS(port) (to_sci_port(port)->cfg->error_mask) #if defined(CONFIG_CPU_SUBTYPE_SH7705) || \ defined(CONFIG_CPU_SUBTYPE_SH7720) || \ @@ -191,278 +36,3 @@ #define SCI_MAJOR 204 #define SCI_MINOR_START 8 - -#define SCI_IN(size, offset) \ - if ((size) == 8) { \ - return ioread8(port->membase + (offset)); \ - } else { \ - return ioread16(port->membase + (offset)); \ - } -#define SCI_OUT(size, offset, value) \ - if ((size) == 8) { \ - iowrite8(value, port->membase + (offset)); \ - } else if ((size) == 16) { \ - iowrite16(value, port->membase + (offset)); \ - } - -#define CPU_SCIx_FNS(name, sci_offset, sci_size, scif_offset, scif_size)\ - static inline unsigned int sci_##name##_in(struct uart_port *port) \ - { \ - if (port->type == PORT_SCIF || port->type == PORT_SCIFB) { \ - SCI_IN(scif_size, scif_offset) \ - } else { /* PORT_SCI or PORT_SCIFA */ \ - SCI_IN(sci_size, sci_offset); \ - } \ - } \ - static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \ - { \ - if (port->type == PORT_SCIF || port->type == PORT_SCIFB) { \ - SCI_OUT(scif_size, scif_offset, value) \ - } else { /* PORT_SCI or PORT_SCIFA */ \ - SCI_OUT(sci_size, sci_offset, value); \ - } \ - } - -#ifdef CONFIG_H8300 -/* h8300 don't have SCIF */ -#define CPU_SCIF_FNS(name) \ - static inline unsigned int sci_##name##_in(struct uart_port *port) \ - { \ - return 0; \ - } \ - static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \ - { \ - } -#else -#define CPU_SCIF_FNS(name, scif_offset, scif_size) \ - static inline unsigned int sci_##name##_in(struct uart_port *port) \ - { \ - SCI_IN(scif_size, scif_offset); \ - } \ - static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \ - { \ - SCI_OUT(scif_size, scif_offset, value); \ - } -#endif - -#define 
CPU_SCI_FNS(name, sci_offset, sci_size) \ - static inline unsigned int sci_##name##_in(struct uart_port* port) \ - { \ - SCI_IN(sci_size, sci_offset); \ - } \ - static inline void sci_##name##_out(struct uart_port* port, unsigned int value) \ - { \ - SCI_OUT(sci_size, sci_offset, value); \ - } - -#if defined(CONFIG_CPU_SH3) || \ - defined(CONFIG_ARCH_SH73A0) || \ - defined(CONFIG_ARCH_SH7367) || \ - defined(CONFIG_ARCH_SH7377) || \ - defined(CONFIG_ARCH_SH7372) -#if defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) -#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \ - sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \ - h8_sci_offset, h8_sci_size) \ - CPU_SCIx_FNS(name, sh4_sci_offset, sh4_sci_size, sh4_scif_offset, sh4_scif_size) -#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \ - CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) -#elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \ - defined(CONFIG_CPU_SUBTYPE_SH7720) || \ - defined(CONFIG_CPU_SUBTYPE_SH7721) || \ - defined(CONFIG_ARCH_SH7367) -#define SCIF_FNS(name, scif_offset, scif_size) \ - CPU_SCIF_FNS(name, scif_offset, scif_size) -#elif defined(CONFIG_ARCH_SH7377) || \ - defined(CONFIG_ARCH_SH7372) || \ - defined(CONFIG_ARCH_SH73A0) -#define SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scifb_offset, sh4_scifb_size) \ - CPU_SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scifb_offset, sh4_scifb_size) -#define SCIF_FNS(name, scif_offset, scif_size) \ - CPU_SCIF_FNS(name, scif_offset, scif_size) -#else -#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \ - sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \ - h8_sci_offset, h8_sci_size) \ - CPU_SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh3_scif_offset, sh3_scif_size) -#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \ - CPU_SCIF_FNS(name, sh3_scif_offset, sh3_scif_size) -#endif -#elif defined(__H8300H__) || defined(__H8300S__) -#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \ - sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \ - h8_sci_offset, h8_sci_size) \ - CPU_SCI_FNS(name, h8_sci_offset, h8_sci_size) -#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \ - CPU_SCIF_FNS(name) -#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\ - defined(CONFIG_CPU_SUBTYPE_SH7724) - #define SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size) \ - CPU_SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scif_offset, sh4_scif_size) - #define SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) \ - CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) -#else -#define SCIx_FNS(name, sh3_sci_offset, sh3_sci_size, sh4_sci_offset, sh4_sci_size, \ - sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size, \ - h8_sci_offset, h8_sci_size) \ - CPU_SCIx_FNS(name, sh4_sci_offset, sh4_sci_size, sh4_scif_offset, sh4_scif_size) -#define SCIF_FNS(name, sh3_scif_offset, sh3_scif_size, sh4_scif_offset, sh4_scif_size) \ - CPU_SCIF_FNS(name, sh4_scif_offset, sh4_scif_size) -#endif - -#if defined(CONFIG_CPU_SUBTYPE_SH7705) || \ - defined(CONFIG_CPU_SUBTYPE_SH7720) || \ - defined(CONFIG_CPU_SUBTYPE_SH7721) || \ - defined(CONFIG_ARCH_SH7367) - -SCIF_FNS(SCSMR, 0x00, 16) -SCIF_FNS(SCBRR, 0x04, 8) -SCIF_FNS(SCSCR, 0x08, 16) -SCIF_FNS(SCxSR, 0x14, 16) -SCIF_FNS(SCFCR, 0x18, 16) -SCIF_FNS(SCFDR, 0x1c, 16) 
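The SCIx_FNS/SCIF_FNS macro machinery being deleted throughout this header generated one sci_<reg>_in()/sci_<reg>_out() inline per register at compile time, keyed off CONFIG_CPU_SUBTYPE_* ifdefs. The sh-sci.c side of the diff replaces that with per-regtype plat_sci_reg tables plus sci_getreg()/sci_serial_in(), resolving offset and access width at run time. A minimal, standalone sketch of that table-driven lookup, using invented register names, layouts and offsets rather than the driver's real ones:

#include <stdio.h>

/* One entry per logical register: MMIO offset and access width in bits. */
struct reg_desc {
	unsigned int offset;
	unsigned int size;	/* 0 plays the role of sci_reg_invalid: not present */
};

/* Invented register names; the real driver indexes by SCSMR, SCxSR, ... */
enum { REG_MODE, REG_STATUS, REG_DATA, NR_REGS };

/* Two hypothetical layouts, analogous to the SCIx_*_REGTYPE tables. */
static const struct reg_desc layouts[2][NR_REGS] = {
	{
		[REG_MODE]   = { 0x00, 16 },
		[REG_STATUS] = { 0x10, 16 },
		[REG_DATA]   = { 0x14,  8 },
	},
	{
		[REG_MODE]   = { 0x00, 16 },
		[REG_STATUS] = { 0x08, 16 },
		[REG_DATA]   = { 0x0c,  8 },
	},
};

/* Rough equivalent of sci_getreg(): pick the descriptor for this layout. */
static const struct reg_desc *get_reg(int layout, int reg)
{
	return &layouts[layout][reg];
}

int main(void)
{
	const struct reg_desc *r = get_reg(1, REG_DATA);

	if (r->size)
		printf("DATA register at +0x%02x, %u-bit access\n",
		       r->offset, r->size);
	return 0;
}

A zero-size entry serves the same purpose as sci_reg_invalid in the driver: callers check it and quietly skip registers a given part does not implement, instead of relying on compile-time ifdefs.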
-SCIF_FNS(SCxTDR, 0x20, 8) -SCIF_FNS(SCxRDR, 0x24, 8) -SCIF_FNS(SCLSR, 0x00, 0) -#elif defined(CONFIG_ARCH_SH7377) || \ - defined(CONFIG_ARCH_SH7372) || \ - defined(CONFIG_ARCH_SH73A0) -SCIF_FNS(SCSMR, 0x00, 16) -SCIF_FNS(SCBRR, 0x04, 8) -SCIF_FNS(SCSCR, 0x08, 16) -SCIF_FNS(SCTDSR, 0x0c, 16) -SCIF_FNS(SCFER, 0x10, 16) -SCIF_FNS(SCxSR, 0x14, 16) -SCIF_FNS(SCFCR, 0x18, 16) -SCIF_FNS(SCFDR, 0x1c, 16) -SCIF_FNS(SCTFDR, 0x38, 16) -SCIF_FNS(SCRFDR, 0x3c, 16) -SCIx_FNS(SCxTDR, 0x20, 8, 0x40, 8) -SCIx_FNS(SCxRDR, 0x24, 8, 0x60, 8) -SCIF_FNS(SCLSR, 0x00, 0) -#elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\ - defined(CONFIG_CPU_SUBTYPE_SH7724) -SCIx_FNS(SCSMR, 0x00, 16, 0x00, 16) -SCIx_FNS(SCBRR, 0x04, 8, 0x04, 8) -SCIx_FNS(SCSCR, 0x08, 16, 0x08, 16) -SCIx_FNS(SCxTDR, 0x20, 8, 0x0c, 8) -SCIx_FNS(SCxSR, 0x14, 16, 0x10, 16) -SCIx_FNS(SCxRDR, 0x24, 8, 0x14, 8) -SCIx_FNS(SCSPTR, 0, 0, 0, 0) -SCIF_FNS(SCFCR, 0x18, 16) -SCIF_FNS(SCFDR, 0x1c, 16) -SCIF_FNS(SCLSR, 0x24, 16) -#else -/* reg SCI/SH3 SCI/SH4 SCIF/SH3 SCIF/SH4 SCI/H8*/ -/* name off sz off sz off sz off sz off sz*/ -SCIx_FNS(SCSMR, 0x00, 8, 0x00, 8, 0x00, 8, 0x00, 16, 0x00, 8) -SCIx_FNS(SCBRR, 0x02, 8, 0x04, 8, 0x02, 8, 0x04, 8, 0x01, 8) -SCIx_FNS(SCSCR, 0x04, 8, 0x08, 8, 0x04, 8, 0x08, 16, 0x02, 8) -SCIx_FNS(SCxTDR, 0x06, 8, 0x0c, 8, 0x06, 8, 0x0C, 8, 0x03, 8) -SCIx_FNS(SCxSR, 0x08, 8, 0x10, 8, 0x08, 16, 0x10, 16, 0x04, 8) -SCIx_FNS(SCxRDR, 0x0a, 8, 0x14, 8, 0x0A, 8, 0x14, 8, 0x05, 8) -SCIF_FNS(SCFCR, 0x0c, 8, 0x18, 16) -#if defined(CONFIG_CPU_SUBTYPE_SH7760) || \ - defined(CONFIG_CPU_SUBTYPE_SH7780) || \ - defined(CONFIG_CPU_SUBTYPE_SH7785) || \ - defined(CONFIG_CPU_SUBTYPE_SH7786) -SCIF_FNS(SCFDR, 0x0e, 16, 0x1C, 16) -SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16) -SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16) -SCIF_FNS(SCSPTR, 0, 0, 0x24, 16) -SCIF_FNS(SCLSR, 0, 0, 0x28, 16) -#elif defined(CONFIG_CPU_SUBTYPE_SH7763) -SCIF_FNS(SCFDR, 0, 0, 0x1C, 16) -SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16) -SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16) -SCIF_FNS(SCSPTR, 0, 0, 0x24, 16) -SCIF_FNS(SCLSR, 0, 0, 0x28, 16) -#else -SCIF_FNS(SCFDR, 0x0e, 16, 0x1C, 16) -#if defined(CONFIG_CPU_SUBTYPE_SH7722) -SCIF_FNS(SCSPTR, 0, 0, 0, 0) -#else -SCIF_FNS(SCSPTR, 0, 0, 0x20, 16) -#endif -SCIF_FNS(SCLSR, 0, 0, 0x24, 16) -#endif -#endif -#define sci_in(port, reg) sci_##reg##_in(port) -#define sci_out(port, reg, value) sci_##reg##_out(port, value) - -/* H8/300 series SCI pins assignment */ -#if defined(__H8300H__) || defined(__H8300S__) -static const struct __attribute__((packed)) { - int port; /* GPIO port no */ - unsigned short rx,tx; /* GPIO bit no */ -} h8300_sci_pins[] = { -#if defined(CONFIG_H83007) || defined(CONFIG_H83068) - { /* SCI0 */ - .port = H8300_GPIO_P9, - .rx = H8300_GPIO_B2, - .tx = H8300_GPIO_B0, - }, - { /* SCI1 */ - .port = H8300_GPIO_P9, - .rx = H8300_GPIO_B3, - .tx = H8300_GPIO_B1, - }, - { /* SCI2 */ - .port = H8300_GPIO_PB, - .rx = H8300_GPIO_B7, - .tx = H8300_GPIO_B6, - } -#elif defined(CONFIG_H8S2678) - { /* SCI0 */ - .port = H8300_GPIO_P3, - .rx = H8300_GPIO_B2, - .tx = H8300_GPIO_B0, - }, - { /* SCI1 */ - .port = H8300_GPIO_P3, - .rx = H8300_GPIO_B3, - .tx = H8300_GPIO_B1, - }, - { /* SCI2 */ - .port = H8300_GPIO_P5, - .rx = H8300_GPIO_B1, - .tx = H8300_GPIO_B0, - } -#endif -}; -#endif - -#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \ - defined(CONFIG_CPU_SUBTYPE_SH7707) || \ - defined(CONFIG_CPU_SUBTYPE_SH7708) || \ - defined(CONFIG_CPU_SUBTYPE_SH7709) -static inline int sci_rxd_in(struct uart_port *port) -{ - if (port->mapbase == 0xfffffe80) - return __raw_readb(SCPDR)&0x01 ? 
1 : 0; /* SCI */ - return 1; -} -#elif defined(CONFIG_CPU_SUBTYPE_SH7750) || \ - defined(CONFIG_CPU_SUBTYPE_SH7751) || \ - defined(CONFIG_CPU_SUBTYPE_SH7751R) || \ - defined(CONFIG_CPU_SUBTYPE_SH7750R) || \ - defined(CONFIG_CPU_SUBTYPE_SH7750S) || \ - defined(CONFIG_CPU_SUBTYPE_SH7091) -static inline int sci_rxd_in(struct uart_port *port) -{ - if (port->mapbase == 0xffe00000) - return __raw_readb(SCSPTR1)&0x01 ? 1 : 0; /* SCI */ - return 1; -} -#elif defined(__H8300H__) || defined(__H8300S__) -static inline int sci_rxd_in(struct uart_port *port) -{ - int ch = (port->mapbase - SMR0) >> 3; - return (H8300_SCI_DR(ch) & h8300_sci_pins[ch].rx) ? 1 : 0; -} -#else /* default case for non-SCI processors */ -static inline int sci_rxd_in(struct uart_port *port) -{ - return 1; -} -#endif diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig index 69407e72aac1..278aeaa92505 100644 --- a/drivers/video/backlight/Kconfig +++ b/drivers/video/backlight/Kconfig @@ -336,7 +336,7 @@ config BACKLIGHT_PCF50633 enable its driver. config BACKLIGHT_AAT2870 - bool "AnalogicTech AAT2870 Backlight" + tristate "AnalogicTech AAT2870 Backlight" depends on BACKLIGHT_CLASS_DEVICE && MFD_AAT2870_CORE help If you have a AnalogicTech AAT2870 say Y to enable the diff --git a/drivers/video/backlight/aat2870_bl.c b/drivers/video/backlight/aat2870_bl.c index 4952a617563d..331f1ef1dad5 100644 --- a/drivers/video/backlight/aat2870_bl.c +++ b/drivers/video/backlight/aat2870_bl.c @@ -44,7 +44,7 @@ static inline int aat2870_brightness(struct aat2870_bl_driver_data *aat2870_bl, struct backlight_device *bd = aat2870_bl->bd; int val; - val = brightness * aat2870_bl->max_current; + val = brightness * (aat2870_bl->max_current - 1); val /= bd->props.max_brightness; return val; @@ -158,10 +158,10 @@ static int aat2870_bl_probe(struct platform_device *pdev) props.type = BACKLIGHT_RAW; bd = backlight_device_register("aat2870-backlight", &pdev->dev, aat2870_bl, &aat2870_bl_ops, &props); - if (!bd) { + if (IS_ERR(bd)) { dev_err(&pdev->dev, "Failed allocate memory for backlight device\n"); - ret = -ENOMEM; + ret = PTR_ERR(bd); goto out_kfree; } @@ -175,7 +175,7 @@ static int aat2870_bl_probe(struct platform_device *pdev) else aat2870_bl->channels = AAT2870_BL_CH_ALL; - if (pdata->max_brightness > 0) + if (pdata->max_current > 0) aat2870_bl->max_current = pdata->max_current; else aat2870_bl->max_current = AAT2870_CURRENT_27_9; diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c index fdd5d4ae437d..4e888ac09b3f 100644 --- a/drivers/video/omap2/displays/panel-taal.c +++ b/drivers/video/omap2/displays/panel-taal.c @@ -504,14 +504,18 @@ static int taal_exit_ulps(struct omap_dss_device *dssdev) return 0; r = omapdss_dsi_display_enable(dssdev); - if (r) - goto err; + if (r) { + dev_err(&dssdev->dev, "failed to enable DSI\n"); + goto err1; + } omapdss_dsi_vc_enable_hs(dssdev, td->channel, true); r = _taal_enable_te(dssdev, true); - if (r) - goto err; + if (r) { + dev_err(&dssdev->dev, "failed to re-enable TE"); + goto err2; + } enable_irq(gpio_to_irq(panel_data->ext_te_gpio)); @@ -521,13 +525,15 @@ static int taal_exit_ulps(struct omap_dss_device *dssdev) return 0; -err: - dev_err(&dssdev->dev, "exit ULPS failed"); - r = taal_panel_reset(dssdev); - - enable_irq(gpio_to_irq(panel_data->ext_te_gpio)); - td->ulps_enabled = false; +err2: + dev_err(&dssdev->dev, "failed to exit ULPS"); + r = taal_panel_reset(dssdev); + if (!r) { + enable_irq(gpio_to_irq(panel_data->ext_te_gpio)); + 
td->ulps_enabled = false; + } +err1: taal_queue_ulps_work(dssdev); return r; @@ -1241,11 +1247,8 @@ static void taal_power_off(struct omap_dss_device *dssdev) int r; r = taal_dcs_write_0(td, DCS_DISPLAY_OFF); - if (!r) { + if (!r) r = taal_sleep_in(td); - /* HACK: wait a bit so that the message goes through */ - msleep(10); - } if (r) { dev_err(&dssdev->dev, @@ -1317,8 +1320,11 @@ static void taal_disable(struct omap_dss_device *dssdev) dsi_bus_lock(dssdev); if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) { - taal_wake_up(dssdev); - taal_power_off(dssdev); + int r; + + r = taal_wake_up(dssdev); + if (!r) + taal_power_off(dssdev); } dsi_bus_unlock(dssdev); @@ -1897,20 +1903,6 @@ err: mutex_unlock(&td->lock); } -static int taal_set_update_mode(struct omap_dss_device *dssdev, - enum omap_dss_update_mode mode) -{ - if (mode != OMAP_DSS_UPDATE_MANUAL) - return -EINVAL; - return 0; -} - -static enum omap_dss_update_mode taal_get_update_mode( - struct omap_dss_device *dssdev) -{ - return OMAP_DSS_UPDATE_MANUAL; -} - static struct omap_dss_driver taal_driver = { .probe = taal_probe, .remove = __exit_p(taal_remove), @@ -1920,9 +1912,6 @@ static struct omap_dss_driver taal_driver = { .suspend = taal_suspend, .resume = taal_resume, - .set_update_mode = taal_set_update_mode, - .get_update_mode = taal_get_update_mode, - .update = taal_update, .sync = taal_sync, diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig index 6b3e2da11419..0d12524db14b 100644 --- a/drivers/video/omap2/dss/Kconfig +++ b/drivers/video/omap2/dss/Kconfig @@ -117,18 +117,6 @@ config OMAP2_DSS_MIN_FCK_PER_PCK Max FCK is 173MHz, so this doesn't work if your PCK is very high. -config OMAP2_DSS_SLEEP_BEFORE_RESET - bool "Sleep 50ms before DSS reset" - default y - help - For some unknown reason we may get SYNC_LOST errors from the display - subsystem at initialization time if we don't sleep before resetting - the DSS. See the source (dss.c) for more comments. - - However, 50ms is quite long time to sleep, and with some - configurations the SYNC_LOST may never happen, so the sleep can - be disabled here. 
- config OMAP2_DSS_SLEEP_AFTER_VENC_RESET bool "Sleep 20ms after VENC reset" default y diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c index 3da426719dd6..76821fefce9a 100644 --- a/drivers/video/omap2/dss/core.c +++ b/drivers/video/omap2/dss/core.c @@ -183,8 +183,11 @@ static int omap_dss_probe(struct platform_device *pdev) goto err_dss; } - /* keep clocks enabled to prevent context saves/restores during init */ - dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); + r = dispc_init_platform_driver(); + if (r) { + DSSERR("Failed to initialize dispc platform driver\n"); + goto err_dispc; + } r = rfbi_init_platform_driver(); if (r) { @@ -192,12 +195,6 @@ static int omap_dss_probe(struct platform_device *pdev) goto err_rfbi; } - r = dispc_init_platform_driver(); - if (r) { - DSSERR("Failed to initialize dispc platform driver\n"); - goto err_dispc; - } - r = venc_init_platform_driver(); if (r) { DSSERR("Failed to initialize venc platform driver\n"); @@ -238,8 +235,6 @@ static int omap_dss_probe(struct platform_device *pdev) pdata->default_device = dssdev; } - dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); - return 0; err_register: @@ -268,11 +263,11 @@ static int omap_dss_remove(struct platform_device *pdev) dss_uninitialize_debugfs(); + hdmi_uninit_platform_driver(); + dsi_uninit_platform_driver(); venc_uninit_platform_driver(); - dispc_uninit_platform_driver(); rfbi_uninit_platform_driver(); - dsi_uninit_platform_driver(); - hdmi_uninit_platform_driver(); + dispc_uninit_platform_driver(); dss_uninit_platform_driver(); dss_uninit_overlays(pdev); diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c index 7a9a2e7d9685..0f3961a1ce26 100644 --- a/drivers/video/omap2/dss/dispc.c +++ b/drivers/video/omap2/dss/dispc.c @@ -33,6 +33,8 @@ #include <linux/workqueue.h> #include <linux/hardirq.h> #include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <plat/sram.h> #include <plat/clock.h> @@ -77,6 +79,12 @@ struct dispc_v_coef { s8 vc00; }; +enum omap_burst_size { + BURST_SIZE_X2 = 0, + BURST_SIZE_X4 = 1, + BURST_SIZE_X8 = 2, +}; + #define REG_GET(idx, start, end) \ FLD_GET(dispc_read_reg(idx), start, end) @@ -92,7 +100,11 @@ struct dispc_irq_stats { static struct { struct platform_device *pdev; void __iomem *base; + + int ctx_loss_cnt; + int irq; + struct clk *dss_clk; u32 fifo_size[3]; @@ -102,6 +114,7 @@ static struct { u32 error_irqs; struct work_struct error_work; + bool ctx_valid; u32 ctx[DISPC_SZ_REGS / sizeof(u32)]; #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS @@ -134,18 +147,34 @@ static inline u32 dispc_read_reg(const u16 idx) return __raw_readl(dispc.base + idx); } +static int dispc_get_ctx_loss_count(void) +{ + struct device *dev = &dispc.pdev->dev; + struct omap_display_platform_data *pdata = dev->platform_data; + struct omap_dss_board_info *board_data = pdata->board_data; + int cnt; + + if (!board_data->get_context_loss_count) + return -ENOENT; + + cnt = board_data->get_context_loss_count(dev); + + WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt); + + return cnt; +} + #define SR(reg) \ dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg) #define RR(reg) \ dispc_write_reg(DISPC_##reg, dispc.ctx[DISPC_##reg / sizeof(u32)]) -void dispc_save_context(void) +static void dispc_save_context(void) { int i; - if (cpu_is_omap24xx()) - return; - SR(SYSCONFIG); + DSSDBG("dispc_save_context\n"); + SR(IRQENABLE); SR(CONTROL); SR(CONFIG); @@ -158,7 +187,8 @@ void dispc_save_context(void) 
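With DISPC clock handling moved over to runtime PM in this series, dispc_save_context()/dispc_restore_context() also gain context-loss tracking: the save path records board_data->get_context_loss_count() in dispc.ctx_loss_cnt, and the restore path returns early when that count is unchanged, so the register writes only happen after the power domain really lost state. A rough standalone sketch of that bookkeeping, with a plain counter standing in for the board callback and a single int standing in for the register snapshot:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for get_context_loss_count(): bumps on every simulated power loss. */
static int hw_loss_events;

static int get_context_loss_count(void)
{
	return hw_loss_events;
}

static struct {
	int saved_regs;		/* stand-in for the real register snapshot */
	int ctx_loss_cnt;
	bool ctx_valid;
} ctx;

static void save_context(int current_regs)
{
	ctx.saved_regs = current_regs;
	ctx.ctx_loss_cnt = get_context_loss_count();
	ctx.ctx_valid = true;
}

static void restore_context(int *hw_regs)
{
	if (!ctx.ctx_valid)
		return;
	/* Skip the writes if the domain never lost power since the save. */
	if (get_context_loss_count() == ctx.ctx_loss_cnt)
		return;
	*hw_regs = ctx.saved_regs;
}

int main(void)
{
	int regs = 42;

	save_context(regs);
	restore_context(&regs);	/* no loss yet: early return, regs untouched */

	hw_loss_events++;	/* simulate an off-mode transition */
	regs = 0;		/* hardware forgot its state */
	restore_context(&regs);	/* count changed: snapshot written back */

	printf("regs = %d\n", regs);	/* prints 42 */
	return 0;
}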
SR(TIMING_V(OMAP_DSS_CHANNEL_LCD)); SR(POL_FREQ(OMAP_DSS_CHANNEL_LCD)); SR(DIVISORo(OMAP_DSS_CHANNEL_LCD)); - SR(GLOBAL_ALPHA); + if (dss_has_feature(FEAT_GLOBAL_ALPHA)) + SR(GLOBAL_ALPHA); SR(SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT)); SR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD)); if (dss_has_feature(FEAT_MGR_LCD2)) { @@ -188,20 +218,25 @@ void dispc_save_context(void) SR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD)); SR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD)); - SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD)); - SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD)); - SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD)); + if (dss_has_feature(FEAT_CPR)) { + SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD)); + SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD)); + SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD)); + } if (dss_has_feature(FEAT_MGR_LCD2)) { - SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2)); - SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2)); - SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2)); + if (dss_has_feature(FEAT_CPR)) { + SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2)); + SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2)); + SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2)); + } SR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2)); SR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2)); SR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2)); } - SR(OVL_PRELOAD(OMAP_DSS_GFX)); + if (dss_has_feature(FEAT_PRELOAD)) + SR(OVL_PRELOAD(OMAP_DSS_GFX)); /* VID1 */ SR(OVL_BA0(OMAP_DSS_VIDEO1)); @@ -226,8 +261,10 @@ void dispc_save_context(void) for (i = 0; i < 5; i++) SR(OVL_CONV_COEF(OMAP_DSS_VIDEO1, i)); - for (i = 0; i < 8; i++) - SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i)); + if (dss_has_feature(FEAT_FIR_COEF_V)) { + for (i = 0; i < 8; i++) + SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i)); + } if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { SR(OVL_BA0_UV(OMAP_DSS_VIDEO1)); @@ -248,7 +285,8 @@ void dispc_save_context(void) if (dss_has_feature(FEAT_ATTR2)) SR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1)); - SR(OVL_PRELOAD(OMAP_DSS_VIDEO1)); + if (dss_has_feature(FEAT_PRELOAD)) + SR(OVL_PRELOAD(OMAP_DSS_VIDEO1)); /* VID2 */ SR(OVL_BA0(OMAP_DSS_VIDEO2)); @@ -273,8 +311,10 @@ void dispc_save_context(void) for (i = 0; i < 5; i++) SR(OVL_CONV_COEF(OMAP_DSS_VIDEO2, i)); - for (i = 0; i < 8; i++) - SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i)); + if (dss_has_feature(FEAT_FIR_COEF_V)) { + for (i = 0; i < 8; i++) + SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i)); + } if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { SR(OVL_BA0_UV(OMAP_DSS_VIDEO2)); @@ -295,16 +335,35 @@ void dispc_save_context(void) if (dss_has_feature(FEAT_ATTR2)) SR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2)); - SR(OVL_PRELOAD(OMAP_DSS_VIDEO2)); + if (dss_has_feature(FEAT_PRELOAD)) + SR(OVL_PRELOAD(OMAP_DSS_VIDEO2)); if (dss_has_feature(FEAT_CORE_CLK_DIV)) SR(DIVISOR); + + dispc.ctx_loss_cnt = dispc_get_ctx_loss_count(); + dispc.ctx_valid = true; + + DSSDBG("context saved, ctx_loss_count %d\n", dispc.ctx_loss_cnt); } -void dispc_restore_context(void) +static void dispc_restore_context(void) { - int i; - RR(SYSCONFIG); + int i, ctx; + + DSSDBG("dispc_restore_context\n"); + + if (!dispc.ctx_valid) + return; + + ctx = dispc_get_ctx_loss_count(); + + if (ctx >= 0 && ctx == dispc.ctx_loss_cnt) + return; + + DSSDBG("ctx_loss_count: saved %d, current %d\n", + dispc.ctx_loss_cnt, ctx); + /*RR(IRQENABLE);*/ /*RR(CONTROL);*/ RR(CONFIG); @@ -317,7 +376,8 @@ void dispc_restore_context(void) RR(TIMING_V(OMAP_DSS_CHANNEL_LCD)); RR(POL_FREQ(OMAP_DSS_CHANNEL_LCD)); RR(DIVISORo(OMAP_DSS_CHANNEL_LCD)); - RR(GLOBAL_ALPHA); + if (dss_has_feature(FEAT_GLOBAL_ALPHA)) + RR(GLOBAL_ALPHA); RR(SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT)); RR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD)); if (dss_has_feature(FEAT_MGR_LCD2)) { @@ -347,20 +407,25 
@@ void dispc_restore_context(void) RR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD)); RR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD)); - RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD)); - RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD)); - RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD)); + if (dss_has_feature(FEAT_CPR)) { + RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD)); + RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD)); + RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD)); + } if (dss_has_feature(FEAT_MGR_LCD2)) { RR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2)); RR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2)); RR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2)); - RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2)); - RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2)); - RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2)); + if (dss_has_feature(FEAT_CPR)) { + RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2)); + RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2)); + RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2)); + } } - RR(OVL_PRELOAD(OMAP_DSS_GFX)); + if (dss_has_feature(FEAT_PRELOAD)) + RR(OVL_PRELOAD(OMAP_DSS_GFX)); /* VID1 */ RR(OVL_BA0(OMAP_DSS_VIDEO1)); @@ -385,8 +450,10 @@ void dispc_restore_context(void) for (i = 0; i < 5; i++) RR(OVL_CONV_COEF(OMAP_DSS_VIDEO1, i)); - for (i = 0; i < 8; i++) - RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i)); + if (dss_has_feature(FEAT_FIR_COEF_V)) { + for (i = 0; i < 8; i++) + RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i)); + } if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { RR(OVL_BA0_UV(OMAP_DSS_VIDEO1)); @@ -407,7 +474,8 @@ void dispc_restore_context(void) if (dss_has_feature(FEAT_ATTR2)) RR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1)); - RR(OVL_PRELOAD(OMAP_DSS_VIDEO1)); + if (dss_has_feature(FEAT_PRELOAD)) + RR(OVL_PRELOAD(OMAP_DSS_VIDEO1)); /* VID2 */ RR(OVL_BA0(OMAP_DSS_VIDEO2)); @@ -432,8 +500,10 @@ void dispc_restore_context(void) for (i = 0; i < 5; i++) RR(OVL_CONV_COEF(OMAP_DSS_VIDEO2, i)); - for (i = 0; i < 8; i++) - RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i)); + if (dss_has_feature(FEAT_FIR_COEF_V)) { + for (i = 0; i < 8; i++) + RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i)); + } if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { RR(OVL_BA0_UV(OMAP_DSS_VIDEO2)); @@ -454,7 +524,8 @@ void dispc_restore_context(void) if (dss_has_feature(FEAT_ATTR2)) RR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2)); - RR(OVL_PRELOAD(OMAP_DSS_VIDEO2)); + if (dss_has_feature(FEAT_PRELOAD)) + RR(OVL_PRELOAD(OMAP_DSS_VIDEO2)); if (dss_has_feature(FEAT_CORE_CLK_DIV)) RR(DIVISOR); @@ -471,19 +542,35 @@ void dispc_restore_context(void) * the context is fully restored */ RR(IRQENABLE); + + DSSDBG("context restored\n"); } #undef SR #undef RR -static inline void enable_clocks(bool enable) +int dispc_runtime_get(void) { - if (enable) - dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); - else - dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); + int r; + + DSSDBG("dispc_runtime_get\n"); + + r = pm_runtime_get_sync(&dispc.pdev->dev); + WARN_ON(r < 0); + return r < 0 ? 
r : 0; } +void dispc_runtime_put(void) +{ + int r; + + DSSDBG("dispc_runtime_put\n"); + + r = pm_runtime_put(&dispc.pdev->dev); + WARN_ON(r < 0); +} + + bool dispc_go_busy(enum omap_channel channel) { int bit; @@ -505,8 +592,6 @@ void dispc_go(enum omap_channel channel) int bit; bool enable_bit, go_bit; - enable_clocks(1); - if (channel == OMAP_DSS_CHANNEL_LCD || channel == OMAP_DSS_CHANNEL_LCD2) bit = 0; /* LCDENABLE */ @@ -520,7 +605,7 @@ void dispc_go(enum omap_channel channel) enable_bit = REG_GET(DISPC_CONTROL, bit, bit) == 1; if (!enable_bit) - goto end; + return; if (channel == OMAP_DSS_CHANNEL_LCD || channel == OMAP_DSS_CHANNEL_LCD2) @@ -535,7 +620,7 @@ void dispc_go(enum omap_channel channel) if (go_bit) { DSSERR("GO bit not down for channel %d\n", channel); - goto end; + return; } DSSDBG("GO %s\n", channel == OMAP_DSS_CHANNEL_LCD ? "LCD" : @@ -545,8 +630,6 @@ void dispc_go(enum omap_channel channel) REG_FLD_MOD(DISPC_CONTROL2, 1, bit, bit); else REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit); -end: - enable_clocks(0); } static void _dispc_write_firh_reg(enum omap_plane plane, int reg, u32 value) @@ -920,7 +1003,7 @@ static void _dispc_set_color_mode(enum omap_plane plane, REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1); } -static void _dispc_set_channel_out(enum omap_plane plane, +void dispc_set_channel_out(enum omap_plane plane, enum omap_channel channel) { int shift; @@ -967,13 +1050,10 @@ static void _dispc_set_channel_out(enum omap_plane plane, dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val); } -void dispc_set_burst_size(enum omap_plane plane, +static void dispc_set_burst_size(enum omap_plane plane, enum omap_burst_size burst_size) { int shift; - u32 val; - - enable_clocks(1); switch (plane) { case OMAP_DSS_GFX: @@ -988,11 +1068,24 @@ void dispc_set_burst_size(enum omap_plane plane, return; } - val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane)); - val = FLD_MOD(val, burst_size, shift+1, shift); - dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val); + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), burst_size, shift + 1, shift); +} - enable_clocks(0); +static void dispc_configure_burst_sizes(void) +{ + int i; + const int burst_size = BURST_SIZE_X8; + + /* Configure burst size always to maximum size */ + for (i = 0; i < omap_dss_get_num_overlays(); ++i) + dispc_set_burst_size(i, burst_size); +} + +u32 dispc_get_burst_size(enum omap_plane plane) +{ + unsigned unit = dss_feat_get_burst_size_unit(); + /* burst multiplier is always x8 (see dispc_configure_burst_sizes()) */ + return unit * 8; } void dispc_enable_gamma_table(bool enable) @@ -1009,6 +1102,40 @@ void dispc_enable_gamma_table(bool enable) REG_FLD_MOD(DISPC_CONFIG, enable, 9, 9); } +void dispc_enable_cpr(enum omap_channel channel, bool enable) +{ + u16 reg; + + if (channel == OMAP_DSS_CHANNEL_LCD) + reg = DISPC_CONFIG; + else if (channel == OMAP_DSS_CHANNEL_LCD2) + reg = DISPC_CONFIG2; + else + return; + + REG_FLD_MOD(reg, enable, 15, 15); +} + +void dispc_set_cpr_coef(enum omap_channel channel, + struct omap_dss_cpr_coefs *coefs) +{ + u32 coef_r, coef_g, coef_b; + + if (channel != OMAP_DSS_CHANNEL_LCD && channel != OMAP_DSS_CHANNEL_LCD2) + return; + + coef_r = FLD_VAL(coefs->rr, 31, 22) | FLD_VAL(coefs->rg, 20, 11) | + FLD_VAL(coefs->rb, 9, 0); + coef_g = FLD_VAL(coefs->gr, 31, 22) | FLD_VAL(coefs->gg, 20, 11) | + FLD_VAL(coefs->gb, 9, 0); + coef_b = FLD_VAL(coefs->br, 31, 22) | FLD_VAL(coefs->bg, 20, 11) | + FLD_VAL(coefs->bb, 9, 0); + + dispc_write_reg(DISPC_CPR_COEF_R(channel), coef_r); + 
dispc_write_reg(DISPC_CPR_COEF_G(channel), coef_g); + dispc_write_reg(DISPC_CPR_COEF_B(channel), coef_b); +} + static void _dispc_set_vid_color_conv(enum omap_plane plane, bool enable) { u32 val; @@ -1029,9 +1156,7 @@ void dispc_enable_replication(enum omap_plane plane, bool enable) else bit = 10; - enable_clocks(1); REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, bit, bit); - enable_clocks(0); } void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height) @@ -1039,9 +1164,7 @@ void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height) u32 val; BUG_ON((width > (1 << 11)) || (height > (1 << 11))); val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); - enable_clocks(1); dispc_write_reg(DISPC_SIZE_MGR(channel), val); - enable_clocks(0); } void dispc_set_digit_size(u16 width, u16 height) @@ -1049,9 +1172,7 @@ void dispc_set_digit_size(u16 width, u16 height) u32 val; BUG_ON((width > (1 << 11)) || (height > (1 << 11))); val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); - enable_clocks(1); dispc_write_reg(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT), val); - enable_clocks(0); } static void dispc_read_plane_fifo_sizes(void) @@ -1059,18 +1180,17 @@ static void dispc_read_plane_fifo_sizes(void) u32 size; int plane; u8 start, end; + u32 unit; - enable_clocks(1); + unit = dss_feat_get_buffer_size_unit(); dss_feat_get_reg_field(FEAT_REG_FIFOSIZE, &start, &end); for (plane = 0; plane < ARRAY_SIZE(dispc.fifo_size); ++plane) { - size = FLD_GET(dispc_read_reg(DISPC_OVL_FIFO_SIZE_STATUS(plane)), - start, end); + size = REG_GET(DISPC_OVL_FIFO_SIZE_STATUS(plane), start, end); + size *= unit; dispc.fifo_size[plane] = size; } - - enable_clocks(0); } u32 dispc_get_plane_fifo_size(enum omap_plane plane) @@ -1078,15 +1198,22 @@ u32 dispc_get_plane_fifo_size(enum omap_plane plane) return dispc.fifo_size[plane]; } -void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high) +void dispc_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high) { u8 hi_start, hi_end, lo_start, lo_end; + u32 unit; + + unit = dss_feat_get_buffer_size_unit(); + + WARN_ON(low % unit != 0); + WARN_ON(high % unit != 0); + + low /= unit; + high /= unit; dss_feat_get_reg_field(FEAT_REG_FIFOHIGHTHRESHOLD, &hi_start, &hi_end); dss_feat_get_reg_field(FEAT_REG_FIFOLOWTHRESHOLD, &lo_start, &lo_end); - enable_clocks(1); - DSSDBG("fifo(%d) low/high old %u/%u, new %u/%u\n", plane, REG_GET(DISPC_OVL_FIFO_THRESHOLD(plane), @@ -1098,18 +1225,12 @@ void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high) dispc_write_reg(DISPC_OVL_FIFO_THRESHOLD(plane), FLD_VAL(high, hi_start, hi_end) | FLD_VAL(low, lo_start, lo_end)); - - enable_clocks(0); } void dispc_enable_fifomerge(bool enable) { - enable_clocks(1); - DSSDBG("FIFO merge %s\n", enable ? "enabled" : "disabled"); REG_FLD_MOD(DISPC_CONFIG, enable ? 
1 : 0, 14, 14); - - enable_clocks(0); } static void _dispc_set_fir(enum omap_plane plane, @@ -1729,14 +1850,7 @@ static unsigned long calc_fclk(enum omap_channel channel, u16 width, return dispc_pclk_rate(channel) * vf * hf; } -void dispc_set_channel_out(enum omap_plane plane, enum omap_channel channel_out) -{ - enable_clocks(1); - _dispc_set_channel_out(plane, channel_out); - enable_clocks(0); -} - -static int _dispc_setup_plane(enum omap_plane plane, +int dispc_setup_plane(enum omap_plane plane, u32 paddr, u16 screen_width, u16 pos_x, u16 pos_y, u16 width, u16 height, @@ -1744,7 +1858,7 @@ static int _dispc_setup_plane(enum omap_plane plane, enum omap_color_mode color_mode, bool ilace, enum omap_dss_rotation_type rotation_type, - u8 rotation, int mirror, + u8 rotation, bool mirror, u8 global_alpha, u8 pre_mult_alpha, enum omap_channel channel, u32 puv_addr) { @@ -1758,6 +1872,14 @@ static int _dispc_setup_plane(enum omap_plane plane, u16 frame_height = height; unsigned int field_offset = 0; + DSSDBG("dispc_setup_plane %d, pa %x, sw %d, %d,%d, %dx%d -> " + "%dx%d, ilace %d, cmode %x, rot %d, mir %d chan %d\n", + plane, paddr, screen_width, pos_x, pos_y, + width, height, + out_width, out_height, + ilace, color_mode, + rotation, mirror, channel); + if (paddr == 0) return -EINVAL; @@ -1903,9 +2025,13 @@ static int _dispc_setup_plane(enum omap_plane plane, return 0; } -static void _dispc_enable_plane(enum omap_plane plane, bool enable) +int dispc_enable_plane(enum omap_plane plane, bool enable) { + DSSDBG("dispc_enable_plane %d, %d\n", plane, enable); + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 0, 0); + + return 0; } static void dispc_disable_isr(void *data, u32 mask) @@ -1929,8 +2055,6 @@ static void dispc_enable_lcd_out(enum omap_channel channel, bool enable) int r; u32 irq; - enable_clocks(1); - /* When we disable LCD output, we need to wait until frame is done. * Otherwise the DSS is still working, and turning off the clocks * prevents DSS from going to OFF mode */ @@ -1964,8 +2088,6 @@ static void dispc_enable_lcd_out(enum omap_channel channel, bool enable) if (r) DSSERR("failed to unregister FRAMEDONE isr\n"); } - - enable_clocks(0); } static void _enable_digit_out(bool enable) @@ -1978,12 +2100,8 @@ static void dispc_enable_digit_out(bool enable) struct completion frame_done_completion; int r; - enable_clocks(1); - - if (REG_GET(DISPC_CONTROL, 1, 1) == enable) { - enable_clocks(0); + if (REG_GET(DISPC_CONTROL, 1, 1) == enable) return; - } if (enable) { unsigned long flags; @@ -2035,8 +2153,6 @@ static void dispc_enable_digit_out(bool enable) _omap_dispc_set_irqs(); spin_unlock_irqrestore(&dispc.irq_lock, flags); } - - enable_clocks(0); } bool dispc_is_channel_enabled(enum omap_channel channel) @@ -2067,9 +2183,7 @@ void dispc_lcd_enable_signal_polarity(bool act_high) if (!dss_has_feature(FEAT_LCDENABLEPOL)) return; - enable_clocks(1); REG_FLD_MOD(DISPC_CONTROL, act_high ? 1 : 0, 29, 29); - enable_clocks(0); } void dispc_lcd_enable_signal(bool enable) @@ -2077,9 +2191,7 @@ void dispc_lcd_enable_signal(bool enable) if (!dss_has_feature(FEAT_LCDENABLESIGNAL)) return; - enable_clocks(1); REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 28, 28); - enable_clocks(0); } void dispc_pck_free_enable(bool enable) @@ -2087,19 +2199,15 @@ void dispc_pck_free_enable(bool enable) if (!dss_has_feature(FEAT_PCKFREEENABLE)) return; - enable_clocks(1); REG_FLD_MOD(DISPC_CONTROL, enable ? 
1 : 0, 27, 27); - enable_clocks(0); } void dispc_enable_fifohandcheck(enum omap_channel channel, bool enable) { - enable_clocks(1); if (channel == OMAP_DSS_CHANNEL_LCD2) REG_FLD_MOD(DISPC_CONFIG2, enable ? 1 : 0, 16, 16); else REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 16, 16); - enable_clocks(0); } @@ -2122,27 +2230,21 @@ void dispc_set_lcd_display_type(enum omap_channel channel, return; } - enable_clocks(1); if (channel == OMAP_DSS_CHANNEL_LCD2) REG_FLD_MOD(DISPC_CONTROL2, mode, 3, 3); else REG_FLD_MOD(DISPC_CONTROL, mode, 3, 3); - enable_clocks(0); } void dispc_set_loadmode(enum omap_dss_load_mode mode) { - enable_clocks(1); REG_FLD_MOD(DISPC_CONFIG, mode, 2, 1); - enable_clocks(0); } void dispc_set_default_color(enum omap_channel channel, u32 color) { - enable_clocks(1); dispc_write_reg(DISPC_DEFAULT_COLOR(channel), color); - enable_clocks(0); } u32 dispc_get_default_color(enum omap_channel channel) @@ -2153,9 +2255,7 @@ u32 dispc_get_default_color(enum omap_channel channel) channel != OMAP_DSS_CHANNEL_LCD && channel != OMAP_DSS_CHANNEL_LCD2); - enable_clocks(1); l = dispc_read_reg(DISPC_DEFAULT_COLOR(channel)); - enable_clocks(0); return l; } @@ -2164,7 +2264,6 @@ void dispc_set_trans_key(enum omap_channel ch, enum omap_dss_trans_key_type type, u32 trans_key) { - enable_clocks(1); if (ch == OMAP_DSS_CHANNEL_LCD) REG_FLD_MOD(DISPC_CONFIG, type, 11, 11); else if (ch == OMAP_DSS_CHANNEL_DIGIT) @@ -2173,14 +2272,12 @@ void dispc_set_trans_key(enum omap_channel ch, REG_FLD_MOD(DISPC_CONFIG2, type, 11, 11); dispc_write_reg(DISPC_TRANS_COLOR(ch), trans_key); - enable_clocks(0); } void dispc_get_trans_key(enum omap_channel ch, enum omap_dss_trans_key_type *type, u32 *trans_key) { - enable_clocks(1); if (type) { if (ch == OMAP_DSS_CHANNEL_LCD) *type = REG_GET(DISPC_CONFIG, 11, 11); @@ -2194,33 +2291,28 @@ void dispc_get_trans_key(enum omap_channel ch, if (trans_key) *trans_key = dispc_read_reg(DISPC_TRANS_COLOR(ch)); - enable_clocks(0); } void dispc_enable_trans_key(enum omap_channel ch, bool enable) { - enable_clocks(1); if (ch == OMAP_DSS_CHANNEL_LCD) REG_FLD_MOD(DISPC_CONFIG, enable, 10, 10); else if (ch == OMAP_DSS_CHANNEL_DIGIT) REG_FLD_MOD(DISPC_CONFIG, enable, 12, 12); else /* OMAP_DSS_CHANNEL_LCD2 */ REG_FLD_MOD(DISPC_CONFIG2, enable, 10, 10); - enable_clocks(0); } void dispc_enable_alpha_blending(enum omap_channel ch, bool enable) { if (!dss_has_feature(FEAT_GLOBAL_ALPHA)) return; - enable_clocks(1); if (ch == OMAP_DSS_CHANNEL_LCD) REG_FLD_MOD(DISPC_CONFIG, enable, 18, 18); else if (ch == OMAP_DSS_CHANNEL_DIGIT) REG_FLD_MOD(DISPC_CONFIG, enable, 19, 19); else /* OMAP_DSS_CHANNEL_LCD2 */ REG_FLD_MOD(DISPC_CONFIG2, enable, 18, 18); - enable_clocks(0); } bool dispc_alpha_blending_enabled(enum omap_channel ch) { @@ -2229,7 +2321,6 @@ bool dispc_alpha_blending_enabled(enum omap_channel ch) if (!dss_has_feature(FEAT_GLOBAL_ALPHA)) return false; - enable_clocks(1); if (ch == OMAP_DSS_CHANNEL_LCD) enabled = REG_GET(DISPC_CONFIG, 18, 18); else if (ch == OMAP_DSS_CHANNEL_DIGIT) @@ -2238,7 +2329,6 @@ bool dispc_alpha_blending_enabled(enum omap_channel ch) enabled = REG_GET(DISPC_CONFIG2, 18, 18); else BUG(); - enable_clocks(0); return enabled; } @@ -2248,7 +2338,6 @@ bool dispc_trans_key_enabled(enum omap_channel ch) { bool enabled; - enable_clocks(1); if (ch == OMAP_DSS_CHANNEL_LCD) enabled = REG_GET(DISPC_CONFIG, 10, 10); else if (ch == OMAP_DSS_CHANNEL_DIGIT) @@ -2257,7 +2346,6 @@ bool dispc_trans_key_enabled(enum omap_channel ch) enabled = REG_GET(DISPC_CONFIG2, 10, 10); else BUG(); - 
enable_clocks(0); return enabled; } @@ -2285,12 +2373,10 @@ void dispc_set_tft_data_lines(enum omap_channel channel, u8 data_lines) return; } - enable_clocks(1); if (channel == OMAP_DSS_CHANNEL_LCD2) REG_FLD_MOD(DISPC_CONTROL2, code, 9, 8); else REG_FLD_MOD(DISPC_CONTROL, code, 9, 8); - enable_clocks(0); } void dispc_set_parallel_interface_mode(enum omap_channel channel, @@ -2322,8 +2408,6 @@ void dispc_set_parallel_interface_mode(enum omap_channel channel, return; } - enable_clocks(1); - if (channel == OMAP_DSS_CHANNEL_LCD2) { l = dispc_read_reg(DISPC_CONTROL2); l = FLD_MOD(l, stallmode, 11, 11); @@ -2335,8 +2419,6 @@ void dispc_set_parallel_interface_mode(enum omap_channel channel, l = FLD_MOD(l, gpout1, 16, 16); dispc_write_reg(DISPC_CONTROL, l); } - - enable_clocks(0); } static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp, @@ -2389,10 +2471,8 @@ static void _dispc_set_lcd_timings(enum omap_channel channel, int hsw, FLD_VAL(vbp, 31, 20); } - enable_clocks(1); dispc_write_reg(DISPC_TIMING_H(channel), timing_h); dispc_write_reg(DISPC_TIMING_V(channel), timing_v); - enable_clocks(0); } /* change name to mode? */ @@ -2435,10 +2515,8 @@ static void dispc_set_lcd_divisor(enum omap_channel channel, u16 lck_div, BUG_ON(lck_div < 1); BUG_ON(pck_div < 2); - enable_clocks(1); dispc_write_reg(DISPC_DIVISORo(channel), FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0)); - enable_clocks(0); } static void dispc_get_lcd_divisor(enum omap_channel channel, int *lck_div, @@ -2457,7 +2535,7 @@ unsigned long dispc_fclk_rate(void) switch (dss_get_dispc_clk_source()) { case OMAP_DSS_CLK_SRC_FCK: - r = dss_clk_get_rate(DSS_CLK_FCK); + r = clk_get_rate(dispc.dss_clk); break; case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: dsidev = dsi_get_dsidev_from_id(0); @@ -2487,7 +2565,7 @@ unsigned long dispc_lclk_rate(enum omap_channel channel) switch (dss_get_lcd_clk_source(channel)) { case OMAP_DSS_CLK_SRC_FCK: - r = dss_clk_get_rate(DSS_CLK_FCK); + r = clk_get_rate(dispc.dss_clk); break; case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: dsidev = dsi_get_dsidev_from_id(0); @@ -2526,7 +2604,8 @@ void dispc_dump_clocks(struct seq_file *s) enum omap_dss_clk_source dispc_clk_src = dss_get_dispc_clk_source(); enum omap_dss_clk_source lcd_clk_src; - enable_clocks(1); + if (dispc_runtime_get()) + return; seq_printf(s, "- DISPC -\n"); @@ -2574,7 +2653,8 @@ void dispc_dump_clocks(struct seq_file *s) seq_printf(s, "pck\t\t%-16lupck div\t%u\n", dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD2), pcd); } - enable_clocks(0); + + dispc_runtime_put(); } #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS @@ -2629,7 +2709,8 @@ void dispc_dump_regs(struct seq_file *s) { #define DUMPREG(r) seq_printf(s, "%-50s %08x\n", #r, dispc_read_reg(r)) - dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); + if (dispc_runtime_get()) + return; DUMPREG(DISPC_REVISION); DUMPREG(DISPC_SYSCONFIG); @@ -2649,7 +2730,8 @@ void dispc_dump_regs(struct seq_file *s) DUMPREG(DISPC_TIMING_V(OMAP_DSS_CHANNEL_LCD)); DUMPREG(DISPC_POL_FREQ(OMAP_DSS_CHANNEL_LCD)); DUMPREG(DISPC_DIVISORo(OMAP_DSS_CHANNEL_LCD)); - DUMPREG(DISPC_GLOBAL_ALPHA); + if (dss_has_feature(FEAT_GLOBAL_ALPHA)) + DUMPREG(DISPC_GLOBAL_ALPHA); DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT)); DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_LCD)); if (dss_has_feature(FEAT_MGR_LCD2)) { @@ -2680,20 +2762,25 @@ void dispc_dump_regs(struct seq_file *s) DUMPREG(DISPC_DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD)); DUMPREG(DISPC_DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD)); - DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD)); - 
DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD)); - DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD)); + if (dss_has_feature(FEAT_CPR)) { + DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD)); + DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD)); + DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD)); + } if (dss_has_feature(FEAT_MGR_LCD2)) { DUMPREG(DISPC_DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2)); DUMPREG(DISPC_DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2)); DUMPREG(DISPC_DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2)); - DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2)); - DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2)); - DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2)); + if (dss_has_feature(FEAT_CPR)) { + DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2)); + DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2)); + DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2)); + } } - DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_GFX)); + if (dss_has_feature(FEAT_PRELOAD)) + DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_GFX)); DUMPREG(DISPC_OVL_BA0(OMAP_DSS_VIDEO1)); DUMPREG(DISPC_OVL_BA1(OMAP_DSS_VIDEO1)); @@ -2744,14 +2831,16 @@ void dispc_dump_regs(struct seq_file *s) DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 2)); DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 3)); DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 4)); - DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 0)); - DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 1)); - DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 2)); - DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 3)); - DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 4)); - DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 5)); - DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 6)); - DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 7)); + if (dss_has_feature(FEAT_FIR_COEF_V)) { + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 0)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 1)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 2)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 3)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 4)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 5)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 6)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 7)); + } if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { DUMPREG(DISPC_OVL_BA0_UV(OMAP_DSS_VIDEO1)); @@ -2812,14 +2901,17 @@ void dispc_dump_regs(struct seq_file *s) DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 2)); DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 3)); DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 4)); - DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 0)); - DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 1)); - DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 2)); - DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 3)); - DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 4)); - DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 5)); - DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 6)); - DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 7)); + + if (dss_has_feature(FEAT_FIR_COEF_V)) { + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 0)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 1)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 2)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 3)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 4)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 5)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 6)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 7)); + } if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { DUMPREG(DISPC_OVL_BA0_UV(OMAP_DSS_VIDEO2)); @@ -2858,10 +2950,12 @@ void dispc_dump_regs(struct seq_file *s) if (dss_has_feature(FEAT_ATTR2)) 
DUMPREG(DISPC_OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2)); - DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO1)); - DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO2)); + if (dss_has_feature(FEAT_PRELOAD)) { + DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO1)); + DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO2)); + } - dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); + dispc_runtime_put(); #undef DUMPREG } @@ -2882,9 +2976,7 @@ static void _dispc_set_pol_freq(enum omap_channel channel, bool onoff, bool rf, l |= FLD_VAL(acbi, 11, 8); l |= FLD_VAL(acb, 7, 0); - enable_clocks(1); dispc_write_reg(DISPC_POL_FREQ(channel), l); - enable_clocks(0); } void dispc_set_pol_freq(enum omap_channel channel, @@ -3005,15 +3097,11 @@ static void _omap_dispc_set_irqs(void) mask |= isr_data->mask; } - enable_clocks(1); - old_mask = dispc_read_reg(DISPC_IRQENABLE); /* clear the irqstatus for newly enabled irqs */ dispc_write_reg(DISPC_IRQSTATUS, (mask ^ old_mask) & mask); dispc_write_reg(DISPC_IRQENABLE, mask); - - enable_clocks(0); } int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask) @@ -3522,13 +3610,6 @@ static void _omap_dispc_initial_config(void) { u32 l; - l = dispc_read_reg(DISPC_SYSCONFIG); - l = FLD_MOD(l, 2, 13, 12); /* MIDLEMODE: smart standby */ - l = FLD_MOD(l, 2, 4, 3); /* SIDLEMODE: smart idle */ - l = FLD_MOD(l, 1, 2, 2); /* ENWAKEUP */ - l = FLD_MOD(l, 1, 0, 0); /* AUTOIDLE */ - dispc_write_reg(DISPC_SYSCONFIG, l); - /* Exclusively enable DISPC_CORE_CLK and set divider to 1 */ if (dss_has_feature(FEAT_CORE_CLK_DIV)) { l = dispc_read_reg(DISPC_DIVISOR); @@ -3552,58 +3633,8 @@ static void _omap_dispc_initial_config(void) dispc_set_loadmode(OMAP_DSS_LOAD_FRAME_ONLY); dispc_read_plane_fifo_sizes(); -} -int dispc_enable_plane(enum omap_plane plane, bool enable) -{ - DSSDBG("dispc_enable_plane %d, %d\n", plane, enable); - - enable_clocks(1); - _dispc_enable_plane(plane, enable); - enable_clocks(0); - - return 0; -} - -int dispc_setup_plane(enum omap_plane plane, - u32 paddr, u16 screen_width, - u16 pos_x, u16 pos_y, - u16 width, u16 height, - u16 out_width, u16 out_height, - enum omap_color_mode color_mode, - bool ilace, - enum omap_dss_rotation_type rotation_type, - u8 rotation, bool mirror, u8 global_alpha, - u8 pre_mult_alpha, enum omap_channel channel, - u32 puv_addr) -{ - int r = 0; - - DSSDBG("dispc_setup_plane %d, pa %x, sw %d, %d, %d, %dx%d -> " - "%dx%d, ilace %d, cmode %x, rot %d, mir %d chan %d\n", - plane, paddr, screen_width, pos_x, pos_y, - width, height, - out_width, out_height, - ilace, color_mode, - rotation, mirror, channel); - - enable_clocks(1); - - r = _dispc_setup_plane(plane, - paddr, screen_width, - pos_x, pos_y, - width, height, - out_width, out_height, - color_mode, ilace, - rotation_type, - rotation, mirror, - global_alpha, - pre_mult_alpha, - channel, puv_addr); - - enable_clocks(0); - - return r; + dispc_configure_burst_sizes(); } /* DISPC HW IP initialisation */ @@ -3612,9 +3643,19 @@ static int omap_dispchw_probe(struct platform_device *pdev) u32 rev; int r = 0; struct resource *dispc_mem; + struct clk *clk; dispc.pdev = pdev; + clk = clk_get(&pdev->dev, "fck"); + if (IS_ERR(clk)) { + DSSERR("can't get fck\n"); + r = PTR_ERR(clk); + goto err_get_clk; + } + + dispc.dss_clk = clk; + spin_lock_init(&dispc.irq_lock); #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS @@ -3628,62 +3669,103 @@ static int omap_dispchw_probe(struct platform_device *pdev) if (!dispc_mem) { DSSERR("can't get IORESOURCE_MEM DISPC\n"); r = -EINVAL; - goto fail0; + goto err_ioremap; } dispc.base = ioremap(dispc_mem->start, 
resource_size(dispc_mem)); if (!dispc.base) { DSSERR("can't ioremap DISPC\n"); r = -ENOMEM; - goto fail0; + goto err_ioremap; } dispc.irq = platform_get_irq(dispc.pdev, 0); if (dispc.irq < 0) { DSSERR("platform_get_irq failed\n"); r = -ENODEV; - goto fail1; + goto err_irq; } r = request_irq(dispc.irq, omap_dispc_irq_handler, IRQF_SHARED, "OMAP DISPC", dispc.pdev); if (r < 0) { DSSERR("request_irq failed\n"); - goto fail1; + goto err_irq; } - enable_clocks(1); + pm_runtime_enable(&pdev->dev); + + r = dispc_runtime_get(); + if (r) + goto err_runtime_get; _omap_dispc_initial_config(); _omap_dispc_initialize_irq(); - dispc_save_context(); - rev = dispc_read_reg(DISPC_REVISION); dev_dbg(&pdev->dev, "OMAP DISPC rev %d.%d\n", FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); - enable_clocks(0); + dispc_runtime_put(); return 0; -fail1: + +err_runtime_get: + pm_runtime_disable(&pdev->dev); + free_irq(dispc.irq, dispc.pdev); +err_irq: iounmap(dispc.base); -fail0: +err_ioremap: + clk_put(dispc.dss_clk); +err_get_clk: return r; } static int omap_dispchw_remove(struct platform_device *pdev) { + pm_runtime_disable(&pdev->dev); + + clk_put(dispc.dss_clk); + free_irq(dispc.irq, dispc.pdev); iounmap(dispc.base); return 0; } +static int dispc_runtime_suspend(struct device *dev) +{ + dispc_save_context(); + clk_disable(dispc.dss_clk); + dss_runtime_put(); + + return 0; +} + +static int dispc_runtime_resume(struct device *dev) +{ + int r; + + r = dss_runtime_get(); + if (r < 0) + return r; + + clk_enable(dispc.dss_clk); + dispc_restore_context(); + + return 0; +} + +static const struct dev_pm_ops dispc_pm_ops = { + .runtime_suspend = dispc_runtime_suspend, + .runtime_resume = dispc_runtime_resume, +}; + static struct platform_driver omap_dispchw_driver = { .probe = omap_dispchw_probe, .remove = omap_dispchw_remove, .driver = { .name = "omapdss_dispc", .owner = THIS_MODULE, + .pm = &dispc_pm_ops, }, }; diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c index c2dfc8c50057..94495e45ec5a 100644 --- a/drivers/video/omap2/dss/display.c +++ b/drivers/video/omap2/dss/display.c @@ -29,6 +29,7 @@ #include <video/omapdss.h> #include "dss.h" +#include "dss_features.h" static ssize_t display_enabled_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -65,48 +66,6 @@ static ssize_t display_enabled_store(struct device *dev, return size; } -static ssize_t display_upd_mode_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct omap_dss_device *dssdev = to_dss_device(dev); - enum omap_dss_update_mode mode = OMAP_DSS_UPDATE_AUTO; - if (dssdev->driver->get_update_mode) - mode = dssdev->driver->get_update_mode(dssdev); - return snprintf(buf, PAGE_SIZE, "%d\n", mode); -} - -static ssize_t display_upd_mode_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - struct omap_dss_device *dssdev = to_dss_device(dev); - int val, r; - enum omap_dss_update_mode mode; - - if (!dssdev->driver->set_update_mode) - return -EINVAL; - - r = kstrtoint(buf, 0, &val); - if (r) - return r; - - switch (val) { - case OMAP_DSS_UPDATE_DISABLED: - case OMAP_DSS_UPDATE_AUTO: - case OMAP_DSS_UPDATE_MANUAL: - mode = (enum omap_dss_update_mode)val; - break; - default: - return -EINVAL; - } - - r = dssdev->driver->set_update_mode(dssdev, mode); - if (r) - return r; - - return size; -} - static ssize_t display_tear_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -294,8 +253,6 @@ static ssize_t display_wss_store(struct device *dev, 
static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR, display_enabled_show, display_enabled_store); -static DEVICE_ATTR(update_mode, S_IRUGO|S_IWUSR, - display_upd_mode_show, display_upd_mode_store); static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR, display_tear_show, display_tear_store); static DEVICE_ATTR(timings, S_IRUGO|S_IWUSR, @@ -309,7 +266,6 @@ static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR, static struct device_attribute *display_sysfs_attrs[] = { &dev_attr_enabled, - &dev_attr_update_mode, &dev_attr_tear_elim, &dev_attr_timings, &dev_attr_rotate, @@ -327,16 +283,13 @@ void omapdss_default_get_resolution(struct omap_dss_device *dssdev, EXPORT_SYMBOL(omapdss_default_get_resolution); void default_get_overlay_fifo_thresholds(enum omap_plane plane, - u32 fifo_size, enum omap_burst_size *burst_size, + u32 fifo_size, u32 burst_size, u32 *fifo_low, u32 *fifo_high) { - unsigned burst_size_bytes; - - *burst_size = OMAP_DSS_BURST_16x32; - burst_size_bytes = 16 * 32 / 8; + unsigned buf_unit = dss_feat_get_buffer_size_unit(); - *fifo_high = fifo_size - 1; - *fifo_low = fifo_size - burst_size_bytes; + *fifo_high = fifo_size - buf_unit; + *fifo_low = fifo_size - burst_size; } int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev) diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c index ff6bd30132df..f053b180ecd7 100644 --- a/drivers/video/omap2/dss/dpi.c +++ b/drivers/video/omap2/dss/dpi.c @@ -23,7 +23,6 @@ #define DSS_SUBSYS_NAME "DPI" #include <linux/kernel.h> -#include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/errno.h> @@ -130,8 +129,6 @@ static int dpi_set_mode(struct omap_dss_device *dssdev) bool is_tft; int r = 0; - dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); - dispc_set_pol_freq(dssdev->manager->id, dssdev->panel.config, dssdev->panel.acbi, dssdev->panel.acb); @@ -144,7 +141,7 @@ static int dpi_set_mode(struct omap_dss_device *dssdev) r = dpi_set_dispc_clk(dssdev, is_tft, t->pixel_clock * 1000, &fck, &lck_div, &pck_div); if (r) - goto err0; + return r; pck = fck / lck_div / pck_div / 1000; @@ -158,12 +155,10 @@ static int dpi_set_mode(struct omap_dss_device *dssdev) dispc_set_lcd_timings(dssdev->manager->id, t); -err0: - dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); - return r; + return 0; } -static int dpi_basic_init(struct omap_dss_device *dssdev) +static void dpi_basic_init(struct omap_dss_device *dssdev) { bool is_tft; @@ -175,8 +170,6 @@ static int dpi_basic_init(struct omap_dss_device *dssdev) OMAP_DSS_LCD_DISPLAY_TFT : OMAP_DSS_LCD_DISPLAY_STN); dispc_set_tft_data_lines(dssdev->manager->id, dssdev->phy.dpi.data_lines); - - return 0; } int omapdss_dpi_display_enable(struct omap_dss_device *dssdev) @@ -186,31 +179,38 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev) r = omap_dss_start_device(dssdev); if (r) { DSSERR("failed to start device\n"); - goto err0; + goto err_start_dev; } if (cpu_is_omap34xx()) { r = regulator_enable(dpi.vdds_dsi_reg); if (r) - goto err1; + goto err_reg_enable; } - dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); + r = dss_runtime_get(); + if (r) + goto err_get_dss; - r = dpi_basic_init(dssdev); + r = dispc_runtime_get(); if (r) - goto err2; + goto err_get_dispc; + + dpi_basic_init(dssdev); if (dpi_use_dsi_pll(dssdev)) { - dss_clk_enable(DSS_CLK_SYSCK); + r = dsi_runtime_get(dpi.dsidev); + if (r) + goto err_get_dsi; + r = dsi_pll_init(dpi.dsidev, 0, 1); if (r) - goto err3; + goto err_dsi_pll_init; } r = dpi_set_mode(dssdev); if (r) - goto err4; + goto err_set_mode; mdelay(2); @@ -218,19 
+218,22 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev) return 0; -err4: +err_set_mode: if (dpi_use_dsi_pll(dssdev)) dsi_pll_uninit(dpi.dsidev, true); -err3: +err_dsi_pll_init: if (dpi_use_dsi_pll(dssdev)) - dss_clk_disable(DSS_CLK_SYSCK); -err2: - dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); + dsi_runtime_put(dpi.dsidev); +err_get_dsi: + dispc_runtime_put(); +err_get_dispc: + dss_runtime_put(); +err_get_dss: if (cpu_is_omap34xx()) regulator_disable(dpi.vdds_dsi_reg); -err1: +err_reg_enable: omap_dss_stop_device(dssdev); -err0: +err_start_dev: return r; } EXPORT_SYMBOL(omapdss_dpi_display_enable); @@ -242,10 +245,11 @@ void omapdss_dpi_display_disable(struct omap_dss_device *dssdev) if (dpi_use_dsi_pll(dssdev)) { dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); dsi_pll_uninit(dpi.dsidev, true); - dss_clk_disable(DSS_CLK_SYSCK); + dsi_runtime_put(dpi.dsidev); } - dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); + dispc_runtime_put(); + dss_runtime_put(); if (cpu_is_omap34xx()) regulator_disable(dpi.vdds_dsi_reg); @@ -257,11 +261,26 @@ EXPORT_SYMBOL(omapdss_dpi_display_disable); void dpi_set_timings(struct omap_dss_device *dssdev, struct omap_video_timings *timings) { + int r; + DSSDBG("dpi_set_timings\n"); dssdev->panel.timings = *timings; if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) { + r = dss_runtime_get(); + if (r) + return; + + r = dispc_runtime_get(); + if (r) { + dss_runtime_put(); + return; + } + dpi_set_mode(dssdev); dispc_go(dssdev->manager->id); + + dispc_runtime_put(); + dss_runtime_put(); } } EXPORT_SYMBOL(dpi_set_timings); diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c index 345757cfcbee..7adbbeb84334 100644 --- a/drivers/video/omap2/dss/dsi.c +++ b/drivers/video/omap2/dss/dsi.c @@ -36,6 +36,7 @@ #include <linux/sched.h> #include <linux/slab.h> #include <linux/debugfs.h> +#include <linux/pm_runtime.h> #include <video/omapdss.h> #include <plat/clock.h> @@ -267,8 +268,12 @@ struct dsi_isr_tables { struct dsi_data { struct platform_device *pdev; void __iomem *base; + int irq; + struct clk *dss_clk; + struct clk *sys_clk; + void (*dsi_mux_pads)(bool enable); struct dsi_clock_info current_cinfo; @@ -389,15 +394,6 @@ static inline u32 dsi_read_reg(struct platform_device *dsidev, return __raw_readl(dsi->base + idx.idx); } - -void dsi_save_context(void) -{ -} - -void dsi_restore_context(void) -{ -} - void dsi_bus_lock(struct omap_dss_device *dssdev) { struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); @@ -493,9 +489,18 @@ static void dsi_perf_show(struct platform_device *dsidev, const char *name) total_bytes * 1000 / total_us); } #else -#define dsi_perf_mark_setup(x) -#define dsi_perf_mark_start(x) -#define dsi_perf_show(x, y) +static inline void dsi_perf_mark_setup(struct platform_device *dsidev) +{ +} + +static inline void dsi_perf_mark_start(struct platform_device *dsidev) +{ +} + +static inline void dsi_perf_show(struct platform_device *dsidev, + const char *name) +{ +} #endif static void print_irq_status(u32 status) @@ -1039,13 +1044,27 @@ static u32 dsi_get_errors(struct platform_device *dsidev) return e; } -/* DSI func clock. 
this could also be dsi_pll_hsdiv_dsi_clk */ -static inline void enable_clocks(bool enable) +int dsi_runtime_get(struct platform_device *dsidev) { - if (enable) - dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); - else - dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); + int r; + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + DSSDBG("dsi_runtime_get\n"); + + r = pm_runtime_get_sync(&dsi->pdev->dev); + WARN_ON(r < 0); + return r < 0 ? r : 0; +} + +void dsi_runtime_put(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + int r; + + DSSDBG("dsi_runtime_put\n"); + + r = pm_runtime_put(&dsi->pdev->dev); + WARN_ON(r < 0); } /* source clock for DSI PLL. this could also be PCLKFREE */ @@ -1055,9 +1074,9 @@ static inline void dsi_enable_pll_clock(struct platform_device *dsidev, struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); if (enable) - dss_clk_enable(DSS_CLK_SYSCK); + clk_enable(dsi->sys_clk); else - dss_clk_disable(DSS_CLK_SYSCK); + clk_disable(dsi->sys_clk); if (enable && dsi->pll_locked) { if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1) @@ -1150,10 +1169,11 @@ static unsigned long dsi_fclk_rate(struct platform_device *dsidev) { unsigned long r; int dsi_module = dsi_get_dsidev_id(dsidev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); if (dss_get_dsi_clk_source(dsi_module) == OMAP_DSS_CLK_SRC_FCK) { /* DSI FCLK source is DSS_CLK_FCK */ - r = dss_clk_get_rate(DSS_CLK_FCK); + r = clk_get_rate(dsi->dss_clk); } else { /* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */ r = dsi_get_pll_hsdiv_dsi_rate(dsidev); @@ -1262,7 +1282,7 @@ static int dsi_calc_clock_rates(struct omap_dss_device *dssdev, return -EINVAL; if (cinfo->use_sys_clk) { - cinfo->clkin = dss_clk_get_rate(DSS_CLK_SYSCK); + cinfo->clkin = clk_get_rate(dsi->sys_clk); /* XXX it is unclear if highfreq should be used * with DSS_SYS_CLK source also */ cinfo->highfreq = 0; @@ -1311,7 +1331,7 @@ int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft, int match = 0; unsigned long dss_sys_clk, max_dss_fck; - dss_sys_clk = dss_clk_get_rate(DSS_CLK_SYSCK); + dss_sys_clk = clk_get_rate(dsi->sys_clk); max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK); @@ -1601,7 +1621,6 @@ int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk, dsi->vdds_dsi_reg = vdds_dsi; } - enable_clocks(1); dsi_enable_pll_clock(dsidev, 1); /* * Note: SCP CLK is not required on OMAP3, but it is required on OMAP4. 
@@ -1653,7 +1672,6 @@ err1: } err0: dsi_disable_scp_clk(dsidev); - enable_clocks(0); dsi_enable_pll_clock(dsidev, 0); return r; } @@ -1671,7 +1689,6 @@ void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes) } dsi_disable_scp_clk(dsidev); - enable_clocks(0); dsi_enable_pll_clock(dsidev, 0); DSSDBG("PLL uninit done\n"); @@ -1688,7 +1705,8 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev, dispc_clk_src = dss_get_dispc_clk_source(); dsi_clk_src = dss_get_dsi_clk_source(dsi_module); - enable_clocks(1); + if (dsi_runtime_get(dsidev)) + return; seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1); @@ -1731,7 +1749,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev, seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk); - enable_clocks(0); + dsi_runtime_put(dsidev); } void dsi_dump_clocks(struct seq_file *s) @@ -1873,7 +1891,8 @@ static void dsi_dump_dsidev_regs(struct platform_device *dsidev, { #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r)) - dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); + if (dsi_runtime_get(dsidev)) + return; dsi_enable_scp_clk(dsidev); DUMPREG(DSI_REVISION); @@ -1947,7 +1966,7 @@ static void dsi_dump_dsidev_regs(struct platform_device *dsidev, DUMPREG(DSI_PLL_CONFIGURATION2); dsi_disable_scp_clk(dsidev); - dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); + dsi_runtime_put(dsidev); #undef DUMPREG } @@ -2463,28 +2482,6 @@ static void dsi_cio_uninit(struct platform_device *dsidev) dsi->dsi_mux_pads(false); } -static int _dsi_wait_reset(struct platform_device *dsidev) -{ - int t = 0; - - while (REG_GET(dsidev, DSI_SYSSTATUS, 0, 0) == 0) { - if (++t > 5) { - DSSERR("soft reset failed\n"); - return -ENODEV; - } - udelay(1); - } - - return 0; -} - -static int _dsi_reset(struct platform_device *dsidev) -{ - /* Soft reset */ - REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 1, 1); - return _dsi_wait_reset(dsidev); -} - static void dsi_config_tx_fifo(struct platform_device *dsidev, enum fifo_size size1, enum fifo_size size2, enum fifo_size size3, enum fifo_size size4) @@ -3386,6 +3383,10 @@ static int dsi_enter_ulps(struct platform_device *dsidev) dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion, DSI_CIO_IRQ_ULPSACTIVENOT_ALL0); + /* Reset LANEx_ULPS_SIG2 */ + REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, (0 << 0) | (0 << 1) | (0 << 2), + 7, 5); + dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS); dsi_if_enable(dsidev, false); @@ -4198,22 +4199,6 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev, dsi_pll_uninit(dsidev, disconnect_lanes); } -static int dsi_core_init(struct platform_device *dsidev) -{ - /* Autoidle */ - REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 0, 0); - - /* ENWAKEUP */ - REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 2, 2); - - /* SIDLEMODE smart-idle */ - REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 2, 4, 3); - - _dsi_initialize_irq(dsidev); - - return 0; -} - int omapdss_dsi_display_enable(struct omap_dss_device *dssdev) { struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); @@ -4229,37 +4214,37 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev) r = omap_dss_start_device(dssdev); if (r) { DSSERR("failed to start device\n"); - goto err0; + goto err_start_dev; } - enable_clocks(1); - dsi_enable_pll_clock(dsidev, 1); - - r = _dsi_reset(dsidev); + r = dsi_runtime_get(dsidev); if (r) - goto err1; + goto err_get_dsi; - dsi_core_init(dsidev); + dsi_enable_pll_clock(dsidev, 1); + + _dsi_initialize_irq(dsidev); r = dsi_display_init_dispc(dssdev); if (r) - goto err1; + goto 
err_init_dispc; r = dsi_display_init_dsi(dssdev); if (r) - goto err2; + goto err_init_dsi; mutex_unlock(&dsi->lock); return 0; -err2: +err_init_dsi: dsi_display_uninit_dispc(dssdev); -err1: - enable_clocks(0); +err_init_dispc: dsi_enable_pll_clock(dsidev, 0); + dsi_runtime_put(dsidev); +err_get_dsi: omap_dss_stop_device(dssdev); -err0: +err_start_dev: mutex_unlock(&dsi->lock); DSSDBG("dsi_display_enable FAILED\n"); return r; @@ -4278,11 +4263,16 @@ void omapdss_dsi_display_disable(struct omap_dss_device *dssdev, mutex_lock(&dsi->lock); + dsi_sync_vc(dsidev, 0); + dsi_sync_vc(dsidev, 1); + dsi_sync_vc(dsidev, 2); + dsi_sync_vc(dsidev, 3); + dsi_display_uninit_dispc(dssdev); dsi_display_uninit_dsi(dssdev, disconnect_lanes, enter_ulps); - enable_clocks(0); + dsi_runtime_put(dsidev); dsi_enable_pll_clock(dsidev, 0); omap_dss_stop_device(dssdev); @@ -4302,16 +4292,11 @@ int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable) EXPORT_SYMBOL(omapdss_dsi_enable_te); void dsi_get_overlay_fifo_thresholds(enum omap_plane plane, - u32 fifo_size, enum omap_burst_size *burst_size, + u32 fifo_size, u32 burst_size, u32 *fifo_low, u32 *fifo_high) { - unsigned burst_size_bytes; - - *burst_size = OMAP_DSS_BURST_16x32; - burst_size_bytes = 16 * 32 / 8; - - *fifo_high = fifo_size - burst_size_bytes; - *fifo_low = fifo_size - burst_size_bytes * 2; + *fifo_high = fifo_size - burst_size; + *fifo_low = fifo_size - burst_size * 2; } int dsi_init_display(struct omap_dss_device *dssdev) @@ -4437,7 +4422,47 @@ static void dsi_calc_clock_param_ranges(struct platform_device *dsidev) dsi->lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV); } -static int dsi_init(struct platform_device *dsidev) +static int dsi_get_clocks(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + struct clk *clk; + + clk = clk_get(&dsidev->dev, "fck"); + if (IS_ERR(clk)) { + DSSERR("can't get fck\n"); + return PTR_ERR(clk); + } + + dsi->dss_clk = clk; + + if (cpu_is_omap34xx() || cpu_is_omap3630()) + clk = clk_get(&dsidev->dev, "dss2_alwon_fck"); + else + clk = clk_get(&dsidev->dev, "sys_clk"); + if (IS_ERR(clk)) { + DSSERR("can't get sys_clk\n"); + clk_put(dsi->dss_clk); + dsi->dss_clk = NULL; + return PTR_ERR(clk); + } + + dsi->sys_clk = clk; + + return 0; +} + +static void dsi_put_clocks(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + if (dsi->dss_clk) + clk_put(dsi->dss_clk); + if (dsi->sys_clk) + clk_put(dsi->sys_clk); +} + +/* DSI1 HW IP initialisation */ +static int omap_dsi1hw_probe(struct platform_device *dsidev) { struct omap_display_platform_data *dss_plat_data; struct omap_dss_board_info *board_info; @@ -4449,7 +4474,7 @@ static int dsi_init(struct platform_device *dsidev) dsi = kzalloc(sizeof(*dsi), GFP_KERNEL); if (!dsi) { r = -ENOMEM; - goto err0; + goto err_alloc; } dsi->pdev = dsidev; @@ -4472,6 +4497,12 @@ static int dsi_init(struct platform_device *dsidev) mutex_init(&dsi->lock); sema_init(&dsi->bus_lock, 1); + r = dsi_get_clocks(dsidev); + if (r) + goto err_get_clk; + + pm_runtime_enable(&dsidev->dev); + INIT_DELAYED_WORK_DEFERRABLE(&dsi->framedone_timeout_work, dsi_framedone_timeout_work_callback); @@ -4484,26 +4515,26 @@ static int dsi_init(struct platform_device *dsidev) if (!dsi_mem) { DSSERR("can't get IORESOURCE_MEM DSI\n"); r = -EINVAL; - goto err1; + goto err_ioremap; } dsi->base = ioremap(dsi_mem->start, resource_size(dsi_mem)); if (!dsi->base) { DSSERR("can't ioremap DSI\n"); r = -ENOMEM; - goto err1; + goto 
err_ioremap; } dsi->irq = platform_get_irq(dsi->pdev, 0); if (dsi->irq < 0) { DSSERR("platform_get_irq failed\n"); r = -ENODEV; - goto err2; + goto err_get_irq; } r = request_irq(dsi->irq, omap_dsi_irq_handler, IRQF_SHARED, dev_name(&dsidev->dev), dsi->pdev); if (r < 0) { DSSERR("request_irq failed\n"); - goto err2; + goto err_get_irq; } /* DSI VCs initialization */ @@ -4515,7 +4546,9 @@ static int dsi_init(struct platform_device *dsidev) dsi_calc_clock_param_ranges(dsidev); - enable_clocks(1); + r = dsi_runtime_get(dsidev); + if (r) + goto err_get_dsi; rev = dsi_read_reg(dsidev, DSI_REVISION); dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n", @@ -4523,21 +4556,32 @@ static int dsi_init(struct platform_device *dsidev) dsi->num_data_lanes = dsi_get_num_data_lanes(dsidev); - enable_clocks(0); + dsi_runtime_put(dsidev); return 0; -err2: + +err_get_dsi: + free_irq(dsi->irq, dsi->pdev); +err_get_irq: iounmap(dsi->base); -err1: +err_ioremap: + pm_runtime_disable(&dsidev->dev); +err_get_clk: kfree(dsi); -err0: +err_alloc: return r; } -static void dsi_exit(struct platform_device *dsidev) +static int omap_dsi1hw_remove(struct platform_device *dsidev) { struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + WARN_ON(dsi->scp_clk_refcount > 0); + + pm_runtime_disable(&dsidev->dev); + + dsi_put_clocks(dsidev); + if (dsi->vdds_dsi_reg != NULL) { if (dsi->vdds_dsi_enabled) { regulator_disable(dsi->vdds_dsi_reg); @@ -4553,38 +4597,56 @@ static void dsi_exit(struct platform_device *dsidev) kfree(dsi); - DSSDBG("omap_dsi_exit\n"); + return 0; } -/* DSI1 HW IP initialisation */ -static int omap_dsi1hw_probe(struct platform_device *dsidev) +static int dsi_runtime_suspend(struct device *dev) { - int r; + struct dsi_data *dsi = dsi_get_dsidrv_data(to_platform_device(dev)); - r = dsi_init(dsidev); - if (r) { - DSSERR("Failed to initialize DSI\n"); - goto err_dsi; - } -err_dsi: - return r; + clk_disable(dsi->dss_clk); + + dispc_runtime_put(); + dss_runtime_put(); + + return 0; } -static int omap_dsi1hw_remove(struct platform_device *dsidev) +static int dsi_runtime_resume(struct device *dev) { - struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + struct dsi_data *dsi = dsi_get_dsidrv_data(to_platform_device(dev)); + int r; + + r = dss_runtime_get(); + if (r) + goto err_get_dss; + + r = dispc_runtime_get(); + if (r) + goto err_get_dispc; + + clk_enable(dsi->dss_clk); - dsi_exit(dsidev); - WARN_ON(dsi->scp_clk_refcount > 0); return 0; + +err_get_dispc: + dss_runtime_put(); +err_get_dss: + return r; } +static const struct dev_pm_ops dsi_pm_ops = { + .runtime_suspend = dsi_runtime_suspend, + .runtime_resume = dsi_runtime_resume, +}; + static struct platform_driver omap_dsi1hw_driver = { .probe = omap_dsi1hw_probe, .remove = omap_dsi1hw_remove, .driver = { .name = "omapdss_dsi1", .owner = THIS_MODULE, + .pm = &dsi_pm_ops, }, }; diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c index d9489d5c4f08..0f9c3a6457a5 100644 --- a/drivers/video/omap2/dss/dss.c +++ b/drivers/video/omap2/dss/dss.c @@ -28,6 +28,8 @@ #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/clk.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <video/omapdss.h> #include <plat/clock.h> @@ -59,15 +61,9 @@ struct dss_reg { static struct { struct platform_device *pdev; void __iomem *base; - int ctx_id; struct clk *dpll4_m4_ck; - struct clk *dss_ick; - struct clk *dss_fck; - struct clk *dss_sys_clk; - struct clk *dss_tv_fck; - struct clk *dss_video_fck; - unsigned num_clks_enabled; + struct 
clk *dss_clk; unsigned long cache_req_pck; unsigned long cache_prate; @@ -78,6 +74,7 @@ static struct { enum omap_dss_clk_source dispc_clk_source; enum omap_dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS]; + bool ctx_valid; u32 ctx[DSS_SZ_REGS / sizeof(u32)]; } dss; @@ -87,13 +84,6 @@ static const char * const dss_generic_clk_source_names[] = { [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCK", }; -static void dss_clk_enable_all_no_ctx(void); -static void dss_clk_disable_all_no_ctx(void); -static void dss_clk_enable_no_ctx(enum dss_clock clks); -static void dss_clk_disable_no_ctx(enum dss_clock clks); - -static int _omap_dss_wait_reset(void); - static inline void dss_write_reg(const struct dss_reg idx, u32 val) { __raw_writel(val, dss.base + idx.idx); @@ -109,12 +99,10 @@ static inline u32 dss_read_reg(const struct dss_reg idx) #define RR(reg) \ dss_write_reg(DSS_##reg, dss.ctx[(DSS_##reg).idx / sizeof(u32)]) -void dss_save_context(void) +static void dss_save_context(void) { - if (cpu_is_omap24xx()) - return; + DSSDBG("dss_save_context\n"); - SR(SYSCONFIG); SR(CONTROL); if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) & @@ -122,14 +110,19 @@ void dss_save_context(void) SR(SDI_CONTROL); SR(PLL_CONTROL); } + + dss.ctx_valid = true; + + DSSDBG("context saved\n"); } -void dss_restore_context(void) +static void dss_restore_context(void) { - if (_omap_dss_wait_reset()) - DSSERR("DSS not coming out of reset after sleep\n"); + DSSDBG("dss_restore_context\n"); + + if (!dss.ctx_valid) + return; - RR(SYSCONFIG); RR(CONTROL); if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) & @@ -137,6 +130,8 @@ void dss_restore_context(void) RR(SDI_CONTROL); RR(PLL_CONTROL); } + + DSSDBG("context restored\n"); } #undef SR @@ -234,6 +229,7 @@ const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src) return dss_generic_clk_source_names[clk_src]; } + void dss_dump_clocks(struct seq_file *s) { unsigned long dpll4_ck_rate; @@ -241,13 +237,14 @@ void dss_dump_clocks(struct seq_file *s) const char *fclk_name, *fclk_real_name; unsigned long fclk_rate; - dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); + if (dss_runtime_get()) + return; seq_printf(s, "- DSS -\n"); fclk_name = dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_FCK); fclk_real_name = dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_FCK); - fclk_rate = dss_clk_get_rate(DSS_CLK_FCK); + fclk_rate = clk_get_rate(dss.dss_clk); if (dss.dpll4_m4_ck) { dpll4_ck_rate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck)); @@ -273,14 +270,15 @@ void dss_dump_clocks(struct seq_file *s) fclk_rate); } - dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); + dss_runtime_put(); } void dss_dump_regs(struct seq_file *s) { #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r)) - dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); + if (dss_runtime_get()) + return; DUMPREG(DSS_REVISION); DUMPREG(DSS_SYSCONFIG); @@ -294,7 +292,7 @@ void dss_dump_regs(struct seq_file *s) DUMPREG(DSS_SDI_STATUS); } - dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); + dss_runtime_put(); #undef DUMPREG } @@ -437,7 +435,7 @@ int dss_calc_clock_rates(struct dss_clock_info *cinfo) } else { if (cinfo->fck_div != 0) return -EINVAL; - cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK); + cinfo->fck = clk_get_rate(dss.dss_clk); } return 0; @@ -467,7 +465,7 @@ int dss_set_clock_div(struct dss_clock_info *cinfo) int dss_get_clock_div(struct dss_clock_info *cinfo) { - cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK); + cinfo->fck = clk_get_rate(dss.dss_clk); if (dss.dpll4_m4_ck) { unsigned long prate; @@ -512,7 +510,7 @@ 
int dss_calc_clock_div(bool is_tft, unsigned long req_pck, max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK); - fck = dss_clk_get_rate(DSS_CLK_FCK); + fck = clk_get_rate(dss.dss_clk); if (req_pck == dss.cache_req_pck && ((cpu_is_omap34xx() && prate == dss.cache_prate) || dss.cache_dss_cinfo.fck == fck)) { @@ -539,7 +537,7 @@ retry: if (dss.dpll4_m4_ck == NULL) { struct dispc_clock_info cur_dispc; /* XXX can we change the clock on omap2? */ - fck = dss_clk_get_rate(DSS_CLK_FCK); + fck = clk_get_rate(dss.dss_clk); fck_div = 1; dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc); @@ -616,28 +614,6 @@ found: return 0; } -static int _omap_dss_wait_reset(void) -{ - int t = 0; - - while (REG_GET(DSS_SYSSTATUS, 0, 0) == 0) { - if (++t > 1000) { - DSSERR("soft reset failed\n"); - return -ENODEV; - } - udelay(1); - } - - return 0; -} - -static int _omap_dss_reset(void) -{ - /* Soft reset */ - REG_FLD_MOD(DSS_SYSCONFIG, 1, 1, 1); - return _omap_dss_wait_reset(); -} - void dss_set_venc_output(enum omap_dss_venc_type type) { int l = 0; @@ -663,424 +639,88 @@ void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select hdmi) REG_FLD_MOD(DSS_CONTROL, hdmi, 15, 15); /* VENC_HDMI_SWITCH */ } -static int dss_init(void) +static int dss_get_clocks(void) { + struct clk *clk; int r; - u32 rev; - struct resource *dss_mem; - struct clk *dpll4_m4_ck; - dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0); - if (!dss_mem) { - DSSERR("can't get IORESOURCE_MEM DSS\n"); - r = -EINVAL; - goto fail0; - } - dss.base = ioremap(dss_mem->start, resource_size(dss_mem)); - if (!dss.base) { - DSSERR("can't ioremap DSS\n"); - r = -ENOMEM; - goto fail0; + clk = clk_get(&dss.pdev->dev, "fck"); + if (IS_ERR(clk)) { + DSSERR("can't get clock fck\n"); + r = PTR_ERR(clk); + goto err; } - /* disable LCD and DIGIT output. This seems to fix the synclost - * problem that we get, if the bootloader starts the DSS and - * the kernel resets it */ - omap_writel(omap_readl(0x48050440) & ~0x3, 0x48050440); - -#ifdef CONFIG_OMAP2_DSS_SLEEP_BEFORE_RESET - /* We need to wait here a bit, otherwise we sometimes start to - * get synclost errors, and after that only power cycle will - * restore DSS functionality. I have no idea why this happens. - * And we have to wait _before_ resetting the DSS, but after - * enabling clocks. - * - * This bug was at least present on OMAP3430. It's unknown - * if it happens on OMAP2 or OMAP3630. 
- */ - msleep(50); -#endif - - _omap_dss_reset(); + dss.dss_clk = clk; - /* autoidle */ - REG_FLD_MOD(DSS_SYSCONFIG, 1, 0, 0); - - /* Select DPLL */ - REG_FLD_MOD(DSS_CONTROL, 0, 0, 0); - -#ifdef CONFIG_OMAP2_DSS_VENC - REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */ - REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */ - REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */ -#endif if (cpu_is_omap34xx()) { - dpll4_m4_ck = clk_get(NULL, "dpll4_m4_ck"); - if (IS_ERR(dpll4_m4_ck)) { + clk = clk_get(NULL, "dpll4_m4_ck"); + if (IS_ERR(clk)) { DSSERR("Failed to get dpll4_m4_ck\n"); - r = PTR_ERR(dpll4_m4_ck); - goto fail1; + r = PTR_ERR(clk); + goto err; } } else if (cpu_is_omap44xx()) { - dpll4_m4_ck = clk_get(NULL, "dpll_per_m5x2_ck"); - if (IS_ERR(dpll4_m4_ck)) { - DSSERR("Failed to get dpll4_m4_ck\n"); - r = PTR_ERR(dpll4_m4_ck); - goto fail1; + clk = clk_get(NULL, "dpll_per_m5x2_ck"); + if (IS_ERR(clk)) { + DSSERR("Failed to get dpll_per_m5x2_ck\n"); + r = PTR_ERR(clk); + goto err; } } else { /* omap24xx */ - dpll4_m4_ck = NULL; + clk = NULL; } - dss.dpll4_m4_ck = dpll4_m4_ck; - - dss.dsi_clk_source[0] = OMAP_DSS_CLK_SRC_FCK; - dss.dsi_clk_source[1] = OMAP_DSS_CLK_SRC_FCK; - dss.dispc_clk_source = OMAP_DSS_CLK_SRC_FCK; - dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK; - dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK; - - dss_save_context(); - - rev = dss_read_reg(DSS_REVISION); - printk(KERN_INFO "OMAP DSS rev %d.%d\n", - FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); + dss.dpll4_m4_ck = clk; return 0; -fail1: - iounmap(dss.base); -fail0: - return r; -} - -static void dss_exit(void) -{ +err: + if (dss.dss_clk) + clk_put(dss.dss_clk); if (dss.dpll4_m4_ck) clk_put(dss.dpll4_m4_ck); - iounmap(dss.base); -} - -/* CONTEXT */ -static int dss_get_ctx_id(void) -{ - struct omap_display_platform_data *pdata = dss.pdev->dev.platform_data; - int r; - - if (!pdata->board_data->get_last_off_on_transaction_id) - return 0; - r = pdata->board_data->get_last_off_on_transaction_id(&dss.pdev->dev); - if (r < 0) { - dev_err(&dss.pdev->dev, "getting transaction ID failed, " - "will force context restore\n"); - r = -1; - } - return r; -} - -int dss_need_ctx_restore(void) -{ - int id = dss_get_ctx_id(); - - if (id < 0 || id != dss.ctx_id) { - DSSDBG("ctx id %d -> id %d\n", - dss.ctx_id, id); - dss.ctx_id = id; - return 1; - } else { - return 0; - } -} - -static void save_all_ctx(void) -{ - DSSDBG("save context\n"); - - dss_clk_enable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK); - - dss_save_context(); - dispc_save_context(); -#ifdef CONFIG_OMAP2_DSS_DSI - dsi_save_context(); -#endif - - dss_clk_disable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK); -} - -static void restore_all_ctx(void) -{ - DSSDBG("restore context\n"); - - dss_clk_enable_all_no_ctx(); - - dss_restore_context(); - dispc_restore_context(); -#ifdef CONFIG_OMAP2_DSS_DSI - dsi_restore_context(); -#endif - - dss_clk_disable_all_no_ctx(); -} - -static int dss_get_clock(struct clk **clock, const char *clk_name) -{ - struct clk *clk; - - clk = clk_get(&dss.pdev->dev, clk_name); - - if (IS_ERR(clk)) { - DSSERR("can't get clock %s", clk_name); - return PTR_ERR(clk); - } - - *clock = clk; - - DSSDBG("clk %s, rate %ld\n", clk_name, clk_get_rate(clk)); - - return 0; -} - -static int dss_get_clocks(void) -{ - int r; - struct omap_display_platform_data *pdata = dss.pdev->dev.platform_data; - - dss.dss_ick = NULL; - dss.dss_fck = NULL; - dss.dss_sys_clk = NULL; - dss.dss_tv_fck = NULL; - dss.dss_video_fck = NULL; - - r = dss_get_clock(&dss.dss_ick, "ick"); - if 
(r) - goto err; - - r = dss_get_clock(&dss.dss_fck, "fck"); - if (r) - goto err; - - if (!pdata->opt_clock_available) { - r = -ENODEV; - goto err; - } - - if (pdata->opt_clock_available("sys_clk")) { - r = dss_get_clock(&dss.dss_sys_clk, "sys_clk"); - if (r) - goto err; - } - - if (pdata->opt_clock_available("tv_clk")) { - r = dss_get_clock(&dss.dss_tv_fck, "tv_clk"); - if (r) - goto err; - } - - if (pdata->opt_clock_available("video_clk")) { - r = dss_get_clock(&dss.dss_video_fck, "video_clk"); - if (r) - goto err; - } - - return 0; - -err: - if (dss.dss_ick) - clk_put(dss.dss_ick); - if (dss.dss_fck) - clk_put(dss.dss_fck); - if (dss.dss_sys_clk) - clk_put(dss.dss_sys_clk); - if (dss.dss_tv_fck) - clk_put(dss.dss_tv_fck); - if (dss.dss_video_fck) - clk_put(dss.dss_video_fck); - return r; } static void dss_put_clocks(void) { - if (dss.dss_video_fck) - clk_put(dss.dss_video_fck); - if (dss.dss_tv_fck) - clk_put(dss.dss_tv_fck); - if (dss.dss_sys_clk) - clk_put(dss.dss_sys_clk); - clk_put(dss.dss_fck); - clk_put(dss.dss_ick); -} - -unsigned long dss_clk_get_rate(enum dss_clock clk) -{ - switch (clk) { - case DSS_CLK_ICK: - return clk_get_rate(dss.dss_ick); - case DSS_CLK_FCK: - return clk_get_rate(dss.dss_fck); - case DSS_CLK_SYSCK: - return clk_get_rate(dss.dss_sys_clk); - case DSS_CLK_TVFCK: - return clk_get_rate(dss.dss_tv_fck); - case DSS_CLK_VIDFCK: - return clk_get_rate(dss.dss_video_fck); - } - - BUG(); - return 0; -} - -static unsigned count_clk_bits(enum dss_clock clks) -{ - unsigned num_clks = 0; - - if (clks & DSS_CLK_ICK) - ++num_clks; - if (clks & DSS_CLK_FCK) - ++num_clks; - if (clks & DSS_CLK_SYSCK) - ++num_clks; - if (clks & DSS_CLK_TVFCK) - ++num_clks; - if (clks & DSS_CLK_VIDFCK) - ++num_clks; - - return num_clks; -} - -static void dss_clk_enable_no_ctx(enum dss_clock clks) -{ - unsigned num_clks = count_clk_bits(clks); - - if (clks & DSS_CLK_ICK) - clk_enable(dss.dss_ick); - if (clks & DSS_CLK_FCK) - clk_enable(dss.dss_fck); - if ((clks & DSS_CLK_SYSCK) && dss.dss_sys_clk) - clk_enable(dss.dss_sys_clk); - if ((clks & DSS_CLK_TVFCK) && dss.dss_tv_fck) - clk_enable(dss.dss_tv_fck); - if ((clks & DSS_CLK_VIDFCK) && dss.dss_video_fck) - clk_enable(dss.dss_video_fck); - - dss.num_clks_enabled += num_clks; -} - -void dss_clk_enable(enum dss_clock clks) -{ - bool check_ctx = dss.num_clks_enabled == 0; - - dss_clk_enable_no_ctx(clks); - - /* - * HACK: On omap4 the registers may not be accessible right after - * enabling the clocks. At some point this will be handled by - * pm_runtime, but for the time begin this should make things work. 
- */ - if (cpu_is_omap44xx() && check_ctx) - udelay(10); - - if (check_ctx && cpu_is_omap34xx() && dss_need_ctx_restore()) - restore_all_ctx(); + if (dss.dpll4_m4_ck) + clk_put(dss.dpll4_m4_ck); + clk_put(dss.dss_clk); } -static void dss_clk_disable_no_ctx(enum dss_clock clks) +struct clk *dss_get_ick(void) { - unsigned num_clks = count_clk_bits(clks); - - if (clks & DSS_CLK_ICK) - clk_disable(dss.dss_ick); - if (clks & DSS_CLK_FCK) - clk_disable(dss.dss_fck); - if ((clks & DSS_CLK_SYSCK) && dss.dss_sys_clk) - clk_disable(dss.dss_sys_clk); - if ((clks & DSS_CLK_TVFCK) && dss.dss_tv_fck) - clk_disable(dss.dss_tv_fck); - if ((clks & DSS_CLK_VIDFCK) && dss.dss_video_fck) - clk_disable(dss.dss_video_fck); - - dss.num_clks_enabled -= num_clks; + return clk_get(&dss.pdev->dev, "ick"); } -void dss_clk_disable(enum dss_clock clks) +int dss_runtime_get(void) { - if (cpu_is_omap34xx()) { - unsigned num_clks = count_clk_bits(clks); - - BUG_ON(dss.num_clks_enabled < num_clks); + int r; - if (dss.num_clks_enabled == num_clks) - save_all_ctx(); - } + DSSDBG("dss_runtime_get\n"); - dss_clk_disable_no_ctx(clks); + r = pm_runtime_get_sync(&dss.pdev->dev); + WARN_ON(r < 0); + return r < 0 ? r : 0; } -static void dss_clk_enable_all_no_ctx(void) +void dss_runtime_put(void) { - enum dss_clock clks; - - clks = DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_SYSCK | DSS_CLK_TVFCK; - if (cpu_is_omap34xx()) - clks |= DSS_CLK_VIDFCK; - dss_clk_enable_no_ctx(clks); -} - -static void dss_clk_disable_all_no_ctx(void) -{ - enum dss_clock clks; + int r; - clks = DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_SYSCK | DSS_CLK_TVFCK; - if (cpu_is_omap34xx()) - clks |= DSS_CLK_VIDFCK; - dss_clk_disable_no_ctx(clks); -} + DSSDBG("dss_runtime_put\n"); -#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) -/* CLOCKS */ -static void core_dump_clocks(struct seq_file *s) -{ - int i; - struct clk *clocks[5] = { - dss.dss_ick, - dss.dss_fck, - dss.dss_sys_clk, - dss.dss_tv_fck, - dss.dss_video_fck - }; - - const char *names[5] = { - "ick", - "fck", - "sys_clk", - "tv_fck", - "video_fck" - }; - - seq_printf(s, "- CORE -\n"); - - seq_printf(s, "internal clk count\t\t%u\n", dss.num_clks_enabled); - - for (i = 0; i < 5; i++) { - if (!clocks[i]) - continue; - seq_printf(s, "%s (%s)%*s\t%lu\t%d\n", - names[i], - clocks[i]->name, - 24 - strlen(names[i]) - strlen(clocks[i]->name), - "", - clk_get_rate(clocks[i]), - clocks[i]->usecount); - } + r = pm_runtime_put(&dss.pdev->dev); + WARN_ON(r < 0); } -#endif /* defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) */ /* DEBUGFS */ #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) void dss_debug_dump_clocks(struct seq_file *s) { - core_dump_clocks(s); dss_dump_clocks(s); dispc_dump_clocks(s); #ifdef CONFIG_OMAP2_DSS_DSI @@ -1089,28 +729,51 @@ void dss_debug_dump_clocks(struct seq_file *s) } #endif - /* DSS HW IP initialisation */ static int omap_dsshw_probe(struct platform_device *pdev) { + struct resource *dss_mem; + u32 rev; int r; dss.pdev = pdev; + dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0); + if (!dss_mem) { + DSSERR("can't get IORESOURCE_MEM DSS\n"); + r = -EINVAL; + goto err_ioremap; + } + dss.base = ioremap(dss_mem->start, resource_size(dss_mem)); + if (!dss.base) { + DSSERR("can't ioremap DSS\n"); + r = -ENOMEM; + goto err_ioremap; + } + r = dss_get_clocks(); if (r) goto err_clocks; - dss_clk_enable_all_no_ctx(); + pm_runtime_enable(&pdev->dev); - dss.ctx_id = dss_get_ctx_id(); - DSSDBG("initial ctx id %u\n", dss.ctx_id); + r = 
dss_runtime_get(); + if (r) + goto err_runtime_get; - r = dss_init(); - if (r) { - DSSERR("Failed to initialize DSS\n"); - goto err_dss; - } + /* Select DPLL */ + REG_FLD_MOD(DSS_CONTROL, 0, 0, 0); + +#ifdef CONFIG_OMAP2_DSS_VENC + REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */ + REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */ + REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */ +#endif + dss.dsi_clk_source[0] = OMAP_DSS_CLK_SRC_FCK; + dss.dsi_clk_source[1] = OMAP_DSS_CLK_SRC_FCK; + dss.dispc_clk_source = OMAP_DSS_CLK_SRC_FCK; + dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK; + dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK; r = dpi_init(); if (r) { @@ -1124,42 +787,66 @@ static int omap_dsshw_probe(struct platform_device *pdev) goto err_sdi; } - dss_clk_disable_all_no_ctx(); + rev = dss_read_reg(DSS_REVISION); + printk(KERN_INFO "OMAP DSS rev %d.%d\n", + FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); + + dss_runtime_put(); + return 0; err_sdi: dpi_exit(); err_dpi: - dss_exit(); -err_dss: - dss_clk_disable_all_no_ctx(); + dss_runtime_put(); +err_runtime_get: + pm_runtime_disable(&pdev->dev); dss_put_clocks(); err_clocks: + iounmap(dss.base); +err_ioremap: return r; } static int omap_dsshw_remove(struct platform_device *pdev) { + dpi_exit(); + sdi_exit(); - dss_exit(); + iounmap(dss.base); - /* - * As part of hwmod changes, DSS is not the only controller of dss - * clocks; hwmod framework itself will also enable clocks during hwmod - * init for dss, and autoidle is set in h/w for DSS. Hence, there's no - * need to disable clocks if their usecounts > 1. - */ - WARN_ON(dss.num_clks_enabled > 0); + pm_runtime_disable(&pdev->dev); dss_put_clocks(); + + return 0; +} + +static int dss_runtime_suspend(struct device *dev) +{ + dss_save_context(); + clk_disable(dss.dss_clk); return 0; } +static int dss_runtime_resume(struct device *dev) +{ + clk_enable(dss.dss_clk); + dss_restore_context(); + return 0; +} + +static const struct dev_pm_ops dss_pm_ops = { + .runtime_suspend = dss_runtime_suspend, + .runtime_resume = dss_runtime_resume, +}; + static struct platform_driver omap_dsshw_driver = { .probe = omap_dsshw_probe, .remove = omap_dsshw_remove, .driver = { .name = "omapdss_dss", .owner = THIS_MODULE, + .pm = &dss_pm_ops, }, }; diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h index 8ab6d43329bb..9c94b1152c20 100644 --- a/drivers/video/omap2/dss/dss.h +++ b/drivers/video/omap2/dss/dss.h @@ -97,26 +97,12 @@ extern unsigned int dss_debug; #define FLD_MOD(orig, val, start, end) \ (((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end)) -enum omap_burst_size { - OMAP_DSS_BURST_4x32 = 0, - OMAP_DSS_BURST_8x32 = 1, - OMAP_DSS_BURST_16x32 = 2, -}; - enum omap_parallel_interface_mode { OMAP_DSS_PARALLELMODE_BYPASS, /* MIPI DPI */ OMAP_DSS_PARALLELMODE_RFBI, /* MIPI DBI */ OMAP_DSS_PARALLELMODE_DSI, }; -enum dss_clock { - DSS_CLK_ICK = 1 << 0, /* DSS_L3_ICLK and DSS_L4_ICLK */ - DSS_CLK_FCK = 1 << 1, /* DSS1_ALWON_FCLK */ - DSS_CLK_SYSCK = 1 << 2, /* DSS2_ALWON_FCLK */ - DSS_CLK_TVFCK = 1 << 3, /* DSS_TV_FCLK */ - DSS_CLK_VIDFCK = 1 << 4, /* DSS_96M_FCLK*/ -}; - enum dss_hdmi_venc_clk_source_select { DSS_VENC_TV_CLK = 0, DSS_HDMI_M_PCLK = 1, @@ -194,7 +180,7 @@ void dss_uninit_device(struct platform_device *pdev, bool dss_use_replication(struct omap_dss_device *dssdev, enum omap_color_mode mode); void default_get_overlay_fifo_thresholds(enum omap_plane plane, - u32 fifo_size, enum omap_burst_size *burst_size, + u32 fifo_size, u32 burst_size, u32 
*fifo_low, u32 *fifo_high); /* manager */ @@ -220,13 +206,12 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force); int dss_init_platform_driver(void); void dss_uninit_platform_driver(void); +int dss_runtime_get(void); +void dss_runtime_put(void); + +struct clk *dss_get_ick(void); + void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select); -void dss_save_context(void); -void dss_restore_context(void); -void dss_clk_enable(enum dss_clock clks); -void dss_clk_disable(enum dss_clock clks); -unsigned long dss_clk_get_rate(enum dss_clock clk); -int dss_need_ctx_restore(void); const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src); void dss_dump_clocks(struct seq_file *s); @@ -283,15 +268,15 @@ struct file_operations; int dsi_init_platform_driver(void); void dsi_uninit_platform_driver(void); +int dsi_runtime_get(struct platform_device *dsidev); +void dsi_runtime_put(struct platform_device *dsidev); + void dsi_dump_clocks(struct seq_file *s); void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir, const struct file_operations *debug_fops); void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir, const struct file_operations *debug_fops); -void dsi_save_context(void); -void dsi_restore_context(void); - int dsi_init_display(struct omap_dss_device *display); void dsi_irq_handler(void); unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev); @@ -304,7 +289,7 @@ int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk, bool enable_hsdiv); void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes); void dsi_get_overlay_fifo_thresholds(enum omap_plane plane, - u32 fifo_size, enum omap_burst_size *burst_size, + u32 fifo_size, u32 burst_size, u32 *fifo_low, u32 *fifo_high); void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev); void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev); @@ -317,6 +302,13 @@ static inline int dsi_init_platform_driver(void) static inline void dsi_uninit_platform_driver(void) { } +static inline int dsi_runtime_get(struct platform_device *dsidev) +{ + return 0; +} +static inline void dsi_runtime_put(struct platform_device *dsidev) +{ +} static inline unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev) { WARN("%s: DSI not compiled in, returning rate as 0\n", __func__); @@ -384,8 +376,8 @@ void dispc_dump_regs(struct seq_file *s); void dispc_irq_handler(void); void dispc_fake_vsync_irq(void); -void dispc_save_context(void); -void dispc_restore_context(void); +int dispc_runtime_get(void); +void dispc_runtime_put(void); void dispc_enable_sidle(void); void dispc_disable_sidle(void); @@ -398,10 +390,12 @@ void dispc_enable_fifohandcheck(enum omap_channel channel, bool enable); void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height); void dispc_set_digit_size(u16 width, u16 height); u32 dispc_get_plane_fifo_size(enum omap_plane plane); -void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high); +void dispc_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high); void dispc_enable_fifomerge(bool enable); -void dispc_set_burst_size(enum omap_plane plane, - enum omap_burst_size burst_size); +u32 dispc_get_burst_size(enum omap_plane plane); +void dispc_enable_cpr(enum omap_channel channel, bool enable); +void dispc_set_cpr_coef(enum omap_channel channel, + struct omap_dss_cpr_coefs *coefs); void dispc_set_plane_ba0(enum omap_plane plane, u32 paddr); void dispc_set_plane_ba1(enum omap_plane 
plane, u32 paddr); diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c index 1c18888e5df3..b415c4ee621d 100644 --- a/drivers/video/omap2/dss/dss_features.c +++ b/drivers/video/omap2/dss/dss_features.c @@ -49,6 +49,9 @@ struct omap_dss_features { const enum omap_color_mode *supported_color_modes; const char * const *clksrc_names; const struct dss_param_range *dss_params; + + const u32 buffer_size_unit; + const u32 burst_size_unit; }; /* This struct is assigned to one of the below during initialization */ @@ -274,6 +277,8 @@ static const struct omap_dss_features omap2_dss_features = { .supported_color_modes = omap2_dss_supported_color_modes, .clksrc_names = omap2_dss_clk_source_names, .dss_params = omap2_dss_param_range, + .buffer_size_unit = 1, + .burst_size_unit = 8, }; /* OMAP3 DSS Features */ @@ -286,7 +291,9 @@ static const struct omap_dss_features omap3430_dss_features = { FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE | FEAT_FUNCGATED | FEAT_ROWREPEATENABLE | FEAT_LINEBUFFERSPLIT | FEAT_RESIZECONF | - FEAT_DSI_PLL_FREQSEL | FEAT_DSI_REVERSE_TXCLKESC, + FEAT_DSI_PLL_FREQSEL | FEAT_DSI_REVERSE_TXCLKESC | + FEAT_VENC_REQUIRES_TV_DAC_CLK | FEAT_CPR | FEAT_PRELOAD | + FEAT_FIR_COEF_V, .num_mgrs = 2, .num_ovls = 3, @@ -294,6 +301,8 @@ static const struct omap_dss_features omap3430_dss_features = { .supported_color_modes = omap3_dss_supported_color_modes, .clksrc_names = omap3_dss_clk_source_names, .dss_params = omap3_dss_param_range, + .buffer_size_unit = 1, + .burst_size_unit = 8, }; static const struct omap_dss_features omap3630_dss_features = { @@ -306,7 +315,8 @@ static const struct omap_dss_features omap3630_dss_features = { FEAT_PRE_MULT_ALPHA | FEAT_FUNCGATED | FEAT_ROWREPEATENABLE | FEAT_LINEBUFFERSPLIT | FEAT_RESIZECONF | FEAT_DSI_PLL_PWR_BUG | - FEAT_DSI_PLL_FREQSEL, + FEAT_DSI_PLL_FREQSEL | FEAT_CPR | FEAT_PRELOAD | + FEAT_FIR_COEF_V, .num_mgrs = 2, .num_ovls = 3, @@ -314,6 +324,8 @@ static const struct omap_dss_features omap3630_dss_features = { .supported_color_modes = omap3_dss_supported_color_modes, .clksrc_names = omap3_dss_clk_source_names, .dss_params = omap3_dss_param_range, + .buffer_size_unit = 1, + .burst_size_unit = 8, }; /* OMAP4 DSS Features */ @@ -327,7 +339,8 @@ static const struct omap_dss_features omap4430_es1_0_dss_features = { FEAT_MGR_LCD2 | FEAT_GLOBAL_ALPHA_VID1 | FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC | FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH | - FEAT_DSI_GNQ | FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2, + FEAT_DSI_GNQ | FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2 | + FEAT_CPR | FEAT_PRELOAD | FEAT_FIR_COEF_V, .num_mgrs = 3, .num_ovls = 3, @@ -335,6 +348,8 @@ static const struct omap_dss_features omap4430_es1_0_dss_features = { .supported_color_modes = omap4_dss_supported_color_modes, .clksrc_names = omap4_dss_clk_source_names, .dss_params = omap4_dss_param_range, + .buffer_size_unit = 16, + .burst_size_unit = 16, }; /* For all the other OMAP4 versions */ @@ -348,7 +363,8 @@ static const struct omap_dss_features omap4_dss_features = { FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC | FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH | FEAT_DSI_GNQ | FEAT_HDMI_CTS_SWMODE | - FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2, + FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2 | FEAT_CPR | + FEAT_PRELOAD | FEAT_FIR_COEF_V, .num_mgrs = 3, .num_ovls = 3, @@ -356,6 +372,8 @@ static const struct omap_dss_features omap4_dss_features = { .supported_color_modes = omap4_dss_supported_color_modes, .clksrc_names = omap4_dss_clk_source_names, .dss_params = 
omap4_dss_param_range, + .buffer_size_unit = 16, + .burst_size_unit = 16, }; /* Functions returning values related to a DSS feature */ @@ -401,6 +419,16 @@ const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id) return omap_current_dss_features->clksrc_names[id]; } +u32 dss_feat_get_buffer_size_unit(void) +{ + return omap_current_dss_features->buffer_size_unit; +} + +u32 dss_feat_get_burst_size_unit(void) +{ + return omap_current_dss_features->burst_size_unit; +} + /* DSS has_feature check */ bool dss_has_feature(enum dss_feat_id id) { diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h index 07b346f7d916..b7398cbcda5f 100644 --- a/drivers/video/omap2/dss/dss_features.h +++ b/drivers/video/omap2/dss/dss_features.h @@ -51,6 +51,10 @@ enum dss_feat_id { FEAT_HDMI_CTS_SWMODE = 1 << 19, FEAT_HANDLE_UV_SEPARATE = 1 << 20, FEAT_ATTR2 = 1 << 21, + FEAT_VENC_REQUIRES_TV_DAC_CLK = 1 << 22, + FEAT_CPR = 1 << 23, + FEAT_PRELOAD = 1 << 24, + FEAT_FIR_COEF_V = 1 << 25, }; /* DSS register field id */ @@ -90,6 +94,9 @@ bool dss_feat_color_mode_supported(enum omap_plane plane, enum omap_color_mode color_mode); const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id); +u32 dss_feat_get_buffer_size_unit(void); /* in bytes */ +u32 dss_feat_get_burst_size_unit(void); /* in bytes */ + bool dss_has_feature(enum dss_feat_id id); void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end); void dss_features_init(void); diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c index b0555f4f0a78..256f27a9064a 100644 --- a/drivers/video/omap2/dss/hdmi.c +++ b/drivers/video/omap2/dss/hdmi.c @@ -29,6 +29,9 @@ #include <linux/mutex.h> #include <linux/delay.h> #include <linux/string.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/clk.h> #include <video/omapdss.h> #if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) @@ -51,6 +54,9 @@ static struct { u8 edid_set; bool custom_set; struct hdmi_config cfg; + + struct clk *sys_clk; + struct clk *hdmi_clk; } hdmi; /* @@ -162,6 +168,27 @@ static inline int hdmi_wait_for_bit_change(const struct hdmi_reg idx, return val; } +static int hdmi_runtime_get(void) +{ + int r; + + DSSDBG("hdmi_runtime_get\n"); + + r = pm_runtime_get_sync(&hdmi.pdev->dev); + WARN_ON(r < 0); + return r < 0 ? 
r : 0; +} + +static void hdmi_runtime_put(void) +{ + int r; + + DSSDBG("hdmi_runtime_put\n"); + + r = pm_runtime_put(&hdmi.pdev->dev); + WARN_ON(r < 0); +} + int hdmi_init_display(struct omap_dss_device *dssdev) { DSSDBG("init_display\n"); @@ -311,30 +338,11 @@ static int hdmi_phy_init(void) return 0; } -static int hdmi_wait_softreset(void) -{ - /* reset W1 */ - REG_FLD_MOD(HDMI_WP_SYSCONFIG, 0x1, 0, 0); - - /* wait till SOFTRESET == 0 */ - if (hdmi_wait_for_bit_change(HDMI_WP_SYSCONFIG, 0, 0, 0) != 0) { - DSSERR("sysconfig reset failed\n"); - return -ETIMEDOUT; - } - - return 0; -} - static int hdmi_pll_program(struct hdmi_pll_info *fmt) { u16 r = 0; enum hdmi_clk_refsel refsel; - /* wait for wrapper reset */ - r = hdmi_wait_softreset(); - if (r) - return r; - r = hdmi_set_pll_pwr(HDMI_PLLPWRCMD_ALLOFF); if (r) return r; @@ -1064,7 +1072,7 @@ static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy, unsigned long clkin, refclk; u32 mf; - clkin = dss_clk_get_rate(DSS_CLK_SYSCK) / 10000; + clkin = clk_get_rate(hdmi.sys_clk) / 10000; /* * Input clock is predivided by N + 1 * out put of which is reference clk @@ -1098,16 +1106,6 @@ static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy, DSSDBG("range = %d sd = %d\n", pi->dcofreq, pi->regsd); } -static void hdmi_enable_clocks(int enable) -{ - if (enable) - dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK | - DSS_CLK_SYSCK | DSS_CLK_VIDFCK); - else - dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK | - DSS_CLK_SYSCK | DSS_CLK_VIDFCK); -} - static int hdmi_power_on(struct omap_dss_device *dssdev) { int r, code = 0; @@ -1115,7 +1113,9 @@ static int hdmi_power_on(struct omap_dss_device *dssdev) struct omap_video_timings *p; unsigned long phy; - hdmi_enable_clocks(1); + r = hdmi_runtime_get(); + if (r) + return r; dispc_enable_channel(OMAP_DSS_CHANNEL_DIGIT, 0); @@ -1180,7 +1180,7 @@ static int hdmi_power_on(struct omap_dss_device *dssdev) return 0; err: - hdmi_enable_clocks(0); + hdmi_runtime_put(); return -EIO; } @@ -1191,7 +1191,7 @@ static void hdmi_power_off(struct omap_dss_device *dssdev) hdmi_wp_video_start(0); hdmi_phy_off(); hdmi_set_pll_pwr(HDMI_PLLPWRCMD_ALLOFF); - hdmi_enable_clocks(0); + hdmi_runtime_put(); hdmi.edid_set = 0; } @@ -1686,14 +1686,43 @@ static struct snd_soc_dai_driver hdmi_codec_dai_drv = { }; #endif +static int hdmi_get_clocks(struct platform_device *pdev) +{ + struct clk *clk; + + clk = clk_get(&pdev->dev, "sys_clk"); + if (IS_ERR(clk)) { + DSSERR("can't get sys_clk\n"); + return PTR_ERR(clk); + } + + hdmi.sys_clk = clk; + + clk = clk_get(&pdev->dev, "dss_48mhz_clk"); + if (IS_ERR(clk)) { + DSSERR("can't get hdmi_clk\n"); + clk_put(hdmi.sys_clk); + return PTR_ERR(clk); + } + + hdmi.hdmi_clk = clk; + + return 0; +} + +static void hdmi_put_clocks(void) +{ + if (hdmi.sys_clk) + clk_put(hdmi.sys_clk); + if (hdmi.hdmi_clk) + clk_put(hdmi.hdmi_clk); +} + /* HDMI HW IP initialisation */ static int omapdss_hdmihw_probe(struct platform_device *pdev) { struct resource *hdmi_mem; -#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ - defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) - int ret; -#endif + int r; hdmi.pdata = pdev->dev.platform_data; hdmi.pdev = pdev; @@ -1713,17 +1742,25 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev) return -ENOMEM; } + r = hdmi_get_clocks(pdev); + if (r) { + iounmap(hdmi.base_wp); + return r; + } + + pm_runtime_enable(&pdev->dev); + hdmi_panel_init(); #if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) /* Register ASoC codec DAI 
*/ - ret = snd_soc_register_codec(&pdev->dev, &hdmi_audio_codec_drv, + r = snd_soc_register_codec(&pdev->dev, &hdmi_audio_codec_drv, &hdmi_codec_dai_drv, 1); - if (ret) { + if (r) { DSSERR("can't register ASoC HDMI audio codec\n"); - return ret; + return r; } #endif return 0; @@ -1738,17 +1775,62 @@ static int omapdss_hdmihw_remove(struct platform_device *pdev) snd_soc_unregister_codec(&pdev->dev); #endif + pm_runtime_disable(&pdev->dev); + + hdmi_put_clocks(); + iounmap(hdmi.base_wp); return 0; } +static int hdmi_runtime_suspend(struct device *dev) +{ + clk_disable(hdmi.hdmi_clk); + clk_disable(hdmi.sys_clk); + + dispc_runtime_put(); + dss_runtime_put(); + + return 0; +} + +static int hdmi_runtime_resume(struct device *dev) +{ + int r; + + r = dss_runtime_get(); + if (r < 0) + goto err_get_dss; + + r = dispc_runtime_get(); + if (r < 0) + goto err_get_dispc; + + + clk_enable(hdmi.sys_clk); + clk_enable(hdmi.hdmi_clk); + + return 0; + +err_get_dispc: + dss_runtime_put(); +err_get_dss: + return r; +} + +static const struct dev_pm_ops hdmi_pm_ops = { + .runtime_suspend = hdmi_runtime_suspend, + .runtime_resume = hdmi_runtime_resume, +}; + static struct platform_driver omapdss_hdmihw_driver = { .probe = omapdss_hdmihw_probe, .remove = omapdss_hdmihw_remove, .driver = { .name = "omapdss_hdmi", .owner = THIS_MODULE, + .pm = &hdmi_pm_ops, }, }; diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c index 9aeea50e33ff..13d72d5c714b 100644 --- a/drivers/video/omap2/dss/manager.c +++ b/drivers/video/omap2/dss/manager.c @@ -275,6 +275,108 @@ static ssize_t manager_alpha_blending_enabled_store( return size; } +static ssize_t manager_cpr_enable_show(struct omap_overlay_manager *mgr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", mgr->info.cpr_enable); +} + +static ssize_t manager_cpr_enable_store(struct omap_overlay_manager *mgr, + const char *buf, size_t size) +{ + struct omap_overlay_manager_info info; + int v; + int r; + bool enable; + + if (!dss_has_feature(FEAT_CPR)) + return -ENODEV; + + r = kstrtoint(buf, 0, &v); + if (r) + return r; + + enable = !!v; + + mgr->get_manager_info(mgr, &info); + + if (info.cpr_enable == enable) + return size; + + info.cpr_enable = enable; + + r = mgr->set_manager_info(mgr, &info); + if (r) + return r; + + r = mgr->apply(mgr); + if (r) + return r; + + return size; +} + +static ssize_t manager_cpr_coef_show(struct omap_overlay_manager *mgr, + char *buf) +{ + struct omap_overlay_manager_info info; + + mgr->get_manager_info(mgr, &info); + + return snprintf(buf, PAGE_SIZE, + "%d %d %d %d %d %d %d %d %d\n", + info.cpr_coefs.rr, + info.cpr_coefs.rg, + info.cpr_coefs.rb, + info.cpr_coefs.gr, + info.cpr_coefs.gg, + info.cpr_coefs.gb, + info.cpr_coefs.br, + info.cpr_coefs.bg, + info.cpr_coefs.bb); +} + +static ssize_t manager_cpr_coef_store(struct omap_overlay_manager *mgr, + const char *buf, size_t size) +{ + struct omap_overlay_manager_info info; + struct omap_dss_cpr_coefs coefs; + int r, i; + s16 *arr; + + if (!dss_has_feature(FEAT_CPR)) + return -ENODEV; + + if (sscanf(buf, "%hd %hd %hd %hd %hd %hd %hd %hd %hd", + &coefs.rr, &coefs.rg, &coefs.rb, + &coefs.gr, &coefs.gg, &coefs.gb, + &coefs.br, &coefs.bg, &coefs.bb) != 9) + return -EINVAL; + + arr = (s16[]){ coefs.rr, coefs.rg, coefs.rb, + coefs.gr, coefs.gg, coefs.gb, + coefs.br, coefs.bg, coefs.bb }; + + for (i = 0; i < 9; ++i) { + if (arr[i] < -512 || arr[i] > 511) + return -EINVAL; + } + + mgr->get_manager_info(mgr, &info); + + info.cpr_coefs = coefs; + + r = 
mgr->set_manager_info(mgr, &info); + if (r) + return r; + + r = mgr->apply(mgr); + if (r) + return r; + + return size; +} + struct manager_attribute { struct attribute attr; ssize_t (*show)(struct omap_overlay_manager *, char *); @@ -300,6 +402,12 @@ static MANAGER_ATTR(trans_key_enabled, S_IRUGO|S_IWUSR, static MANAGER_ATTR(alpha_blending_enabled, S_IRUGO|S_IWUSR, manager_alpha_blending_enabled_show, manager_alpha_blending_enabled_store); +static MANAGER_ATTR(cpr_enable, S_IRUGO|S_IWUSR, + manager_cpr_enable_show, + manager_cpr_enable_store); +static MANAGER_ATTR(cpr_coef, S_IRUGO|S_IWUSR, + manager_cpr_coef_show, + manager_cpr_coef_store); static struct attribute *manager_sysfs_attrs[] = { @@ -310,6 +418,8 @@ static struct attribute *manager_sysfs_attrs[] = { &manager_attr_trans_key_value.attr, &manager_attr_trans_key_enabled.attr, &manager_attr_alpha_blending_enabled.attr, + &manager_attr_cpr_enable.attr, + &manager_attr_cpr_coef.attr, NULL }; @@ -391,33 +501,14 @@ struct overlay_cache_data { bool enabled; - u32 paddr; - void __iomem *vaddr; - u32 p_uv_addr; /* relevant for NV12 format only */ - u16 screen_width; - u16 width; - u16 height; - enum omap_color_mode color_mode; - u8 rotation; - enum omap_dss_rotation_type rotation_type; - bool mirror; - - u16 pos_x; - u16 pos_y; - u16 out_width; /* if 0, out_width == width */ - u16 out_height; /* if 0, out_height == height */ - u8 global_alpha; - u8 pre_mult_alpha; + struct omap_overlay_info info; enum omap_channel channel; bool replication; bool ilace; - enum omap_burst_size burst_size; u32 fifo_low; u32 fifo_high; - - bool manual_update; }; struct manager_cache_data { @@ -429,15 +520,8 @@ struct manager_cache_data { * VSYNC/EVSYNC */ bool shadow_dirty; - u32 default_color; - - enum omap_dss_trans_key_type trans_key_type; - u32 trans_key; - bool trans_enabled; - - bool alpha_enabled; + struct omap_overlay_manager_info info; - bool manual_upd_display; bool manual_update; bool do_manual_update; @@ -539,24 +623,15 @@ static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr) if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) return 0; + if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) + return 0; + if (dssdev->type == OMAP_DISPLAY_TYPE_VENC || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) { irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN; } else { - if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { - enum omap_dss_update_mode mode; - mode = dssdev->driver->get_update_mode(dssdev); - if (mode != OMAP_DSS_UPDATE_AUTO) - return 0; - - irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ? - DISPC_IRQ_FRAMEDONE - : DISPC_IRQ_FRAMEDONE2; - } else { - irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ? - DISPC_IRQ_VSYNC - : DISPC_IRQ_VSYNC2; - } + irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ? + DISPC_IRQ_VSYNC : DISPC_IRQ_VSYNC2; } mc = &dss_cache.manager_cache[mgr->id]; @@ -617,24 +692,15 @@ int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl) if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) return 0; + if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) + return 0; + if (dssdev->type == OMAP_DISPLAY_TYPE_VENC || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) { irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN; } else { - if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { - enum omap_dss_update_mode mode; - mode = dssdev->driver->get_update_mode(dssdev); - if (mode != OMAP_DSS_UPDATE_AUTO) - return 0; - - irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ? 
- DISPC_IRQ_FRAMEDONE - : DISPC_IRQ_FRAMEDONE2; - } else { - irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ? - DISPC_IRQ_VSYNC - : DISPC_IRQ_VSYNC2; - } + irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ? + DISPC_IRQ_VSYNC : DISPC_IRQ_VSYNC2; } oc = &dss_cache.overlay_cache[ovl->id]; @@ -720,10 +786,12 @@ static bool rectangle_intersects(int x1, int y1, int w1, int h1, static bool dispc_is_overlay_scaled(struct overlay_cache_data *oc) { - if (oc->out_width != 0 && oc->width != oc->out_width) + struct omap_overlay_info *oi = &oc->info; + + if (oi->out_width != 0 && oi->width != oi->out_width) return true; - if (oc->out_height != 0 && oc->height != oc->out_height) + if (oi->out_height != 0 && oi->height != oi->out_height) return true; return false; @@ -733,6 +801,8 @@ static int configure_overlay(enum omap_plane plane) { struct overlay_cache_data *c; struct manager_cache_data *mc; + struct omap_overlay_info *oi; + struct omap_overlay_manager_info *mi; u16 outw, outh; u16 x, y, w, h; u32 paddr; @@ -742,6 +812,7 @@ static int configure_overlay(enum omap_plane plane) DSSDBGF("%d", plane); c = &dss_cache.overlay_cache[plane]; + oi = &c->info; if (!c->enabled) { dispc_enable_plane(plane, 0); @@ -749,21 +820,22 @@ static int configure_overlay(enum omap_plane plane) } mc = &dss_cache.manager_cache[c->channel]; + mi = &mc->info; - x = c->pos_x; - y = c->pos_y; - w = c->width; - h = c->height; - outw = c->out_width == 0 ? c->width : c->out_width; - outh = c->out_height == 0 ? c->height : c->out_height; - paddr = c->paddr; + x = oi->pos_x; + y = oi->pos_y; + w = oi->width; + h = oi->height; + outw = oi->out_width == 0 ? oi->width : oi->out_width; + outh = oi->out_height == 0 ? oi->height : oi->out_height; + paddr = oi->paddr; orig_w = w; orig_h = h; orig_outw = outw; orig_outh = outh; - if (c->manual_update && mc->do_manual_update) { + if (mc->manual_update && mc->do_manual_update) { unsigned bpp; unsigned scale_x_m = w, scale_x_d = outw; unsigned scale_y_m = h, scale_y_d = outh; @@ -775,7 +847,7 @@ static int configure_overlay(enum omap_plane plane) return 0; } - switch (c->color_mode) { + switch (oi->color_mode) { case OMAP_DSS_COLOR_NV12: bpp = 8; break; @@ -805,23 +877,23 @@ static int configure_overlay(enum omap_plane plane) BUG(); } - if (mc->x > c->pos_x) { + if (mc->x > oi->pos_x) { x = 0; - outw -= (mc->x - c->pos_x); - paddr += (mc->x - c->pos_x) * + outw -= (mc->x - oi->pos_x); + paddr += (mc->x - oi->pos_x) * scale_x_m / scale_x_d * bpp / 8; } else { - x = c->pos_x - mc->x; + x = oi->pos_x - mc->x; } - if (mc->y > c->pos_y) { + if (mc->y > oi->pos_y) { y = 0; - outh -= (mc->y - c->pos_y); - paddr += (mc->y - c->pos_y) * + outh -= (mc->y - oi->pos_y); + paddr += (mc->y - oi->pos_y) * scale_y_m / scale_y_d * - c->screen_width * bpp / 8; + oi->screen_width * bpp / 8; } else { - y = c->pos_y - mc->y; + y = oi->pos_y - mc->y; } if (mc->w < (x + outw)) @@ -840,8 +912,8 @@ static int configure_overlay(enum omap_plane plane) * the width if the original width was bigger. 
*/ if ((w & 1) && - (c->color_mode == OMAP_DSS_COLOR_YUV2 || - c->color_mode == OMAP_DSS_COLOR_UYVY)) { + (oi->color_mode == OMAP_DSS_COLOR_YUV2 || + oi->color_mode == OMAP_DSS_COLOR_UYVY)) { if (orig_w > w) w += 1; else @@ -851,19 +923,19 @@ static int configure_overlay(enum omap_plane plane) r = dispc_setup_plane(plane, paddr, - c->screen_width, + oi->screen_width, x, y, w, h, outw, outh, - c->color_mode, + oi->color_mode, c->ilace, - c->rotation_type, - c->rotation, - c->mirror, - c->global_alpha, - c->pre_mult_alpha, + oi->rotation_type, + oi->rotation, + oi->mirror, + oi->global_alpha, + oi->pre_mult_alpha, c->channel, - c->p_uv_addr); + oi->p_uv_addr); if (r) { /* this shouldn't happen */ @@ -874,8 +946,7 @@ static int configure_overlay(enum omap_plane plane) dispc_enable_replication(plane, c->replication); - dispc_set_burst_size(plane, c->burst_size); - dispc_setup_plane_fifo(plane, c->fifo_low, c->fifo_high); + dispc_set_fifo_threshold(plane, c->fifo_low, c->fifo_high); dispc_enable_plane(plane, 1); @@ -884,16 +955,21 @@ static int configure_overlay(enum omap_plane plane) static void configure_manager(enum omap_channel channel) { - struct manager_cache_data *c; + struct omap_overlay_manager_info *mi; DSSDBGF("%d", channel); - c = &dss_cache.manager_cache[channel]; + /* picking info from the cache */ + mi = &dss_cache.manager_cache[channel].info; - dispc_set_default_color(channel, c->default_color); - dispc_set_trans_key(channel, c->trans_key_type, c->trans_key); - dispc_enable_trans_key(channel, c->trans_enabled); - dispc_enable_alpha_blending(channel, c->alpha_enabled); + dispc_set_default_color(channel, mi->default_color); + dispc_set_trans_key(channel, mi->trans_key_type, mi->trans_key); + dispc_enable_trans_key(channel, mi->trans_enabled); + dispc_enable_alpha_blending(channel, mi->alpha_enabled); + if (dss_has_feature(FEAT_CPR)) { + dispc_enable_cpr(channel, mi->cpr_enable); + dispc_set_cpr_coef(channel, &mi->cpr_coefs); + } } /* configure_dispc() tries to write values from cache to shadow registers. @@ -928,7 +1004,7 @@ static int configure_dispc(void) if (!oc->dirty) continue; - if (oc->manual_update && !mc->do_manual_update) + if (mc->manual_update && !mc->do_manual_update) continue; if (mgr_busy[oc->channel]) { @@ -976,7 +1052,7 @@ static int configure_dispc(void) /* We don't need GO with manual update display. LCD iface will * always be turned off after frame, and new settings will be * taken in to use at next update */ - if (!mc->manual_upd_display) + if (!mc->manual_update) dispc_go(i); } @@ -1011,6 +1087,7 @@ void dss_setup_partial_planes(struct omap_dss_device *dssdev, { struct overlay_cache_data *oc; struct manager_cache_data *mc; + struct omap_overlay_info *oi; const int num_ovls = dss_feat_get_num_ovls(); struct omap_overlay_manager *mgr; int i; @@ -1053,6 +1130,7 @@ void dss_setup_partial_planes(struct omap_dss_device *dssdev, unsigned outw, outh; oc = &dss_cache.overlay_cache[i]; + oi = &oc->info; if (oc->channel != mgr->id) continue; @@ -1068,39 +1146,39 @@ void dss_setup_partial_planes(struct omap_dss_device *dssdev, if (!dispc_is_overlay_scaled(oc)) continue; - outw = oc->out_width == 0 ? - oc->width : oc->out_width; - outh = oc->out_height == 0 ? - oc->height : oc->out_height; + outw = oi->out_width == 0 ? + oi->width : oi->out_width; + outh = oi->out_height == 0 ? + oi->height : oi->out_height; /* is the overlay outside the update region? 
*/ if (!rectangle_intersects(x, y, w, h, - oc->pos_x, oc->pos_y, + oi->pos_x, oi->pos_y, outw, outh)) continue; /* if the overlay totally inside the update region? */ - if (rectangle_subset(oc->pos_x, oc->pos_y, outw, outh, + if (rectangle_subset(oi->pos_x, oi->pos_y, outw, outh, x, y, w, h)) continue; - if (x > oc->pos_x) - x1 = oc->pos_x; + if (x > oi->pos_x) + x1 = oi->pos_x; else x1 = x; - if (y > oc->pos_y) - y1 = oc->pos_y; + if (y > oi->pos_y) + y1 = oi->pos_y; else y1 = y; - if ((x + w) < (oc->pos_x + outw)) - x2 = oc->pos_x + outw; + if ((x + w) < (oi->pos_x + outw)) + x2 = oi->pos_x + outw; else x2 = x + w; - if ((y + h) < (oc->pos_y + outh)) - y2 = oc->pos_y + outh; + if ((y + h) < (oi->pos_y + outh)) + y2 = oi->pos_y + outh; else y2 = y + h; @@ -1236,6 +1314,10 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr) DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name); + r = dispc_runtime_get(); + if (r) + return r; + spin_lock_irqsave(&dss_cache.lock, flags); /* Configure overlays */ @@ -1275,23 +1357,7 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr) ovl->info_dirty = false; oc->dirty = true; - - oc->paddr = ovl->info.paddr; - oc->vaddr = ovl->info.vaddr; - oc->p_uv_addr = ovl->info.p_uv_addr; - oc->screen_width = ovl->info.screen_width; - oc->width = ovl->info.width; - oc->height = ovl->info.height; - oc->color_mode = ovl->info.color_mode; - oc->rotation = ovl->info.rotation; - oc->rotation_type = ovl->info.rotation_type; - oc->mirror = ovl->info.mirror; - oc->pos_x = ovl->info.pos_x; - oc->pos_y = ovl->info.pos_y; - oc->out_width = ovl->info.out_width; - oc->out_height = ovl->info.out_height; - oc->global_alpha = ovl->info.global_alpha; - oc->pre_mult_alpha = ovl->info.pre_mult_alpha; + oc->info = ovl->info; oc->replication = dss_use_replication(dssdev, ovl->info.color_mode); @@ -1302,11 +1368,6 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr) oc->enabled = true; - oc->manual_update = - dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE && - dssdev->driver->get_update_mode(dssdev) != - OMAP_DSS_UPDATE_AUTO; - ++num_planes_enabled; } @@ -1334,20 +1395,10 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr) mgr->info_dirty = false; mc->dirty = true; - - mc->default_color = mgr->info.default_color; - mc->trans_key_type = mgr->info.trans_key_type; - mc->trans_key = mgr->info.trans_key; - mc->trans_enabled = mgr->info.trans_enabled; - mc->alpha_enabled = mgr->info.alpha_enabled; - - mc->manual_upd_display = - dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE; + mc->info = mgr->info; mc->manual_update = - dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE && - dssdev->driver->get_update_mode(dssdev) != - OMAP_DSS_UPDATE_AUTO; + dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE; } /* XXX TODO: Try to get fifomerge working. 
The problem is that it @@ -1368,7 +1419,7 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr) /* Configure overlay fifos */ for (i = 0; i < omap_dss_get_num_overlays(); ++i) { struct omap_dss_device *dssdev; - u32 size; + u32 size, burst_size; ovl = omap_dss_get_overlay(i); @@ -1386,6 +1437,8 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr) if (use_fifomerge) size *= 3; + burst_size = dispc_get_burst_size(ovl->id); + switch (dssdev->type) { case OMAP_DISPLAY_TYPE_DPI: case OMAP_DISPLAY_TYPE_DBI: @@ -1393,13 +1446,13 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr) case OMAP_DISPLAY_TYPE_VENC: case OMAP_DISPLAY_TYPE_HDMI: default_get_overlay_fifo_thresholds(ovl->id, size, - &oc->burst_size, &oc->fifo_low, + burst_size, &oc->fifo_low, &oc->fifo_high); break; #ifdef CONFIG_OMAP2_DSS_DSI case OMAP_DISPLAY_TYPE_DSI: dsi_get_overlay_fifo_thresholds(ovl->id, size, - &oc->burst_size, &oc->fifo_low, + burst_size, &oc->fifo_low, &oc->fifo_high); break; #endif @@ -1409,7 +1462,6 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr) } r = 0; - dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); if (!dss_cache.irq_enabled) { u32 mask; @@ -1422,10 +1474,11 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr) dss_cache.irq_enabled = true; } configure_dispc(); - dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); spin_unlock_irqrestore(&dss_cache.lock, flags); + dispc_runtime_put(); + return r; } diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c index 0f08025b1f0e..c84380c53c39 100644 --- a/drivers/video/omap2/dss/overlay.c +++ b/drivers/video/omap2/dss/overlay.c @@ -84,32 +84,42 @@ static ssize_t overlay_manager_store(struct omap_overlay *ovl, const char *buf, old_mgr = ovl->manager; + r = dispc_runtime_get(); + if (r) + return r; + /* detach old manager */ if (old_mgr) { r = ovl->unset_manager(ovl); if (r) { DSSERR("detach failed\n"); - return r; + goto err; } r = old_mgr->apply(old_mgr); if (r) - return r; + goto err; } if (mgr) { r = ovl->set_manager(ovl, mgr); if (r) { DSSERR("Failed to attach overlay\n"); - return r; + goto err; } r = mgr->apply(mgr); if (r) - return r; + goto err; } + dispc_runtime_put(); + return size; + +err: + dispc_runtime_put(); + return r; } static ssize_t overlay_input_size_show(struct omap_overlay *ovl, char *buf) @@ -238,6 +248,9 @@ static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl, u8 alpha; struct omap_overlay_info info; + if (!dss_has_feature(FEAT_GLOBAL_ALPHA)) + return -ENODEV; + r = kstrtou8(buf, 0, &alpha); if (r) return r; @@ -504,7 +517,6 @@ static int omap_dss_set_manager(struct omap_overlay *ovl, ovl->manager = mgr; - dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); /* XXX: When there is an overlay on a DSI manual update display, and * the overlay is first disabled, then moved to tv, and enabled, we * seem to get SYNC_LOST_DIGIT error. @@ -518,7 +530,6 @@ static int omap_dss_set_manager(struct omap_overlay *ovl, * the overlay, but before moving the overlay to TV. 
*/ dispc_set_channel_out(ovl->id, mgr->id); - dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); return 0; } @@ -719,6 +730,8 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force) } if (mgr) { + dispc_runtime_get(); + for (i = 0; i < dss_feat_get_num_ovls(); i++) { struct omap_overlay *ovl; ovl = omap_dss_get_overlay(i); @@ -728,6 +741,8 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force) omap_dss_set_manager(ovl, mgr); } } + + dispc_runtime_put(); } } diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c index c06fbe0bc678..39f4c597026a 100644 --- a/drivers/video/omap2/dss/rfbi.c +++ b/drivers/video/omap2/dss/rfbi.c @@ -33,6 +33,8 @@ #include <linux/hrtimer.h> #include <linux/seq_file.h> #include <linux/semaphore.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <video/omapdss.h> #include "dss.h" @@ -120,12 +122,25 @@ static inline u32 rfbi_read_reg(const struct rfbi_reg idx) return __raw_readl(rfbi.base + idx.idx); } -static void rfbi_enable_clocks(bool enable) +static int rfbi_runtime_get(void) { - if (enable) - dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); - else - dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); + int r; + + DSSDBG("rfbi_runtime_get\n"); + + r = pm_runtime_get_sync(&rfbi.pdev->dev); + WARN_ON(r < 0); + return r < 0 ? r : 0; +} + +static void rfbi_runtime_put(void) +{ + int r; + + DSSDBG("rfbi_runtime_put\n"); + + r = pm_runtime_put(&rfbi.pdev->dev); + WARN_ON(r < 0); } void rfbi_bus_lock(void) @@ -805,7 +820,8 @@ void rfbi_dump_regs(struct seq_file *s) { #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r)) - dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); + if (rfbi_runtime_get()) + return; DUMPREG(RFBI_REVISION); DUMPREG(RFBI_SYSCONFIG); @@ -836,7 +852,7 @@ void rfbi_dump_regs(struct seq_file *s) DUMPREG(RFBI_VSYNC_WIDTH); DUMPREG(RFBI_HSYNC_WIDTH); - dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); + rfbi_runtime_put(); #undef DUMPREG } @@ -844,7 +860,9 @@ int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev) { int r; - rfbi_enable_clocks(1); + r = rfbi_runtime_get(); + if (r) + return r; r = omap_dss_start_device(dssdev); if (r) { @@ -879,6 +897,7 @@ int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev) err1: omap_dss_stop_device(dssdev); err0: + rfbi_runtime_put(); return r; } EXPORT_SYMBOL(omapdss_rfbi_display_enable); @@ -889,7 +908,7 @@ void omapdss_rfbi_display_disable(struct omap_dss_device *dssdev) DISPC_IRQ_FRAMEDONE); omap_dss_stop_device(dssdev); - rfbi_enable_clocks(0); + rfbi_runtime_put(); } EXPORT_SYMBOL(omapdss_rfbi_display_disable); @@ -904,8 +923,9 @@ int rfbi_init_display(struct omap_dss_device *dssdev) static int omap_rfbihw_probe(struct platform_device *pdev) { u32 rev; - u32 l; struct resource *rfbi_mem; + struct clk *clk; + int r; rfbi.pdev = pdev; @@ -914,46 +934,102 @@ static int omap_rfbihw_probe(struct platform_device *pdev) rfbi_mem = platform_get_resource(rfbi.pdev, IORESOURCE_MEM, 0); if (!rfbi_mem) { DSSERR("can't get IORESOURCE_MEM RFBI\n"); - return -EINVAL; + r = -EINVAL; + goto err_ioremap; } rfbi.base = ioremap(rfbi_mem->start, resource_size(rfbi_mem)); if (!rfbi.base) { DSSERR("can't ioremap RFBI\n"); - return -ENOMEM; + r = -ENOMEM; + goto err_ioremap; } - rfbi_enable_clocks(1); + pm_runtime_enable(&pdev->dev); + + r = rfbi_runtime_get(); + if (r) + goto err_get_rfbi; msleep(10); - rfbi.l4_khz = dss_clk_get_rate(DSS_CLK_ICK) / 1000; + if (cpu_is_omap24xx() || cpu_is_omap34xx() || cpu_is_omap3630()) + clk = 
dss_get_ick(); + else + clk = clk_get(&pdev->dev, "ick"); + if (IS_ERR(clk)) { + DSSERR("can't get ick\n"); + r = PTR_ERR(clk); + goto err_get_ick; + } + + rfbi.l4_khz = clk_get_rate(clk) / 1000; - /* Enable autoidle and smart-idle */ - l = rfbi_read_reg(RFBI_SYSCONFIG); - l |= (1 << 0) | (2 << 3); - rfbi_write_reg(RFBI_SYSCONFIG, l); + clk_put(clk); rev = rfbi_read_reg(RFBI_REVISION); dev_dbg(&pdev->dev, "OMAP RFBI rev %d.%d\n", FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); - rfbi_enable_clocks(0); + rfbi_runtime_put(); return 0; + +err_get_ick: + rfbi_runtime_put(); +err_get_rfbi: + pm_runtime_disable(&pdev->dev); + iounmap(rfbi.base); +err_ioremap: + return r; } static int omap_rfbihw_remove(struct platform_device *pdev) { + pm_runtime_disable(&pdev->dev); iounmap(rfbi.base); return 0; } +static int rfbi_runtime_suspend(struct device *dev) +{ + dispc_runtime_put(); + dss_runtime_put(); + + return 0; +} + +static int rfbi_runtime_resume(struct device *dev) +{ + int r; + + r = dss_runtime_get(); + if (r < 0) + goto err_get_dss; + + r = dispc_runtime_get(); + if (r < 0) + goto err_get_dispc; + + return 0; + +err_get_dispc: + dss_runtime_put(); +err_get_dss: + return r; +} + +static const struct dev_pm_ops rfbi_pm_ops = { + .runtime_suspend = rfbi_runtime_suspend, + .runtime_resume = rfbi_runtime_resume, +}; + static struct platform_driver omap_rfbihw_driver = { .probe = omap_rfbihw_probe, .remove = omap_rfbihw_remove, .driver = { .name = "omapdss_rfbi", .owner = THIS_MODULE, + .pm = &rfbi_pm_ops, }, }; diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c index 0bd4b0350f80..3a688c871a45 100644 --- a/drivers/video/omap2/dss/sdi.c +++ b/drivers/video/omap2/dss/sdi.c @@ -20,13 +20,11 @@ #define DSS_SUBSYS_NAME "SDI" #include <linux/kernel.h> -#include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/regulator/consumer.h> #include <video/omapdss.h> -#include <plat/cpu.h> #include "dss.h" static struct { @@ -60,14 +58,20 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) r = omap_dss_start_device(dssdev); if (r) { DSSERR("failed to start device\n"); - goto err0; + goto err_start_dev; } r = regulator_enable(sdi.vdds_sdi_reg); if (r) - goto err1; + goto err_reg_enable; - dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); + r = dss_runtime_get(); + if (r) + goto err_get_dss; + + r = dispc_runtime_get(); + if (r) + goto err_get_dispc; sdi_basic_init(dssdev); @@ -80,7 +84,7 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) r = dss_calc_clock_div(1, t->pixel_clock * 1000, &dss_cinfo, &dispc_cinfo); if (r) - goto err2; + goto err_calc_clock_div; fck = dss_cinfo.fck; lck_div = dispc_cinfo.lck_div; @@ -101,27 +105,34 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) r = dss_set_clock_div(&dss_cinfo); if (r) - goto err2; + goto err_set_dss_clock_div; r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo); if (r) - goto err2; + goto err_set_dispc_clock_div; dss_sdi_init(dssdev->phy.sdi.datapairs); r = dss_sdi_enable(); if (r) - goto err1; + goto err_sdi_enable; mdelay(2); dssdev->manager->enable(dssdev->manager); return 0; -err2: - dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); + +err_sdi_enable: +err_set_dispc_clock_div: +err_set_dss_clock_div: +err_calc_clock_div: + dispc_runtime_put(); +err_get_dispc: + dss_runtime_put(); +err_get_dss: regulator_disable(sdi.vdds_sdi_reg); -err1: +err_reg_enable: omap_dss_stop_device(dssdev); -err0: +err_start_dev: return r; } EXPORT_SYMBOL(omapdss_sdi_display_enable); @@ -132,7 +143,8 
@@ void omapdss_sdi_display_disable(struct omap_dss_device *dssdev) dss_sdi_disable(); - dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); + dispc_runtime_put(); + dss_runtime_put(); regulator_disable(sdi.vdds_sdi_reg); diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c index 980f919ed987..173c66430dad 100644 --- a/drivers/video/omap2/dss/venc.c +++ b/drivers/video/omap2/dss/venc.c @@ -33,11 +33,13 @@ #include <linux/seq_file.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> +#include <linux/pm_runtime.h> #include <video/omapdss.h> #include <plat/cpu.h> #include "dss.h" +#include "dss_features.h" /* Venc registers */ #define VENC_REV_ID 0x00 @@ -292,6 +294,9 @@ static struct { struct mutex venc_lock; u32 wss_data; struct regulator *vdda_dac_reg; + + struct clk *tv_clk; + struct clk *tv_dac_clk; } venc; static inline void venc_write_reg(int idx, u32 val) @@ -380,14 +385,25 @@ static void venc_reset(void) #endif } -static void venc_enable_clocks(int enable) +static int venc_runtime_get(void) +{ + int r; + + DSSDBG("venc_runtime_get\n"); + + r = pm_runtime_get_sync(&venc.pdev->dev); + WARN_ON(r < 0); + return r < 0 ? r : 0; +} + +static void venc_runtime_put(void) { - if (enable) - dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_TVFCK | - DSS_CLK_VIDFCK); - else - dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK | DSS_CLK_TVFCK | - DSS_CLK_VIDFCK); + int r; + + DSSDBG("venc_runtime_put\n"); + + r = pm_runtime_put(&venc.pdev->dev); + WARN_ON(r < 0); } static const struct venc_config *venc_timings_to_config( @@ -406,8 +422,6 @@ static void venc_power_on(struct omap_dss_device *dssdev) { u32 l; - venc_enable_clocks(1); - venc_reset(); venc_write_config(venc_timings_to_config(&dssdev->panel.timings)); @@ -448,8 +462,6 @@ static void venc_power_off(struct omap_dss_device *dssdev) dssdev->platform_disable(dssdev); regulator_disable(venc.vdda_dac_reg); - - venc_enable_clocks(0); } @@ -487,6 +499,10 @@ static int venc_panel_enable(struct omap_dss_device *dssdev) goto err1; } + r = venc_runtime_get(); + if (r) + goto err1; + venc_power_on(dssdev); venc.wss_data = 0; @@ -520,6 +536,8 @@ static void venc_panel_disable(struct omap_dss_device *dssdev) venc_power_off(dssdev); + venc_runtime_put(); + dssdev->state = OMAP_DSS_DISPLAY_DISABLED; omap_dss_stop_device(dssdev); @@ -538,20 +556,6 @@ static int venc_panel_resume(struct omap_dss_device *dssdev) return venc_panel_enable(dssdev); } -static enum omap_dss_update_mode venc_get_update_mode( - struct omap_dss_device *dssdev) -{ - return OMAP_DSS_UPDATE_AUTO; -} - -static int venc_set_update_mode(struct omap_dss_device *dssdev, - enum omap_dss_update_mode mode) -{ - if (mode != OMAP_DSS_UPDATE_AUTO) - return -EINVAL; - return 0; -} - static void venc_get_timings(struct omap_dss_device *dssdev, struct omap_video_timings *timings) { @@ -598,6 +602,7 @@ static u32 venc_get_wss(struct omap_dss_device *dssdev) static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss) { const struct venc_config *config; + int r; DSSDBG("venc_set_wss\n"); @@ -608,16 +613,19 @@ static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss) /* Invert due to VENC_L21_WC_CTL:INV=1 */ venc.wss_data = (wss ^ 0xfffff) << 8; - venc_enable_clocks(1); + r = venc_runtime_get(); + if (r) + goto err; venc_write_reg(VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data | venc.wss_data); - venc_enable_clocks(0); + venc_runtime_put(); +err: mutex_unlock(&venc.venc_lock); - return 0; + return r; } static struct omap_dss_driver venc_driver = { @@ -632,9 
+640,6 @@ static struct omap_dss_driver venc_driver = { .get_resolution = omapdss_default_get_resolution, .get_recommended_bpp = omapdss_default_get_recommended_bpp, - .set_update_mode = venc_set_update_mode, - .get_update_mode = venc_get_update_mode, - .get_timings = venc_get_timings, .set_timings = venc_set_timings, .check_timings = venc_check_timings, @@ -673,7 +678,8 @@ void venc_dump_regs(struct seq_file *s) { #define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r)) - venc_enable_clocks(1); + if (venc_runtime_get()) + return; DUMPREG(VENC_F_CONTROL); DUMPREG(VENC_VIDOUT_CTRL); @@ -717,16 +723,56 @@ void venc_dump_regs(struct seq_file *s) DUMPREG(VENC_OUTPUT_CONTROL); DUMPREG(VENC_OUTPUT_TEST); - venc_enable_clocks(0); + venc_runtime_put(); #undef DUMPREG } +static int venc_get_clocks(struct platform_device *pdev) +{ + struct clk *clk; + + clk = clk_get(&pdev->dev, "fck"); + if (IS_ERR(clk)) { + DSSERR("can't get fck\n"); + return PTR_ERR(clk); + } + + venc.tv_clk = clk; + + if (dss_has_feature(FEAT_VENC_REQUIRES_TV_DAC_CLK)) { + if (cpu_is_omap34xx() || cpu_is_omap3630()) + clk = clk_get(&pdev->dev, "dss_96m_fck"); + else + clk = clk_get(&pdev->dev, "tv_dac_clk"); + if (IS_ERR(clk)) { + DSSERR("can't get tv_dac_clk\n"); + clk_put(venc.tv_clk); + return PTR_ERR(clk); + } + } else { + clk = NULL; + } + + venc.tv_dac_clk = clk; + + return 0; +} + +static void venc_put_clocks(void) +{ + if (venc.tv_clk) + clk_put(venc.tv_clk); + if (venc.tv_dac_clk) + clk_put(venc.tv_dac_clk); +} + /* VENC HW IP initialisation */ static int omap_venchw_probe(struct platform_device *pdev) { u8 rev_id; struct resource *venc_mem; + int r; venc.pdev = pdev; @@ -737,22 +783,40 @@ static int omap_venchw_probe(struct platform_device *pdev) venc_mem = platform_get_resource(venc.pdev, IORESOURCE_MEM, 0); if (!venc_mem) { DSSERR("can't get IORESOURCE_MEM VENC\n"); - return -EINVAL; + r = -EINVAL; + goto err_ioremap; } venc.base = ioremap(venc_mem->start, resource_size(venc_mem)); if (!venc.base) { DSSERR("can't ioremap VENC\n"); - return -ENOMEM; + r = -ENOMEM; + goto err_ioremap; } - venc_enable_clocks(1); + r = venc_get_clocks(pdev); + if (r) + goto err_get_clk; + + pm_runtime_enable(&pdev->dev); + + r = venc_runtime_get(); + if (r) + goto err_get_venc; rev_id = (u8)(venc_read_reg(VENC_REV_ID) & 0xff); dev_dbg(&pdev->dev, "OMAP VENC rev %d\n", rev_id); - venc_enable_clocks(0); + venc_runtime_put(); return omap_dss_register_driver(&venc_driver); + +err_get_venc: + pm_runtime_disable(&pdev->dev); + venc_put_clocks(); +err_get_clk: + iounmap(venc.base); +err_ioremap: + return r; } static int omap_venchw_remove(struct platform_device *pdev) @@ -763,16 +827,61 @@ static int omap_venchw_remove(struct platform_device *pdev) } omap_dss_unregister_driver(&venc_driver); + pm_runtime_disable(&pdev->dev); + venc_put_clocks(); + iounmap(venc.base); return 0; } +static int venc_runtime_suspend(struct device *dev) +{ + if (venc.tv_dac_clk) + clk_disable(venc.tv_dac_clk); + clk_disable(venc.tv_clk); + + dispc_runtime_put(); + dss_runtime_put(); + + return 0; +} + +static int venc_runtime_resume(struct device *dev) +{ + int r; + + r = dss_runtime_get(); + if (r < 0) + goto err_get_dss; + + r = dispc_runtime_get(); + if (r < 0) + goto err_get_dispc; + + clk_enable(venc.tv_clk); + if (venc.tv_dac_clk) + clk_enable(venc.tv_dac_clk); + + return 0; + +err_get_dispc: + dss_runtime_put(); +err_get_dss: + return r; +} + +static const struct dev_pm_ops venc_pm_ops = { + .runtime_suspend = venc_runtime_suspend, + 
.runtime_resume = venc_runtime_resume, +}; + static struct platform_driver omap_venchw_driver = { .probe = omap_venchw_probe, .remove = omap_venchw_remove, .driver = { .name = "omapdss_venc", .owner = THIS_MODULE, + .pm = &venc_pm_ops, }, }; diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c index cff450392b79..6b1ac23dbbd3 100644 --- a/drivers/video/omap2/omapfb/omapfb-ioctl.c +++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c @@ -316,67 +316,67 @@ int omapfb_update_window(struct fb_info *fbi, } EXPORT_SYMBOL(omapfb_update_window); -static int omapfb_set_update_mode(struct fb_info *fbi, +int omapfb_set_update_mode(struct fb_info *fbi, enum omapfb_update_mode mode) { struct omap_dss_device *display = fb2display(fbi); - enum omap_dss_update_mode um; + struct omapfb_info *ofbi = FB2OFB(fbi); + struct omapfb2_device *fbdev = ofbi->fbdev; + struct omapfb_display_data *d; int r; - if (!display || !display->driver->set_update_mode) + if (!display) return -EINVAL; - switch (mode) { - case OMAPFB_UPDATE_DISABLED: - um = OMAP_DSS_UPDATE_DISABLED; - break; + if (mode != OMAPFB_AUTO_UPDATE && mode != OMAPFB_MANUAL_UPDATE) + return -EINVAL; - case OMAPFB_AUTO_UPDATE: - um = OMAP_DSS_UPDATE_AUTO; - break; + omapfb_lock(fbdev); - case OMAPFB_MANUAL_UPDATE: - um = OMAP_DSS_UPDATE_MANUAL; - break; + d = get_display_data(fbdev, display); - default: - return -EINVAL; + if (d->update_mode == mode) { + omapfb_unlock(fbdev); + return 0; } - r = display->driver->set_update_mode(display, um); + r = 0; + + if (display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { + if (mode == OMAPFB_AUTO_UPDATE) + omapfb_start_auto_update(fbdev, display); + else /* MANUAL_UPDATE */ + omapfb_stop_auto_update(fbdev, display); + + d->update_mode = mode; + } else { /* AUTO_UPDATE */ + if (mode == OMAPFB_MANUAL_UPDATE) + r = -EINVAL; + } + + omapfb_unlock(fbdev); return r; } -static int omapfb_get_update_mode(struct fb_info *fbi, +int omapfb_get_update_mode(struct fb_info *fbi, enum omapfb_update_mode *mode) { struct omap_dss_device *display = fb2display(fbi); - enum omap_dss_update_mode m; + struct omapfb_info *ofbi = FB2OFB(fbi); + struct omapfb2_device *fbdev = ofbi->fbdev; + struct omapfb_display_data *d; if (!display) return -EINVAL; - if (!display->driver->get_update_mode) { - *mode = OMAPFB_AUTO_UPDATE; - return 0; - } + omapfb_lock(fbdev); - m = display->driver->get_update_mode(display); + d = get_display_data(fbdev, display); - switch (m) { - case OMAP_DSS_UPDATE_DISABLED: - *mode = OMAPFB_UPDATE_DISABLED; - break; - case OMAP_DSS_UPDATE_AUTO: - *mode = OMAPFB_AUTO_UPDATE; - break; - case OMAP_DSS_UPDATE_MANUAL: - *mode = OMAPFB_MANUAL_UPDATE; - break; - default: - BUG(); - } + *mode = d->update_mode; + + omapfb_unlock(fbdev); return 0; } diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c index 505bc12a3031..602b71a92d3c 100644 --- a/drivers/video/omap2/omapfb/omapfb-main.c +++ b/drivers/video/omap2/omapfb/omapfb-main.c @@ -46,6 +46,10 @@ static char *def_vram; static int def_vrfb; static int def_rotate; static int def_mirror; +static bool auto_update; +static unsigned int auto_update_freq; +module_param(auto_update, bool, 0); +module_param(auto_update_freq, uint, 0644); #ifdef DEBUG unsigned int omapfb_debug; @@ -1242,6 +1246,7 @@ static int omapfb_blank(int blank, struct fb_info *fbi) struct omapfb_info *ofbi = FB2OFB(fbi); struct omapfb2_device *fbdev = ofbi->fbdev; struct omap_dss_device *display = fb2display(fbi); + struct 
omapfb_display_data *d; int r = 0; if (!display) @@ -1249,6 +1254,8 @@ static int omapfb_blank(int blank, struct fb_info *fbi) omapfb_lock(fbdev); + d = get_display_data(fbdev, display); + switch (blank) { case FB_BLANK_UNBLANK: if (display->state != OMAP_DSS_DISPLAY_SUSPENDED) @@ -1257,6 +1264,11 @@ static int omapfb_blank(int blank, struct fb_info *fbi) if (display->driver->resume) r = display->driver->resume(display); + if ((display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) && + d->update_mode == OMAPFB_AUTO_UPDATE && + !d->auto_update_work_enabled) + omapfb_start_auto_update(fbdev, display); + break; case FB_BLANK_NORMAL: @@ -1268,6 +1280,9 @@ static int omapfb_blank(int blank, struct fb_info *fbi) if (display->state != OMAP_DSS_DISPLAY_ACTIVE) goto exit; + if (d->auto_update_work_enabled) + omapfb_stop_auto_update(fbdev, display); + if (display->driver->suspend) r = display->driver->suspend(display); @@ -1724,6 +1739,78 @@ err: return r; } +static void omapfb_auto_update_work(struct work_struct *work) +{ + struct omap_dss_device *dssdev; + struct omap_dss_driver *dssdrv; + struct omapfb_display_data *d; + u16 w, h; + unsigned int freq; + struct omapfb2_device *fbdev; + + d = container_of(work, struct omapfb_display_data, + auto_update_work.work); + + dssdev = d->dssdev; + dssdrv = dssdev->driver; + fbdev = d->fbdev; + + if (!dssdrv || !dssdrv->update) + return; + + if (dssdrv->sync) + dssdrv->sync(dssdev); + + dssdrv->get_resolution(dssdev, &w, &h); + dssdrv->update(dssdev, 0, 0, w, h); + + freq = auto_update_freq; + if (freq == 0) + freq = 20; + queue_delayed_work(fbdev->auto_update_wq, + &d->auto_update_work, HZ / freq); +} + +void omapfb_start_auto_update(struct omapfb2_device *fbdev, + struct omap_dss_device *display) +{ + struct omapfb_display_data *d; + + if (fbdev->auto_update_wq == NULL) { + struct workqueue_struct *wq; + + wq = create_singlethread_workqueue("omapfb_auto_update"); + + if (wq == NULL) { + dev_err(fbdev->dev, "Failed to create workqueue for " + "auto-update\n"); + return; + } + + fbdev->auto_update_wq = wq; + } + + d = get_display_data(fbdev, display); + + INIT_DELAYED_WORK(&d->auto_update_work, omapfb_auto_update_work); + + d->auto_update_work_enabled = true; + + omapfb_auto_update_work(&d->auto_update_work.work); +} + +void omapfb_stop_auto_update(struct omapfb2_device *fbdev, + struct omap_dss_device *display) +{ + struct omapfb_display_data *d; + + d = get_display_data(fbdev, display); + + cancel_delayed_work_sync(&d->auto_update_work); + + d->auto_update_work_enabled = false; +} + /* initialize fb_info, var, fix to something sane based on the display */ static int omapfb_fb_init(struct omapfb2_device *fbdev, struct fb_info *fbi) { @@ -1858,10 +1945,21 @@ static void omapfb_free_resources(struct omapfb2_device *fbdev) } for (i = 0; i < fbdev->num_displays; i++) { - if (fbdev->displays[i]->state != OMAP_DSS_DISPLAY_DISABLED) - fbdev->displays[i]->driver->disable(fbdev->displays[i]); + struct omap_dss_device *dssdev = fbdev->displays[i].dssdev; + + if (fbdev->displays[i].auto_update_work_enabled) + omapfb_stop_auto_update(fbdev, dssdev); + + if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) + dssdev->driver->disable(dssdev); + + omap_dss_put_device(dssdev); + } - omap_dss_put_device(fbdev->displays[i]); + if (fbdev->auto_update_wq != NULL) { + flush_workqueue(fbdev->auto_update_wq); + destroy_workqueue(fbdev->auto_update_wq); + fbdev->auto_update_wq = NULL; } dev_set_drvdata(fbdev->dev, NULL); @@ -2084,14 +2182,14 @@ static int omapfb_set_def_mode(struct 
omapfb2_device *fbdev, int r; u8 bpp; struct omap_video_timings timings, temp_timings; + struct omapfb_display_data *d; r = omapfb_mode_to_timings(mode_str, &timings, &bpp); if (r) return r; - fbdev->bpp_overrides[fbdev->num_bpp_overrides].dssdev = display; - fbdev->bpp_overrides[fbdev->num_bpp_overrides].bpp = bpp; - ++fbdev->num_bpp_overrides; + d = get_display_data(fbdev, display); + d->bpp_override = bpp; if (display->driver->check_timings) { r = display->driver->check_timings(display, &timings); @@ -2117,14 +2215,14 @@ static int omapfb_set_def_mode(struct omapfb2_device *fbdev, static int omapfb_get_recommended_bpp(struct omapfb2_device *fbdev, struct omap_dss_device *dssdev) { - int i; + struct omapfb_display_data *d; BUG_ON(dssdev->driver->get_recommended_bpp == NULL); - for (i = 0; i < fbdev->num_bpp_overrides; ++i) { - if (dssdev == fbdev->bpp_overrides[i].dssdev) - return fbdev->bpp_overrides[i].bpp; - } + d = get_display_data(fbdev, dssdev); + + if (d->bpp_override != 0) + return d->bpp_override; return dssdev->driver->get_recommended_bpp(dssdev); } @@ -2156,9 +2254,9 @@ static int omapfb_parse_def_modes(struct omapfb2_device *fbdev) display = NULL; for (i = 0; i < fbdev->num_displays; ++i) { - if (strcmp(fbdev->displays[i]->name, + if (strcmp(fbdev->displays[i].dssdev->name, display_str) == 0) { - display = fbdev->displays[i]; + display = fbdev->displays[i].dssdev; break; } } @@ -2182,6 +2280,7 @@ static int omapfb_init_display(struct omapfb2_device *fbdev, struct omap_dss_device *dssdev) { struct omap_dss_driver *dssdrv = dssdev->driver; + struct omapfb_display_data *d; int r; r = dssdrv->enable(dssdev); @@ -2191,8 +2290,20 @@ static int omapfb_init_display(struct omapfb2_device *fbdev, return r; } + d = get_display_data(fbdev, dssdev); + + d->fbdev = fbdev; + if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { u16 w, h; + + if (auto_update) { + omapfb_start_auto_update(fbdev, dssdev); + d->update_mode = OMAPFB_AUTO_UPDATE; + } else { + d->update_mode = OMAPFB_MANUAL_UPDATE; + } + if (dssdrv->enable_te) { r = dssdrv->enable_te(dssdev, 1); if (r) { @@ -2201,16 +2312,6 @@ static int omapfb_init_display(struct omapfb2_device *fbdev, } } - if (dssdrv->set_update_mode) { - r = dssdrv->set_update_mode(dssdev, - OMAP_DSS_UPDATE_MANUAL); - if (r) { - dev_err(fbdev->dev, - "Failed to set update mode\n"); - return r; - } - } - dssdrv->get_resolution(dssdev, &w, &h); r = dssdrv->update(dssdev, 0, 0, w, h); if (r) { @@ -2219,15 +2320,7 @@ static int omapfb_init_display(struct omapfb2_device *fbdev, return r; } } else { - if (dssdrv->set_update_mode) { - r = dssdrv->set_update_mode(dssdev, - OMAP_DSS_UPDATE_AUTO); - if (r) { - dev_err(fbdev->dev, - "Failed to set update mode\n"); - return r; - } - } + d->update_mode = OMAPFB_AUTO_UPDATE; } return 0; @@ -2275,6 +2368,8 @@ static int omapfb_probe(struct platform_device *pdev) fbdev->num_displays = 0; dssdev = NULL; for_each_dss_dev(dssdev) { + struct omapfb_display_data *d; + omap_dss_get_device(dssdev); if (!dssdev->driver) { @@ -2282,7 +2377,12 @@ static int omapfb_probe(struct platform_device *pdev) r = -ENODEV; } - fbdev->displays[fbdev->num_displays++] = dssdev; + d = &fbdev->displays[fbdev->num_displays++]; + d->dssdev = dssdev; + if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) + d->update_mode = OMAPFB_MANUAL_UPDATE; + else + d->update_mode = OMAPFB_AUTO_UPDATE; } if (r) diff --git a/drivers/video/omap2/omapfb/omapfb-sysfs.c b/drivers/video/omap2/omapfb/omapfb-sysfs.c index 2f5e817b2a9a..153bf1aceebc 100644 --- 
a/drivers/video/omap2/omapfb/omapfb-sysfs.c +++ b/drivers/video/omap2/omapfb/omapfb-sysfs.c @@ -518,6 +518,39 @@ static ssize_t show_virt(struct device *dev, return snprintf(buf, PAGE_SIZE, "%p\n", ofbi->region->vaddr); } +static ssize_t show_upd_mode(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fb_info *fbi = dev_get_drvdata(dev); + enum omapfb_update_mode mode; + int r; + + r = omapfb_get_update_mode(fbi, &mode); + + if (r) + return r; + + return snprintf(buf, PAGE_SIZE, "%u\n", (unsigned)mode); +} + +static ssize_t store_upd_mode(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fb_info *fbi = dev_get_drvdata(dev); + unsigned mode; + int r; + + r = kstrtouint(buf, 0, &mode); + if (r) + return r; + + r = omapfb_set_update_mode(fbi, mode); + if (r) + return r; + + return count; +} + static struct device_attribute omapfb_attrs[] = { __ATTR(rotate_type, S_IRUGO | S_IWUSR, show_rotate_type, store_rotate_type), @@ -528,6 +561,7 @@ static struct device_attribute omapfb_attrs[] = { store_overlays_rotate), __ATTR(phys_addr, S_IRUGO, show_phys, NULL), __ATTR(virt_addr, S_IRUGO, show_virt, NULL), + __ATTR(update_mode, S_IRUGO | S_IWUSR, show_upd_mode, store_upd_mode), }; int omapfb_create_sysfs(struct omapfb2_device *fbdev) diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/omap2/omapfb/omapfb.h index aa1b1d974276..fdf0edeccf4e 100644 --- a/drivers/video/omap2/omapfb/omapfb.h +++ b/drivers/video/omap2/omapfb/omapfb.h @@ -73,6 +73,15 @@ struct omapfb_info { bool mirror; }; +struct omapfb_display_data { + struct omapfb2_device *fbdev; + struct omap_dss_device *dssdev; + u8 bpp_override; + enum omapfb_update_mode update_mode; + bool auto_update_work_enabled; + struct delayed_work auto_update_work; +}; + struct omapfb2_device { struct device *dev; struct mutex mtx; @@ -86,17 +95,13 @@ struct omapfb2_device { struct omapfb2_mem_region regions[10]; unsigned num_displays; - struct omap_dss_device *displays[10]; + struct omapfb_display_data displays[10]; unsigned num_overlays; struct omap_overlay *overlays[10]; unsigned num_managers; struct omap_overlay_manager *managers[10]; - unsigned num_bpp_overrides; - struct { - struct omap_dss_device *dssdev; - u8 bpp; - } bpp_overrides[10]; + struct workqueue_struct *auto_update_wq; }; struct omapfb_colormode { @@ -128,6 +133,13 @@ int dss_mode_to_fb_mode(enum omap_color_mode dssmode, int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl, u16 posx, u16 posy, u16 outw, u16 outh); +void omapfb_start_auto_update(struct omapfb2_device *fbdev, + struct omap_dss_device *display); +void omapfb_stop_auto_update(struct omapfb2_device *fbdev, + struct omap_dss_device *display); +int omapfb_get_update_mode(struct fb_info *fbi, enum omapfb_update_mode *mode); +int omapfb_set_update_mode(struct fb_info *fbi, enum omapfb_update_mode mode); + /* find the display connected to this fb, if any */ static inline struct omap_dss_device *fb2display(struct fb_info *fbi) { @@ -143,6 +155,19 @@ static inline struct omap_dss_device *fb2display(struct fb_info *fbi) return NULL; } +static inline struct omapfb_display_data *get_display_data( + struct omapfb2_device *fbdev, struct omap_dss_device *dssdev) +{ + int i; + + for (i = 0; i < fbdev->num_displays; ++i) + if (fbdev->displays[i].dssdev == dssdev) + return &fbdev->displays[i]; + + /* This should never happen */ + BUG(); +} + static inline void omapfb_lock(struct omapfb2_device *fbdev) { mutex_lock(&fbdev->mtx); diff --git 
a/drivers/video/savage/savagefb.h b/drivers/video/savage/savagefb.h index 32549d177b19..dcaab9012ca2 100644 --- a/drivers/video/savage/savagefb.h +++ b/drivers/video/savage/savagefb.h @@ -55,7 +55,7 @@ #define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX)) -#define S3_SAVAGE4_SERIES(chip) ((chip>=S3_SAVAGE4) || (chip<=S3_PROSAVAGEDDR)) +#define S3_SAVAGE4_SERIES(chip) ((chip>=S3_SAVAGE4) && (chip<=S3_PROSAVAGEDDR)) #define S3_SAVAGE_MOBILE_SERIES(chip) ((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE)) diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index f441726ddf2b..86b0735e6aa0 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -36,9 +36,6 @@ config WATCHDOG_CORE and gives them the /dev/watchdog interface (and later also the sysfs interface). - To compile this driver as a module, choose M here: the module will - be called watchdog. - config WATCHDOG_NOWAYOUT bool "Disable watchdog shutdown on close" help diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c index afa78a54711e..809f41c30c44 100644 --- a/drivers/watchdog/nv_tco.c +++ b/drivers/watchdog/nv_tco.c @@ -458,7 +458,15 @@ static int __devexit nv_tco_remove(struct platform_device *dev) static void nv_tco_shutdown(struct platform_device *dev) { + u32 val; + tco_timer_stop(); + + /* Some BIOSes fail the POST (once) if the NO_REBOOT flag is not + * unset during shutdown. */ + pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val); + val &= ~MCP51_SMBUS_SETUP_B_TCO_REBOOT; + pci_write_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, val); } static struct platform_driver nv_tco_driver = { diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c index db84f2322d1a..a267dc078daf 100644 --- a/drivers/watchdog/shwdt.c +++ b/drivers/watchdog/shwdt.c @@ -64,7 +64,7 @@ * misses its deadline, the kernel timer will allow the WDT to overflow. */ static int clock_division_ratio = WTCSR_CKS_4096; -#define next_ping_period(cks) msecs_to_jiffies(cks - 4) +#define next_ping_period(cks) (jiffies + msecs_to_jiffies(cks - 4)) static const struct watchdog_info sh_wdt_info; static struct platform_device *sh_wdt_dev; diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index f815283667af..5f7ff8e2fc14 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -11,7 +11,7 @@ config XEN_BALLOON config XEN_SELFBALLOONING bool "Dynamically self-balloon kernel memory to target" - depends on XEN && XEN_BALLOON && CLEANCACHE && SWAP + depends on XEN && XEN_BALLOON && CLEANCACHE && SWAP && XEN_TMEM default n help Self-ballooning dynamically balloons available kernel memory driven |
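Note on the recurring pattern in the DSS hunks above (a sketch, not part of the patch): hdmi.c, rfbi.c and venc.c all replace the old dss_clk_enable()/dss_clk_disable() calls with small per-device runtime-PM wrappers plus a dev_pm_ops table. The shape below is illustrative only; the calls shown (pm_runtime_get_sync, pm_runtime_put, dss_runtime_get/put, dispc_runtime_get/put) are the ones the diff introduces, while the "foo" names and the reduced private struct are placeholders.

	#include <linux/kernel.h>
	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>
	#include "dss.h"	/* dss_runtime_get()/dispc_runtime_get() are declared here in this patch */

	/* Placeholder private data; the real drivers keep much more here. */
	static struct {
		struct platform_device *pdev;
	} foo;

	static int foo_runtime_get(void)
	{
		int r;

		r = pm_runtime_get_sync(&foo.pdev->dev);
		WARN_ON(r < 0);
		/* pm_runtime_get_sync() returns 1 if the device was already
		 * active; the wrappers in the diff fold non-negative to 0. */
		return r < 0 ? r : 0;
	}

	static void foo_runtime_put(void)
	{
		int r;

		r = pm_runtime_put(&foo.pdev->dev);
		WARN_ON(r < 0);
	}

	/* Resume powers the DSS core first, then DISPC; suspend reverses it. */
	static int foo_runtime_resume(struct device *dev)
	{
		int r;

		r = dss_runtime_get();
		if (r < 0)
			return r;

		r = dispc_runtime_get();
		if (r < 0) {
			dss_runtime_put();
			return r;
		}

		return 0;
	}

	static int foo_runtime_suspend(struct device *dev)
	{
		dispc_runtime_put();
		dss_runtime_put();

		return 0;
	}

	static const struct dev_pm_ops foo_pm_ops = {
		.runtime_suspend = foo_runtime_suspend,
		.runtime_resume = foo_runtime_resume,
	};

In each converted probe() the diff calls pm_runtime_enable(&pdev->dev) before the first runtime_get, and the matching remove() calls pm_runtime_disable() before iounmap().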
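A second self-contained pattern worth calling out is the omapfb auto-update path added in omapfb-main.c: when a manual-update display is put into OMAPFB_AUTO_UPDATE mode, a delayed work item pushes a full-screen update and then re-queues itself. The sketch below only illustrates that self-rescheduling shape; struct auto_update_state is a deliberately reduced stand-in for the diff's struct omapfb_display_data, and the actual panel update is elided.

	#include <linux/kernel.h>
	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	struct auto_update_state {
		struct workqueue_struct *wq;	/* singlethreaded queue, created lazily */
		struct delayed_work work;
		unsigned int freq_hz;		/* 0 selects the 20 Hz default */
	};

	static void auto_update_fn(struct work_struct *work)
	{
		struct auto_update_state *s =
			container_of(work, struct auto_update_state, work.work);
		unsigned int freq = s->freq_hz ? s->freq_hz : 20;

		/* ... sync with the panel and issue a full-screen update here ... */

		/* Re-arm: the work item schedules its own next run. */
		queue_delayed_work(s->wq, &s->work, HZ / freq);
	}

Starting the mode amounts to INIT_DELAYED_WORK(&s->work, auto_update_fn) followed by one direct call of the work function (the diff invokes omapfb_auto_update_work() by hand rather than queueing it); stopping it is cancel_delayed_work_sync(&s->work), which is why the stop path can clear its auto_update_work_enabled flag immediately afterwards.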