Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig          |   3
-rw-r--r--  lib/Makefile         |   2
-rw-r--r--  lib/asn1_decoder.c   |  27
-rw-r--r--  lib/devres.c         |  13
-rw-r--r--  lib/nmi_backtrace.c  | 162
-rw-r--r--  lib/pci_iomap.c      |   7
6 files changed, 195 insertions(+), 19 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 8a49ff9d1502..2e491ac15622 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -525,4 +525,7 @@ config ARCH_HAS_SG_CHAIN
config ARCH_HAS_PMEM_API
bool
+config ARCH_HAS_MMIO_FLUSH
+ bool
+
endmenu
diff --git a/lib/Makefile b/lib/Makefile
index f01c558bf80d..13a7c6ae3fec 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -13,7 +13,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
sha1.o md5.o irq_regs.o argv_split.o \
proportions.o flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
- earlycpio.o seq_buf.o
+ earlycpio.o seq_buf.o nmi_backtrace.o
obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o
lib-$(CONFIG_MMU) += ioremap.o
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 1a000bb050f9..2b3f46c049d4 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -24,15 +24,20 @@ static const unsigned char asn1_op_lengths[ASN1_OP__NR] = {
[ASN1_OP_MATCH_JUMP] = 1 + 1 + 1,
[ASN1_OP_MATCH_JUMP_OR_SKIP] = 1 + 1 + 1,
[ASN1_OP_MATCH_ANY] = 1,
+ [ASN1_OP_MATCH_ANY_OR_SKIP] = 1,
[ASN1_OP_MATCH_ANY_ACT] = 1 + 1,
+ [ASN1_OP_MATCH_ANY_ACT_OR_SKIP] = 1 + 1,
[ASN1_OP_COND_MATCH_OR_SKIP] = 1 + 1,
[ASN1_OP_COND_MATCH_ACT_OR_SKIP] = 1 + 1 + 1,
[ASN1_OP_COND_MATCH_JUMP_OR_SKIP] = 1 + 1 + 1,
[ASN1_OP_COND_MATCH_ANY] = 1,
+ [ASN1_OP_COND_MATCH_ANY_OR_SKIP] = 1,
[ASN1_OP_COND_MATCH_ANY_ACT] = 1 + 1,
+ [ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP] = 1 + 1,
[ASN1_OP_COND_FAIL] = 1,
[ASN1_OP_COMPLETE] = 1,
[ASN1_OP_ACT] = 1 + 1,
+ [ASN1_OP_MAYBE_ACT] = 1 + 1,
[ASN1_OP_RETURN] = 1,
[ASN1_OP_END_SEQ] = 1,
[ASN1_OP_END_SEQ_OF] = 1 + 1,
@@ -177,6 +182,7 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder,
unsigned char flags = 0;
#define FLAG_INDEFINITE_LENGTH 0x01
#define FLAG_MATCHED 0x02
+#define FLAG_LAST_MATCHED 0x04 /* Last tag matched */
#define FLAG_CONS 0x20 /* Corresponds to CONS bit in the opcode tag
* - ie. whether or not we are going to parse
* a compound type.
@@ -208,9 +214,9 @@ next_op:
unsigned char tmp;
/* Skip conditional matches if possible */
- if ((op & ASN1_OP_MATCH__COND &&
- flags & FLAG_MATCHED) ||
- dp == datalen) {
+ if ((op & ASN1_OP_MATCH__COND && flags & FLAG_MATCHED) ||
+ (op & ASN1_OP_MATCH__SKIP && dp == datalen)) {
+ flags &= ~FLAG_LAST_MATCHED;
pc += asn1_op_lengths[op];
goto next_op;
}
@@ -302,7 +308,9 @@ next_op:
/* Decide how to handle the operation */
switch (op) {
case ASN1_OP_MATCH_ANY_ACT:
+ case ASN1_OP_MATCH_ANY_ACT_OR_SKIP:
case ASN1_OP_COND_MATCH_ANY_ACT:
+ case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP:
ret = actions[machine[pc + 1]](context, hdr, tag, data + dp, len);
if (ret < 0)
return ret;
@@ -319,8 +327,10 @@ next_op:
case ASN1_OP_MATCH:
case ASN1_OP_MATCH_OR_SKIP:
case ASN1_OP_MATCH_ANY:
+ case ASN1_OP_MATCH_ANY_OR_SKIP:
case ASN1_OP_COND_MATCH_OR_SKIP:
case ASN1_OP_COND_MATCH_ANY:
+ case ASN1_OP_COND_MATCH_ANY_OR_SKIP:
skip_data:
if (!(flags & FLAG_CONS)) {
if (flags & FLAG_INDEFINITE_LENGTH) {
@@ -422,8 +432,15 @@ next_op:
pc += asn1_op_lengths[op];
goto next_op;
+ case ASN1_OP_MAYBE_ACT:
+ if (!(flags & FLAG_LAST_MATCHED)) {
+ pc += asn1_op_lengths[op];
+ goto next_op;
+ }
case ASN1_OP_ACT:
ret = actions[machine[pc + 1]](context, hdr, tag, data + tdp, len);
+ if (ret < 0)
+ return ret;
pc += asn1_op_lengths[op];
goto next_op;
@@ -431,6 +448,7 @@ next_op:
if (unlikely(jsp <= 0))
goto jump_stack_underflow;
pc = jump_stack[--jsp];
+ flags |= FLAG_MATCHED | FLAG_LAST_MATCHED;
goto next_op;
default:
@@ -438,7 +456,8 @@ next_op:
}
/* Shouldn't reach here */
- pr_err("ASN.1 decoder error: Found reserved opcode (%u)\n", op);
+ pr_err("ASN.1 decoder error: Found reserved opcode (%u) pc=%zu\n",
+ op, pc);
return -EBADMSG;
data_overrun_error:
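
With ASN1_OP_ACT (and the new ASN1_OP_MAYBE_ACT) now propagating a negative return, an action routine can abort the decode instead of having its error silently dropped. A minimal sketch of such a callback follows; my_context and my_note_version are hypothetical names, not part of this patch, and the five-argument shape is assumed from the asn1_action_t calls visible in the hunk above.

/* Hypothetical example only; not part of this patch. */
#include <linux/errno.h>
#include <linux/types.h>

struct my_context {
	u8 version;
};

static int my_note_version(void *context, size_t hdrlen,
			   unsigned char tag,
			   const void *value, size_t vlen)
{
	struct my_context *ctx = context;

	if (vlen != 1)
		return -EBADMSG;	/* now aborts asn1_ber_decoder() */

	ctx->version = *(const u8 *)value;
	return 0;			/* decoding continues */
}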
diff --git a/lib/devres.c b/lib/devres.c
index fbe2aac522e6..f13a2468ff39 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -119,10 +119,9 @@ EXPORT_SYMBOL(devm_iounmap);
* @dev: generic device to handle the resource for
* @res: resource to be handled
*
- * Checks that a resource is a valid memory region, requests the memory region
- * and ioremaps it either as cacheable or as non-cacheable memory depending on
- * the resource's flags. All operations are managed and will be undone on
- * driver detach.
+ * Checks that a resource is a valid memory region, requests the memory
+ * region and ioremaps it. All operations are managed and will be undone
+ * on driver detach.
*
* Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
* on failure. Usage example:
@@ -153,11 +152,7 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
return IOMEM_ERR_PTR(-EBUSY);
}
- if (res->flags & IORESOURCE_CACHEABLE)
- dest_ptr = devm_ioremap(dev, res->start, size);
- else
- dest_ptr = devm_ioremap_nocache(dev, res->start, size);
-
+ dest_ptr = devm_ioremap(dev, res->start, size);
if (!dest_ptr) {
dev_err(dev, "ioremap failed for resource %pR\n", res);
devm_release_mem_region(dev, res->start, size);
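
For context, the usual probe-time pattern for devm_ioremap_resource() looks roughly like the sketch below (my_probe is a hypothetical name, illustration only). After this change the mapping always goes through devm_ioremap(); the IORESOURCE_CACHEABLE flag no longer selects between cacheable and non-cacheable mappings here.

/* Illustrative sketch only; not part of this patch. */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	/* The region and mapping are released automatically on detach. */
	return 0;
}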
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
new file mode 100644
index 000000000000..88d3d32e5923
--- /dev/null
+++ b/lib/nmi_backtrace.c
@@ -0,0 +1,162 @@
+/*
+ * NMI backtrace support
+ *
+ * Gratuitously copied from arch/x86/kernel/apic/hw_nmi.c by Russell King,
+ * with the following header:
+ *
+ * HW NMI watchdog support
+ *
+ * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
+ *
+ * Arch specific calls to support NMI watchdog
+ *
+ * Bits copied from original nmi.c file
+ */
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/kprobes.h>
+#include <linux/nmi.h>
+#include <linux/seq_buf.h>
+
+#ifdef arch_trigger_all_cpu_backtrace
+/* For reliability, we're prepared to waste bits here. */
+static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
+static cpumask_t printtrace_mask;
+
+#define NMI_BUF_SIZE 4096
+
+struct nmi_seq_buf {
+ unsigned char buffer[NMI_BUF_SIZE];
+ struct seq_buf seq;
+};
+
+/* Safe printing in NMI context */
+static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq);
+
+/* "in progress" flag of arch_trigger_all_cpu_backtrace */
+static unsigned long backtrace_flag;
+
+static void print_seq_line(struct nmi_seq_buf *s, int start, int end)
+{
+ const char *buf = s->buffer + start;
+
+ printk("%.*s", (end - start) + 1, buf);
+}
+
+void nmi_trigger_all_cpu_backtrace(bool include_self,
+ void (*raise)(cpumask_t *mask))
+{
+ struct nmi_seq_buf *s;
+ int i, cpu, this_cpu = get_cpu();
+
+ if (test_and_set_bit(0, &backtrace_flag)) {
+ /*
+ * If there is already a trigger_all_cpu_backtrace() in progress
+ * (backtrace_flag == 1), don't output the CPU dumps a second time.
+ */
+ put_cpu();
+ return;
+ }
+
+ cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
+ if (!include_self)
+ cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));
+
+ cpumask_copy(&printtrace_mask, to_cpumask(backtrace_mask));
+
+ /*
+ * Set up per_cpu seq_buf buffers that the NMIs running on the other
+ * CPUs will write to.
+ */
+ for_each_cpu(cpu, to_cpumask(backtrace_mask)) {
+ s = &per_cpu(nmi_print_seq, cpu);
+ seq_buf_init(&s->seq, s->buffer, NMI_BUF_SIZE);
+ }
+
+ if (!cpumask_empty(to_cpumask(backtrace_mask))) {
+ pr_info("Sending NMI to %s CPUs:\n",
+ (include_self ? "all" : "other"));
+ raise(to_cpumask(backtrace_mask));
+ }
+
+ /* Wait for up to 10 seconds for all CPUs to do the backtrace */
+ for (i = 0; i < 10 * 1000; i++) {
+ if (cpumask_empty(to_cpumask(backtrace_mask)))
+ break;
+ mdelay(1);
+ touch_softlockup_watchdog();
+ }
+
+ /*
+ * Now that all the NMIs have triggered, we can dump out their
+ * backtraces safely to the console.
+ */
+ for_each_cpu(cpu, &printtrace_mask) {
+ int len, last_i = 0;
+
+ s = &per_cpu(nmi_print_seq, cpu);
+ len = seq_buf_used(&s->seq);
+ if (!len)
+ continue;
+
+ /* Print line by line. */
+ for (i = 0; i < len; i++) {
+ if (s->buffer[i] == '\n') {
+ print_seq_line(s, last_i, i);
+ last_i = i + 1;
+ }
+ }
+ /* Check if there was a partial line. */
+ if (last_i < len) {
+ print_seq_line(s, last_i, len - 1);
+ pr_cont("\n");
+ }
+ }
+
+ clear_bit(0, &backtrace_flag);
+ smp_mb__after_atomic();
+ put_cpu();
+}
+
+/*
+ * It is not safe to call printk() directly from NMI handlers.
+ * It may be fine if the NMI detected a lockup and we have no choice
+ * but to do so, but sending an NMI to all the other CPUs to get a
+ * backtrace can be done with sysrq-l. We don't want that to lock up,
+ * which can happen if the NMI interrupts a printk in progress.
+ *
+ * Instead, we redirect the vprintk() to this nmi_vprintk() that writes
+ * the content into a per-CPU seq_buf buffer. Then when the NMIs are
+ * all done, we can safely dump the contents of the seq_buf to a printk()
+ * from a non-NMI context.
+ */
+static int nmi_vprintk(const char *fmt, va_list args)
+{
+ struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
+ unsigned int len = seq_buf_used(&s->seq);
+
+ seq_buf_vprintf(&s->seq, fmt, args);
+ return seq_buf_used(&s->seq) - len;
+}
+
+bool nmi_cpu_backtrace(struct pt_regs *regs)
+{
+ int cpu = smp_processor_id();
+
+ if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
+ printk_func_t printk_func_save = this_cpu_read(printk_func);
+
+ /* Replace printk to write into the NMI seq */
+ this_cpu_write(printk_func, nmi_vprintk);
+ pr_warn("NMI backtrace for cpu %d\n", cpu);
+ show_regs(regs);
+ this_cpu_write(printk_func, printk_func_save);
+
+ cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
+ return true;
+ }
+
+ return false;
+}
+NOKPROBE_SYMBOL(nmi_cpu_backtrace);
+#endif
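
This helper expects the architecture to supply a "raise" callback that delivers an NMI (or NMI-like IPI) to the CPUs in the mask, to call nmi_cpu_backtrace() from the resulting handler, and to #define arch_trigger_all_cpu_backtrace so the block above compiles. A hedged sketch of that glue is below; raise_backtrace_ipi and arch_send_backtrace_ipi are hypothetical names, not part of this diff, and the prototypes are assumed to be declared in <linux/nmi.h> as elsewhere in this series.

/* Hypothetical arch-side glue; illustrative only. */
#include <linux/cpumask.h>
#include <linux/nmi.h>

extern void arch_send_backtrace_ipi(cpumask_t *mask);	/* hypothetical helper */

static void raise_backtrace_ipi(cpumask_t *mask)
{
	/* Arch-specific: deliver an NMI/IPI to every CPU in *mask. */
	arch_send_backtrace_ipi(mask);
}

void arch_trigger_all_cpu_backtrace(bool include_self)
{
	nmi_trigger_all_cpu_backtrace(include_self, raise_backtrace_ipi);
}

/*
 * In the arch header, the symbol must also be defined so that the
 * #ifdef in lib/nmi_backtrace.c above is satisfied, e.g.:
 *	#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 * The arch's NMI/IPI handler then calls nmi_cpu_backtrace(regs) with the
 * interrupted register state.
 */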
diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c
index 5f5d24d1d53f..c10fba461454 100644
--- a/lib/pci_iomap.c
+++ b/lib/pci_iomap.c
@@ -41,11 +41,8 @@ void __iomem *pci_iomap_range(struct pci_dev *dev,
len = maxlen;
if (flags & IORESOURCE_IO)
return __pci_ioport_map(dev, start, len);
- if (flags & IORESOURCE_MEM) {
- if (flags & IORESOURCE_CACHEABLE)
- return ioremap(start, len);
- return ioremap_nocache(start, len);
- }
+ if (flags & IORESOURCE_MEM)
+ return ioremap(start, len);
/* What? */
return NULL;
}
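
For reference, a typical pci_iomap() caller looks roughly like the sketch below (my_pci_probe is a hypothetical name, illustration only). With the change above, a memory BAR is now always mapped with plain ioremap() rather than choosing between ioremap() and ioremap_nocache() based on IORESOURCE_CACHEABLE.

/* Illustrative sketch only; not part of this patch. */
#include <linux/pci.h>

static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	regs = pci_iomap(pdev, 0, 0);	/* BAR 0, length 0 = whole BAR */
	if (!regs) {
		pci_disable_device(pdev);
		return -ENOMEM;
	}

	/* ... access registers with ioread32(regs + offset) etc. ... */

	pci_iounmap(pdev, regs);
	pci_disable_device(pdev);
	return 0;
}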