author    Lucas De Marchi <lucas.demarchi@profusion.mobi>    2011-03-30 22:57:33 -0300
committer Lucas De Marchi <lucas.demarchi@profusion.mobi>    2011-03-31 11:26:23 -0300
commit    25985edcedea6396277003854657b5f3cb31a628 (patch)
tree      f026e810210a2ee7290caeb737c23cb6472b7c38 /arch/sh/include/asm
parent    6aba74f2791287ec407e0f92487a725a25908067 (diff)
Fix common misspellings

Fixes generated by 'codespell' and manually reviewed.

Signed-off-by: Lucas De Marchi <lucas.demarchi@profusion.mobi>
Diffstat (limited to 'arch/sh/include/asm')
-rw-r--r--    arch/sh/include/asm/page.h              2
-rw-r--r--    arch/sh/include/asm/pgtable_32.h        2
-rw-r--r--    arch/sh/include/asm/unaligned-sh4a.h    2

3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index c4e0b3d472b9..822d6084195b 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -186,7 +186,7 @@ typedef struct page *pgtable_t;
/*
* While BYTES_PER_WORD == 4 on the current sh64 ABI, GCC will still
* happily generate {ld/st}.q pairs, requiring us to have 8-byte
- * alignment to avoid traps. The kmalloc alignment is gauranteed by
+ * alignment to avoid traps. The kmalloc alignment is guaranteed by
* virtue of L1_CACHE_BYTES, requiring this to only be special cased
* for slab caches.
*/
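
The comment above explains why sh64 needs a larger minimum alignment for slab objects even though kmalloc() is already covered by L1_CACHE_BYTES. As a hedged sketch of how that requirement is typically expressed, the macro name follows the kernel's ARCH_SLAB_MINALIGN convention, but the CONFIG_SUPERH64 guard is an assumption about scoping rather than a quote from this header:

#ifdef CONFIG_SUPERH64
/* Assumed sketch: force 8-byte slab alignment so GCC-generated
 * {ld/st}.q pairs never trap on under-aligned slab objects. */
#define ARCH_SLAB_MINALIGN	8
#endif

With this in place, only slab caches need the special case; kmalloc() allocations inherit sufficient alignment from the cache line size.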
diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
index b799fe71114c..0bce3d81569e 100644
--- a/arch/sh/include/asm/pgtable_32.h
+++ b/arch/sh/include/asm/pgtable_32.h
@@ -167,7 +167,7 @@ static inline unsigned long copy_ptea_attributes(unsigned long x)
#endif
/*
- * Mask of bits that are to be preserved accross pgprot changes.
+ * Mask of bits that are to be preserved across pgprot changes.
*/
#define _PAGE_CHG_MASK \
(PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | \
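
For context, a mask like _PAGE_CHG_MASK is normally consumed by pte_modify(): bits in the mask survive a protection change, everything else is replaced by the new pgprot. A minimal sketch following the common Linux pattern (not quoted from this header, so the real sh implementation may differ in detail):

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	/* Preserve accessed/dirty/caching state named by the mask;
	 * swap in the new protection bits from newprot. */
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) |
		       pgprot_val(newprot);
	return pte;
}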
diff --git a/arch/sh/include/asm/unaligned-sh4a.h b/arch/sh/include/asm/unaligned-sh4a.h
index c48a9c3420da..95adc500cabc 100644
--- a/arch/sh/include/asm/unaligned-sh4a.h
+++ b/arch/sh/include/asm/unaligned-sh4a.h
@@ -9,7 +9,7 @@
* struct.
*
* The same note as with the movli.l/movco.l pair applies here, as long
- * as the load is gauranteed to be inlined, nothing else will hook in to
+ * as the load is guaranteed to be inlined, nothing else will hook in to
* r0 and we get the return value for free.
*
* NOTE: Due to the fact we require r0 encoding, care should be taken to
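
The hunk is cut off above, but the comment describes the SH-4A movua.l unaligned-load helper. A hedged sketch of the shape such a helper takes, where the "=z" constraint is what pins the result to r0 as the comment's note about r0 encoding implies (the function name is illustrative, assuming u32 from <linux/types.h>):

static inline u32 sh4a_get_unaligned_cpu32(const u8 *p)
{
	unsigned long unaligned;

	/* movua.l performs the unaligned load; "=z" forces the
	 * output into r0, so an inlined caller gets the return
	 * value for free, as the comment above explains. */
	__asm__ __volatile__ (
		"movua.l	@%1, %0\n\t"
		 : "=z" (unaligned)
		 : "r" (p)
	);

	return unaligned;
}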