632 files changed, 50610 insertions, 17265 deletions
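A recurring change in the crypto hunks below (see the aes.c hunk, and the documentation update in api-intro.txt) is the new setkey calling convention: the `u32 *flags` argument is dropped and result flags are reported through `tfm->crt_flags` instead. As a reference, here is a minimal sketch of a setkey callback under the new signature; the context structure, key-length checks, and names are hypothetical placeholders, not taken from this commit:

  #include <linux/crypto.h>
  #include <linux/errno.h>

  /* Hypothetical cipher context; a real driver keeps its key schedule here. */
  struct example_ctx {
          u32 key_enc[60];
  };

  /* Post-change setkey: no u32 *flags parameter; error conditions are
   * flagged via tfm->crt_flags and signalled with a negative return. */
  static int example_setkey(struct crypto_tfm *tfm, const u8 *in_key,
                            unsigned int key_len)
  {
          struct example_ctx *ctx = crypto_tfm_ctx(tfm);
          u32 *flags = &tfm->crt_flags;

          if (key_len != 16 && key_len != 24 && key_len != 32) {
                  *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                  return -EINVAL;
          }
          /* ... expand in_key into ctx->key_enc ... */
          return 0;
  }

The first documentation hunk shows the matching user-side migration: `crypto_alloc_tfm()`/`crypto_digest_*()` give way to `crypto_alloc_hash()`, a `struct hash_desc`, and a single `crypto_hash_digest()` call checked with `IS_ERR()`.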
diff --git a/Documentation/crypto/api-intro.txt b/Documentation/crypto/api-intro.txt index 74dffc68ff9f..5a03a2801d67 100644 --- a/Documentation/crypto/api-intro.txt +++ b/Documentation/crypto/api-intro.txt @@ -19,15 +19,14 @@ At the lowest level are algorithms, which register dynamically with the API. 'Transforms' are user-instantiated objects, which maintain state, handle all -of the implementation logic (e.g. manipulating page vectors), provide an -abstraction to the underlying algorithms, and handle common logical -operations (e.g. cipher modes, HMAC for digests). However, at the user +of the implementation logic (e.g. manipulating page vectors) and provide an +abstraction to the underlying algorithms. However, at the user level they are very simple. Conceptually, the API layering looks like this: [transform api] (user interface) - [transform ops] (per-type logic glue e.g. cipher.c, digest.c) + [transform ops] (per-type logic glue e.g. cipher.c, compress.c) [algorithm api] (for registering algorithms) The idea is to make the user interface and algorithm registration API @@ -44,22 +43,27 @@ under development. Here's an example of how to use the API: #include <linux/crypto.h> + #include <linux/err.h> + #include <linux/scatterlist.h> struct scatterlist sg[2]; char result[128]; - struct crypto_tfm *tfm; + struct crypto_hash *tfm; + struct hash_desc desc; - tfm = crypto_alloc_tfm("md5", 0); - if (tfm == NULL) + tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) fail(); /* ... set up the scatterlists ... */ + + desc.tfm = tfm; + desc.flags = 0; - crypto_digest_init(tfm); - crypto_digest_update(tfm, &sg, 2); - crypto_digest_final(tfm, result); + if (crypto_hash_digest(&desc, &sg, 2, result)) + fail(); - crypto_free_tfm(tfm); + crypto_free_hash(tfm); Many real examples are available in the regression test module (tcrypt.c). @@ -126,7 +130,7 @@ might already be working on. BUGS Send bug reports to: -James Morris <jmorris@redhat.com> +Herbert Xu <herbert@gondor.apana.org.au> Cc: David S. Miller <davem@redhat.com> @@ -134,13 +138,14 @@ FURTHER INFORMATION For further patches and various updates, including the current TODO list, see: -http://samba.org/~jamesm/crypto/ +http://gondor.apana.org.au/~herbert/crypto/ AUTHORS James Morris David S. Miller +Herbert Xu CREDITS @@ -238,8 +243,11 @@ Anubis algorithm contributors: Tiger algorithm contributors: Aaron Grothe +VIA PadLock contributors: + Michal Ludvig + Generic scatterwalk code by Adam J. 
Richter <adam@yggdrasil.com> Please send any credits updates or corrections to: -James Morris <jmorris@redhat.com> +Herbert Xu <herbert@gondor.apana.org.au> diff --git a/MAINTAINERS b/MAINTAINERS index 7a08cdbb246a..766b1ad1d0f9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -298,6 +298,14 @@ L: info-linux@geode.amd.com W: http://www.amd.com/us-en/ConnectivitySolutions/TechnicalResources/0,,50_2334_2452_11363,00.html S: Supported +AMSO1100 RNIC DRIVER +P: Tom Tucker +M: tom@opengridcomputing.com +P: Steve Wise +M: swise@opengridcomputing.com +L: openib-general@openib.org +S: Maintained + AOA (Apple Onboard Audio) ALSA DRIVER P: Johannes Berg M: johannes@sipsolutions.net @@ -991,6 +999,14 @@ EFS FILESYSTEM W: http://aeschi.ch.eu.org/efs/ S: Orphan +EHCA (IBM GX bus InfiniBand adapter) DRIVER: +P: Hoang-Nam Nguyen +M: hnguyen@de.ibm.com +P: Christoph Raisch +M: raisch@de.ibm.com +L: openib-general@openib.org +S: Supported + EMU10K1 SOUND DRIVER P: James Courtier-Dutton M: James@superbug.demon.co.uk @@ -1783,6 +1799,13 @@ W: http://www.penguinppc.org/ L: linuxppc-embedded@ozlabs.org S: Maintained +LINUX FOR POWERPC PA SEMI PWRFICIENT +P: Olof Johansson +M: olof@lixom.net +W: http://www.pasemi.com/ +L: linuxppc-dev@ozlabs.org +S: Supported + LLC (802.2) P: Arnaldo Carvalho de Melo M: acme@conectiva.com.br @@ -2451,6 +2474,8 @@ S: Maintained S390 P: Martin Schwidefsky M: schwidefsky@de.ibm.com +P: Heiko Carstens +M: heiko.carstens@de.ibm.com M: linux390@de.ibm.com L: linux-390@vm.marist.edu W: http://www.ibm.com/developerworks/linux/linux390/ diff --git a/arch/i386/crypto/Makefile b/arch/i386/crypto/Makefile index 103c353d0a63..3fd19af18e34 100644 --- a/arch/i386/crypto/Makefile +++ b/arch/i386/crypto/Makefile @@ -5,5 +5,8 @@ # obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o +obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o aes-i586-y := aes-i586-asm.o aes.o +twofish-i586-y := twofish-i586-asm.o twofish.o + diff --git a/arch/i386/crypto/aes.c b/arch/i386/crypto/aes.c index d3806daa3de3..49aad9397f10 100644 --- a/arch/i386/crypto/aes.c +++ b/arch/i386/crypto/aes.c @@ -379,12 +379,13 @@ static void gen_tabs(void) } static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, - unsigned int key_len, u32 *flags) + unsigned int key_len) { int i; u32 ss[8]; struct aes_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *key = (const __le32 *)in_key; + u32 *flags = &tfm->crt_flags; /* encryption schedule */ diff --git a/arch/i386/crypto/twofish-i586-asm.S b/arch/i386/crypto/twofish-i586-asm.S new file mode 100644 index 000000000000..39b98ed2c1b9 --- /dev/null +++ b/arch/i386/crypto/twofish-i586-asm.S @@ -0,0 +1,335 @@ +/*************************************************************************** +* Copyright (C) 2006 by Joachim Fritschi, <jfritschi@freenet.de> * +* * +* This program is free software; you can redistribute it and/or modify * +* it under the terms of the GNU General Public License as published by * +* the Free Software Foundation; either version 2 of the License, or * +* (at your option) any later version. * +* * +* This program is distributed in the hope that it will be useful, * +* but WITHOUT ANY WARRANTY; without even the implied warranty of * +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * +* GNU General Public License for more details. * +* * +* You should have received a copy of the GNU General Public License * +* along with this program; if not, write to the * +* Free Software Foundation, Inc., * +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
* +***************************************************************************/ + +.file "twofish-i586-asm.S" +.text + +#include <asm/asm-offsets.h> + +/* return adress at 0 */ + +#define in_blk 12 /* input byte array address parameter*/ +#define out_blk 8 /* output byte array address parameter*/ +#define tfm 4 /* Twofish context structure */ + +#define a_offset 0 +#define b_offset 4 +#define c_offset 8 +#define d_offset 12 + +/* Structure of the crypto context struct*/ + +#define s0 0 /* S0 Array 256 Words each */ +#define s1 1024 /* S1 Array */ +#define s2 2048 /* S2 Array */ +#define s3 3072 /* S3 Array */ +#define w 4096 /* 8 whitening keys (word) */ +#define k 4128 /* key 1-32 ( word ) */ + +/* define a few register aliases to allow macro substitution */ + +#define R0D %eax +#define R0B %al +#define R0H %ah + +#define R1D %ebx +#define R1B %bl +#define R1H %bh + +#define R2D %ecx +#define R2B %cl +#define R2H %ch + +#define R3D %edx +#define R3B %dl +#define R3H %dh + + +/* performs input whitening */ +#define input_whitening(src,context,offset)\ + xor w+offset(context), src; + +/* performs input whitening */ +#define output_whitening(src,context,offset)\ + xor w+16+offset(context), src; + +/* + * a input register containing a (rotated 16) + * b input register containing b + * c input register containing c + * d input register containing d (already rol $1) + * operations on a and b are interleaved to increase performance + */ +#define encrypt_round(a,b,c,d,round)\ + push d ## D;\ + movzx b ## B, %edi;\ + mov s1(%ebp,%edi,4),d ## D;\ + movzx a ## B, %edi;\ + mov s2(%ebp,%edi,4),%esi;\ + movzx b ## H, %edi;\ + ror $16, b ## D;\ + xor s2(%ebp,%edi,4),d ## D;\ + movzx a ## H, %edi;\ + ror $16, a ## D;\ + xor s3(%ebp,%edi,4),%esi;\ + movzx b ## B, %edi;\ + xor s3(%ebp,%edi,4),d ## D;\ + movzx a ## B, %edi;\ + xor (%ebp,%edi,4), %esi;\ + movzx b ## H, %edi;\ + ror $15, b ## D;\ + xor (%ebp,%edi,4), d ## D;\ + movzx a ## H, %edi;\ + xor s1(%ebp,%edi,4),%esi;\ + pop %edi;\ + add d ## D, %esi;\ + add %esi, d ## D;\ + add k+round(%ebp), %esi;\ + xor %esi, c ## D;\ + rol $15, c ## D;\ + add k+4+round(%ebp),d ## D;\ + xor %edi, d ## D; + +/* + * a input register containing a (rotated 16) + * b input register containing b + * c input register containing c + * d input register containing d (already rol $1) + * operations on a and b are interleaved to increase performance + * last round has different rotations for the output preparation + */ +#define encrypt_last_round(a,b,c,d,round)\ + push d ## D;\ + movzx b ## B, %edi;\ + mov s1(%ebp,%edi,4),d ## D;\ + movzx a ## B, %edi;\ + mov s2(%ebp,%edi,4),%esi;\ + movzx b ## H, %edi;\ + ror $16, b ## D;\ + xor s2(%ebp,%edi,4),d ## D;\ + movzx a ## H, %edi;\ + ror $16, a ## D;\ + xor s3(%ebp,%edi,4),%esi;\ + movzx b ## B, %edi;\ + xor s3(%ebp,%edi,4),d ## D;\ + movzx a ## B, %edi;\ + xor (%ebp,%edi,4), %esi;\ + movzx b ## H, %edi;\ + ror $16, b ## D;\ + xor (%ebp,%edi,4), d ## D;\ + movzx a ## H, %edi;\ + xor s1(%ebp,%edi,4),%esi;\ + pop %edi;\ + add d ## D, %esi;\ + add %esi, d ## D;\ + add k+round(%ebp), %esi;\ + xor %esi, c ## D;\ + ror $1, c ## D;\ + add k+4+round(%ebp),d ## D;\ + xor %edi, d ## D; + +/* + * a input register containing a + * b input register containing b (rotated 16) + * c input register containing c + * d input register containing d (already rol $1) + * operations on a and b are interleaved to increase performance + */ +#define decrypt_round(a,b,c,d,round)\ + push c ## D;\ + movzx a ## B, %edi;\ + mov (%ebp,%edi,4), c ## D;\ + movzx b 
## B, %edi;\ + mov s3(%ebp,%edi,4),%esi;\ + movzx a ## H, %edi;\ + ror $16, a ## D;\ + xor s1(%ebp,%edi,4),c ## D;\ + movzx b ## H, %edi;\ + ror $16, b ## D;\ + xor (%ebp,%edi,4), %esi;\ + movzx a ## B, %edi;\ + xor s2(%ebp,%edi,4),c ## D;\ + movzx b ## B, %edi;\ + xor s1(%ebp,%edi,4),%esi;\ + movzx a ## H, %edi;\ + ror $15, a ## D;\ + xor s3(%ebp,%edi,4),c ## D;\ + movzx b ## H, %edi;\ + xor s2(%ebp,%edi,4),%esi;\ + pop %edi;\ + add %esi, c ## D;\ + add c ## D, %esi;\ + add k+round(%ebp), c ## D;\ + xor %edi, c ## D;\ + add k+4+round(%ebp),%esi;\ + xor %esi, d ## D;\ + rol $15, d ## D; + +/* + * a input register containing a + * b input register containing b (rotated 16) + * c input register containing c + * d input register containing d (already rol $1) + * operations on a and b are interleaved to increase performance + * last round has different rotations for the output preparation + */ +#define decrypt_last_round(a,b,c,d,round)\ + push c ## D;\ + movzx a ## B, %edi;\ + mov (%ebp,%edi,4), c ## D;\ + movzx b ## B, %edi;\ + mov s3(%ebp,%edi,4),%esi;\ + movzx a ## H, %edi;\ + ror $16, a ## D;\ + xor s1(%ebp,%edi,4),c ## D;\ + movzx b ## H, %edi;\ + ror $16, b ## D;\ + xor (%ebp,%edi,4), %esi;\ + movzx a ## B, %edi;\ + xor s2(%ebp,%edi,4),c ## D;\ + movzx b ## B, %edi;\ + xor s1(%ebp,%edi,4),%esi;\ + movzx a ## H, %edi;\ + ror $16, a ## D;\ + xor s3(%ebp,%edi,4),c ## D;\ + movzx b ## H, %edi;\ + xor s2(%ebp,%edi,4),%esi;\ + pop %edi;\ + add %esi, c ## D;\ + add c ## D, %esi;\ + add k+round(%ebp), c ## D;\ + xor %edi, c ## D;\ + add k+4+round(%ebp),%esi;\ + xor %esi, d ## D;\ + ror $1, d ## D; + +.align 4 +.global twofish_enc_blk +.global twofish_dec_blk + +twofish_enc_blk: + push %ebp /* save registers according to calling convention*/ + push %ebx + push %esi + push %edi + + mov tfm + 16(%esp), %ebp /* abuse the base pointer: set new base bointer to the crypto tfm */ + add $crypto_tfm_ctx_offset, %ebp /* ctx adress */ + mov in_blk+16(%esp),%edi /* input adress in edi */ + + mov (%edi), %eax + mov b_offset(%edi), %ebx + mov c_offset(%edi), %ecx + mov d_offset(%edi), %edx + input_whitening(%eax,%ebp,a_offset) + ror $16, %eax + input_whitening(%ebx,%ebp,b_offset) + input_whitening(%ecx,%ebp,c_offset) + input_whitening(%edx,%ebp,d_offset) + rol $1, %edx + + encrypt_round(R0,R1,R2,R3,0); + encrypt_round(R2,R3,R0,R1,8); + encrypt_round(R0,R1,R2,R3,2*8); + encrypt_round(R2,R3,R0,R1,3*8); + encrypt_round(R0,R1,R2,R3,4*8); + encrypt_round(R2,R3,R0,R1,5*8); + encrypt_round(R0,R1,R2,R3,6*8); + encrypt_round(R2,R3,R0,R1,7*8); + encrypt_round(R0,R1,R2,R3,8*8); + encrypt_round(R2,R3,R0,R1,9*8); + encrypt_round(R0,R1,R2,R3,10*8); + encrypt_round(R2,R3,R0,R1,11*8); + encrypt_round(R0,R1,R2,R3,12*8); + encrypt_round(R2,R3,R0,R1,13*8); + encrypt_round(R0,R1,R2,R3,14*8); + encrypt_last_round(R2,R3,R0,R1,15*8); + + output_whitening(%eax,%ebp,c_offset) + output_whitening(%ebx,%ebp,d_offset) + output_whitening(%ecx,%ebp,a_offset) + output_whitening(%edx,%ebp,b_offset) + mov out_blk+16(%esp),%edi; + mov %eax, c_offset(%edi) + mov %ebx, d_offset(%edi) + mov %ecx, (%edi) + mov %edx, b_offset(%edi) + + pop %edi + pop %esi + pop %ebx + pop %ebp + mov $1, %eax + ret + +twofish_dec_blk: + push %ebp /* save registers according to calling convention*/ + push %ebx + push %esi + push %edi + + + mov tfm + 16(%esp), %ebp /* abuse the base pointer: set new base bointer to the crypto tfm */ + add $crypto_tfm_ctx_offset, %ebp /* ctx adress */ + mov in_blk+16(%esp),%edi /* input adress in edi */ + + mov (%edi), %eax + mov 
b_offset(%edi), %ebx + mov c_offset(%edi), %ecx + mov d_offset(%edi), %edx + output_whitening(%eax,%ebp,a_offset) + output_whitening(%ebx,%ebp,b_offset) + ror $16, %ebx + output_whitening(%ecx,%ebp,c_offset) + output_whitening(%edx,%ebp,d_offset) + rol $1, %ecx + + decrypt_round(R0,R1,R2,R3,15*8); + decrypt_round(R2,R3,R0,R1,14*8); + decrypt_round(R0,R1,R2,R3,13*8); + decrypt_round(R2,R3,R0,R1,12*8); + decrypt_round(R0,R1,R2,R3,11*8); + decrypt_round(R2,R3,R0,R1,10*8); + decrypt_round(R0,R1,R2,R3,9*8); + decrypt_round(R2,R3,R0,R1,8*8); + decrypt_round(R0,R1,R2,R3,7*8); + decrypt_round(R2,R3,R0,R1,6*8); + decrypt_round(R0,R1,R2,R3,5*8); + decrypt_round(R2,R3,R0,R1,4*8); + decrypt_round(R0,R1,R2,R3,3*8); + decrypt_round(R2,R3,R0,R1,2*8); + decrypt_round(R0,R1,R2,R3,1*8); + decrypt_last_round(R2,R3,R0,R1,0); + + input_whitening(%eax,%ebp,c_offset) + input_whitening(%ebx,%ebp,d_offset) + input_whitening(%ecx,%ebp,a_offset) + input_whitening(%edx,%ebp,b_offset) + mov out_blk+16(%esp),%edi; + mov %eax, c_offset(%edi) + mov %ebx, d_offset(%edi) + mov %ecx, (%edi) + mov %edx, b_offset(%edi) + + pop %edi + pop %esi + pop %ebx + pop %ebp + mov $1, %eax + ret diff --git a/arch/i386/crypto/twofish.c b/arch/i386/crypto/twofish.c new file mode 100644 index 000000000000..e3004dfe9c7a --- /dev/null +++ b/arch/i386/crypto/twofish.c @@ -0,0 +1,97 @@ +/* + * Glue Code for optimized 586 assembler version of TWOFISH + * + * Originally Twofish for GPG + * By Matthew Skala <mskala@ansuz.sooke.bc.ca>, July 26, 1998 + * 256-bit key length added March 20, 1999 + * Some modifications to reduce the text size by Werner Koch, April, 1998 + * Ported to the kerneli patch by Marc Mutz <Marc@Mutz.com> + * Ported to CryptoAPI by Colin Slater <hoho@tacomeat.net> + * + * The original author has disclaimed all copyright interest in this + * code and thus put it in the public domain. The subsequent authors + * have put this under the GNU General Public License. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA + * + * This code is a "clean room" implementation, written from the paper + * _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey, + * Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available + * through http://www.counterpane.com/twofish.html + * + * For background information on multiplication in finite fields, used for + * the matrix operations in the key schedule, see the book _Contemporary + * Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the + * Third Edition. 
+ */ + +#include <crypto/twofish.h> +#include <linux/crypto.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/types.h> + + +asmlinkage void twofish_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src); +asmlinkage void twofish_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src); + +static void twofish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) +{ + twofish_enc_blk(tfm, dst, src); +} + +static void twofish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) +{ + twofish_dec_blk(tfm, dst, src); +} + +static struct crypto_alg alg = { + .cra_name = "twofish", + .cra_driver_name = "twofish-i586", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = TF_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct twofish_ctx), + .cra_alignmask = 3, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(alg.cra_list), + .cra_u = { + .cipher = { + .cia_min_keysize = TF_MIN_KEY_SIZE, + .cia_max_keysize = TF_MAX_KEY_SIZE, + .cia_setkey = twofish_setkey, + .cia_encrypt = twofish_encrypt, + .cia_decrypt = twofish_decrypt + } + } +}; + +static int __init init(void) +{ + return crypto_register_alg(&alg); +} + +static void __exit fini(void) +{ + crypto_unregister_alg(&alg); +} + +module_init(init); +module_exit(fini); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION ("Twofish Cipher Algorithm, i586 asm optimized"); +MODULE_ALIAS("twofish"); diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 694b0c63ee50..de1ef2fa1a20 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -417,6 +417,17 @@ config PPC_MAPLE This option enables support for the Maple 970FX Evaluation Board. For more informations, refer to <http://www.970eval.com> +config PPC_PASEMI + depends on PPC_MULTIPLATFORM && PPC64 + bool "PA Semi SoC-based platforms" + default n + select MPIC + select PPC_UDBG_16550 + select GENERIC_TBSYNC + help + This option enables support for PA Semi's PWRficient line + of SoC processors, including PA6T-1682M + config PPC_CELL bool default n @@ -436,7 +447,8 @@ config PPC_IBM_CELL_BLADE select UDBG_RTAS_CONSOLE config UDBG_RTAS_CONSOLE - bool + bool "RTAS based debug console" + depends on PPC_RTAS default n config XICS diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index e29ef77d3b00..5ad149b47e34 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug @@ -18,6 +18,20 @@ config DEBUG_STACK_USAGE This option will slow down process creation somewhat. +config HCALL_STATS + bool "Hypervisor call instrumentation" + depends on PPC_PSERIES && DEBUG_FS + help + Adds code to keep track of the number of hypervisor calls made and + the amount of time spent in hypervisor callsr. Wall time spent in + each call is always calculated, and if available CPU cycles spent + are also calculated. A directory named hcall_inst is added at the + root of the debugfs filesystem. Within the hcall_inst directory + are files that contain CPU specific call statistics. + + This option will add a small amount of overhead to all hypervisor + calls. + config DEBUGGER bool "Enable debugger hooks" depends on DEBUG_KERNEL @@ -74,6 +88,8 @@ config XMON very early during boot. 'xmon=on' will just enable the xmon debugger hooks. 'xmon=off' will disable the debugger hooks if CONFIG_XMON_DEFAULT is set. + xmon will print a backtrace on the very first invocation. + 'xmon=nobt' will disable this autobacktrace. 
config XMON_DEFAULT bool "Enable xmon by default" diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index d961bfeed05f..e73774136b55 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -36,11 +36,16 @@ zliblinuxheader := zlib.h zconf.h zutil.h $(addprefix $(obj)/,$(zlib) main.o): $(addprefix $(obj)/,$(zliblinuxheader)) $(addprefix $(obj)/,$(zlibheader)) #$(addprefix $(obj)/,main.o): $(addprefix $(obj)/,zlib.h) -src-boot := crt0.S string.S prom.c stdio.c main.c div64.S +src-boot-$(CONFIG_PPC_MULTIPLATFORM) := of.c +src-boot := crt0.S string.S stdio.c main.c div64.S $(src-boot-y) src-boot += $(zlib) src-boot := $(addprefix $(obj)/, $(src-boot)) obj-boot := $(addsuffix .o, $(basename $(src-boot))) +ifeq ($(call cc-option-yn, -fstack-protector),y) +BOOTCFLAGS += -fno-stack-protector +endif + BOOTCFLAGS += -I$(obj) -I$(srctree)/$(obj) quiet_cmd_copy_zlib = COPY $@ diff --git a/arch/powerpc/boot/dts/mpc8349emds.dts b/arch/powerpc/boot/dts/mpc8349emds.dts index 12f5dbf3055f..efceb3432653 100644 --- a/arch/powerpc/boot/dts/mpc8349emds.dts +++ b/arch/powerpc/boot/dts/mpc8349emds.dts @@ -214,10 +214,10 @@ b800 0 0 4 700 15 8 /* IDSEL 0x18 */ - b000 0 0 1 700 15 8 - b000 0 0 2 700 16 8 - b000 0 0 3 700 17 8 - b000 0 0 4 700 14 8>; + c000 0 0 1 700 15 8 + c000 0 0 2 700 16 8 + c000 0 0 3 700 17 8 + c000 0 0 4 700 14 8>; interrupt-parent = <700>; interrupts = <42 8>; bus-range = <0 0>; @@ -274,10 +274,10 @@ b800 0 0 4 700 15 8 /* IDSEL 0x18 */ - b000 0 0 1 700 15 8 - b000 0 0 2 700 16 8 - b000 0 0 3 700 17 8 - b000 0 0 4 700 14 8>; + c000 0 0 1 700 15 8 + c000 0 0 2 700 16 8 + c000 0 0 3 700 17 8 + c000 0 0 4 700 14 8>; interrupt-parent = <700>; interrupts = <42 8>; bus-range = <0 0>; diff --git a/arch/powerpc/boot/flatdevtree.h b/arch/powerpc/boot/flatdevtree.h new file mode 100644 index 000000000000..761c8dc84008 --- /dev/null +++ b/arch/powerpc/boot/flatdevtree.h @@ -0,0 +1,46 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ */ + +#ifndef FLATDEVTREE_H +#define FLATDEVTREE_H + +#include "types.h" + +/* Definitions used by the flattened device tree */ +#define OF_DT_HEADER 0xd00dfeed /* marker */ +#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */ +#define OF_DT_END_NODE 0x2 /* End node */ +#define OF_DT_PROP 0x3 /* Property: name off, size, content */ +#define OF_DT_NOP 0x4 /* nop */ +#define OF_DT_END 0x9 + +#define OF_DT_VERSION 0x10 + +struct boot_param_header { + u32 magic; /* magic word OF_DT_HEADER */ + u32 totalsize; /* total size of DT block */ + u32 off_dt_struct; /* offset to structure */ + u32 off_dt_strings; /* offset to strings */ + u32 off_mem_rsvmap; /* offset to memory reserve map */ + u32 version; /* format version */ + u32 last_comp_version; /* last compatible version */ + /* version 2 fields below */ + u32 boot_cpuid_phys; /* Physical CPU id we're booting on */ + /* version 3 fields below */ + u32 dt_strings_size; /* size of the DT strings block */ +}; + +#endif /* FLATDEVTREE_H */ diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c index b66634c9ea34..d719bb9333d1 100644 --- a/arch/powerpc/boot/main.c +++ b/arch/powerpc/boot/main.c @@ -14,17 +14,12 @@ #include "page.h" #include "string.h" #include "stdio.h" -#include "prom.h" #include "zlib.h" +#include "ops.h" +#include "flatdevtree.h" extern void flush_cache(void *, unsigned long); - -/* Value picked to match that used by yaboot */ -#define PROG_START 0x01400000 /* only used on 64-bit systems */ -#define RAM_END (512<<20) /* Fixme: use OF */ -#define ONE_MB 0x100000 - extern char _start[]; extern char __bss_start[]; extern char _end[]; @@ -33,14 +28,6 @@ extern char _vmlinux_end[]; extern char _initrd_start[]; extern char _initrd_end[]; -/* A buffer that may be edited by tools operating on a zImage binary so as to - * edit the command line passed to vmlinux (by setting /chosen/bootargs). - * The buffer is put in it's own section so that tools may locate it easier. 
- */ -static char builtin_cmdline[512] - __attribute__((section("__builtin_cmdline"))); - - struct addr_range { unsigned long addr; unsigned long size; @@ -51,21 +38,16 @@ static struct addr_range vmlinuz; static struct addr_range initrd; static unsigned long elfoffset; +static int is_64bit; -static char scratch[46912]; /* scratch space for gunzip, from zlib_inflate_workspacesize() */ +/* scratch space for gunzip; 46912 is from zlib_inflate_workspacesize() */ +static char scratch[46912]; static char elfheader[256]; - -typedef void (*kernel_entry_t)( unsigned long, - unsigned long, - void *, - void *); - +typedef void (*kernel_entry_t)(unsigned long, unsigned long, void *); #undef DEBUG -static unsigned long claim_base; - #define HEAD_CRC 2 #define EXTRA_FIELD 4 #define ORIG_NAME 8 @@ -123,24 +105,6 @@ static void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp) zlib_inflateEnd(&s); } -static unsigned long try_claim(unsigned long size) -{ - unsigned long addr = 0; - - for(; claim_base < RAM_END; claim_base += ONE_MB) { -#ifdef DEBUG - printf(" trying: 0x%08lx\n\r", claim_base); -#endif - addr = (unsigned long)claim(claim_base, size, 0); - if ((void *)addr != (void *)-1) - break; - } - if (addr == 0) - return 0; - claim_base = PAGE_ALIGN(claim_base + size); - return addr; -} - static int is_elf64(void *hdr) { Elf64_Ehdr *elf64 = hdr; @@ -169,16 +133,7 @@ static int is_elf64(void *hdr) vmlinux.size = (unsigned long)elf64ph->p_filesz + elfoffset; vmlinux.memsize = (unsigned long)elf64ph->p_memsz + elfoffset; -#if defined(PROG_START) - /* - * Maintain a "magic" minimum address. This keeps some older - * firmware platforms running. - */ - - if (claim_base < PROG_START) - claim_base = PROG_START; -#endif - + is_64bit = 1; return 1; } @@ -212,47 +167,9 @@ static int is_elf32(void *hdr) return 1; } -void export_cmdline(void* chosen_handle) -{ - int len; - char cmdline[2] = { 0, 0 }; - - if (builtin_cmdline[0] == 0) - return; - - len = getprop(chosen_handle, "bootargs", cmdline, sizeof(cmdline)); - if (len > 0 && cmdline[0] != 0) - return; - - setprop(chosen_handle, "bootargs", builtin_cmdline, - strlen(builtin_cmdline) + 1); -} - - -void start(unsigned long a1, unsigned long a2, void *promptr, void *sp) +static void prep_kernel(unsigned long *a1, unsigned long *a2) { int len; - kernel_entry_t kernel_entry; - - memset(__bss_start, 0, _end - __bss_start); - - prom = (int (*)(void *)) promptr; - chosen_handle = finddevice("/chosen"); - if (chosen_handle == (void *) -1) - exit(); - if (getprop(chosen_handle, "stdout", &stdout, sizeof(stdout)) != 4) - exit(); - - printf("\n\rzImage starting: loaded at 0x%p (sp: 0x%p)\n\r", _start, sp); - - /* - * The first available claim_base must be above the end of the - * the loaded kernel wrapper file (_start to _end includes the - * initrd image if it is present) and rounded up to a nice - * 1 MB boundary for good measure. 
- */ - - claim_base = _ALIGN_UP((unsigned long)_end, ONE_MB); vmlinuz.addr = (unsigned long)_vmlinux_start; vmlinuz.size = (unsigned long)(_vmlinux_end - _vmlinux_start); @@ -263,43 +180,51 @@ void start(unsigned long a1, unsigned long a2, void *promptr, void *sp) gunzip(elfheader, sizeof(elfheader), (unsigned char *)vmlinuz.addr, &len); } else - memcpy(elfheader, (const void *)vmlinuz.addr, sizeof(elfheader)); + memcpy(elfheader, (const void *)vmlinuz.addr, + sizeof(elfheader)); if (!is_elf64(elfheader) && !is_elf32(elfheader)) { printf("Error: not a valid PPC32 or PPC64 ELF file!\n\r"); exit(); } + if (platform_ops.image_hdr) + platform_ops.image_hdr(elfheader); - /* We need to claim the memsize plus the file offset since gzip + /* We need to alloc the memsize plus the file offset since gzip * will expand the header (file offset), then the kernel, then * possible rubbish we don't care about. But the kernel bss must * be claimed (it will be zero'd by the kernel itself) */ printf("Allocating 0x%lx bytes for kernel ...\n\r", vmlinux.memsize); - vmlinux.addr = try_claim(vmlinux.memsize); + vmlinux.addr = (unsigned long)malloc(vmlinux.memsize); if (vmlinux.addr == 0) { printf("Can't allocate memory for kernel image !\n\r"); exit(); } /* - * Now we try to claim memory for the initrd (and copy it there) + * Now we try to alloc memory for the initrd (and copy it there) */ initrd.size = (unsigned long)(_initrd_end - _initrd_start); initrd.memsize = initrd.size; if ( initrd.size > 0 ) { - printf("Allocating 0x%lx bytes for initrd ...\n\r", initrd.size); - initrd.addr = try_claim(initrd.size); + printf("Allocating 0x%lx bytes for initrd ...\n\r", + initrd.size); + initrd.addr = (unsigned long)malloc((u32)initrd.size); if (initrd.addr == 0) { - printf("Can't allocate memory for initial ramdisk !\n\r"); + printf("Can't allocate memory for initial " + "ramdisk !\n\r"); exit(); } - a1 = initrd.addr; - a2 = initrd.size; - printf("initial ramdisk moving 0x%lx <- 0x%lx (0x%lx bytes)\n\r", - initrd.addr, (unsigned long)_initrd_start, initrd.size); - memmove((void *)initrd.addr, (void *)_initrd_start, initrd.size); - printf("initrd head: 0x%lx\n\r", *((unsigned long *)initrd.addr)); + *a1 = initrd.addr; + *a2 = initrd.size; + printf("initial ramdisk moving 0x%lx <- 0x%lx " + "(0x%lx bytes)\n\r", initrd.addr, + (unsigned long)_initrd_start, initrd.size); + memmove((void *)initrd.addr, (void *)_initrd_start, + initrd.size); + printf("initrd head: 0x%lx\n\r", + *((unsigned long *)initrd.addr)); } /* Eventually gunzip the kernel */ @@ -311,11 +236,10 @@ void start(unsigned long a1, unsigned long a2, void *promptr, void *sp) (unsigned char *)vmlinuz.addr, &len); printf("done 0x%lx bytes\n\r", len); } else { - memmove((void *)vmlinux.addr,(void *)vmlinuz.addr,vmlinuz.size); + memmove((void *)vmlinux.addr,(void *)vmlinuz.addr, + vmlinuz.size); } - export_cmdline(chosen_handle); - /* Skip over the ELF header */ #ifdef DEBUG printf("... 
skipping 0x%lx bytes of ELF header\n\r", @@ -324,23 +248,107 @@ void start(unsigned long a1, unsigned long a2, void *promptr, void *sp) vmlinux.addr += elfoffset; flush_cache((void *)vmlinux.addr, vmlinux.size); +} - kernel_entry = (kernel_entry_t)vmlinux.addr; -#ifdef DEBUG - printf( "kernel:\n\r" - " entry addr = 0x%lx\n\r" - " a1 = 0x%lx,\n\r" - " a2 = 0x%lx,\n\r" - " prom = 0x%lx,\n\r" - " bi_recs = 0x%lx,\n\r", - (unsigned long)kernel_entry, a1, a2, - (unsigned long)prom, NULL); -#endif +void __attribute__ ((weak)) ft_init(void *dt_blob) +{ +} - kernel_entry(a1, a2, prom, NULL); +/* A buffer that may be edited by tools operating on a zImage binary so as to + * edit the command line passed to vmlinux (by setting /chosen/bootargs). + * The buffer is put in it's own section so that tools may locate it easier. + */ +static char builtin_cmdline[COMMAND_LINE_SIZE] + __attribute__((__section__("__builtin_cmdline"))); - printf("Error: Linux kernel returned to zImage bootloader!\n\r"); +static void get_cmdline(char *buf, int size) +{ + void *devp; + int len = strlen(builtin_cmdline); - exit(); + buf[0] = '\0'; + + if (len > 0) { /* builtin_cmdline overrides dt's /chosen/bootargs */ + len = min(len, size-1); + strncpy(buf, builtin_cmdline, len); + buf[len] = '\0'; + } + else if ((devp = finddevice("/chosen"))) + getprop(devp, "bootargs", buf, size); +} + +static void set_cmdline(char *buf) +{ + void *devp; + + if ((devp = finddevice("/chosen"))) + setprop(devp, "bootargs", buf, strlen(buf) + 1); } +/* Section where ft can be tacked on after zImage is built */ +union blobspace { + struct boot_param_header hdr; + char space[8*1024]; +} dt_blob __attribute__((__section__("__builtin_ft"))); + +struct platform_ops platform_ops; +struct dt_ops dt_ops; +struct console_ops console_ops; + +void start(unsigned long a1, unsigned long a2, void *promptr, void *sp) +{ + int have_dt = 0; + kernel_entry_t kentry; + char cmdline[COMMAND_LINE_SIZE]; + + memset(__bss_start, 0, _end - __bss_start); + memset(&platform_ops, 0, sizeof(platform_ops)); + memset(&dt_ops, 0, sizeof(dt_ops)); + memset(&console_ops, 0, sizeof(console_ops)); + + /* Override the dt_ops and device tree if there was an flat dev + * tree attached to the zImage. + */ + if (dt_blob.hdr.magic == OF_DT_HEADER) { + have_dt = 1; + ft_init(&dt_blob); + } + + if (platform_init(promptr)) + exit(); + if (console_ops.open && (console_ops.open() < 0)) + exit(); + if (platform_ops.fixups) + platform_ops.fixups(); + + printf("\n\rzImage starting: loaded at 0x%p (sp: 0x%p)\n\r", + _start, sp); + + prep_kernel(&a1, &a2); + + /* If cmdline came from zimage wrapper or if we can edit the one + * in the dt, print it out and edit it, if possible. 
+ */ + if ((strlen(builtin_cmdline) > 0) || console_ops.edit_cmdline) { + get_cmdline(cmdline, COMMAND_LINE_SIZE); + printf("\n\rLinux/PowerPC load: %s", cmdline); + if (console_ops.edit_cmdline) + console_ops.edit_cmdline(cmdline, COMMAND_LINE_SIZE); + printf("\n\r"); + set_cmdline(cmdline); + } + + if (console_ops.close) + console_ops.close(); + + kentry = (kernel_entry_t) vmlinux.addr; + if (have_dt) + kentry(dt_ops.ft_addr(), 0, NULL); + else + /* XXX initrd addr/size should be passed in properties */ + kentry(a1, a2, promptr); + + /* console closed so printf below may not work */ + printf("Error: Linux kernel returned to zImage boot wrapper!\n\r"); + exit(); +} diff --git a/arch/powerpc/boot/prom.c b/arch/powerpc/boot/of.c index fa0057736f6b..fd99f789a37b 100644 --- a/arch/powerpc/boot/prom.c +++ b/arch/powerpc/boot/of.c @@ -8,15 +8,29 @@ */ #include <stdarg.h> #include <stddef.h> +#include "types.h" +#include "elf.h" #include "string.h" #include "stdio.h" -#include "prom.h" +#include "page.h" +#include "ops.h" -int (*prom)(void *); -phandle chosen_handle; -ihandle stdout; +typedef void *ihandle; +typedef void *phandle; -int call_prom(const char *service, int nargs, int nret, ...) +extern char _end[]; + +/* Value picked to match that used by yaboot */ +#define PROG_START 0x01400000 /* only used on 64-bit systems */ +#define RAM_END (512<<20) /* Fixme: use OF */ +#define ONE_MB 0x100000 + +int (*prom) (void *); + + +static unsigned long claim_base; + +static int call_prom(const char *service, int nargs, int nret, ...) { int i; struct prom_args { @@ -45,7 +59,7 @@ int call_prom(const char *service, int nargs, int nret, ...) return (nret > 0)? args.args[nargs]: 0; } -int call_prom_ret(const char *service, int nargs, int nret, +static int call_prom_ret(const char *service, int nargs, int nret, unsigned int *rets, ...) { int i; @@ -79,11 +93,6 @@ int call_prom_ret(const char *service, int nargs, int nret, return (nret > 0)? 
args.args[nargs]: 0; } -int write(void *handle, void *ptr, int nb) -{ - return call_prom("write", 3, 1, handle, ptr, nb); -} - /* * Older OF's require that when claiming a specific range of addresses, * we claim the physical space in the /memory node and the virtual @@ -142,7 +151,7 @@ static int check_of_version(void) return 1; } -void *claim(unsigned long virt, unsigned long size, unsigned long align) +static void *claim(unsigned long virt, unsigned long size, unsigned long align) { int ret; unsigned int result; @@ -151,7 +160,7 @@ void *claim(unsigned long virt, unsigned long size, unsigned long align) need_map = check_of_version(); if (align || !need_map) return (void *) call_prom("claim", 3, 1, virt, size, align); - + ret = call_prom_ret("call-method", 5, 2, &result, "claim", memory, align, size, virt); if (ret != 0 || result == -1) @@ -163,3 +172,112 @@ void *claim(unsigned long virt, unsigned long size, unsigned long align) 0x12, size, virt, virt); return (void *) virt; } + +static void *of_try_claim(u32 size) +{ + unsigned long addr = 0; + static u8 first_time = 1; + + if (first_time) { + claim_base = _ALIGN_UP((unsigned long)_end, ONE_MB); + first_time = 0; + } + + for(; claim_base < RAM_END; claim_base += ONE_MB) { +#ifdef DEBUG + printf(" trying: 0x%08lx\n\r", claim_base); +#endif + addr = (unsigned long)claim(claim_base, size, 0); + if ((void *)addr != (void *)-1) + break; + } + if (addr == 0) + return NULL; + claim_base = PAGE_ALIGN(claim_base + size); + return (void *)addr; +} + +static void of_image_hdr(const void *hdr) +{ + const Elf64_Ehdr *elf64 = hdr; + + if (elf64->e_ident[EI_CLASS] == ELFCLASS64) { + /* + * Maintain a "magic" minimum address. This keeps some older + * firmware platforms running. + */ + if (claim_base < PROG_START) + claim_base = PROG_START; + } +} + +static void of_exit(void) +{ + call_prom("exit", 0, 0); +} + +/* + * OF device tree routines + */ +static void *of_finddevice(const char *name) +{ + return (phandle) call_prom("finddevice", 1, 1, name); +} + +static int of_getprop(const void *phandle, const char *name, void *buf, + const int buflen) +{ + return call_prom("getprop", 4, 1, phandle, name, buf, buflen); +} + +static int of_setprop(const void *phandle, const char *name, const void *buf, + const int buflen) +{ + return call_prom("setprop", 4, 1, phandle, name, buf, buflen); +} + +/* + * OF console routines + */ +static void *of_stdout_handle; + +static int of_console_open(void) +{ + void *devp; + + if (((devp = finddevice("/chosen")) != NULL) + && (getprop(devp, "stdout", &of_stdout_handle, + sizeof(of_stdout_handle)) + == sizeof(of_stdout_handle))) + return 0; + + return -1; +} + +static void of_console_write(char *buf, int len) +{ + call_prom("write", 3, 1, of_stdout_handle, buf, len); +} + +int platform_init(void *promptr) +{ + platform_ops.fixups = NULL; + platform_ops.image_hdr = of_image_hdr; + platform_ops.malloc = of_try_claim; + platform_ops.free = NULL; + platform_ops.exit = of_exit; + + dt_ops.finddevice = of_finddevice; + dt_ops.getprop = of_getprop; + dt_ops.setprop = of_setprop; + dt_ops.translate_addr = NULL; + + console_ops.open = of_console_open; + console_ops.write = of_console_write; + console_ops.edit_cmdline = NULL; + console_ops.close = NULL; + console_ops.data = NULL; + + prom = (int (*)(void *))promptr; + return 0; +} diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h new file mode 100644 index 000000000000..135eb4bb03b4 --- /dev/null +++ b/arch/powerpc/boot/ops.h @@ -0,0 +1,100 @@ +/* + * Global definition of 
all the bootwrapper operations. + * + * Author: Mark A. Greer <mgreer@mvista.com> + * + * 2006 (c) MontaVista Software, Inc. This file is licensed under + * the terms of the GNU General Public License version 2. This program + * is licensed "as is" without any warranty of any kind, whether express + * or implied. + */ +#ifndef _PPC_BOOT_OPS_H_ +#define _PPC_BOOT_OPS_H_ + +#include "types.h" + +#define COMMAND_LINE_SIZE 512 +#define MAX_PATH_LEN 256 +#define MAX_PROP_LEN 256 /* What should this be? */ + +/* Platform specific operations */ +struct platform_ops { + void (*fixups)(void); + void (*image_hdr)(const void *); + void * (*malloc)(u32 size); + void (*free)(void *ptr, u32 size); + void (*exit)(void); +}; +extern struct platform_ops platform_ops; + +/* Device Tree operations */ +struct dt_ops { + void * (*finddevice)(const char *name); + int (*getprop)(const void *node, const char *name, void *buf, + const int buflen); + int (*setprop)(const void *node, const char *name, + const void *buf, const int buflen); + u64 (*translate_addr)(const char *path, const u32 *in_addr, + const u32 addr_len); + unsigned long (*ft_addr)(void); +}; +extern struct dt_ops dt_ops; + +/* Console operations */ +struct console_ops { + int (*open)(void); + void (*write)(char *buf, int len); + void (*edit_cmdline)(char *buf, int len); + void (*close)(void); + void *data; +}; +extern struct console_ops console_ops; + +/* Serial console operations */ +struct serial_console_data { + int (*open)(void); + void (*putc)(unsigned char c); + unsigned char (*getc)(void); + u8 (*tstc)(void); + void (*close)(void); +}; + +extern int platform_init(void *promptr); +extern void simple_alloc_init(void); +extern void ft_init(void *dt_blob); +extern int serial_console_init(void); + +static inline void *finddevice(const char *name) +{ + return (dt_ops.finddevice) ? dt_ops.finddevice(name) : NULL; +} + +static inline int getprop(void *devp, const char *name, void *buf, int buflen) +{ + return (dt_ops.getprop) ? dt_ops.getprop(devp, name, buf, buflen) : -1; +} + +static inline int setprop(void *devp, const char *name, void *buf, int buflen) +{ + return (dt_ops.setprop) ? dt_ops.setprop(devp, name, buf, buflen) : -1; +} + +static inline void *malloc(u32 size) +{ + return (platform_ops.malloc) ? 
platform_ops.malloc(size) : NULL; +} + +static inline void free(void *ptr, u32 size) +{ + if (platform_ops.free) + platform_ops.free(ptr, size); +} + +static inline void exit(void) +{ + if (platform_ops.exit) + platform_ops.exit(); + for(;;); +} + +#endif /* _PPC_BOOT_OPS_H_ */ diff --git a/arch/powerpc/boot/prom.h b/arch/powerpc/boot/prom.h deleted file mode 100644 index a57b184c564f..000000000000 --- a/arch/powerpc/boot/prom.h +++ /dev/null @@ -1,41 +0,0 @@ -#ifndef _PPC_BOOT_PROM_H_ -#define _PPC_BOOT_PROM_H_ - -typedef void *phandle; -typedef void *ihandle; - -extern int (*prom) (void *); -extern phandle chosen_handle; -extern ihandle stdout; - -int call_prom(const char *service, int nargs, int nret, ...); -int call_prom_ret(const char *service, int nargs, int nret, - unsigned int *rets, ...); - -extern int write(void *handle, void *ptr, int nb); -extern void *claim(unsigned long virt, unsigned long size, unsigned long aln); - -static inline void exit(void) -{ - call_prom("exit", 0, 0); -} - -static inline phandle finddevice(const char *name) -{ - return (phandle) call_prom("finddevice", 1, 1, name); -} - -static inline int getprop(void *phandle, const char *name, - void *buf, int buflen) -{ - return call_prom("getprop", 4, 1, phandle, name, buf, buflen); -} - - -static inline int setprop(void *phandle, const char *name, - void *buf, int buflen) -{ - return call_prom("setprop", 4, 1, phandle, name, buf, buflen); -} - -#endif /* _PPC_BOOT_PROM_H_ */ diff --git a/arch/powerpc/boot/stdio.c b/arch/powerpc/boot/stdio.c index b5aa522f8b77..6d5f6382e1ce 100644 --- a/arch/powerpc/boot/stdio.c +++ b/arch/powerpc/boot/stdio.c @@ -10,7 +10,7 @@ #include <stddef.h> #include "string.h" #include "stdio.h" -#include "prom.h" +#include "ops.h" size_t strnlen(const char * s, size_t count) { @@ -320,6 +320,6 @@ printf(const char *fmt, ...) va_start(args, fmt); n = vsprintf(sprint_buf, fmt, args); va_end(args); - write(stdout, sprint_buf, n); + console_ops.write(sprint_buf, n); return n; } diff --git a/arch/powerpc/boot/stdio.h b/arch/powerpc/boot/stdio.h index eb9e16c87aef..73b8a91bfb34 100644 --- a/arch/powerpc/boot/stdio.h +++ b/arch/powerpc/boot/stdio.h @@ -1,8 +1,16 @@ #ifndef _PPC_BOOT_STDIO_H_ #define _PPC_BOOT_STDIO_H_ +#include <stdarg.h> + +#define ENOMEM 12 /* Out of Memory */ +#define EINVAL 22 /* Invalid argument */ +#define ENOSPC 28 /* No space left on device */ + extern int printf(const char *fmt, ...); +#define fprintf(fmt, args...) printf(args) + extern int sprintf(char *buf, const char *fmt, ...); extern int vsprintf(char *buf, const char *fmt, va_list args); diff --git a/arch/powerpc/boot/types.h b/arch/powerpc/boot/types.h new file mode 100644 index 000000000000..79d26e708677 --- /dev/null +++ b/arch/powerpc/boot/types.h @@ -0,0 +1,23 @@ +#ifndef _TYPES_H_ +#define _TYPES_H_ + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +typedef unsigned char u8; +typedef unsigned short u16; +typedef unsigned int u32; +typedef unsigned long long u64; + +#define min(x,y) ({ \ + typeof(x) _x = (x); \ + typeof(y) _y = (y); \ + (void) (&_x == &_y); \ + _x < _y ? _x : _y; }) + +#define max(x,y) ({ \ + typeof(x) _x = (x); \ + typeof(y) _y = (y); \ + (void) (&_x == &_y); \ + _x > _y ? 
_x : _y; }) + +#endif /* _TYPES_H_ */ diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig index 2860be106f4f..62ba66091a13 100644 --- a/arch/powerpc/configs/maple_defconfig +++ b/arch/powerpc/configs/maple_defconfig @@ -496,7 +496,7 @@ CONFIG_E1000=y # CONFIG_SKY2 is not set # CONFIG_SK98LIN is not set # CONFIG_VIA_VELOCITY is not set -# CONFIG_TIGON3 is not set +CONFIG_TIGON3=y # CONFIG_BNX2 is not set # CONFIG_MV643XX_ETH is not set diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 7d32ad0194a4..8b133afbdc20 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -16,7 +16,7 @@ obj-y := semaphore.o cputable.o ptrace.o syscalls.o \ obj-y += vdso32/ obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \ signal_64.o ptrace32.o \ - paca.o cpu_setup_power4.o \ + paca.o cpu_setup_ppc970.o \ firmware.o sysfs.o obj-$(CONFIG_PPC64) += vdso64/ obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o @@ -51,7 +51,7 @@ extra-$(CONFIG_8xx) := head_8xx.o extra-y += vmlinux.lds obj-y += time.o prom.o traps.o setup-common.o \ - udbg.o misc.o + udbg.o misc.o io.o obj-$(CONFIG_PPC32) += entry_32.o setup_32.o misc_32.o obj-$(CONFIG_PPC64) += misc_64.o dma_64.o iommu.o obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 7ee84968087b..d06f378597bb 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -40,9 +40,10 @@ #ifdef CONFIG_PPC64 #include <asm/paca.h> #include <asm/lppaca.h> -#include <asm/iseries/hv_lp_event.h> #include <asm/cache.h> #include <asm/compat.h> +#include <asm/mmu.h> +#include <asm/hvcall.h> #endif #define DEFINE(sym, val) \ @@ -136,11 +137,18 @@ int main(void) DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr)); DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time)); DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time)); + DEFINE(PACA_SLBSHADOWPTR, offsetof(struct paca_struct, slb_shadow_ptr)); + DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset)); + DEFINE(SLBSHADOW_STACKVSID, + offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid)); + DEFINE(SLBSHADOW_STACKESID, + offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid)); DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0)); DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1)); DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int)); DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int)); + DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area)); #endif /* CONFIG_PPC64 */ /* RTAS */ @@ -159,6 +167,12 @@ int main(void) /* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. 
*/ DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16); + + /* hcall statistics */ + DEFINE(HCALL_STAT_SIZE, sizeof(struct hcall_stats)); + DEFINE(HCALL_STAT_CALLS, offsetof(struct hcall_stats, num_calls)); + DEFINE(HCALL_STAT_TB, offsetof(struct hcall_stats, tb_total)); + DEFINE(HCALL_STAT_PURR, offsetof(struct hcall_stats, purr_total)); #endif /* CONFIG_PPC64 */ DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0])); DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1])); @@ -240,6 +254,7 @@ int main(void) DEFINE(CPU_SPEC_PVR_VALUE, offsetof(struct cpu_spec, pvr_value)); DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features)); DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup)); + DEFINE(CPU_SPEC_RESTORE, offsetof(struct cpu_spec, cpu_restore)); #ifndef CONFIG_PPC64 DEFINE(pbe_address, offsetof(struct pbe, address)); diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c index f4e5e14ee2b6..995fcef156fd 100644 --- a/arch/powerpc/kernel/btext.c +++ b/arch/powerpc/kernel/btext.c @@ -158,35 +158,35 @@ int btext_initialize(struct device_node *np) { unsigned int width, height, depth, pitch; unsigned long address = 0; - u32 *prop; + const u32 *prop; - prop = (u32 *)get_property(np, "linux,bootx-width", NULL); + prop = get_property(np, "linux,bootx-width", NULL); if (prop == NULL) - prop = (u32 *)get_property(np, "width", NULL); + prop = get_property(np, "width", NULL); if (prop == NULL) return -EINVAL; width = *prop; - prop = (u32 *)get_property(np, "linux,bootx-height", NULL); + prop = get_property(np, "linux,bootx-height", NULL); if (prop == NULL) - prop = (u32 *)get_property(np, "height", NULL); + prop = get_property(np, "height", NULL); if (prop == NULL) return -EINVAL; height = *prop; - prop = (u32 *)get_property(np, "linux,bootx-depth", NULL); + prop = get_property(np, "linux,bootx-depth", NULL); if (prop == NULL) - prop = (u32 *)get_property(np, "depth", NULL); + prop = get_property(np, "depth", NULL); if (prop == NULL) return -EINVAL; depth = *prop; pitch = width * ((depth + 7) / 8); - prop = (u32 *)get_property(np, "linux,bootx-linebytes", NULL); + prop = get_property(np, "linux,bootx-linebytes", NULL); if (prop == NULL) - prop = (u32 *)get_property(np, "linebytes", NULL); + prop = get_property(np, "linebytes", NULL); if (prop) pitch = *prop; if (pitch == 1) pitch = 0x1000; - prop = (u32 *)get_property(np, "address", NULL); + prop = get_property(np, "address", NULL); if (prop) address = *prop; @@ -214,11 +214,11 @@ int btext_initialize(struct device_node *np) int __init btext_find_display(int allow_nonstdout) { - char *name; + const char *name; struct device_node *np = NULL; int rc = -ENODEV; - name = (char *)get_property(of_chosen, "linux,stdout-path", NULL); + name = get_property(of_chosen, "linux,stdout-path", NULL); if (name != NULL) { np = of_find_node_by_path(name); if (np != NULL) { diff --git a/arch/powerpc/kernel/cpu_setup_power4.S b/arch/powerpc/kernel/cpu_setup_ppc970.S index 76e97aa71c45..652594891d58 100644 --- a/arch/powerpc/kernel/cpu_setup_power4.S +++ b/arch/powerpc/kernel/cpu_setup_ppc970.S @@ -16,27 +16,12 @@ #include <asm/asm-offsets.h> #include <asm/cache.h> -_GLOBAL(__970_cpu_preinit) - /* - * Do nothing if not running in HV mode - */ +_GLOBAL(__cpu_preinit_ppc970) + /* Do nothing if not running in HV mode */ mfmsr r0 rldicl. r0,r0,4,63 beqlr - /* - * Deal only with PPC970 and PPC970FX. 
- */ - mfspr r0,SPRN_PVR - srwi r0,r0,16 - cmpwi r0,0x39 - beq 1f - cmpwi r0,0x3c - beq 1f - cmpwi r0,0x44 - bnelr -1: - /* Make sure HID4:rm_ci is off before MMU is turned off, that large * pages are enabled with HID4:61 and clear HID5:DCBZ_size and * HID5:DCBZ32_ill @@ -72,23 +57,6 @@ _GLOBAL(__970_cpu_preinit) isync blr -_GLOBAL(__setup_cpu_ppc970) - mfspr r0,SPRN_HID0 - li r11,5 /* clear DOZE and SLEEP */ - rldimi r0,r11,52,8 /* set NAP and DPM */ - li r11,0 - rldimi r0,r11,32,31 /* clear EN_ATTN */ - mtspr SPRN_HID0,r0 - mfspr r0,SPRN_HID0 - mfspr r0,SPRN_HID0 - mfspr r0,SPRN_HID0 - mfspr r0,SPRN_HID0 - mfspr r0,SPRN_HID0 - mfspr r0,SPRN_HID0 - sync - isync - blr - /* Definitions for the table use to save CPU states */ #define CS_HID0 0 #define CS_HID1 8 @@ -103,33 +71,30 @@ cpu_state_storage: .balign L1_CACHE_BYTES,0 .text -/* Called in normal context to backup CPU 0 state. This - * does not include cache settings. This function is also - * called for machine sleep. This does not include the MMU - * setup, BATs, etc... but rather the "special" registers - * like HID0, HID1, HID4, etc... - */ -_GLOBAL(__save_cpu_setup) - /* Some CR fields are volatile, we back it up all */ - mfcr r7 - - /* Get storage ptr */ - LOAD_REG_IMMEDIATE(r5,cpu_state_storage) - /* We only deal with 970 for now */ - mfspr r0,SPRN_PVR - srwi r0,r0,16 - cmpwi r0,0x39 - beq 1f - cmpwi r0,0x3c - beq 1f - cmpwi r0,0x44 - bne 2f - -1: /* skip if not running in HV mode */ +_GLOBAL(__setup_cpu_ppc970) + /* Do nothing if not running in HV mode */ mfmsr r0 rldicl. r0,r0,4,63 - beq 2f + beqlr + + mfspr r0,SPRN_HID0 + li r11,5 /* clear DOZE and SLEEP */ + rldimi r0,r11,52,8 /* set NAP and DPM */ + li r11,0 + rldimi r0,r11,32,31 /* clear EN_ATTN */ + mtspr SPRN_HID0,r0 + mfspr r0,SPRN_HID0 + mfspr r0,SPRN_HID0 + mfspr r0,SPRN_HID0 + mfspr r0,SPRN_HID0 + mfspr r0,SPRN_HID0 + mfspr r0,SPRN_HID0 + sync + isync + + /* Save away cpu state */ + LOAD_REG_IMMEDIATE(r5,cpu_state_storage) /* Save HID0,1,4 and 5 */ mfspr r3,SPRN_HID0 @@ -141,35 +106,19 @@ _GLOBAL(__save_cpu_setup) mfspr r3,SPRN_HID5 std r3,CS_HID5(r5) -2: - mtcr r7 blr /* Called with no MMU context (typically MSR:IR/DR off) to * restore CPU state as backed up by the previous * function. This does not include cache setting */ -_GLOBAL(__restore_cpu_setup) - /* Get storage ptr (FIXME when using anton reloc as we - * are running with translation disabled here - */ - LOAD_REG_IMMEDIATE(r5,cpu_state_storage) - - /* We only deal with 970 for now */ - mfspr r0,SPRN_PVR - srwi r0,r0,16 - cmpwi r0,0x39 - beq 1f - cmpwi r0,0x3c - beq 1f - cmpwi r0,0x44 - bnelr - -1: /* skip if not running in HV mode */ +_GLOBAL(__restore_cpu_ppc970) + /* Do nothing if not running in HV mode */ mfmsr r0 rldicl. 
r0,r0,4,63 beqlr + LOAD_REG_IMMEDIATE(r5,cpu_state_storage) /* Before accessing memory, we make sure rm_ci is clear */ li r0,0 mfspr r3,SPRN_HID4 diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 272e43622fd6..190a57e20765 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -39,7 +39,10 @@ extern void __setup_cpu_7400(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_7410(unsigned long offset, struct cpu_spec* spec); extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec); #endif /* CONFIG_PPC32 */ +#ifdef CONFIG_PPC64 extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec); +extern void __restore_cpu_ppc970(void); +#endif /* CONFIG_PPC64 */ /* This table only contains "desktop" CPUs, it need to be filled with embedded * ones as well... @@ -55,6 +58,9 @@ extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec); #define COMMON_USER_POWER6 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_05 |\ PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \ PPC_FEATURE_TRUE_LE) +#define COMMON_USER_PA6T (COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\ + PPC_FEATURE_TRUE_LE | \ + PPC_FEATURE_HAS_ALTIVEC_COMP) #define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \ PPC_FEATURE_BOOKE) @@ -184,6 +190,7 @@ struct cpu_spec cpu_specs[] = { .dcache_bsize = 128, .num_pmcs = 8, .cpu_setup = __setup_cpu_ppc970, + .cpu_restore = __restore_cpu_ppc970, .oprofile_cpu_type = "ppc64/970", .oprofile_type = PPC_OPROFILE_POWER4, .platform = "ppc970", @@ -199,6 +206,7 @@ struct cpu_spec cpu_specs[] = { .dcache_bsize = 128, .num_pmcs = 8, .cpu_setup = __setup_cpu_ppc970, + .cpu_restore = __restore_cpu_ppc970, .oprofile_cpu_type = "ppc64/970", .oprofile_type = PPC_OPROFILE_POWER4, .platform = "ppc970", @@ -214,6 +222,7 @@ struct cpu_spec cpu_specs[] = { .dcache_bsize = 128, .num_pmcs = 8, .cpu_setup = __setup_cpu_ppc970, + .cpu_restore = __restore_cpu_ppc970, .oprofile_cpu_type = "ppc64/970", .oprofile_type = PPC_OPROFILE_POWER4, .platform = "ppc970", @@ -280,6 +289,17 @@ struct cpu_spec cpu_specs[] = { .dcache_bsize = 128, .platform = "ppc-cell-be", }, + { /* PA Semi PA6T */ + .pvr_mask = 0x7fff0000, + .pvr_value = 0x00900000, + .cpu_name = "PA6T", + .cpu_features = CPU_FTRS_PA6T, + .cpu_user_features = COMMON_USER_PA6T, + .icache_bsize = 64, + .dcache_bsize = 64, + .num_pmcs = 6, + .platform = "pa6t", + }, { /* default match */ .pvr_mask = 0x00000000, .pvr_value = 0x00000000, @@ -929,6 +949,7 @@ struct cpu_spec cpu_specs[] = { PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .icache_bsize = 32, .dcache_bsize = 32, + .platform = "ppc405", }, { /* 405EP */ .pvr_mask = 0xffff0000, diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index 371973be8d71..2f6f5a7bc69e 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c @@ -80,7 +80,7 @@ static int __init parse_savemaxmem(char *p) } __setup("savemaxmem=", parse_savemaxmem); -/* +/** * copy_oldmem_page - copy one page from "oldmem" * @pfn: page frame number to be copied * @buf: target memory address for the copy; this can be in kernel address diff --git a/arch/powerpc/kernel/dma_64.c b/arch/powerpc/kernel/dma_64.c index 36aaa7663f02..6c168f6ea142 100644 --- a/arch/powerpc/kernel/dma_64.c +++ b/arch/powerpc/kernel/dma_64.c @@ -35,10 +35,9 @@ int dma_supported(struct device *dev, u64 mask) { struct dma_mapping_ops *dma_ops = get_dma_ops(dev); - if (dma_ops) - return dma_ops->dma_supported(dev, 
mask); - BUG(); - return 0; + BUG_ON(!dma_ops); + + return dma_ops->dma_supported(dev, mask); } EXPORT_SYMBOL(dma_supported); @@ -66,10 +65,9 @@ void *dma_alloc_coherent(struct device *dev, size_t size, { struct dma_mapping_ops *dma_ops = get_dma_ops(dev); - if (dma_ops) - return dma_ops->alloc_coherent(dev, size, dma_handle, flag); - BUG(); - return NULL; + BUG_ON(!dma_ops); + + return dma_ops->alloc_coherent(dev, size, dma_handle, flag); } EXPORT_SYMBOL(dma_alloc_coherent); @@ -78,10 +76,9 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, { struct dma_mapping_ops *dma_ops = get_dma_ops(dev); - if (dma_ops) - dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); - else - BUG(); + BUG_ON(!dma_ops); + + dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); } EXPORT_SYMBOL(dma_free_coherent); @@ -90,10 +87,9 @@ dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, size_t size, { struct dma_mapping_ops *dma_ops = get_dma_ops(dev); - if (dma_ops) - return dma_ops->map_single(dev, cpu_addr, size, direction); - BUG(); - return (dma_addr_t)0; + BUG_ON(!dma_ops); + + return dma_ops->map_single(dev, cpu_addr, size, direction); } EXPORT_SYMBOL(dma_map_single); @@ -102,10 +98,9 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, { struct dma_mapping_ops *dma_ops = get_dma_ops(dev); - if (dma_ops) - dma_ops->unmap_single(dev, dma_addr, size, direction); - else - BUG(); + BUG_ON(!dma_ops); + + dma_ops->unmap_single(dev, dma_addr, size, direction); } EXPORT_SYMBOL(dma_unmap_single); @@ -115,11 +110,10 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page, { struct dma_mapping_ops *dma_ops = get_dma_ops(dev); - if (dma_ops) - return dma_ops->map_single(dev, - (page_address(page) + offset), size, direction); - BUG(); - return (dma_addr_t)0; + BUG_ON(!dma_ops); + + return dma_ops->map_single(dev, page_address(page) + offset, size, + direction); } EXPORT_SYMBOL(dma_map_page); @@ -128,10 +122,9 @@ void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, { struct dma_mapping_ops *dma_ops = get_dma_ops(dev); - if (dma_ops) - dma_ops->unmap_single(dev, dma_address, size, direction); - else - BUG(); + BUG_ON(!dma_ops); + + dma_ops->unmap_single(dev, dma_address, size, direction); } EXPORT_SYMBOL(dma_unmap_page); @@ -140,10 +133,9 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, { struct dma_mapping_ops *dma_ops = get_dma_ops(dev); - if (dma_ops) - return dma_ops->map_sg(dev, sg, nents, direction); - BUG(); - return 0; + BUG_ON(!dma_ops); + + return dma_ops->map_sg(dev, sg, nents, direction); } EXPORT_SYMBOL(dma_map_sg); @@ -152,9 +144,8 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, { struct dma_mapping_ops *dma_ops = get_dma_ops(dev); - if (dma_ops) - dma_ops->unmap_sg(dev, sg, nhwentries, direction); - else - BUG(); + BUG_ON(!dma_ops); + + dma_ops->unmap_sg(dev, sg, nhwentries, direction); } EXPORT_SYMBOL(dma_unmap_sg); diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 54d9f5cdaab4..2cd872b5283b 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -375,6 +375,14 @@ BEGIN_FTR_SECTION ld r7,KSP_VSID(r4) /* Get new stack's VSID */ oris r0,r6,(SLB_ESID_V)@h ori r0,r0,(SLB_NUM_BOLTED-1)@l + + /* Update the last bolted SLB */ + ld r9,PACA_SLBSHADOWPTR(r13) + li r12,0 + std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */ + std r7,SLBSHADOW_STACKVSID(r9) /* Save VSID */ + std r0,SLBSHADOW_STACKESID(r9) /* 
Save ESID */ + slbie r6 slbie r6 /* Workaround POWER5 < DD2.1 issue */ slbmte r7,r0 diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 6ff3cf506088..3065b472b95d 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -132,7 +132,7 @@ _GLOBAL(__secondary_hold) bne 100b #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC) - LOAD_REG_IMMEDIATE(r4, .pSeries_secondary_smp_init) + LOAD_REG_IMMEDIATE(r4, .generic_secondary_smp_init) mtctr r4 mr r3,r24 bctr @@ -1484,19 +1484,17 @@ fwnmi_data_area: . = 0x8000 /* - * On pSeries, secondary processors spin in the following code. + * On pSeries and most other platforms, secondary processors spin + * in the following code. * At entry, r3 = this processor's number (physical cpu id) */ -_GLOBAL(pSeries_secondary_smp_init) +_GLOBAL(generic_secondary_smp_init) mr r24,r3 /* turn on 64-bit mode */ bl .enable_64b_mode isync - /* Copy some CPU settings from CPU 0 */ - bl .__restore_cpu_setup - /* Set up a paca value for this processor. Since we have the * physical cpu id in r24, we need to search the pacas to find * which logical id maps to our physical one. @@ -1522,15 +1520,28 @@ _GLOBAL(pSeries_secondary_smp_init) /* start. */ sync - /* Create a temp kernel stack for use before relocation is on. */ +#ifndef CONFIG_SMP + b 3b /* Never go on non-SMP */ +#else + cmpwi 0,r23,0 + beq 3b /* Loop until told to go */ + + /* See if we need to call a cpu state restore handler */ + LOAD_REG_IMMEDIATE(r23, cur_cpu_spec) + ld r23,0(r23) + ld r23,CPU_SPEC_RESTORE(r23) + cmpdi 0,r23,0 + beq 4f + ld r23,0(r23) + mtctr r23 + bctrl + +4: /* Create a temp kernel stack for use before relocation is on. */ ld r1,PACAEMERGSP(r13) subi r1,r1,STACK_FRAME_OVERHEAD - cmpwi 0,r23,0 -#ifdef CONFIG_SMP - bne .__secondary_start + b .__secondary_start #endif - b 3b /* Loop until told to go */ #ifdef CONFIG_PPC_ISERIES _STATIC(__start_initialization_iSeries) @@ -1611,7 +1622,16 @@ _GLOBAL(__start_initialization_multiplatform) bl .enable_64b_mode /* Setup some critical 970 SPRs before switching MMU off */ - bl .__970_cpu_preinit + mfspr r0,SPRN_PVR + srwi r0,r0,16 + cmpwi r0,0x39 /* 970 */ + beq 1f + cmpwi r0,0x3c /* 970FX */ + beq 1f + cmpwi r0,0x44 /* 970MP */ + bne 2f +1: bl .__cpu_preinit_ppc970 +2: /* Switch off MMU if not already */ LOAD_REG_IMMEDIATE(r4, .__after_prom_start - KERNELBASE) @@ -1728,7 +1748,7 @@ _STATIC(__after_prom_start) _GLOBAL(copy_and_flush) addi r5,r5,-8 addi r6,r6,-8 -4: li r0,16 /* Use the least common */ +4: li r0,8 /* Use the smallest common */ /* denominator cache line */ /* size. This results in */ /* extra cache line flushes */ @@ -1782,7 +1802,7 @@ _GLOBAL(pmac_secondary_start) isync /* Copy some CPU settings from CPU 0 */ - bl .__restore_cpu_setup + bl .__restore_cpu_ppc970 /* pSeries do that early though I don't think we really need it */ mfmsr r3 @@ -1932,12 +1952,6 @@ _STATIC(start_here_multiplatform) mr r5,r26 bl .identify_cpu - /* Save some low level config HIDs of CPU0 to be copied to - * other CPUs later on, or used for suspend/resume - */ - bl .__save_cpu_setup - sync - /* Do very early kernel initializations, including initial hash table, * stab and slb setup before we turn on relocation. 
*/ diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index 68e5ab0443d2..124dbcba94a8 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c @@ -167,7 +167,7 @@ static DEVICE_ATTR(name, S_IRUSR | S_IRGRP | S_IROTH, ibmebusdev_show_name, NULL); static struct ibmebus_dev* __devinit ibmebus_register_device_common( - struct ibmebus_dev *dev, char *name) + struct ibmebus_dev *dev, const char *name) { int err = 0; @@ -194,10 +194,10 @@ static struct ibmebus_dev* __devinit ibmebus_register_device_node( struct device_node *dn) { struct ibmebus_dev *dev; - char *loc_code; + const char *loc_code; int length; - loc_code = (char *)get_property(dn, "ibm,loc-code", NULL); + loc_code = get_property(dn, "ibm,loc-code", NULL); if (!loc_code) { printk(KERN_WARNING "%s: node %s missing 'ibm,loc-code'\n", __FUNCTION__, dn->name ? dn->name : "<unknown>"); diff --git a/arch/powerpc/kernel/io.c b/arch/powerpc/kernel/io.c new file mode 100644 index 000000000000..e98180686b35 --- /dev/null +++ b/arch/powerpc/kernel/io.c @@ -0,0 +1,131 @@ +/* + * I/O string operations + * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) + * Copyright (C) 2006 IBM Corporation + * + * Largely rewritten by Cort Dougan (cort@cs.nmt.edu) + * and Paul Mackerras. + * + * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com) + * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com) + * + * Rewritten in C by Stephen Rothwell. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/compiler.h> +#include <linux/module.h> + +#include <asm/io.h> +#include <asm/firmware.h> +#include <asm/bug.h> + +void _insb(volatile u8 __iomem *port, void *buf, long count) +{ + u8 *tbuf = buf; + u8 tmp; + + BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES)); + + if (unlikely(count <= 0)) + return; + asm volatile("sync"); + do { + tmp = *port; + asm volatile("eieio"); + *tbuf++ = tmp; + } while (--count != 0); + asm volatile("twi 0,%0,0; isync" : : "r" (tmp)); +} +EXPORT_SYMBOL(_insb); + +void _outsb(volatile u8 __iomem *port, const void *buf, long count) +{ + const u8 *tbuf = buf; + + BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES)); + + if (unlikely(count <= 0)) + return; + asm volatile("sync"); + do { + *port = *tbuf++; + } while (--count != 0); + asm volatile("sync"); +} +EXPORT_SYMBOL(_outsb); + +void _insw_ns(volatile u16 __iomem *port, void *buf, long count) +{ + u16 *tbuf = buf; + u16 tmp; + + BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES)); + + if (unlikely(count <= 0)) + return; + asm volatile("sync"); + do { + tmp = *port; + asm volatile("eieio"); + *tbuf++ = tmp; + } while (--count != 0); + asm volatile("twi 0,%0,0; isync" : : "r" (tmp)); +} +EXPORT_SYMBOL(_insw_ns); + +void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count) +{ + const u16 *tbuf = buf; + + BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES)); + + if (unlikely(count <= 0)) + return; + asm volatile("sync"); + do { + *port = *tbuf++; + } while (--count != 0); + asm volatile("sync"); +} +EXPORT_SYMBOL(_outsw_ns); + +void _insl_ns(volatile u32 __iomem *port, void *buf, long count) +{ + u32 *tbuf = buf; + u32 tmp; + + BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES)); + + if (unlikely(count <= 0)) + return; + asm volatile("sync"); + do { + tmp = 
*port; + asm volatile("eieio"); + *tbuf++ = tmp; + } while (--count != 0); + asm volatile("twi 0,%0,0; isync" : : "r" (tmp)); +} +EXPORT_SYMBOL(_insl_ns); + +void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count) +{ + const u32 *tbuf = buf; + + BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES)); + + if (unlikely(count <= 0)) + return; + asm volatile("sync"); + do { + *port = *tbuf++; + } while (--count != 0); + asm volatile("sync"); +} +EXPORT_SYMBOL(_outsl_ns); diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 12c5971d6565..b4432332341f 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -52,6 +52,7 @@ #include <linux/radix-tree.h> #include <linux/mutex.h> #include <linux/bootmem.h> +#include <linux/pci.h> #include <asm/uaccess.h> #include <asm/system.h> @@ -875,12 +876,14 @@ int pci_enable_msi(struct pci_dev * pdev) else return -1; } +EXPORT_SYMBOL(pci_enable_msi); void pci_disable_msi(struct pci_dev * pdev) { if (ppc_md.disable_msi) ppc_md.disable_msi(pdev); } +EXPORT_SYMBOL(pci_disable_msi); void pci_scan_msi_device(struct pci_dev *dev) {} int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) {return -1;} @@ -888,6 +891,8 @@ void pci_disable_msix(struct pci_dev *dev) {} void msi_remove_pci_irq_vectors(struct pci_dev *dev) {} void disable_msi_mode(struct pci_dev *dev, int pos, int type) {} void pci_no_msi(void) {} +EXPORT_SYMBOL(pci_enable_msix); +EXPORT_SYMBOL(pci_disable_msix); #endif diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c index 40a39291861f..5e6ddfa474c0 100644 --- a/arch/powerpc/kernel/legacy_serial.c +++ b/arch/powerpc/kernel/legacy_serial.c @@ -39,16 +39,17 @@ static int __init add_legacy_port(struct device_node *np, int want_index, phys_addr_t taddr, unsigned long irq, upf_t flags, int irq_check_parent) { - u32 *clk, *spd, clock = BASE_BAUD * 16; + const u32 *clk, *spd; + u32 clock = BASE_BAUD * 16; int index; /* get clock freq. if present */ - clk = (u32 *)get_property(np, "clock-frequency", NULL); + clk = get_property(np, "clock-frequency", NULL); if (clk && *clk) clock = *clk; /* get default speed if present */ - spd = (u32 *)get_property(np, "current-speed", NULL); + spd = get_property(np, "current-speed", NULL); /* If we have a location index, then try to use it */ if (want_index >= 0 && want_index < MAX_LEGACY_SERIAL_PORTS) @@ -113,7 +114,7 @@ static int __init add_legacy_soc_port(struct device_node *np, struct device_node *soc_dev) { u64 addr; - u32 *addrp; + const u32 *addrp; upf_t flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ; struct device_node *tsi = of_get_parent(np); @@ -144,15 +145,15 @@ static int __init add_legacy_soc_port(struct device_node *np, static int __init add_legacy_isa_port(struct device_node *np, struct device_node *isa_brg) { - u32 *reg; - char *typep; + const u32 *reg; + const char *typep; int index = -1; u64 taddr; DBG(" -> add_legacy_isa_port(%s)\n", np->full_name); /* Get the ISA port number */ - reg = (u32 *)get_property(np, "reg", NULL); + reg = get_property(np, "reg", NULL); if (reg == NULL) return -1; @@ -163,7 +164,7 @@ static int __init add_legacy_isa_port(struct device_node *np, /* Now look for an "ibm,aix-loc" property that gives us ordering * if any... 
*/ - typep = (char *)get_property(np, "ibm,aix-loc", NULL); + typep = get_property(np, "ibm,aix-loc", NULL); /* If we have a location index, then use it */ if (typep && *typep == 'S') @@ -188,7 +189,7 @@ static int __init add_legacy_pci_port(struct device_node *np, struct device_node *pci_dev) { u64 addr, base; - u32 *addrp; + const u32 *addrp; unsigned int flags; int iotype, index = -1, lindex = 0; @@ -227,7 +228,7 @@ static int __init add_legacy_pci_port(struct device_node *np, * we get to their "reg" property */ if (np != pci_dev) { - u32 *reg = (u32 *)get_property(np, "reg", NULL); + const u32 *reg = get_property(np, "reg", NULL); if (reg && (*reg < 4)) index = lindex = *reg; } @@ -285,13 +286,13 @@ static void __init setup_legacy_serial_console(int console) void __init find_legacy_serial_ports(void) { struct device_node *np, *stdout = NULL; - char *path; + const char *path; int index; DBG(" -> find_legacy_serial_port()\n"); /* Now find out if one of these is our firmware console */ - path = (char *)get_property(of_chosen, "linux,stdout-path", NULL); + path = get_property(of_chosen, "linux,stdout-path", NULL); if (path != NULL) { stdout = of_find_node_by_path(path); if (stdout) @@ -491,8 +492,8 @@ static int __init check_legacy_serial_console(void) { struct device_node *prom_stdout = NULL; int speed = 0, offset = 0; - char *name; - u32 *spd; + const char *name; + const u32 *spd; DBG(" -> check_legacy_serial_console()\n"); @@ -513,7 +514,7 @@ static int __init check_legacy_serial_console(void) } /* We are getting a weird phandle from OF ... */ /* ... So use the full path instead */ - name = (char *)get_property(of_chosen, "linux,stdout-path", NULL); + name = get_property(of_chosen, "linux,stdout-path", NULL); if (name == NULL) { DBG(" no linux,stdout-path !\n"); return -ENODEV; @@ -525,12 +526,12 @@ static int __init check_legacy_serial_console(void) } DBG("stdout is %s\n", prom_stdout->full_name); - name = (char *)get_property(prom_stdout, "name", NULL); + name = get_property(prom_stdout, "name", NULL); if (!name) { DBG(" stdout package has no name !\n"); goto not_found; } - spd = (u32 *)get_property(prom_stdout, "current-speed", NULL); + spd = get_property(prom_stdout, "current-speed", NULL); if (spd) speed = *spd; diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c index 23f34daa044a..41c05dcd68f4 100644 --- a/arch/powerpc/kernel/lparcfg.c +++ b/arch/powerpc/kernel/lparcfg.c @@ -32,7 +32,6 @@ #include <asm/rtas.h> #include <asm/system.h> #include <asm/time.h> -#include <asm/iseries/it_exp_vpd_panel.h> #include <asm/prom.h> #include <asm/vdso_datapage.h> @@ -183,8 +182,14 @@ static unsigned int h_get_ppp(unsigned long *entitled, unsigned long *resource) { unsigned long rc; - rc = plpar_hcall_4out(H_GET_PPP, 0, 0, 0, 0, entitled, unallocated, - aggregation, resource); + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + + rc = plpar_hcall(H_GET_PPP, retbuf); + + *entitled = retbuf[0]; + *unallocated = retbuf[1]; + *aggregation = retbuf[2]; + *resource = retbuf[3]; log_plpar_hcall_return(rc, "H_GET_PPP"); @@ -194,8 +199,12 @@ static unsigned int h_get_ppp(unsigned long *entitled, static void h_pic(unsigned long *pool_idle_time, unsigned long *num_procs) { unsigned long rc; - unsigned long dummy; - rc = plpar_hcall(H_PIC, 0, 0, 0, 0, pool_idle_time, num_procs, &dummy); + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + + rc = plpar_hcall(H_PIC, retbuf); + + *pool_idle_time = retbuf[0]; + *num_procs = retbuf[1]; if (rc != H_AUTHORITY) log_plpar_hcall_return(rc, "H_PIC"); @@ 
-310,12 +319,11 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) int partition_potential_processors; int partition_active_processors; struct device_node *rtas_node; - int *lrdrp = NULL; + const int *lrdrp = NULL; rtas_node = find_path_device("/rtas"); if (rtas_node) - lrdrp = (int *)get_property(rtas_node, "ibm,lrdr-capacity", - NULL); + lrdrp = get_property(rtas_node, "ibm,lrdr-capacity", NULL); if (lrdrp == NULL) { partition_potential_processors = vdso_data->processorCount; @@ -520,7 +528,8 @@ static int lparcfg_data(struct seq_file *m, void *v) const char *model = ""; const char *system_id = ""; const char *tmp; - unsigned int *lp_index_ptr, lp_index = 0; + const unsigned int *lp_index_ptr; + unsigned int lp_index = 0; seq_printf(m, "%s %s \n", MODULE_NAME, MODULE_VERS); @@ -540,8 +549,7 @@ static int lparcfg_data(struct seq_file *m, void *v) if (firmware_has_feature(FW_FEATURE_ISERIES)) system_id += 4; } - lp_index_ptr = (unsigned int *) - get_property(rootdn, "ibm,partition-no", NULL); + lp_index_ptr = get_property(rootdn, "ibm,partition-no", NULL); if (lp_index_ptr) lp_index = *lp_index_ptr; } diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index be58985c7681..a24b09c27718 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -31,8 +31,8 @@ int default_machine_kexec_prepare(struct kimage *image) unsigned long begin, end; /* limits of segment */ unsigned long low, high; /* limits of blocked memory range */ struct device_node *node; - unsigned long *basep; - unsigned int *sizep; + const unsigned long *basep; + const unsigned int *sizep; if (!ppc_md.hpte_clear_all) return -ENOENT; @@ -72,10 +72,8 @@ int default_machine_kexec_prepare(struct kimage *image) /* We also should not overwrite the tce tables */ for (node = of_find_node_by_type(NULL, "pci"); node != NULL; node = of_find_node_by_type(node, "pci")) { - basep = (unsigned long *)get_property(node, "linux,tce-base", - NULL); - sizep = (unsigned int *)get_property(node, "linux,tce-size", - NULL); + basep = get_property(node, "linux,tce-base", NULL); + sizep = get_property(node, "linux,tce-size", NULL); if (basep == NULL || sizep == NULL) continue; diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S index f770805f1215..330c9dc7db86 100644 --- a/arch/powerpc/kernel/misc.S +++ b/arch/powerpc/kernel/misc.S @@ -43,162 +43,3 @@ _GLOBAL(add_reloc_offset) add r3,r3,r5 mtlr r0 blr - -/* - * I/O string operations - * - * insb(port, buf, len) - * outsb(port, buf, len) - * insw(port, buf, len) - * outsw(port, buf, len) - * insl(port, buf, len) - * outsl(port, buf, len) - * insw_ns(port, buf, len) - * outsw_ns(port, buf, len) - * insl_ns(port, buf, len) - * outsl_ns(port, buf, len) - * - * The *_ns versions don't do byte-swapping. 
- */ -_GLOBAL(_insb) - sync - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,1 - blelr- -00: lbz r5,0(r3) - eieio - stbu r5,1(r4) - bdnz 00b - twi 0,r5,0 - isync - blr - -_GLOBAL(_outsb) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,1 - blelr- - sync -00: lbzu r5,1(r4) - stb r5,0(r3) - bdnz 00b - sync - blr - -_GLOBAL(_insw) - sync - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,2 - blelr- -00: lhbrx r5,0,r3 - eieio - sthu r5,2(r4) - bdnz 00b - twi 0,r5,0 - isync - blr - -_GLOBAL(_outsw) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,2 - blelr- - sync -00: lhzu r5,2(r4) - sthbrx r5,0,r3 - bdnz 00b - sync - blr - -_GLOBAL(_insl) - sync - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,4 - blelr- -00: lwbrx r5,0,r3 - eieio - stwu r5,4(r4) - bdnz 00b - twi 0,r5,0 - isync - blr - -_GLOBAL(_outsl) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,4 - blelr- - sync -00: lwzu r5,4(r4) - stwbrx r5,0,r3 - bdnz 00b - sync - blr - -#ifdef CONFIG_PPC32 -_GLOBAL(__ide_mm_insw) -#endif -_GLOBAL(_insw_ns) - sync - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,2 - blelr- -00: lhz r5,0(r3) - eieio - sthu r5,2(r4) - bdnz 00b - twi 0,r5,0 - isync - blr - -#ifdef CONFIG_PPC32 -_GLOBAL(__ide_mm_outsw) -#endif -_GLOBAL(_outsw_ns) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,2 - blelr- - sync -00: lhzu r5,2(r4) - sth r5,0(r3) - bdnz 00b - sync - blr - -#ifdef CONFIG_PPC32 -_GLOBAL(__ide_mm_insl) -#endif -_GLOBAL(_insl_ns) - sync - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,4 - blelr- -00: lwz r5,0(r3) - eieio - stwu r5,4(r4) - bdnz 00b - twi 0,r5,0 - isync - blr - -#ifdef CONFIG_PPC32 -_GLOBAL(__ide_mm_outsl) -#endif -_GLOBAL(_outsl_ns) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,4 - blelr- - sync -00: lwzu r5,4(r4) - stw r5,0(r3) - bdnz 00b - sync - blr - diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c index 3262b73a3a68..397c83eda20e 100644 --- a/arch/powerpc/kernel/of_device.c +++ b/arch/powerpc/kernel/of_device.c @@ -189,27 +189,9 @@ void of_release_dev(struct device *dev) int of_device_register(struct of_device *ofdev) { int rc; - struct of_device **odprop; BUG_ON(ofdev->node == NULL); - odprop = (struct of_device **)get_property(ofdev->node, "linux,device", NULL); - if (!odprop) { - struct property *new_prop; - - new_prop = kmalloc(sizeof(struct property) + sizeof(struct of_device *), - GFP_KERNEL); - if (new_prop == NULL) - return -ENOMEM; - new_prop->name = "linux,device"; - new_prop->length = sizeof(sizeof(struct of_device *)); - new_prop->value = (unsigned char *)&new_prop[1]; - odprop = (struct of_device **)new_prop->value; - *odprop = NULL; - prom_add_property(ofdev->node, new_prop); - } - *odprop = ofdev; - rc = device_register(&ofdev->dev); if (rc) return rc; @@ -221,14 +203,8 @@ int of_device_register(struct of_device *ofdev) void of_device_unregister(struct of_device *ofdev) { - struct of_device **odprop; - device_remove_file(&ofdev->dev, &dev_attr_devspec); - odprop = (struct of_device **)get_property(ofdev->node, "linux,device", NULL); - if (odprop) - *odprop = NULL; - device_unregister(&ofdev->dev); } diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index c68741fed14b..55f1a25085cd 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -17,6 +17,7 @@ #include <asm/lppaca.h> #include <asm/iseries/it_lp_reg_save.h> #include <asm/paca.h> +#include <asm/mmu.h> /* This symbol is provided by the linker - let it fill in the paca @@ -45,6 +46,17 @@ struct lppaca lppaca[] = { }, }; +/* + * 3 persistent SLBs are registered here. 
The buffer will be zero + initially, hence will all be invalid until we actually write them. + */ +struct slb_shadow slb_shadow[] __cacheline_aligned = { + [0 ... (NR_CPUS-1)] = { + .persistent = SLB_NUM_BOLTED, + .buffer_length = sizeof(struct slb_shadow), + }, +}; + /* The Paca is an array with one entry per processor. Each contains an * lppaca, which contains the information shared between the * hypervisor and Linux. @@ -59,7 +71,8 @@ struct lppaca lppaca[] = { .lock_token = 0x8000, \ .paca_index = (number), /* Paca Index */ \ .kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL, \ - .hw_cpu_id = 0xffff, + .hw_cpu_id = 0xffff, \ + .slb_shadow_ptr = &slb_shadow[number], #ifdef CONFIG_PPC_ISERIES #define PACA_INIT_ISERIES(number) \ diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index 09b1e1bbb29b..9b49f8691d29 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -633,12 +633,12 @@ pcibios_alloc_controller(void) static void make_one_node_map(struct device_node* node, u8 pci_bus) { - int *bus_range; + const int *bus_range; int len; if (pci_bus >= pci_bus_count) return; - bus_range = (int *) get_property(node, "bus-range", &len); + bus_range = get_property(node, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) { printk(KERN_WARNING "Can't get bus-range for %s, " "assuming it starts at 0\n", node->full_name); @@ -648,13 +648,13 @@ make_one_node_map(struct device_node* node, u8 pci_bus) for (node=node->child; node != 0;node = node->sibling) { struct pci_dev* dev; - unsigned int *class_code, *reg; + const unsigned int *class_code, *reg; - class_code = (unsigned int *) get_property(node, "class-code", NULL); + class_code = get_property(node, "class-code", NULL); if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI && (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) continue; - reg = (unsigned int *)get_property(node, "reg", NULL); + reg = get_property(node, "reg", NULL); if (!reg) continue; dev = pci_find_slot(pci_bus, ((reg[0] >> 8) & 0xff)); @@ -669,7 +669,7 @@ pcibios_make_OF_bus_map(void) { int i; struct pci_controller* hose; - u8* of_prop_map; + struct property *map_prop; pci_to_OF_bus_map = (u8*)kmalloc(pci_bus_count, GFP_KERNEL); if (!pci_to_OF_bus_map) { @@ -691,9 +691,12 @@ pcibios_make_OF_bus_map(void) continue; make_one_node_map(node, hose->first_busno); } - of_prop_map = get_property(find_path_device("/"), "pci-OF-bus-map", NULL); - if (of_prop_map) - memcpy(of_prop_map, pci_to_OF_bus_map, pci_bus_count); + map_prop = of_find_property(find_path_device("/"), + "pci-OF-bus-map", NULL); + if (map_prop) { + BUG_ON(pci_bus_count > map_prop->length); + memcpy(map_prop->value, pci_to_OF_bus_map, pci_bus_count); + } #ifdef DEBUG printk("PCI->OF bus map:\n"); for (i=0; i<pci_bus_count; i++) { @@ -712,7 +715,7 @@ scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void* struct device_node* sub_node; for (; node != 0;node = node->sibling) { - unsigned int *class_code; + const unsigned int *class_code; if (filter(node, data)) return node; @@ -722,7 +725,7 @@ scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void* * a fake root for all functions of a multi-function device, * we go down them as well. 
*/ - class_code = (unsigned int *) get_property(node, "class-code", NULL); + class_code = get_property(node, "class-code", NULL); if ((!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI && (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) && strcmp(node->name, "multifunc-device")) @@ -737,10 +740,10 @@ scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void* static int scan_OF_pci_childs_iterator(struct device_node* node, void* data) { - unsigned int *reg; + const unsigned int *reg; u8* fdata = (u8*)data; - reg = (unsigned int *) get_property(node, "reg", NULL); + reg = get_property(node, "reg", NULL); if (reg && ((reg[0] >> 8) & 0xff) == fdata[1] && ((reg[0] >> 16) & 0xff) == fdata[0]) return 1; @@ -841,7 +844,7 @@ find_OF_pci_device_filter(struct device_node* node, void* data) int pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn) { - unsigned int *reg; + const unsigned int *reg; struct pci_controller* hose; struct pci_dev* dev = NULL; @@ -854,7 +857,7 @@ pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn) if (!scan_OF_pci_childs(((struct device_node*)hose->arch_data)->child, find_OF_pci_device_filter, (void *)node)) return -ENODEV; - reg = (unsigned int *) get_property(node, "reg", NULL); + reg = get_property(node, "reg", NULL); if (!reg) return -ENODEV; *bus = (reg[0] >> 16) & 0xff; @@ -885,8 +888,8 @@ pci_process_bridge_OF_ranges(struct pci_controller *hose, struct device_node *dev, int primary) { static unsigned int static_lc_ranges[256] __initdata; - unsigned int *dt_ranges, *lc_ranges, *ranges, *prev; - unsigned int size; + const unsigned int *dt_ranges; + unsigned int *lc_ranges, *ranges, *prev, size; int rlen = 0, orig_rlen; int memno = 0; struct resource *res; @@ -897,7 +900,7 @@ pci_process_bridge_OF_ranges(struct pci_controller *hose, * that can have more than 3 ranges, fortunately using contiguous * addresses -- BenH */ - dt_ranges = (unsigned int *) get_property(dev, "ranges", &rlen); + dt_ranges = get_property(dev, "ranges", &rlen); if (!dt_ranges) return; /* Sanity check, though hopefully that never happens */ diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index 138134c8c17d..c1b1e14775e4 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -185,34 +185,6 @@ static void __devinit pci_setup_pci_controller(struct pci_controller *hose) spin_unlock(&hose_spinlock); } -static void add_linux_pci_domain(struct device_node *dev, - struct pci_controller *phb) -{ - struct property *of_prop; - unsigned int size; - - of_prop = (struct property *) - get_property(dev, "linux,pci-domain", &size); - if (of_prop != NULL) - return; - WARN_ON(of_prop && size < sizeof(int)); - if (of_prop && size < sizeof(int)) - of_prop = NULL; - size = sizeof(struct property) + sizeof(int); - if (of_prop == NULL) { - if (mem_init_done) - of_prop = kmalloc(size, GFP_KERNEL); - else - of_prop = alloc_bootmem(size); - } - memset(of_prop, 0, sizeof(struct property)); - of_prop->name = "linux,pci-domain"; - of_prop->length = sizeof(int); - of_prop->value = (unsigned char *)&of_prop[1]; - *((int *)of_prop->value) = phb->global_number; - prom_add_property(dev, of_prop); -} - struct pci_controller * pcibios_alloc_controller(struct device_node *dev) { struct pci_controller *phb; @@ -226,22 +198,13 @@ struct pci_controller * pcibios_alloc_controller(struct device_node *dev) pci_setup_pci_controller(phb); phb->arch_data = dev; phb->is_dynamic = mem_init_done; - if (dev) { + if (dev) PHB_SET_NODE(phb, 
of_node_to_nid(dev)); - add_linux_pci_domain(dev, phb); - } return phb; } void pcibios_free_controller(struct pci_controller *phb) { - if (phb->arch_data) { - struct device_node *np = phb->arch_data; - int *domain = (int *)get_property(np, - "linux,pci-domain", NULL); - if (domain) - *domain = -1; - } if (phb->is_dynamic) kfree(phb); } @@ -283,10 +246,10 @@ static void __init pcibios_claim_of_setup(void) #ifdef CONFIG_PPC_MULTIPLATFORM static u32 get_int_prop(struct device_node *np, const char *name, u32 def) { - u32 *prop; + const u32 *prop; int len; - prop = (u32 *) get_property(np, name, &len); + prop = get_property(np, name, &len); if (prop && len >= 4) return *prop; return def; @@ -315,10 +278,11 @@ static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev) u64 base, size; unsigned int flags; struct resource *res; - u32 *addrs, i; + const u32 *addrs; + u32 i; int proplen; - addrs = (u32 *) get_property(node, "assigned-addresses", &proplen); + addrs = get_property(node, "assigned-addresses", &proplen); if (!addrs) return; DBG(" parse addresses (%d bytes) @ %p\n", proplen, addrs); @@ -418,7 +382,7 @@ void __devinit of_scan_bus(struct device_node *node, struct pci_bus *bus) { struct device_node *child = NULL; - u32 *reg; + const u32 *reg; int reglen, devfn; struct pci_dev *dev; @@ -426,7 +390,7 @@ void __devinit of_scan_bus(struct device_node *node, while ((child = of_get_next_child(node, child)) != NULL) { DBG(" * %s\n", child->full_name); - reg = (u32 *) get_property(child, "reg", ®len); + reg = get_property(child, "reg", ®len); if (reg == NULL || reglen < 20) continue; devfn = (reg[0] >> 8) & 0xff; @@ -450,7 +414,7 @@ void __devinit of_scan_pci_bridge(struct device_node *node, struct pci_dev *dev) { struct pci_bus *bus; - u32 *busrange, *ranges; + const u32 *busrange, *ranges; int len, i, mode; struct resource *res; unsigned int flags; @@ -459,13 +423,13 @@ void __devinit of_scan_pci_bridge(struct device_node *node, DBG("of_scan_pci_bridge(%s)\n", node->full_name); /* parse bus-range property */ - busrange = (u32 *) get_property(node, "bus-range", &len); + busrange = get_property(node, "bus-range", &len); if (busrange == NULL || len != 8) { printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n", node->full_name); return; } - ranges = (u32 *) get_property(node, "ranges", &len); + ranges = get_property(node, "ranges", &len); if (ranges == NULL) { printk(KERN_DEBUG "Can't get ranges for PCI-PCI bridge %s\n", node->full_name); @@ -929,13 +893,13 @@ static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node, unsigned int size; }; - struct isa_range *range; + const struct isa_range *range; unsigned long pci_addr; unsigned int isa_addr; unsigned int size; int rlen = 0; - range = (struct isa_range *) get_property(isa_node, "ranges", &rlen); + range = get_property(isa_node, "ranges", &rlen); if (range == NULL || (rlen < sizeof(struct isa_range))) { printk(KERN_ERR "no ISA ranges or unexpected isa range size," "mapping 64k\n"); @@ -976,7 +940,8 @@ static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node, void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose, struct device_node *dev, int prim) { - unsigned int *ranges, pci_space; + const unsigned int *ranges; + unsigned int pci_space; unsigned long size; int rlen = 0; int memno = 0; @@ -994,7 +959,7 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose, * (size depending on dev->n_addr_cells) * cells 4+5 or 5+6: the size of the range */ - 
ranges = (unsigned int *) get_property(dev, "ranges", &rlen); + ranges = get_property(dev, "ranges", &rlen); if (ranges == NULL) return; hose->io_base_phys = 0; diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c index 1c18953514c3..68df018dae0e 100644 --- a/arch/powerpc/kernel/pci_dn.c +++ b/arch/powerpc/kernel/pci_dn.c @@ -40,8 +40,8 @@ static void * __devinit update_dn_pci_info(struct device_node *dn, void *data) { struct pci_controller *phb = data; - int *type = (int *)get_property(dn, "ibm,pci-config-space-type", NULL); - u32 *regs; + const int *type = get_property(dn, "ibm,pci-config-space-type", NULL); + const u32 *regs; struct pci_dn *pdn; if (mem_init_done) @@ -54,14 +54,14 @@ static void * __devinit update_dn_pci_info(struct device_node *dn, void *data) dn->data = pdn; pdn->node = dn; pdn->phb = phb; - regs = (u32 *)get_property(dn, "reg", NULL); + regs = get_property(dn, "reg", NULL); if (regs) { /* First register entry is addr (00BBSS00) */ pdn->busno = (regs[0] >> 16) & 0xff; pdn->devfn = (regs[0] >> 8) & 0xff; } if (firmware_has_feature(FW_FEATURE_ISERIES)) { - u32 *busp = (u32 *)get_property(dn, "linux,subbus", NULL); + const u32 *busp = get_property(dn, "linux,subbus", NULL); if (busp) pdn->bussubno = *busp; } @@ -96,10 +96,11 @@ void *traverse_pci_devices(struct device_node *start, traverse_func pre, /* We started with a phb, iterate all children */ for (dn = start->child; dn; dn = nextdn) { - u32 *classp, class; + const u32 *classp; + u32 class; nextdn = NULL; - classp = (u32 *)get_property(dn, "class-code", NULL); + classp = get_property(dn, "class-code", NULL); class = classp ? *classp : 0; if (pre && ((ret = pre(dn, data)) != NULL)) diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index 39d3bfcabcd2..807193a3c784 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -91,25 +91,10 @@ EXPORT_SYMBOL(__copy_tofrom_user); EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(__strncpy_from_user); EXPORT_SYMBOL(__strnlen_user); - -#ifndef __powerpc64__ -EXPORT_SYMBOL(__ide_mm_insl); -EXPORT_SYMBOL(__ide_mm_outsw); -EXPORT_SYMBOL(__ide_mm_insw); -EXPORT_SYMBOL(__ide_mm_outsl); +#ifdef CONFIG_PPC64 +EXPORT_SYMBOL(copy_4K_page); #endif -EXPORT_SYMBOL(_insb); -EXPORT_SYMBOL(_outsb); -EXPORT_SYMBOL(_insw); -EXPORT_SYMBOL(_outsw); -EXPORT_SYMBOL(_insl); -EXPORT_SYMBOL(_outsl); -EXPORT_SYMBOL(_insw_ns); -EXPORT_SYMBOL(_outsw_ns); -EXPORT_SYMBOL(_insl_ns); -EXPORT_SYMBOL(_outsl_ns); - #if defined(CONFIG_PPC32) && (defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)) EXPORT_SYMBOL(ppc_ide_md); #endif diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index a1787ffb6319..eb913f80bfb1 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -757,24 +757,9 @@ static int __init early_init_dt_scan_root(unsigned long node, static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp) { cell_t *p = *cellp; - unsigned long r; - /* Ignore more than 2 cells */ - while (s > sizeof(unsigned long) / 4) { - p++; - s--; - } - r = *p++; -#ifdef CONFIG_PPC64 - if (s > 1) { - r <<= 32; - r |= *(p++); - s--; - } -#endif - - *cellp = p; - return r; + *cellp = p + s; + return of_read_ulong(p, s); } @@ -942,11 +927,11 @@ void __init early_init_devtree(void *params) int prom_n_addr_cells(struct device_node* np) { - int* ip; + const int *ip; do { if (np->parent) np = np->parent; - ip = (int *) get_property(np, "#address-cells", NULL); + ip = get_property(np, "#address-cells", NULL); 
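A note on the dt_mem_next_cell() rewrite above: flattened-device-tree values are arrays of 32-bit cells with the most significant cell first, and of_read_ulong() folds up to two of them into one value. The sketch below is a minimal user-space model of that convention, not kernel code: it assumes a 64-bit result type, ignores FDT byte-swapping, and the helper names read_cells() and next_cell() are made up for illustration.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t cell_t;

    /* Combine 's' cells into one value, most significant cell first. */
    static uint64_t read_cells(const cell_t *p, int s)
    {
            uint64_t r = 0;

            while (s--)
                    r = (r << 32) | *p++;
            return r;
    }

    /* Model of dt_mem_next_cell(): fetch one value, advance the cursor. */
    static uint64_t next_cell(int s, const cell_t **cellp)
    {
            const cell_t *p = *cellp;

            *cellp = p + s;
            return read_cells(p, s);
    }

    int main(void)
    {
            /* A made-up "reg" entry: 2 address cells, 2 size cells. */
            const cell_t reg[] = { 0x0, 0x80000000, 0x0, 0x10000000 };
            const cell_t *p = reg;
            uint64_t base = next_cell(2, &p);
            uint64_t size = next_cell(2, &p);

            printf("base 0x%llx size 0x%llx\n",
                   (unsigned long long)base, (unsigned long long)size);
            return 0;
    }

Built with any C99 compiler, this prints base 0x80000000 size 0x10000000 for the sample "reg" entry.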
if (ip != NULL) return *ip; } while (np->parent); @@ -958,11 +943,11 @@ EXPORT_SYMBOL(prom_n_addr_cells); int prom_n_size_cells(struct device_node* np) { - int* ip; + const int* ip; do { if (np->parent) np = np->parent; - ip = (int *) get_property(np, "#size-cells", NULL); + ip = get_property(np, "#size-cells", NULL); if (ip != NULL) return *ip; } while (np->parent); @@ -1034,7 +1019,7 @@ int device_is_compatible(struct device_node *device, const char *compat) const char* cp; int cplen, l; - cp = (char *) get_property(device, "compatible", &cplen); + cp = get_property(device, "compatible", &cplen); if (cp == NULL) return 0; while (cplen > 0) { @@ -1449,7 +1434,7 @@ static int of_finish_dynamic_node(struct device_node *node) { struct device_node *parent = of_get_parent(node); int err = 0; - phandle *ibm_phandle; + const phandle *ibm_phandle; node->name = get_property(node, "name", NULL); node->type = get_property(node, "device_type", NULL); @@ -1466,8 +1451,7 @@ static int of_finish_dynamic_node(struct device_node *node) return -ENODEV; /* fix up new node's linux_phandle field */ - if ((ibm_phandle = (unsigned int *)get_property(node, - "ibm,phandle", NULL))) + if ((ibm_phandle = get_property(node, "ibm,phandle", NULL))) node->linux_phandle = *ibm_phandle; out: @@ -1528,7 +1512,7 @@ struct property *of_find_property(struct device_node *np, const char *name, * Find a property with a given name for a given node * and return the value. */ -void *get_property(struct device_node *np, const char *name, int *lenp) +const void *get_property(struct device_node *np, const char *name, int *lenp) { struct property *pp = of_find_property(np,name,lenp); return pp ? pp->value : NULL; @@ -1658,16 +1642,16 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread) hardid = get_hard_smp_processor_id(cpu); for_each_node_by_type(np, "cpu") { - u32 *intserv; + const u32 *intserv; unsigned int plen, t; /* Check for ibm,ppc-interrupt-server#s. 
If it doesn't exist * fall back to "reg" property and assume no threads */ - intserv = (u32 *)get_property(np, "ibm,ppc-interrupt-server#s", - &plen); + intserv = get_property(np, "ibm,ppc-interrupt-server#s", + &plen); if (intserv == NULL) { - u32 *reg = (u32 *)get_property(np, "reg", NULL); + const u32 *reg = get_property(np, "reg", NULL); if (reg == NULL) continue; if (*reg == hardid) { diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 4394e545f9f7..b91761639d96 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -2033,16 +2033,22 @@ static void __init fixup_device_tree_maple(void) #endif #ifdef CONFIG_PPC_CHRP -/* Pegasos lacks the "ranges" property in the isa node */ +/* Pegasos and BriQ lack the "ranges" property in the isa node */ static void __init fixup_device_tree_chrp(void) { phandle isa; u32 isa_ranges[6]; + u32 rloc = 0x01006000; /* IO space; PCI device = 12 */ char *name; int rc; name = "/pci@80000000/isa@c"; isa = call_prom("finddevice", 1, 1, ADDR(name)); + if (!PHANDLE_VALID(isa)) { + name = "/pci@ff500000/isa@6"; + isa = call_prom("finddevice", 1, 1, ADDR(name)); + rloc = 0x01003000; /* IO space; PCI device = 6 */ + } if (!PHANDLE_VALID(isa)) return; @@ -2054,7 +2060,7 @@ static void __init fixup_device_tree_chrp(void) isa_ranges[0] = 0x1; isa_ranges[1] = 0x0; - isa_ranges[2] = 0x01006000; + isa_ranges[2] = rloc; isa_ranges[3] = 0x0; isa_ranges[4] = 0x0; isa_ranges[5] = 0x00010000; diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c index a10825a5dfe6..603dff3ad62a 100644 --- a/arch/powerpc/kernel/prom_parse.c +++ b/arch/powerpc/kernel/prom_parse.c @@ -27,7 +27,7 @@ /* Debug utility */ #ifdef DEBUG -static void of_dump_addr(const char *s, u32 *addr, int na) +static void of_dump_addr(const char *s, const u32 *addr, int na) { printk("%s", s); while(na--) @@ -35,7 +35,7 @@ static void of_dump_addr(const char *s, u32 *addr, int na) printk("\n"); } #else -static void of_dump_addr(const char *s, u32 *addr, int na) { } +static void of_dump_addr(const char *s, const u32 *addr, int na) { } #endif @@ -46,9 +46,10 @@ struct of_bus { int (*match)(struct device_node *parent); void (*count_cells)(struct device_node *child, int *addrc, int *sizec); - u64 (*map)(u32 *addr, u32 *range, int na, int ns, int pna); + u64 (*map)(u32 *addr, const u32 *range, + int na, int ns, int pna); int (*translate)(u32 *addr, u64 offset, int na); - unsigned int (*get_flags)(u32 *addr); + unsigned int (*get_flags)(const u32 *addr); }; @@ -65,7 +66,8 @@ static void of_bus_default_count_cells(struct device_node *dev, *sizec = prom_n_size_cells(dev); } -static u64 of_bus_default_map(u32 *addr, u32 *range, int na, int ns, int pna) +static u64 of_bus_default_map(u32 *addr, const u32 *range, + int na, int ns, int pna) { u64 cp, s, da; @@ -93,7 +95,7 @@ static int of_bus_default_translate(u32 *addr, u64 offset, int na) return 0; } -static unsigned int of_bus_default_get_flags(u32 *addr) +static unsigned int of_bus_default_get_flags(const u32 *addr) { return IORESOURCE_MEM; } @@ -118,7 +120,7 @@ static void of_bus_pci_count_cells(struct device_node *np, *sizec = 2; } -static u64 of_bus_pci_map(u32 *addr, u32 *range, int na, int ns, int pna) +static u64 of_bus_pci_map(u32 *addr, const u32 *range, int na, int ns, int pna) { u64 cp, s, da; @@ -143,7 +145,7 @@ static int of_bus_pci_translate(u32 *addr, u64 offset, int na) return of_bus_default_translate(addr + 1, offset, na - 1); } -static unsigned int 
of_bus_pci_get_flags(u32 *addr) +static unsigned int of_bus_pci_get_flags(const u32 *addr) { unsigned int flags = 0; u32 w = addr[0]; @@ -178,7 +180,7 @@ static void of_bus_isa_count_cells(struct device_node *child, *sizec = 1; } -static u64 of_bus_isa_map(u32 *addr, u32 *range, int na, int ns, int pna) +static u64 of_bus_isa_map(u32 *addr, const u32 *range, int na, int ns, int pna) { u64 cp, s, da; @@ -203,7 +205,7 @@ static int of_bus_isa_translate(u32 *addr, u64 offset, int na) return of_bus_default_translate(addr + 1, offset, na - 1); } -static unsigned int of_bus_isa_get_flags(u32 *addr) +static unsigned int of_bus_isa_get_flags(const u32 *addr) { unsigned int flags = 0; u32 w = addr[0]; @@ -268,7 +270,7 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus, struct of_bus *pbus, u32 *addr, int na, int ns, int pna) { - u32 *ranges; + const u32 *ranges; unsigned int rlen; int rone; u64 offset = OF_BAD_ADDR; @@ -285,7 +287,7 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus, * to translate addresses that aren't supposed to be translated in * the first place. --BenH. */ - ranges = (u32 *)get_property(parent, "ranges", &rlen); + ranges = get_property(parent, "ranges", &rlen); if (ranges == NULL || rlen == 0) { offset = of_read_number(addr, na); memset(addr, 0, pna * 4); @@ -328,7 +330,7 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus, * that can be mapped to a cpu physical address). This is not really specified * that way, but this is traditionally the way IBM at least do things */ -u64 of_translate_address(struct device_node *dev, u32 *in_addr) +u64 of_translate_address(struct device_node *dev, const u32 *in_addr) { struct device_node *parent = NULL; struct of_bus *bus, *pbus; @@ -405,10 +407,10 @@ u64 of_translate_address(struct device_node *dev, u32 *in_addr) } EXPORT_SYMBOL(of_translate_address); -u32 *of_get_address(struct device_node *dev, int index, u64 *size, +const u32 *of_get_address(struct device_node *dev, int index, u64 *size, unsigned int *flags) { - u32 *prop; + const u32 *prop; unsigned int psize; struct device_node *parent; struct of_bus *bus; @@ -425,7 +427,7 @@ u32 *of_get_address(struct device_node *dev, int index, u64 *size, return NULL; /* Get "reg" or "assigned-addresses" property */ - prop = (u32 *)get_property(dev, bus->addresses, &psize); + prop = get_property(dev, bus->addresses, &psize); if (prop == NULL) return NULL; psize /= 4; @@ -443,10 +445,10 @@ u32 *of_get_address(struct device_node *dev, int index, u64 *size, } EXPORT_SYMBOL(of_get_address); -u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size, +const u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size, unsigned int *flags) { - u32 *prop; + const u32 *prop; unsigned int psize; struct device_node *parent; struct of_bus *bus; @@ -467,7 +469,7 @@ u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size, return NULL; /* Get "reg" or "assigned-addresses" property */ - prop = (u32 *)get_property(dev, bus->addresses, &psize); + prop = get_property(dev, bus->addresses, &psize); if (prop == NULL) return NULL; psize /= 4; @@ -485,7 +487,7 @@ u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size, } EXPORT_SYMBOL(of_get_pci_address); -static int __of_address_to_resource(struct device_node *dev, u32 *addrp, +static int __of_address_to_resource(struct device_node *dev, const u32 *addrp, u64 size, unsigned int flags, struct resource *r) { @@ -516,7 +518,7 @@ static int 
__of_address_to_resource(struct device_node *dev, u32 *addrp, int of_address_to_resource(struct device_node *dev, int index, struct resource *r) { - u32 *addrp; + const u32 *addrp; u64 size; unsigned int flags; @@ -530,7 +532,7 @@ EXPORT_SYMBOL_GPL(of_address_to_resource); int of_pci_address_to_resource(struct device_node *dev, int bar, struct resource *r) { - u32 *addrp; + const u32 *addrp; u64 size; unsigned int flags; @@ -541,13 +543,14 @@ int of_pci_address_to_resource(struct device_node *dev, int bar, } EXPORT_SYMBOL_GPL(of_pci_address_to_resource); -void of_parse_dma_window(struct device_node *dn, unsigned char *dma_window_prop, +void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop, unsigned long *busno, unsigned long *phys, unsigned long *size) { - u32 *dma_window, cells; - unsigned char *prop; + const u32 *dma_window; + u32 cells; + const unsigned char *prop; - dma_window = (u32 *)dma_window_prop; + dma_window = dma_window_prop; /* busno is always one cell */ *busno = *(dma_window++); @@ -576,13 +579,13 @@ static struct device_node *of_irq_dflt_pic; static struct device_node *of_irq_find_parent(struct device_node *child) { struct device_node *p; - phandle *parp; + const phandle *parp; if (!of_node_get(child)) return NULL; do { - parp = (phandle *)get_property(child, "interrupt-parent", NULL); + parp = get_property(child, "interrupt-parent", NULL); if (parp == NULL) p = of_get_parent(child); else { @@ -639,11 +642,11 @@ void of_irq_map_init(unsigned int flags) } -int of_irq_map_raw(struct device_node *parent, u32 *intspec, u32 ointsize, - u32 *addr, struct of_irq *out_irq) +int of_irq_map_raw(struct device_node *parent, const u32 *intspec, u32 ointsize, + const u32 *addr, struct of_irq *out_irq) { struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL; - u32 *tmp, *imap, *imask; + const u32 *tmp, *imap, *imask; u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0; int imaplen, match, i; @@ -657,7 +660,7 @@ int of_irq_map_raw(struct device_node *parent, u32 *intspec, u32 ointsize, * is none, we are nice and just walk up the tree */ do { - tmp = (u32 *)get_property(ipar, "#interrupt-cells", NULL); + tmp = get_property(ipar, "#interrupt-cells", NULL); if (tmp != NULL) { intsize = *tmp; break; @@ -681,7 +684,7 @@ int of_irq_map_raw(struct device_node *parent, u32 *intspec, u32 ointsize, */ old = of_node_get(ipar); do { - tmp = (u32 *)get_property(old, "#address-cells", NULL); + tmp = get_property(old, "#address-cells", NULL); tnode = of_get_parent(old); of_node_put(old); old = tnode; @@ -708,7 +711,7 @@ int of_irq_map_raw(struct device_node *parent, u32 *intspec, u32 ointsize, } /* Now look for an interrupt-map */ - imap = (u32 *)get_property(ipar, "interrupt-map", &imaplen); + imap = get_property(ipar, "interrupt-map", &imaplen); /* No interrupt map, check for an interrupt parent */ if (imap == NULL) { DBG(" -> no map, getting parent\n"); @@ -718,7 +721,7 @@ int of_irq_map_raw(struct device_node *parent, u32 *intspec, u32 ointsize, imaplen /= sizeof(u32); /* Look for a mask */ - imask = (u32 *)get_property(ipar, "interrupt-map-mask", NULL); + imask = get_property(ipar, "interrupt-map-mask", NULL); /* If we were passed no "reg" property and we attempt to parse * an interrupt-map, then #address-cells must be 0. 
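The hunks above and below continue the central theme of this patch: get_property() now returns const data, so device-tree contents held in firmware-owned memory can only be read, never modified through a casual cast. The following stand-alone sketch illustrates why the const return type helps; the property table and prop_find() are invented for illustration and are not the kernel's implementation.

    #include <stdio.h>
    #include <string.h>

    struct property {
            const char *name;
            const void *value;   /* firmware-owned, read-only */
            int length;
    };

    static const unsigned int clock_freq = 33333333;

    static const struct property props[] = {
            { "clock-frequency", &clock_freq, sizeof(clock_freq) },
    };

    /* Returning 'const void *' makes writes through the result an error. */
    static const void *prop_find(const char *name, int *lenp)
    {
            size_t i;

            for (i = 0; i < sizeof(props) / sizeof(props[0]); i++) {
                    if (strcmp(props[i].name, name) == 0) {
                            if (lenp)
                                    *lenp = props[i].length;
                            return props[i].value;
                    }
            }
            return NULL;
    }

    int main(void)
    {
            const unsigned int *clk = prop_find("clock-frequency", NULL);

            if (clk)
                    printf("clock: %u Hz\n", *clk);
            /* '*clk = 0;' no longer compiles, which is the point. */
            return 0;
    }

With the old non-const return type the commented-out write would compile silently; with const the compiler rejects it, which is the class of bug this conversion guards against.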
@@ -765,14 +768,14 @@ int of_irq_map_raw(struct device_node *parent, u32 *intspec, u32 ointsize, /* Get #interrupt-cells and #address-cells of new * parent */ - tmp = (u32 *)get_property(newpar, "#interrupt-cells", + tmp = get_property(newpar, "#interrupt-cells", NULL); if (tmp == NULL) { DBG(" -> parent lacks #interrupt-cells !\n"); goto fail; } newintsize = *tmp; - tmp = (u32 *)get_property(newpar, "#address-cells", + tmp = get_property(newpar, "#address-cells", NULL); newaddrsize = (tmp == NULL) ? 0 : *tmp; @@ -818,14 +821,14 @@ EXPORT_SYMBOL_GPL(of_irq_map_raw); static int of_irq_map_oldworld(struct device_node *device, int index, struct of_irq *out_irq) { - u32 *ints; + const u32 *ints; int intlen; /* * Old machines just have a list of interrupt numbers * and no interrupt-controller nodes. */ - ints = (u32 *) get_property(device, "AAPL,interrupts", &intlen); + ints = get_property(device, "AAPL,interrupts", &intlen); if (ints == NULL) return -EINVAL; intlen /= sizeof(u32); @@ -850,7 +853,8 @@ static int of_irq_map_oldworld(struct device_node *device, int index, int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq) { struct device_node *p; - u32 *intspec, *tmp, intsize, intlen, *addr; + const u32 *intspec, *tmp, *addr; + u32 intsize, intlen; int res; DBG("of_irq_map_one: dev=%s, index=%d\n", device->full_name, index); @@ -860,13 +864,13 @@ int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq return of_irq_map_oldworld(device, index, out_irq); /* Get the interrupts property */ - intspec = (u32 *)get_property(device, "interrupts", &intlen); + intspec = get_property(device, "interrupts", &intlen); if (intspec == NULL) return -EINVAL; intlen /= sizeof(u32); /* Get the reg property (if any) */ - addr = (u32 *)get_property(device, "reg", NULL); + addr = get_property(device, "reg", NULL); /* Look for the interrupt parent. 
*/ p = of_irq_find_parent(device); @@ -874,7 +878,7 @@ int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq return -EINVAL; /* Get size of interrupt specifier */ - tmp = (u32 *)get_property(p, "#interrupt-cells", NULL); + tmp = get_property(p, "#interrupt-cells", NULL); if (tmp == NULL) { of_node_put(p); return -EINVAL; } diff --git a/arch/powerpc/kernel/rtas-proc.c b/arch/powerpc/kernel/rtas-proc.c index 9c9ad1fa9cce..2fe82abf1c52 100644 --- a/arch/powerpc/kernel/rtas-proc.c +++ b/arch/powerpc/kernel/rtas-proc.c @@ -246,12 +246,12 @@ struct file_operations ppc_rtas_rmo_buf_ops = { static int ppc_rtas_find_all_sensors(void); static void ppc_rtas_process_sensor(struct seq_file *m, - struct individual_sensor *s, int state, int error, char *loc); + struct individual_sensor *s, int state, int error, const char *loc); static char *ppc_rtas_process_error(int error); static void get_location_code(struct seq_file *m, - struct individual_sensor *s, char *loc); -static void check_location_string(struct seq_file *m, char *c); -static void check_location(struct seq_file *m, char *c); + struct individual_sensor *s, const char *loc); +static void check_location_string(struct seq_file *m, const char *c); +static void check_location(struct seq_file *m, const char *c); static int __init proc_rtas_init(void) { @@ -446,11 +446,11 @@ static int ppc_rtas_sensors_show(struct seq_file *m, void *v) for (i=0; i<sensors.quant; i++) { struct individual_sensor *p = &sensors.sensor[i]; char rstr[64]; - char *loc; + const char *loc; int llen, offs; sprintf (rstr, SENSOR_PREFIX"%04d", p->token); - loc = (char *) get_property(rtas_node, rstr, &llen); + loc = get_property(rtas_node, rstr, &llen); /* A sensor may have multiple instances */ for (j = 0, offs = 0; j <= p->quant; j++) { @@ -474,10 +474,10 @@ static int ppc_rtas_sensors_show(struct seq_file *m, void *v) static int ppc_rtas_find_all_sensors(void) { - unsigned int *utmp; + const unsigned int *utmp; int len, i; - utmp = (unsigned int *) get_property(rtas_node, "rtas-sensors", &len); + utmp = get_property(rtas_node, "rtas-sensors", &len); if (utmp == NULL) { printk (KERN_ERR "error: could not get rtas-sensors\n"); return 1; @@ -530,7 +530,7 @@ static char *ppc_rtas_process_error(int error) */ static void ppc_rtas_process_sensor(struct seq_file *m, - struct individual_sensor *s, int state, int error, char *loc) + struct individual_sensor *s, int state, int error, const char *loc) { /* Defined return values */ const char * key_switch[] = { "Off\t", "Normal\t", "Secure\t", @@ -682,7 +682,7 @@ static void ppc_rtas_process_sensor(struct seq_file *m, /* ****************************************************************** */ -static void check_location(struct seq_file *m, char *c) +static void check_location(struct seq_file *m, const char *c) { switch (c[0]) { case LOC_PLANAR: @@ -719,7 +719,7 @@ static void check_location(struct seq_file *m, char *c) * ${LETTER}${NUMBER}[[-/]${LETTER}${NUMBER} [ ... ] ] * the '.' 
may be an abbreviation */ -static void check_location_string(struct seq_file *m, char *c) +static void check_location_string(struct seq_file *m, const char *c) { while (*c) { if (isalpha(*c) || *c == '.') @@ -733,7 +733,8 @@ static void check_location_string(struct seq_file *m, char *c) /* ****************************************************************** */ -static void get_location_code(struct seq_file *m, struct individual_sensor *s, char *loc) +static void get_location_code(struct seq_file *m, struct individual_sensor *s, + const char *loc) { if (!loc || !*loc) { seq_printf(m, "---");/* does not have a location */ diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 77f1e06d208d..6ef80d4e38d3 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -177,10 +177,12 @@ void __init udbg_init_rtas_console(void) void rtas_progress(char *s, unsigned short hex) { struct device_node *root; - int width, *p; + int width; + const int *p; char *os; static int display_character, set_indicator; - static int display_width, display_lines, *row_width, form_feed; + static int display_width, display_lines, form_feed; + static const int *row_width; static DEFINE_SPINLOCK(progress_lock); static int current_line; static int pending_newline = 0; /* did last write end with unprinted newline? */ @@ -191,16 +193,16 @@ void rtas_progress(char *s, unsigned short hex) if (display_width == 0) { display_width = 0x10; if ((root = find_path_device("/rtas"))) { - if ((p = (unsigned int *)get_property(root, + if ((p = get_property(root, "ibm,display-line-length", NULL))) display_width = *p; - if ((p = (unsigned int *)get_property(root, + if ((p = get_property(root, "ibm,form-feed", NULL))) form_feed = *p; - if ((p = (unsigned int *)get_property(root, + if ((p = get_property(root, "ibm,display-number-of-lines", NULL))) display_lines = *p; - row_width = (unsigned int *)get_property(root, + row_width = get_property(root, "ibm,display-truncation-length", NULL); } display_character = rtas_token("display-character"); @@ -293,10 +295,10 @@ EXPORT_SYMBOL(rtas_progress); /* needed by rtas_flash module */ int rtas_token(const char *service) { - int *tokp; + const int *tokp; if (rtas.dev == NULL) return RTAS_UNKNOWN_SERVICE; - tokp = (int *) get_property(rtas.dev, service, NULL); + tokp = get_property(rtas.dev, service, NULL); return tokp ? 
*tokp : RTAS_UNKNOWN_SERVICE; } EXPORT_SYMBOL(rtas_token); @@ -626,6 +628,9 @@ void rtas_os_term(char *str) { int status; + if (panic_timeout) + return; + if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term")) return; @@ -687,15 +692,14 @@ static int rtas_ibm_suspend_me(struct rtas_args *args) int i; long state; long rc; - unsigned long dummy; - + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; struct rtas_suspend_me_data data; /* Make sure the state is valid */ - rc = plpar_hcall(H_VASI_STATE, - ((u64)args->args[0] << 32) | args->args[1], - 0, 0, 0, - &state, &dummy, &dummy); + rc = plpar_hcall(H_VASI_STATE, retbuf, + ((u64)args->args[0] << 32) | args->args[1]); + + state = retbuf[0]; if (rc) { printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n",rc); @@ -845,15 +849,15 @@ void __init rtas_initialize(void) */ rtas.dev = of_find_node_by_name(NULL, "rtas"); if (rtas.dev) { - u32 *basep, *entryp; - u32 *sizep; + const u32 *basep, *entryp, *sizep; - basep = (u32 *)get_property(rtas.dev, "linux,rtas-base", NULL); - sizep = (u32 *)get_property(rtas.dev, "rtas-size", NULL); + basep = get_property(rtas.dev, "linux,rtas-base", NULL); + sizep = get_property(rtas.dev, "rtas-size", NULL); if (basep != NULL && sizep != NULL) { rtas.base = *basep; rtas.size = *sizep; - entryp = (u32 *)get_property(rtas.dev, "linux,rtas-entry", NULL); + entryp = get_property(rtas.dev, + "linux,rtas-entry", NULL); if (entryp == NULL) /* Ugh */ rtas.entry = rtas.base; else @@ -909,6 +913,11 @@ int __init early_init_dt_scan_rtas(unsigned long node, basep = of_get_flat_dt_prop(node, "get-term-char", NULL); if (basep) rtas_getchar_token = *basep; + + if (rtas_putchar_token != RTAS_UNKNOWN_SERVICE && + rtas_getchar_token != RTAS_UNKNOWN_SERVICE) + udbg_init_rtas_console(); + #endif /* break now */ diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c index cda022657324..b4a0de79c060 100644 --- a/arch/powerpc/kernel/rtas_pci.c +++ b/arch/powerpc/kernel/rtas_pci.c @@ -57,7 +57,7 @@ static inline int config_access_valid(struct pci_dn *dn, int where) static int of_device_available(struct device_node * dn) { - char * status; + const char *status; status = get_property(dn, "status", NULL); @@ -81,8 +81,7 @@ int rtas_read_config(struct pci_dn *pdn, int where, int size, u32 *val) if (!config_access_valid(pdn, where)) return PCIBIOS_BAD_REGISTER_NUMBER; - addr = ((where & 0xf00) << 20) | (pdn->busno << 16) | - (pdn->devfn << 8) | (where & 0xff); + addr = rtas_config_addr(pdn->busno, pdn->devfn, where); buid = pdn->phb->buid; if (buid) { ret = rtas_call(ibm_read_pci_config, 4, 2, &returnval, @@ -134,8 +133,7 @@ int rtas_write_config(struct pci_dn *pdn, int where, int size, u32 val) if (!config_access_valid(pdn, where)) return PCIBIOS_BAD_REGISTER_NUMBER; - addr = ((where & 0xf00) << 20) | (pdn->busno << 16) | - (pdn->devfn << 8) | (where & 0xff); + addr = rtas_config_addr(pdn->busno, pdn->devfn, where); buid = pdn->phb->buid; if (buid) { ret = rtas_call(ibm_write_pci_config, 5, 1, NULL, addr, @@ -178,7 +176,7 @@ struct pci_ops rtas_pci_ops = { int is_python(struct device_node *dev) { - char *model = (char *)get_property(dev, "model", NULL); + const char *model = get_property(dev, "model", NULL); if (model && strstr(model, "Python")) return 1; @@ -234,7 +232,7 @@ void __init init_pci_config_tokens (void) unsigned long __devinit get_phb_buid (struct device_node *phb) { int addr_cells; - unsigned int *buid_vals; + const unsigned int *buid_vals; unsigned int len; unsigned long buid; @@ -247,7 +245,7 @@ 
unsigned long __devinit get_phb_buid (struct device_node *phb) if (phb->parent->parent) return 0; - buid_vals = (unsigned int *) get_property(phb, "reg", &len); + buid_vals = get_property(phb, "reg", &len); if (buid_vals == NULL) return 0; @@ -264,10 +262,10 @@ unsigned long __devinit get_phb_buid (struct device_node *phb) static int phb_set_bus_ranges(struct device_node *dev, struct pci_controller *phb) { - int *bus_range; + const int *bus_range; unsigned int len; - bus_range = (int *) get_property(dev, "bus-range", &len); + bus_range = get_property(dev, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) { return 1; } @@ -325,15 +323,15 @@ unsigned long __init find_and_init_phbs(void) * in chosen. */ if (of_chosen) { - int *prop; + const int *prop; - prop = (int *)get_property(of_chosen, "linux,pci-probe-only", - NULL); + prop = get_property(of_chosen, + "linux,pci-probe-only", NULL); if (prop) pci_probe_only = *prop; - prop = (int *)get_property(of_chosen, - "linux,pci-assign-all-buses", NULL); + prop = get_property(of_chosen, + "linux,pci-assign-all-buses", NULL); if (prop) pci_assign_all_buses = *prop; } diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 499c3861074f..0af3fc1bdcc9 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -304,19 +304,21 @@ struct seq_operations cpuinfo_op = { void __init check_for_initrd(void) { #ifdef CONFIG_BLK_DEV_INITRD - unsigned long *prop; + const unsigned int *prop; + int len; DBG(" -> check_for_initrd()\n"); if (of_chosen) { - prop = (unsigned long *)get_property(of_chosen, - "linux,initrd-start", NULL); + prop = get_property(of_chosen, "linux,initrd-start", &len); if (prop != NULL) { - initrd_start = (unsigned long)__va(*prop); - prop = (unsigned long *)get_property(of_chosen, - "linux,initrd-end", NULL); + initrd_start = (unsigned long) + __va(of_read_ulong(prop, len / 4)); + prop = get_property(of_chosen, + "linux,initrd-end", &len); if (prop != NULL) { - initrd_end = (unsigned long)__va(*prop); + initrd_end = (unsigned long) + __va(of_read_ulong(prop, len / 4)); initrd_below_start_ok = 1; } else initrd_start = 0; @@ -366,15 +368,14 @@ void __init smp_setup_cpu_maps(void) int cpu = 0; while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) { - int *intserv; + const int *intserv; int j, len = sizeof(u32), nthreads = 1; - intserv = (int *)get_property(dn, "ibm,ppc-interrupt-server#s", - &len); + intserv = get_property(dn, "ibm,ppc-interrupt-server#s", &len); if (intserv) nthreads = len / sizeof(int); else { - intserv = (int *) get_property(dn, "reg", NULL); + intserv = get_property(dn, "reg", NULL); if (!intserv) intserv = &cpu; /* assume logical == phys */ } @@ -395,13 +396,12 @@ void __init smp_setup_cpu_maps(void) if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) && (dn = of_find_node_by_path("/rtas"))) { int num_addr_cell, num_size_cell, maxcpus; - unsigned int *ireg; + const unsigned int *ireg; num_addr_cell = prom_n_addr_cells(dn); num_size_cell = prom_n_size_cells(dn); - ireg = (unsigned int *) - get_property(dn, "ibm,lrdr-capacity", NULL); + ireg = get_property(dn, "ibm,lrdr-capacity", NULL); if (!ireg) goto out; @@ -444,6 +444,8 @@ void __init smp_setup_cpu_maps(void) int __initdata do_early_xmon; #ifdef CONFIG_XMON +extern int xmon_no_auto_backtrace; + static int __init early_xmon(char *p) { /* ensure xmon is enabled */ @@ -452,6 +454,8 @@ static int __init early_xmon(char *p) xmon_init(1); if (strncmp(p, "off", 3) == 0) 
xmon_init(0); + if (strncmp(p, "nobt", 4) == 0) + xmon_no_auto_backtrace = 1; if (strncmp(p, "early", 5) != 0) return 0; } diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index fd1785e4c9bb..00d6b8addd78 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -56,7 +56,6 @@ #include <asm/page.h> #include <asm/mmu.h> #include <asm/lmb.h> -#include <asm/iseries/it_lp_naca.h> #include <asm/firmware.h> #include <asm/xmon.h> #include <asm/udbg.h> @@ -79,10 +78,10 @@ u64 ppc64_pft_size; * before we've read this from the device tree. */ struct ppc64_caches ppc64_caches = { - .dline_size = 0x80, - .log_dline_size = 7, - .iline_size = 0x80, - .log_iline_size = 7 + .dline_size = 0x40, + .log_dline_size = 6, + .iline_size = 0x40, + .log_iline_size = 6 }; EXPORT_SYMBOL_GPL(ppc64_caches); @@ -107,7 +106,7 @@ static int smt_enabled_cmdline; static void check_smt_enabled(void) { struct device_node *dn; - char *smt_option; + const char *smt_option; /* Allow the command line to overrule the OF option */ if (smt_enabled_cmdline) @@ -116,7 +115,7 @@ static void check_smt_enabled(void) dn = of_find_node_by_path("/options"); if (dn) { - smt_option = (char *)get_property(dn, "ibm,smt-enabled", NULL); + smt_option = get_property(dn, "ibm,smt-enabled", NULL); if (smt_option) { if (!strcmp(smt_option, "on")) @@ -293,7 +292,7 @@ static void __init initialize_cache_info(void) */ if ( num_cpus == 1 ) { - u32 *sizep, *lsizep; + const u32 *sizep, *lsizep; u32 size, lsize; const char *dc, *ic; @@ -308,10 +307,10 @@ static void __init initialize_cache_info(void) size = 0; lsize = cur_cpu_spec->dcache_bsize; - sizep = (u32 *)get_property(np, "d-cache-size", NULL); + sizep = get_property(np, "d-cache-size", NULL); if (sizep != NULL) size = *sizep; - lsizep = (u32 *) get_property(np, dc, NULL); + lsizep = get_property(np, dc, NULL); if (lsizep != NULL) lsize = *lsizep; if (sizep == 0 || lsizep == 0) @@ -325,10 +324,10 @@ static void __init initialize_cache_info(void) size = 0; lsize = cur_cpu_spec->icache_bsize; - sizep = (u32 *)get_property(np, "i-cache-size", NULL); + sizep = get_property(np, "i-cache-size", NULL); if (sizep != NULL) size = *sizep; - lsizep = (u32 *)get_property(np, ic, NULL); + lsizep = get_property(np, ic, NULL); if (lsizep != NULL) lsize = *lsizep; if (sizep == 0 || lsizep == 0) diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index fec228cd0163..406f308ddead 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c @@ -60,7 +60,7 @@ static int smt_snooze_cmdline; static int __init smt_setup(void) { struct device_node *options; - unsigned int *val; + const unsigned int *val; unsigned int cpu; if (!cpu_has_feature(CPU_FTR_SMT)) @@ -70,8 +70,7 @@ static int __init smt_setup(void) if (!options) return -ENODEV; - val = (unsigned int *)get_property(options, "ibm,smt-snooze-delay", - NULL); + val = get_property(options, "ibm,smt-snooze-delay", NULL); if (!smt_snooze_cmdline && val) { for_each_possible_cpu(cpu) per_cpu(smt_snooze_delay, cpu) = *val; @@ -231,7 +230,7 @@ static void register_cpu_online(unsigned int cpu) if (cur_cpu_spec->num_pmcs >= 8) sysdev_create_file(s, &attr_pmc8); - if (cpu_has_feature(CPU_FTR_SMT)) + if (cpu_has_feature(CPU_FTR_PURR)) sysdev_create_file(s, &attr_purr); } @@ -273,7 +272,7 @@ static void unregister_cpu_online(unsigned int cpu) if (cur_cpu_spec->num_pmcs >= 8) sysdev_remove_file(s, &attr_pmc8); - if (cpu_has_feature(CPU_FTR_SMT)) + if (cpu_has_feature(CPU_FTR_PURR)) 
sysdev_remove_file(s, &attr_purr); } #endif /* CONFIG_HOTPLUG_CPU */ diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index a124499e65d9..7a3c3f791ade 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -860,19 +860,17 @@ EXPORT_SYMBOL(do_settimeofday); static int __init get_freq(char *name, int cells, unsigned long *val) { struct device_node *cpu; - unsigned int *fp; + const unsigned int *fp; int found = 0; /* The cpu node should have timebase and clock frequency properties */ cpu = of_find_node_by_type(NULL, "cpu"); if (cpu) { - fp = (unsigned int *)get_property(cpu, name, NULL); + fp = get_property(cpu, name, NULL); if (fp) { found = 1; - *val = 0; - while (cells--) - *val = (*val << 32) | *fp++; + *val = of_read_ulong(fp, cells); } of_node_put(cpu); diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 9b352bd0a460..d9f10f2fc372 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -598,6 +598,9 @@ static void parse_fpe(struct pt_regs *regs) #define INST_STSWI 0x7c0005aa #define INST_STSWX 0x7c00052a +#define INST_POPCNTB 0x7c0000f4 +#define INST_POPCNTB_MASK 0xfc0007fe + static int emulate_string_inst(struct pt_regs *regs, u32 instword) { u8 rT = (instword >> 21) & 0x1f; @@ -666,6 +669,23 @@ static int emulate_string_inst(struct pt_regs *regs, u32 instword) return 0; } +static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword) +{ + u32 ra,rs; + unsigned long tmp; + + ra = (instword >> 16) & 0x1f; + rs = (instword >> 21) & 0x1f; + + tmp = regs->gpr[rs]; + tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL); + tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL); + tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL; + regs->gpr[ra] = tmp; + + return 0; +} + static int emulate_instruction(struct pt_regs *regs) { u32 instword; @@ -703,6 +723,11 @@ static int emulate_instruction(struct pt_regs *regs) if ((instword & INST_STRING_GEN_MASK) == INST_STRING) return emulate_string_inst(regs, instword); + /* Emulate the popcntb (Population Count Bytes) instruction. 
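[Editor's note] The new emulate_popcntb_inst() above is the standard SWAR (SIMD-within-a-register) byte-wise population count: three shift-mask-add rounds widen the per-field counts from 1 to 2, 4, and finally 8 bits, and no carry ever crosses a byte boundary. A minimal user-space harness for the same rounds — the harness itself is invented here, the constants are taken verbatim from the hunk above:

    #include <stdio.h>
    #include <stdint.h>

    /* Same three mask-and-add rounds as emulate_popcntb_inst(): after
     * them, each byte of the result holds the number of bits that were
     * set in the corresponding byte of the input. */
    static uint64_t popcntb(uint64_t tmp)
    {
            tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
            tmp = (tmp & 0x3333333333333333ULL) +
                  ((tmp >> 2) & 0x3333333333333333ULL);
            tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
            return tmp;
    }

    int main(void)
    {
            /* 0xff has 8 bits set, 0x80 has 1, the rest have none */
            printf("%016llx\n", (unsigned long long)
                   popcntb(0xff80000000000000ULL)); /* 0801000000000000 */
            return 0;
    }
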
*/ + if ((instword & INST_POPCNTB_MASK) == INST_POPCNTB) { + return emulate_popcntb_inst(regs, instword); + } + return -EINVAL; } diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index fad8580f9081..cb87e71eec66 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -77,7 +77,7 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev) } else #endif { - unsigned char *dma_window; + const unsigned char *dma_window; struct iommu_table *tbl; unsigned long offset, size; @@ -217,7 +217,7 @@ static void __devinit vio_dev_release(struct device *dev) struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node) { struct vio_dev *viodev; - unsigned int *unit_address; + const unsigned int *unit_address; /* we need the 'device_type' property, in order to match with drivers */ if (of_node->type == NULL) { @@ -227,7 +227,7 @@ struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node) return NULL; } - unit_address = (unsigned int *)get_property(of_node, "reg", NULL); + unit_address = get_property(of_node, "reg", NULL); if (unit_address == NULL) { printk(KERN_WARNING "%s: node %s missing 'reg'\n", __FUNCTION__, @@ -249,7 +249,7 @@ struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node) viodev->type = of_node->type; viodev->unit_address = *unit_address; if (firmware_has_feature(FW_FEATURE_ISERIES)) { - unit_address = (unsigned int *)get_property(of_node, + unit_address = get_property(of_node, "linux,unit_address", NULL); if (unit_address != NULL) viodev->unit_address = *unit_address; @@ -423,7 +423,7 @@ static int vio_hotplug(struct device *dev, char **envp, int num_envp, { const struct vio_dev *vio_dev = to_vio_dev(dev); struct device_node *dn = dev->platform_data; - char *cp; + const char *cp; int length; if (!num_envp) @@ -431,7 +431,7 @@ static int vio_hotplug(struct device *dev, char **envp, int num_envp, if (!dn) return -ENODEV; - cp = (char *)get_property(dn, "compatible", &length); + cp = get_property(dn, "compatible", &length); if (!cp) return -ENODEV; @@ -493,11 +493,11 @@ static struct vio_dev *vio_find_name(const char *kobj_name) */ struct vio_dev *vio_find_node(struct device_node *vnode) { - uint32_t *unit_address; + const uint32_t *unit_address; char kobj_name[BUS_ID_SIZE]; /* construct the kobject name from the device node */ - unit_address = (uint32_t *)get_property(vnode, "reg", NULL); + unit_address = get_property(vnode, "reg", NULL); if (!unit_address) return NULL; snprintf(kobj_name, BUS_ID_SIZE, "%x", *unit_address); diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index ff7096458249..336dd191f768 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -14,7 +14,6 @@ endif obj-$(CONFIG_PPC64) += checksum_64.o copypage_64.o copyuser_64.o \ memcpy_64.o usercopy_64.o mem_64.o string.o \ strcase.o -obj-$(CONFIG_PPC_ISERIES) += e2a.o obj-$(CONFIG_XMON) += sstep.o ifeq ($(CONFIG_PPC64),y) diff --git a/arch/powerpc/lib/e2a.c b/arch/powerpc/lib/e2a.c deleted file mode 100644 index 4b72ed8fd50e..000000000000 --- a/arch/powerpc/lib/e2a.c +++ /dev/null @@ -1,116 +0,0 @@ -/* - * EBCDIC to ASCII conversion - * - * This function moved here from arch/powerpc/platforms/iseries/viopath.c - * - * (C) Copyright 2000-2004 IBM Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of the - * License, 
or (at your option) anyu later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software Foundation, - * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#include <linux/module.h> - -unsigned char e2a(unsigned char x) -{ - switch (x) { - case 0xF0: - return '0'; - case 0xF1: - return '1'; - case 0xF2: - return '2'; - case 0xF3: - return '3'; - case 0xF4: - return '4'; - case 0xF5: - return '5'; - case 0xF6: - return '6'; - case 0xF7: - return '7'; - case 0xF8: - return '8'; - case 0xF9: - return '9'; - case 0xC1: - return 'A'; - case 0xC2: - return 'B'; - case 0xC3: - return 'C'; - case 0xC4: - return 'D'; - case 0xC5: - return 'E'; - case 0xC6: - return 'F'; - case 0xC7: - return 'G'; - case 0xC8: - return 'H'; - case 0xC9: - return 'I'; - case 0xD1: - return 'J'; - case 0xD2: - return 'K'; - case 0xD3: - return 'L'; - case 0xD4: - return 'M'; - case 0xD5: - return 'N'; - case 0xD6: - return 'O'; - case 0xD7: - return 'P'; - case 0xD8: - return 'Q'; - case 0xD9: - return 'R'; - case 0xE2: - return 'S'; - case 0xE3: - return 'T'; - case 0xE4: - return 'U'; - case 0xE5: - return 'V'; - case 0xE6: - return 'W'; - case 0xE7: - return 'X'; - case 0xE8: - return 'Y'; - case 0xE9: - return 'Z'; - } - return ' '; -} -EXPORT_SYMBOL(e2a); - -unsigned char* strne2a(unsigned char *dest, const unsigned char *src, size_t n) -{ - int i; - - n = strnlen(src, n); - - for (i = 0; i < n; i++) - dest[i] = e2a(src[i]); - - return dest; -} diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c index 077bed7dc52b..80b482ca30df 100644 --- a/arch/powerpc/lib/locks.c +++ b/arch/powerpc/lib/locks.c @@ -23,6 +23,7 @@ #include <asm/hvcall.h> #include <asm/iseries/hv_call.h> #include <asm/smp.h> +#include <asm/firmware.h> void __spin_yield(raw_spinlock_t *lock) { @@ -39,13 +40,12 @@ void __spin_yield(raw_spinlock_t *lock) rmb(); if (lock->slock != lock_value) return; /* something has changed */ -#ifdef CONFIG_PPC_ISERIES - HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc, - ((u64)holder_cpu << 32) | yield_count); -#else - plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu), - yield_count); -#endif + if (firmware_has_feature(FW_FEATURE_ISERIES)) + HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc, + ((u64)holder_cpu << 32) | yield_count); + else + plpar_hcall_norets(H_CONFER, + get_hard_smp_processor_id(holder_cpu), yield_count); } /* @@ -69,13 +69,12 @@ void __rw_yield(raw_rwlock_t *rw) rmb(); if (rw->lock != lock_value) return; /* something has changed */ -#ifdef CONFIG_PPC_ISERIES - HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc, - ((u64)holder_cpu << 32) | yield_count); -#else - plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu), - yield_count); -#endif + if (firmware_has_feature(FW_FEATURE_ISERIES)) + HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc, + ((u64)holder_cpu << 32) | yield_count); + else + plpar_hcall_norets(H_CONFER, + get_hard_smp_processor_id(holder_cpu), yield_count); } #endif diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index fbe23933f731..6c0f1c7d83e5 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -159,12 +159,12 @@ static struct device_node * 
__cpuinit find_cpu_node(unsigned int cpu) { unsigned int hw_cpuid = get_hard_smp_processor_id(cpu); struct device_node *cpu_node = NULL; - unsigned int *interrupt_server, *reg; + const unsigned int *interrupt_server, *reg; int len; while ((cpu_node = of_find_node_by_type(cpu_node, "cpu")) != NULL) { /* Try interrupt server first */ - interrupt_server = (unsigned int *)get_property(cpu_node, + interrupt_server = get_property(cpu_node, "ibm,ppc-interrupt-server#s", &len); len = len / sizeof(u32); @@ -175,8 +175,7 @@ static struct device_node * __cpuinit find_cpu_node(unsigned int cpu) return cpu_node; } } else { - reg = (unsigned int *)get_property(cpu_node, - "reg", &len); + reg = get_property(cpu_node, "reg", &len); if (reg && (len > 0) && (reg[0] == hw_cpuid)) return cpu_node; } @@ -186,9 +185,9 @@ static struct device_node * __cpuinit find_cpu_node(unsigned int cpu) } /* must hold reference to node during call */ -static int *of_get_associativity(struct device_node *dev) +static const int *of_get_associativity(struct device_node *dev) { - return (unsigned int *)get_property(dev, "ibm,associativity", NULL); + return get_property(dev, "ibm,associativity", NULL); } /* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa @@ -197,7 +196,7 @@ static int *of_get_associativity(struct device_node *dev) static int of_node_to_nid_single(struct device_node *device) { int nid = -1; - unsigned int *tmp; + const unsigned int *tmp; if (min_common_depth == -1) goto out; @@ -255,7 +254,7 @@ EXPORT_SYMBOL_GPL(of_node_to_nid); static int __init find_min_common_depth(void) { int depth; - unsigned int *ref_points; + const unsigned int *ref_points; struct device_node *rtas_root; unsigned int len; @@ -270,7 +269,7 @@ static int __init find_min_common_depth(void) * configuration (should be all 0's) and the second is for a normal * NUMA configuration. 
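[Editor's note] The lookup that of_node_to_nid_single() performs on the "ibm,associativity" array once min_common_depth is known can be summarised as below. This is a hedged sketch: the 0xffff invalid-domain marker and the MAX_NUMNODES stand-in of 256 are illustrative, and the function name is mine.

    #include <stdio.h>

    /* "ibm,associativity" is an array of cells: cell 0 holds the number
     * of domain ids that follow; the domain at min_common_depth (a
     * 1-based index into the ids) becomes the Linux NUMA node id. */
    static int assoc_to_nid(const unsigned int *assoc, int min_common_depth)
    {
            int nid = -1;

            if (assoc && (int)assoc[0] >= min_common_depth)
                    nid = assoc[min_common_depth];

            /* firmware marks unset domains with 0xffff */
            if (nid == 0xffff || nid >= 256 /* MAX_NUMNODES stand-in */)
                    nid = -1;
            return nid;
    }

    int main(void)
    {
            const unsigned int assoc[] = { 4, 0, 0, 0, 1 };

            printf("%d\n", assoc_to_nid(assoc, 4)); /* prints 1 */
            return 0;
    }
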
*/ - ref_points = (unsigned int *)get_property(rtas_root, + ref_points = get_property(rtas_root, "ibm,associativity-reference-points", &len); if ((len >= 1) && ref_points) { @@ -297,7 +296,7 @@ static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells) of_node_put(memory); } -static unsigned long __devinit read_n_cells(int n, unsigned int **buf) +static unsigned long __devinit read_n_cells(int n, const unsigned int **buf) { unsigned long result = 0; @@ -435,15 +434,13 @@ static int __init parse_numa_properties(void) unsigned long size; int nid; int ranges; - unsigned int *memcell_buf; + const unsigned int *memcell_buf; unsigned int len; - memcell_buf = (unsigned int *)get_property(memory, + memcell_buf = get_property(memory, "linux,usable-memory", &len); if (!memcell_buf || len <= 0) - memcell_buf = - (unsigned int *)get_property(memory, "reg", - &len); + memcell_buf = get_property(memory, "reg", &len); if (!memcell_buf || len <= 0) continue; @@ -787,10 +784,10 @@ int hot_add_scn_to_nid(unsigned long scn_addr) while ((memory = of_find_node_by_type(memory, "memory")) != NULL) { unsigned long start, size; int ranges; - unsigned int *memcell_buf; + const unsigned int *memcell_buf; unsigned int len; - memcell_buf = (unsigned int *)get_property(memory, "reg", &len); + memcell_buf = get_property(memory, "reg", &len); if (!memcell_buf || len <= 0) continue; diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index de0c8842415c..d3733912adb4 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c @@ -22,6 +22,8 @@ #include <asm/paca.h> #include <asm/cputable.h> #include <asm/cacheflush.h> +#include <asm/smp.h> +#include <linux/compiler.h> #ifdef DEBUG #define DBG(fmt...) udbg_printf(fmt) @@ -50,9 +52,32 @@ static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags) return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags; } -static inline void create_slbe(unsigned long ea, unsigned long flags, - unsigned long entry) +static inline void slb_shadow_update(unsigned long esid, unsigned long vsid, + unsigned long entry) { + /* + * Clear the ESID first so the entry is not valid while we are + * updating it. + */ + get_slb_shadow()->save_area[entry].esid = 0; + barrier(); + get_slb_shadow()->save_area[entry].vsid = vsid; + barrier(); + get_slb_shadow()->save_area[entry].esid = esid; + +} + +static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags, + unsigned long entry) +{ + /* + * Updating the shadow buffer before writing the SLB ensures + * we don't get a stale entry here if we get preempted by PHYP + * between these two statements. + */ + slb_shadow_update(mk_esid_data(ea, entry), mk_vsid_data(ea, flags), + entry); + asm volatile("slbmte %0,%1" : : "r" (mk_vsid_data(ea, flags)), "r" (mk_esid_data(ea, entry)) @@ -77,6 +102,10 @@ void slb_flush_and_rebolt(void) if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET) ksp_esid_data &= ~SLB_ESID_V; + /* Only third entry (stack) may change here so only resave that */ + slb_shadow_update(ksp_esid_data, + mk_vsid_data(ksp_esid_data, lflags), 2); + /* We need to do this all in asm, so we're sure we don't touch * the stack between the slbia and rebolting it. 
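[Editor's note] The interesting part of slb_shadow_update() above is the ordering: the ESID half of the entry carries the valid bit, so it is cleared first and rewritten last, with compiler barriers in between, so that PHYP can never observe a half-updated shadow entry. The same publish pattern in isolation — the structure and names are invented for this sketch, and volatile-plus-barrier stands in for the kernel's barrier():

    #include <stdint.h>

    struct shadow_slbe {
            volatile uint64_t esid;  /* holds the valid bit */
            volatile uint64_t vsid;
    };

    #define barrier() __asm__ __volatile__("" ::: "memory")

    static void shadow_update(struct shadow_slbe *e,
                              uint64_t esid, uint64_t vsid)
    {
            e->esid = 0;      /* 1. invalidate: the reader skips the entry */
            barrier();
            e->vsid = vsid;   /* 2. update the payload while invisible */
            barrier();
            e->esid = esid;   /* 3. publish the completed entry */
    }
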
*/ asm volatile("isync\n" @@ -209,9 +238,9 @@ void slb_initialize(void) asm volatile("isync":::"memory"); asm volatile("slbmte %0,%0"::"r" (0) : "memory"); asm volatile("isync; slbia; isync":::"memory"); - create_slbe(PAGE_OFFSET, lflags, 0); + create_shadowed_slbe(PAGE_OFFSET, lflags, 0); - create_slbe(VMALLOC_START, vflags, 1); + create_shadowed_slbe(VMALLOC_START, vflags, 1); /* We don't bolt the stack for the time being - we're in boot, * so the stack is in the bolted segment. By the time it goes diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c index f6eef78efd29..b58baa65c4a7 100644 --- a/arch/powerpc/mm/tlb_64.c +++ b/arch/powerpc/mm/tlb_64.c @@ -146,6 +146,7 @@ void hpte_update(struct mm_struct *mm, unsigned long addr, psize = mmu_huge_psize; #else BUG(); + psize = pte_pagesize_index(pte); /* shutup gcc */ #endif } else psize = pte_pagesize_index(pte); diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c index cf3967a66fb5..969fbb6d8c46 100644 --- a/arch/powerpc/platforms/83xx/mpc834x_itx.c +++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c @@ -60,8 +60,8 @@ static void __init mpc834x_itx_setup_arch(void) np = of_find_node_by_type(NULL, "cpu"); if (np != 0) { - unsigned int *fp = - (int *)get_property(np, "clock-frequency", NULL); + const unsigned int *fp = + get_property(np, "clock-frequency", NULL); if (fp != 0) loops_per_jiffy = *fp / HZ; else diff --git a/arch/powerpc/platforms/83xx/mpc834x_sys.c b/arch/powerpc/platforms/83xx/mpc834x_sys.c index 32df239d1c48..677196187a4e 100644 --- a/arch/powerpc/platforms/83xx/mpc834x_sys.c +++ b/arch/powerpc/platforms/83xx/mpc834x_sys.c @@ -57,8 +57,8 @@ static void __init mpc834x_sys_setup_arch(void) np = of_find_node_by_type(NULL, "cpu"); if (np != 0) { - unsigned int *fp = - (int *)get_property(np, "clock-frequency", NULL); + const unsigned int *fp = + get_property(np, "clock-frequency", NULL); if (fp != 0) loops_per_jiffy = *fp / HZ; else diff --git a/arch/powerpc/platforms/83xx/pci.c b/arch/powerpc/platforms/83xx/pci.c index 5d84a9ccd103..4557ac5255c1 100644 --- a/arch/powerpc/platforms/83xx/pci.c +++ b/arch/powerpc/platforms/83xx/pci.c @@ -59,7 +59,7 @@ int __init add_bridge(struct device_node *dev) int len; struct pci_controller *hose; struct resource rsrc; - int *bus_range; + const int *bus_range; int primary = 1, has_address = 0; phys_addr_t immr = get_immrbase(); @@ -69,7 +69,7 @@ int __init add_bridge(struct device_node *dev) has_address = (of_address_to_resource(dev, 0, &rsrc) == 0); /* Get bus range if any */ - bus_range = (int *)get_property(dev, "bus-range", &len); + bus_range = get_property(dev, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) { printk(KERN_WARNING "Can't get bus-range for %s, assume" " bus 0\n", dev->full_name); diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c index 9d2acfbbeccd..cae6b73357d5 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c @@ -121,9 +121,9 @@ static void __init mpc85xx_ads_setup_arch(void) cpu = of_find_node_by_type(NULL, "cpu"); if (cpu != 0) { - unsigned int *fp; + const unsigned int *fp; - fp = (int *)get_property(cpu, "clock-frequency", NULL); + fp = get_property(cpu, "clock-frequency", NULL); if (fp != 0) loops_per_jiffy = *fp / HZ; else diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c index 1d357d32a29f..4c1fede6470e 100644 --- 
a/arch/powerpc/platforms/85xx/mpc85xx_cds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c @@ -241,9 +241,9 @@ mpc85xx_cds_setup_arch(void) cpu = of_find_node_by_type(NULL, "cpu"); if (cpu != 0) { - unsigned int *fp; + const unsigned int *fp; - fp = (int *)get_property(cpu, "clock-frequency", NULL); + fp = get_property(cpu, "clock-frequency", NULL); if (fp != 0) loops_per_jiffy = *fp / HZ; else diff --git a/arch/powerpc/platforms/85xx/pci.c b/arch/powerpc/platforms/85xx/pci.c index 1d51f3242ab1..05930eeb6e7f 100644 --- a/arch/powerpc/platforms/85xx/pci.c +++ b/arch/powerpc/platforms/85xx/pci.c @@ -41,7 +41,7 @@ int __init add_bridge(struct device_node *dev) int len; struct pci_controller *hose; struct resource rsrc; - int *bus_range; + const int *bus_range; int primary = 1, has_address = 0; phys_addr_t immr = get_immrbase(); @@ -51,7 +51,7 @@ int __init add_bridge(struct device_node *dev) has_address = (of_address_to_resource(dev, 0, &rsrc) == 0); /* Get bus range if any */ - bus_range = (int *) get_property(dev, "bus-range", &len); + bus_range = get_property(dev, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) { printk(KERN_WARNING "Can't get bus-range for %s, assume" " bus 0\n", dev->full_name); diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c index 5e583cf38786..b637e8157f7b 100644 --- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c +++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c @@ -347,9 +347,9 @@ mpc86xx_hpcn_setup_arch(void) np = of_find_node_by_type(NULL, "cpu"); if (np != 0) { - unsigned int *fp; + const unsigned int *fp; - fp = (int *)get_property(np, "clock-frequency", NULL); + fp = get_property(np, "clock-frequency", NULL); if (fp != 0) loops_per_jiffy = *fp / HZ; else diff --git a/arch/powerpc/platforms/86xx/pci.c b/arch/powerpc/platforms/86xx/pci.c index a8c8f0a44055..481e18ed5be9 100644 --- a/arch/powerpc/platforms/86xx/pci.c +++ b/arch/powerpc/platforms/86xx/pci.c @@ -153,7 +153,7 @@ int __init add_bridge(struct device_node *dev) int len; struct pci_controller *hose; struct resource rsrc; - int *bus_range; + const int *bus_range; int has_address = 0; int primary = 0; @@ -163,7 +163,7 @@ int __init add_bridge(struct device_node *dev) has_address = (of_address_to_resource(dev, 0, &rsrc) == 0); /* Get bus range if any */ - bus_range = (int *) get_property(dev, "bus-range", &len); + bus_range = get_property(dev, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) printk(KERN_WARNING "Can't get bus-range for %s, assume" " bus 0\n", dev->full_name); diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile index 5cf46dc57895..e58fa953a50b 100644 --- a/arch/powerpc/platforms/Makefile +++ b/arch/powerpc/platforms/Makefile @@ -13,5 +13,6 @@ obj-$(CONFIG_PPC_86xx) += 86xx/ obj-$(CONFIG_PPC_PSERIES) += pseries/ obj-$(CONFIG_PPC_ISERIES) += iseries/ obj-$(CONFIG_PPC_MAPLE) += maple/ +obj-$(CONFIG_PPC_PASEMI) += pasemi/ obj-$(CONFIG_PPC_CELL) += cell/ obj-$(CONFIG_EMBEDDED6xx) += embedded6xx/ diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c index ce696c1cca75..3f3859d12e00 100644 --- a/arch/powerpc/platforms/cell/cbe_regs.c +++ b/arch/powerpc/platforms/cell/cbe_regs.c @@ -97,7 +97,7 @@ void __init cbe_regs_init(void) struct cbe_regs_map *map = &cbe_regs_maps[cbe_regs_map_count++]; /* That hack must die die die ! 
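[Editor's note] The board-setup hunks above all repeat one mechanical change: get_property() now returns a const pointer, so the ad-hoc (unsigned int *) and (int *) casts disappear and any write through device-tree data becomes a compile error. The resulting idiom, as a kernel-context sketch (not buildable outside the tree; the 50000000/HZ placeholder is the same "~sane value until calibrate_delay() runs" that chrp_setup_arch() uses later in this series):

    #include <linux/kernel.h>
    #include <asm/prom.h>

    static unsigned long cpu_loops_per_jiffy(struct device_node *cpu)
    {
            const unsigned int *fp;      /* property data is read-only */

            fp = get_property(cpu, "clock-frequency", NULL);
            if (fp)
                    return *fp / HZ;
            return 50000000 / HZ;        /* fallback until calibration */
    }
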
*/ - struct address_prop { + const struct address_prop { unsigned long address; unsigned int len; } __attribute__((packed)) *prop; @@ -114,13 +114,11 @@ void __init cbe_regs_init(void) if (cbe_thread_map[i].cpu_node == cpu) cbe_thread_map[i].regs = map; - prop = (struct address_prop *)get_property(cpu, "pervasive", - NULL); + prop = get_property(cpu, "pervasive", NULL); if (prop != NULL) map->pmd_regs = ioremap(prop->address, prop->len); - prop = (struct address_prop *)get_property(cpu, "iic", - NULL); + prop = get_property(cpu, "iic", NULL); if (prop != NULL) map->iic_regs = ioremap(prop->address, prop->len); } diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index d7bbb61109f9..6b57a47c5d37 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c @@ -89,17 +89,17 @@ static struct irq_chip iic_chip = { /* Get an IRQ number from the pending state register of the IIC */ static unsigned int iic_get_irq(struct pt_regs *regs) { - struct cbe_iic_pending_bits pending; - struct iic *iic; - - iic = &__get_cpu_var(iic); - *(unsigned long *) &pending = - in_be64((unsigned long __iomem *) &iic->regs->pending_destr); - iic->eoi_stack[++iic->eoi_ptr] = pending.prio; - BUG_ON(iic->eoi_ptr > 15); + struct cbe_iic_pending_bits pending; + struct iic *iic; + + iic = &__get_cpu_var(iic); + *(unsigned long *) &pending = + in_be64((unsigned long __iomem *) &iic->regs->pending_destr); + iic->eoi_stack[++iic->eoi_ptr] = pending.prio; + BUG_ON(iic->eoi_ptr > 15); if (pending.flags & CBE_IIC_IRQ_VALID) return irq_linear_revmap(iic->host, - iic_pending_to_hwnum(pending)); + iic_pending_to_hwnum(pending)); return NO_IRQ; } @@ -250,16 +250,15 @@ static int __init setup_iic(void) struct resource r0, r1; struct irq_host *host; int found = 0; - u32 *np; + const u32 *np; for (dn = NULL; (dn = of_find_node_by_name(dn,"interrupt-controller")) != NULL;) { if (!device_is_compatible(dn, "IBM,CBEA-Internal-Interrupt-Controller")) continue; - np = (u32 *)get_property(dn, "ibm,interrupt-server-ranges", - NULL); - if (np == NULL) { + np = get_property(dn, "ibm,interrupt-server-ranges", NULL); + if (np == NULL) { printk(KERN_WARNING "IIC: CPU association not found\n"); of_node_put(dn); return -ENODEV; diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index a35004e14c69..d2b20eba5b87 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -308,15 +308,16 @@ static void cell_do_map_iommu(struct cell_iommu *iommu, static void iommu_devnode_setup(struct device_node *d) { - unsigned int *ioid; - unsigned long *dma_window, map_start, map_size, token; + const unsigned int *ioid; + unsigned long map_start, map_size, token; + const unsigned long *dma_window; struct cell_iommu *iommu; - ioid = (unsigned int *)get_property(d, "ioid", NULL); + ioid = get_property(d, "ioid", NULL); if (!ioid) pr_debug("No ioid entry found !\n"); - dma_window = (unsigned long *)get_property(d, "ibm,dma-window", NULL); + dma_window = get_property(d, "ibm,dma-window", NULL); if (!dma_window) pr_debug("No ibm,dma-window entry found !\n"); @@ -371,8 +372,9 @@ static int cell_map_iommu_hardcoded(int num_nodes) static int cell_map_iommu(void) { - unsigned int num_nodes = 0, *node_id; - unsigned long *base, *mmio_base; + unsigned int num_nodes = 0; + const unsigned int *node_id; + const unsigned long *base, *mmio_base; struct device_node *dn; struct cell_iommu *iommu = NULL; @@ -381,7 +383,7 @@ static int 
cell_map_iommu(void) for(dn = of_find_node_by_type(NULL, "cpu"); dn; dn = of_find_node_by_type(dn, "cpu")) { - node_id = (unsigned int *)get_property(dn, "node-id", NULL); + node_id = get_property(dn, "node-id", NULL); if (num_nodes < *node_id) num_nodes = *node_id; @@ -396,9 +398,9 @@ static int cell_map_iommu(void) dn; dn = of_find_node_by_type(dn, "cpu")) { - node_id = (unsigned int *)get_property(dn, "node-id", NULL); - base = (unsigned long *)get_property(dn, "ioc-cache", NULL); - mmio_base = (unsigned long *)get_property(dn, "ioc-translation", NULL); + node_id = get_property(dn, "node-id", NULL); + base = get_property(dn, "ioc-cache", NULL); + mmio_base = get_property(dn, "ioc-translation", NULL); if (!base || !mmio_base || !node_id) return cell_map_iommu_hardcoded(num_nodes); diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c index 282987d6d4a2..22c228a49c33 100644 --- a/arch/powerpc/platforms/cell/setup.c +++ b/arch/powerpc/platforms/cell/setup.c @@ -150,10 +150,6 @@ static int __init cell_probe(void) !of_flat_dt_is_compatible(root, "IBM,CPBW-1.0")) return 0; -#ifdef CONFIG_UDBG_RTAS_CONSOLE - udbg_init_rtas_console(); -#endif - hpte_init_native(); return 1; diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c index 46aef0640742..1c0acbad7425 100644 --- a/arch/powerpc/platforms/cell/smp.c +++ b/arch/powerpc/platforms/cell/smp.c @@ -57,7 +57,7 @@ */ static cpumask_t of_spin_map; -extern void pSeries_secondary_smp_init(unsigned long); +extern void generic_secondary_smp_init(unsigned long); /** * smp_startup_cpu() - start the given cpu @@ -74,7 +74,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu) { int status; unsigned long start_here = __pa((u32)*((unsigned long *) - pSeries_secondary_smp_init)); + generic_secondary_smp_init)); unsigned int pcpu; int start_cpu; diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c index 15217bb0402f..742a03282b44 100644 --- a/arch/powerpc/platforms/cell/spider-pic.c +++ b/arch/powerpc/platforms/cell/spider-pic.c @@ -240,7 +240,7 @@ static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc, static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic) { unsigned int virq; - u32 *imap, *tmp; + const u32 *imap, *tmp; int imaplen, intsize, unit; struct device_node *iic; struct irq_host *iic_host; @@ -258,25 +258,25 @@ static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic) #endif /* Now do the horrible hacks */ - tmp = (u32 *)get_property(pic->of_node, "#interrupt-cells", NULL); + tmp = get_property(pic->of_node, "#interrupt-cells", NULL); if (tmp == NULL) return NO_IRQ; intsize = *tmp; - imap = (u32 *)get_property(pic->of_node, "interrupt-map", &imaplen); + imap = get_property(pic->of_node, "interrupt-map", &imaplen); if (imap == NULL || imaplen < (intsize + 1)) return NO_IRQ; iic = of_find_node_by_phandle(imap[intsize]); if (iic == NULL) return NO_IRQ; imap += intsize + 1; - tmp = (u32 *)get_property(iic, "#interrupt-cells", NULL); + tmp = get_property(iic, "#interrupt-cells", NULL); if (tmp == NULL) return NO_IRQ; intsize = *tmp; /* Assume unit is last entry of interrupt specifier */ unit = imap[intsize - 1]; /* Ok, we have a unit, now let's try to get the node */ - tmp = (u32 *)get_property(iic, "ibm,interrupt-server-ranges", NULL); + tmp = get_property(iic, "ibm,interrupt-server-ranges", NULL); if (tmp == NULL) { of_node_put(iic); return NO_IRQ; diff --git 
a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index d06042deb021..3bd36d46ab4a 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c @@ -488,10 +488,10 @@ int spu_irq_class_1_bottom(struct spu *spu) static int __init find_spu_node_id(struct device_node *spe) { - unsigned int *id; + const unsigned int *id; struct device_node *cpu; cpu = spe->parent->parent; - id = (unsigned int *)get_property(cpu, "node-id", NULL); + id = get_property(cpu, "node-id", NULL); return id ? *id : 0; } @@ -500,7 +500,7 @@ static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe, { static DEFINE_MUTEX(add_spumem_mutex); - struct address_prop { + const struct address_prop { unsigned long address; unsigned int len; } __attribute__((packed)) *p; @@ -511,7 +511,7 @@ static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe, struct zone *zone; int ret; - p = (void*)get_property(spe, prop, &proplen); + p = get_property(spe, prop, &proplen); WARN_ON(proplen != sizeof (*p)); start_pfn = p->address >> PAGE_SHIFT; @@ -531,12 +531,12 @@ static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe, static void __iomem * __init map_spe_prop(struct spu *spu, struct device_node *n, const char *name) { - struct address_prop { + const struct address_prop { unsigned long address; unsigned int len; } __attribute__((packed)) *prop; - void *p; + const void *p; int proplen; void* ret = NULL; int err = 0; @@ -570,14 +570,14 @@ static int __init spu_map_interrupts(struct spu *spu, struct device_node *np) { struct irq_host *host; unsigned int isrc; - u32 *tmp; + const u32 *tmp; host = iic_get_irq_host(spu->node); if (host == NULL) return -ENODEV; /* Get the interrupt source from the device-tree */ - tmp = (u32 *)get_property(np, "isrc", NULL); + tmp = get_property(np, "isrc", NULL); if (!tmp) return -ENODEV; spu->isrc = isrc = tmp[0]; @@ -593,7 +593,7 @@ static int __init spu_map_interrupts(struct spu *spu, struct device_node *np) static int __init spu_map_device(struct spu *spu, struct device_node *node) { - char *prop; + const char *prop; int ret; ret = -ENODEV; diff --git a/arch/powerpc/platforms/chrp/nvram.c b/arch/powerpc/platforms/chrp/nvram.c index 150f67d6f90c..0dd4a64757d9 100644 --- a/arch/powerpc/platforms/chrp/nvram.c +++ b/arch/powerpc/platforms/chrp/nvram.c @@ -67,13 +67,14 @@ static void chrp_nvram_write(int addr, unsigned char val) void __init chrp_nvram_init(void) { struct device_node *nvram; - unsigned int *nbytes_p, proplen; + const unsigned int *nbytes_p; + unsigned int proplen; nvram = of_find_node_by_type(NULL, "nvram"); if (nvram == NULL) return; - nbytes_p = (unsigned int *)get_property(nvram, "#bytes", &proplen); + nbytes_p = get_property(nvram, "#bytes", &proplen); if (nbytes_p == NULL || proplen != sizeof(unsigned int)) return; diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c index 6802cdc3168a..0f4340506c75 100644 --- a/arch/powerpc/platforms/chrp/pci.c +++ b/arch/powerpc/platforms/chrp/pci.c @@ -214,11 +214,11 @@ void __init chrp_find_bridges(void) { struct device_node *dev; - int *bus_range; + const int *bus_range; int len, index = -1; struct pci_controller *hose; - unsigned int *dma; - char *model, *machine; + const unsigned int *dma; + const char *model, *machine; int is_longtrail = 0, is_mot = 0, is_pegasos = 0; struct device_node *root = find_path_device("/"); struct resource r; @@ -246,7 +246,7 @@ chrp_find_bridges(void) 
dev->full_name); continue; } - bus_range = (int *) get_property(dev, "bus-range", &len); + bus_range = get_property(dev, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) { printk(KERN_WARNING "Can't get bus-range for %s\n", dev->full_name); @@ -257,7 +257,7 @@ chrp_find_bridges(void) else printk(KERN_INFO "PCI buses %d..%d", bus_range[0], bus_range[1]); - printk(" controlled by %s", dev->type); + printk(" controlled by %s", dev->full_name); if (!is_longtrail) printk(" at %llx", (unsigned long long)r.start); printk("\n"); @@ -289,6 +289,19 @@ chrp_find_bridges(void) setup_indirect_pci(hose, 0xfec00cf8, 0xfee00cfc); } else if (is_pegasos == 2) { setup_peg2(hose, dev); + } else if (!strncmp(model, "IBM,CPC710", 10)) { + setup_indirect_pci(hose, + r.start + 0x000f8000, + r.start + 0x000f8010); + if (index == 0) { + dma = get_property(dev, "system-dma-base",&len); + if (dma && len >= sizeof(*dma)) { + dma = (unsigned int *) + (((unsigned long)dma) + + len - sizeof(*dma)); + pci_dram_offset = *dma; + } + } } else { printk("No methods for %s (model %s), using RTAS\n", dev->full_name, model); @@ -299,15 +312,35 @@ chrp_find_bridges(void) /* check the first bridge for a property that we can use to set pci_dram_offset */ - dma = (unsigned int *) - get_property(dev, "ibm,dma-ranges", &len); + dma = get_property(dev, "ibm,dma-ranges", &len); if (index == 0 && dma != NULL && len >= 6 * sizeof(*dma)) { pci_dram_offset = dma[2] - dma[3]; printk("pci_dram_offset = %lx\n", pci_dram_offset); } } +} + +/* SL82C105 IDE Control/Status Register */ +#define SL82C105_IDECSR 0x40 + +/* Fixup for Winbond ATA quirk, required for briq */ +void chrp_pci_fixup_winbond_ata(struct pci_dev *sl82c105) +{ + u8 progif; - /* Do not fixup interrupts from OF tree on pegasos */ - if (is_pegasos) - ppc_md.pcibios_fixup = NULL; + /* If non-briq machines need that fixup too, please speak up */ + if (!machine_is(chrp) || _chrp_type != _CHRP_briq) + return; + + if ((sl82c105->class & 5) != 5) { + printk("W83C553: Switching SL82C105 IDE to PCI native mode\n"); + /* Enable SL82C105 PCI native IDE mode */ + pci_read_config_byte(sl82c105, PCI_CLASS_PROG, &progif); + pci_write_config_byte(sl82c105, PCI_CLASS_PROG, progif | 0x05); + sl82c105->class |= 0x05; + /* Disable SL82C105 second port */ + pci_write_config_word(sl82c105, SL82C105_IDECSR, 0x0003); + } } +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105, + chrp_pci_fixup_winbond_ata); diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index 9c08ff322290..488dbd9b51ae 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c @@ -74,6 +74,9 @@ extern irqreturn_t xmon_irq(int, void *, struct pt_regs *); extern unsigned long loops_per_jiffy; +/* To be replaced by RTAS when available */ +static unsigned int *briq_SPOR; + #ifdef CONFIG_SMP extern struct smp_ops_t chrp_smp_ops; #endif @@ -92,6 +95,15 @@ static const char *gg2_cachemodes[4] = { "Disabled", "Write-Through", "Copy-Back", "Transparent Mode" }; +static const char *chrp_names[] = { + "Unknown", + "","","", + "Motorola", + "IBM or Longtrail", + "Genesi Pegasos", + "Total Impact Briq" +}; + void chrp_show_cpuinfo(struct seq_file *m) { int i, sdramen; @@ -214,8 +226,7 @@ static void __init pegasos_set_l2cr(void) /* Enable L2 cache if needed */ np = find_type_devices("cpu"); if (np != NULL) { - unsigned int *l2cr = (unsigned int *) - get_property (np, "l2cr", NULL); + const unsigned int *l2cr = get_property(np, "l2cr", 
NULL); if (l2cr == NULL) { printk ("Pegasos l2cr : no cpu l2cr property found\n"); return; @@ -229,10 +240,18 @@ static void __init pegasos_set_l2cr(void) } } +static void briq_restart(char *cmd) +{ + local_irq_disable(); + if (briq_SPOR) + out_be32(briq_SPOR, 0); + for(;;); +} + void __init chrp_setup_arch(void) { struct device_node *root = find_path_device ("/"); - char *machine = NULL; + const char *machine = NULL; /* init to some ~sane value until calibrate_delay() runs */ loops_per_jiffy = 50000000/HZ; @@ -245,11 +264,16 @@ void __init chrp_setup_arch(void) _chrp_type = _CHRP_IBM; } else if (machine && strncmp(machine, "MOT", 3) == 0) { _chrp_type = _CHRP_Motorola; + } else if (machine && strncmp(machine, "TotalImpact,BRIQ-1", 18) == 0) { + _chrp_type = _CHRP_briq; + /* Map the SPOR register on briq and change the restart hook */ + briq_SPOR = (unsigned int *)ioremap(0xff0000e8, 4); + ppc_md.restart = briq_restart; } else { /* Let's assume it is an IBM chrp if all else fails */ _chrp_type = _CHRP_IBM; } - printk("chrp type = %x\n", _chrp_type); + printk("chrp type = %x [%s]\n", _chrp_type, chrp_names[_chrp_type]); rtas_initialize(); if (rtas_token("display-character") >= 0) @@ -328,7 +352,7 @@ static void __init chrp_find_openpic(void) struct device_node *np, *root; int len, i, j; int isu_size, idu_size; - unsigned int *iranges, *opprop = NULL; + const unsigned int *iranges, *opprop = NULL; int oplen = 0; unsigned long opaddr; int na = 1; @@ -338,8 +362,7 @@ static void __init chrp_find_openpic(void) return; root = of_find_node_by_path("/"); if (root) { - opprop = (unsigned int *) get_property - (root, "platform-open-pic", &oplen); + opprop = get_property(root, "platform-open-pic", &oplen); na = prom_n_addr_cells(root); } if (opprop && oplen >= na * sizeof(unsigned int)) { @@ -356,7 +379,7 @@ static void __init chrp_find_openpic(void) printk(KERN_INFO "OpenPIC at %lx\n", opaddr); - iranges = (unsigned int *) get_property(np, "interrupt-ranges", &len); + iranges = get_property(np, "interrupt-ranges", &len); if (iranges == NULL) len = 0; /* non-distributed mpic */ else @@ -442,8 +465,8 @@ static void __init chrp_find_8259(void) * from anyway */ for (np = find_devices("pci"); np != NULL; np = np->next) { - unsigned int *addrp = (unsigned int *) - get_property(np, "8259-interrupt-acknowledge", NULL); + const unsigned int *addrp = get_property(np, + "8259-interrupt-acknowledge", NULL); if (addrp == NULL) continue; @@ -502,7 +525,7 @@ void __init chrp_init2(void) { struct device_node *device; - unsigned int *p = NULL; + const unsigned int *p = NULL; #ifdef CONFIG_NVRAM chrp_nvram_init(); @@ -520,8 +543,7 @@ chrp_init2(void) */ device = find_devices("rtas"); if (device) - p = (unsigned int *) get_property - (device, "rtas-event-scan-rate", NULL); + p = get_property(device, "rtas-event-scan-rate", NULL); if (p && *p) { /* * Arrange to call chrp_event_scan at least *p times diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c index 5d393eb94935..e4f2b9df5e17 100644 --- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c +++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c @@ -95,7 +95,7 @@ void mpc7448_hpc2_fixup_irq(struct pci_dev *dev) { struct pci_controller *hose; struct device_node *node; - unsigned int *interrupt; + const unsigned int *interrupt; int busnr; int len; u8 slot; @@ -112,7 +112,7 @@ void mpc7448_hpc2_fixup_irq(struct pci_dev *dev) if (!node) printk(KERN_ERR "No pci node found\n"); - interrupt = (unsigned int *) 
get_property(node, "interrupt-map", &len); + interrupt = get_property(node, "interrupt-map", &len); slot = find_slot_by_devfn(interrupt, dev->devfn); pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); if (pin == 0 || pin > 4) @@ -141,9 +141,9 @@ static void __init mpc7448_hpc2_setup_arch(void) cpu = of_find_node_by_type(NULL, "cpu"); if (cpu != 0) { - unsigned int *fp; + const unsigned int *fp; - fp = (int *)get_property(cpu, "clock-frequency", NULL); + fp = get_property(cpu, "clock-frequency", NULL); if (fp != 0) loops_per_jiffy = *fp / HZ; else diff --git a/arch/powerpc/platforms/iseries/Kconfig b/arch/powerpc/platforms/iseries/Kconfig index 3d957a30c8c2..887b68804e6d 100644 --- a/arch/powerpc/platforms/iseries/Kconfig +++ b/arch/powerpc/platforms/iseries/Kconfig @@ -3,13 +3,17 @@ menu "iSeries device drivers" depends on PPC_ISERIES config VIOCONS - tristate "iSeries Virtual Console Support" + tristate "iSeries Virtual Console Support (Obsolete)" + help + This is the old virtual console driver for legacy iSeries. + You should use the iSeries Hypervisor Virtual Console + support instead. config VIODASD tristate "iSeries Virtual I/O disk support" help If you are running on an iSeries system and you want to use - virtual disks created and managed by OS/400, say Y. + virtual disks created and managed by OS/400, say Y. config VIOCD tristate "iSeries Virtual I/O CD support" diff --git a/arch/powerpc/platforms/iseries/dt.c b/arch/powerpc/platforms/iseries/dt.c index d194140c1ebf..e305deee7f44 100644 --- a/arch/powerpc/platforms/iseries/dt.c +++ b/arch/powerpc/platforms/iseries/dt.c @@ -1,5 +1,6 @@ /* - * Copyright (c) 2005-2006 Michael Ellerman, IBM Corporation + * Copyright (C) 2005-2006 Michael Ellerman, IBM Corporation + * Copyright (C) 2000-2004, IBM Corporation * * Description: * This file contains all the routines to build a flattened device @@ -33,13 +34,13 @@ #include <asm/iseries/hv_types.h> #include <asm/iseries/hv_lp_config.h> #include <asm/iseries/hv_call_xm.h> -#include <asm/iseries/it_exp_vpd_panel.h> #include <asm/udbg.h> #include "processor_vpd.h" #include "call_hpt.h" #include "call_pci.h" #include "pci.h" +#include "it_exp_vpd_panel.h" #ifdef DEBUG #define DBG(fmt...) udbg_printf(fmt) @@ -76,6 +77,43 @@ static char __initdata device_type_pci[] = "pci"; static char __initdata device_type_vdevice[] = "vdevice"; static char __initdata device_type_vscsi[] = "vscsi"; + +/* EBCDIC to ASCII conversion routines */ + +static unsigned char __init e2a(unsigned char x) +{ + switch (x) { + case 0x81 ... 0x89: + return x - 0x81 + 'a'; + case 0x91 ... 0x99: + return x - 0x91 + 'j'; + case 0xA2 ... 0xA9: + return x - 0xA2 + 's'; + case 0xC1 ... 0xC9: + return x - 0xC1 + 'A'; + case 0xD1 ... 0xD9: + return x - 0xD1 + 'J'; + case 0xE2 ... 0xE9: + return x - 0xE2 + 'S'; + case 0xF0 ... 
0xF9: + return x - 0xF0 + '0'; + } + return ' '; +} + +static unsigned char * __init strne2a(unsigned char *dest, + const unsigned char *src, size_t n) +{ + int i; + + n = strnlen(src, n); + + for (i = 0; i < n; i++) + dest[i] = e2a(src[i]); + + return dest; +} + static struct iseries_flat_dt * __init dt_init(void) { struct iseries_flat_dt *dt; @@ -298,7 +336,8 @@ static void __init dt_vdevices(struct iseries_flat_dt *dt) dt_prop_u32(dt, "#address-cells", 1); dt_prop_u32(dt, "#size-cells", 0); - dt_do_vdevice(dt, "vty", reg, -1, device_type_serial, NULL, 1); + dt_do_vdevice(dt, "vty", reg, -1, device_type_serial, + "IBM,iSeries-vty", 1); reg++; dt_do_vdevice(dt, "v-scsi", reg, -1, device_type_vscsi, diff --git a/arch/powerpc/platforms/iseries/hvlpconfig.c b/arch/powerpc/platforms/iseries/hvlpconfig.c index 663a1affb4bb..f0475f0b1853 100644 --- a/arch/powerpc/platforms/iseries/hvlpconfig.c +++ b/arch/powerpc/platforms/iseries/hvlpconfig.c @@ -18,9 +18,22 @@ #include <linux/module.h> #include <asm/iseries/hv_lp_config.h> +#include "it_lp_naca.h" HvLpIndex HvLpConfig_getLpIndex_outline(void) { return HvLpConfig_getLpIndex(); } EXPORT_SYMBOL(HvLpConfig_getLpIndex_outline); + +HvLpIndex HvLpConfig_getLpIndex(void) +{ + return itLpNaca.xLpIndex; +} +EXPORT_SYMBOL(HvLpConfig_getLpIndex); + +HvLpIndex HvLpConfig_getPrimaryLpIndex(void) +{ + return itLpNaca.xPrimaryLpIndex; +} +EXPORT_SYMBOL_GPL(HvLpConfig_getPrimaryLpIndex); diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c index e3bd2015f2c9..f4cbbcf8773a 100644 --- a/arch/powerpc/platforms/iseries/iommu.c +++ b/arch/powerpc/platforms/iseries/iommu.c @@ -88,6 +88,23 @@ static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages) } /* + * Structure passed to HvCallXm_getTceTableParms + */ +struct iommu_table_cb { + unsigned long itc_busno; /* Bus number for this tce table */ + unsigned long itc_start; /* Will be NULL for secondary */ + unsigned long itc_totalsize; /* Size (in pages) of whole table */ + unsigned long itc_offset; /* Index into real tce table of the + start of our section */ + unsigned long itc_size; /* Size (in pages) of our section */ + unsigned long itc_index; /* Index of this tce table */ + unsigned short itc_maxtables; /* Max num of tables for partition */ + unsigned char itc_virtbus; /* Flag to indicate virtual bus */ + unsigned char itc_slotno; /* IOA Tce Slot Index */ + unsigned char itc_rsvd[4]; +}; + +/* * Call Hv with the architected data structure to get TCE table info. * info. Put the returned data into the Linux representation of the * TCE table data. 
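[Editor's note] The e2a() added above for the iSeries device-tree code collapses the 36-case switch deleted from arch/powerpc/lib/e2a.c into GCC case ranges, and gains the lowercase rows (0x81, 0x91, 0xA2) that the old table lacked; EBCDIC letters sit in contiguous runs, so each range maps by a constant offset. A hypothetical user-space check of the same table:

    #include <stdio.h>

    static unsigned char e2a(unsigned char x)
    {
            switch (x) {
            case 0x81 ... 0x89: return x - 0x81 + 'a';
            case 0x91 ... 0x99: return x - 0x91 + 'j';
            case 0xA2 ... 0xA9: return x - 0xA2 + 's';
            case 0xC1 ... 0xC9: return x - 0xC1 + 'A';
            case 0xD1 ... 0xD9: return x - 0xD1 + 'J';
            case 0xE2 ... 0xE9: return x - 0xE2 + 'S';
            case 0xF0 ... 0xF9: return x - 0xF0 + '0';
            }
            return ' ';
    }

    int main(void)
    {
            /* 0xC8 0xC5 0xD3 0xD3 0xD6 is "HELLO" in EBCDIC */
            const unsigned char hello[] = { 0xC8, 0xC5, 0xD3, 0xD3, 0xD6 };
            unsigned int i;

            for (i = 0; i < sizeof(hello); i++)
                    putchar(e2a(hello[i]));
            putchar('\n'); /* prints HELLO */
            return 0;
    }
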
@@ -162,7 +179,7 @@ void iommu_devnode_init_iSeries(struct device_node *dn) { struct iommu_table *tbl; struct pci_dn *pdn = PCI_DN(dn); - u32 *lsn = (u32 *)get_property(dn, "linux,logical-slot-number", NULL); + const u32 *lsn = get_property(dn, "linux,logical-slot-number", NULL); BUG_ON(lsn == NULL); diff --git a/include/asm-powerpc/iseries/it_exp_vpd_panel.h b/arch/powerpc/platforms/iseries/it_exp_vpd_panel.h index 304a609ae21a..6de9097b7f57 100644 --- a/include/asm-powerpc/iseries/it_exp_vpd_panel.h +++ b/arch/powerpc/platforms/iseries/it_exp_vpd_panel.h @@ -15,8 +15,8 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#ifndef _ASM_POWERPC_ISERIES_IT_EXT_VPD_PANEL_H -#define _ASM_POWERPC_ISERIES_IT_EXT_VPD_PANEL_H +#ifndef _PLATFORMS_ISERIES_IT_EXT_VPD_PANEL_H +#define _PLATFORMS_ISERIES_IT_EXT_VPD_PANEL_H /* * This struct maps the panel information @@ -48,4 +48,4 @@ struct ItExtVpdPanel { extern struct ItExtVpdPanel xItExtVpdPanel; -#endif /* _ASM_POWERPC_ISERIES_IT_EXT_VPD_PANEL_H */ +#endif /* _PLATFORMS_ISERIES_IT_EXT_VPD_PANEL_H */ diff --git a/include/asm-powerpc/iseries/it_lp_naca.h b/arch/powerpc/platforms/iseries/it_lp_naca.h index 4fdcf052927f..9bbf58986819 100644 --- a/include/asm-powerpc/iseries/it_lp_naca.h +++ b/arch/powerpc/platforms/iseries/it_lp_naca.h @@ -15,8 +15,8 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#ifndef _ASM_POWERPC_ISERIES_IT_LP_NACA_H -#define _ASM_POWERPC_ISERIES_IT_LP_NACA_H +#ifndef _PLATFORMS_ISERIES_IT_LP_NACA_H +#define _PLATFORMS_ISERIES_IT_LP_NACA_H #include <linux/types.h> @@ -77,4 +77,4 @@ extern struct ItLpNaca itLpNaca; #define ITLPNACA_HWSYNCEDTBS 0x20 /* Hardware synced TBs */ #define ITLPNACA_HMTINT 0x10 /* Utilize MHT for interrupts */ -#endif /* _ASM_POWERPC_ISERIES_IT_LP_NACA_H */ +#endif /* _PLATFORMS_ISERIES_IT_LP_NACA_H */ diff --git a/arch/powerpc/platforms/iseries/lpardata.c b/arch/powerpc/platforms/iseries/lpardata.c index a7769445d6c7..8162049bb04d 100644 --- a/arch/powerpc/platforms/iseries/lpardata.c +++ b/arch/powerpc/platforms/iseries/lpardata.c @@ -13,12 +13,10 @@ #include <asm/processor.h> #include <asm/ptrace.h> #include <asm/abs_addr.h> -#include <asm/iseries/it_lp_naca.h> #include <asm/lppaca.h> #include <asm/iseries/it_lp_reg_save.h> #include <asm/paca.h> #include <asm/iseries/lpar_map.h> -#include <asm/iseries/it_exp_vpd_panel.h> #include <asm/iseries/it_lp_queue.h> #include "naca.h" @@ -27,6 +25,8 @@ #include "ipl_parms.h" #include "processor_vpd.h" #include "release_data.h" +#include "it_exp_vpd_panel.h" +#include "it_lp_naca.h" /* The HvReleaseData is the root of the information shared between * the hypervisor and Linux. 
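[Editor's note] A small pattern worth noting in the lpardata.c hunks just below: objects such as xItIplParmsReal and xItExtVpdPanel "may be filled in by the hypervisor so cannot end up in the BSS", because early boot zeroes the BSS after the hypervisor has already written them. Pinning an otherwise uninitialised object into .data avoids that. A minimal stand-alone illustration, with invented names:

    /* Without an initialiser this object would normally land in the BSS
     * and be zeroed during early boot, wiping whatever the hypervisor
     * wrote; the section attribute forces it into initialised data. */
    struct vpd_example {
            char data[64];
    };

    struct vpd_example vpd_area __attribute__((__section__(".data")));
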
@@ -127,14 +127,12 @@ struct ItLpNaca itLpNaca = { (u64)instruction_access_slb_iSeries /* 0x480 I-SLB */ } }; -EXPORT_SYMBOL(itLpNaca); /* May be filled in by the hypervisor so cannot end up in the BSS */ struct ItIplParmsReal xItIplParmsReal __attribute__((__section__(".data"))); /* May be filled in by the hypervisor so cannot end up in the BSS */ struct ItExtVpdPanel xItExtVpdPanel __attribute__((__section__(".data"))); -EXPORT_SYMBOL(xItExtVpdPanel); #define maxPhysicalProcessors 32 diff --git a/arch/powerpc/platforms/iseries/lpevents.c b/arch/powerpc/platforms/iseries/lpevents.c index 2a9f81ea27d6..98c1c2440aad 100644 --- a/arch/powerpc/platforms/iseries/lpevents.c +++ b/arch/powerpc/platforms/iseries/lpevents.c @@ -20,7 +20,7 @@ #include <asm/iseries/it_lp_queue.h> #include <asm/iseries/hv_lp_event.h> #include <asm/iseries/hv_call_event.h> -#include <asm/iseries/it_lp_naca.h> +#include "it_lp_naca.h" /* * The LpQueue is used to pass event data from the hypervisor to diff --git a/arch/powerpc/platforms/iseries/main_store.h b/arch/powerpc/platforms/iseries/main_store.h index 74f6889f834f..1a7a3f50e40b 100644 --- a/arch/powerpc/platforms/iseries/main_store.h +++ b/arch/powerpc/platforms/iseries/main_store.h @@ -61,9 +61,9 @@ struct IoHriMainStoreSegment4 { }; /* Main Store VPD for Power4 */ -struct IoHriMainStoreChipInfo1 { - u32 chipMfgID __attribute((packed)); - char chipECLevel[4] __attribute((packed)); +struct __attribute((packed)) IoHriMainStoreChipInfo1 { + u32 chipMfgID; + char chipECLevel[4]; }; struct IoHriMainStoreVpdIdData { @@ -73,72 +73,72 @@ struct IoHriMainStoreVpdIdData { char serialNumber[12]; }; -struct IoHriMainStoreVpdFruData { - char fruLabel[8] __attribute((packed)); - u8 numberOfSlots __attribute((packed)); - u8 pluggingType __attribute((packed)); - u16 slotMapIndex __attribute((packed)); +struct __attribute((packed)) IoHriMainStoreVpdFruData { + char fruLabel[8]; + u8 numberOfSlots; + u8 pluggingType; + u16 slotMapIndex; }; -struct IoHriMainStoreAdrRangeBlock { - void *blockStart __attribute((packed)); - void *blockEnd __attribute((packed)); - u32 blockProcChipId __attribute((packed)); +struct __attribute((packed)) IoHriMainStoreAdrRangeBlock { + void *blockStart; + void *blockEnd; + u32 blockProcChipId; }; #define MaxAreaAdrRangeBlocks 4 -struct IoHriMainStoreArea4 { - u32 msVpdFormat __attribute((packed)); - u8 containedVpdType __attribute((packed)); - u8 reserved1 __attribute((packed)); - u16 reserved2 __attribute((packed)); - - u64 msExists __attribute((packed)); - u64 msFunctional __attribute((packed)); - - u32 memorySize __attribute((packed)); - u32 procNodeId __attribute((packed)); - - u32 numAdrRangeBlocks __attribute((packed)); - struct IoHriMainStoreAdrRangeBlock xAdrRangeBlock[MaxAreaAdrRangeBlocks] __attribute((packed)); - - struct IoHriMainStoreChipInfo1 chipInfo0 __attribute((packed)); - struct IoHriMainStoreChipInfo1 chipInfo1 __attribute((packed)); - struct IoHriMainStoreChipInfo1 chipInfo2 __attribute((packed)); - struct IoHriMainStoreChipInfo1 chipInfo3 __attribute((packed)); - struct IoHriMainStoreChipInfo1 chipInfo4 __attribute((packed)); - struct IoHriMainStoreChipInfo1 chipInfo5 __attribute((packed)); - struct IoHriMainStoreChipInfo1 chipInfo6 __attribute((packed)); - struct IoHriMainStoreChipInfo1 chipInfo7 __attribute((packed)); - - void *msRamAreaArray __attribute((packed)); - u32 msRamAreaArrayNumEntries __attribute((packed)); - u32 msRamAreaArrayEntrySize __attribute((packed)); - - u32 numaDimmExists __attribute((packed)); - u32 
numaDimmFunctional __attribute((packed)); - void *numaDimmArray __attribute((packed)); - u32 numaDimmArrayNumEntries __attribute((packed)); - u32 numaDimmArrayEntrySize __attribute((packed)); - - struct IoHriMainStoreVpdIdData idData __attribute((packed)); - - u64 powerData __attribute((packed)); - u64 cardAssemblyPartNum __attribute((packed)); - u64 chipSerialNum __attribute((packed)); - - u64 reserved3 __attribute((packed)); - char reserved4[16] __attribute((packed)); - - struct IoHriMainStoreVpdFruData fruData __attribute((packed)); - - u8 vpdPortNum __attribute((packed)); - u8 reserved5 __attribute((packed)); - u8 frameId __attribute((packed)); - u8 rackUnit __attribute((packed)); - char asciiKeywordVpd[256] __attribute((packed)); - u32 reserved6 __attribute((packed)); +struct __attribute((packed)) IoHriMainStoreArea4 { + u32 msVpdFormat; + u8 containedVpdType; + u8 reserved1; + u16 reserved2; + + u64 msExists; + u64 msFunctional; + + u32 memorySize; + u32 procNodeId; + + u32 numAdrRangeBlocks; + struct IoHriMainStoreAdrRangeBlock xAdrRangeBlock[MaxAreaAdrRangeBlocks]; + + struct IoHriMainStoreChipInfo1 chipInfo0; + struct IoHriMainStoreChipInfo1 chipInfo1; + struct IoHriMainStoreChipInfo1 chipInfo2; + struct IoHriMainStoreChipInfo1 chipInfo3; + struct IoHriMainStoreChipInfo1 chipInfo4; + struct IoHriMainStoreChipInfo1 chipInfo5; + struct IoHriMainStoreChipInfo1 chipInfo6; + struct IoHriMainStoreChipInfo1 chipInfo7; + + void *msRamAreaArray; + u32 msRamAreaArrayNumEntries; + u32 msRamAreaArrayEntrySize; + + u32 numaDimmExists; + u32 numaDimmFunctional; + void *numaDimmArray; + u32 numaDimmArrayNumEntries; + u32 numaDimmArrayEntrySize; + + struct IoHriMainStoreVpdIdData idData; + + u64 powerData; + u64 cardAssemblyPartNum; + u64 chipSerialNum; + + u64 reserved3; + char reserved4[16]; + + struct IoHriMainStoreVpdFruData fruData; + + u8 vpdPortNum; + u8 reserved5; + u8 frameId; + u8 rackUnit; + char asciiKeywordVpd[256]; + u32 reserved6; }; diff --git a/arch/powerpc/platforms/iseries/pci.c b/arch/powerpc/platforms/iseries/pci.c index 35bcc98111f5..3eb12065df23 100644 --- a/arch/powerpc/platforms/iseries/pci.c +++ b/arch/powerpc/platforms/iseries/pci.c @@ -34,6 +34,7 @@ #include <asm/pci-bridge.h> #include <asm/iommu.h> #include <asm/abs_addr.h> +#include <asm/firmware.h> #include <asm/iseries/hv_call_xm.h> #include <asm/iseries/mf.h> @@ -176,12 +177,12 @@ void iSeries_pcibios_init(void) } while ((node = of_get_next_child(root, node)) != NULL) { HvBusNumber bus; - u32 *busp; + const u32 *busp; if ((node->type == NULL) || (strcmp(node->type, "pci") != 0)) continue; - busp = (u32 *)get_property(node, "bus-range", NULL); + busp = get_property(node, "bus-range", NULL); if (busp == NULL) continue; bus = *busp; @@ -221,10 +222,9 @@ void __init iSeries_pci_final_fixup(void) if (node != NULL) { struct pci_dn *pdn = PCI_DN(node); - u32 *agent; + const u32 *agent; - agent = (u32 *)get_property(node, "linux,agent-id", - NULL); + agent = get_property(node, "linux,agent-id", NULL); if ((pdn != NULL) && (agent != NULL)) { u8 irq = iSeries_allocate_IRQ(pdn->busno, 0, pdn->bussubno); @@ -271,46 +271,6 @@ void pcibios_fixup_resources(struct pci_dev *pdev) } /* - * I/0 Memory copy MUST use mmio commands on iSeries - * To do; For performance, include the hv call directly - */ -void iSeries_memset_io(volatile void __iomem *dest, char c, size_t Count) -{ - u8 ByteValue = c; - long NumberOfBytes = Count; - - while (NumberOfBytes > 0) { - iSeries_Write_Byte(ByteValue, dest++); - -- NumberOfBytes; - } -} 
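/*
 * Editor's sketch, not part of the patch: this helper and the two
 * memcpy variants removed below are re-introduced later in this file
 * as the generic memset_io()/memcpy_toio()/memcpy_fromio(), which
 * test firmware_has_feature(FW_FEATURE_ISERIES) at run time and fall
 * back to the eeh_* accessors elsewhere.  On iSeries each byte still
 * costs one HvCall, so the replacement loops stay byte-at-a-time.
 */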
-EXPORT_SYMBOL(iSeries_memset_io); - -void iSeries_memcpy_toio(volatile void __iomem *dest, void *source, size_t count) -{ - char *src = source; - long NumberOfBytes = count; - - while (NumberOfBytes > 0) { - iSeries_Write_Byte(*src++, dest++); - -- NumberOfBytes; - } -} -EXPORT_SYMBOL(iSeries_memcpy_toio); - -void iSeries_memcpy_fromio(void *dest, const volatile void __iomem *src, size_t count) -{ - char *dst = dest; - long NumberOfBytes = count; - - while (NumberOfBytes > 0) { - *dst++ = iSeries_Read_Byte(src++); - -- NumberOfBytes; - } -} -EXPORT_SYMBOL(iSeries_memcpy_fromio); - -/* * Look down the chain to find the matching Device Device */ static struct device_node *find_Device_Node(int bus, int devfn) @@ -492,7 +452,7 @@ static inline struct device_node *xlate_iomm_address( * iSeries_Read_Word = Read Word (16 bit) * iSeries_Read_Long = Read Long (32 bit) */ -u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress) +static u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress) { u64 BarOffset; u64 dsa; @@ -519,9 +479,8 @@ u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress) return (u8)ret.value; } -EXPORT_SYMBOL(iSeries_Read_Byte); -u16 iSeries_Read_Word(const volatile void __iomem *IoAddress) +static u16 iSeries_Read_Word(const volatile void __iomem *IoAddress) { u64 BarOffset; u64 dsa; @@ -549,9 +508,8 @@ u16 iSeries_Read_Word(const volatile void __iomem *IoAddress) return swab16((u16)ret.value); } -EXPORT_SYMBOL(iSeries_Read_Word); -u32 iSeries_Read_Long(const volatile void __iomem *IoAddress) +static u32 iSeries_Read_Long(const volatile void __iomem *IoAddress) { u64 BarOffset; u64 dsa; @@ -579,7 +537,6 @@ u32 iSeries_Read_Long(const volatile void __iomem *IoAddress) return swab32((u32)ret.value); } -EXPORT_SYMBOL(iSeries_Read_Long); /* * Write MM I/O Instructions for the iSeries @@ -588,7 +545,7 @@ EXPORT_SYMBOL(iSeries_Read_Long); * iSeries_Write_Word = Write Word(16 bit) * iSeries_Write_Long = Write Long(32 bit) */ -void iSeries_Write_Byte(u8 data, volatile void __iomem *IoAddress) +static void iSeries_Write_Byte(u8 data, volatile void __iomem *IoAddress) { u64 BarOffset; u64 dsa; @@ -613,9 +570,8 @@ void iSeries_Write_Byte(u8 data, volatile void __iomem *IoAddress) rc = HvCall4(HvCallPciBarStore8, dsa, BarOffset, data, 0); } while (CheckReturnCode("WWB", DevNode, &retry, rc) != 0); } -EXPORT_SYMBOL(iSeries_Write_Byte); -void iSeries_Write_Word(u16 data, volatile void __iomem *IoAddress) +static void iSeries_Write_Word(u16 data, volatile void __iomem *IoAddress) { u64 BarOffset; u64 dsa; @@ -640,9 +596,8 @@ void iSeries_Write_Word(u16 data, volatile void __iomem *IoAddress) rc = HvCall4(HvCallPciBarStore16, dsa, BarOffset, swab16(data), 0); } while (CheckReturnCode("WWW", DevNode, &retry, rc) != 0); } -EXPORT_SYMBOL(iSeries_Write_Word); -void iSeries_Write_Long(u32 data, volatile void __iomem *IoAddress) +static void iSeries_Write_Long(u32 data, volatile void __iomem *IoAddress) { u64 BarOffset; u64 dsa; @@ -667,4 +622,224 @@ void iSeries_Write_Long(u32 data, volatile void __iomem *IoAddress) rc = HvCall4(HvCallPciBarStore32, dsa, BarOffset, swab32(data), 0); } while (CheckReturnCode("WWL", DevNode, &retry, rc) != 0); } -EXPORT_SYMBOL(iSeries_Write_Long); + +extern unsigned char __raw_readb(const volatile void __iomem *addr) +{ + BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES)); + + return *(volatile unsigned char __force *)addr; +} +EXPORT_SYMBOL(__raw_readb); + +extern unsigned short __raw_readw(const volatile void __iomem *addr) +{ + 
BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES)); + + return *(volatile unsigned short __force *)addr; +} +EXPORT_SYMBOL(__raw_readw); + +extern unsigned int __raw_readl(const volatile void __iomem *addr) +{ + BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES)); + + return *(volatile unsigned int __force *)addr; +} +EXPORT_SYMBOL(__raw_readl); + +extern unsigned long __raw_readq(const volatile void __iomem *addr) +{ + BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES)); + + return *(volatile unsigned long __force *)addr; +} +EXPORT_SYMBOL(__raw_readq); + +extern void __raw_writeb(unsigned char v, volatile void __iomem *addr) +{ + BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES)); + + *(volatile unsigned char __force *)addr = v; +} +EXPORT_SYMBOL(__raw_writeb); + +extern void __raw_writew(unsigned short v, volatile void __iomem *addr) +{ + BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES)); + + *(volatile unsigned short __force *)addr = v; +} +EXPORT_SYMBOL(__raw_writew); + +extern void __raw_writel(unsigned int v, volatile void __iomem *addr) +{ + BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES)); + + *(volatile unsigned int __force *)addr = v; +} +EXPORT_SYMBOL(__raw_writel); + +extern void __raw_writeq(unsigned long v, volatile void __iomem *addr) +{ + BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES)); + + *(volatile unsigned long __force *)addr = v; +} +EXPORT_SYMBOL(__raw_writeq); + +int in_8(const volatile unsigned char __iomem *addr) +{ + if (firmware_has_feature(FW_FEATURE_ISERIES)) + return iSeries_Read_Byte(addr); + return __in_8(addr); +} +EXPORT_SYMBOL(in_8); + +void out_8(volatile unsigned char __iomem *addr, int val) +{ + if (firmware_has_feature(FW_FEATURE_ISERIES)) + iSeries_Write_Byte(val, addr); + else + __out_8(addr, val); +} +EXPORT_SYMBOL(out_8); + +int in_le16(const volatile unsigned short __iomem *addr) +{ + if (firmware_has_feature(FW_FEATURE_ISERIES)) + return iSeries_Read_Word(addr); + return __in_le16(addr); +} +EXPORT_SYMBOL(in_le16); + +int in_be16(const volatile unsigned short __iomem *addr) +{ + BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES)); + + return __in_be16(addr); +} +EXPORT_SYMBOL(in_be16); + +void out_le16(volatile unsigned short __iomem *addr, int val) +{ + if (firmware_has_feature(FW_FEATURE_ISERIES)) + iSeries_Write_Word(val, addr); + else + __out_le16(addr, val); +} +EXPORT_SYMBOL(out_le16); + +void out_be16(volatile unsigned short __iomem *addr, int val) +{ + BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES)); + + __out_be16(addr, val); +} +EXPORT_SYMBOL(out_be16); + +unsigned in_le32(const volatile unsigned __iomem *addr) +{ + if (firmware_has_feature(FW_FEATURE_ISERIES)) + return iSeries_Read_Long(addr); + return __in_le32(addr); +} +EXPORT_SYMBOL(in_le32); + +unsigned in_be32(const volatile unsigned __iomem *addr) +{ + BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES)); + + return __in_be32(addr); +} +EXPORT_SYMBOL(in_be32); + +void out_le32(volatile unsigned __iomem *addr, int val) +{ + if (firmware_has_feature(FW_FEATURE_ISERIES)) + iSeries_Write_Long(val, addr); + else + __out_le32(addr, val); +} +EXPORT_SYMBOL(out_le32); + +void out_be32(volatile unsigned __iomem *addr, int val) +{ + BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES)); + + __out_be32(addr, val); +} +EXPORT_SYMBOL(out_be32); + +unsigned long in_le64(const volatile unsigned long __iomem *addr) +{ + BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES)); + + return __in_le64(addr); +} +EXPORT_SYMBOL(in_le64); + +unsigned long in_be64(const volatile unsigned long __iomem *addr) +{ + 
BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES)); + + return __in_be64(addr); +} +EXPORT_SYMBOL(in_be64); + +void out_le64(volatile unsigned long __iomem *addr, unsigned long val) +{ + BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES)); + + __out_le64(addr, val); +} +EXPORT_SYMBOL(out_le64); + +void out_be64(volatile unsigned long __iomem *addr, unsigned long val) +{ + BUG_ON(firmware_has_feature(FW_FEATURE_ISERIES)); + + __out_be64(addr, val); +} +EXPORT_SYMBOL(out_be64); + +void memset_io(volatile void __iomem *addr, int c, unsigned long n) +{ + if (firmware_has_feature(FW_FEATURE_ISERIES)) { + volatile char __iomem *d = addr; + + while (n-- > 0) { + iSeries_Write_Byte(c, d++); + } + } else + eeh_memset_io(addr, c, n); +} +EXPORT_SYMBOL(memset_io); + +void memcpy_fromio(void *dest, const volatile void __iomem *src, + unsigned long n) +{ + if (firmware_has_feature(FW_FEATURE_ISERIES)) { + char *d = dest; + const volatile char __iomem *s = src; + + while (n-- > 0) { + *d++ = iSeries_Read_Byte(s++); + } + } else + eeh_memcpy_fromio(dest, src, n); +} +EXPORT_SYMBOL(memcpy_fromio); + +void memcpy_toio(volatile void __iomem *dest, const void *src, unsigned long n) +{ + if (firmware_has_feature(FW_FEATURE_ISERIES)) { + const char *s = src; + volatile char __iomem *d = dest; + + while (n-- > 0) { + iSeries_Write_Byte(*s++, d++); + } + } else + eeh_memcpy_toio(dest, src, n); +} +EXPORT_SYMBOL(memcpy_toio); diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c index c9605d773a77..7f1953066ff8 100644 --- a/arch/powerpc/platforms/iseries/setup.c +++ b/arch/powerpc/platforms/iseries/setup.c @@ -59,6 +59,7 @@ #include "irq.h" #include "vpd_areas.h" #include "processor_vpd.h" +#include "it_lp_naca.h" #include "main_store.h" #include "call_sm.h" #include "call_hpt.h" diff --git a/arch/powerpc/platforms/iseries/viopath.c b/arch/powerpc/platforms/iseries/viopath.c index 622a30149b48..9baa4ee82592 100644 --- a/arch/powerpc/platforms/iseries/viopath.c +++ b/arch/powerpc/platforms/iseries/viopath.c @@ -41,8 +41,8 @@ #include <asm/system.h> #include <asm/uaccess.h> +#include <asm/prom.h> #include <asm/iseries/hv_types.h> -#include <asm/iseries/it_exp_vpd_panel.h> #include <asm/iseries/hv_lp_event.h> #include <asm/iseries/hv_lp_config.h> #include <asm/iseries/mf.h> @@ -116,6 +116,8 @@ static int proc_viopath_show(struct seq_file *m, void *v) dma_addr_t handle; HvLpEvent_Rc hvrc; DECLARE_MUTEX_LOCKED(Semaphore); + struct device_node *node; + const char *sysid; buf = kmalloc(HW_PAGE_SIZE, GFP_KERNEL); if (!buf) @@ -143,20 +145,26 @@ static int proc_viopath_show(struct seq_file *m, void *v) buf[HW_PAGE_SIZE-1] = '\0'; seq_printf(m, "%s", buf); - seq_printf(m, "AVAILABLE_VETH=%x\n", vlanMap); - seq_printf(m, "SRLNBR=%c%c%c%c%c%c%c\n", - e2a(xItExtVpdPanel.mfgID[2]), - e2a(xItExtVpdPanel.mfgID[3]), - e2a(xItExtVpdPanel.systemSerial[1]), - e2a(xItExtVpdPanel.systemSerial[2]), - e2a(xItExtVpdPanel.systemSerial[3]), - e2a(xItExtVpdPanel.systemSerial[4]), - e2a(xItExtVpdPanel.systemSerial[5])); dma_unmap_single(iSeries_vio_dev, handle, HW_PAGE_SIZE, DMA_FROM_DEVICE); kfree(buf); + seq_printf(m, "AVAILABLE_VETH=%x\n", vlanMap); + + node = of_find_node_by_path("/"); + sysid = NULL; + if (node != NULL) + sysid = get_property(node, "system-id", NULL); + + if (sysid == NULL) + seq_printf(m, "SRLNBR=<UNKNOWN>\n"); + else + /* Skip "IBM," on front of serial number, see dt.c */ + seq_printf(m, "SRLNBR=%s\n", sysid + 4); + + of_node_put(node); + return 0; } diff --git 
a/arch/powerpc/platforms/iseries/vpdinfo.c b/arch/powerpc/platforms/iseries/vpdinfo.c index 23a6d1e5b429..9f83878a0c2e 100644 --- a/arch/powerpc/platforms/iseries/vpdinfo.c +++ b/arch/powerpc/platforms/iseries/vpdinfo.c @@ -188,7 +188,7 @@ static void __init iSeries_Parse_Vpd(u8 *VpdData, int VpdDataLen, { u8 *TagPtr = VpdData; int DataLen = VpdDataLen - 3; - u8 PhbId; + u8 PhbId = 0xff; while ((*TagPtr != VpdEndOfAreaTag) && (DataLen > 0)) { int AreaLen = *(TagPtr + 1) + (*(TagPtr + 2) * 256); @@ -205,15 +205,16 @@ static void __init iSeries_Parse_Vpd(u8 *VpdData, int VpdDataLen, } } -static void __init iSeries_Get_Location_Code(u16 bus, HvAgentId agent, +static int __init iSeries_Get_Location_Code(u16 bus, HvAgentId agent, u8 *frame, char card[4]) { + int status = 0; int BusVpdLen = 0; u8 *BusVpdPtr = kmalloc(BUS_VPDSIZE, GFP_KERNEL); if (BusVpdPtr == NULL) { printk("PCI: Bus VPD Buffer allocation failure.\n"); - return; + return 0; } BusVpdLen = HvCallPci_getBusVpd(bus, iseries_hv_addr(BusVpdPtr), BUS_VPDSIZE); @@ -228,8 +229,10 @@ static void __init iSeries_Get_Location_Code(u16 bus, HvAgentId agent, goto out_free; } iSeries_Parse_Vpd(BusVpdPtr, BusVpdLen, agent, frame, card); + status = 1; out_free: kfree(BusVpdPtr); + return status; } /* @@ -246,7 +249,7 @@ void __init iSeries_Device_Information(struct pci_dev *PciDev, int count) struct device_node *DevNode = PciDev->sysdata; struct pci_dn *pdn; u16 bus; - u8 frame; + u8 frame = 0; char card[4]; HvSubBusNumber subbus; HvAgentId agent; @@ -262,10 +265,11 @@ void __init iSeries_Device_Information(struct pci_dev *PciDev, int count) subbus = pdn->bussubno; agent = ISERIES_PCI_AGENTID(ISERIES_GET_DEVICE_FROM_SUBBUS(subbus), ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus)); - iSeries_Get_Location_Code(bus, agent, &frame, card); - printk("%d. PCI: Bus%3d, Device%3d, Vendor %04X Frame%3d, Card %4s ", - count, bus, PCI_SLOT(PciDev->devfn), PciDev->vendor, - frame, card); - printk("0x%04X\n", (int)(PciDev->class >> 8)); + if (iSeries_Get_Location_Code(bus, agent, &frame, card)) { + printk("%d. 
PCI: Bus%3d, Device%3d, Vendor %04X Frame%3d, " + "Card %4s 0x%04X\n", count, bus, + PCI_SLOT(PciDev->devfn), PciDev->vendor, frame, + card, (int)(PciDev->class >> 8)); + } } diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c index 63a1670d3bfd..c3aa46b8e2b9 100644 --- a/arch/powerpc/platforms/maple/pci.c +++ b/arch/powerpc/platforms/maple/pci.c @@ -38,16 +38,16 @@ static struct pci_controller *u3_agp, *u3_ht; static int __init fixup_one_level_bus_range(struct device_node *node, int higher) { for (; node != 0;node = node->sibling) { - int * bus_range; - unsigned int *class_code; + const int *bus_range; + const unsigned int *class_code; int len; /* For PCI<->PCI bridges or CardBus bridges, we go down */ - class_code = (unsigned int *) get_property(node, "class-code", NULL); + class_code = get_property(node, "class-code", NULL); if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI && (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) continue; - bus_range = (int *) get_property(node, "bus-range", &len); + bus_range = get_property(node, "bus-range", &len); if (bus_range != NULL && len > 2 * sizeof(int)) { if (bus_range[1] > higher) higher = bus_range[1]; @@ -65,30 +65,36 @@ static int __init fixup_one_level_bus_range(struct device_node *node, int higher */ static void __init fixup_bus_range(struct device_node *bridge) { - int * bus_range; + int *bus_range; + struct property *prop; int len; /* Lookup the "bus-range" property for the hose */ - bus_range = (int *) get_property(bridge, "bus-range", &len); - if (bus_range == NULL || len < 2 * sizeof(int)) { + prop = of_find_property(bridge, "bus-range", &len); + if (prop == NULL || prop->value == NULL || len < 2 * sizeof(int)) { printk(KERN_WARNING "Can't get bus-range for %s\n", bridge->full_name); return; } + bus_range = (int *)prop->value; bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]); } -#define U3_AGP_CFA0(devfn, off) \ - ((1 << (unsigned long)PCI_SLOT(dev_fn)) \ - | (((unsigned long)PCI_FUNC(dev_fn)) << 8) \ - | (((unsigned long)(off)) & 0xFCUL)) +static unsigned long u3_agp_cfa0(u8 devfn, u8 off) +{ + return (1 << (unsigned long)PCI_SLOT(devfn)) | + ((unsigned long)PCI_FUNC(devfn) << 8) | + ((unsigned long)off & 0xFCUL); +} -#define U3_AGP_CFA1(bus, devfn, off) \ - ((((unsigned long)(bus)) << 16) \ - |(((unsigned long)(devfn)) << 8) \ - |(((unsigned long)(off)) & 0xFCUL) \ - |1UL) +static unsigned long u3_agp_cfa1(u8 bus, u8 devfn, u8 off) +{ + return ((unsigned long)bus << 16) | + ((unsigned long)devfn << 8) | + ((unsigned long)off & 0xFCUL) | + 1UL; +} static unsigned long u3_agp_cfg_access(struct pci_controller* hose, u8 bus, u8 dev_fn, u8 offset) @@ -98,9 +104,9 @@ static unsigned long u3_agp_cfg_access(struct pci_controller* hose, if (bus == hose->first_busno) { if (dev_fn < (11 << 3)) return 0; - caddr = U3_AGP_CFA0(dev_fn, offset); + caddr = u3_agp_cfa0(dev_fn, offset); } else - caddr = U3_AGP_CFA1(bus, dev_fn, offset); + caddr = u3_agp_cfa1(bus, dev_fn, offset); /* Uninorth will return garbage if we don't read back the value ! 
*/ do { @@ -182,13 +188,15 @@ static struct pci_ops u3_agp_pci_ops = u3_agp_write_config }; +static unsigned long u3_ht_cfa0(u8 devfn, u8 off) +{ + return (devfn << 8) | off; +} -#define U3_HT_CFA0(devfn, off) \ - ((((unsigned long)devfn) << 8) | offset) -#define U3_HT_CFA1(bus, devfn, off) \ - (U3_HT_CFA0(devfn, off) \ - + (((unsigned long)bus) << 16) \ - + 0x01000000UL) +static unsigned long u3_ht_cfa1(u8 bus, u8 devfn, u8 off) +{ + return u3_ht_cfa0(devfn, off) + (bus << 16) + 0x01000000UL; +} static unsigned long u3_ht_cfg_access(struct pci_controller* hose, u8 bus, u8 devfn, u8 offset) @@ -196,9 +204,9 @@ static unsigned long u3_ht_cfg_access(struct pci_controller* hose, if (bus == hose->first_busno) { if (PCI_SLOT(devfn) == 0) return 0; - return ((unsigned long)hose->cfg_data) + U3_HT_CFA0(devfn, offset); + return ((unsigned long)hose->cfg_data) + u3_ht_cfa0(devfn, offset); } else - return ((unsigned long)hose->cfg_data) + U3_HT_CFA1(bus, devfn, offset); + return ((unsigned long)hose->cfg_data) + u3_ht_cfa1(bus, devfn, offset); } static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn, @@ -211,6 +219,9 @@ static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn, if (hose == NULL) return PCIBIOS_DEVICE_NOT_FOUND; + if (offset > 0xff) + return PCIBIOS_BAD_REGISTER_NUMBER; + addr = u3_ht_cfg_access(hose, bus->number, devfn, offset); if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; @@ -243,6 +254,9 @@ static int u3_ht_write_config(struct pci_bus *bus, unsigned int devfn, if (hose == NULL) return PCIBIOS_DEVICE_NOT_FOUND; + if (offset > 0xff) + return PCIBIOS_BAD_REGISTER_NUMBER; + addr = u3_ht_cfg_access(hose, bus->number, devfn, offset); if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; @@ -314,12 +328,12 @@ static int __init add_bridge(struct device_node *dev) int len; struct pci_controller *hose; char* disp_name; - int *bus_range; + const int *bus_range; int primary = 1; DBG("Adding PCI host bridge %s\n", dev->full_name); - bus_range = (int *) get_property(dev, "bus-range", &len); + bus_range = get_property(dev, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) { printk(KERN_WARNING "Can't get bus-range for %s, assume bus 0\n", dev->full_name); diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c index 57567dfb9819..fe6b9bff61b9 100644 --- a/arch/powerpc/platforms/maple/setup.c +++ b/arch/powerpc/platforms/maple/setup.c @@ -99,8 +99,7 @@ static unsigned long maple_find_nvram_base(void) static void maple_restart(char *cmd) { unsigned int maple_nvram_base; - unsigned int maple_nvram_offset; - unsigned int maple_nvram_command; + const unsigned int *maple_nvram_offset, *maple_nvram_command; struct device_node *sp; maple_nvram_base = maple_find_nvram_base(); @@ -113,14 +112,12 @@ static void maple_restart(char *cmd) printk(KERN_EMERG "Maple: Unable to find Service Processor\n"); goto fail; } - maple_nvram_offset = *(unsigned int*) get_property(sp, - "restart-addr", NULL); - maple_nvram_command = *(unsigned int*) get_property(sp, - "restart-value", NULL); + maple_nvram_offset = get_property(sp, "restart-addr", NULL); + maple_nvram_command = get_property(sp, "restart-value", NULL); of_node_put(sp); /* send command */ - outb_p(maple_nvram_command, maple_nvram_base + maple_nvram_offset); + outb_p(*maple_nvram_command, maple_nvram_base + *maple_nvram_offset); for (;;) ; fail: printk(KERN_EMERG "Maple: Manual Restart Required\n"); @@ -129,8 +126,7 @@ static void maple_restart(char *cmd) static void maple_power_off(void) { 
unsigned int maple_nvram_base; - unsigned int maple_nvram_offset; - unsigned int maple_nvram_command; + const unsigned int *maple_nvram_offset, *maple_nvram_command; struct device_node *sp; maple_nvram_base = maple_find_nvram_base(); @@ -143,14 +139,12 @@ static void maple_power_off(void) printk(KERN_EMERG "Maple: Unable to find Service Processor\n"); goto fail; } - maple_nvram_offset = *(unsigned int*) get_property(sp, - "power-off-addr", NULL); - maple_nvram_command = *(unsigned int*) get_property(sp, - "power-off-value", NULL); + maple_nvram_offset = get_property(sp, "power-off-addr", NULL); + maple_nvram_command = get_property(sp, "power-off-value", NULL); of_node_put(sp); /* send command */ - outb_p(maple_nvram_command, maple_nvram_base + maple_nvram_offset); + outb_p(*maple_nvram_command, maple_nvram_base + *maple_nvram_offset); for (;;) ; fail: printk(KERN_EMERG "Maple: Manual Power-Down Required\n"); @@ -211,7 +205,7 @@ static void __init maple_init_early(void) static void __init maple_init_IRQ(void) { struct device_node *root, *np, *mpic_node = NULL; - unsigned int *opprop; + const unsigned int *opprop; unsigned long openpic_addr = 0; int naddr, n, i, opplen, has_isus = 0; struct mpic *mpic; @@ -241,8 +235,7 @@ static void __init maple_init_IRQ(void) /* Find address list in /platform-open-pic */ root = of_find_node_by_path("/"); naddr = prom_n_addr_cells(root); - opprop = (unsigned int *) get_property(root, "platform-open-pic", - &opplen); + opprop = get_property(root, "platform-open-pic", &opplen); if (opprop != 0) { openpic_addr = of_read_number(opprop, naddr); has_isus = (opplen > naddr); diff --git a/arch/powerpc/platforms/pasemi/Makefile b/arch/powerpc/platforms/pasemi/Makefile new file mode 100644 index 000000000000..1be1a993c5f5 --- /dev/null +++ b/arch/powerpc/platforms/pasemi/Makefile @@ -0,0 +1 @@ +obj-y += setup.o pci.o time.o diff --git a/arch/powerpc/platforms/pasemi/pasemi.h b/arch/powerpc/platforms/pasemi/pasemi.h new file mode 100644 index 000000000000..fd71d72736b2 --- /dev/null +++ b/arch/powerpc/platforms/pasemi/pasemi.h @@ -0,0 +1,8 @@ +#ifndef _PASEMI_PASEMI_H +#define _PASEMI_PASEMI_H + +extern unsigned long pas_get_boot_time(void); +extern void pas_pci_init(void); +extern void pas_pcibios_fixup(void); + +#endif /* _PASEMI_PASEMI_H */ diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c new file mode 100644 index 000000000000..4679c5230413 --- /dev/null +++ b/arch/powerpc/platforms/pasemi/pci.c @@ -0,0 +1,198 @@ +/* + * Copyright (C) 2006 PA Semi, Inc + * + * Authors: Kip Walker, PA Semi + * Olof Johansson, PA Semi + * + * Maintained by: Olof Johansson <olof@lixom.net> + * + * Based on arch/powerpc/platforms/maple/pci.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + + +#include <linux/kernel.h> +#include <linux/pci.h> + +#include <asm/pci-bridge.h> +#include <asm/machdep.h> + +#include <asm/ppc-pci.h> + +#define PA_PXP_CFA(bus, devfn, off) (((bus) << 20) | ((devfn) << 12) | (off)) + +#define CONFIG_OFFSET_VALID(off) ((off) < 4096) + +static unsigned long pa_pxp_cfg_addr(struct pci_controller *hose, + u8 bus, u8 devfn, int offset) +{ + return ((unsigned long)hose->cfg_data) + PA_PXP_CFA(bus, devfn, offset); +} + +static int pa_pxp_read_config(struct pci_bus *bus, unsigned int devfn, + int offset, int len, u32 *val) +{ + struct pci_controller *hose; + unsigned long addr; + + hose = pci_bus_to_host(bus); + if (!hose) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (!CONFIG_OFFSET_VALID(offset)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + addr = pa_pxp_cfg_addr(hose, bus->number, devfn, offset); + + /* + * Note: the caller has already checked that offset is + * suitably aligned and that len is 1, 2 or 4. + */ + switch (len) { + case 1: + *val = in_8((u8 *)addr); + break; + case 2: + *val = in_le16((u16 *)addr); + break; + default: + *val = in_le32((u32 *)addr); + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static int pa_pxp_write_config(struct pci_bus *bus, unsigned int devfn, + int offset, int len, u32 val) +{ + struct pci_controller *hose; + unsigned long addr; + + hose = pci_bus_to_host(bus); + if (!hose) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (!CONFIG_OFFSET_VALID(offset)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + addr = pa_pxp_cfg_addr(hose, bus->number, devfn, offset); + + /* + * Note: the caller has already checked that offset is + * suitably aligned and that len is 1, 2 or 4. 
+ */ + switch (len) { + case 1: + out_8((u8 *)addr, val); + (void) in_8((u8 *)addr); + break; + case 2: + out_le16((u16 *)addr, val); + (void) in_le16((u16 *)addr); + break; + default: + out_le32((u32 *)addr, val); + (void) in_le32((u32 *)addr); + break; + } + return PCIBIOS_SUCCESSFUL; +} + +static struct pci_ops pa_pxp_ops = { + pa_pxp_read_config, + pa_pxp_write_config, +}; + +static void __init setup_pa_pxp(struct pci_controller *hose) +{ + hose->ops = &pa_pxp_ops; + hose->cfg_data = ioremap(0xe0000000, 0x10000000); +} + +static int __init add_bridge(struct device_node *dev) +{ + struct pci_controller *hose; + + pr_debug("Adding PCI host bridge %s\n", dev->full_name); + + hose = pcibios_alloc_controller(dev); + if (!hose) + return -ENOMEM; + + hose->first_busno = 0; + hose->last_busno = 0xff; + + setup_pa_pxp(hose); + + printk(KERN_INFO "Found PA-PXP PCI host bridge.\n"); + + /* Interpret the "ranges" property */ + /* This also maps the I/O region and sets isa_io/mem_base */ + pci_process_bridge_OF_ranges(hose, dev, 1); + pci_setup_phb_io(hose, 1); + + return 0; +} + + +void __init pas_pcibios_fixup(void) +{ + struct pci_dev *dev = NULL; + + for_each_pci_dev(dev) + pci_read_irq_line(dev); +} + +static void __init pas_fixup_phb_resources(void) +{ + struct pci_controller *hose, *tmp; + + list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { + unsigned long offset = (unsigned long)hose->io_base_virt - pci_io_base; + hose->io_resource.start += offset; + hose->io_resource.end += offset; + printk(KERN_INFO "PCI Host %d, io start: %lx; io end: %lx\n", + hose->global_number, + hose->io_resource.start, hose->io_resource.end); + } +} + + +void __init pas_pci_init(void) +{ + struct device_node *np, *root; + + root = of_find_node_by_path("/"); + if (!root) { + printk(KERN_CRIT "pas_pci_init: can't find root " + "of device tree\n"); + return; + } + + for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) + if (np->name && !strcmp(np->name, "pxp") && !add_bridge(np)) + of_node_get(np); + + of_node_put(root); + + pas_fixup_phb_resources(); + + /* Setup the linkage between OF nodes and PHBs */ + pci_devs_phb_init(); + + /* Use the common resource allocation mechanism */ + pci_probe_only = 1; +} diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c new file mode 100644 index 000000000000..628482671c15 --- /dev/null +++ b/arch/powerpc/platforms/pasemi/setup.c @@ -0,0 +1,188 @@ +/* + * Copyright (C) 2006 PA Semi, Inc + * + * Authors: Kip Walker, PA Semi + * Olof Johansson, PA Semi + * + * Maintained by: Olof Johansson <olof@lixom.net> + * + * Based on arch/powerpc/platforms/maple/setup.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/config.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/delay.h> +#include <linux/console.h> + +#include <asm/prom.h> +#include <asm/system.h> +#include <asm/iommu.h> +#include <asm/machdep.h> +#include <asm/mpic.h> +#include <asm/smp.h> +#include <asm/time.h> + +#include "pasemi.h" + +static void pas_restart(char *cmd) +{ + printk("restart unimplemented, looping...\n"); + for (;;) ; +} + +static void pas_power_off(void) +{ + printk("power off unimplemented, looping...\n"); + for (;;) ; +} + +static void pas_halt(void) +{ + pas_power_off(); +} + +#ifdef CONFIG_SMP +struct smp_ops_t pas_smp_ops = { + .probe = smp_mpic_probe, + .message_pass = smp_mpic_message_pass, + .kick_cpu = smp_generic_kick_cpu, + .setup_cpu = smp_mpic_setup_cpu, + .give_timebase = smp_generic_give_timebase, + .take_timebase = smp_generic_take_timebase, +}; +#endif /* CONFIG_SMP */ + +void __init pas_setup_arch(void) +{ +#ifdef CONFIG_SMP + /* Setup SMP callback */ + smp_ops = &pas_smp_ops; +#endif + /* Lookup PCI hosts */ + pas_pci_init(); + +#ifdef CONFIG_DUMMY_CONSOLE + conswitchp = &dummy_con; +#endif + + printk(KERN_DEBUG "Using default idle loop\n"); +} + +static void iommu_dev_setup_null(struct pci_dev *dev) { } +static void iommu_bus_setup_null(struct pci_bus *bus) { } + +static void __init pas_init_early(void) +{ + /* No iommu code yet */ + ppc_md.iommu_dev_setup = iommu_dev_setup_null; + ppc_md.iommu_bus_setup = iommu_bus_setup_null; + pci_direct_iommu_init(); +} + +/* No legacy IO on our parts */ +static int pas_check_legacy_ioport(unsigned int baseport) +{ + return -ENODEV; +} + +static __init void pas_init_IRQ(void) +{ + struct device_node *np; + struct device_node *root, *mpic_node; + unsigned long openpic_addr; + const unsigned int *opprop; + int naddr, opplen; + struct mpic *mpic; + + mpic_node = NULL; + + for_each_node_by_type(np, "interrupt-controller") + if (device_is_compatible(np, "open-pic")) { + mpic_node = np; + break; + } + if (!mpic_node) + for_each_node_by_type(np, "open-pic") { + mpic_node = np; + break; + } + if (!mpic_node) { + printk(KERN_ERR + "Failed to locate the MPIC interrupt controller\n"); + return; + } + + /* Find address list in /platform-open-pic */ + root = of_find_node_by_path("/"); + naddr = prom_n_addr_cells(root); + opprop = get_property(root, "platform-open-pic", &opplen); + if (!opprop) { + printk(KERN_ERR "No platform-open-pic property.\n"); + of_node_put(root); + return; + } + openpic_addr = of_read_number(opprop, naddr); + printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr); + of_node_put(root); + + mpic = mpic_alloc(mpic_node, openpic_addr, MPIC_PRIMARY, 0, 0, + " PAS-OPIC "); + BUG_ON(!mpic); + + mpic_assign_isu(mpic, 0, openpic_addr + 0x10000); + mpic_init(mpic); + of_node_put(mpic_node); + of_node_put(root); +} + +static void __init pas_progress(char *s, unsigned short hex) +{ + printk("[%04x] : %s\n", hex, s ? 
s : ""); +} + + +/* + * Called very early, MMU is off, device-tree isn't unflattened + */ +static int __init pas_probe(void) +{ + unsigned long root = of_get_flat_dt_root(); + + if (!of_flat_dt_is_compatible(root, "PA6T-1682M")) + return 0; + + hpte_init_native(); + + return 1; +} + +define_machine(pas) { + .name = "PA Semi PA6T-1682M", + .probe = pas_probe, + .setup_arch = pas_setup_arch, + .init_early = pas_init_early, + .init_IRQ = pas_init_IRQ, + .get_irq = mpic_get_irq, + .pcibios_fixup = pas_pcibios_fixup, + .restart = pas_restart, + .power_off = pas_power_off, + .halt = pas_halt, + .get_boot_time = pas_get_boot_time, + .calibrate_decr = generic_calibrate_decr, + .check_legacy_ioport = pas_check_legacy_ioport, + .progress = pas_progress, +}; diff --git a/arch/powerpc/platforms/pasemi/time.c b/arch/powerpc/platforms/pasemi/time.c new file mode 100644 index 000000000000..9bd410b8fec6 --- /dev/null +++ b/arch/powerpc/platforms/pasemi/time.c @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2006 PA Semi, Inc + * + * Maintained by: Olof Johansson <olof@lixom.net> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/config.h> +#include <linux/time.h> + +#include <asm/time.h> + +unsigned long __init pas_get_boot_time(void) +{ + /* Let's just return a fake date right now */ + return mktime(2006, 1, 1, 12, 0, 0); +} diff --git a/arch/powerpc/platforms/powermac/backlight.c b/arch/powerpc/platforms/powermac/backlight.c index d66415491055..afa593a8544a 100644 --- a/arch/powerpc/platforms/powermac/backlight.c +++ b/arch/powerpc/platforms/powermac/backlight.c @@ -60,7 +60,8 @@ int pmac_has_backlight_type(const char *type) struct device_node* bk_node = find_devices("backlight"); if (bk_node) { - char *prop = get_property(bk_node, "backlight-control", NULL); + const char *prop = get_property(bk_node, + "backlight-control", NULL); if (prop && strncmp(prop, type, strlen(type)) == 0) return 1; } diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c index 62926248bdb8..c2b6b4134f68 100644 --- a/arch/powerpc/platforms/powermac/cpufreq_32.c +++ b/arch/powerpc/platforms/powermac/cpufreq_32.c @@ -421,7 +421,7 @@ static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy) static u32 read_gpio(struct device_node *np) { - u32 *reg = (u32 *)get_property(np, "reg", NULL); + const u32 *reg = get_property(np, "reg", NULL); u32 offset; if (reg == NULL) @@ -497,7 +497,7 @@ static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode) "frequency-gpio"); struct device_node *slew_done_gpio_np = of_find_node_by_name(NULL, "slewing-done"); - u32 *value; + const u32 *value; /* * Check to see if it's GPIO driven or PMU only @@ -519,15 +519,15 @@ static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode) */ if (frequency_gpio && slew_done_gpio) { int lenp, rc; - u32 *freqs, *ratio; + const u32 *freqs, *ratio; - freqs = (u32 *)get_property(cpunode, 
"bus-frequencies", &lenp); + freqs = get_property(cpunode, "bus-frequencies", &lenp); lenp /= sizeof(u32); if (freqs == NULL || lenp != 2) { printk(KERN_ERR "cpufreq: bus-frequencies incorrect or missing\n"); return 1; } - ratio = (u32 *)get_property(cpunode, "processor-to-bus-ratio*2", NULL); + ratio = get_property(cpunode, "processor-to-bus-ratio*2", NULL); if (ratio == NULL) { printk(KERN_ERR "cpufreq: processor-to-bus-ratio*2 missing\n"); return 1; @@ -562,7 +562,7 @@ static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode) /* If we use the PMU, look for the min & max frequencies in the * device-tree */ - value = (u32 *)get_property(cpunode, "min-clock-frequency", NULL); + value = get_property(cpunode, "min-clock-frequency", NULL); if (!value) return 1; low_freq = (*value) / 1000; @@ -571,7 +571,7 @@ static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode) if (low_freq < 100000) low_freq *= 10; - value = (u32 *)get_property(cpunode, "max-clock-frequency", NULL); + value = get_property(cpunode, "max-clock-frequency", NULL); if (!value) return 1; hi_freq = (*value) / 1000; @@ -611,13 +611,14 @@ static int pmac_cpufreq_init_7447A(struct device_node *cpunode) static int pmac_cpufreq_init_750FX(struct device_node *cpunode) { struct device_node *volt_gpio_np; - u32 pvr, *value; + u32 pvr; + const u32 *value; if (get_property(cpunode, "dynamic-power-step", NULL) == NULL) return 1; hi_freq = cur_freq; - value = (u32 *)get_property(cpunode, "reduced-clock-frequency", NULL); + value = get_property(cpunode, "reduced-clock-frequency", NULL); if (!value) return 1; low_freq = (*value) / 1000; @@ -650,7 +651,7 @@ static int pmac_cpufreq_init_750FX(struct device_node *cpunode) static int __init pmac_cpufreq_setup(void) { struct device_node *cpunode; - u32 *value; + const u32 *value; if (strstr(cmd_line, "nocpufreq")) return 0; @@ -661,7 +662,7 @@ static int __init pmac_cpufreq_setup(void) goto out; /* Get current cpu clock freq */ - value = (u32 *)get_property(cpunode, "clock-frequency", NULL); + value = get_property(cpunode, "clock-frequency", NULL); if (!value) goto out; cur_freq = (*value) / 1000; diff --git a/arch/powerpc/platforms/powermac/cpufreq_64.c b/arch/powerpc/platforms/powermac/cpufreq_64.c index 7b1156ea5341..d30466d74194 100644 --- a/arch/powerpc/platforms/powermac/cpufreq_64.c +++ b/arch/powerpc/platforms/powermac/cpufreq_64.c @@ -89,7 +89,7 @@ static DEFINE_MUTEX(g5_switch_mutex); #ifdef CONFIG_PMAC_SMU -static u32 *g5_pmode_data; +static const u32 *g5_pmode_data; static int g5_pmode_max; static struct smu_sdbp_fvt *g5_fvt_table; /* table of op. 
points */ @@ -391,7 +391,8 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpus) unsigned int psize, ssize; unsigned long max_freq; char *freq_method, *volt_method; - u32 *valp, pvr_hi; + const u32 *valp; + u32 pvr_hi; int use_volts_vdnap = 0; int use_volts_smu = 0; int rc = -ENODEV; @@ -409,8 +410,7 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpus) /* Get first CPU node */ for (cpunode = NULL; (cpunode = of_get_next_child(cpus, cpunode)) != NULL;) { - u32 *reg = - (u32 *)get_property(cpunode, "reg", NULL); + const u32 *reg = get_property(cpunode, "reg", NULL); if (reg == NULL || (*reg) != 0) continue; if (!strcmp(cpunode->type, "cpu")) @@ -422,7 +422,7 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpus) } /* Check 970FX for now */ - valp = (u32 *)get_property(cpunode, "cpu-version", NULL); + valp = get_property(cpunode, "cpu-version", NULL); if (!valp) { DBG("No cpu-version property !\n"); goto bail_noprops; @@ -434,7 +434,7 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpus) } /* Look for the powertune data in the device-tree */ - g5_pmode_data = (u32 *)get_property(cpunode, "power-mode-data",&psize); + g5_pmode_data = get_property(cpunode, "power-mode-data",&psize); if (!g5_pmode_data) { DBG("No power-mode-data !\n"); goto bail_noprops; @@ -442,7 +442,7 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpus) g5_pmode_max = psize / sizeof(u32) - 1; if (use_volts_smu) { - struct smu_sdbp_header *shdr; + const struct smu_sdbp_header *shdr; /* Look for the FVT table */ shdr = smu_get_sdb_partition(SMU_SDB_FVT_ID, NULL); @@ -493,7 +493,7 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpus) * half freq in this version. So far, I haven't yet seen a machine * supporting anything else. 
*/ - valp = (u32 *)get_property(cpunode, "clock-frequency", NULL); + valp = get_property(cpunode, "clock-frequency", NULL); if (!valp) return -ENODEV; max_freq = (*valp)/1000; @@ -541,8 +541,8 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpus) static int __init g5_pm72_cpufreq_init(struct device_node *cpus) { struct device_node *cpuid = NULL, *hwclock = NULL, *cpunode = NULL; - u8 *eeprom = NULL; - u32 *valp; + const u8 *eeprom = NULL; + const u32 *valp; u64 max_freq, min_freq, ih, il; int has_volt = 1, rc = 0; @@ -563,7 +563,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpus) /* Lookup the cpuid eeprom node */ cpuid = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/cpuid@a0"); if (cpuid != NULL) - eeprom = (u8 *)get_property(cpuid, "cpuid", NULL); + eeprom = get_property(cpuid, "cpuid", NULL); if (eeprom == NULL) { printk(KERN_ERR "cpufreq: Can't find cpuid EEPROM !\n"); rc = -ENODEV; @@ -573,7 +573,8 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpus) /* Lookup the i2c hwclock */ for (hwclock = NULL; (hwclock = of_find_node_by_name(hwclock, "i2c-hwclock")) != NULL;){ - char *loc = get_property(hwclock, "hwctrl-location", NULL); + const char *loc = get_property(hwclock, + "hwctrl-location", NULL); if (loc == NULL) continue; if (strcmp(loc, "CPU CLOCK")) @@ -637,7 +638,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpus) */ /* Get max frequency from device-tree */ - valp = (u32 *)get_property(cpunode, "clock-frequency", NULL); + valp = get_property(cpunode, "clock-frequency", NULL); if (!valp) { printk(KERN_ERR "cpufreq: Can't find CPU frequency !\n"); rc = -ENODEV; diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c index f8313bf9a9f7..13fcaf5b1796 100644 --- a/arch/powerpc/platforms/powermac/feature.c +++ b/arch/powerpc/platforms/powermac/feature.c @@ -1058,8 +1058,8 @@ core99_reset_cpu(struct device_node *node, long param, long value) if (np == NULL) return -ENODEV; for (np = np->child; np != NULL; np = np->sibling) { - u32 *num = (u32 *)get_property(np, "reg", NULL); - u32 *rst = (u32 *)get_property(np, "soft-reset", NULL); + u32 *num = get_property(np, "reg", NULL); + u32 *rst = get_property(np, "soft-reset", NULL); if (num == NULL || rst == NULL) continue; if (param == *num) { @@ -1087,7 +1087,7 @@ core99_usb_enable(struct device_node *node, long param, long value) { struct macio_chip *macio; unsigned long flags; - char *prop; + const char *prop; int number; u32 reg; @@ -1096,7 +1096,7 @@ core99_usb_enable(struct device_node *node, long param, long value) macio->type != macio_intrepid) return -ENODEV; - prop = (char *)get_property(node, "AAPL,clock-id", NULL); + prop = get_property(node, "AAPL,clock-id", NULL); if (!prop) return -ENODEV; if (strncmp(prop, "usb0u048", 8) == 0) @@ -1507,8 +1507,8 @@ static long g5_reset_cpu(struct device_node *node, long param, long value) if (np == NULL) return -ENODEV; for (np = np->child; np != NULL; np = np->sibling) { - u32 *num = (u32 *)get_property(np, "reg", NULL); - u32 *rst = (u32 *)get_property(np, "soft-reset", NULL); + const u32 *num = get_property(np, "reg", NULL); + const u32 *rst = get_property(np, "soft-reset", NULL); if (num == NULL || rst == NULL) continue; if (param == *num) { @@ -2408,7 +2408,7 @@ static int __init probe_motherboard(void) */ dt = find_devices("device-tree"); if (dt != NULL) - model = (const char *) get_property(dt, "model", NULL); + model = get_property(dt, "model", NULL); for(i=0; model && 
i<(sizeof(pmac_mb_defs)/sizeof(struct pmac_mb_def)); i++) { if (strcmp(model, pmac_mb_defs[i].model_string) == 0) { pmac_mb = pmac_mb_defs[i]; @@ -2536,7 +2536,7 @@ found: */ static void __init probe_uninorth(void) { - u32 *addrp; + const u32 *addrp; phys_addr_t address; unsigned long actrl; @@ -2555,7 +2555,7 @@ static void __init probe_uninorth(void) if (uninorth_node == NULL) return; - addrp = (u32 *)get_property(uninorth_node, "reg", NULL); + addrp = get_property(uninorth_node, "reg", NULL); if (addrp == NULL) return; address = of_translate_address(uninorth_node, addrp); @@ -2596,7 +2596,7 @@ static void __init probe_one_macio(const char *name, const char *compat, int typ struct device_node* node; int i; volatile u32 __iomem *base; - u32 *addrp, *revp; + const u32 *addrp, *revp; phys_addr_t addr; u64 size; @@ -2639,7 +2639,7 @@ static void __init probe_one_macio(const char *name, const char *compat, int typ return; } if (type == macio_keylargo || type == macio_keylargo2) { - u32 *did = (u32 *)get_property(node, "device-id", NULL); + const u32 *did = get_property(node, "device-id", NULL); if (*did == 0x00000025) type = macio_pangea; if (*did == 0x0000003e) @@ -2652,7 +2652,7 @@ static void __init probe_one_macio(const char *name, const char *compat, int typ macio_chips[i].base = base; macio_chips[i].flags = MACIO_FLAG_SCCB_ON | MACIO_FLAG_SCCB_ON; macio_chips[i].name = macio_names[type]; - revp = (u32 *)get_property(node, "revision-id", NULL); + revp = get_property(node, "revision-id", NULL); if (revp) macio_chips[i].rev = *revp; printk(KERN_INFO "Found a %s mac-io controller, rev: %d, mapped at 0x%p\n", @@ -2695,15 +2695,15 @@ static void __init initial_serial_shutdown(struct device_node *np) { int len; - struct slot_names_prop { + const struct slot_names_prop { int count; char name[1]; } *slots; - char *conn; + const char *conn; int port_type = PMAC_SCC_ASYNC; int modem = 0; - slots = (struct slot_names_prop *)get_property(np, "slot-names", &len); + slots = get_property(np, "slot-names", &len); conn = get_property(np, "AAPL,connector", &len); if (conn && (strcmp(conn, "infrared") == 0)) port_type = PMAC_SCC_IRDA; diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c index 8677f50c2586..c2c7cf75dd5f 100644 --- a/arch/powerpc/platforms/powermac/low_i2c.c +++ b/arch/powerpc/platforms/powermac/low_i2c.c @@ -477,7 +477,8 @@ static int kw_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize, static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np) { struct pmac_i2c_host_kw *host; - u32 *psteps, *prate, *addrp, steps; + const u32 *psteps, *prate, *addrp; + u32 steps; host = kzalloc(sizeof(struct pmac_i2c_host_kw), GFP_KERNEL); if (host == NULL) { @@ -490,7 +491,7 @@ static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np) * on all i2c keywest nodes so far ... we would have to fallback * to macio parsing if that wasn't the case */ - addrp = (u32 *)get_property(np, "AAPL,address", NULL); + addrp = get_property(np, "AAPL,address", NULL); if (addrp == NULL) { printk(KERN_ERR "low_i2c: Can't find address for %s\n", np->full_name); @@ -504,13 +505,13 @@ static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np) host->timeout_timer.function = kw_i2c_timeout; host->timeout_timer.data = (unsigned long)host; - psteps = (u32 *)get_property(np, "AAPL,address-step", NULL); + psteps = get_property(np, "AAPL,address-step", NULL); steps = psteps ? 
(*psteps) : 0x10; for (host->bsteps = 0; (steps & 0x01) == 0; host->bsteps++) steps >>= 1; /* Select interface rate */ host->speed = KW_I2C_MODE_25KHZ; - prate = (u32 *)get_property(np, "AAPL,i2c-rate", NULL); + prate = get_property(np, "AAPL,i2c-rate", NULL); if (prate) switch(*prate) { case 100: host->speed = KW_I2C_MODE_100KHZ; @@ -618,8 +619,8 @@ static void __init kw_i2c_probe(void) } else { for (child = NULL; (child = of_get_next_child(np, child)) != NULL;) { - u32 *reg = - (u32 *)get_property(child, "reg", NULL); + const u32 *reg = get_property(child, + "reg", NULL); if (reg == NULL) continue; kw_i2c_add(host, np, child, *reg); @@ -881,7 +882,7 @@ static void __init smu_i2c_probe(void) { struct device_node *controller, *busnode; struct pmac_i2c_bus *bus; - u32 *reg; + const u32 *reg; int sz; if (!smu_present()) @@ -904,7 +905,7 @@ static void __init smu_i2c_probe(void) if (strcmp(busnode->type, "i2c") && strcmp(busnode->type, "i2c-bus")) continue; - reg = (u32 *)get_property(busnode, "reg", NULL); + reg = get_property(busnode, "reg", NULL); if (reg == NULL) continue; @@ -948,9 +949,8 @@ struct pmac_i2c_bus *pmac_i2c_find_bus(struct device_node *node) list_for_each_entry(bus, &pmac_i2c_busses, link) { if (p == bus->busnode) { if (prev && bus->flags & pmac_i2c_multibus) { - u32 *reg; - reg = (u32 *)get_property(prev, "reg", - NULL); + const u32 *reg; + reg = get_property(prev, "reg", NULL); if (!reg) continue; if (((*reg) >> 8) != bus->channel) @@ -971,7 +971,7 @@ EXPORT_SYMBOL_GPL(pmac_i2c_find_bus); u8 pmac_i2c_get_dev_addr(struct device_node *device) { - u32 *reg = (u32 *)get_property(device, "reg", NULL); + const u32 *reg = get_property(device, "reg", NULL); if (reg == NULL) return 0; diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c index 205d04471161..9923adc5248e 100644 --- a/arch/powerpc/platforms/powermac/pci.c +++ b/arch/powerpc/platforms/powermac/pci.c @@ -66,16 +66,16 @@ struct device_node *k2_skiplist[2]; static int __init fixup_one_level_bus_range(struct device_node *node, int higher) { for (; node != 0;node = node->sibling) { - int * bus_range; - unsigned int *class_code; + const int * bus_range; + const unsigned int *class_code; int len; /* For PCI<->PCI bridges or CardBus bridges, we go down */ - class_code = (unsigned int *) get_property(node, "class-code", NULL); + class_code = get_property(node, "class-code", NULL); if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI && (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) continue; - bus_range = (int *) get_property(node, "bus-range", &len); + bus_range = get_property(node, "bus-range", &len); if (bus_range != NULL && len > 2 * sizeof(int)) { if (bus_range[1] > higher) higher = bus_range[1]; @@ -93,13 +93,15 @@ static int __init fixup_one_level_bus_range(struct device_node *node, int higher */ static void __init fixup_bus_range(struct device_node *bridge) { - int * bus_range; - int len; + int *bus_range, len; + struct property *prop; /* Lookup the "bus-range" property for the hose */ - bus_range = (int *) get_property(bridge, "bus-range", &len); - if (bus_range == NULL || len < 2 * sizeof(int)) + prop = of_find_property(bridge, "bus-range", &len); + if (prop == NULL || prop->length < 2 * sizeof(int)) return; + + bus_range = (int *)prop->value; bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]); } @@ -237,7 +239,7 @@ static struct pci_ops macrisc_pci_ops = static int chaos_validate_dev(struct pci_bus *bus, int devfn, int offset) { struct device_node 
*np; - u32 *vendor, *device; + const u32 *vendor, *device; if (offset >= 0x100) return PCIBIOS_BAD_REGISTER_NUMBER; @@ -245,8 +247,8 @@ static int chaos_validate_dev(struct pci_bus *bus, int devfn, int offset) if (np == NULL) return PCIBIOS_DEVICE_NOT_FOUND; - vendor = (u32 *)get_property(np, "vendor-id", NULL); - device = (u32 *)get_property(np, "device-id", NULL); + vendor = get_property(np, "vendor-id", NULL); + device = get_property(np, "device-id", NULL); if (vendor == NULL || device == NULL) return PCIBIOS_DEVICE_NOT_FOUND; @@ -686,20 +688,21 @@ static void __init fixup_nec_usb2(void) for (nec = NULL; (nec = of_find_node_by_name(nec, "usb")) != NULL;) { struct pci_controller *hose; - u32 data, *prop; + u32 data; + const u32 *prop; u8 bus, devfn; - prop = (u32 *)get_property(nec, "vendor-id", NULL); + prop = get_property(nec, "vendor-id", NULL); if (prop == NULL) continue; if (0x1033 != *prop) continue; - prop = (u32 *)get_property(nec, "device-id", NULL); + prop = get_property(nec, "device-id", NULL); if (prop == NULL) continue; if (0x0035 != *prop) continue; - prop = (u32 *)get_property(nec, "reg", NULL); + prop = get_property(nec, "reg", NULL); if (prop == NULL) continue; devfn = (prop[0] >> 8) & 0xff; @@ -898,7 +901,7 @@ static int __init add_bridge(struct device_node *dev) struct pci_controller *hose; struct resource rsrc; char *disp_name; - int *bus_range; + const int *bus_range; int primary = 1, has_address = 0; DBG("Adding PCI host bridge %s\n", dev->full_name); @@ -907,7 +910,7 @@ static int __init add_bridge(struct device_node *dev) has_address = (of_address_to_resource(dev, 0, &rsrc) == 0); /* Get bus range if any */ - bus_range = (int *) get_property(dev, "bus-range", &len); + bus_range = get_property(dev, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) { printk(KERN_WARNING "Can't get bus-range for %s, assume" " bus 0\n", dev->full_name); diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c index aacfa59595d1..ee3b223ab17a 100644 --- a/arch/powerpc/platforms/powermac/pfunc_base.c +++ b/arch/powerpc/platforms/powermac/pfunc_base.c @@ -114,7 +114,7 @@ static void macio_gpio_init_one(struct macio_chip *macio) * we just create them all */ for (gp = NULL; (gp = of_get_next_child(gparent, gp)) != NULL;) { - u32 *reg = (u32 *)get_property(gp, "reg", NULL); + const u32 *reg = get_property(gp, "reg", NULL); unsigned long offset; if (reg == NULL) continue; diff --git a/arch/powerpc/platforms/powermac/pfunc_core.c b/arch/powerpc/platforms/powermac/pfunc_core.c index b117adbf9571..7651f278615a 100644 --- a/arch/powerpc/platforms/powermac/pfunc_core.c +++ b/arch/powerpc/platforms/powermac/pfunc_core.c @@ -813,14 +813,15 @@ struct pmf_function *__pmf_find_function(struct device_node *target, struct pmf_device *dev; struct pmf_function *func, *result = NULL; char fname[64]; - u32 *prop, ph; + const u32 *prop; + u32 ph; /* * Look for a "platform-*" function reference. 
If we can't find * one, then we fallback to a direct call attempt */ snprintf(fname, 63, "platform-%s", name); - prop = (u32 *)get_property(target, fname, NULL); + prop = get_property(target, fname, NULL); if (prop == NULL) goto find_it; ph = *prop; diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index 31a9da769fa2..824a618396ab 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -116,7 +116,7 @@ extern struct smp_ops_t core99_smp_ops; static void pmac_show_cpuinfo(struct seq_file *m) { struct device_node *np; - char *pp; + const char *pp; int plen; int mbmodel; unsigned int mbflags; @@ -134,12 +134,12 @@ static void pmac_show_cpuinfo(struct seq_file *m) seq_printf(m, "machine\t\t: "); np = of_find_node_by_path("/"); if (np != NULL) { - pp = (char *) get_property(np, "model", NULL); + pp = get_property(np, "model", NULL); if (pp != NULL) seq_printf(m, "%s\n", pp); else seq_printf(m, "PowerMac\n"); - pp = (char *) get_property(np, "compatible", &plen); + pp = get_property(np, "compatible", &plen); if (pp != NULL) { seq_printf(m, "motherboard\t:"); while (plen > 0) { @@ -163,10 +163,8 @@ static void pmac_show_cpuinfo(struct seq_file *m) if (np == NULL) np = of_find_node_by_type(NULL, "cache"); if (np != NULL) { - unsigned int *ic = (unsigned int *) - get_property(np, "i-cache-size", NULL); - unsigned int *dc = (unsigned int *) - get_property(np, "d-cache-size", NULL); + const unsigned int *ic = get_property(np, "i-cache-size", NULL); + const unsigned int *dc = get_property(np, "d-cache-size", NULL); seq_printf(m, "L2 cache\t:"); has_l2cache = 1; if (get_property(np, "cache-unified", NULL) != 0 && dc) { @@ -254,7 +252,7 @@ static void __init l2cr_init(void) if (np == 0) np = find_type_devices("cpu"); if (np != 0) { - unsigned int *l2cr = (unsigned int *) + const unsigned int *l2cr = get_property(np, "l2cr-value", NULL); if (l2cr != 0) { ppc_override_l2cr = 1; @@ -277,7 +275,7 @@ static void __init l2cr_init(void) static void __init pmac_setup_arch(void) { struct device_node *cpu, *ic; - int *fp; + const int *fp; unsigned long pvr; pvr = PVR_VER(mfspr(SPRN_PVR)); @@ -287,7 +285,7 @@ static void __init pmac_setup_arch(void) loops_per_jiffy = 50000000 / HZ; cpu = of_find_node_by_type(NULL, "cpu"); if (cpu != NULL) { - fp = (int *) get_property(cpu, "clock-frequency", NULL); + fp = get_property(cpu, "clock-frequency", NULL); if (fp != NULL) { if (pvr >= 0x30 && pvr < 0x80) /* PPC970 etc. 
*/ diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index 827b7121ffb8..653eeb64d1e2 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c @@ -548,7 +548,7 @@ static void __init smp_core99_setup_i2c_hwsync(int ncpus) struct device_node *cc = NULL; struct device_node *p; const char *name = NULL; - u32 *reg; + const u32 *reg; int ok; /* Look for the clock chip */ @@ -562,7 +562,7 @@ static void __init smp_core99_setup_i2c_hwsync(int ncpus) pmac_tb_clock_chip_host = pmac_i2c_find_bus(cc); if (pmac_tb_clock_chip_host == NULL) continue; - reg = (u32 *)get_property(cc, "reg", NULL); + reg = get_property(cc, "reg", NULL); if (reg == NULL) continue; switch (*reg) { @@ -707,8 +707,7 @@ static void __init smp_core99_setup(int ncpus) core99_tb_gpio = KL_GPIO_TB_ENABLE; /* default value */ cpu = of_find_node_by_type(NULL, "cpu"); if (cpu != NULL) { - tbprop = (u32 *)get_property(cpu, "timebase-enable", - NULL); + tbprop = get_property(cpu, "timebase-enable", NULL); if (tbprop) core99_tb_gpio = *tbprop; of_node_put(cpu); diff --git a/arch/powerpc/platforms/powermac/udbg_scc.c b/arch/powerpc/platforms/powermac/udbg_scc.c index 37e5b1eff911..ce1a235855f7 100644 --- a/arch/powerpc/platforms/powermac/udbg_scc.c +++ b/arch/powerpc/platforms/powermac/udbg_scc.c @@ -68,11 +68,11 @@ static unsigned char scc_inittab[] = { void udbg_scc_init(int force_scc) { - u32 *reg; + const u32 *reg; unsigned long addr; struct device_node *stdout = NULL, *escc = NULL, *macio = NULL; struct device_node *ch, *ch_def = NULL, *ch_a = NULL; - char *path; + const char *path; int i, x; escc = of_find_node_by_name(NULL, "escc"); @@ -81,7 +81,7 @@ void udbg_scc_init(int force_scc) macio = of_get_parent(escc); if (macio == NULL) goto bail; - path = (char *)get_property(of_chosen, "linux,stdout-path", NULL); + path = get_property(of_chosen, "linux,stdout-path", NULL); if (path != NULL) stdout = of_find_node_by_path(path); for (ch = NULL; (ch = of_get_next_child(escc, ch)) != NULL;) { @@ -96,13 +96,13 @@ void udbg_scc_init(int force_scc) ch = ch_def ? 
ch_def : ch_a; /* Get address within mac-io ASIC */ - reg = (u32 *)get_property(escc, "reg", NULL); + reg = get_property(escc, "reg", NULL); if (reg == NULL) goto bail; addr = reg[0]; /* Get address of mac-io PCI itself */ - reg = (u32 *)get_property(macio, "assigned-addresses", NULL); + reg = get_property(macio, "assigned-addresses", NULL); if (reg == NULL) goto bail; addr += reg[2]; diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile index e5e0ff466904..997243a91be8 100644 --- a/arch/powerpc/platforms/pseries/Makefile +++ b/arch/powerpc/platforms/pseries/Makefile @@ -12,3 +12,4 @@ obj-$(CONFIG_EEH) += eeh.o eeh_cache.o eeh_driver.o eeh_event.o obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o obj-$(CONFIG_HVCS) += hvcserver.o +obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c index 32eaddfa5470..84bc8f7e17ef 100644 --- a/arch/powerpc/platforms/pseries/eeh.c +++ b/arch/powerpc/platforms/pseries/eeh.c @@ -449,7 +449,11 @@ EXPORT_SYMBOL(eeh_check_failure); /* ------------------------------------------------------------- */ /* The code below deals with error recovery */ -/** Return negative value if a permanent error, else return +/** + * eeh_slot_availability - returns error status of slot + * @pdn pci device node + * + * Return negative value if a permanent error, else return * a number of milliseconds to wait until the PCI slot is * ready to be used. */ @@ -474,11 +478,42 @@ eeh_slot_availability(struct pci_dn *pdn) printk (KERN_ERR "EEH: Slot unavailable: rc=%d, rets=%d %d %d\n", rc, rets[0], rets[1], rets[2]); - return -1; + return -2; +} + +/** + * rtas_pci_enable - enable MMIO or DMA transfers for this slot + * @pdn pci device node + */ + +int +rtas_pci_enable(struct pci_dn *pdn, int function) +{ + int config_addr; + int rc; + + /* Use PE configuration address, if present */ + config_addr = pdn->eeh_config_addr; + if (pdn->eeh_pe_config_addr) + config_addr = pdn->eeh_pe_config_addr; + + rc = rtas_call(ibm_set_eeh_option, 4, 1, NULL, + config_addr, + BUID_HI(pdn->phb->buid), + BUID_LO(pdn->phb->buid), + function); + + if (rc) + printk(KERN_WARNING "EEH: Cannot enable function %d, err=%d dn=%s\n", + function, rc, pdn->node->full_name); + + return rc; } -/** rtas_pci_slot_reset raises/lowers the pci #RST line - * state: 1/0 to raise/lower the #RST +/** + * rtas_pci_slot_reset - raises/lowers the pci #RST line + * @pdn pci device node + * @state: 1/0 to raise/lower the #RST * * Clear the EEH-frozen condition on a slot. This routine * asserts the PCI #RST line if the 'state' argument is '1', @@ -511,24 +546,21 @@ rtas_pci_slot_reset(struct pci_dn *pdn, int state) BUID_HI(pdn->phb->buid), BUID_LO(pdn->phb->buid), state); - if (rc) { - printk (KERN_WARNING "EEH: Unable to reset the failed slot, (%d) #RST=%d dn=%s\n", + if (rc) + printk (KERN_WARNING "EEH: Unable to reset the failed slot," + " (%d) #RST=%d dn=%s\n", rc, state, pdn->node->full_name); - return; - } } -/** rtas_set_slot_reset -- assert the pci #RST line for 1/4 second - * dn -- device node to be reset. +/** + * rtas_set_slot_reset -- assert the pci #RST line for 1/4 second + * @pdn: pci device node to be reset. * * Return 0 if success, else a non-zero value. 
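A note on the contract behind the eeh.c rework here: eeh_slot_availability() now returns 0 when the slot is usable, a positive number of milliseconds to keep waiting, -2 when the reset did not take and is worth retrying, and any other negative value for a permanent failure. The retry loop grown by rtas_set_slot_reset() in the next hunk reduces to this sketch (written as if inside eeh.c, where both helpers are static; printks elided):

    #include <linux/delay.h>

    static int example_wait_for_slot(struct pci_dn *pdn)
    {
    	int i, rc;

    	for (i = 0; i < 10; i++) {
    		rc = eeh_slot_availability(pdn);
    		if (rc == 0)
    			return 0;		/* slot is ready */
    		if (rc == -2) {
    			__rtas_set_slot_reset(pdn);	/* pulse #RST again */
    			continue;
    		}
    		if (rc < 0)
    			return -1;		/* unrecoverable */
    		msleep(rc + 100);		/* firmware says: wait */
    	}
    	return -1;
    }
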
*/ -int -rtas_set_slot_reset(struct pci_dn *pdn) +static void __rtas_set_slot_reset(struct pci_dn *pdn) { - int i, rc; - rtas_pci_slot_reset (pdn, 1); /* The PCI bus requires that the reset be held high for at least @@ -549,17 +581,33 @@ rtas_set_slot_reset(struct pci_dn *pdn) * up traffic. */ #define PCI_BUS_SETTLE_TIME_MSEC 1800 msleep (PCI_BUS_SETTLE_TIME_MSEC); +} + +int rtas_set_slot_reset(struct pci_dn *pdn) +{ + int i, rc; + + __rtas_set_slot_reset(pdn); /* Now double check with the firmware to make sure the device is * ready to be used; if not, wait for recovery. */ for (i=0; i<10; i++) { rc = eeh_slot_availability (pdn); - if (rc < 0) - printk (KERN_ERR "EEH: failed (%d) to reset slot %s\n", rc, pdn->node->full_name); if (rc == 0) return 0; - if (rc < 0) + + if (rc == -2) { + printk (KERN_ERR "EEH: failed (%d) to reset slot %s\n", + i, pdn->node->full_name); + __rtas_set_slot_reset(pdn); + continue; + } + + if (rc < 0) { + printk (KERN_ERR "EEH: unrecoverable slot failure %s\n", + pdn->node->full_name); return -1; + } msleep (rc+100); } @@ -582,6 +630,8 @@ rtas_set_slot_reset(struct pci_dn *pdn) /** * __restore_bars - Restore the Base Address Registers + * @pdn: pci device node + * * Loads the PCI configuration space base address registers, * the expansion ROM base address, the latency timer, and etc. * from the saved values in the device node. @@ -691,11 +741,11 @@ static void *early_enable_eeh(struct device_node *dn, void *data) { struct eeh_early_enable_info *info = data; int ret; - char *status = get_property(dn, "status", NULL); - u32 *class_code = (u32 *)get_property(dn, "class-code", NULL); - u32 *vendor_id = (u32 *)get_property(dn, "vendor-id", NULL); - u32 *device_id = (u32 *)get_property(dn, "device-id", NULL); - u32 *regs; + const char *status = get_property(dn, "status", NULL); + const u32 *class_code = get_property(dn, "class-code", NULL); + const u32 *vendor_id = get_property(dn, "vendor-id", NULL); + const u32 *device_id = get_property(dn, "device-id", NULL); + const u32 *regs; int enable; struct pci_dn *pdn = PCI_DN(dn); @@ -737,7 +787,7 @@ static void *early_enable_eeh(struct device_node *dn, void *data) /* Ok... see if this device supports EEH. Some do, some don't, * and the only way to find out is to check each and every one. */ - regs = (u32 *)get_property(dn, "reg", NULL); + regs = get_property(dn, "reg", NULL); if (regs) { /* First register entry is addr (00BBSS00) */ /* Try to enable eeh */ diff --git a/arch/powerpc/platforms/pseries/eeh_cache.c b/arch/powerpc/platforms/pseries/eeh_cache.c index c37a8497c60f..b6b462d3c604 100644 --- a/arch/powerpc/platforms/pseries/eeh_cache.c +++ b/arch/powerpc/platforms/pseries/eeh_cache.c @@ -157,6 +157,7 @@ pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo, if (!piar) return NULL; + pci_dev_get(dev); piar->addr_lo = alo; piar->addr_hi = ahi; piar->pcidev = dev; @@ -178,7 +179,6 @@ static void __pci_addr_cache_insert_device(struct pci_dev *dev) struct device_node *dn; struct pci_dn *pdn; int i; - int inserted = 0; dn = pci_device_to_OF_node(dev); if (!dn) { @@ -197,9 +197,6 @@ static void __pci_addr_cache_insert_device(struct pci_dev *dev) return; } - /* The cache holds a reference to the device... 
*/ - pci_dev_get(dev); - /* Walk resources on this device, poke them into the tree */ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { unsigned long start = pci_resource_start(dev,i); @@ -212,12 +209,7 @@ static void __pci_addr_cache_insert_device(struct pci_dev *dev) if (start == 0 || ~start == 0 || end == 0 || ~end == 0) continue; pci_addr_cache_insert(dev, start, end, flags); - inserted = 1; } - - /* If there was nothing to add, the cache has no reference... */ - if (!inserted) - pci_dev_put(dev); } /** @@ -240,7 +232,6 @@ void pci_addr_cache_insert_device(struct pci_dev *dev) static inline void __pci_addr_cache_remove_device(struct pci_dev *dev) { struct rb_node *n; - int removed = 0; restart: n = rb_first(&pci_io_addr_cache_root.rb_root); @@ -250,16 +241,12 @@ restart: if (piar->pcidev == dev) { rb_erase(n, &pci_io_addr_cache_root.rb_root); - removed = 1; + pci_dev_put(piar->pcidev); kfree(piar); goto restart; } n = rb_next(n); } - - /* The cache no longer holds its reference to this device... */ - if (removed) - pci_dev_put(dev); } /** diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c index aaad2c0afcbf..c2bc9904f1cb 100644 --- a/arch/powerpc/platforms/pseries/eeh_driver.c +++ b/arch/powerpc/platforms/pseries/eeh_driver.c @@ -77,8 +77,12 @@ static int irq_in_use(unsigned int irq) } /* ------------------------------------------------------- */ -/** eeh_report_error - report an EEH error to each device, - * collect up and merge the device responses. +/** + * eeh_report_error - report pci error to each device driver + * + * Report an EEH error to each device driver, collect up and + * merge the device driver responses. Cumulative response + * passed back in "userdata". */ static void eeh_report_error(struct pci_dev *dev, void *userdata) @@ -96,24 +100,49 @@ static void eeh_report_error(struct pci_dev *dev, void *userdata) PCI_DN(dn)->eeh_mode |= EEH_MODE_IRQ_DISABLED; disable_irq_nosync(dev->irq); } - if (!driver->err_handler) - return; - if (!driver->err_handler->error_detected) + if (!driver->err_handler || + !driver->err_handler->error_detected) return; rc = driver->err_handler->error_detected (dev, pci_channel_io_frozen); if (*res == PCI_ERS_RESULT_NONE) *res = rc; - if (*res == PCI_ERS_RESULT_NEED_RESET) return; if (*res == PCI_ERS_RESULT_DISCONNECT && rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; } -/** eeh_report_reset -- tell this device that the pci slot - * has been reset. +/** + * eeh_report_mmio_enabled - tell drivers that MMIO has been enabled + * + * Report an EEH error to each device driver, collect up and + * merge the device driver responses. Cumulative response + * passed back in "userdata". 
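All of the eeh_report_*() callbacks in this file fold per-driver answers into one cumulative pci_ers_result with the same two rules; the pattern appears in eeh_report_error above and is repeated in eeh_report_mmio_enabled and eeh_report_reset below. Restated as a helper (a sketch of the local idiom, not a generic PCI-core API):

    #include <linux/pci.h>

    static void example_merge_result(enum pci_ers_result *res,
    				 enum pci_ers_result rc)
    {
    	/* the first definite answer wins */
    	if (*res == PCI_ERS_RESULT_NONE)
    		*res = rc;
    	/* except that NEED_RESET overrides an earlier DISCONNECT */
    	if (*res == PCI_ERS_RESULT_DISCONNECT &&
    	    rc == PCI_ERS_RESULT_NEED_RESET)
    		*res = rc;
    }
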
+ */ + +static void eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata) +{ + enum pci_ers_result rc, *res = userdata; + struct pci_driver *driver = dev->driver; + + // dev->error_state = pci_channel_mmio_enabled; + + if (!driver || + !driver->err_handler || + !driver->err_handler->mmio_enabled) + return; + + rc = driver->err_handler->mmio_enabled (dev); + if (*res == PCI_ERS_RESULT_NONE) *res = rc; + if (*res == PCI_ERS_RESULT_DISCONNECT && + rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; +} + +/** + * eeh_report_reset - tell device that slot has been reset */ static void eeh_report_reset(struct pci_dev *dev, void *userdata) { + enum pci_ers_result rc, *res = userdata; struct pci_driver *driver = dev->driver; struct device_node *dn = pci_device_to_OF_node(dev); @@ -124,14 +153,20 @@ static void eeh_report_reset(struct pci_dev *dev, void *userdata) PCI_DN(dn)->eeh_mode &= ~EEH_MODE_IRQ_DISABLED; enable_irq(dev->irq); } - if (!driver->err_handler) - return; - if (!driver->err_handler->slot_reset) + if (!driver->err_handler || + !driver->err_handler->slot_reset) return; - driver->err_handler->slot_reset(dev); + rc = driver->err_handler->slot_reset(dev); + if (*res == PCI_ERS_RESULT_NONE) *res = rc; + if (*res == PCI_ERS_RESULT_DISCONNECT && + rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; } +/** + * eeh_report_resume - tell device to resume normal operations + */ + static void eeh_report_resume(struct pci_dev *dev, void *userdata) { struct pci_driver *driver = dev->driver; @@ -148,6 +183,13 @@ static void eeh_report_resume(struct pci_dev *dev, void *userdata) driver->err_handler->resume(dev); } +/** + * eeh_report_failure - tell device driver that device is dead. + * + * This informs the device driver that the device is permanently + * dead, and that no further recovery attempts will be made on it. + */ + static void eeh_report_failure(struct pci_dev *dev, void *userdata) { struct pci_driver *driver = dev->driver; @@ -190,11 +232,11 @@ static void eeh_report_failure(struct pci_dev *dev, void *userdata) /** * eeh_reset_device() -- perform actual reset of a pci slot - * Args: bus: pointer to the pci bus structure corresponding + * @bus: pointer to the pci bus structure corresponding * to the isolated slot. A non-null value will * cause all devices under the bus to be removed * and then re-added. - * pe_dn: pointer to a "Partionable Endpoint" device node. + * @pe_dn: pointer to a "Partionable Endpoint" device node. * This is the top-level structure on which pci * bus resets can be performed. */ @@ -268,14 +310,14 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event) if (!frozen_dn) { - location = (char *) get_property(event->dn, "ibm,loc-code", NULL); + location = get_property(event->dn, "ibm,loc-code", NULL); location = location ? location : "unknown"; printk(KERN_ERR "EEH: Error: Cannot find partition endpoint " "for location=%s pci addr=%s\n", location, pci_name(event->dev)); return NULL; } - location = (char *) get_property(frozen_dn, "ibm,loc-code", NULL); + location = get_property(frozen_dn, "ibm,loc-code", NULL); location = location ? location : "unknown"; /* There are two different styles for coming up with the PE. 
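The hunk below turns the middle of handle_eeh_events() into a staged recovery sequence. Condensed into a single function for readability, with the goto-based hard-fail paths folded into return codes (helper names are taken from this patch; the condensation itself is illustrative):

    static int example_recover(struct pci_dn *frozen_pdn,
    			   struct pci_bus *frozen_bus,
    			   enum pci_ers_result result)
    {
    	/* drivers that answered CAN_RECOVER get MMIO thawed first */
    	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
    		if (rtas_pci_enable(frozen_pdn, EEH_THAW_MMIO)) {
    			result = PCI_ERS_RESULT_NEED_RESET;
    		} else {
    			result = PCI_ERS_RESULT_NONE;
    			pci_walk_bus(frozen_bus, eeh_report_mmio_enabled, &result);
    		}
    	}

    	/* then DMA; any failure escalates to a full slot reset */
    	if (result == PCI_ERS_RESULT_CAN_RECOVER &&
    	    rtas_pci_enable(frozen_pdn, EEH_THAW_DMA))
    		result = PCI_ERS_RESULT_NEED_RESET;

    	if (result == PCI_ERS_RESULT_DISCONNECT)
    		return -1;				/* hard fail */

    	if (result == PCI_ERS_RESULT_NEED_RESET) {
    		if (eeh_reset_device(frozen_pdn, NULL))
    			return -1;			/* hard fail */
    		result = PCI_ERS_RESULT_NONE;
    		pci_walk_bus(frozen_bus, eeh_report_reset, &result);
    	}

    	/* every driver must claim recovery before resume */
    	if (result != PCI_ERS_RESULT_RECOVERED)
    		return -1;				/* hard fail */

    	pci_walk_bus(frozen_bus, eeh_report_resume, NULL);
    	return 0;
    }
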
@@ -347,23 +389,43 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event) goto hard_fail; } - /* If any device called out for a reset, then reset the slot */ - if (result == PCI_ERS_RESULT_NEED_RESET) { - rc = eeh_reset_device(frozen_pdn, NULL); - if (rc) - goto hard_fail; - pci_walk_bus(frozen_bus, eeh_report_reset, NULL); + /* If all devices reported they can proceed, then re-enable MMIO */ + if (result == PCI_ERS_RESULT_CAN_RECOVER) { + rc = rtas_pci_enable(frozen_pdn, EEH_THAW_MMIO); + + if (rc) { + result = PCI_ERS_RESULT_NEED_RESET; + } else { + result = PCI_ERS_RESULT_NONE; + pci_walk_bus(frozen_bus, eeh_report_mmio_enabled, &result); + } } - /* If all devices reported they can proceed, the re-enable PIO */ + /* If all devices reported they can proceed, then re-enable DMA */ if (result == PCI_ERS_RESULT_CAN_RECOVER) { - /* XXX Not supported; we brute-force reset the device */ + rc = rtas_pci_enable(frozen_pdn, EEH_THAW_DMA); + + if (rc) + result = PCI_ERS_RESULT_NEED_RESET; + } + + /* If any device has a hard failure, then shut off everything. */ + if (result == PCI_ERS_RESULT_DISCONNECT) + goto hard_fail; + + /* If any device called out for a reset, then reset the slot */ + if (result == PCI_ERS_RESULT_NEED_RESET) { rc = eeh_reset_device(frozen_pdn, NULL); if (rc) goto hard_fail; - pci_walk_bus(frozen_bus, eeh_report_reset, NULL); + result = PCI_ERS_RESULT_NONE; + pci_walk_bus(frozen_bus, eeh_report_reset, &result); } + /* All devices should claim they have recovered by now. */ + if (result != PCI_ERS_RESULT_RECOVERED) + goto hard_fail; + /* Tell all device drivers that they can resume operations */ pci_walk_bus(frozen_bus, eeh_report_resume, NULL); diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c index 45ccc687e57c..137077451316 100644 --- a/arch/powerpc/platforms/pseries/eeh_event.c +++ b/arch/powerpc/platforms/pseries/eeh_event.c @@ -124,11 +124,11 @@ int eeh_send_failure_event (struct device_node *dn, { unsigned long flags; struct eeh_event *event; - char *location; + const char *location; if (!mem_init_done) { printk(KERN_ERR "EEH: event during early boot not handled\n"); - location = (char *) get_property(dn, "ibm,loc-code", NULL); + location = get_property(dn, "ibm,loc-code", NULL); printk(KERN_ERR "EEH: device node = %s\n", dn->full_name); printk(KERN_ERR "EEH: PCI location = %s\n", location); return 1; diff --git a/arch/powerpc/platforms/pseries/firmware.c b/arch/powerpc/platforms/pseries/firmware.c index c01d8f0cbe6d..1c7b2baa5f73 100644 --- a/arch/powerpc/platforms/pseries/firmware.c +++ b/arch/powerpc/platforms/pseries/firmware.c @@ -68,7 +68,7 @@ firmware_features_table[FIRMWARE_MAX_FEATURES] = { void __init fw_feature_init(void) { struct device_node *dn; - char *hypertas, *s; + const char *hypertas, *s; int len, i; DBG(" -> fw_feature_init()\n"); diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S index c9ff547f9d25..c00cfed7af2c 100644 --- a/arch/powerpc/platforms/pseries/hvCall.S +++ b/arch/powerpc/platforms/pseries/hvCall.S @@ -1,7 +1,6 @@ /* * This file contains the generic code to perform a call to the * pSeries LPAR hypervisor. - * NOTE: this file will go away when we move to inline this work. 
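For the hvCall.S rework that follows: with CONFIG_HCALL_STATS=y, every hypervisor call is now bracketed by the HCALL_INST_PRECALL and HCALL_INST_POSTCALL macros, which snapshot the timebase (and PURR, where the processor provides one) before the HVSC and accumulate per-opcode, per-cpu deltas afterwards. The bookkeeping, restated in C purely for readability; the real version stays in assembly so the precall side can preserve every register, and the per-cpu accessor and feature test below are assumptions of this sketch:

    static inline void example_account_hcall(unsigned long opcode,
    					 unsigned long tb_before,
    					 unsigned long purr_before)
    {
    	struct hcall_stats *hs;

    	if (opcode > MAX_HCALL_OPCODE)
    		return;

    	/* one stats slot per opcode (opcodes step by 4), per cpu */
    	hs = &__get_cpu_var(hcall_stats)[opcode >> 2];
    	hs->num_calls++;
    	hs->tb_total += mftb() - tb_before;
    	if (cpu_has_feature(CPU_FTR_PURR))	/* guarded by a feature section in the asm */
    		hs->purr_total += mfspr(SPRN_PURR) - purr_before;
    }
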
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -11,217 +10,153 @@ #include <asm/hvcall.h> #include <asm/processor.h> #include <asm/ppc_asm.h> +#include <asm/asm-offsets.h> #define STK_PARM(i) (48 + ((i)-3)*8) - .text - -/* long plpar_hcall(unsigned long opcode, R3 - unsigned long arg1, R4 - unsigned long arg2, R5 - unsigned long arg3, R6 - unsigned long arg4, R7 - unsigned long *out1, R8 - unsigned long *out2, R9 - unsigned long *out3); R10 +#ifdef CONFIG_HCALL_STATS +/* + * precall must preserve all registers. use unused STK_PARM() + * areas to save snapshots and opcode. */ -_GLOBAL(plpar_hcall) - HMT_MEDIUM - - mfcr r0 - - std r8,STK_PARM(r8)(r1) /* Save out ptrs */ - std r9,STK_PARM(r9)(r1) - std r10,STK_PARM(r10)(r1) - - stw r0,8(r1) - - HVSC /* invoke the hypervisor */ - - lwz r0,8(r1) - - ld r8,STK_PARM(r8)(r1) /* Fetch r4-r6 ret args */ - ld r9,STK_PARM(r9)(r1) - ld r10,STK_PARM(r10)(r1) - std r4,0(r8) - std r5,0(r9) - std r6,0(r10) - - mtcrf 0xff,r0 - blr /* return r3 = status */ +#define HCALL_INST_PRECALL \ + std r3,STK_PARM(r3)(r1); /* save opcode */ \ + mftb r0; /* get timebase and */ \ + std r0,STK_PARM(r5)(r1); /* save for later */ \ +BEGIN_FTR_SECTION; \ + mfspr r0,SPRN_PURR; /* get PURR and */ \ + std r0,STK_PARM(r6)(r1); /* save for later */ \ +END_FTR_SECTION_IFCLR(CPU_FTR_PURR); + +/* + * postcall is performed immediately before function return which + * allows liberal use of volatile registers. + */ +#define HCALL_INST_POSTCALL \ + ld r4,STK_PARM(r3)(r1); /* validate opcode */ \ + cmpldi cr7,r4,MAX_HCALL_OPCODE; \ + bgt- cr7,1f; \ + \ + /* get time and PURR snapshots after hcall */ \ + mftb r7; /* timebase after */ \ +BEGIN_FTR_SECTION; \ + mfspr r8,SPRN_PURR; /* PURR after */ \ + ld r6,STK_PARM(r6)(r1); /* PURR before */ \ + subf r6,r6,r8; /* delta */ \ +END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \ + ld r5,STK_PARM(r5)(r1); /* timebase before */ \ + subf r5,r5,r7; /* time delta */ \ + \ + /* calculate address of stat structure r4 = opcode */ \ + srdi r4,r4,2; /* index into array */ \ + mulli r4,r4,HCALL_STAT_SIZE; \ + LOAD_REG_ADDR(r7, per_cpu__hcall_stats); \ + add r4,r4,r7; \ + ld r7,PACA_DATA_OFFSET(r13); /* per cpu offset */ \ + add r4,r4,r7; \ + \ + /* update stats */ \ + ld r7,HCALL_STAT_CALLS(r4); /* count */ \ + addi r7,r7,1; \ + std r7,HCALL_STAT_CALLS(r4); \ + ld r7,HCALL_STAT_TB(r4); /* timebase */ \ + add r7,r7,r5; \ + std r7,HCALL_STAT_TB(r4); \ +BEGIN_FTR_SECTION; \ + ld r7,HCALL_STAT_PURR(r4); /* PURR */ \ + add r7,r7,r6; \ + std r7,HCALL_STAT_PURR(r4); \ +END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \ +1: +#else +#define HCALL_INST_PRECALL +#define HCALL_INST_POSTCALL +#endif + .text -/* Simple interface with no output values (other than status) */ _GLOBAL(plpar_hcall_norets) HMT_MEDIUM mfcr r0 stw r0,8(r1) - HVSC /* invoke the hypervisor */ - - lwz r0,8(r1) - mtcrf 0xff,r0 - blr /* return r3 = status */ - - -/* long plpar_hcall_8arg_2ret(unsigned long opcode, R3 - unsigned long arg1, R4 - unsigned long arg2, R5 - unsigned long arg3, R6 - unsigned long arg4, R7 - unsigned long arg5, R8 - unsigned long arg6, R9 - unsigned long arg7, R10 - unsigned long arg8, 112(R1) - unsigned long *out1); 120(R1) - */ -_GLOBAL(plpar_hcall_8arg_2ret) - HMT_MEDIUM - - mfcr r0 - ld r11,STK_PARM(r11)(r1) /* put arg8 in R11 */ - stw r0,8(r1) + HCALL_INST_PRECALL HVSC /* invoke the hypervisor */ + HCALL_INST_POSTCALL + lwz r0,8(r1) - ld r10,STK_PARM(r12)(r1) /* Fetch r4 ret arg */ - std r4,0(r10) mtcrf 
0xff,r0 blr /* return r3 = status */ - -/* long plpar_hcall_4out(unsigned long opcode, R3 - unsigned long arg1, R4 - unsigned long arg2, R5 - unsigned long arg3, R6 - unsigned long arg4, R7 - unsigned long *out1, R8 - unsigned long *out2, R9 - unsigned long *out3, R10 - unsigned long *out4); 112(R1) - */ -_GLOBAL(plpar_hcall_4out) +_GLOBAL(plpar_hcall) HMT_MEDIUM mfcr r0 stw r0,8(r1) - std r8,STK_PARM(r8)(r1) /* Save out ptrs */ - std r9,STK_PARM(r9)(r1) - std r10,STK_PARM(r10)(r1) - - HVSC /* invoke the hypervisor */ - - lwz r0,8(r1) + HCALL_INST_PRECALL - ld r8,STK_PARM(r8)(r1) /* Fetch r4-r7 ret args */ - ld r9,STK_PARM(r9)(r1) - ld r10,STK_PARM(r10)(r1) - ld r11,STK_PARM(r11)(r1) - std r4,0(r8) - std r5,0(r9) - std r6,0(r10) - std r7,0(r11) + std r4,STK_PARM(r4)(r1) /* Save ret buffer */ - mtcrf 0xff,r0 - blr /* return r3 = status */ - -/* plpar_hcall_7arg_7ret(unsigned long opcode, R3 - unsigned long arg1, R4 - unsigned long arg2, R5 - unsigned long arg3, R6 - unsigned long arg4, R7 - unsigned long arg5, R8 - unsigned long arg6, R9 - unsigned long arg7, R10 - unsigned long *out1, 112(R1) - unsigned long *out2, 110(R1) - unsigned long *out3, 108(R1) - unsigned long *out4, 106(R1) - unsigned long *out5, 104(R1) - unsigned long *out6, 102(R1) - unsigned long *out7); 100(R1) -*/ -_GLOBAL(plpar_hcall_7arg_7ret) - HMT_MEDIUM - - mfcr r0 - stw r0,8(r1) + mr r4,r5 + mr r5,r6 + mr r6,r7 + mr r7,r8 + mr r8,r9 + mr r9,r10 HVSC /* invoke the hypervisor */ - lwz r0,8(r1) + ld r12,STK_PARM(r4)(r1) + std r4, 0(r12) + std r5, 8(r12) + std r6, 16(r12) + std r7, 24(r12) - ld r11,STK_PARM(r11)(r1) /* Fetch r4 ret arg */ - std r4,0(r11) - ld r11,STK_PARM(r12)(r1) /* Fetch r5 ret arg */ - std r5,0(r11) - ld r11,STK_PARM(r13)(r1) /* Fetch r6 ret arg */ - std r6,0(r11) - ld r11,STK_PARM(r14)(r1) /* Fetch r7 ret arg */ - std r7,0(r11) - ld r11,STK_PARM(r15)(r1) /* Fetch r8 ret arg */ - std r8,0(r11) - ld r11,STK_PARM(r16)(r1) /* Fetch r9 ret arg */ - std r9,0(r11) - ld r11,STK_PARM(r17)(r1) /* Fetch r10 ret arg */ - std r10,0(r11) + HCALL_INST_POSTCALL + lwz r0,8(r1) mtcrf 0xff,r0 blr /* return r3 = status */ -/* plpar_hcall_9arg_9ret(unsigned long opcode, R3 - unsigned long arg1, R4 - unsigned long arg2, R5 - unsigned long arg3, R6 - unsigned long arg4, R7 - unsigned long arg5, R8 - unsigned long arg6, R9 - unsigned long arg7, R10 - unsigned long arg8, 112(R1) - unsigned long arg9, 110(R1) - unsigned long *out1, 108(R1) - unsigned long *out2, 106(R1) - unsigned long *out3, 104(R1) - unsigned long *out4, 102(R1) - unsigned long *out5, 100(R1) - unsigned long *out6, 98(R1) - unsigned long *out7); 96(R1) - unsigned long *out8, 94(R1) - unsigned long *out9, 92(R1) -*/ -_GLOBAL(plpar_hcall_9arg_9ret) +_GLOBAL(plpar_hcall9) HMT_MEDIUM mfcr r0 stw r0,8(r1) - ld r11,STK_PARM(r11)(r1) /* put arg8 in R11 */ - ld r12,STK_PARM(r12)(r1) /* put arg9 in R12 */ + HCALL_INST_PRECALL + + std r4,STK_PARM(r4)(r1) /* Save ret buffer */ + + mr r4,r5 + mr r5,r6 + mr r6,r7 + mr r7,r8 + mr r8,r9 + mr r9,r10 + ld r10,STK_PARM(r11)(r1) /* put arg7 in R10 */ + ld r11,STK_PARM(r12)(r1) /* put arg8 in R11 */ + ld r12,STK_PARM(r13)(r1) /* put arg9 in R12 */ HVSC /* invoke the hypervisor */ - ld r0,STK_PARM(r13)(r1) /* Fetch r4 ret arg */ - stdx r4,r0,r0 - ld r0,STK_PARM(r14)(r1) /* Fetch r5 ret arg */ - stdx r5,r0,r0 - ld r0,STK_PARM(r15)(r1) /* Fetch r6 ret arg */ - stdx r6,r0,r0 - ld r0,STK_PARM(r16)(r1) /* Fetch r7 ret arg */ - stdx r7,r0,r0 - ld r0,STK_PARM(r17)(r1) /* Fetch r8 ret arg */ - stdx r8,r0,r0 - ld r0,STK_PARM(r18)(r1) /* 
Fetch r9 ret arg */ - stdx r9,r0,r0 - ld r0,STK_PARM(r19)(r1) /* Fetch r10 ret arg */ - stdx r10,r0,r0 - ld r0,STK_PARM(r20)(r1) /* Fetch r11 ret arg */ - stdx r11,r0,r0 - ld r0,STK_PARM(r21)(r1) /* Fetch r12 ret arg */ - stdx r12,r0,r0 + ld r12,STK_PARM(r4)(r1) + std r4, 0(r12) + std r5, 8(r12) + std r6, 16(r12) + std r7, 24(r12) + std r8, 32(r12) + std r9, 40(r12) + std r10,48(r12) + std r11,56(r12) + std r12,64(r12) + + HCALL_INST_POSTCALL lwz r0,8(r1) mtcrf 0xff,r0 diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c new file mode 100644 index 000000000000..641e6511cf06 --- /dev/null +++ b/arch/powerpc/platforms/pseries/hvCall_inst.c @@ -0,0 +1,129 @@ +/* + * Copyright (C) 2006 Mike Kravetz IBM Corporation + * + * Hypervisor Call Instrumentation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/percpu.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/cpumask.h> +#include <asm/hvcall.h> +#include <asm/firmware.h> +#include <asm/cputable.h> + +DEFINE_PER_CPU(struct hcall_stats[HCALL_STAT_ARRAY_SIZE], hcall_stats); + +/* + * Routines for displaying the statistics in debugfs + */ +static void *hc_start(struct seq_file *m, loff_t *pos) +{ + if ((int)*pos < HCALL_STAT_ARRAY_SIZE) + return (void *)(unsigned long)(*pos + 1); + + return NULL; +} + +static void *hc_next(struct seq_file *m, void *p, loff_t * pos) +{ + ++*pos; + + return hc_start(m, pos); +} + +static void hc_stop(struct seq_file *m, void *p) +{ +} + +static int hc_show(struct seq_file *m, void *p) +{ + unsigned long h_num = (unsigned long)p; + struct hcall_stats *hs = (struct hcall_stats *)m->private; + + if (hs[h_num].num_calls) { + if (!cpu_has_feature(CPU_FTR_PURR)) + seq_printf(m, "%lu %lu %lu %lu\n", h_num<<2, + hs[h_num].num_calls, + hs[h_num].tb_total, + hs[h_num].purr_total); + else + seq_printf(m, "%lu %lu %lu\n", h_num<<2, + hs[h_num].num_calls, + hs[h_num].tb_total); + } + + return 0; +} + +static struct seq_operations hcall_inst_seq_ops = { + .start = hc_start, + .next = hc_next, + .stop = hc_stop, + .show = hc_show +}; + +static int hcall_inst_seq_open(struct inode *inode, struct file *file) +{ + int rc; + struct seq_file *seq; + + rc = seq_open(file, &hcall_inst_seq_ops); + seq = file->private_data; + seq->private = file->f_dentry->d_inode->u.generic_ip; + + return rc; +} + +static struct file_operations hcall_inst_seq_fops = { + .open = hcall_inst_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +#define HCALL_ROOT_DIR "hcall_inst" +#define CPU_NAME_BUF_SIZE 32 + +static int __init hcall_inst_init(void) +{ + struct dentry *hcall_root; + struct dentry *hcall_file; + char cpu_name_buf[CPU_NAME_BUF_SIZE]; + int cpu; + + if (!firmware_has_feature(FW_FEATURE_LPAR)) + return 0; + + hcall_root = 
debugfs_create_dir(HCALL_ROOT_DIR, NULL); + if (!hcall_root) + return -ENOMEM; + + for_each_possible_cpu(cpu) { + snprintf(cpu_name_buf, CPU_NAME_BUF_SIZE, "cpu%d", cpu); + hcall_file = debugfs_create_file(cpu_name_buf, S_IRUGO, + hcall_root, + per_cpu(hcall_stats, cpu), + &hcall_inst_seq_fops); + if (!hcall_file) + return -ENOMEM; + } + + return 0; +} +__initcall(hcall_inst_init); diff --git a/arch/powerpc/platforms/pseries/hvconsole.c b/arch/powerpc/platforms/pseries/hvconsole.c index a72a987f1d4d..3f6a89b09816 100644 --- a/arch/powerpc/platforms/pseries/hvconsole.c +++ b/arch/powerpc/platforms/pseries/hvconsole.c @@ -27,6 +27,7 @@ #include <linux/module.h> #include <asm/hvcall.h> #include <asm/hvconsole.h> +#include "plpar_wrappers.h" /** * hvc_get_chars - retrieve characters from firmware for denoted vterm adatper @@ -40,9 +41,9 @@ int hvc_get_chars(uint32_t vtermno, char *buf, int count) { unsigned long got; - if (plpar_hcall(H_GET_TERM_CHAR, vtermno, 0, 0, 0, &got, - (unsigned long *)buf, (unsigned long *)buf+1) == H_SUCCESS) + if (plpar_get_term_char(vtermno, &got, buf) == H_SUCCESS) return got; + return 0; } diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index d67af2c65754..bbf2e34dc358 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -267,13 +267,12 @@ static void iommu_table_setparms(struct pci_controller *phb, struct iommu_table *tbl) { struct device_node *node; - unsigned long *basep; - unsigned int *sizep; + const unsigned long *basep, *sizep; node = (struct device_node *)phb->arch_data; - basep = (unsigned long *)get_property(node, "linux,tce-base", NULL); - sizep = (unsigned int *)get_property(node, "linux,tce-size", NULL); + basep = get_property(node, "linux,tce-base", NULL); + sizep = get_property(node, "linux,tce-size", NULL); if (basep == NULL || sizep == NULL) { printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %s has " "missing tce entries !\n", dn->full_name); @@ -315,7 +314,7 @@ static void iommu_table_setparms(struct pci_controller *phb, static void iommu_table_setparms_lpar(struct pci_controller *phb, struct device_node *dn, struct iommu_table *tbl, - unsigned char *dma_window) + const void *dma_window) { unsigned long offset, size; @@ -415,7 +414,7 @@ static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus) struct iommu_table *tbl; struct device_node *dn, *pdn; struct pci_dn *ppci; - unsigned char *dma_window = NULL; + const void *dma_window = NULL; DBG("iommu_bus_setup_pSeriesLP, bus %p, bus->self %p\n", bus, bus->self); @@ -519,7 +518,7 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev) { struct device_node *pdn, *dn; struct iommu_table *tbl; - unsigned char *dma_window = NULL; + const void *dma_window = NULL; struct pci_dn *pci; DBG("iommu_dev_setup_pSeriesLP, dev %p (%s)\n", dev, pci_name(dev)); diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 3aeb40699042..1820a0b0a8c6 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -48,13 +48,11 @@ #define DBG_LOW(fmt...) 
do { } while(0) #endif -/* in pSeries_hvCall.S */ +/* in hvCall.S */ EXPORT_SYMBOL(plpar_hcall); -EXPORT_SYMBOL(plpar_hcall_4out); +EXPORT_SYMBOL(plpar_hcall9); EXPORT_SYMBOL(plpar_hcall_norets); -EXPORT_SYMBOL(plpar_hcall_8arg_2ret); -EXPORT_SYMBOL(plpar_hcall_7arg_7ret); -EXPORT_SYMBOL(plpar_hcall_9arg_9ret); + extern void pSeries_find_serial_port(void); @@ -204,20 +202,20 @@ void __init udbg_init_debug_lpar(void) void __init find_udbg_vterm(void) { struct device_node *stdout_node; - u32 *termno; - char *name; + const u32 *termno; + const char *name; int add_console; /* find the boot console from /chosen/stdout */ if (!of_chosen) return; - name = (char *)get_property(of_chosen, "linux,stdout-path", NULL); + name = get_property(of_chosen, "linux,stdout-path", NULL); if (name == NULL) return; stdout_node = of_find_node_by_path(name); if (!stdout_node) return; - name = (char *)get_property(stdout_node, "name", NULL); + name = get_property(stdout_node, "name", NULL); if (!name) { printk(KERN_WARNING "stdout node missing 'name' property!\n"); goto out; @@ -228,7 +226,7 @@ void __init find_udbg_vterm(void) /* Check if it's a virtual terminal */ if (strncmp(name, "vty", 3) != 0) goto out; - termno = (u32 *)get_property(stdout_node, "reg", NULL); + termno = get_property(stdout_node, "reg", NULL); if (termno == NULL) goto out; vtermno = termno[0]; @@ -254,18 +252,34 @@ out: void vpa_init(int cpu) { int hwcpu = get_hard_smp_processor_id(cpu); - unsigned long vpa = __pa(&lppaca[cpu]); + unsigned long addr; long ret; if (cpu_has_feature(CPU_FTR_ALTIVEC)) lppaca[cpu].vmxregs_in_use = 1; - ret = register_vpa(hwcpu, vpa); + addr = __pa(&lppaca[cpu]); + ret = register_vpa(hwcpu, addr); - if (ret) + if (ret) { printk(KERN_ERR "WARNING: vpa_init: VPA registration for " "cpu %d (hw %d) of area %lx returns %ld\n", - cpu, hwcpu, vpa, ret); + cpu, hwcpu, addr, ret); + return; + } + /* + * PAPR says this feature is SLB-Buffer but firmware never + * reports that. All SPLPAR support SLB shadow buffer. 
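The lpar.c and plpar_wrappers.h changes here and below complete the move to two generic entry points, plpar_hcall() and plpar_hcall9(): rather than one exported assembly variant per argument/return combination, outputs now come back through a caller-supplied buffer, while plpar_hcall_norets() remains for calls with no outputs beyond the status. A typical wrapper in the new style, mirroring the reworked plpar_pte_read() below:

    static inline long example_pte_read(unsigned long flags, unsigned long ptex,
    				    unsigned long *hpte_v, unsigned long *hpte_r)
    {
    	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
    	long rc;

    	/* inputs follow the opcode and the return buffer */
    	rc = plpar_hcall(H_READ, retbuf, flags, ptex);

    	*hpte_v = retbuf[0];	/* outputs land in retbuf[] */
    	*hpte_r = retbuf[1];

    	return rc;
    }
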
+ */ + addr = __pa(&slb_shadow[cpu]); + if (firmware_has_feature(FW_FEATURE_SPLPAR)) { + ret = register_slb_shadow(hwcpu, addr); + if (ret) + printk(KERN_ERR + "WARNING: vpa_init: SLB shadow buffer " + "registration for cpu %d (hw %d) of area %lx " + "returns %ld\n", cpu, hwcpu, addr, ret); + } } long pSeries_lpar_hpte_insert(unsigned long hpte_group, @@ -277,7 +291,6 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group, unsigned long flags; unsigned long slot; unsigned long hpte_v, hpte_r; - unsigned long dummy0, dummy1; if (!(vflags & HPTE_V_BOLTED)) DBG_LOW("hpte_insert(group=%lx, va=%016lx, pa=%016lx, " @@ -302,8 +315,7 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group, if (rflags & (_PAGE_GUARDED|_PAGE_NO_CACHE)) hpte_r &= ~_PAGE_COHERENT; - lpar_rc = plpar_hcall(H_ENTER, flags, hpte_group, hpte_v, - hpte_r, &slot, &dummy0, &dummy1); + lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot); if (unlikely(lpar_rc == H_PTEG_FULL)) { if (!(vflags & HPTE_V_BOLTED)) DBG_LOW(" full\n"); diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c index 18abfb1f4e24..64163cecdf93 100644 --- a/arch/powerpc/platforms/pseries/nvram.c +++ b/arch/powerpc/platforms/pseries/nvram.c @@ -123,13 +123,14 @@ static ssize_t pSeries_nvram_get_size(void) int __init pSeries_nvram_init(void) { struct device_node *nvram; - unsigned int *nbytes_p, proplen; + const unsigned int *nbytes_p; + unsigned int proplen; nvram = of_find_node_by_type(NULL, "nvram"); if (nvram == NULL) return -ENODEV; - nbytes_p = (unsigned int *)get_property(nvram, "#bytes", &proplen); + nbytes_p = get_property(nvram, "#bytes", &proplen); if (nbytes_p == NULL || proplen != sizeof(unsigned int)) return -EIO; diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index e97e67f5e079..410a6bcc4ca0 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c @@ -60,7 +60,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_name_device); static void __devinit check_s7a(void) { struct device_node *root; - char *model; + const char *model; s7a_workaround = 0; root = of_find_node_by_path("/"); diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h index 3bd1b3e06003..3eb7b294d92f 100644 --- a/arch/powerpc/platforms/pseries/plpar_wrappers.h +++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h @@ -5,20 +5,17 @@ static inline long poll_pending(void) { - unsigned long dummy; - return plpar_hcall(H_POLL_PENDING, 0, 0, 0, 0, &dummy, &dummy, &dummy); + return plpar_hcall_norets(H_POLL_PENDING); } static inline long prod_processor(void) { - plpar_hcall_norets(H_PROD); - return 0; + return plpar_hcall_norets(H_PROD); } static inline long cede_processor(void) { - plpar_hcall_norets(H_CEDE); - return 0; + return plpar_hcall_norets(H_CEDE); } static inline long vpa_call(unsigned long flags, unsigned long cpu, @@ -40,23 +37,59 @@ static inline long register_vpa(unsigned long cpu, unsigned long vpa) return vpa_call(0x1, cpu, vpa); } +static inline long unregister_slb_shadow(unsigned long cpu, unsigned long vpa) +{ + return vpa_call(0x7, cpu, vpa); +} + +static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa) +{ + return vpa_call(0x3, cpu, vpa); +} + extern void vpa_init(int cpu); +static inline long plpar_pte_enter(unsigned long flags, + unsigned long hpte_group, unsigned long hpte_v, + unsigned long hpte_r, unsigned long *slot) +{ + long rc; + unsigned 
long retbuf[PLPAR_HCALL_BUFSIZE]; + + rc = plpar_hcall(H_ENTER, retbuf, flags, hpte_group, hpte_v, hpte_r); + + *slot = retbuf[0]; + + return rc; +} + static inline long plpar_pte_remove(unsigned long flags, unsigned long ptex, unsigned long avpn, unsigned long *old_pteh_ret, unsigned long *old_ptel_ret) { - unsigned long dummy; - return plpar_hcall(H_REMOVE, flags, ptex, avpn, 0, old_pteh_ret, - old_ptel_ret, &dummy); + long rc; + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + + rc = plpar_hcall(H_REMOVE, retbuf, flags, ptex, avpn); + + *old_pteh_ret = retbuf[0]; + *old_ptel_ret = retbuf[1]; + + return rc; } static inline long plpar_pte_read(unsigned long flags, unsigned long ptex, unsigned long *old_pteh_ret, unsigned long *old_ptel_ret) { - unsigned long dummy; - return plpar_hcall(H_READ, flags, ptex, 0, 0, old_pteh_ret, - old_ptel_ret, &dummy); + long rc; + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + + rc = plpar_hcall(H_READ, retbuf, flags, ptex); + + *old_pteh_ret = retbuf[0]; + *old_ptel_ret = retbuf[1]; + + return rc; } static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex, @@ -68,9 +101,14 @@ static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex, static inline long plpar_tce_get(unsigned long liobn, unsigned long ioba, unsigned long *tce_ret) { - unsigned long dummy; - return plpar_hcall(H_GET_TCE, liobn, ioba, 0, 0, tce_ret, &dummy, - &dummy); + long rc; + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + + rc = plpar_hcall(H_GET_TCE, retbuf, liobn, ioba); + + *tce_ret = retbuf[0]; + + return rc; } static inline long plpar_tce_put(unsigned long liobn, unsigned long ioba, @@ -94,9 +132,17 @@ static inline long plpar_tce_stuff(unsigned long liobn, unsigned long ioba, static inline long plpar_get_term_char(unsigned long termno, unsigned long *len_ret, char *buf_ret) { + long rc; + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; unsigned long *lbuf = (unsigned long *)buf_ret; /* TODO: alignment? 
*/ - return plpar_hcall(H_GET_TERM_CHAR, termno, 0, 0, 0, len_ret, - lbuf + 0, lbuf + 1); + + rc = plpar_hcall(H_GET_TERM_CHAR, retbuf, termno); + + *len_ret = retbuf[0]; + lbuf[0] = retbuf[1]; + lbuf[1] = retbuf[2]; + + return rc; } static inline long plpar_put_term_char(unsigned long termno, unsigned long len, @@ -107,4 +153,31 @@ static inline long plpar_put_term_char(unsigned long termno, unsigned long len, lbuf[1]); } +static inline long plpar_eoi(unsigned long xirr) +{ + return plpar_hcall_norets(H_EOI, xirr); +} + +static inline long plpar_cppr(unsigned long cppr) +{ + return plpar_hcall_norets(H_CPPR, cppr); +} + +static inline long plpar_ipi(unsigned long servernum, unsigned long mfrr) +{ + return plpar_hcall_norets(H_IPI, servernum, mfrr); +} + +static inline long plpar_xirr(unsigned long *xirr_ret) +{ + long rc; + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + + rc = plpar_hcall(H_XIRR, retbuf); + + *xirr_ret = retbuf[0]; + + return rc; +} + #endif /* _PSERIES_PLPAR_WRAPPERS_H */ diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index c7ffde1a614e..903115d67fdc 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c @@ -79,7 +79,7 @@ static void request_ras_irqs(struct device_node *np, { int i, index, count = 0; struct of_irq oirq; - u32 *opicprop; + const u32 *opicprop; unsigned int opicplen; unsigned int virqs[16]; @@ -87,7 +87,7 @@ static void request_ras_irqs(struct device_node *np, * map those interrupts using the default interrupt host and default * trigger */ - opicprop = (u32 *)get_property(np, "open-pic-interrupt", &opicplen); + opicprop = get_property(np, "open-pic-interrupt", &opicplen); if (opicprop) { opicplen /= sizeof(u32); for (i = 0; i < opicplen; i++) { diff --git a/arch/powerpc/platforms/pseries/rtasd.c b/arch/powerpc/platforms/pseries/rtasd.c index 2e4e04042d85..8ca2612221d6 100644 --- a/arch/powerpc/platforms/pseries/rtasd.c +++ b/arch/powerpc/platforms/pseries/rtasd.c @@ -359,11 +359,11 @@ static int enable_surveillance(int timeout) static int get_eventscan_parms(void) { struct device_node *node; - int *ip; + const int *ip; node = of_find_node_by_path("/rtas"); - ip = (int *)get_property(node, "rtas-event-scan-rate", NULL); + ip = get_property(node, "rtas-event-scan-rate", NULL); if (ip == NULL) { printk(KERN_ERR "rtasd: no rtas-event-scan-rate\n"); of_node_put(node); diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 31867a701fcb..a6398fbe530d 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -133,9 +133,9 @@ void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc, static void __init pseries_mpic_init_IRQ(void) { struct device_node *np, *old, *cascade = NULL; - unsigned int *addrp; + const unsigned int *addrp; unsigned long intack = 0; - unsigned int *opprop; + const unsigned int *opprop; unsigned long openpic_addr = 0; unsigned int cascade_irq; int naddr, n, i, opplen; @@ -143,7 +143,7 @@ static void __init pseries_mpic_init_IRQ(void) np = of_find_node_by_path("/"); naddr = prom_n_addr_cells(np); - opprop = (unsigned int *) get_property(np, "platform-open-pic", &opplen); + opprop = get_property(np, "platform-open-pic", &opplen); if (opprop != 0) { openpic_addr = of_read_number(opprop, naddr); printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr); @@ -192,7 +192,7 @@ static void __init pseries_mpic_init_IRQ(void) break; if (strcmp(np->name, "pci") != 0) continue; - addrp = 
(u32 *)get_property(np, "8259-interrupt-acknowledge", + addrp = get_property(np, "8259-interrupt-acknowledge", NULL); if (addrp == NULL) continue; @@ -223,23 +223,37 @@ static void pseries_lpar_enable_pmcs(void) } #ifdef CONFIG_KEXEC -static void pseries_kexec_cpu_down_mpic(int crash_shutdown, int secondary) -{ - mpic_teardown_this_cpu(secondary); -} - -static void pseries_kexec_cpu_down_xics(int crash_shutdown, int secondary) +static void pseries_kexec_cpu_down(int crash_shutdown, int secondary) { /* Don't risk a hypervisor call if we're crashing */ if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) { - unsigned long vpa = __pa(get_lppaca()); + unsigned long addr; - if (unregister_vpa(hard_smp_processor_id(), vpa)) { + addr = __pa(get_slb_shadow()); + if (unregister_slb_shadow(hard_smp_processor_id(), addr)) + printk("SLB shadow buffer deregistration of " + "cpu %u (hw_cpu_id %d) failed\n", + smp_processor_id(), + hard_smp_processor_id()); + + addr = __pa(get_lppaca()); + if (unregister_vpa(hard_smp_processor_id(), addr)) { printk("VPA deregistration of cpu %u (hw_cpu_id %d) " "failed\n", smp_processor_id(), hard_smp_processor_id()); } } +} + +static void pseries_kexec_cpu_down_mpic(int crash_shutdown, int secondary) +{ + pseries_kexec_cpu_down(crash_shutdown, secondary); + mpic_teardown_this_cpu(secondary); +} + +static void pseries_kexec_cpu_down_xics(int crash_shutdown, int secondary) +{ + pseries_kexec_cpu_down(crash_shutdown, secondary); xics_teardown_cpu(secondary); } #endif /* CONFIG_KEXEC */ @@ -247,11 +261,11 @@ static void pseries_kexec_cpu_down_xics(int crash_shutdown, int secondary) static void __init pseries_discover_pic(void) { struct device_node *np; - char *typep; + const char *typep; for (np = NULL; (np = of_find_node_by_name(np, "interrupt-controller"));) { - typep = (char *)get_property(np, "compatible", NULL); + typep = get_property(np, "compatible", NULL); if (strstr(typep, "open-pic")) { pSeries_mpic_node = of_node_get(np); ppc_md.init_IRQ = pseries_mpic_init_IRQ; diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index ac61098ff401..c6624b8a0e77 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c @@ -62,7 +62,7 @@ */ static cpumask_t of_spin_map; -extern void pSeries_secondary_smp_init(unsigned long); +extern void generic_secondary_smp_init(unsigned long); #ifdef CONFIG_HOTPLUG_CPU @@ -145,9 +145,9 @@ static int pSeries_add_processor(struct device_node *np) unsigned int cpu; cpumask_t candidate_map, tmp = CPU_MASK_NONE; int err = -ENOSPC, len, nthreads, i; - u32 *intserv; + const u32 *intserv; - intserv = (u32 *)get_property(np, "ibm,ppc-interrupt-server#s", &len); + intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len); if (!intserv) return 0; @@ -205,9 +205,9 @@ static void pSeries_remove_processor(struct device_node *np) { unsigned int cpu; int len, nthreads, i; - u32 *intserv; + const u32 *intserv; - intserv = (u32 *)get_property(np, "ibm,ppc-interrupt-server#s", &len); + intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len); if (!intserv) return; @@ -270,7 +270,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu) { int status; unsigned long start_here = __pa((u32)*((unsigned long *) - pSeries_secondary_smp_init)); + generic_secondary_smp_init)); unsigned int pcpu; int start_cpu; diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index e98863025721..253972e5479f 100644 --- 
a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c @@ -34,6 +34,7 @@ #include <asm/i8259.h> #include "xics.h" +#include "plpar_wrappers.h" #define XICS_IPI 2 #define XICS_IRQ_SPURIOUS 0 @@ -110,27 +111,6 @@ static inline void direct_qirr_info(int n_cpu, u8 value) /* LPAR low level accessors */ -static inline long plpar_eoi(unsigned long xirr) -{ - return plpar_hcall_norets(H_EOI, xirr); -} - -static inline long plpar_cppr(unsigned long cppr) -{ - return plpar_hcall_norets(H_CPPR, cppr); -} - -static inline long plpar_ipi(unsigned long servernum, unsigned long mfrr) -{ - return plpar_hcall_norets(H_IPI, servernum, mfrr); -} - -static inline long plpar_xirr(unsigned long *xirr_ret) -{ - unsigned long dummy; - return plpar_hcall(H_XIRR, 0, 0, 0, 0, xirr_ret, &dummy, &dummy); -} - static inline unsigned int lpar_xirr_info_get(int n_cpu) { unsigned long lpar_rc; @@ -590,14 +570,14 @@ static void __init xics_init_one_node(struct device_node *np, unsigned int *indx) { unsigned int ilen; - u32 *ireg; + const u32 *ireg; /* This code does the theorically broken assumption that the interrupt * server numbers are the same as the hard CPU numbers. * This happens to be the case so far but we are playing with fire... * should be fixed one of these days. -BenH. */ - ireg = (u32 *)get_property(np, "ibm,interrupt-server-ranges", NULL); + ireg = get_property(np, "ibm,interrupt-server-ranges", NULL); /* Do that ever happen ? we'll know soon enough... but even good'old * f80 does have that property .. @@ -609,7 +589,7 @@ static void __init xics_init_one_node(struct device_node *np, */ *indx = *ireg; } - ireg = (u32 *)get_property(np, "reg", &ilen); + ireg = get_property(np, "reg", &ilen); if (!ireg) panic("xics_init_IRQ: can't find interrupt reg property"); @@ -635,7 +615,7 @@ static void __init xics_setup_8259_cascade(void) { struct device_node *np, *old, *found = NULL; int cascade, naddr; - u32 *addrp; + const u32 *addrp; unsigned long intack = 0; for_each_node_by_type(np, "interrupt-controller") @@ -661,7 +641,7 @@ static void __init xics_setup_8259_cascade(void) break; if (strcmp(np->name, "pci") != 0) continue; - addrp = (u32 *)get_property(np, "8259-interrupt-acknowledge", NULL); + addrp = get_property(np, "8259-interrupt-acknowledge", NULL); if (addrp == NULL) continue; naddr = prom_n_addr_cells(np); @@ -680,7 +660,8 @@ void __init xics_init_IRQ(void) { int i; struct device_node *np; - u32 *ireg, ilen, indx = 0; + u32 ilen, indx = 0; + const u32 *ireg; int found = 0; ppc64_boot_msg(0x20, "XICS Init"); @@ -705,18 +686,17 @@ void __init xics_init_IRQ(void) for (np = of_find_node_by_type(NULL, "cpu"); np; np = of_find_node_by_type(np, "cpu")) { - ireg = (u32 *)get_property(np, "reg", &ilen); + ireg = get_property(np, "reg", &ilen); if (ireg && ireg[0] == get_hard_smp_processor_id(boot_cpuid)) { - ireg = (u32 *)get_property(np, - "ibm,ppc-interrupt-gserver#s", - &ilen); + ireg = get_property(np, + "ibm,ppc-interrupt-gserver#s", &ilen); i = ilen / sizeof(int); if (ireg && i > 0) { default_server = ireg[0]; /* take last element */ default_distrib_server = ireg[i-1]; } - ireg = (u32 *)get_property(np, + ireg = get_property(np, "ibm,interrupt-server#-size", NULL); if (ireg) interrupt_server_size = *ireg; diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c index ef10bcf2d943..92ba378b7990 100644 --- a/arch/powerpc/sysdev/fsl_soc.c +++ b/arch/powerpc/sysdev/fsl_soc.c @@ -41,7 +41,7 @@ phys_addr_t get_immrbase(void) soc = of_find_node_by_type(NULL, "soc"); if 
(soc) { unsigned int size; - void *prop = get_property(soc, "reg", &size); + const void *prop = get_property(soc, "reg", &size); immrbase = of_translate_address(soc, prop); of_node_put(soc); }; @@ -85,7 +85,7 @@ static int __init gfar_mdio_of_init(void) mdio_data.irq[k] = -1; while ((child = of_get_next_child(np, child)) != NULL) { - u32 *id = get_property(child, "reg", NULL); + const u32 *id = get_property(child, "reg", NULL); mdio_data.irq[*id] = irq_of_parse_and_map(child, 0); } @@ -124,10 +124,10 @@ static int __init gfar_of_init(void) struct resource r[4]; struct device_node *phy, *mdio; struct gianfar_platform_data gfar_data; - unsigned int *id; - char *model; - void *mac_addr; - phandle *ph; + const unsigned int *id; + const char *model; + const void *mac_addr; + const phandle *ph; int n_res = 1; memset(r, 0, sizeof(r)); @@ -193,7 +193,7 @@ static int __init gfar_of_init(void) FSL_GIANFAR_DEV_HAS_VLAN | FSL_GIANFAR_DEV_HAS_EXTENDED_HASH; - ph = (phandle *) get_property(np, "phy-handle", NULL); + ph = get_property(np, "phy-handle", NULL); phy = of_find_node_by_phandle(*ph); if (phy == NULL) { @@ -203,7 +203,7 @@ static int __init gfar_of_init(void) mdio = of_get_parent(phy); - id = (u32 *) get_property(phy, "reg", NULL); + id = get_property(phy, "reg", NULL); ret = of_address_to_resource(mdio, 0, &res); if (ret) { of_node_put(phy); @@ -247,7 +247,7 @@ static int __init fsl_i2c_of_init(void) i++) { struct resource r[2]; struct fsl_i2c_platform_data i2c_data; - unsigned char *flags = NULL; + const unsigned char *flags = NULL; memset(&r, 0, sizeof(r)); memset(&i2c_data, 0, sizeof(i2c_data)); @@ -298,7 +298,7 @@ static int __init mpc83xx_wdt_init(void) struct resource r; struct device_node *soc, *np; struct platform_device *dev; - unsigned int *freq; + const unsigned int *freq; int ret; np = of_find_compatible_node(NULL, "watchdog", "mpc83xx_wdt"); @@ -315,7 +315,7 @@ static int __init mpc83xx_wdt_init(void) goto nosoc; } - freq = (unsigned int *)get_property(soc, "bus-frequency", NULL); + freq = get_property(soc, "bus-frequency", NULL); if (!freq) { ret = -ENODEV; goto err; @@ -355,7 +355,7 @@ nodev: arch_initcall(mpc83xx_wdt_init); #endif -static enum fsl_usb2_phy_modes determine_usb_phy(char * phy_type) +static enum fsl_usb2_phy_modes determine_usb_phy(const char *phy_type) { if (!phy_type) return FSL_USB2_PHY_NONE; @@ -383,7 +383,7 @@ static int __init fsl_usb_of_init(void) i++) { struct resource r[2]; struct fsl_usb2_platform_data usb_data; - unsigned char *prop = NULL; + const unsigned char *prop = NULL; memset(&r, 0, sizeof(r)); memset(&usb_data, 0, sizeof(usb_data)); @@ -431,7 +431,7 @@ static int __init fsl_usb_of_init(void) i++) { struct resource r[2]; struct fsl_usb2_platform_data usb_data; - unsigned char *prop = NULL; + const unsigned char *prop = NULL; memset(&r, 0, sizeof(r)); memset(&usb_data, 0, sizeof(usb_data)); diff --git a/arch/powerpc/sysdev/fsl_soc.h b/arch/powerpc/sysdev/fsl_soc.h index c433d3f39edd..5a3dd480d2fd 100644 --- a/arch/powerpc/sysdev/fsl_soc.h +++ b/arch/powerpc/sysdev/fsl_soc.h @@ -2,6 +2,8 @@ #define __PPC_FSL_SOC_H #ifdef __KERNEL__ +#include <asm/mmu.h> + extern phys_addr_t get_immrbase(void); #endif diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c index 9855820b9548..26a6a3becd66 100644 --- a/arch/powerpc/sysdev/i8259.c +++ b/arch/powerpc/sysdev/i8259.c @@ -224,7 +224,7 @@ static struct irq_host_ops i8259_host_ops = { .xlate = i8259_host_xlate, }; -/**** +/** * i8259_init - Initialize the legacy controller * @node: device 
node of the legacy PIC (can be NULL, but then, it will match * all interrupts, so beware) diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c index 70e707785d49..0251b7c68d0e 100644 --- a/arch/powerpc/sysdev/ipic.c +++ b/arch/powerpc/sysdev/ipic.c @@ -210,7 +210,7 @@ static struct ipic_info ipic_info[] = { .prio_mask = 4, }, [64] = { - .pend = IPIC_SIPNR_H, + .pend = IPIC_SIPNR_L, .mask = IPIC_SIMSR_L, .prio = IPIC_SMPRR_A, .force = IPIC_SIFCR_L, @@ -218,7 +218,7 @@ static struct ipic_info ipic_info[] = { .prio_mask = 0, }, [65] = { - .pend = IPIC_SIPNR_H, + .pend = IPIC_SIPNR_L, .mask = IPIC_SIMSR_L, .prio = IPIC_SMPRR_A, .force = IPIC_SIFCR_L, @@ -226,7 +226,7 @@ static struct ipic_info ipic_info[] = { .prio_mask = 1, }, [66] = { - .pend = IPIC_SIPNR_H, + .pend = IPIC_SIPNR_L, .mask = IPIC_SIMSR_L, .prio = IPIC_SMPRR_A, .force = IPIC_SIFCR_L, @@ -234,7 +234,7 @@ static struct ipic_info ipic_info[] = { .prio_mask = 2, }, [67] = { - .pend = IPIC_SIPNR_H, + .pend = IPIC_SIPNR_L, .mask = IPIC_SIMSR_L, .prio = IPIC_SMPRR_A, .force = IPIC_SIFCR_L, @@ -242,7 +242,7 @@ static struct ipic_info ipic_info[] = { .prio_mask = 3, }, [68] = { - .pend = IPIC_SIPNR_H, + .pend = IPIC_SIPNR_L, .mask = IPIC_SIMSR_L, .prio = IPIC_SMPRR_B, .force = IPIC_SIFCR_L, @@ -250,7 +250,7 @@ static struct ipic_info ipic_info[] = { .prio_mask = 0, }, [69] = { - .pend = IPIC_SIPNR_H, + .pend = IPIC_SIPNR_L, .mask = IPIC_SIMSR_L, .prio = IPIC_SMPRR_B, .force = IPIC_SIFCR_L, @@ -258,7 +258,7 @@ static struct ipic_info ipic_info[] = { .prio_mask = 1, }, [70] = { - .pend = IPIC_SIPNR_H, + .pend = IPIC_SIPNR_L, .mask = IPIC_SIMSR_L, .prio = IPIC_SMPRR_B, .force = IPIC_SIFCR_L, @@ -266,7 +266,7 @@ static struct ipic_info ipic_info[] = { .prio_mask = 2, }, [71] = { - .pend = IPIC_SIPNR_H, + .pend = IPIC_SIPNR_L, .mask = IPIC_SIMSR_L, .prio = IPIC_SMPRR_B, .force = IPIC_SIFCR_L, @@ -274,91 +274,91 @@ static struct ipic_info ipic_info[] = { .prio_mask = 3, }, [72] = { - .pend = IPIC_SIPNR_H, + .pend = IPIC_SIPNR_L, .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 8, }, [73] = { - .pend = IPIC_SIPNR_H, + .pend = IPIC_SIPNR_L, .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 9, }, [74] = { - .pend = IPIC_SIPNR_H, + .pend = IPIC_SIPNR_L, .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 10, }, [75] = { - .pend = IPIC_SIPNR_H, + .pend = IPIC_SIPNR_L, .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 11, }, [76] = { - .pend = IPIC_SIPNR_H, + .pend = IPIC_SIPNR_L, .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 12, }, [77] = { - .pend = IPIC_SIPNR_H, + .pend = IPIC_SIPNR_L, .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 13, }, [78] = { - .pend = IPIC_SIPNR_H, + .pend = IPIC_SIPNR_L, .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 14, }, [79] = { - .pend = IPIC_SIPNR_H, + .pend = IPIC_SIPNR_L, .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 15, }, [80] = { - .pend = IPIC_SIPNR_H, + .pend = IPIC_SIPNR_L, .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 16, }, [84] = { - .pend = IPIC_SIPNR_H, + .pend = IPIC_SIPNR_L, .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 20, }, [85] = { - .pend = IPIC_SIPNR_H, + .pend = IPIC_SIPNR_L, .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 21, }, [90] = { - .pend = IPIC_SIPNR_H, + .pend = IPIC_SIPNR_L, .mask = IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, .bit = 26, }, [91] = { - .pend = IPIC_SIPNR_H, + .pend = IPIC_SIPNR_L, .mask 
= IPIC_SIMSR_L, .prio = 0, .force = IPIC_SIFCR_L, diff --git a/arch/powerpc/sysdev/mmio_nvram.c b/arch/powerpc/sysdev/mmio_nvram.c index 615350d46b52..ff23f5a4d4b9 100644 --- a/arch/powerpc/sysdev/mmio_nvram.c +++ b/arch/powerpc/sysdev/mmio_nvram.c @@ -80,7 +80,7 @@ static ssize_t mmio_nvram_get_size(void) int __init mmio_nvram_init(void) { struct device_node *nvram_node; - unsigned long *buffer; + const unsigned long *buffer; int proplen; unsigned long nvram_addr; int ret; @@ -91,7 +91,7 @@ int __init mmio_nvram_init(void) goto out; ret = -EIO; - buffer = (unsigned long *)get_property(nvram_node, "reg", &proplen); + buffer = get_property(nvram_node, "reg", &proplen); if (proplen != 2*sizeof(unsigned long)) goto out; diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c index 2ab06ed3ae73..c28f69bef8e2 100644 --- a/arch/powerpc/sysdev/tsi108_pci.c +++ b/arch/powerpc/sysdev/tsi108_pci.c @@ -194,7 +194,7 @@ int __init tsi108_setup_pci(struct device_node *dev) int len; struct pci_controller *hose; struct resource rsrc; - int *bus_range; + const int *bus_range; int primary = 0, has_address = 0; /* PCI Config mapping */ @@ -207,7 +207,7 @@ int __init tsi108_setup_pci(struct device_node *dev) has_address = (of_address_to_resource(dev, 0, &rsrc) == 0); /* Get bus range if any */ - bus_range = (int *)get_property(dev, "bus-range", &len); + bus_range = get_property(dev, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) { printk(KERN_WARNING "Can't get bus-range for %s, assume" " bus 0\n", dev->full_name); diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 179b10ced8c7..8adad1444a51 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -137,10 +137,14 @@ static void bootcmds(void); static void proccall(void); void dump_segments(void); static void symbol_lookup(void); +static void xmon_show_stack(unsigned long sp, unsigned long lr, + unsigned long pc); static void xmon_print_symbol(unsigned long address, const char *mid, const char *after); static const char *getvecname(unsigned long vec); +int xmon_no_auto_backtrace; + extern int print_insn_powerpc(unsigned long, unsigned long, int); extern void xmon_enter(void); @@ -736,6 +740,12 @@ cmds(struct pt_regs *excp) last_cmd = NULL; xmon_regs = excp; + + if (!xmon_no_auto_backtrace) { + xmon_no_auto_backtrace = 1; + xmon_show_stack(excp->gpr[1], excp->link, excp->nip); + } + for(;;) { #ifdef CONFIG_SMP printf("%x:", smp_processor_id()); diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig index a04cdf01596b..8fa10cf661a8 100644 --- a/arch/ppc/Kconfig +++ b/arch/ppc/Kconfig @@ -1204,7 +1204,7 @@ config PCI_DOMAINS default PCI config MPC83xx_PCI2 - bool " Supprt for 2nd PCI host controller" + bool "Support for 2nd PCI host controller" depends on PCI && MPC834x default y if MPC834x_SYS @@ -1223,12 +1223,12 @@ config PCI_8260 default y config 8260_PCI9 - bool " Enable workaround for MPC826x erratum PCI 9" + bool "Enable workaround for MPC826x erratum PCI 9" depends on PCI_8260 && !ADS8272 default y choice - prompt " IDMA channel for PCI 9 workaround" + prompt "IDMA channel for PCI 9 workaround" depends on 8260_PCI9 config 8260_PCI9_IDMA1 diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S index 2fa0075f2b5f..50b4bbd06804 100644 --- a/arch/ppc/kernel/misc.S +++ b/arch/ppc/kernel/misc.S @@ -768,91 +768,6 @@ _GLOBAL(_outsb) bdnz 00b blr -_GLOBAL(_insw) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,2 - blelr- -00: lhbrx r5,0,r3 -01: eieio -02: sthu r5,2(r4) - ISYNC_8xx - .section 
.fixup,"ax" -03: blr - .text - .section __ex_table, "a" - .align 2 - .long 00b, 03b - .long 01b, 03b - .long 02b, 03b - .text - bdnz 00b - blr - -_GLOBAL(_outsw) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,2 - blelr- -00: lhzu r5,2(r4) -01: eieio -02: sthbrx r5,0,r3 - ISYNC_8xx - .section .fixup,"ax" -03: blr - .text - .section __ex_table, "a" - .align 2 - .long 00b, 03b - .long 01b, 03b - .long 02b, 03b - .text - bdnz 00b - blr - -_GLOBAL(_insl) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,4 - blelr- -00: lwbrx r5,0,r3 -01: eieio -02: stwu r5,4(r4) - ISYNC_8xx - .section .fixup,"ax" -03: blr - .text - .section __ex_table, "a" - .align 2 - .long 00b, 03b - .long 01b, 03b - .long 02b, 03b - .text - bdnz 00b - blr - -_GLOBAL(_outsl) - cmpwi 0,r5,0 - mtctr r5 - subi r4,r4,4 - blelr- -00: lwzu r5,4(r4) -01: stwbrx r5,0,r3 -02: eieio - ISYNC_8xx - .section .fixup,"ax" -03: blr - .text - .section __ex_table, "a" - .align 2 - .long 00b, 03b - .long 01b, 03b - .long 02b, 03b - .text - bdnz 00b - blr - -_GLOBAL(__ide_mm_insw) _GLOBAL(_insw_ns) cmpwi 0,r5,0 mtctr r5 @@ -874,7 +789,6 @@ _GLOBAL(_insw_ns) bdnz 00b blr -_GLOBAL(__ide_mm_outsw) _GLOBAL(_outsw_ns) cmpwi 0,r5,0 mtctr r5 @@ -896,7 +810,6 @@ _GLOBAL(_outsw_ns) bdnz 00b blr -_GLOBAL(__ide_mm_insl) _GLOBAL(_insl_ns) cmpwi 0,r5,0 mtctr r5 @@ -918,7 +831,6 @@ _GLOBAL(_insl_ns) bdnz 00b blr -_GLOBAL(__ide_mm_outsl) _GLOBAL(_outsl_ns) cmpwi 0,r5,0 mtctr r5 diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c index d1735401384c..c8b65ca8a350 100644 --- a/arch/ppc/kernel/ppc_ksyms.c +++ b/arch/ppc/kernel/ppc_ksyms.c @@ -115,17 +115,8 @@ EXPORT_SYMBOL(outw); EXPORT_SYMBOL(outl); EXPORT_SYMBOL(outsl);*/ -EXPORT_SYMBOL(__ide_mm_insl); -EXPORT_SYMBOL(__ide_mm_outsw); -EXPORT_SYMBOL(__ide_mm_insw); -EXPORT_SYMBOL(__ide_mm_outsl); - EXPORT_SYMBOL(_insb); EXPORT_SYMBOL(_outsb); -EXPORT_SYMBOL(_insw); -EXPORT_SYMBOL(_outsw); -EXPORT_SYMBOL(_insl); -EXPORT_SYMBOL(_outsl); EXPORT_SYMBOL(_insw_ns); EXPORT_SYMBOL(_outsw_ns); EXPORT_SYMBOL(_insl_ns); diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c index a74f46d9826f..5458ac5da7c3 100644 --- a/arch/ppc/kernel/setup.c +++ b/arch/ppc/kernel/setup.c @@ -127,11 +127,8 @@ void machine_restart(char *cmd) ppc_md.restart(cmd); } -void machine_power_off(void) +static void ppc_generic_power_off(void) { -#ifdef CONFIG_NVRAM - nvram_sync(); -#endif ppc_md.power_off(); } @@ -143,7 +140,17 @@ void machine_halt(void) ppc_md.halt(); } -void (*pm_power_off)(void) = machine_power_off; +void (*pm_power_off)(void) = ppc_generic_power_off; + +void machine_power_off(void) +{ +#ifdef CONFIG_NVRAM + nvram_sync(); +#endif + if (pm_power_off) + pm_power_off(); + ppc_generic_power_off(); +} #ifdef CONFIG_TAU extern u32 cpu_temp(unsigned long cpu); diff --git a/arch/ppc/platforms/85xx/sbc8560.h b/arch/ppc/platforms/85xx/sbc8560.h index c7d61cf3a449..e5e156f60100 100644 --- a/arch/ppc/platforms/85xx/sbc8560.h +++ b/arch/ppc/platforms/85xx/sbc8560.h @@ -14,6 +14,7 @@ #define __MACH_SBC8560_H__ #include <platforms/85xx/sbc85xx.h> +#include <asm/irq.h> #define CPM_MAP_ADDR (CCSRBAR + MPC85xx_CPM_OFFSET) diff --git a/arch/ppc/platforms/85xx/sbc85xx.h b/arch/ppc/platforms/85xx/sbc85xx.h index 21ea7a55639b..51df4dc04e22 100644 --- a/arch/ppc/platforms/85xx/sbc85xx.h +++ b/arch/ppc/platforms/85xx/sbc85xx.h @@ -49,4 +49,22 @@ extern void sbc8560_init_IRQ(void) __init; #define MPC85XX_PCI1_IO_SIZE 0x01000000 +/* FCC1 Clock Source Configuration. These can be + * redefined in the board specific file. 
+ * Can only choose from CLK9-12 */ +#define F1_RXCLK 12 +#define F1_TXCLK 11 + +/* FCC2 Clock Source Configuration. These can be + * redefined in the board specific file. + * Can only choose from CLK13-16 */ +#define F2_RXCLK 13 +#define F2_TXCLK 14 + +/* FCC3 Clock Source Configuration. These can be + * redefined in the board specific file. + * Can only choose from CLK13-16 */ +#define F3_RXCLK 15 +#define F3_TXCLK 16 + #endif /* __PLATFORMS_85XX_SBC85XX_H__ */ diff --git a/arch/ppc/syslib/m8260_pci_erratum9.c b/arch/ppc/syslib/m8260_pci_erratum9.c index 974581ea4849..5475709ce07b 100644 --- a/arch/ppc/syslib/m8260_pci_erratum9.c +++ b/arch/ppc/syslib/m8260_pci_erratum9.c @@ -339,20 +339,6 @@ void insl(unsigned port, void *buf, int nl) idma_pci9_read((u8 *)buf, (u8 *)addr, nl*sizeof(u32), sizeof(u32), 0); } -void insw_ns(unsigned port, void *buf, int ns) -{ - u8 *addr = (u8 *)(port + _IO_BASE); - - idma_pci9_read((u8 *)buf, (u8 *)addr, ns*sizeof(u16), sizeof(u16), 0); -} - -void insl_ns(unsigned port, void *buf, int nl) -{ - u8 *addr = (u8 *)(port + _IO_BASE); - - idma_pci9_read((u8 *)buf, (u8 *)addr, nl*sizeof(u32), sizeof(u32), 0); -} - void *memcpy_fromio(void *dest, unsigned long src, size_t count) { unsigned long pa = iopa((unsigned long) src); @@ -373,8 +359,6 @@ EXPORT_SYMBOL(inl); EXPORT_SYMBOL(insb); EXPORT_SYMBOL(insw); EXPORT_SYMBOL(insl); -EXPORT_SYMBOL(insw_ns); -EXPORT_SYMBOL(insl_ns); EXPORT_SYMBOL(memcpy_fromio); #endif /* ifdef CONFIG_8260_PCI9 */ diff --git a/arch/ppc/xmon/start.c b/arch/ppc/xmon/start.c index f7e92986952a..d74a883e5bde 100644 --- a/arch/ppc/xmon/start.c +++ b/arch/ppc/xmon/start.c @@ -15,6 +15,7 @@ #include <asm/processor.h> #include <asm/delay.h> #include <asm/btext.h> +#include <asm/ibm4xx.h> static volatile unsigned char *sccc, *sccd; unsigned int TXRDY, RXRDY, DLAB; @@ -57,23 +58,30 @@ static struct sysrq_key_op sysrq_xmon_op = void xmon_map_scc(void) { -#ifdef CONFIG_PPC_PREP - volatile unsigned char *base; - -#elif defined(CONFIG_GEMINI) +#if defined(CONFIG_GEMINI) /* should already be mapped by the kernel boot */ - sccc = (volatile unsigned char *) 0xffeffb0d; sccd = (volatile unsigned char *) 0xffeffb08; - TXRDY = 0x20; - RXRDY = 1; - DLAB = 0x80; #elif defined(CONFIG_405GP) - sccc = (volatile unsigned char *)0xef600305; sccd = (volatile unsigned char *)0xef600300; +#elif defined(CONFIG_440EP) + sccd = (volatile unsigned char *) ioremap(PPC440EP_UART0_ADDR, 8); +#elif defined(CONFIG_440SP) + sccd = (volatile unsigned char *) ioremap64(PPC440SP_UART0_ADDR, 8); +#elif defined(CONFIG_440SPE) + sccd = (volatile unsigned char *) ioremap64(PPC440SPE_UART0_ADDR, 8); +#elif defined(CONFIG_44x) + /* This is the default for 44x platforms. 
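The FCC clock defines just added to sbc85xx.h are documented as board-overridable defaults. A hypothetical override from a board-specific header, kept inside the CLK9-CLK12 range the comment requires for FCC1; the hunk does not spell out whether boards redefine before or after including sbc85xx.h, so the #undef form below is only one plausible arrangement:

    /* Board override of the FCC1 clock routing defaults above;
     * values are illustrative but stay within CLK9-CLK12. */
    #undef  F1_RXCLK
    #undef  F1_TXCLK
    #define F1_RXCLK        9
    #define F1_TXCLK        10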
Any boards that have a + different UART address need to be put in cases before this or the + port will be mapped incorrectly */ + sccd = (volatile unsigned char *) ioremap64(PPC440GP_UART0_ADDR, 8); +#endif /* platform */ + +#ifndef CONFIG_PPC_PREP + sccc = sccd + 5; TXRDY = 0x20; RXRDY = 1; DLAB = 0x80; -#endif /* platform */ +#endif register_sysrq_key('x', &sysrq_xmon_op); } diff --git a/arch/ppc/xmon/xmon.c b/arch/ppc/xmon/xmon.c index 37d234f93394..b1a91744fd2d 100644 --- a/arch/ppc/xmon/xmon.c +++ b/arch/ppc/xmon/xmon.c @@ -153,6 +153,12 @@ static int xmon_trace[NR_CPUS]; #define SSTEP 1 /* stepping because of 's' command */ #define BRSTEP 2 /* stepping over breakpoint */ +#ifdef CONFIG_4xx +#define MSR_SSTEP_ENABLE 0x200 +#else +#define MSR_SSTEP_ENABLE 0x400 +#endif + static struct pt_regs *xmon_regs[NR_CPUS]; extern inline void sync(void) @@ -211,6 +217,14 @@ static void get_tb(unsigned *p) p[1] = lo; } +static inline void xmon_enable_sstep(struct pt_regs *regs) +{ + regs->msr |= MSR_SSTEP_ENABLE; +#ifdef CONFIG_4xx + mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM); +#endif +} + int xmon(struct pt_regs *excp) { struct pt_regs regs; @@ -254,10 +268,10 @@ int xmon(struct pt_regs *excp) cmd = cmds(excp); if (cmd == 's') { xmon_trace[smp_processor_id()] = SSTEP; - excp->msr |= 0x400; + xmon_enable_sstep(excp); } else if (at_breakpoint(excp->nip)) { xmon_trace[smp_processor_id()] = BRSTEP; - excp->msr |= 0x400; + xmon_enable_sstep(excp); } else { xmon_trace[smp_processor_id()] = 0; insert_bpts(); @@ -298,7 +312,7 @@ xmon_bpt(struct pt_regs *regs) remove_bpts(); excprint(regs); xmon_trace[smp_processor_id()] = BRSTEP; - regs->msr |= 0x400; + xmon_enable_sstep(regs); } else { xmon(regs); } @@ -385,7 +399,7 @@ insert_bpts(void) } store_inst((void *) bp->address); } -#if !defined(CONFIG_8xx) +#if ! (defined(CONFIG_8xx) || defined(CONFIG_4xx)) if (dabr.enabled) set_dabr(dabr.address); if (iabr.enabled) @@ -400,7 +414,7 @@ remove_bpts(void) struct bpt *bp; unsigned instr; -#if !defined(CONFIG_8xx) +#if ! (defined(CONFIG_8xx) || defined(CONFIG_4xx)) set_dabr(0); set_iabr(0); #endif @@ -677,7 +691,7 @@ bpt_cmds(void) cmd = inchar(); switch (cmd) { -#if !defined(CONFIG_8xx) +#if ! (defined(CONFIG_8xx) || defined(CONFIG_4xx)) case 'd': mode = 7; cmd = inchar(); @@ -792,7 +806,7 @@ backtrace(struct pt_regs *excp) for (; sp != 0; sp = stack[0]) { if (mread(sp, stack, sizeof(stack)) != sizeof(stack)) break; - printf("[%.8lx] ", stack); + printf("[%.8lx] ", stack[0]); xmon_print_symbol(stack[1], " ", "\n"); if (stack[1] == (unsigned) &ret_from_except || stack[1] == (unsigned) &ret_from_except_full diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 2f4f70c4dbb2..b216ca659cdf 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -460,8 +460,7 @@ config S390_HYPFS_FS information in an s390 hypervisor environment. config KEXEC - bool "kexec system call (EXPERIMENTAL)" - depends on EXPERIMENTAL + bool "kexec system call" help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot @@ -487,8 +486,22 @@ source "drivers/net/Kconfig" source "fs/Kconfig" +menu "Instrumentation Support" + source "arch/s390/oprofile/Kconfig" +config KPROBES + bool "Kprobes (EXPERIMENTAL)" + depends on EXPERIMENTAL && MODULES + help + Kprobes allows you to trap at almost any kernel address and + execute a callback function. register_kprobe() establishes + a probepoint and specifies the callback. 
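The KPROBES help text above describes register_kprobe(); a minimal, self-contained module sketching the usage it names. The probed symbol and handler names are made up, and the .symbol_name convenience field is an assumption (trees without it require filling in kp.addr directly):

    #include <linux/module.h>
    #include <linux/kprobes.h>

    /* Runs just before the probed instruction executes. */
    static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
    {
            printk(KERN_INFO "kprobe fired at %p\n", p->addr);
            return 0;
    }

    static struct kprobe my_kp = {
            .symbol_name = "do_fork",       /* illustrative target */
            .pre_handler = my_pre_handler,
    };

    static int __init my_kp_init(void)
    {
            return register_kprobe(&my_kp); /* establishes the probepoint */
    }

    static void __exit my_kp_exit(void)
    {
            unregister_kprobe(&my_kp);
    }

    module_init(my_kp_init);
    module_exit(my_kp_exit);
    MODULE_LICENSE("GPL");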
Kprobes is useful + for kernel debugging, non-intrusive instrumentation and testing. + If in doubt, say "N". + +endmenu + source "arch/s390/Kconfig.debug" source "security/Kconfig" diff --git a/arch/s390/appldata/appldata.h b/arch/s390/appldata/appldata.h index 71d65eb30650..0429481dea63 100644 --- a/arch/s390/appldata/appldata.h +++ b/arch/s390/appldata/appldata.h @@ -29,22 +29,6 @@ #define CTL_APPLDATA_NET_SUM 2125 #define CTL_APPLDATA_PROC 2126 -#ifndef CONFIG_64BIT - -#define APPLDATA_START_INTERVAL_REC 0x00 /* Function codes for */ -#define APPLDATA_STOP_REC 0x01 /* DIAG 0xDC */ -#define APPLDATA_GEN_EVENT_RECORD 0x02 -#define APPLDATA_START_CONFIG_REC 0x03 - -#else - -#define APPLDATA_START_INTERVAL_REC 0x80 -#define APPLDATA_STOP_REC 0x81 -#define APPLDATA_GEN_EVENT_RECORD 0x82 -#define APPLDATA_START_CONFIG_REC 0x83 - -#endif /* CONFIG_64BIT */ - #define P_INFO(x...) printk(KERN_INFO MY_PRINT_NAME " info: " x) #define P_ERROR(x...) printk(KERN_ERR MY_PRINT_NAME " error: " x) #define P_WARNING(x...) printk(KERN_WARNING MY_PRINT_NAME " status: " x) diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index a0a94e0ef8d1..b69ed742f981 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c @@ -14,20 +14,20 @@ #include <linux/init.h> #include <linux/slab.h> #include <linux/errno.h> -#include <asm/uaccess.h> -#include <asm/io.h> -#include <asm/smp.h> #include <linux/interrupt.h> #include <linux/proc_fs.h> #include <linux/page-flags.h> #include <linux/swap.h> #include <linux/pagemap.h> #include <linux/sysctl.h> -#include <asm/timer.h> -//#include <linux/kernel_stat.h> #include <linux/notifier.h> #include <linux/cpu.h> #include <linux/workqueue.h> +#include <asm/appldata.h> +#include <asm/timer.h> +#include <asm/uaccess.h> +#include <asm/io.h> +#include <asm/smp.h> #include "appldata.h" @@ -39,34 +39,6 @@ #define TOD_MICRO 0x01000 /* nr. of TOD clock units for 1 microsecond */ - -/* - * Parameter list for DIAGNOSE X'DC' - */ -#ifndef CONFIG_64BIT -struct appldata_parameter_list { - u16 diag; /* The DIAGNOSE code X'00DC' */ - u8 function; /* The function code for the DIAGNOSE */ - u8 parlist_length; /* Length of the parameter list */ - u32 product_id_addr; /* Address of the 16-byte product ID */ - u16 reserved; - u16 buffer_length; /* Length of the application data buffer */ - u32 buffer_addr; /* Address of the application data buffer */ -}; -#else -struct appldata_parameter_list { - u16 diag; - u8 function; - u8 parlist_length; - u32 unused01; - u16 reserved; - u16 buffer_length; - u32 unused02; - u64 product_id_addr; - u64 buffer_addr; -}; -#endif /* CONFIG_64BIT */ - /* * /proc entries (sysctl) */ @@ -181,46 +153,17 @@ static void appldata_work_fn(void *data) int appldata_diag(char record_nr, u16 function, unsigned long buffer, u16 length, char *mod_lvl) { - unsigned long ry; - struct appldata_product_id { - char prod_nr[7]; /* product nr. */ - char prod_fn[2]; /* product function */ - char record_nr; /* record nr. */ - char version_nr[2]; /* version */ - char release_nr[2]; /* release */ - char mod_lvl[2]; /* modification lvl. 
*/ - } appldata_product_id = { - /* all strings are EBCDIC, record_nr is byte */ + struct appldata_product_id id = { .prod_nr = {0xD3, 0xC9, 0xD5, 0xE4, - 0xE7, 0xD2, 0xD9}, /* "LINUXKR" */ - .prod_fn = {0xD5, 0xD3}, /* "NL" */ + 0xE7, 0xD2, 0xD9}, /* "LINUXKR" */ + .prod_fn = 0xD5D3, /* "NL" */ .record_nr = record_nr, - .version_nr = {0xF2, 0xF6}, /* "26" */ - .release_nr = {0xF0, 0xF1}, /* "01" */ - .mod_lvl = {mod_lvl[0], mod_lvl[1]}, - }; - struct appldata_parameter_list appldata_parameter_list = { - .diag = 0xDC, - .function = function, - .parlist_length = - sizeof(appldata_parameter_list), - .buffer_length = length, - .product_id_addr = - (unsigned long) &appldata_product_id, - .buffer_addr = virt_to_phys((void *) buffer) + .version_nr = 0xF2F6, /* "26" */ + .release_nr = 0xF0F1, /* "01" */ + .mod_lvl = (mod_lvl[0]) << 8 | mod_lvl[1], }; - if (!MACHINE_IS_VM) - return -ENOSYS; - ry = -1; - asm volatile( - "diag %1,%0,0xDC\n\t" - : "=d" (ry) - : "d" (&appldata_parameter_list), - "m" (appldata_parameter_list), - "m" (appldata_product_id) - : "cc"); - return (int) ry; + return appldata_asm(&id, function, (void *) buffer, length); } /************************ timer, work, DIAG <END> ****************************/ diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c index 161acc5c8a1b..76a15523ae9e 100644 --- a/arch/s390/appldata/appldata_os.c +++ b/arch/s390/appldata/appldata_os.c @@ -16,6 +16,7 @@ #include <linux/kernel_stat.h> #include <linux/netdevice.h> #include <linux/sched.h> +#include <asm/appldata.h> #include <asm/smp.h> #include "appldata.h" diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index 5713c7e5bd16..15c9eec02928 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -16,9 +16,9 @@ * */ +#include <crypto/algapi.h> #include <linux/module.h> #include <linux/init.h> -#include <linux/crypto.h> #include "crypt_s390.h" #define AES_MIN_KEY_SIZE 16 @@ -34,13 +34,16 @@ int has_aes_256 = 0; struct s390_aes_ctx { u8 iv[AES_BLOCK_SIZE]; u8 key[AES_MAX_KEY_SIZE]; + long enc; + long dec; int key_len; }; static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, - unsigned int key_len, u32 *flags) + unsigned int key_len) { struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); + u32 *flags = &tfm->crt_flags; switch (key_len) { case 16: @@ -110,133 +113,206 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) } } -static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out, - const u8 *in, unsigned int nbytes) -{ - struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm); - int ret; - /* only use complete blocks */ - nbytes &= ~(AES_BLOCK_SIZE - 1); +static struct crypto_alg aes_alg = { + .cra_name = "aes", + .cra_driver_name = "aes-s390", + .cra_priority = CRYPT_S390_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct s390_aes_ctx), + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), + .cra_u = { + .cipher = { + .cia_min_keysize = AES_MIN_KEY_SIZE, + .cia_max_keysize = AES_MAX_KEY_SIZE, + .cia_setkey = aes_set_key, + .cia_encrypt = aes_encrypt, + .cia_decrypt = aes_decrypt, + } + } +}; + +static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len) +{ + struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); - switch (sctx->key_len) { + switch (key_len) { case 16: - ret = crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in, nbytes); - BUG_ON((ret < 0) || (ret != 
nbytes)); + sctx->enc = KM_AES_128_ENCRYPT; + sctx->dec = KM_AES_128_DECRYPT; break; case 24: - ret = crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in, nbytes); - BUG_ON((ret < 0) || (ret != nbytes)); + sctx->enc = KM_AES_192_ENCRYPT; + sctx->dec = KM_AES_192_DECRYPT; break; case 32: - ret = crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in, nbytes); - BUG_ON((ret < 0) || (ret != nbytes)); + sctx->enc = KM_AES_256_ENCRYPT; + sctx->dec = KM_AES_256_DECRYPT; break; } - return nbytes; + + return aes_set_key(tfm, in_key, key_len); } -static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out, - const u8 *in, unsigned int nbytes) +static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param, + struct blkcipher_walk *walk) { - struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm); - int ret; + int ret = blkcipher_walk_virt(desc, walk); + unsigned int nbytes; - /* only use complete blocks */ - nbytes &= ~(AES_BLOCK_SIZE - 1); + while ((nbytes = walk->nbytes)) { + /* only use complete blocks */ + unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1); + u8 *out = walk->dst.virt.addr; + u8 *in = walk->src.virt.addr; - switch (sctx->key_len) { - case 16: - ret = crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in, nbytes); - BUG_ON((ret < 0) || (ret != nbytes)); - break; - case 24: - ret = crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in, nbytes); - BUG_ON((ret < 0) || (ret != nbytes)); - break; - case 32: - ret = crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in, nbytes); - BUG_ON((ret < 0) || (ret != nbytes)); - break; + ret = crypt_s390_km(func, param, out, in, n); + BUG_ON((ret < 0) || (ret != n)); + + nbytes &= AES_BLOCK_SIZE - 1; + ret = blkcipher_walk_done(desc, walk, nbytes); } - return nbytes; + + return ret; } -static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out, - const u8 *in, unsigned int nbytes) +static int ecb_aes_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) { - struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm); - int ret; + struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); + struct blkcipher_walk walk; - /* only use complete blocks */ - nbytes &= ~(AES_BLOCK_SIZE - 1); + blkcipher_walk_init(&walk, dst, src, nbytes); + return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk); +} - memcpy(&sctx->iv, desc->info, AES_BLOCK_SIZE); - switch (sctx->key_len) { +static int ecb_aes_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); + struct blkcipher_walk walk; + + blkcipher_walk_init(&walk, dst, src, nbytes); + return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk); +} + +static struct crypto_alg ecb_aes_alg = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-s390", + .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct s390_aes_ctx), + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list), + .cra_u = { + .blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = ecb_aes_set_key, + .encrypt = ecb_aes_encrypt, + .decrypt = ecb_aes_decrypt, + } + } +}; + +static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len) +{ + struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); + + switch 
(key_len) { case 16: - ret = crypt_s390_kmc(KMC_AES_128_ENCRYPT, &sctx->iv, out, in, nbytes); - BUG_ON((ret < 0) || (ret != nbytes)); + sctx->enc = KMC_AES_128_ENCRYPT; + sctx->dec = KMC_AES_128_DECRYPT; break; case 24: - ret = crypt_s390_kmc(KMC_AES_192_ENCRYPT, &sctx->iv, out, in, nbytes); - BUG_ON((ret < 0) || (ret != nbytes)); + sctx->enc = KMC_AES_192_ENCRYPT; + sctx->dec = KMC_AES_192_DECRYPT; break; case 32: - ret = crypt_s390_kmc(KMC_AES_256_ENCRYPT, &sctx->iv, out, in, nbytes); - BUG_ON((ret < 0) || (ret != nbytes)); + sctx->enc = KMC_AES_256_ENCRYPT; + sctx->dec = KMC_AES_256_DECRYPT; break; } - memcpy(desc->info, &sctx->iv, AES_BLOCK_SIZE); - return nbytes; + return aes_set_key(tfm, in_key, key_len); } -static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out, - const u8 *in, unsigned int nbytes) +static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param, + struct blkcipher_walk *walk) { - struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm); - int ret; + int ret = blkcipher_walk_virt(desc, walk); + unsigned int nbytes = walk->nbytes; - /* only use complete blocks */ - nbytes &= ~(AES_BLOCK_SIZE - 1); + if (!nbytes) + goto out; - memcpy(&sctx->iv, desc->info, AES_BLOCK_SIZE); - switch (sctx->key_len) { - case 16: - ret = crypt_s390_kmc(KMC_AES_128_DECRYPT, &sctx->iv, out, in, nbytes); - BUG_ON((ret < 0) || (ret != nbytes)); - break; - case 24: - ret = crypt_s390_kmc(KMC_AES_192_DECRYPT, &sctx->iv, out, in, nbytes); - BUG_ON((ret < 0) || (ret != nbytes)); - break; - case 32: - ret = crypt_s390_kmc(KMC_AES_256_DECRYPT, &sctx->iv, out, in, nbytes); - BUG_ON((ret < 0) || (ret != nbytes)); - break; - } - return nbytes; + memcpy(param, walk->iv, AES_BLOCK_SIZE); + do { + /* only use complete blocks */ + unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1); + u8 *out = walk->dst.virt.addr; + u8 *in = walk->src.virt.addr; + + ret = crypt_s390_kmc(func, param, out, in, n); + BUG_ON((ret < 0) || (ret != n)); + + nbytes &= AES_BLOCK_SIZE - 1; + ret = blkcipher_walk_done(desc, walk, nbytes); + } while ((nbytes = walk->nbytes)); + memcpy(walk->iv, param, AES_BLOCK_SIZE); + +out: + return ret; } +static int cbc_aes_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); + struct blkcipher_walk walk; -static struct crypto_alg aes_alg = { - .cra_name = "aes", - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + blkcipher_walk_init(&walk, dst, src, nbytes); + return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk); +} + +static int cbc_aes_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); + struct blkcipher_walk walk; + + blkcipher_walk_init(&walk, dst, src, nbytes); + return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk); +} + +static struct crypto_alg cbc_aes_alg = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-s390", + .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct s390_aes_ctx), + .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), + .cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list), .cra_u = { - .cipher = { - .cia_min_keysize = AES_MIN_KEY_SIZE, - .cia_max_keysize = AES_MAX_KEY_SIZE, - .cia_setkey = aes_set_key, - .cia_encrypt = aes_encrypt, - 
.cia_decrypt = aes_decrypt, - .cia_encrypt_ecb = aes_encrypt_ecb, - .cia_decrypt_ecb = aes_decrypt_ecb, - .cia_encrypt_cbc = aes_encrypt_cbc, - .cia_decrypt_cbc = aes_decrypt_cbc, + .blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = cbc_aes_set_key, + .encrypt = cbc_aes_encrypt, + .decrypt = cbc_aes_decrypt, } } }; @@ -256,13 +332,40 @@ static int __init aes_init(void) return -ENOSYS; ret = crypto_register_alg(&aes_alg); - if (ret != 0) - printk(KERN_INFO "crypt_s390: aes_s390 couldn't be loaded.\n"); + if (ret != 0) { + printk(KERN_INFO "crypt_s390: aes-s390 couldn't be loaded.\n"); + goto aes_err; + } + + ret = crypto_register_alg(&ecb_aes_alg); + if (ret != 0) { + printk(KERN_INFO + "crypt_s390: ecb-aes-s390 couldn't be loaded.\n"); + goto ecb_aes_err; + } + + ret = crypto_register_alg(&cbc_aes_alg); + if (ret != 0) { + printk(KERN_INFO + "crypt_s390: cbc-aes-s390 couldn't be loaded.\n"); + goto cbc_aes_err; + } + +out: return ret; + +cbc_aes_err: + crypto_unregister_alg(&ecb_aes_alg); +ecb_aes_err: + crypto_unregister_alg(&aes_alg); +aes_err: + goto out; } static void __exit aes_fini(void) { + crypto_unregister_alg(&cbc_aes_alg); + crypto_unregister_alg(&ecb_aes_alg); crypto_unregister_alg(&aes_alg); } diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h index d1c259a7fe33..efd836c2e4a6 100644 --- a/arch/s390/crypto/crypt_s390.h +++ b/arch/s390/crypto/crypt_s390.h @@ -20,6 +20,9 @@ #define CRYPT_S390_OP_MASK 0xFF00 #define CRYPT_S390_FUNC_MASK 0x00FF +#define CRYPT_S390_PRIORITY 300 +#define CRYPT_S390_COMPOSITE_PRIORITY 400 + /* s930 cryptographic operations */ enum crypt_s390_operations { CRYPT_S390_KM = 0x0100, diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c index b3f7496a79b4..2aba04852fe3 100644 --- a/arch/s390/crypto/des_s390.c +++ b/arch/s390/crypto/des_s390.c @@ -13,9 +13,10 @@ * (at your option) any later version. 
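With the aes_s390 conversion above, ECB and CBC modes are now registered as blkcipher algorithms carrying driver names and priorities. A rough consumer sketch, assuming the matching blkcipher user interface from this kernel generation; key, IV and scatterlist setup are the caller's problem, and the 16-byte key/IV sizes are illustrative:

    #include <linux/crypto.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    static int cbc_aes_example(struct scatterlist *dst,
                               struct scatterlist *src,
                               unsigned int nbytes,
                               const u8 *key, u8 *iv)
    {
            struct crypto_blkcipher *tfm;
            struct blkcipher_desc desc;
            int ret;

            /* cra_priority makes this resolve to cbc-aes-s390 when the
             * CPACF instructions are available, else the generic code. */
            tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            desc.tfm = tfm;
            desc.flags = 0;

            ret = crypto_blkcipher_setkey(tfm, key, 16);
            if (!ret) {
                    crypto_blkcipher_set_iv(tfm, iv, 16);
                    ret = crypto_blkcipher_encrypt(&desc, dst, src, nbytes);
            }

            crypto_free_blkcipher(tfm);
            return ret;
    }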
* */ + +#include <crypto/algapi.h> #include <linux/init.h> #include <linux/module.h> -#include <linux/crypto.h> #include "crypt_s390.h" #include "crypto_des.h" @@ -45,9 +46,10 @@ struct crypt_s390_des3_192_ctx { }; static int des_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen, u32 *flags) + unsigned int keylen) { struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm); + u32 *flags = &tfm->crt_flags; int ret; /* test if key is valid (not a weak key) */ @@ -71,85 +73,159 @@ static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) crypt_s390_km(KM_DEA_DECRYPT, dctx->key, out, in, DES_BLOCK_SIZE); } -static unsigned int des_encrypt_ecb(const struct cipher_desc *desc, u8 *out, - const u8 *in, unsigned int nbytes) +static struct crypto_alg des_alg = { + .cra_name = "des", + .cra_driver_name = "des-s390", + .cra_priority = CRYPT_S390_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct crypt_s390_des_ctx), + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(des_alg.cra_list), + .cra_u = { + .cipher = { + .cia_min_keysize = DES_KEY_SIZE, + .cia_max_keysize = DES_KEY_SIZE, + .cia_setkey = des_setkey, + .cia_encrypt = des_encrypt, + .cia_decrypt = des_decrypt, + } + } +}; + +static int ecb_desall_crypt(struct blkcipher_desc *desc, long func, + void *param, struct blkcipher_walk *walk) { - struct crypt_s390_des_ctx *sctx = crypto_tfm_ctx(desc->tfm); - int ret; + int ret = blkcipher_walk_virt(desc, walk); + unsigned int nbytes; + + while ((nbytes = walk->nbytes)) { + /* only use complete blocks */ + unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1); + u8 *out = walk->dst.virt.addr; + u8 *in = walk->src.virt.addr; - /* only use complete blocks */ - nbytes &= ~(DES_BLOCK_SIZE - 1); - ret = crypt_s390_km(KM_DEA_ENCRYPT, sctx->key, out, in, nbytes); - BUG_ON((ret < 0) || (ret != nbytes)); + ret = crypt_s390_km(func, param, out, in, n); + BUG_ON((ret < 0) || (ret != n)); - return nbytes; + nbytes &= DES_BLOCK_SIZE - 1; + ret = blkcipher_walk_done(desc, walk, nbytes); + } + + return ret; } -static unsigned int des_decrypt_ecb(const struct cipher_desc *desc, u8 *out, - const u8 *in, unsigned int nbytes) +static int cbc_desall_crypt(struct blkcipher_desc *desc, long func, + void *param, struct blkcipher_walk *walk) { - struct crypt_s390_des_ctx *sctx = crypto_tfm_ctx(desc->tfm); - int ret; + int ret = blkcipher_walk_virt(desc, walk); + unsigned int nbytes = walk->nbytes; + + if (!nbytes) + goto out; + + memcpy(param, walk->iv, DES_BLOCK_SIZE); + do { + /* only use complete blocks */ + unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1); + u8 *out = walk->dst.virt.addr; + u8 *in = walk->src.virt.addr; - /* only use complete blocks */ - nbytes &= ~(DES_BLOCK_SIZE - 1); - ret = crypt_s390_km(KM_DEA_DECRYPT, sctx->key, out, in, nbytes); - BUG_ON((ret < 0) || (ret != nbytes)); + ret = crypt_s390_kmc(func, param, out, in, n); + BUG_ON((ret < 0) || (ret != n)); - return nbytes; + nbytes &= DES_BLOCK_SIZE - 1; + ret = blkcipher_walk_done(desc, walk, nbytes); + } while ((nbytes = walk->nbytes)); + memcpy(walk->iv, param, DES_BLOCK_SIZE); + +out: + return ret; } -static unsigned int des_encrypt_cbc(const struct cipher_desc *desc, u8 *out, - const u8 *in, unsigned int nbytes) +static int ecb_des_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) { - struct crypt_s390_des_ctx *sctx = crypto_tfm_ctx(desc->tfm); - int ret; + struct crypt_s390_des_ctx *sctx = 
crypto_blkcipher_ctx(desc->tfm); + struct blkcipher_walk walk; - /* only use complete blocks */ - nbytes &= ~(DES_BLOCK_SIZE - 1); + blkcipher_walk_init(&walk, dst, src, nbytes); + return ecb_desall_crypt(desc, KM_DEA_ENCRYPT, sctx->key, &walk); +} - memcpy(sctx->iv, desc->info, DES_BLOCK_SIZE); - ret = crypt_s390_kmc(KMC_DEA_ENCRYPT, &sctx->iv, out, in, nbytes); - BUG_ON((ret < 0) || (ret != nbytes)); +static int ecb_des_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); + struct blkcipher_walk walk; - memcpy(desc->info, sctx->iv, DES_BLOCK_SIZE); - return nbytes; + blkcipher_walk_init(&walk, dst, src, nbytes); + return ecb_desall_crypt(desc, KM_DEA_DECRYPT, sctx->key, &walk); } -static unsigned int des_decrypt_cbc(const struct cipher_desc *desc, u8 *out, - const u8 *in, unsigned int nbytes) +static struct crypto_alg ecb_des_alg = { + .cra_name = "ecb(des)", + .cra_driver_name = "ecb-des-s390", + .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct crypt_s390_des_ctx), + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(ecb_des_alg.cra_list), + .cra_u = { + .blkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = des_setkey, + .encrypt = ecb_des_encrypt, + .decrypt = ecb_des_decrypt, + } + } +}; + +static int cbc_des_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) { - struct crypt_s390_des_ctx *sctx = crypto_tfm_ctx(desc->tfm); - int ret; + struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); + struct blkcipher_walk walk; - /* only use complete blocks */ - nbytes &= ~(DES_BLOCK_SIZE - 1); + blkcipher_walk_init(&walk, dst, src, nbytes); + return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, sctx->iv, &walk); +} - memcpy(&sctx->iv, desc->info, DES_BLOCK_SIZE); - ret = crypt_s390_kmc(KMC_DEA_DECRYPT, &sctx->iv, out, in, nbytes); - BUG_ON((ret < 0) || (ret != nbytes)); +static int cbc_des_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); + struct blkcipher_walk walk; - return nbytes; + blkcipher_walk_init(&walk, dst, src, nbytes); + return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, sctx->iv, &walk); } -static struct crypto_alg des_alg = { - .cra_name = "des", - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, +static struct crypto_alg cbc_des_alg = { + .cra_name = "cbc(des)", + .cra_driver_name = "cbc-des-s390", + .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crypt_s390_des_ctx), + .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(des_alg.cra_list), + .cra_list = LIST_HEAD_INIT(cbc_des_alg.cra_list), .cra_u = { - .cipher = { - .cia_min_keysize = DES_KEY_SIZE, - .cia_max_keysize = DES_KEY_SIZE, - .cia_setkey = des_setkey, - .cia_encrypt = des_encrypt, - .cia_decrypt = des_decrypt, - .cia_encrypt_ecb = des_encrypt_ecb, - .cia_decrypt_ecb = des_decrypt_ecb, - .cia_encrypt_cbc = des_encrypt_cbc, - .cia_decrypt_cbc = des_decrypt_cbc, + .blkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .setkey = 
des_setkey, + .encrypt = cbc_des_encrypt, + .decrypt = cbc_des_decrypt, } } }; @@ -167,11 +243,12 @@ static struct crypto_alg des_alg = { * */ static int des3_128_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen, u32 *flags) + unsigned int keylen) { int i, ret; struct crypt_s390_des3_128_ctx *dctx = crypto_tfm_ctx(tfm); - const u8* temp_key = key; + const u8 *temp_key = key; + u32 *flags = &tfm->crt_flags; if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE))) { *flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED; @@ -202,89 +279,111 @@ static void des3_128_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) DES3_128_BLOCK_SIZE); } -static unsigned int des3_128_encrypt_ecb(const struct cipher_desc *desc, - u8 *out, const u8 *in, - unsigned int nbytes) -{ - struct crypt_s390_des3_128_ctx *sctx = crypto_tfm_ctx(desc->tfm); - int ret; +static struct crypto_alg des3_128_alg = { + .cra_name = "des3_ede128", + .cra_driver_name = "des3_ede128-s390", + .cra_priority = CRYPT_S390_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = DES3_128_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct crypt_s390_des3_128_ctx), + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(des3_128_alg.cra_list), + .cra_u = { + .cipher = { + .cia_min_keysize = DES3_128_KEY_SIZE, + .cia_max_keysize = DES3_128_KEY_SIZE, + .cia_setkey = des3_128_setkey, + .cia_encrypt = des3_128_encrypt, + .cia_decrypt = des3_128_decrypt, + } + } +}; - /* only use complete blocks */ - nbytes &= ~(DES3_128_BLOCK_SIZE - 1); - ret = crypt_s390_km(KM_TDEA_128_ENCRYPT, sctx->key, out, in, nbytes); - BUG_ON((ret < 0) || (ret != nbytes)); +static int ecb_des3_128_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) +{ + struct crypt_s390_des3_128_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); + struct blkcipher_walk walk; - return nbytes; + blkcipher_walk_init(&walk, dst, src, nbytes); + return ecb_desall_crypt(desc, KM_TDEA_128_ENCRYPT, sctx->key, &walk); } -static unsigned int des3_128_decrypt_ecb(const struct cipher_desc *desc, - u8 *out, const u8 *in, - unsigned int nbytes) +static int ecb_des3_128_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) { - struct crypt_s390_des3_128_ctx *sctx = crypto_tfm_ctx(desc->tfm); - int ret; + struct crypt_s390_des3_128_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); + struct blkcipher_walk walk; - /* only use complete blocks */ - nbytes &= ~(DES3_128_BLOCK_SIZE - 1); - ret = crypt_s390_km(KM_TDEA_128_DECRYPT, sctx->key, out, in, nbytes); - BUG_ON((ret < 0) || (ret != nbytes)); - - return nbytes; + blkcipher_walk_init(&walk, dst, src, nbytes); + return ecb_desall_crypt(desc, KM_TDEA_128_DECRYPT, sctx->key, &walk); } -static unsigned int des3_128_encrypt_cbc(const struct cipher_desc *desc, - u8 *out, const u8 *in, - unsigned int nbytes) -{ - struct crypt_s390_des3_128_ctx *sctx = crypto_tfm_ctx(desc->tfm); - int ret; - - /* only use complete blocks */ - nbytes &= ~(DES3_128_BLOCK_SIZE - 1); +static struct crypto_alg ecb_des3_128_alg = { + .cra_name = "ecb(des3_ede128)", + .cra_driver_name = "ecb-des3_ede128-s390", + .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = DES3_128_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct crypt_s390_des3_128_ctx), + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT( + ecb_des3_128_alg.cra_list), + .cra_u = { + .blkcipher = { + .min_keysize = 
DES3_128_KEY_SIZE, + .max_keysize = DES3_128_KEY_SIZE, + .setkey = des3_128_setkey, + .encrypt = ecb_des3_128_encrypt, + .decrypt = ecb_des3_128_decrypt, + } + } +}; - memcpy(sctx->iv, desc->info, DES3_128_BLOCK_SIZE); - ret = crypt_s390_kmc(KMC_TDEA_128_ENCRYPT, &sctx->iv, out, in, nbytes); - BUG_ON((ret < 0) || (ret != nbytes)); +static int cbc_des3_128_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) +{ + struct crypt_s390_des3_128_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); + struct blkcipher_walk walk; - memcpy(desc->info, sctx->iv, DES3_128_BLOCK_SIZE); - return nbytes; + blkcipher_walk_init(&walk, dst, src, nbytes); + return cbc_desall_crypt(desc, KMC_TDEA_128_ENCRYPT, sctx->iv, &walk); } -static unsigned int des3_128_decrypt_cbc(const struct cipher_desc *desc, - u8 *out, const u8 *in, - unsigned int nbytes) +static int cbc_des3_128_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) { - struct crypt_s390_des3_128_ctx *sctx = crypto_tfm_ctx(desc->tfm); - int ret; - - /* only use complete blocks */ - nbytes &= ~(DES3_128_BLOCK_SIZE - 1); - - memcpy(&sctx->iv, desc->info, DES3_128_BLOCK_SIZE); - ret = crypt_s390_kmc(KMC_TDEA_128_DECRYPT, &sctx->iv, out, in, nbytes); - BUG_ON((ret < 0) || (ret != nbytes)); + struct crypt_s390_des3_128_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); + struct blkcipher_walk walk; - return nbytes; + blkcipher_walk_init(&walk, dst, src, nbytes); + return cbc_desall_crypt(desc, KMC_TDEA_128_DECRYPT, sctx->iv, &walk); } -static struct crypto_alg des3_128_alg = { - .cra_name = "des3_ede128", - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, +static struct crypto_alg cbc_des3_128_alg = { + .cra_name = "cbc(des3_ede128)", + .cra_driver_name = "cbc-des3_ede128-s390", + .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = DES3_128_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crypt_s390_des3_128_ctx), + .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(des3_128_alg.cra_list), + .cra_list = LIST_HEAD_INIT( + cbc_des3_128_alg.cra_list), .cra_u = { - .cipher = { - .cia_min_keysize = DES3_128_KEY_SIZE, - .cia_max_keysize = DES3_128_KEY_SIZE, - .cia_setkey = des3_128_setkey, - .cia_encrypt = des3_128_encrypt, - .cia_decrypt = des3_128_decrypt, - .cia_encrypt_ecb = des3_128_encrypt_ecb, - .cia_decrypt_ecb = des3_128_decrypt_ecb, - .cia_encrypt_cbc = des3_128_encrypt_cbc, - .cia_decrypt_cbc = des3_128_decrypt_cbc, + .blkcipher = { + .min_keysize = DES3_128_KEY_SIZE, + .max_keysize = DES3_128_KEY_SIZE, + .ivsize = DES3_128_BLOCK_SIZE, + .setkey = des3_128_setkey, + .encrypt = cbc_des3_128_encrypt, + .decrypt = cbc_des3_128_decrypt, } } }; @@ -303,11 +402,12 @@ static struct crypto_alg des3_128_alg = { * */ static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen, u32 *flags) + unsigned int keylen) { int i, ret; struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm); - const u8* temp_key = key; + const u8 *temp_key = key; + u32 *flags = &tfm->crt_flags; if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) && memcmp(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2], @@ -341,89 +441,111 @@ static void des3_192_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) DES3_192_BLOCK_SIZE); } -static unsigned int des3_192_encrypt_ecb(const struct cipher_desc *desc, - u8 *out, const u8 *in, - unsigned int nbytes) -{ - struct crypt_s390_des3_192_ctx *sctx = 
crypto_tfm_ctx(desc->tfm); - int ret; +static struct crypto_alg des3_192_alg = { + .cra_name = "des3_ede", + .cra_driver_name = "des3_ede-s390", + .cra_priority = CRYPT_S390_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = DES3_192_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx), + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(des3_192_alg.cra_list), + .cra_u = { + .cipher = { + .cia_min_keysize = DES3_192_KEY_SIZE, + .cia_max_keysize = DES3_192_KEY_SIZE, + .cia_setkey = des3_192_setkey, + .cia_encrypt = des3_192_encrypt, + .cia_decrypt = des3_192_decrypt, + } + } +}; - /* only use complete blocks */ - nbytes &= ~(DES3_192_BLOCK_SIZE - 1); - ret = crypt_s390_km(KM_TDEA_192_ENCRYPT, sctx->key, out, in, nbytes); - BUG_ON((ret < 0) || (ret != nbytes)); +static int ecb_des3_192_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) +{ + struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); + struct blkcipher_walk walk; - return nbytes; + blkcipher_walk_init(&walk, dst, src, nbytes); + return ecb_desall_crypt(desc, KM_TDEA_192_ENCRYPT, sctx->key, &walk); } -static unsigned int des3_192_decrypt_ecb(const struct cipher_desc *desc, - u8 *out, const u8 *in, - unsigned int nbytes) +static int ecb_des3_192_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) { - struct crypt_s390_des3_192_ctx *sctx = crypto_tfm_ctx(desc->tfm); - int ret; - - /* only use complete blocks */ - nbytes &= ~(DES3_192_BLOCK_SIZE - 1); - ret = crypt_s390_km(KM_TDEA_192_DECRYPT, sctx->key, out, in, nbytes); - BUG_ON((ret < 0) || (ret != nbytes)); + struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); + struct blkcipher_walk walk; - return nbytes; + blkcipher_walk_init(&walk, dst, src, nbytes); + return ecb_desall_crypt(desc, KM_TDEA_192_DECRYPT, sctx->key, &walk); } -static unsigned int des3_192_encrypt_cbc(const struct cipher_desc *desc, - u8 *out, const u8 *in, - unsigned int nbytes) -{ - struct crypt_s390_des3_192_ctx *sctx = crypto_tfm_ctx(desc->tfm); - int ret; - - /* only use complete blocks */ - nbytes &= ~(DES3_192_BLOCK_SIZE - 1); +static struct crypto_alg ecb_des3_192_alg = { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "ecb-des3_ede-s390", + .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = DES3_192_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx), + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT( + ecb_des3_192_alg.cra_list), + .cra_u = { + .blkcipher = { + .min_keysize = DES3_192_KEY_SIZE, + .max_keysize = DES3_192_KEY_SIZE, + .setkey = des3_192_setkey, + .encrypt = ecb_des3_192_encrypt, + .decrypt = ecb_des3_192_decrypt, + } + } +}; - memcpy(sctx->iv, desc->info, DES3_192_BLOCK_SIZE); - ret = crypt_s390_kmc(KMC_TDEA_192_ENCRYPT, &sctx->iv, out, in, nbytes); - BUG_ON((ret < 0) || (ret != nbytes)); +static int cbc_des3_192_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) +{ + struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); + struct blkcipher_walk walk; - memcpy(desc->info, sctx->iv, DES3_192_BLOCK_SIZE); - return nbytes; + blkcipher_walk_init(&walk, dst, src, nbytes); + return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, sctx->iv, &walk); } -static unsigned int des3_192_decrypt_cbc(const struct 
cipher_desc *desc, - u8 *out, const u8 *in, - unsigned int nbytes) +static int cbc_des3_192_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) { - struct crypt_s390_des3_192_ctx *sctx = crypto_tfm_ctx(desc->tfm); - int ret; + struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); + struct blkcipher_walk walk; - /* only use complete blocks */ - nbytes &= ~(DES3_192_BLOCK_SIZE - 1); - - memcpy(&sctx->iv, desc->info, DES3_192_BLOCK_SIZE); - ret = crypt_s390_kmc(KMC_TDEA_192_DECRYPT, &sctx->iv, out, in, nbytes); - BUG_ON((ret < 0) || (ret != nbytes)); - - return nbytes; + blkcipher_walk_init(&walk, dst, src, nbytes); + return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, sctx->iv, &walk); } -static struct crypto_alg des3_192_alg = { - .cra_name = "des3_ede", - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, +static struct crypto_alg cbc_des3_192_alg = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "cbc-des3_ede-s390", + .cra_priority = CRYPT_S390_COMPOSITE_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = DES3_192_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx), + .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(des3_192_alg.cra_list), + .cra_list = LIST_HEAD_INIT( + cbc_des3_192_alg.cra_list), .cra_u = { - .cipher = { - .cia_min_keysize = DES3_192_KEY_SIZE, - .cia_max_keysize = DES3_192_KEY_SIZE, - .cia_setkey = des3_192_setkey, - .cia_encrypt = des3_192_encrypt, - .cia_decrypt = des3_192_decrypt, - .cia_encrypt_ecb = des3_192_encrypt_ecb, - .cia_decrypt_ecb = des3_192_decrypt_ecb, - .cia_encrypt_cbc = des3_192_encrypt_cbc, - .cia_decrypt_cbc = des3_192_decrypt_cbc, + .blkcipher = { + .min_keysize = DES3_192_KEY_SIZE, + .max_keysize = DES3_192_KEY_SIZE, + .ivsize = DES3_192_BLOCK_SIZE, + .setkey = des3_192_setkey, + .encrypt = cbc_des3_192_encrypt, + .decrypt = cbc_des3_192_decrypt, } } }; @@ -437,22 +559,69 @@ static int init(void) !crypt_s390_func_available(KM_TDEA_192_ENCRYPT)) return -ENOSYS; - ret |= (crypto_register_alg(&des_alg) == 0) ? 0:1; - ret |= (crypto_register_alg(&des3_128_alg) == 0) ? 0:2; - ret |= (crypto_register_alg(&des3_192_alg) == 0) ? 
0:4; - if (ret) { - crypto_unregister_alg(&des3_192_alg); - crypto_unregister_alg(&des3_128_alg); - crypto_unregister_alg(&des_alg); - return -EEXIST; - } - return 0; + ret = crypto_register_alg(&des_alg); + if (ret) + goto des_err; + ret = crypto_register_alg(&ecb_des_alg); + if (ret) + goto ecb_des_err; + ret = crypto_register_alg(&cbc_des_alg); + if (ret) + goto cbc_des_err; + + ret = crypto_register_alg(&des3_128_alg); + if (ret) + goto des3_128_err; + ret = crypto_register_alg(&ecb_des3_128_alg); + if (ret) + goto ecb_des3_128_err; + ret = crypto_register_alg(&cbc_des3_128_alg); + if (ret) + goto cbc_des3_128_err; + + ret = crypto_register_alg(&des3_192_alg); + if (ret) + goto des3_192_err; + ret = crypto_register_alg(&ecb_des3_192_alg); + if (ret) + goto ecb_des3_192_err; + ret = crypto_register_alg(&cbc_des3_192_alg); + if (ret) + goto cbc_des3_192_err; + +out: + return ret; + +cbc_des3_192_err: + crypto_unregister_alg(&ecb_des3_192_alg); +ecb_des3_192_err: + crypto_unregister_alg(&des3_192_alg); +des3_192_err: + crypto_unregister_alg(&cbc_des3_128_alg); +cbc_des3_128_err: + crypto_unregister_alg(&ecb_des3_128_alg); +ecb_des3_128_err: + crypto_unregister_alg(&des3_128_alg); +des3_128_err: + crypto_unregister_alg(&cbc_des_alg); +cbc_des_err: + crypto_unregister_alg(&ecb_des_alg); +ecb_des_err: + crypto_unregister_alg(&des_alg); +des_err: + goto out; } static void __exit fini(void) { + crypto_unregister_alg(&cbc_des3_192_alg); + crypto_unregister_alg(&ecb_des3_192_alg); crypto_unregister_alg(&des3_192_alg); + crypto_unregister_alg(&cbc_des3_128_alg); + crypto_unregister_alg(&ecb_des3_128_alg); crypto_unregister_alg(&des3_128_alg); + crypto_unregister_alg(&cbc_des_alg); + crypto_unregister_alg(&ecb_des_alg); crypto_unregister_alg(&des_alg); } diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c index 9d34a35b1aa5..49ca8690ee39 100644 --- a/arch/s390/crypto/sha1_s390.c +++ b/arch/s390/crypto/sha1_s390.c @@ -126,6 +126,8 @@ static void sha1_final(struct crypto_tfm *tfm, u8 *out) static struct crypto_alg alg = { .cra_name = "sha1", + .cra_driver_name = "sha1-s390", + .cra_priority = CRYPT_S390_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_DIGEST, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crypt_s390_sha1_ctx), diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c index f573df30f31d..8e4e67503fe7 100644 --- a/arch/s390/crypto/sha256_s390.c +++ b/arch/s390/crypto/sha256_s390.c @@ -127,6 +127,8 @@ static void sha256_final(struct crypto_tfm *tfm, u8 *out) static struct crypto_alg alg = { .cra_name = "sha256", + .cra_driver_name = "sha256-s390", + .cra_priority = CRYPT_S390_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_DIGEST, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct s390_sha256_ctx), diff --git a/arch/s390/defconfig b/arch/s390/defconfig index f1d4591eddbb..35da53986b1b 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -428,6 +428,7 @@ CONFIG_S390_TAPE_34XX=m # CONFIG_VMLOGRDR is not set # CONFIG_VMCP is not set # CONFIG_MONREADER is not set +CONFIG_MONWRITER=m # # Cryptographic devices diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h index ea5567be00fc..f3dbd91965c6 100644 --- a/arch/s390/hypfs/hypfs.h +++ b/arch/s390/hypfs/hypfs.h @@ -1,5 +1,5 @@ /* - * fs/hypfs/hypfs.h + * arch/s390/hypfs/hypfs.h * Hypervisor filesystem for Linux on s390. * * Copyright (C) IBM Corp. 
2006 diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c index 1785bce2b919..75144efbb92b 100644 --- a/arch/s390/hypfs/hypfs_diag.c +++ b/arch/s390/hypfs/hypfs_diag.c @@ -1,5 +1,5 @@ /* - * fs/hypfs/hypfs_diag.c + * arch/s390/hypfs/hypfs_diag.c * Hypervisor filesystem for Linux on s390. Diag 204 and 224 * implementation. * @@ -432,12 +432,14 @@ static int diag204_probe(void) buf = diag204_get_buffer(INFO_EXT, &pages); if (!IS_ERR(buf)) { - if (diag204(SUBC_STIB7 | INFO_EXT, pages, buf) >= 0) { + if (diag204((unsigned long)SUBC_STIB7 | + (unsigned long)INFO_EXT, pages, buf) >= 0) { diag204_store_sc = SUBC_STIB7; diag204_info_type = INFO_EXT; goto out; } - if (diag204(SUBC_STIB6 | INFO_EXT, pages, buf) >= 0) { + if (diag204((unsigned long)SUBC_STIB6 | + (unsigned long)INFO_EXT, pages, buf) >= 0) { diag204_store_sc = SUBC_STIB7; diag204_info_type = INFO_EXT; goto out; @@ -452,7 +454,8 @@ static int diag204_probe(void) rc = PTR_ERR(buf); goto fail_alloc; } - if (diag204(SUBC_STIB4 | INFO_SIMPLE, pages, buf) >= 0) { + if (diag204((unsigned long)SUBC_STIB4 | + (unsigned long)INFO_SIMPLE, pages, buf) >= 0) { diag204_store_sc = SUBC_STIB4; diag204_info_type = INFO_SIMPLE; goto out; @@ -476,7 +479,8 @@ static void *diag204_store(void) buf = diag204_get_buffer(diag204_info_type, &pages); if (IS_ERR(buf)) goto out; - if (diag204(diag204_store_sc | diag204_info_type, pages, buf) < 0) + if (diag204((unsigned long)diag204_store_sc | + (unsigned long)diag204_info_type, pages, buf) < 0) return ERR_PTR(-ENOSYS); out: return buf; @@ -531,7 +535,7 @@ __init int hypfs_diag_init(void) return rc; } -__exit void hypfs_diag_exit(void) +void hypfs_diag_exit(void) { diag224_delete_name_table(); diag204_free_buffer(); diff --git a/arch/s390/hypfs/hypfs_diag.h b/arch/s390/hypfs/hypfs_diag.h index 793dea6b9bb6..256b384aebe1 100644 --- a/arch/s390/hypfs/hypfs_diag.h +++ b/arch/s390/hypfs/hypfs_diag.h @@ -1,5 +1,5 @@ /* - * fs/hypfs/hypfs_diag.h + * arch/s390/hypfs_diag.h * Hypervisor filesystem for Linux on s390. * * Copyright (C) IBM Corp. 2006 diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index 18c091925ea5..bdade5f2e325 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c @@ -1,5 +1,5 @@ /* - * fs/hypfs/inode.c + * arch/s390/hypfs/inode.c * Hypervisor filesystem for Linux on s390. * * Copyright (C) IBM Corp. 
2006 @@ -312,10 +312,12 @@ static void hypfs_kill_super(struct super_block *sb) { struct hypfs_sb_info *sb_info = sb->s_fs_info; - hypfs_delete_tree(sb->s_root); - hypfs_remove(sb_info->update_file); - kfree(sb->s_fs_info); - sb->s_fs_info = NULL; + if (sb->s_root) { + hypfs_delete_tree(sb->s_root); + hypfs_remove(sb_info->update_file); + kfree(sb->s_fs_info); + sb->s_fs_info = NULL; + } kill_litter_super(sb); } diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 9a33ed6ca696..aa978978d3d1 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -6,7 +6,7 @@ EXTRA_AFLAGS := -traditional obj-y := bitmap.o traps.o time.o process.o \ setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ - semaphore.o s390_ext.o debug.o profile.o irq.o reipl_diag.o + semaphore.o s390_ext.o debug.o profile.o irq.o ipl.o obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) @@ -24,6 +24,7 @@ obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \ obj-$(CONFIG_VIRT_TIMER) += vtime.o obj-$(CONFIG_STACKTRACE) += stacktrace.o +obj-$(CONFIG_KPROBES) += kprobes.o # Kexec part S390_KEXEC_OBJS := machine_kexec.o crash.o diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 5b5799ac8f83..0c712b78a7e8 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -505,6 +505,8 @@ pgm_no_vtime2: mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP + tm SP_PSW+1(%r15),0x01 # kernel per event ? + bz BASED(kernel_per) l %r3,__LC_PGM_ILC # load program interruption code la %r8,0x7f nr %r8,%r3 # clear per-event-bit and ilc @@ -536,6 +538,16 @@ pgm_no_vtime3: stosm __SF_EMPTY(%r15),0x03 # reenable interrupts b BASED(sysc_do_svc) +# +# per was called from kernel, must be kprobes +# +kernel_per: + mvi SP_TRAP+1(%r15),0x28 # set trap indication to pgm check + la %r2,SP_PTREGS(%r15) # address of register-save area + l %r1,BASED(.Lhandle_per) # load adr. of per handler + la %r14,BASED(sysc_leave) # load adr. of system return + br %r1 # branch to do_single_step + /* * IO interrupt handler routine */ diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 56f5f613b868..29bbfbab7332 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -518,6 +518,8 @@ pgm_no_vtime2: #endif lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct lg %r1,__TI_task(%r9) + tm SP_PSW+1(%r15),0x01 # kernel per event ? + jz kernel_per mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID @@ -553,6 +555,16 @@ pgm_no_vtime3: stosm __SF_EMPTY(%r15),0x03 # reenable interrupts j sysc_do_svc +# +# per was called from kernel, must be kprobes +# +kernel_per: + lhi %r0,__LC_PGM_OLD_PSW + sth %r0,SP_TRAP(%r15) # set trap indication to pgm check + la %r2,SP_PTREGS(%r15) # address of register-save area + larl %r14,sysc_leave # load adr. 
of system ret, no work + jg do_single_step # branch to do_single_step + /* * IO interrupt handler routine */ @@ -815,7 +827,7 @@ restart_go: */ stack_overflow: lg %r15,__LC_PANIC_STACK # change to panic stack - aghi %r1,-SP_SIZE + aghi %r15,-SP_SIZE mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack stmg %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack la %r1,__LC_SAVE_AREA @@ -823,7 +835,7 @@ stack_overflow: je 0f chi %r12,__LC_PGM_OLD_PSW je 0f - la %r1,__LC_SAVE_AREA+16 + la %r1,__LC_SAVE_AREA+32 0: mvc SP_R12(32,%r15),0(%r1) # move %r12-%r15 to stack xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain la %r2,SP_PTREGS(%r15) # load pt_regs diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index adad8863ee2f..0f1db268a8a9 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S @@ -272,7 +272,7 @@ iplstart: # load parameter file from ipl device # .Lagain1: - l %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) # ramdisk loc. is temp + l %r2,.Linitrd # ramdisk loc. is temp bas %r14,.Lloader # load parameter file ltr %r2,%r2 # got anything ? bz .Lnopf @@ -280,7 +280,7 @@ iplstart: bnh .Lnotrunc la %r2,895 .Lnotrunc: - l %r4,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) + l %r4,.Linitrd clc 0(3,%r4),.L_hdr # if it is HDRx bz .Lagain1 # skip dataset header clc 0(3,%r4),.L_eof # if it is EOFx @@ -323,14 +323,15 @@ iplstart: # load ramdisk from ipl device # .Lagain2: - l %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) # addr of ramdisk + l %r2,.Linitrd # addr of ramdisk + st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) bas %r14,.Lloader # load ramdisk st %r2,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r12) # store size of ramdisk ltr %r2,%r2 bnz .Lrdcont st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) # no ramdisk found .Lrdcont: - l %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) + l %r2,.Linitrd clc 0(3,%r2),.L_hdr # skip HDRx and EOFx bz .Lagain2 @@ -379,6 +380,7 @@ iplstart: l %r1,.Lstartup br %r1 +.Linitrd:.long _end + 0x400000 # default address of initrd .Lparm: .long PARMAREA .Lstartup: .long startup .Lcvtab:.long _ebcasc # ebcdic to ascii table @@ -479,65 +481,6 @@ start: .byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7 .byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff -.macro GET_IPL_DEVICE -.Lget_ipl_device: - l %r1,0xb8 # get sid - sll %r1,15 # test if subchannel is enabled - srl %r1,31 - ltr %r1,%r1 - bz 2f-.LPG1(%r13) # subchannel disabled - l %r1,0xb8 - la %r5,.Lipl_schib-.LPG1(%r13) - stsch 0(%r5) # get schib of subchannel - bnz 2f-.LPG1(%r13) # schib not available - tm 5(%r5),0x01 # devno valid? - bno 2f-.LPG1(%r13) - la %r6,ipl_parameter_flags-.LPG1(%r13) - oi 3(%r6),0x01 # set flag - la %r2,ipl_devno-.LPG1(%r13) - mvc 0(2,%r2),6(%r5) # store devno - tm 4(%r5),0x80 # qdio capable device? - bno 2f-.LPG1(%r13) - oi 3(%r6),0x02 # set flag - - # copy ipl parameters - - lhi %r0,4096 - l %r2,20(%r0) # get address of parameter list - lhi %r3,IPL_PARMBLOCK_ORIGIN - st %r3,20(%r0) - lhi %r4,1 - cr %r2,%r3 # start parameters < destination ? 
- jl 0f - lhi %r1,1 # copy direction is upwards - j 1f -0: lhi %r1,-1 # copy direction is downwards - ar %r2,%r0 - ar %r3,%r0 - ar %r2,%r1 - ar %r3,%r1 -1: mvc 0(1,%r3),0(%r2) # finally copy ipl parameters - ar %r3,%r1 - ar %r2,%r1 - sr %r0,%r4 - jne 1b - b 2f-.LPG1(%r13) - - .align 4 -.Lipl_schib: - .rept 13 - .long 0 - .endr - - .globl ipl_parameter_flags -ipl_parameter_flags: - .long 0 - .globl ipl_devno -ipl_devno: - .word 0 -2: -.endm - #ifdef CONFIG_64BIT #include "head64.S" #else diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S index a4dc61f3285e..1fa9fa1ca740 100644 --- a/arch/s390/kernel/head31.S +++ b/arch/s390/kernel/head31.S @@ -26,8 +26,8 @@ startup:basr %r13,0 # get base # .org PARMAREA .long 0,0 # IPL_DEVICE - .long 0,RAMDISK_ORIGIN # INITRD_START - .long 0,RAMDISK_SIZE # INITRD_SIZE + .long 0,0 # INITRD_START + .long 0,0 # INITRD_SIZE .org COMMAND_LINE .byte "root=/dev/ram0 ro" @@ -37,12 +37,23 @@ startup:basr %r13,0 # get base startup_continue: basr %r13,0 # get base -.LPG1: GET_IPL_DEVICE +.LPG1: mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area # move IPL device to lowcore mvc __LC_IPLDEV(4),IPL_DEVICE-PARMAREA(%r12) +# +# Setup stack +# + l %r15,.Linittu-.LPG1(%r13) + mvc __LC_CURRENT(4),__TI_task(%r15) + ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE + st %r15,__LC_KERNEL_STACK # set end of kernel stack + ahi %r15,-96 + xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain + l %r14,.Lipl_save_parameters-.LPG1(%r13) + basr %r14,%r14 # # clear bss memory # @@ -114,6 +125,10 @@ startup_continue: b .Lfchunk-.LPG1(%r13) .align 4 +.Lipl_save_parameters: + .long ipl_save_parameters +.Linittu: + .long init_thread_union .Lpmask: .byte 0 .align 8 @@ -273,7 +288,23 @@ startup_continue: .Lbss_end: .long _end .Lparmaddr: .long PARMAREA .Lsccbaddr: .long .Lsccb + + .globl ipl_schib +ipl_schib: + .rept 13 + .long 0 + .endr + + .globl ipl_flags +ipl_flags: + .long 0 + .globl ipl_devno +ipl_devno: + .word 0 + .org 0x12000 +.globl s390_readinfo_sccb +s390_readinfo_sccb: .Lsccb: .hword 0x1000 # length, one page .byte 0x00,0x00,0x00 @@ -302,16 +333,6 @@ startup_continue: .globl _stext _stext: basr %r13,0 # get base .LPG3: -# -# Setup stack -# - l %r15,.Linittu-.LPG3(%r13) - mvc __LC_CURRENT(4),__TI_task(%r15) - ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE - st %r15,__LC_KERNEL_STACK # set end of kernel stack - ahi %r15,-96 - xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain - # check control registers stctl %c0,%c15,0(%r15) oi 2(%r15),0x40 # enable sigp emergency signal @@ -330,6 +351,5 @@ _stext: basr %r13,0 # get base # .align 8 .Ldw: .long 0x000a0000,0x00000000 -.Linittu:.long init_thread_union .Lstart:.long start_kernel .Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index 9d80c5b1ef95..a8bdd96494c7 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -26,8 +26,8 @@ startup:basr %r13,0 # get base # .org PARMAREA .quad 0 # IPL_DEVICE - .quad RAMDISK_ORIGIN # INITRD_START - .quad RAMDISK_SIZE # INITRD_SIZE + .quad 0 # INITRD_START + .quad 0 # INITRD_SIZE .org COMMAND_LINE .byte "root=/dev/ram0 ro" @@ -39,8 +39,8 @@ startup_continue: basr %r13,0 # get base .LPG1: sll %r13,1 # remove high order bit srl %r13,1 - GET_IPL_DEVICE lhi %r1,1 # mode 1 = esame + mvi __LC_AR_MODE_ID,1 # set esame flag slr %r0,%r0 # 
set cpuid to zero sigp %r1,%r0,0x12 # switch to esame mode sam64 # switch to 64 bit mode @@ -48,7 +48,18 @@ startup_continue: lg %r12,.Lparmaddr-.LPG1(%r13)# pointer to parameter area # move IPL device to lowcore mvc __LC_IPLDEV(4),IPL_DEVICE+4-PARMAREA(%r12) +# +# Setup stack +# + larl %r15,init_thread_union + lg %r14,__TI_task(%r15) # cache current in lowcore + stg %r14,__LC_CURRENT + aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE + stg %r15,__LC_KERNEL_STACK # set end of kernel stack + aghi %r15,-160 + xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain + brasl %r14,ipl_save_parameters # # clear bss memory # @@ -239,6 +250,19 @@ startup_continue: oi 7(%r12),0x80 # set IDTE flag 0: +# +# find out if we have the MVCOS instruction +# + la %r1,0f-.LPG1(%r13) # set program check address + stg %r1,__LC_PGM_NEW_PSW+8 + .short 0xc800 # mvcos 0(%r0),0(%r0),%r0 + .short 0x0000 + .short 0x0000 +0: tm 0x8f,0x13 # special-operation exception? + bno 1f-.LPG1(%r13) # if yes, MVCOS is present + oi 6(%r12),2 # set MVCOS flag +1: + lpswe .Lentry-.LPG1(13) # jump to _stext in primary-space, # virtual and never return ... .align 16 @@ -268,7 +292,22 @@ startup_continue: .Lparmaddr: .quad PARMAREA + .globl ipl_schib +ipl_schib: + .rept 13 + .long 0 + .endr + + .globl ipl_flags +ipl_flags: + .long 0 + .globl ipl_devno +ipl_devno: + .word 0 + .org 0x12000 +.globl s390_readinfo_sccb +s390_readinfo_sccb: .Lsccb: .hword 0x1000 # length, one page .byte 0x00,0x00,0x00 @@ -297,24 +336,12 @@ startup_continue: .globl _stext _stext: basr %r13,0 # get base .LPG3: -# -# Setup stack -# - larl %r15,init_thread_union - lg %r14,__TI_task(%r15) # cache current in lowcore - stg %r14,__LC_CURRENT - aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE - stg %r15,__LC_KERNEL_STACK # set end of kernel stack - aghi %r15,-160 - xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain - # check control registers stctg %c0,%c15,0(%r15) oi 6(%r15),0x40 # enable sigp emergency signal oi 4(%r15),0x10 # switch on low address protection lctlg %c0,%c15,0(%r15) -# lam 0,15,.Laregs-.LPG3(%r13) # load access regs needed by uaccess brasl %r14,start_kernel # go to C code # @@ -322,7 +349,7 @@ _stext: basr %r13,0 # get base # basr %r13,0 lpswe .Ldw-.(%r13) # load disabled wait psw -# + .align 8 .Ldw: .quad 0x0002000180000000,0x0000000000000000 .Laregs: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c new file mode 100644 index 000000000000..6555cc48e28f --- /dev/null +++ b/arch/s390/kernel/ipl.c @@ -0,0 +1,942 @@ +/* + * arch/s390/kernel/ipl.c + * ipl/reipl/dump support for Linux on s390. + * + * Copyright (C) IBM Corp.
2005,2006 + * Author(s): Michael Holzheu <holzheu@de.ibm.com> + * Heiko Carstens <heiko.carstens@de.ibm.com> + * Volker Sameske <sameske@de.ibm.com> + */ + +#include <linux/types.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/delay.h> +#include <linux/reboot.h> +#include <asm/smp.h> +#include <asm/setup.h> +#include <asm/cpcmd.h> +#include <asm/cio.h> + +#define IPL_PARM_BLOCK_VERSION 0 + +enum ipl_type { + IPL_TYPE_NONE = 1, + IPL_TYPE_UNKNOWN = 2, + IPL_TYPE_CCW = 4, + IPL_TYPE_FCP = 8, +}; + +#define IPL_NONE_STR "none" +#define IPL_UNKNOWN_STR "unknown" +#define IPL_CCW_STR "ccw" +#define IPL_FCP_STR "fcp" + +static char *ipl_type_str(enum ipl_type type) +{ + switch (type) { + case IPL_TYPE_NONE: + return IPL_NONE_STR; + case IPL_TYPE_CCW: + return IPL_CCW_STR; + case IPL_TYPE_FCP: + return IPL_FCP_STR; + case IPL_TYPE_UNKNOWN: + default: + return IPL_UNKNOWN_STR; + } +} + +enum ipl_method { + IPL_METHOD_NONE, + IPL_METHOD_CCW_CIO, + IPL_METHOD_CCW_DIAG, + IPL_METHOD_CCW_VM, + IPL_METHOD_FCP_RO_DIAG, + IPL_METHOD_FCP_RW_DIAG, + IPL_METHOD_FCP_RO_VM, +}; + +enum shutdown_action { + SHUTDOWN_REIPL, + SHUTDOWN_DUMP, + SHUTDOWN_STOP, +}; + +#define SHUTDOWN_REIPL_STR "reipl" +#define SHUTDOWN_DUMP_STR "dump" +#define SHUTDOWN_STOP_STR "stop" + +static char *shutdown_action_str(enum shutdown_action action) +{ + switch (action) { + case SHUTDOWN_REIPL: + return SHUTDOWN_REIPL_STR; + case SHUTDOWN_DUMP: + return SHUTDOWN_DUMP_STR; + case SHUTDOWN_STOP: + return SHUTDOWN_STOP_STR; + default: + BUG(); + } +} + +enum diag308_subcode { + DIAG308_IPL = 3, + DIAG308_DUMP = 4, + DIAG308_SET = 5, + DIAG308_STORE = 6, +}; + +enum diag308_ipl_type { + DIAG308_IPL_TYPE_FCP = 0, + DIAG308_IPL_TYPE_CCW = 2, +}; + +enum diag308_opt { + DIAG308_IPL_OPT_IPL = 0x10, + DIAG308_IPL_OPT_DUMP = 0x20, +}; + +enum diag308_rc { + DIAG308_RC_OK = 1, +}; + +static int diag308_set_works = 0; + +static int reipl_capabilities = IPL_TYPE_UNKNOWN; +static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN; +static enum ipl_method reipl_method = IPL_METHOD_NONE; +static struct ipl_parameter_block *reipl_block_fcp; +static struct ipl_parameter_block *reipl_block_ccw; + +static int dump_capabilities = IPL_TYPE_NONE; +static enum ipl_type dump_type = IPL_TYPE_NONE; +static enum ipl_method dump_method = IPL_METHOD_NONE; +static struct ipl_parameter_block *dump_block_fcp; +static struct ipl_parameter_block *dump_block_ccw; + +static enum shutdown_action on_panic_action = SHUTDOWN_STOP; + +static int diag308(unsigned long subcode, void *addr) +{ + register unsigned long _addr asm("0") = (unsigned long)addr; + register unsigned long _rc asm("1") = 0; + + asm volatile ( + " diag %0,%2,0x308\n" + "0: \n" + ".section __ex_table,\"a\"\n" +#ifdef CONFIG_64BIT + " .align 8\n" + " .quad 0b, 0b\n" +#else + " .align 4\n" + " .long 0b, 0b\n" +#endif + ".previous\n" + : "+d" (_addr), "+d" (_rc) + : "d" (subcode) : "cc", "memory" ); + + return _rc; +} + +/* SYSFS */ + +#define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \ +static ssize_t sys_##_prefix##_##_name##_show(struct subsystem *subsys, \ + char *page) \ +{ \ + return sprintf(page, _format, _value); \ +} \ +static struct subsys_attribute sys_##_prefix##_##_name##_attr = \ + __ATTR(_name, S_IRUGO, sys_##_prefix##_##_name##_show, NULL); + +#define DEFINE_IPL_ATTR_RW(_prefix, _name, _fmt_out, _fmt_in, _value) \ +static ssize_t sys_##_prefix##_##_name##_show(struct subsystem *subsys, \ + char *page) \ +{ \ + return sprintf(page, _fmt_out, \ + (unsigned long 
long) _value); \ +} \ +static ssize_t sys_##_prefix##_##_name##_store(struct subsystem *subsys,\ + const char *buf, size_t len) \ +{ \ + unsigned long long value; \ + if (sscanf(buf, _fmt_in, &value) != 1) \ + return -EINVAL; \ + _value = value; \ + return len; \ +} \ +static struct subsys_attribute sys_##_prefix##_##_name##_attr = \ + __ATTR(_name,(S_IRUGO | S_IWUSR), \ + sys_##_prefix##_##_name##_show, \ + sys_##_prefix##_##_name##_store); + +static void make_attrs_ro(struct attribute **attrs) +{ + while (*attrs) { + (*attrs)->mode = S_IRUGO; + attrs++; + } +} + +/* + * ipl section + */ + +static enum ipl_type ipl_get_type(void) +{ + struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; + + if (!(ipl_flags & IPL_DEVNO_VALID)) + return IPL_TYPE_UNKNOWN; + if (!(ipl_flags & IPL_PARMBLOCK_VALID)) + return IPL_TYPE_CCW; + if (ipl->hdr.version > IPL_MAX_SUPPORTED_VERSION) + return IPL_TYPE_UNKNOWN; + if (ipl->hdr.pbt != DIAG308_IPL_TYPE_FCP) + return IPL_TYPE_UNKNOWN; + return IPL_TYPE_FCP; +} + +static ssize_t ipl_type_show(struct subsystem *subsys, char *page) +{ + return sprintf(page, "%s\n", ipl_type_str(ipl_get_type())); +} + +static struct subsys_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type); + +static ssize_t sys_ipl_device_show(struct subsystem *subsys, char *page) +{ + struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; + + switch (ipl_get_type()) { + case IPL_TYPE_CCW: + return sprintf(page, "0.0.%04x\n", ipl_devno); + case IPL_TYPE_FCP: + return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno); + default: + return 0; + } +} + +static struct subsys_attribute sys_ipl_device_attr = + __ATTR(device, S_IRUGO, sys_ipl_device_show, NULL); + +static ssize_t ipl_parameter_read(struct kobject *kobj, char *buf, loff_t off, + size_t count) +{ + unsigned int size = IPL_PARMBLOCK_SIZE; + + if (off > size) + return 0; + if (off + count > size) + count = size - off; + memcpy(buf, (void *)IPL_PARMBLOCK_START + off, count); + return count; +} + +static struct bin_attribute ipl_parameter_attr = { + .attr = { + .name = "binary_parameter", + .mode = S_IRUGO, + .owner = THIS_MODULE, + }, + .size = PAGE_SIZE, + .read = &ipl_parameter_read, +}; + +static ssize_t ipl_scp_data_read(struct kobject *kobj, char *buf, loff_t off, + size_t count) +{ + unsigned int size = IPL_PARMBLOCK_START->ipl_info.fcp.scp_data_len; + void *scp_data = &IPL_PARMBLOCK_START->ipl_info.fcp.scp_data; + + if (off > size) + return 0; + if (off + count > size) + count = size - off; + memcpy(buf, scp_data + off, count); + return count; +} + +static struct bin_attribute ipl_scp_data_attr = { + .attr = { + .name = "scp_data", + .mode = S_IRUGO, + .owner = THIS_MODULE, + }, + .size = PAGE_SIZE, + .read = &ipl_scp_data_read, +}; + +/* FCP ipl device attributes */ + +DEFINE_IPL_ATTR_RO(ipl_fcp, wwpn, "0x%016llx\n", (unsigned long long) + IPL_PARMBLOCK_START->ipl_info.fcp.wwpn); +DEFINE_IPL_ATTR_RO(ipl_fcp, lun, "0x%016llx\n", (unsigned long long) + IPL_PARMBLOCK_START->ipl_info.fcp.lun); +DEFINE_IPL_ATTR_RO(ipl_fcp, bootprog, "%lld\n", (unsigned long long) + IPL_PARMBLOCK_START->ipl_info.fcp.bootprog); +DEFINE_IPL_ATTR_RO(ipl_fcp, br_lba, "%lld\n", (unsigned long long) + IPL_PARMBLOCK_START->ipl_info.fcp.br_lba); + +static struct attribute *ipl_fcp_attrs[] = { + &sys_ipl_type_attr.attr, + &sys_ipl_device_attr.attr, + &sys_ipl_fcp_wwpn_attr.attr, + &sys_ipl_fcp_lun_attr.attr, + &sys_ipl_fcp_bootprog_attr.attr, + &sys_ipl_fcp_br_lba_attr.attr, + NULL, +}; + +static struct attribute_group ipl_fcp_attr_group = { + .attrs = 
ipl_fcp_attrs, +}; + +/* CCW ipl device attributes */ + +static struct attribute *ipl_ccw_attrs[] = { + &sys_ipl_type_attr.attr, + &sys_ipl_device_attr.attr, + NULL, +}; + +static struct attribute_group ipl_ccw_attr_group = { + .attrs = ipl_ccw_attrs, +}; + +/* UNKNOWN ipl device attributes */ + +static struct attribute *ipl_unknown_attrs[] = { + &sys_ipl_type_attr.attr, + NULL, +}; + +static struct attribute_group ipl_unknown_attr_group = { + .attrs = ipl_unknown_attrs, +}; + +static decl_subsys(ipl, NULL, NULL); + +/* + * reipl section + */ + +/* FCP reipl device attributes */ + +DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n", + reipl_block_fcp->ipl_info.fcp.wwpn); +DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n", + reipl_block_fcp->ipl_info.fcp.lun); +DEFINE_IPL_ATTR_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n", + reipl_block_fcp->ipl_info.fcp.bootprog); +DEFINE_IPL_ATTR_RW(reipl_fcp, br_lba, "%lld\n", "%lld\n", + reipl_block_fcp->ipl_info.fcp.br_lba); +DEFINE_IPL_ATTR_RW(reipl_fcp, device, "0.0.%04llx\n", "0.0.%llx\n", + reipl_block_fcp->ipl_info.fcp.devno); + +static struct attribute *reipl_fcp_attrs[] = { + &sys_reipl_fcp_device_attr.attr, + &sys_reipl_fcp_wwpn_attr.attr, + &sys_reipl_fcp_lun_attr.attr, + &sys_reipl_fcp_bootprog_attr.attr, + &sys_reipl_fcp_br_lba_attr.attr, + NULL, +}; + +static struct attribute_group reipl_fcp_attr_group = { + .name = IPL_FCP_STR, + .attrs = reipl_fcp_attrs, +}; + +/* CCW reipl device attributes */ + +DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n", + reipl_block_ccw->ipl_info.ccw.devno); + +static struct attribute *reipl_ccw_attrs[] = { + &sys_reipl_ccw_device_attr.attr, + NULL, +}; + +static struct attribute_group reipl_ccw_attr_group = { + .name = IPL_CCW_STR, + .attrs = reipl_ccw_attrs, +}; + +/* reipl type */ + +static int reipl_set_type(enum ipl_type type) +{ + if (!(reipl_capabilities & type)) + return -EINVAL; + + switch(type) { + case IPL_TYPE_CCW: + if (MACHINE_IS_VM) + reipl_method = IPL_METHOD_CCW_VM; + else + reipl_method = IPL_METHOD_CCW_CIO; + break; + case IPL_TYPE_FCP: + if (diag308_set_works) + reipl_method = IPL_METHOD_FCP_RW_DIAG; + else if (MACHINE_IS_VM) + reipl_method = IPL_METHOD_FCP_RO_VM; + else + reipl_method = IPL_METHOD_FCP_RO_DIAG; + break; + default: + reipl_method = IPL_METHOD_NONE; + } + reipl_type = type; + return 0; +} + +static ssize_t reipl_type_show(struct subsystem *subsys, char *page) +{ + return sprintf(page, "%s\n", ipl_type_str(reipl_type)); +} + +static ssize_t reipl_type_store(struct subsystem *subsys, const char *buf, + size_t len) +{ + int rc = -EINVAL; + + if (strncmp(buf, IPL_CCW_STR, strlen(IPL_CCW_STR)) == 0) + rc = reipl_set_type(IPL_TYPE_CCW); + else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0) + rc = reipl_set_type(IPL_TYPE_FCP); + return (rc != 0) ? 
rc : len; +} + +static struct subsys_attribute reipl_type_attr = + __ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store); + +static decl_subsys(reipl, NULL, NULL); + +/* + * dump section + */ + +/* FCP dump device attributes */ + +DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n", + dump_block_fcp->ipl_info.fcp.wwpn); +DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n", + dump_block_fcp->ipl_info.fcp.lun); +DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n", + dump_block_fcp->ipl_info.fcp.bootprog); +DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n", + dump_block_fcp->ipl_info.fcp.br_lba); +DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n", + dump_block_fcp->ipl_info.fcp.devno); + +static struct attribute *dump_fcp_attrs[] = { + &sys_dump_fcp_device_attr.attr, + &sys_dump_fcp_wwpn_attr.attr, + &sys_dump_fcp_lun_attr.attr, + &sys_dump_fcp_bootprog_attr.attr, + &sys_dump_fcp_br_lba_attr.attr, + NULL, +}; + +static struct attribute_group dump_fcp_attr_group = { + .name = IPL_FCP_STR, + .attrs = dump_fcp_attrs, +}; + +/* CCW dump device attributes */ + +DEFINE_IPL_ATTR_RW(dump_ccw, device, "0.0.%04llx\n", "0.0.%llx\n", + dump_block_ccw->ipl_info.ccw.devno); + +static struct attribute *dump_ccw_attrs[] = { + &sys_dump_ccw_device_attr.attr, + NULL, +}; + +static struct attribute_group dump_ccw_attr_group = { + .name = IPL_CCW_STR, + .attrs = dump_ccw_attrs, +}; + +/* dump type */ + +static int dump_set_type(enum ipl_type type) +{ + if (!(dump_capabilities & type)) + return -EINVAL; + switch(type) { + case IPL_TYPE_CCW: + if (MACHINE_IS_VM) + dump_method = IPL_METHOD_CCW_VM; + else + dump_method = IPL_METHOD_CCW_CIO; + break; + case IPL_TYPE_FCP: + dump_method = IPL_METHOD_FCP_RW_DIAG; + break; + default: + dump_method = IPL_METHOD_NONE; + } + dump_type = type; + return 0; +} + +static ssize_t dump_type_show(struct subsystem *subsys, char *page) +{ + return sprintf(page, "%s\n", ipl_type_str(dump_type)); +} + +static ssize_t dump_type_store(struct subsystem *subsys, const char *buf, + size_t len) +{ + int rc = -EINVAL; + + if (strncmp(buf, IPL_NONE_STR, strlen(IPL_NONE_STR)) == 0) + rc = dump_set_type(IPL_TYPE_NONE); + else if (strncmp(buf, IPL_CCW_STR, strlen(IPL_CCW_STR)) == 0) + rc = dump_set_type(IPL_TYPE_CCW); + else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0) + rc = dump_set_type(IPL_TYPE_FCP); + return (rc != 0) ? 
rc : len; +} + +static struct subsys_attribute dump_type_attr = + __ATTR(dump_type, 0644, dump_type_show, dump_type_store); + +static decl_subsys(dump, NULL, NULL); + +#ifdef CONFIG_SMP +static void dump_smp_stop_all(void) +{ + int cpu; + preempt_disable(); + for_each_online_cpu(cpu) { + if (cpu == smp_processor_id()) + continue; + while (signal_processor(cpu, sigp_stop) == sigp_busy) + udelay(10); + } + preempt_enable(); +} +#else +#define dump_smp_stop_all() do { } while (0) +#endif + +/* + * Shutdown actions section + */ + +static decl_subsys(shutdown_actions, NULL, NULL); + +/* on panic */ + +static ssize_t on_panic_show(struct subsystem *subsys, char *page) +{ + return sprintf(page, "%s\n", shutdown_action_str(on_panic_action)); +} + +static ssize_t on_panic_store(struct subsystem *subsys, const char *buf, + size_t len) +{ + if (strncmp(buf, SHUTDOWN_REIPL_STR, strlen(SHUTDOWN_REIPL_STR)) == 0) + on_panic_action = SHUTDOWN_REIPL; + else if (strncmp(buf, SHUTDOWN_DUMP_STR, + strlen(SHUTDOWN_DUMP_STR)) == 0) + on_panic_action = SHUTDOWN_DUMP; + else if (strncmp(buf, SHUTDOWN_STOP_STR, + strlen(SHUTDOWN_STOP_STR)) == 0) + on_panic_action = SHUTDOWN_STOP; + else + return -EINVAL; + + return len; +} + +static struct subsys_attribute on_panic_attr = + __ATTR(on_panic, 0644, on_panic_show, on_panic_store); + +static void print_fcp_block(struct ipl_parameter_block *fcp_block) +{ + printk(KERN_EMERG "wwpn: %016llx\n", + (unsigned long long)fcp_block->ipl_info.fcp.wwpn); + printk(KERN_EMERG "lun: %016llx\n", + (unsigned long long)fcp_block->ipl_info.fcp.lun); + printk(KERN_EMERG "bootprog: %lld\n", + (unsigned long long)fcp_block->ipl_info.fcp.bootprog); + printk(KERN_EMERG "br_lba: %lld\n", + (unsigned long long)fcp_block->ipl_info.fcp.br_lba); + printk(KERN_EMERG "device: %llx\n", + (unsigned long long)fcp_block->ipl_info.fcp.devno); + printk(KERN_EMERG "opt: %x\n", fcp_block->ipl_info.fcp.opt); +} + +void do_reipl(void) +{ + struct ccw_dev_id devid; + static char buf[100]; + + switch (reipl_type) { + case IPL_TYPE_CCW: + printk(KERN_EMERG "reboot on ccw device: 0.0.%04x\n", + reipl_block_ccw->ipl_info.ccw.devno); + break; + case IPL_TYPE_FCP: + printk(KERN_EMERG "reboot on fcp device:\n"); + print_fcp_block(reipl_block_fcp); + break; + default: + break; + } + + switch (reipl_method) { + case IPL_METHOD_CCW_CIO: + devid.devno = reipl_block_ccw->ipl_info.ccw.devno; + devid.ssid = 0; + reipl_ccw_dev(&devid); + break; + case IPL_METHOD_CCW_VM: + sprintf(buf, "IPL %X", reipl_block_ccw->ipl_info.ccw.devno); + cpcmd(buf, NULL, 0, NULL); + break; + case IPL_METHOD_CCW_DIAG: + diag308(DIAG308_SET, reipl_block_ccw); + diag308(DIAG308_IPL, NULL); + break; + case IPL_METHOD_FCP_RW_DIAG: + diag308(DIAG308_SET, reipl_block_fcp); + diag308(DIAG308_IPL, NULL); + break; + case IPL_METHOD_FCP_RO_DIAG: + diag308(DIAG308_IPL, NULL); + break; + case IPL_METHOD_FCP_RO_VM: + cpcmd("IPL", NULL, 0, NULL); + break; + case IPL_METHOD_NONE: + default: + if (MACHINE_IS_VM) + cpcmd("IPL", NULL, 0, NULL); + diag308(DIAG308_IPL, NULL); + break; + } + panic("reipl failed!\n"); +} + +static void do_dump(void) +{ + struct ccw_dev_id devid; + static char buf[100]; + + switch (dump_type) { + case IPL_TYPE_CCW: + printk(KERN_EMERG "Automatic dump on ccw device: 0.0.%04x\n", + dump_block_ccw->ipl_info.ccw.devno); + break; + case IPL_TYPE_FCP: + printk(KERN_EMERG "Automatic dump on fcp device:\n"); + print_fcp_block(dump_block_fcp); + break; + default: + return; + } + + switch (dump_method) { + case IPL_METHOD_CCW_CIO: + 
dump_smp_stop_all(); + devid.devno = dump_block_ccw->ipl_info.ccw.devno; + devid.ssid = 0; + reipl_ccw_dev(&devid); + break; + case IPL_METHOD_CCW_VM: + dump_smp_stop_all(); + sprintf(buf, "STORE STATUS"); + cpcmd(buf, NULL, 0, NULL); + sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno); + cpcmd(buf, NULL, 0, NULL); + break; + case IPL_METHOD_CCW_DIAG: + diag308(DIAG308_SET, dump_block_ccw); + diag308(DIAG308_DUMP, NULL); + break; + case IPL_METHOD_FCP_RW_DIAG: + diag308(DIAG308_SET, dump_block_fcp); + diag308(DIAG308_DUMP, NULL); + break; + case IPL_METHOD_NONE: + default: + return; + } + printk(KERN_EMERG "Dump failed!\n"); +} + +/* init functions */ + +static int __init ipl_register_fcp_files(void) +{ + int rc; + + rc = sysfs_create_group(&ipl_subsys.kset.kobj, + &ipl_fcp_attr_group); + if (rc) + goto out; + rc = sysfs_create_bin_file(&ipl_subsys.kset.kobj, + &ipl_parameter_attr); + if (rc) + goto out_ipl_parm; + rc = sysfs_create_bin_file(&ipl_subsys.kset.kobj, + &ipl_scp_data_attr); + if (!rc) + goto out; + + sysfs_remove_bin_file(&ipl_subsys.kset.kobj, &ipl_parameter_attr); + +out_ipl_parm: + sysfs_remove_group(&ipl_subsys.kset.kobj, &ipl_fcp_attr_group); +out: + return rc; +} + +static int __init ipl_init(void) +{ + int rc; + + rc = firmware_register(&ipl_subsys); + if (rc) + return rc; + switch (ipl_get_type()) { + case IPL_TYPE_CCW: + rc = sysfs_create_group(&ipl_subsys.kset.kobj, + &ipl_ccw_attr_group); + break; + case IPL_TYPE_FCP: + rc = ipl_register_fcp_files(); + break; + default: + rc = sysfs_create_group(&ipl_subsys.kset.kobj, + &ipl_unknown_attr_group); + break; + } + if (rc) + firmware_unregister(&ipl_subsys); + return rc; +} + +static void __init reipl_probe(void) +{ + void *buffer; + + buffer = (void *) get_zeroed_page(GFP_KERNEL); + if (!buffer) + return; + if (diag308(DIAG308_STORE, buffer) == DIAG308_RC_OK) + diag308_set_works = 1; + free_page((unsigned long)buffer); +} + +static int __init reipl_ccw_init(void) +{ + int rc; + + reipl_block_ccw = (void *) get_zeroed_page(GFP_KERNEL); + if (!reipl_block_ccw) + return -ENOMEM; + rc = sysfs_create_group(&reipl_subsys.kset.kobj, &reipl_ccw_attr_group); + if (rc) { + free_page((unsigned long)reipl_block_ccw); + return rc; + } + reipl_block_ccw->hdr.len = IPL_PARM_BLK_CCW_LEN; + reipl_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION; + reipl_block_ccw->hdr.blk0_len = sizeof(reipl_block_ccw->ipl_info.ccw); + reipl_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW; + if (ipl_get_type() == IPL_TYPE_CCW) + reipl_block_ccw->ipl_info.ccw.devno = ipl_devno; + reipl_capabilities |= IPL_TYPE_CCW; + return 0; +} + +static int __init reipl_fcp_init(void) +{ + int rc; + + if ((!diag308_set_works) && (ipl_get_type() != IPL_TYPE_FCP)) + return 0; + if ((!diag308_set_works) && (ipl_get_type() == IPL_TYPE_FCP)) + make_attrs_ro(reipl_fcp_attrs); + + reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL); + if (!reipl_block_fcp) + return -ENOMEM; + rc = sysfs_create_group(&reipl_subsys.kset.kobj, &reipl_fcp_attr_group); + if (rc) { + free_page((unsigned long)reipl_block_fcp); + return rc; + } + if (ipl_get_type() == IPL_TYPE_FCP) { + memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE); + } else { + reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN; + reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION; + reipl_block_fcp->hdr.blk0_len = + sizeof(reipl_block_fcp->ipl_info.fcp); + reipl_block_fcp->hdr.pbt = DIAG308_IPL_TYPE_FCP; + reipl_block_fcp->ipl_info.fcp.opt = DIAG308_IPL_OPT_IPL; + } + reipl_capabilities |= IPL_TYPE_FCP; + return 0; 
+} + +static int __init reipl_init(void) +{ + int rc; + + rc = firmware_register(&reipl_subsys); + if (rc) + return rc; + rc = subsys_create_file(&reipl_subsys, &reipl_type_attr); + if (rc) { + firmware_unregister(&reipl_subsys); + return rc; + } + rc = reipl_ccw_init(); + if (rc) + return rc; + rc = reipl_fcp_init(); + if (rc) + return rc; + rc = reipl_set_type(ipl_get_type()); + if (rc) + return rc; + return 0; +} + +static int __init dump_ccw_init(void) +{ + int rc; + + dump_block_ccw = (void *) get_zeroed_page(GFP_KERNEL); + if (!dump_block_ccw) + return -ENOMEM; + rc = sysfs_create_group(&dump_subsys.kset.kobj, &dump_ccw_attr_group); + if (rc) { + free_page((unsigned long)dump_block_ccw); + return rc; + } + dump_block_ccw->hdr.len = IPL_PARM_BLK_CCW_LEN; + dump_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION; + dump_block_ccw->hdr.blk0_len = sizeof(dump_block_ccw->ipl_info.ccw); + dump_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW; + dump_capabilities |= IPL_TYPE_CCW; + return 0; +} + +extern char s390_readinfo_sccb[]; + +static int __init dump_fcp_init(void) +{ + int rc; + + if (!(s390_readinfo_sccb[91] & 0x2)) + return 0; /* LDIPL DUMP is not installed */ + if (!diag308_set_works) + return 0; + dump_block_fcp = (void *) get_zeroed_page(GFP_KERNEL); + if (!dump_block_fcp) + return -ENOMEM; + rc = sysfs_create_group(&dump_subsys.kset.kobj, &dump_fcp_attr_group); + if (rc) { + free_page((unsigned long)dump_block_fcp); + return rc; + } + dump_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN; + dump_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION; + dump_block_fcp->hdr.blk0_len = sizeof(dump_block_fcp->ipl_info.fcp); + dump_block_fcp->hdr.pbt = DIAG308_IPL_TYPE_FCP; + dump_block_fcp->ipl_info.fcp.opt = DIAG308_IPL_OPT_DUMP; + dump_capabilities |= IPL_TYPE_FCP; + return 0; +} + +#define SHUTDOWN_ON_PANIC_PRIO 0 + +static int shutdown_on_panic_notify(struct notifier_block *self, + unsigned long event, void *data) +{ + if (on_panic_action == SHUTDOWN_DUMP) + do_dump(); + else if (on_panic_action == SHUTDOWN_REIPL) + do_reipl(); + return NOTIFY_OK; +} + +static struct notifier_block shutdown_on_panic_nb = { + .notifier_call = shutdown_on_panic_notify, + .priority = SHUTDOWN_ON_PANIC_PRIO +}; + +static int __init dump_init(void) +{ + int rc; + + rc = firmware_register(&dump_subsys); + if (rc) + return rc; + rc = subsys_create_file(&dump_subsys, &dump_type_attr); + if (rc) { + firmware_unregister(&dump_subsys); + return rc; + } + rc = dump_ccw_init(); + if (rc) + return rc; + rc = dump_fcp_init(); + if (rc) + return rc; + dump_set_type(IPL_TYPE_NONE); + return 0; +} + +static int __init shutdown_actions_init(void) +{ + int rc; + + rc = firmware_register(&shutdown_actions_subsys); + if (rc) + return rc; + rc = subsys_create_file(&shutdown_actions_subsys, &on_panic_attr); + if (rc) { + firmware_unregister(&shutdown_actions_subsys); + return rc; + } + atomic_notifier_chain_register(&panic_notifier_list, + &shutdown_on_panic_nb); + return 0; +} + +static int __init s390_ipl_init(void) +{ + int rc; + + reipl_probe(); + rc = ipl_init(); + if (rc) + return rc; + rc = reipl_init(); + if (rc) + return rc; + rc = dump_init(); + if (rc) + return rc; + rc = shutdown_actions_init(); + if (rc) + return rc; + return 0; +} + +__initcall(s390_ipl_init); diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c new file mode 100644 index 000000000000..ca28fb0b3790 --- /dev/null +++ b/arch/s390/kernel/kprobes.c @@ -0,0 +1,657 @@ +/* + * Kernel Probes (KProbes) + * + * This program is free software; you can
redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2002, 2006 + * + * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com> + */ + +#include <linux/config.h> +#include <linux/kprobes.h> +#include <linux/ptrace.h> +#include <linux/preempt.h> +#include <linux/stop_machine.h> +#include <asm/cacheflush.h> +#include <asm/kdebug.h> +#include <asm/sections.h> +#include <asm/uaccess.h> +#include <linux/module.h> + +DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; +DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); + +int __kprobes arch_prepare_kprobe(struct kprobe *p) +{ + /* Make sure the probe isn't going on a difficult instruction */ + if (is_prohibited_opcode((kprobe_opcode_t *) p->addr)) + return -EINVAL; + + if ((unsigned long)p->addr & 0x01) { + printk("Attempt to register kprobe at an unaligned address\n"); + return -EINVAL; + } + + /* Use the get_insn_slot() facility for correctness */ + if (!(p->ainsn.insn = get_insn_slot())) + return -ENOMEM; + + memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); + + get_instruction_type(&p->ainsn); + p->opcode = *p->addr; + return 0; +} + +int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction) +{ + switch (*(__u8 *) instruction) { + case 0x0c: /* bassm */ + case 0x0b: /* bsm */ + case 0x83: /* diag */ + case 0x44: /* ex */ + return -EINVAL; + } + switch (*(__u16 *) instruction) { + case 0x0101: /* pr */ + case 0xb25a: /* bsa */ + case 0xb240: /* bakr */ + case 0xb258: /* bsg */ + case 0xb218: /* pc */ + case 0xb228: /* pt */ + return -EINVAL; + } + return 0; +} + +void __kprobes get_instruction_type(struct arch_specific_insn *ainsn) +{ + /* default fixup method */ + ainsn->fixup = FIXUP_PSW_NORMAL; + + /* save r1 operand */ + ainsn->reg = (*ainsn->insn & 0xf0) >> 4; + + /* save the instruction length (pop 5-5) in bytes */ + switch (*(__u8 *) (ainsn->insn) >> 4) { + case 0: + ainsn->ilen = 2; + break; + case 1: + case 2: + ainsn->ilen = 4; + break; + case 3: + ainsn->ilen = 6; + break; + } + + switch (*(__u8 *) ainsn->insn) { + case 0x05: /* balr */ + case 0x0d: /* basr */ + ainsn->fixup = FIXUP_RETURN_REGISTER; + /* if r2 = 0, no branch will be taken */ + if ((*ainsn->insn & 0x0f) == 0) + ainsn->fixup |= FIXUP_BRANCH_NOT_TAKEN; + break; + case 0x06: /* bctr */ + case 0x07: /* bcr */ + ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN; + break; + case 0x45: /* bal */ + case 0x4d: /* bas */ + ainsn->fixup = FIXUP_RETURN_REGISTER; + break; + case 0x47: /* bc */ + case 0x46: /* bct */ + case 0x86: /* bxh */ + case 0x87: /* bxle */ + ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN; + break; + case 0x82: /* lpsw */ + ainsn->fixup = FIXUP_NOT_REQUIRED; + break; + case 0xb2: /* lpswe */ + if (*(((__u8 *) ainsn->insn) + 1) == 0xb2) { + ainsn->fixup = FIXUP_NOT_REQUIRED; + } + break; + case 0xa7: /* bras */ + if ((*ainsn->insn & 0x0f) == 0x05) { + ainsn->fixup |= 
FIXUP_RETURN_REGISTER; + } + break; + case 0xc0: + if ((*ainsn->insn & 0x0f) == 0x00 /* larl */ + || (*ainsn->insn & 0x0f) == 0x05) /* brasl */ + ainsn->fixup |= FIXUP_RETURN_REGISTER; + break; + case 0xeb: + if (*(((__u8 *) ainsn->insn) + 5 ) == 0x44 || /* bxhg */ + *(((__u8 *) ainsn->insn) + 5) == 0x45) {/* bxleg */ + ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN; + } + break; + case 0xe3: /* bctg */ + if (*(((__u8 *) ainsn->insn) + 5) == 0x46) { + ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN; + } + break; + } +} + +static int __kprobes swap_instruction(void *aref) +{ + struct ins_replace_args *args = aref; + int err = -EFAULT; + + asm volatile( + "0: mvc 0(2,%2),0(%3)\n" + "1: la %0,0\n" + "2:\n" + EX_TABLE(0b,2b) + : "+d" (err), "=m" (*args->ptr) + : "a" (args->ptr), "a" (&args->new), "m" (args->new)); + return err; +} + +void __kprobes arch_arm_kprobe(struct kprobe *p) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + unsigned long status = kcb->kprobe_status; + struct ins_replace_args args; + + args.ptr = p->addr; + args.old = p->opcode; + args.new = BREAKPOINT_INSTRUCTION; + + kcb->kprobe_status = KPROBE_SWAP_INST; + stop_machine_run(swap_instruction, &args, NR_CPUS); + kcb->kprobe_status = status; +} + +void __kprobes arch_disarm_kprobe(struct kprobe *p) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + unsigned long status = kcb->kprobe_status; + struct ins_replace_args args; + + args.ptr = p->addr; + args.old = BREAKPOINT_INSTRUCTION; + args.new = p->opcode; + + kcb->kprobe_status = KPROBE_SWAP_INST; + stop_machine_run(swap_instruction, &args, NR_CPUS); + kcb->kprobe_status = status; +} + +void __kprobes arch_remove_kprobe(struct kprobe *p) +{ + mutex_lock(&kprobe_mutex); + free_insn_slot(p->ainsn.insn); + mutex_unlock(&kprobe_mutex); +} + +static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) +{ + per_cr_bits kprobe_per_regs[1]; + + memset(kprobe_per_regs, 0, sizeof(per_cr_bits)); + regs->psw.addr = (unsigned long)p->ainsn.insn | PSW_ADDR_AMODE; + + /* Set up the per control reg info, will pass to lctl */ + kprobe_per_regs[0].em_instruction_fetch = 1; + kprobe_per_regs[0].starting_addr = (unsigned long)p->ainsn.insn; + kprobe_per_regs[0].ending_addr = (unsigned long)p->ainsn.insn + 1; + + /* Set the PER control regs, turns on single step for this address */ + __ctl_load(kprobe_per_regs, 9, 11); + regs->psw.mask |= PSW_MASK_PER; + regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK); +} + +static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + kcb->prev_kprobe.kp = kprobe_running(); + kcb->prev_kprobe.status = kcb->kprobe_status; + kcb->prev_kprobe.kprobe_saved_imask = kcb->kprobe_saved_imask; + memcpy(kcb->prev_kprobe.kprobe_saved_ctl, kcb->kprobe_saved_ctl, + sizeof(kcb->kprobe_saved_ctl)); +} + +static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; + kcb->kprobe_status = kcb->prev_kprobe.status; + kcb->kprobe_saved_imask = kcb->prev_kprobe.kprobe_saved_imask; + memcpy(kcb->kprobe_saved_ctl, kcb->prev_kprobe.kprobe_saved_ctl, + sizeof(kcb->kprobe_saved_ctl)); +} + +static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + __get_cpu_var(current_kprobe) = p; + /* Save the interrupt and per flags */ + kcb->kprobe_saved_imask = regs->psw.mask & + (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK); + /* Save the control regs that govern PER */ + __ctl_store(kcb->kprobe_saved_ctl, 9, 
11); +} + +/* Called with kretprobe_lock held */ +void __kprobes arch_prepare_kretprobe(struct kretprobe *rp, + struct pt_regs *regs) +{ + struct kretprobe_instance *ri; + + if ((ri = get_free_rp_inst(rp)) != NULL) { + ri->rp = rp; + ri->task = current; + ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14]; + + /* Replace the return addr with trampoline addr */ + regs->gprs[14] = (unsigned long)&kretprobe_trampoline; + + add_rp_inst(ri); + } else { + rp->nmissed++; + } +} + +static int __kprobes kprobe_handler(struct pt_regs *regs) +{ + struct kprobe *p; + int ret = 0; + unsigned long *addr = (unsigned long *) + ((regs->psw.addr & PSW_ADDR_INSN) - 2); + struct kprobe_ctlblk *kcb; + + /* + * We don't want to be preempted for the entire + * duration of kprobe processing + */ + preempt_disable(); + kcb = get_kprobe_ctlblk(); + + /* Check we're not actually recursing */ + if (kprobe_running()) { + p = get_kprobe(addr); + if (p) { + if (kcb->kprobe_status == KPROBE_HIT_SS && + *p->ainsn.insn == BREAKPOINT_INSTRUCTION) { + regs->psw.mask &= ~PSW_MASK_PER; + regs->psw.mask |= kcb->kprobe_saved_imask; + goto no_kprobe; + } + /* We have reentered the kprobe_handler(), since + * another probe was hit while within the handler. + * We here save the original kprobes variables and + * just single step on the instruction of the new probe + * without calling any user handlers. + */ + save_previous_kprobe(kcb); + set_current_kprobe(p, regs, kcb); + kprobes_inc_nmissed_count(p); + prepare_singlestep(p, regs); + kcb->kprobe_status = KPROBE_REENTER; + return 1; + } else { + p = __get_cpu_var(current_kprobe); + if (p->break_handler && p->break_handler(p, regs)) { + goto ss_probe; + } + } + goto no_kprobe; + } + + p = get_kprobe(addr); + if (!p) { + if (*addr != BREAKPOINT_INSTRUCTION) { + /* + * The breakpoint instruction was removed right + * after we hit it. Another cpu has removed + * either a probepoint or a debugger breakpoint + * at this address. In either case, no further + * handling of this interrupt is appropriate. + * + */ + ret = 1; + } + /* Not one of ours: let kernel handle it */ + goto no_kprobe; + } + + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + set_current_kprobe(p, regs, kcb); + if (p->pre_handler && p->pre_handler(p, regs)) + /* handler has already set things up, so skip ss setup */ + return 1; + +ss_probe: + prepare_singlestep(p, regs); + kcb->kprobe_status = KPROBE_HIT_SS; + return 1; + +no_kprobe: + preempt_enable_no_resched(); + return ret; +} + +/* + * Function return probe trampoline: + * - init_kprobes() establishes a probepoint here + * - When the probed function returns, this probe + * causes the handlers to fire + */ +void __kprobes kretprobe_trampoline_holder(void) +{ + asm volatile(".global kretprobe_trampoline\n" + "kretprobe_trampoline: bcr 0,0\n"); +} + +/* + * Called when the probe at kretprobe trampoline is hit + */ +int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) +{ + struct kretprobe_instance *ri = NULL; + struct hlist_head *head; + struct hlist_node *node, *tmp; + unsigned long flags, orig_ret_address = 0; + unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline; + + spin_lock_irqsave(&kretprobe_lock, flags); + head = kretprobe_inst_table_head(current); + + /* + * It is possible to have multiple instances associated with a given + * task either because multiple functions in the call path + * have a return probe installed on them, and/or more than one + * return probe was registered for a target function.
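+ * (A recursive probed function alone can queue several pending + * instances for the same task, one per nesting level.)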
+ * + * We can handle this because: + * - instances are always inserted at the head of the list + * - when multiple return probes are registered for the same + * function, the first instance's ret_addr will point to the + * real return address, and all the rest will point to + * kretprobe_trampoline + */ + hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { + if (ri->task != current) + /* another task is sharing our hash bucket */ + continue; + + if (ri->rp && ri->rp->handler) + ri->rp->handler(ri, regs); + + orig_ret_address = (unsigned long)ri->ret_addr; + recycle_rp_inst(ri); + + if (orig_ret_address != trampoline_address) { + /* + * This is the real return address. Any other + * instances associated with this task are for + * other calls deeper on the call stack + */ + break; + } + } + BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address)); + regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE; + + reset_current_kprobe(); + spin_unlock_irqrestore(&kretprobe_lock, flags); + preempt_enable_no_resched(); + + /* + * By returning a non-zero value, we are telling + * kprobe_handler() that we don't want the post_handler + * to run (and have re-enabled preemption) + */ + return 1; +} + +/* + * Called after single-stepping. p->addr is the address of the + * instruction whose first byte has been replaced by the "breakpoint" + * instruction. To avoid the SMP problems that can occur when we + * temporarily put back the original opcode to single-step, we + * single-stepped a copy of the instruction. The address of this + * copy is p->ainsn.insn. + */ +static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + regs->psw.addr &= PSW_ADDR_INSN; + + if (p->ainsn.fixup & FIXUP_PSW_NORMAL) + regs->psw.addr = (unsigned long)p->addr + + ((unsigned long)regs->psw.addr - + (unsigned long)p->ainsn.insn); + + if (p->ainsn.fixup & FIXUP_BRANCH_NOT_TAKEN) + if ((unsigned long)regs->psw.addr - + (unsigned long)p->ainsn.insn == p->ainsn.ilen) + regs->psw.addr = (unsigned long)p->addr + p->ainsn.ilen; + + if (p->ainsn.fixup & FIXUP_RETURN_REGISTER) + regs->gprs[p->ainsn.reg] = ((unsigned long)p->addr + + (regs->gprs[p->ainsn.reg] - + (unsigned long)p->ainsn.insn)) + | PSW_ADDR_AMODE; + + regs->psw.addr |= PSW_ADDR_AMODE; + /* turn off PER mode */ + regs->psw.mask &= ~PSW_MASK_PER; + /* Restore the original per control regs */ + __ctl_load(kcb->kprobe_saved_ctl, 9, 11); + regs->psw.mask |= kcb->kprobe_saved_imask; +} + +static int __kprobes post_kprobe_handler(struct pt_regs *regs) +{ + struct kprobe *cur = kprobe_running(); + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + if (!cur) + return 0; + + if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + cur->post_handler(cur, regs, 0); + } + + resume_execution(cur, regs); + + /*Restore back the original saved kprobes variables and continue. */ + if (kcb->kprobe_status == KPROBE_REENTER) { + restore_previous_kprobe(kcb); + goto out; + } + reset_current_kprobe(); +out: + preempt_enable_no_resched(); + + /* + * if somebody else is singlestepping across a probe point, psw mask + * will have PER set, in which case, continue the remaining processing + * of do_single_step, as if this is not a probe hit. 
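+ * (Returning 0 here leaves kprobe_exceptions_notify() at NOTIFY_DONE, + * so the event is passed on instead of being swallowed with NOTIFY_STOP.)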
+ */ + if (regs->psw.mask & PSW_MASK_PER) { + return 0; + } + + return 1; +} + +static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) +{ + struct kprobe *cur = kprobe_running(); + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + const struct exception_table_entry *entry; + + switch(kcb->kprobe_status) { + case KPROBE_SWAP_INST: + /* We are here because the instruction replacement failed */ + return 0; + case KPROBE_HIT_SS: + case KPROBE_REENTER: + /* + * We are here because the instruction being single + * stepped caused a page fault. We reset the current + * kprobe and the psw address points back to the probe address + * and allow the page fault handler to continue as a + * normal page fault. + */ + regs->psw.addr = (unsigned long)cur->addr | PSW_ADDR_AMODE; + regs->psw.mask &= ~PSW_MASK_PER; + regs->psw.mask |= kcb->kprobe_saved_imask; + if (kcb->kprobe_status == KPROBE_REENTER) + restore_previous_kprobe(kcb); + else + reset_current_kprobe(); + preempt_enable_no_resched(); + break; + case KPROBE_HIT_ACTIVE: + case KPROBE_HIT_SSDONE: + /* + * We increment the nmissed count for accounting, + * we can also use npre/npostfault count for accounting + * these specific fault cases. + */ + kprobes_inc_nmissed_count(cur); + + /* + * We come here because instructions in the pre/post + * handler caused the page fault; this could happen + * if the handler tries to access user space by + * copy_from_user(), get_user() etc. Let the + * user-specified handler try to fix it first. + */ + if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) + return 1; + + /* + * In case the user-specified fault handler returned + * zero, try to fix up. + */ + entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); + if (entry) { + regs->psw.addr = entry->fixup | PSW_ADDR_AMODE; + return 1; + } + + /* + * fixup_exception() could not handle it. + * Let do_page_fault() fix it. + */ + break; + default: + break; + } + return 0; +} + +/* + * Wrapper routine for handling exceptions.
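+ * Hooked into the die notifier chain: dispatches breakpoint, single-step + * and fault events to the kprobe handlers above.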
+ */ +int __kprobes kprobe_exceptions_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + struct die_args *args = (struct die_args *)data; + int ret = NOTIFY_DONE; + + switch (val) { + case DIE_BPT: + if (kprobe_handler(args->regs)) + ret = NOTIFY_STOP; + break; + case DIE_SSTEP: + if (post_kprobe_handler(args->regs)) + ret = NOTIFY_STOP; + break; + case DIE_TRAP: + case DIE_PAGE_FAULT: + /* kprobe_running() needs smp_processor_id() */ + preempt_disable(); + if (kprobe_running() && + kprobe_fault_handler(args->regs, args->trapnr)) + ret = NOTIFY_STOP; + preempt_enable(); + break; + default: + break; + } + return ret; +} + +int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) +{ + struct jprobe *jp = container_of(p, struct jprobe, kp); + unsigned long addr; + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs)); + + /* setup return addr to the jprobe handler routine */ + regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE; + + /* r14 is the function return address */ + kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14]; + /* r15 is the stack pointer */ + kcb->jprobe_saved_r15 = (unsigned long)regs->gprs[15]; + addr = (unsigned long)kcb->jprobe_saved_r15; + + memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr, + MIN_STACK_SIZE(addr)); + return 1; +} + +void __kprobes jprobe_return(void) +{ + asm volatile(".word 0x0002"); +} + +void __kprobes jprobe_return_end(void) +{ + asm volatile("bcr 0,0"); +} + +int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_r15); + + /* Put the regs back */ + memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs)); + /* put the stack back */ + memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack, + MIN_STACK_SIZE(stack_addr)); + preempt_enable_no_resched(); + return 1; +} + +static struct kprobe trampoline_p = { + .addr = (kprobe_opcode_t *) & kretprobe_trampoline, + .pre_handler = trampoline_probe_handler +}; + +int __init arch_init_kprobes(void) +{ + return register_kprobe(&trampoline_p); +} diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S index 658e5ac484f9..4562cdbce8eb 100644 --- a/arch/s390/kernel/reipl.S +++ b/arch/s390/kernel/reipl.S @@ -8,13 +8,30 @@ #include <asm/lowcore.h> - .globl do_reipl -do_reipl: basr %r13,0 + .globl do_reipl_asm +do_reipl_asm: basr %r13,0 .Lpg0: lpsw .Lnewpsw-.Lpg0(%r13) -.Lpg1: lctl %c6,%c6,.Lall-.Lpg0(%r13) - stctl %c0,%c0,.Lctlsave-.Lpg0(%r13) - ni .Lctlsave-.Lpg0(%r13),0xef - lctl %c0,%c0,.Lctlsave-.Lpg0(%r13) + + # switch off lowcore protection + +.Lpg1: stctl %c0,%c0,.Lctlsave1-.Lpg0(%r13) + stctl %c0,%c0,.Lctlsave2-.Lpg0(%r13) + ni .Lctlsave1-.Lpg0(%r13),0xef + lctl %c0,%c0,.Lctlsave1-.Lpg0(%r13) + + # do store status of all registers + + stm %r0,%r15,__LC_GPREGS_SAVE_AREA + stctl %c0,%c15,__LC_CREGS_SAVE_AREA + mvc __LC_CREGS_SAVE_AREA(4),.Lctlsave2-.Lpg0(%r13) + stam %a0,%a15,__LC_AREGS_SAVE_AREA + stpx __LC_PREFIX_SAVE_AREA + stckc .Lclkcmp-.Lpg0(%r13) + mvc __LC_CLOCK_COMP_SAVE_AREA(8),.Lclkcmp-.Lpg0(%r13) + stpt __LC_CPU_TIMER_SAVE_AREA + st %r13, __LC_PSW_SAVE_AREA+4 + + lctl %c6,%c6,.Lall-.Lpg0(%r13) lr %r1,%r2 mvc __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13) stsch .Lschib-.Lpg0(%r13) @@ -46,9 +63,11 @@ do_reipl: basr %r13,0 .Ldisab: st %r14,.Ldispsw+4-.Lpg0(%r13) lpsw .Ldispsw-.Lpg0(%r13) .align 8 +.Lclkcmp: .quad 0x0000000000000000 .Lall: 
.long 0xff000000 .Lnull: .long 0x00000000 -.Lctlsave: .long 0x00000000 +.Lctlsave1: .long 0x00000000 +.Lctlsave2: .long 0x00000000 .align 8 .Lnewpsw: .long 0x00080000,0x80000000+.Lpg1 .Lpcnew: .long 0x00080000,0x80000000+.Lecs diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S index 4d090d60f3ef..95bd1e234f63 100644 --- a/arch/s390/kernel/reipl64.S +++ b/arch/s390/kernel/reipl64.S @@ -8,13 +8,30 @@ */ #include <asm/lowcore.h> - .globl do_reipl -do_reipl: basr %r13,0 -.Lpg0: lpswe .Lnewpsw-.Lpg0(%r13) + .globl do_reipl_asm +do_reipl_asm: basr %r13,0 + + # do store status of all registers + +.Lpg0: stg %r1,.Lregsave-.Lpg0(%r13) + lghi %r1,0x1000 + stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-0x1000(%r1) + lg %r0,.Lregsave-.Lpg0(%r13) + stg %r0,__LC_GPREGS_SAVE_AREA-0x1000+8(%r1) + stctg %c0,%c15,__LC_CREGS_SAVE_AREA-0x1000(%r1) + stam %a0,%a15,__LC_AREGS_SAVE_AREA-0x1000(%r1) + stpx __LC_PREFIX_SAVE_AREA-0x1000(%r1) + stfpc __LC_FP_CREG_SAVE_AREA-0x1000(%r1) + stckc .Lclkcmp-.Lpg0(%r13) + mvc __LC_CLOCK_COMP_SAVE_AREA-0x1000(8,%r1),.Lclkcmp-.Lpg0(%r13) + stpt __LC_CPU_TIMER_SAVE_AREA-0x1000(%r1) + stg %r13, __LC_PSW_SAVE_AREA-0x1000+8(%r1) + + lpswe .Lnewpsw-.Lpg0(%r13) .Lpg1: lctlg %c6,%c6,.Lall-.Lpg0(%r13) - stctg %c0,%c0,.Lctlsave-.Lpg0(%r13) - ni .Lctlsave+4-.Lpg0(%r13),0xef - lctlg %c0,%c0,.Lctlsave-.Lpg0(%r13) + stctg %c0,%c0,.Lregsave-.Lpg0(%r13) + ni .Lregsave+4-.Lpg0(%r13),0xef + lctlg %c0,%c0,.Lregsave-.Lpg0(%r13) lgr %r1,%r2 mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13) stsch .Lschib-.Lpg0(%r13) @@ -50,8 +67,9 @@ do_reipl: basr %r13,0 st %r14,.Ldispsw+12-.Lpg0(%r13) lpswe .Ldispsw-.Lpg0(%r13) .align 8 +.Lclkcmp: .quad 0x0000000000000000 .Lall: .quad 0x00000000ff000000 -.Lctlsave: .quad 0x0000000000000000 +.Lregsave: .quad 0x0000000000000000 .Lnull: .long 0x0000000000000000 .align 16 /* @@ -92,5 +110,3 @@ do_reipl: basr %r13,0 .long 0x00000000,0x00000000 .long 0x00000000,0x00000000 - - diff --git a/arch/s390/kernel/reipl_diag.c b/arch/s390/kernel/reipl_diag.c deleted file mode 100644 index 1f33951ba439..000000000000 --- a/arch/s390/kernel/reipl_diag.c +++ /dev/null @@ -1,39 +0,0 @@ -/* - * This file contains the implementation of the - * Linux re-IPL support - * - * (C) Copyright IBM Corp. 
2005 - * - * Author(s): Volker Sameske (sameske@de.ibm.com) - * - */ - -#include <linux/kernel.h> - -static unsigned int reipl_diag_rc1; -static unsigned int reipl_diag_rc2; - -/* - * re-IPL the system using the last used IPL parameters - */ -void reipl_diag(void) -{ - asm volatile ( - " la %%r4,0\n" - " la %%r5,0\n" - " diag %%r4,%2,0x308\n" - "0:\n" - " st %%r4,%0\n" - " st %%r5,%1\n" - ".section __ex_table,\"a\"\n" -#ifdef CONFIG_64BIT - " .align 8\n" - " .quad 0b, 0b\n" -#else - " .align 4\n" - " .long 0b, 0b\n" -#endif - ".previous\n" - : "=m" (reipl_diag_rc1), "=m" (reipl_diag_rc2) - : "d" (3) : "cc", "4", "5" ); -} diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c index c73a45467fa4..9f19e833a562 100644 --- a/arch/s390/kernel/s390_ksyms.c +++ b/arch/s390/kernel/s390_ksyms.c @@ -25,12 +25,6 @@ EXPORT_SYMBOL(_oi_bitmap); EXPORT_SYMBOL(_ni_bitmap); EXPORT_SYMBOL(_zb_findmap); EXPORT_SYMBOL(_sb_findmap); -EXPORT_SYMBOL(__copy_from_user_asm); -EXPORT_SYMBOL(__copy_to_user_asm); -EXPORT_SYMBOL(__copy_in_user_asm); -EXPORT_SYMBOL(__clear_user_asm); -EXPORT_SYMBOL(__strncpy_from_user_asm); -EXPORT_SYMBOL(__strnlen_user_asm); EXPORT_SYMBOL(diag10); /* diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index c902f059c7aa..e3d9325f6022 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -37,6 +37,7 @@ #include <linux/kernel_stat.h> #include <linux/device.h> #include <linux/notifier.h> +#include <linux/pfn.h> #include <asm/uaccess.h> #include <asm/system.h> @@ -50,6 +51,12 @@ #include <asm/sections.h> /* + * User copy operations. + */ +struct uaccess_ops uaccess; +EXPORT_SYMBOL_GPL(uaccess); + +/* * Machine setup.. */ unsigned int console_mode = 0; @@ -284,16 +291,9 @@ void (*_machine_power_off)(void) = machine_power_off_smp; /* * Reboot, halt and power_off routines for non SMP. */ -extern void reipl(unsigned long devno); -extern void reipl_diag(void); static void do_machine_restart_nonsmp(char * __unused) { - reipl_diag(); - - if (MACHINE_IS_VM) - cpcmd ("IPL", NULL, 0, NULL); - else - reipl (0x10000 | S390_lowcore.ipl_device); + do_reipl(); } static void do_machine_halt_nonsmp(void) @@ -501,13 +501,47 @@ setup_memory(void) * partially used pages are not usable - thus * we are rounding upwards: */ - start_pfn = (__pa(&_end) + PAGE_SIZE - 1) >> PAGE_SHIFT; - end_pfn = max_pfn = memory_end >> PAGE_SHIFT; + start_pfn = PFN_UP(__pa(&_end)); + end_pfn = max_pfn = PFN_DOWN(memory_end); /* Initialize storage key for kernel pages */ for (init_pfn = 0 ; init_pfn < start_pfn; init_pfn++) page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY); +#ifdef CONFIG_BLK_DEV_INITRD + /* + * Move the initrd in case the bitmap of the bootmem allocator + * would overwrite it.
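+ * (The bitmap sits right above the kernel image at start_pfn, so the + * initrd is moved only when it begins below PFN_PHYS(start_pfn) + bmap_size.)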
+ */ + + if (INITRD_START && INITRD_SIZE) { + unsigned long bmap_size; + unsigned long start; + + bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1); + bmap_size = PFN_PHYS(bmap_size); + + if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) { + start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE; + + if (start + INITRD_SIZE > memory_end) { + printk("initrd extends beyond end of memory " + "(0x%08lx > 0x%08lx)\n" + "disabling initrd\n", + start + INITRD_SIZE, memory_end); + INITRD_START = INITRD_SIZE = 0; + } else { + printk("Moving initrd (0x%08lx -> 0x%08lx, " + "size: %ld)\n", + INITRD_START, start, INITRD_SIZE); + memmove((void *) start, (void *) INITRD_START, + INITRD_SIZE); + INITRD_START = start; + } + } + } +#endif + /* * Initialize the boot-time allocator (with low memory only): */ @@ -559,7 +593,7 @@ setup_memory(void) reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size); #ifdef CONFIG_BLK_DEV_INITRD - if (INITRD_START) { + if (INITRD_START && INITRD_SIZE) { if (INITRD_START + INITRD_SIZE <= memory_end) { reserve_bootmem(INITRD_START, INITRD_SIZE); initrd_start = INITRD_START; @@ -613,6 +647,11 @@ setup_arch(char **cmdline_p) memory_end = memory_size; + if (MACHINE_HAS_MVCOS) + memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess)); + else + memcpy(&uaccess, &uaccess_std, sizeof(uaccess)); + parse_early_param(); #ifndef CONFIG_64BIT @@ -720,214 +759,3 @@ struct seq_operations cpuinfo_op = { .show = show_cpuinfo, }; -#define DEFINE_IPL_ATTR(_name, _format, _value) \ -static ssize_t ipl_##_name##_show(struct subsystem *subsys, \ - char *page) \ -{ \ - return sprintf(page, _format, _value); \ -} \ -static struct subsys_attribute ipl_##_name##_attr = \ - __ATTR(_name, S_IRUGO, ipl_##_name##_show, NULL); - -DEFINE_IPL_ATTR(wwpn, "0x%016llx\n", (unsigned long long) - IPL_PARMBLOCK_START->fcp.wwpn); -DEFINE_IPL_ATTR(lun, "0x%016llx\n", (unsigned long long) - IPL_PARMBLOCK_START->fcp.lun); -DEFINE_IPL_ATTR(bootprog, "%lld\n", (unsigned long long) - IPL_PARMBLOCK_START->fcp.bootprog); -DEFINE_IPL_ATTR(br_lba, "%lld\n", (unsigned long long) - IPL_PARMBLOCK_START->fcp.br_lba); - -enum ipl_type_type { - ipl_type_unknown, - ipl_type_ccw, - ipl_type_fcp, -}; - -static enum ipl_type_type -get_ipl_type(void) -{ - struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; - - if (!IPL_DEVNO_VALID) - return ipl_type_unknown; - if (!IPL_PARMBLOCK_VALID) - return ipl_type_ccw; - if (ipl->hdr.header.version > IPL_MAX_SUPPORTED_VERSION) - return ipl_type_unknown; - if (ipl->fcp.pbt != IPL_TYPE_FCP) - return ipl_type_unknown; - return ipl_type_fcp; -} - -static ssize_t -ipl_type_show(struct subsystem *subsys, char *page) -{ - switch (get_ipl_type()) { - case ipl_type_ccw: - return sprintf(page, "ccw\n"); - case ipl_type_fcp: - return sprintf(page, "fcp\n"); - default: - return sprintf(page, "unknown\n"); - } -} - -static struct subsys_attribute ipl_type_attr = __ATTR_RO(ipl_type); - -static ssize_t -ipl_device_show(struct subsystem *subsys, char *page) -{ - struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; - - switch (get_ipl_type()) { - case ipl_type_ccw: - return sprintf(page, "0.0.%04x\n", ipl_devno); - case ipl_type_fcp: - return sprintf(page, "0.0.%04x\n", ipl->fcp.devno); - default: - return 0; - } -} - -static struct subsys_attribute ipl_device_attr = - __ATTR(device, S_IRUGO, ipl_device_show, NULL); - -static struct attribute *ipl_fcp_attrs[] = { - &ipl_type_attr.attr, - &ipl_device_attr.attr, - &ipl_wwpn_attr.attr, - &ipl_lun_attr.attr, - &ipl_bootprog_attr.attr, - &ipl_br_lba_attr.attr, 
- NULL, -}; - -static struct attribute_group ipl_fcp_attr_group = { - .attrs = ipl_fcp_attrs, -}; - -static struct attribute *ipl_ccw_attrs[] = { - &ipl_type_attr.attr, - &ipl_device_attr.attr, - NULL, -}; - -static struct attribute_group ipl_ccw_attr_group = { - .attrs = ipl_ccw_attrs, -}; - -static struct attribute *ipl_unknown_attrs[] = { - &ipl_type_attr.attr, - NULL, -}; - -static struct attribute_group ipl_unknown_attr_group = { - .attrs = ipl_unknown_attrs, -}; - -static ssize_t -ipl_parameter_read(struct kobject *kobj, char *buf, loff_t off, size_t count) -{ - unsigned int size = IPL_PARMBLOCK_SIZE; - - if (off > size) - return 0; - if (off + count > size) - count = size - off; - - memcpy(buf, (void *) IPL_PARMBLOCK_START + off, count); - return count; -} - -static struct bin_attribute ipl_parameter_attr = { - .attr = { - .name = "binary_parameter", - .mode = S_IRUGO, - .owner = THIS_MODULE, - }, - .size = PAGE_SIZE, - .read = &ipl_parameter_read, -}; - -static ssize_t -ipl_scp_data_read(struct kobject *kobj, char *buf, loff_t off, size_t count) -{ - unsigned int size = IPL_PARMBLOCK_START->fcp.scp_data_len; - void *scp_data = &IPL_PARMBLOCK_START->fcp.scp_data; - - if (off > size) - return 0; - if (off + count > size) - count = size - off; - - memcpy(buf, scp_data + off, count); - return count; -} - -static struct bin_attribute ipl_scp_data_attr = { - .attr = { - .name = "scp_data", - .mode = S_IRUGO, - .owner = THIS_MODULE, - }, - .size = PAGE_SIZE, - .read = &ipl_scp_data_read, -}; - -static decl_subsys(ipl, NULL, NULL); - -static int ipl_register_fcp_files(void) -{ - int rc; - - rc = sysfs_create_group(&ipl_subsys.kset.kobj, - &ipl_fcp_attr_group); - if (rc) - goto out; - rc = sysfs_create_bin_file(&ipl_subsys.kset.kobj, - &ipl_parameter_attr); - if (rc) - goto out_ipl_parm; - rc = sysfs_create_bin_file(&ipl_subsys.kset.kobj, - &ipl_scp_data_attr); - if (!rc) - goto out; - - sysfs_remove_bin_file(&ipl_subsys.kset.kobj, &ipl_parameter_attr); - -out_ipl_parm: - sysfs_remove_group(&ipl_subsys.kset.kobj, &ipl_fcp_attr_group); -out: - return rc; -} - -static int __init -ipl_device_sysfs_register(void) { - int rc; - - rc = firmware_register(&ipl_subsys); - if (rc) - goto out; - - switch (get_ipl_type()) { - case ipl_type_ccw: - rc = sysfs_create_group(&ipl_subsys.kset.kobj, - &ipl_ccw_attr_group); - break; - case ipl_type_fcp: - rc = ipl_register_fcp_files(); - break; - default: - rc = sysfs_create_group(&ipl_subsys.kset.kobj, - &ipl_unknown_attr_group); - break; - } - - if (rc) - firmware_unregister(&ipl_subsys); -out: - return rc; -} - -__initcall(ipl_device_sysfs_register); diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index a887b686f279..642095ec7c07 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -114,29 +114,26 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs) { unsigned long old_mask = regs->psw.mask; - int err; - + _sigregs user_sregs; + save_access_regs(current->thread.acrs); /* Copy a 'clean' PSW mask to the user to avoid leaking information about whether PER is currently on. 
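The comment above explains what the PSW_MASK_MERGE(PSW_USER_BITS, ...) call just below computes: the privileged PSW bits are forced to a clean template so that user space cannot tell from the signal frame whether PER tracing is active, while the user-visible bits are carried over. A minimal C sketch of that merge, with an invented bit split (the real s390 PSW layout is different):

    #include <stdio.h>

    /* Illustrative split; the real PSW_USER_BITS / mask layout differs. */
    #define USER_VISIBLE_BITS 0x000000000000ffffUL

    /* Privileged bits come from the clean template, user-visible bits
     * from the current mask, so flags such as the PER control bit
     * never leak into the saved signal context. */
    static unsigned long psw_mask_merge(unsigned long clean, unsigned long cur)
    {
        return (clean & ~USER_VISIBLE_BITS) | (cur & USER_VISIBLE_BITS);
    }

    int main(void)
    {
        unsigned long clean = 0x0705000000000000UL; /* template mask */
        unsigned long cur   = 0x4705000000001234UL; /* PER-style bit set */
        printf("merged: %#lx\n", psw_mask_merge(clean, cur));
        return 0;
    }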
*/ regs->psw.mask = PSW_MASK_MERGE(PSW_USER_BITS, regs->psw.mask); - err = __copy_to_user(&sregs->regs.psw, &regs->psw, - sizeof(sregs->regs.psw)+sizeof(sregs->regs.gprs)); + memcpy(&user_sregs.regs.psw, &regs->psw, sizeof(sregs->regs.psw) + + sizeof(sregs->regs.gprs)); regs->psw.mask = old_mask; - if (err != 0) - return err; - err = __copy_to_user(&sregs->regs.acrs, current->thread.acrs, - sizeof(sregs->regs.acrs)); - if (err != 0) - return err; + memcpy(&user_sregs.regs.acrs, current->thread.acrs, + sizeof(sregs->regs.acrs)); /* * We have to store the fp registers to current->thread.fp_regs * to merge them with the emulated registers. */ save_fp_regs(&current->thread.fp_regs); - return __copy_to_user(&sregs->fpregs, &current->thread.fp_regs, - sizeof(s390_fp_regs)); + memcpy(&user_sregs.fpregs, &current->thread.fp_regs, + sizeof(s390_fp_regs)); + return __copy_to_user(sregs, &user_sregs, sizeof(_sigregs)); } /* Returns positive number on error */ @@ -144,27 +141,25 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) { unsigned long old_mask = regs->psw.mask; int err; + _sigregs user_sregs; /* Always make any pending restarted system call return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; - err = __copy_from_user(&regs->psw, &sregs->regs.psw, - sizeof(sregs->regs.psw)+sizeof(sregs->regs.gprs)); + err = __copy_from_user(&user_sregs, sregs, sizeof(_sigregs)); regs->psw.mask = PSW_MASK_MERGE(old_mask, regs->psw.mask); regs->psw.addr |= PSW_ADDR_AMODE; if (err) return err; - err = __copy_from_user(&current->thread.acrs, &sregs->regs.acrs, - sizeof(sregs->regs.acrs)); - if (err) - return err; + memcpy(&regs->psw, &user_sregs.regs.psw, sizeof(sregs->regs.psw) + + sizeof(sregs->regs.gprs)); + memcpy(&current->thread.acrs, &user_sregs.regs.acrs, + sizeof(sregs->regs.acrs)); restore_access_regs(current->thread.acrs); - err = __copy_from_user(&current->thread.fp_regs, &sregs->fpregs, - sizeof(s390_fp_regs)); + memcpy(&current->thread.fp_regs, &user_sregs.fpregs, + sizeof(s390_fp_regs)); current->thread.fp_regs.fpc &= FPC_VALID_MASK; - if (err) - return err; restore_fp_regs(&current->thread.fp_regs); regs->trap = -1; /* disable syscall checks */ @@ -457,6 +452,7 @@ void do_signal(struct pt_regs *regs) case -ERESTART_RESTARTBLOCK: regs->gprs[2] = -EINTR; } + regs->trap = -1; /* Don't deal with this again. */ } /* Get signal to deliver. When running under ptrace, at this point diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 8e03219eea76..b2e6f4c8d382 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -59,9 +59,6 @@ static struct task_struct *current_set[NR_CPUS]; extern char vmhalt_cmd[]; extern char vmpoff_cmd[]; -extern void reipl(unsigned long devno); -extern void reipl_diag(void); - static void smp_ext_bitcall(int, ec_bit_sig); static void smp_ext_bitcall_others(ec_bit_sig); @@ -279,12 +276,7 @@ static void do_machine_restart(void * __unused) * interrupted by an external interrupt and s390irq * locks are always held disabled).
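The reworked save_sigregs()/restore_sigregs() above cross the user/kernel boundary exactly once: the whole _sigregs image is staged in kernel memory and moved with a single __copy_to_user() or __copy_from_user(), instead of three partial copies each of which could fault halfway. A userspace reduction of the staging pattern, with an invented layout standing in for _sigregs:

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for _sigregs; the real structure carries PSW, general,
     * access and floating point registers. */
    struct sigregs_demo {
        unsigned long psw[2];
        unsigned long gprs[16];
        unsigned int  acrs[16];
    };

    /* Build the complete image first, then do one bulk copy: either
     * the whole frame arrives or the caller sees a single error. */
    static void save_regs(struct sigregs_demo *dst,
                          const unsigned long *gprs, const unsigned int *acrs)
    {
        struct sigregs_demo staging = { .psw = { 0, 0 } };

        memcpy(staging.gprs, gprs, sizeof(staging.gprs));
        memcpy(staging.acrs, acrs, sizeof(staging.acrs));
        memcpy(dst, &staging, sizeof(staging)); /* the single crossing */
    }

    int main(void)
    {
        unsigned long gprs[16] = { 42 };
        unsigned int acrs[16] = { 7 };
        struct sigregs_demo out;

        save_regs(&out, gprs, acrs);
        printf("gpr0=%lu acr0=%u\n", out.gprs[0], out.acrs[0]);
        return 0;
    }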
*/ - reipl_diag(); - - if (MACHINE_IS_VM) - cpcmd ("IPL", NULL, 0, NULL); - else - reipl (0x10000 | S390_lowcore.ipl_device); + do_reipl(); } void machine_restart_smp(char * __unused) diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index bde1d1d59858..c4982c963424 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -29,6 +29,7 @@ #include <linux/module.h> #include <linux/kallsyms.h> #include <linux/reboot.h> +#include <linux/kprobes.h> #include <asm/system.h> #include <asm/uaccess.h> @@ -39,6 +40,7 @@ #include <asm/s390_ext.h> #include <asm/lowcore.h> #include <asm/debug.h> +#include <asm/kdebug.h> /* Called from entry.S only */ extern void handle_per_exception(struct pt_regs *regs); @@ -74,6 +76,20 @@ static int kstack_depth_to_print = 12; static int kstack_depth_to_print = 20; #endif /* CONFIG_64BIT */ +ATOMIC_NOTIFIER_HEAD(s390die_chain); + +int register_die_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&s390die_chain, nb); +} +EXPORT_SYMBOL(register_die_notifier); + +int unregister_die_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&s390die_chain, nb); +} +EXPORT_SYMBOL(unregister_die_notifier); + /* * For show_trace we have three different stacks to consider: * - the panic stack which is used if the kernel stack has overflown @@ -305,8 +321,9 @@ report_user_fault(long interruption_code, struct pt_regs *regs) #endif } -static void inline do_trap(long interruption_code, int signr, char *str, - struct pt_regs *regs, siginfo_t *info) +static void __kprobes inline do_trap(long interruption_code, int signr, + char *str, struct pt_regs *regs, + siginfo_t *info) { /* * We got all needed information from the lowcore and can @@ -315,6 +332,10 @@ static void inline do_trap(long interruption_code, int signr, char *str, if (regs->psw.mask & PSW_MASK_PSTATE) local_irq_enable(); + if (notify_die(DIE_TRAP, str, regs, interruption_code, + interruption_code, signr) == NOTIFY_STOP) + return; + if (regs->psw.mask & PSW_MASK_PSTATE) { struct task_struct *tsk = current; @@ -336,8 +357,12 @@ static inline void __user *get_check_address(struct pt_regs *regs) return (void __user *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN); } -void do_single_step(struct pt_regs *regs) +void __kprobes do_single_step(struct pt_regs *regs) { + if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, + SIGTRAP) == NOTIFY_STOP){ + return; + } if ((current->ptrace & PT_PTRACED) != 0) force_sig(SIGTRAP, current); } diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index ff5f7bb34f75..af9e69a03011 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -24,6 +24,7 @@ SECTIONS *(.text) SCHED_TEXT LOCK_TEXT + KPROBES_TEXT *(.fixup) *(.gnu.warning) } = 0x0700 @@ -117,7 +118,7 @@ SECTIONS /* Sections to be discarded */ /DISCARD/ : { - *(.exitcall.exit) + *(.exit.text) *(.exit.data) *(.exitcall.exit) } /* Stabs debugging sections.
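The register_die_notifier()/notify_die() machinery added to traps.c above is the hook that kprobes plugs into: handlers run down a chain, and any one of them may consume the event by returning NOTIFY_STOP before the default signal delivery happens. A compact userspace model of the pattern (fixed-size chain, no locking, names invented):

    #include <stdio.h>

    enum { NOTIFY_DONE, NOTIFY_STOP };

    typedef int (*die_cb)(int trapnr, const char *str);

    static die_cb chain[8];
    static int nchain;

    static void register_die_notifier_demo(die_cb cb) { chain[nchain++] = cb; }

    /* Give every registered handler a chance to claim the event. */
    static int notify_die_demo(int trapnr, const char *str)
    {
        for (int i = 0; i < nchain; i++)
            if (chain[i](trapnr, str) == NOTIFY_STOP)
                return NOTIFY_STOP;
        return NOTIFY_DONE;
    }

    /* A kprobes-like client: claims single-step events, ignores the rest. */
    static int kprobe_handler(int trapnr, const char *str)
    {
        (void)str;
        return trapnr == 1 ? NOTIFY_STOP : NOTIFY_DONE;
    }

    int main(void)
    {
        register_die_notifier_demo(kprobe_handler);
        if (notify_die_demo(1, "sstep") == NOTIFY_STOP)
            printf("event consumed; default handling skipped\n");
        return 0;
    }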
*/ diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile index e05d087a6eae..c42ffedfdb49 100644 --- a/arch/s390/lib/Makefile +++ b/arch/s390/lib/Makefile @@ -4,6 +4,6 @@ EXTRA_AFLAGS := -traditional -lib-y += delay.o string.o -lib-y += $(if $(CONFIG_64BIT),uaccess64.o,uaccess.o) +lib-y += delay.o string.o uaccess_std.o +lib-$(CONFIG_64BIT) += uaccess_mvcos.o lib-$(CONFIG_SMP) += spinlock.o diff --git a/arch/s390/lib/uaccess.S b/arch/s390/lib/uaccess.S deleted file mode 100644 index 837275284d9f..000000000000 --- a/arch/s390/lib/uaccess.S +++ /dev/null @@ -1,211 +0,0 @@ -/* - * arch/s390/lib/uaccess.S - * __copy_{from|to}_user functions. - * - * s390 - * Copyright (C) 2000,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Authors(s): Martin Schwidefsky (schwidefsky@de.ibm.com) - * - * These functions have standard call interface - */ - -#include <linux/errno.h> -#include <asm/lowcore.h> -#include <asm/asm-offsets.h> - - .text - .align 4 - .globl __copy_from_user_asm - # %r2 = to, %r3 = n, %r4 = from -__copy_from_user_asm: - slr %r0,%r0 -0: mvcp 0(%r3,%r2),0(%r4),%r0 - jnz 1f - slr %r2,%r2 - br %r14 -1: la %r2,256(%r2) - la %r4,256(%r4) - ahi %r3,-256 -2: mvcp 0(%r3,%r2),0(%r4),%r0 - jnz 1b -3: slr %r2,%r2 - br %r14 -4: lhi %r0,-4096 - lr %r5,%r4 - slr %r5,%r0 - nr %r5,%r0 # %r5 = (%r4 + 4096) & -4096 - slr %r5,%r4 # %r5 = #bytes to next user page boundary - clr %r3,%r5 # copy crosses next page boundary ? - jnh 6f # no, the current page faulted - # move with the reduced length which is < 256 -5: mvcp 0(%r5,%r2),0(%r4),%r0 - slr %r3,%r5 -6: lr %r2,%r3 - br %r14 - .section __ex_table,"a" - .long 0b,4b - .long 2b,4b - .long 5b,6b - .previous - - .align 4 - .text - .globl __copy_to_user_asm - # %r2 = from, %r3 = n, %r4 = to -__copy_to_user_asm: - slr %r0,%r0 -0: mvcs 0(%r3,%r4),0(%r2),%r0 - jnz 1f - slr %r2,%r2 - br %r14 -1: la %r2,256(%r2) - la %r4,256(%r4) - ahi %r3,-256 -2: mvcs 0(%r3,%r4),0(%r2),%r0 - jnz 1b -3: slr %r2,%r2 - br %r14 -4: lhi %r0,-4096 - lr %r5,%r4 - slr %r5,%r0 - nr %r5,%r0 # %r5 = (%r4 + 4096) & -4096 - slr %r5,%r4 # %r5 = #bytes to next user page boundary - clr %r3,%r5 # copy crosses next page boundary ? - jnh 6f # no, the current page faulted - # move with the reduced length which is < 256 -5: mvcs 0(%r5,%r4),0(%r2),%r0 - slr %r3,%r5 -6: lr %r2,%r3 - br %r14 - .section __ex_table,"a" - .long 0b,4b - .long 2b,4b - .long 5b,6b - .previous - - .align 4 - .text - .globl __copy_in_user_asm - # %r2 = from, %r3 = n, %r4 = to -__copy_in_user_asm: - ahi %r3,-1 - jo 6f - sacf 256 - bras %r1,4f -0: ahi %r3,257 -1: mvc 0(1,%r4),0(%r2) - la %r2,1(%r2) - la %r4,1(%r4) - ahi %r3,-1 - jnz 1b -2: lr %r2,%r3 - br %r14 -3: mvc 0(256,%r4),0(%r2) - la %r2,256(%r2) - la %r4,256(%r4) -4: ahi %r3,-256 - jnm 3b -5: ex %r3,4(%r1) - sacf 0 -6: slr %r2,%r2 - br %r14 - .section __ex_table,"a" - .long 1b,2b - .long 3b,0b - .long 5b,0b - .previous - - .align 4 - .text - .globl __clear_user_asm - # %r2 = to, %r3 = n -__clear_user_asm: - bras %r5,0f - .long empty_zero_page -0: l %r5,0(%r5) - slr %r0,%r0 -1: mvcs 0(%r3,%r2),0(%r5),%r0 - jnz 2f - slr %r2,%r2 - br %r14 -2: la %r2,256(%r2) - ahi %r3,-256 -3: mvcs 0(%r3,%r2),0(%r5),%r0 - jnz 2b -4: slr %r2,%r2 - br %r14 -5: lhi %r0,-4096 - lr %r4,%r2 - slr %r4,%r0 - nr %r4,%r0 # %r4 = (%r2 + 4096) & -4096 - slr %r4,%r2 # %r4 = #bytes to next user page boundary - clr %r3,%r4 # clear crosses next page boundary ? 
- jnh 7f # no, the current page faulted - # clear with the reduced length which is < 256 -6: mvcs 0(%r4,%r2),0(%r5),%r0 - slr %r3,%r4 -7: lr %r2,%r3 - br %r14 - .section __ex_table,"a" - .long 1b,5b - .long 3b,5b - .long 6b,7b - .previous - - .align 4 - .text - .globl __strncpy_from_user_asm - # %r2 = count, %r3 = dst, %r4 = src -__strncpy_from_user_asm: - lhi %r0,0 - lr %r1,%r4 - la %r4,0(%r4) # clear high order bit from %r4 - la %r2,0(%r2,%r4) # %r2 points to first byte after string - sacf 256 -0: srst %r2,%r1 - jo 0b - sacf 0 - lr %r1,%r2 - jh 1f # \0 found in string ? - ahi %r1,1 # include \0 in copy -1: slr %r1,%r4 # %r1 = copy length (without \0) - slr %r2,%r4 # %r2 = return length (including \0) -2: mvcp 0(%r1,%r3),0(%r4),%r0 - jnz 3f - br %r14 -3: la %r3,256(%r3) - la %r4,256(%r4) - ahi %r1,-256 - mvcp 0(%r1,%r3),0(%r4),%r0 - jnz 3b - br %r14 -4: sacf 0 - lhi %r2,-EFAULT - br %r14 - .section __ex_table,"a" - .long 0b,4b - .previous - - .align 4 - .text - .globl __strnlen_user_asm - # %r2 = count, %r3 = src -__strnlen_user_asm: - lhi %r0,0 - lr %r1,%r3 - la %r3,0(%r3) # clear high order bit from %r4 - la %r2,0(%r2,%r3) # %r2 points to first byte after string - sacf 256 -0: srst %r2,%r1 - jo 0b - sacf 0 - ahi %r2,1 # strnlen_user result includes the \0 - # or return count+1 if \0 not found - slr %r2,%r3 - br %r14 -2: sacf 0 - slr %r2,%r2 # return 0 on exception - br %r14 - .section __ex_table,"a" - .long 0b,2b - .previous diff --git a/arch/s390/lib/uaccess64.S b/arch/s390/lib/uaccess64.S deleted file mode 100644 index 1f755be22f92..000000000000 --- a/arch/s390/lib/uaccess64.S +++ /dev/null @@ -1,207 +0,0 @@ -/* - * arch/s390x/lib/uaccess.S - * __copy_{from|to}_user functions. - * - * s390 - * Copyright (C) 2000,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Authors(s): Martin Schwidefsky (schwidefsky@de.ibm.com) - * - * These functions have standard call interface - */ - -#include <linux/errno.h> -#include <asm/lowcore.h> -#include <asm/asm-offsets.h> - - .text - .align 4 - .globl __copy_from_user_asm - # %r2 = to, %r3 = n, %r4 = from -__copy_from_user_asm: - slgr %r0,%r0 -0: mvcp 0(%r3,%r2),0(%r4),%r0 - jnz 1f - slgr %r2,%r2 - br %r14 -1: la %r2,256(%r2) - la %r4,256(%r4) - aghi %r3,-256 -2: mvcp 0(%r3,%r2),0(%r4),%r0 - jnz 1b -3: slgr %r2,%r2 - br %r14 -4: lghi %r0,-4096 - lgr %r5,%r4 - slgr %r5,%r0 - ngr %r5,%r0 # %r5 = (%r4 + 4096) & -4096 - slgr %r5,%r4 # %r5 = #bytes to next user page boundary - clgr %r3,%r5 # copy crosses next page boundary ? - jnh 6f # no, the current page faulted - # move with the reduced length which is < 256 -5: mvcp 0(%r5,%r2),0(%r4),%r0 - slgr %r3,%r5 -6: lgr %r2,%r3 - br %r14 - .section __ex_table,"a" - .quad 0b,4b - .quad 2b,4b - .quad 5b,6b - .previous - - .align 4 - .text - .globl __copy_to_user_asm - # %r2 = from, %r3 = n, %r4 = to -__copy_to_user_asm: - slgr %r0,%r0 -0: mvcs 0(%r3,%r4),0(%r2),%r0 - jnz 1f - slgr %r2,%r2 - br %r14 -1: la %r2,256(%r2) - la %r4,256(%r4) - aghi %r3,-256 -2: mvcs 0(%r3,%r4),0(%r2),%r0 - jnz 1b -3: slgr %r2,%r2 - br %r14 -4: lghi %r0,-4096 - lgr %r5,%r4 - slgr %r5,%r0 - ngr %r5,%r0 # %r5 = (%r4 + 4096) & -4096 - slgr %r5,%r4 # %r5 = #bytes to next user page boundary - clgr %r3,%r5 # copy crosses next page boundary ? 
- jnh 6f # no, the current page faulted - # move with the reduced length which is < 256 -5: mvcs 0(%r5,%r4),0(%r2),%r0 - slgr %r3,%r5 -6: lgr %r2,%r3 - br %r14 - .section __ex_table,"a" - .quad 0b,4b - .quad 2b,4b - .quad 5b,6b - .previous - - .align 4 - .text - .globl __copy_in_user_asm - # %r2 = from, %r3 = n, %r4 = to -__copy_in_user_asm: - aghi %r3,-1 - jo 6f - sacf 256 - bras %r1,4f -0: aghi %r3,257 -1: mvc 0(1,%r4),0(%r2) - la %r2,1(%r2) - la %r4,1(%r4) - aghi %r3,-1 - jnz 1b -2: lgr %r2,%r3 - br %r14 -3: mvc 0(256,%r4),0(%r2) - la %r2,256(%r2) - la %r4,256(%r4) -4: aghi %r3,-256 - jnm 3b -5: ex %r3,4(%r1) - sacf 0 -6: slgr %r2,%r2 - br %r14 - .section __ex_table,"a" - .quad 1b,2b - .quad 3b,0b - .quad 5b,0b - .previous - - .align 4 - .text - .globl __clear_user_asm - # %r2 = to, %r3 = n -__clear_user_asm: - slgr %r0,%r0 - larl %r5,empty_zero_page -1: mvcs 0(%r3,%r2),0(%r5),%r0 - jnz 2f - slgr %r2,%r2 - br %r14 -2: la %r2,256(%r2) - aghi %r3,-256 -3: mvcs 0(%r3,%r2),0(%r5),%r0 - jnz 2b -4: slgr %r2,%r2 - br %r14 -5: lghi %r0,-4096 - lgr %r4,%r2 - slgr %r4,%r0 - ngr %r4,%r0 # %r4 = (%r2 + 4096) & -4096 - slgr %r4,%r2 # %r4 = #bytes to next user page boundary - clgr %r3,%r4 # clear crosses next page boundary ? - jnh 7f # no, the current page faulted - # clear with the reduced length which is < 256 -6: mvcs 0(%r4,%r2),0(%r5),%r0 - slgr %r3,%r4 -7: lgr %r2,%r3 - br %r14 - .section __ex_table,"a" - .quad 1b,5b - .quad 3b,5b - .quad 6b,7b - .previous - - .align 4 - .text - .globl __strncpy_from_user_asm - # %r2 = count, %r3 = dst, %r4 = src -__strncpy_from_user_asm: - lghi %r0,0 - lgr %r1,%r4 - la %r2,0(%r2,%r4) # %r2 points to first byte after string - sacf 256 -0: srst %r2,%r1 - jo 0b - sacf 0 - lgr %r1,%r2 - jh 1f # \0 found in string ? - aghi %r1,1 # include \0 in copy -1: slgr %r1,%r4 # %r1 = copy length (without \0) - slgr %r2,%r4 # %r2 = return length (including \0) -2: mvcp 0(%r1,%r3),0(%r4),%r0 - jnz 3f - br %r14 -3: la %r3,256(%r3) - la %r4,256(%r4) - aghi %r1,-256 - mvcp 0(%r1,%r3),0(%r4),%r0 - jnz 3b - br %r14 -4: sacf 0 - lghi %r2,-EFAULT - br %r14 - .section __ex_table,"a" - .quad 0b,4b - .previous - - .align 4 - .text - .globl __strnlen_user_asm - # %r2 = count, %r3 = src -__strnlen_user_asm: - lghi %r0,0 - lgr %r1,%r3 - la %r2,0(%r2,%r3) # %r2 points to first byte after string - sacf 256 -0: srst %r2,%r1 - jo 0b - sacf 0 - aghi %r2,1 # strnlen_user result includes the \0 - # or return count+1 if \0 not found - slgr %r2,%r3 - br %r14 -2: sacf 0 - slgr %r2,%r2 # return 0 on exception - br %r14 - .section __ex_table,"a" - .quad 0b,2b - .previous diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c new file mode 100644 index 000000000000..86c96d6c191a --- /dev/null +++ b/arch/s390/lib/uaccess_mvcos.c @@ -0,0 +1,156 @@ +/* + * arch/s390/lib/uaccess_mvcos.c + * + * Optimized user space access functions based on mvcos. + * + * Copyright (C) IBM Corp.
2006 + Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), + * Gerald Schaefer (gerald.schaefer@de.ibm.com) + */ + +#include <linux/errno.h> +#include <linux/mm.h> +#include <asm/uaccess.h> +#include <asm/futex.h> + +#ifndef __s390x__ +#define AHI "ahi" +#define ALR "alr" +#define CLR "clr" +#define LHI "lhi" +#define SLR "slr" +#else +#define AHI "aghi" +#define ALR "algr" +#define CLR "clgr" +#define LHI "lghi" +#define SLR "slgr" +#endif + +size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x) +{ + register unsigned long reg0 asm("0") = 0x81UL; + unsigned long tmp1, tmp2; + + tmp1 = -4096UL; + asm volatile( + "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n" + " jz 4f\n" + "1:"ALR" %0,%3\n" + " "SLR" %1,%3\n" + " "SLR" %2,%3\n" + " j 0b\n" + "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */ + " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */ + " "SLR" %4,%1\n" + " "CLR" %0,%4\n" /* copy crosses next page boundary? */ + " jnh 5f\n" + "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n" + " "SLR" %0,%4\n" + " j 5f\n" + "4:"SLR" %0,%0\n" + "5: \n" + EX_TABLE(0b,2b) EX_TABLE(3b,5b) + : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) + : "d" (reg0) : "cc", "memory"); + return size; +} + +size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x) +{ + register unsigned long reg0 asm("0") = 0x810000UL; + unsigned long tmp1, tmp2; + + tmp1 = -4096UL; + asm volatile( + "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n" + " jz 4f\n" + "1:"ALR" %0,%3\n" + " "SLR" %1,%3\n" + " "SLR" %2,%3\n" + " j 0b\n" + "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */ + " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */ + " "SLR" %4,%1\n" + " "CLR" %0,%4\n" /* copy crosses next page boundary? */ + " jnh 5f\n" + "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n" + " "SLR" %0,%4\n" + " j 5f\n" + "4:"SLR" %0,%0\n" + "5: \n" + EX_TABLE(0b,2b) EX_TABLE(3b,5b) + : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) + : "d" (reg0) : "cc", "memory"); + return size; +} + +size_t copy_in_user_mvcos(size_t size, void __user *to, const void __user *from) +{ + register unsigned long reg0 asm("0") = 0x810081UL; + unsigned long tmp1, tmp2; + + tmp1 = -4096UL; + /* FIXME: copy with reduced length. */ + asm volatile( + "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n" + " jz 2f\n" + "1:"ALR" %0,%3\n" + " "SLR" %1,%3\n" + " "SLR" %2,%3\n" + " j 0b\n" + "2:"SLR" %0,%0\n" + "3: \n" + EX_TABLE(0b,3b) + : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2) + : "d" (reg0) : "cc", "memory"); + return size; +} + +size_t clear_user_mvcos(size_t size, void __user *to) +{ + register unsigned long reg0 asm("0") = 0x810000UL; + unsigned long tmp1, tmp2; + + tmp1 = -4096UL; + asm volatile( + "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n" + " jz 4f\n" + "1:"ALR" %0,%2\n" + " "SLR" %1,%2\n" + " j 0b\n" + "2: la %3,4095(%1)\n"/* %3 = to + 4095 */ + " nr %3,%2\n" /* %3 = (to + 4095) & -4096 */ + " "SLR" %3,%1\n" + " "CLR" %0,%3\n" /* copy crosses next page boundary?
*/ + " jnh 5f\n" + "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n" + " "SLR" %0,%3\n" + " j 5f\n" + "4:"SLR" %0,%0\n" + "5: \n" + EX_TABLE(0b,2b) EX_TABLE(3b,5b) + : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2) + : "a" (empty_zero_page), "d" (reg0) : "cc", "memory"); + return size; +} + +extern size_t copy_from_user_std_small(size_t, const void __user *, void *); +extern size_t copy_to_user_std_small(size_t, void __user *, const void *); +extern size_t strnlen_user_std(size_t, const char __user *); +extern size_t strncpy_from_user_std(size_t, const char __user *, char *); +extern int futex_atomic_op(int, int __user *, int, int *); +extern int futex_atomic_cmpxchg(int __user *, int, int); + +struct uaccess_ops uaccess_mvcos = { + .copy_from_user = copy_from_user_mvcos, + .copy_from_user_small = copy_from_user_std_small, + .copy_to_user = copy_to_user_mvcos, + .copy_to_user_small = copy_to_user_std_small, + .copy_in_user = copy_in_user_mvcos, + .clear_user = clear_user_mvcos, + .strnlen_user = strnlen_user_std, + .strncpy_from_user = strncpy_from_user_std, + .futex_atomic_op = futex_atomic_op, + .futex_atomic_cmpxchg = futex_atomic_cmpxchg, +}; diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c new file mode 100644 index 000000000000..9a4d4a29ea79 --- /dev/null +++ b/arch/s390/lib/uaccess_std.c @@ -0,0 +1,340 @@ +/* + * arch/s390/lib/uaccess_std.c + * + * Standard user space access functions based on mvcp/mvcs and doing + * interesting things in the secondary space mode. + * + * Copyright (C) IBM Corp. 2006 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), + * Gerald Schaefer (gerald.schaefer@de.ibm.com) + */ + +#include <linux/errno.h> +#include <linux/mm.h> +#include <asm/uaccess.h> +#include <asm/futex.h> + +#ifndef __s390x__ +#define AHI "ahi" +#define ALR "alr" +#define CLR "clr" +#define LHI "lhi" +#define SLR "slr" +#else +#define AHI "aghi" +#define ALR "algr" +#define CLR "clgr" +#define LHI "lghi" +#define SLR "slgr" +#endif + +size_t copy_from_user_std(size_t size, const void __user *ptr, void *x) +{ + unsigned long tmp1, tmp2; + + tmp1 = -256UL; + asm volatile( + "0: mvcp 0(%0,%2),0(%1),%3\n" + " jz 5f\n" + "1:"ALR" %0,%3\n" + " la %1,256(%1)\n" + " la %2,256(%2)\n" + "2: mvcp 0(%0,%2),0(%1),%3\n" + " jnz 1b\n" + " j 5f\n" + "3: la %4,255(%1)\n" /* %4 = ptr + 255 */ + " "LHI" %3,-4096\n" + " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */ + " "SLR" %4,%1\n" + " "CLR" %0,%4\n" /* copy crosses next page boundary? */ + " jnh 6f\n" + "4: mvcp 0(%4,%2),0(%1),%3\n" + " "SLR" %0,%4\n" + " j 6f\n" + "5:"SLR" %0,%0\n" + "6: \n" + EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b) + : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) + : : "cc", "memory"); + return size; +} + +size_t copy_from_user_std_small(size_t size, const void __user *ptr, void *x) +{ + unsigned long tmp1, tmp2; + + tmp1 = 0UL; + asm volatile( + "0: mvcp 0(%0,%2),0(%1),%3\n" + " "SLR" %0,%0\n" + " j 3f\n" + "1: la %4,255(%1)\n" /* %4 = ptr + 255 */ + " "LHI" %3,-4096\n" + " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */ + " "SLR" %4,%1\n" + " "CLR" %0,%4\n" /* copy crosses next page boundary? 
*/ + " jnh 3f\n" + "2: mvcp 0(%4,%2),0(%1),%3\n" + " "SLR" %0,%4\n" + "3:\n" + EX_TABLE(0b,1b) EX_TABLE(2b,3b) + : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) + : : "cc", "memory"); + return size; +} + +size_t copy_to_user_std(size_t size, void __user *ptr, const void *x) +{ + unsigned long tmp1, tmp2; + + tmp1 = -256UL; + asm volatile( + "0: mvcs 0(%0,%1),0(%2),%3\n" + " jz 5f\n" + "1:"ALR" %0,%3\n" + " la %1,256(%1)\n" + " la %2,256(%2)\n" + "2: mvcs 0(%0,%1),0(%2),%3\n" + " jnz 1b\n" + " j 5f\n" + "3: la %4,255(%1)\n" /* %4 = ptr + 255 */ + " "LHI" %3,-4096\n" + " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */ + " "SLR" %4,%1\n" + " "CLR" %0,%4\n" /* copy crosses next page boundary? */ + " jnh 6f\n" + "4: mvcs 0(%4,%1),0(%2),%3\n" + " "SLR" %0,%4\n" + " j 6f\n" + "5:"SLR" %0,%0\n" + "6: \n" + EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b) + : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) + : : "cc", "memory"); + return size; +} + +size_t copy_to_user_std_small(size_t size, void __user *ptr, const void *x) +{ + unsigned long tmp1, tmp2; + + tmp1 = 0UL; + asm volatile( + "0: mvcs 0(%0,%1),0(%2),%3\n" + " "SLR" %0,%0\n" + " j 3f\n" + "1: la %4,255(%1)\n" /* ptr + 255 */ + " "LHI" %3,-4096\n" + " nr %4,%3\n" /* (ptr + 255) & -4096UL */ + " "SLR" %4,%1\n" + " "CLR" %0,%4\n" /* copy crosses next page boundary? */ + " jnh 3f\n" + "2: mvcs 0(%4,%1),0(%2),%3\n" + " "SLR" %0,%4\n" + "3:\n" + EX_TABLE(0b,1b) EX_TABLE(2b,3b) + : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) + : : "cc", "memory"); + return size; +} + +size_t copy_in_user_std(size_t size, void __user *to, const void __user *from) +{ + unsigned long tmp1; + + asm volatile( + " "AHI" %0,-1\n" + " jo 5f\n" + " sacf 256\n" + " bras %3,3f\n" + "0:"AHI" %0,257\n" + "1: mvc 0(1,%1),0(%2)\n" + " la %1,1(%1)\n" + " la %2,1(%2)\n" + " "AHI" %0,-1\n" + " jnz 1b\n" + " j 5f\n" + "2: mvc 0(256,%1),0(%2)\n" + " la %1,256(%1)\n" + " la %2,256(%2)\n" + "3:"AHI" %0,-256\n" + " jnm 2b\n" + "4: ex %0,1b-0b(%3)\n" + " sacf 0\n" + "5: "SLR" %0,%0\n" + "6:\n" + EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b) + : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1) + : : "cc", "memory"); + return size; +} + +size_t clear_user_std(size_t size, void __user *to) +{ + unsigned long tmp1, tmp2; + + asm volatile( + " "AHI" %0,-1\n" + " jo 5f\n" + " sacf 256\n" + " bras %3,3f\n" + " xc 0(1,%1),0(%1)\n" + "0:"AHI" %0,257\n" + " la %2,255(%1)\n" /* %2 = ptr + 255 */ + " srl %2,12\n" + " sll %2,12\n" /* %2 = (ptr + 255) & -4096 */ + " "SLR" %2,%1\n" + " "CLR" %0,%2\n" /* clear crosses next page boundary? 
*/ + " jnh 5f\n" + " "AHI" %2,-1\n" + "1: ex %2,0(%3)\n" + " "AHI" %2,1\n" + " "SLR" %0,%2\n" + " j 5f\n" + "2: xc 0(256,%1),0(%1)\n" + " la %1,256(%1)\n" + "3:"AHI" %0,-256\n" + " jnm 2b\n" + "4: ex %0,0(%3)\n" + " sacf 0\n" + "5: "SLR" %0,%0\n" + "6:\n" + EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b) + : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2) + : : "cc", "memory"); + return size; +} + +size_t strnlen_user_std(size_t size, const char __user *src) +{ + register unsigned long reg0 asm("0") = 0UL; + unsigned long tmp1, tmp2; + + asm volatile( + " la %2,0(%1)\n" + " la %3,0(%0,%1)\n" + " "SLR" %0,%0\n" + " sacf 256\n" + "0: srst %3,%2\n" + " jo 0b\n" + " la %0,1(%3)\n" /* strnlen_user result includes \0 */ + " "SLR" %0,%1\n" + "1: sacf 0\n" + EX_TABLE(0b,1b) + : "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2) + : "d" (reg0) : "cc", "memory"); + return size; +} + +size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst) +{ + register unsigned long reg0 asm("0") = 0UL; + unsigned long tmp1, tmp2; + + asm volatile( + " la %3,0(%1)\n" + " la %4,0(%0,%1)\n" + " sacf 256\n" + "0: srst %4,%3\n" + " jo 0b\n" + " sacf 0\n" + " la %0,0(%4)\n" + " jh 1f\n" /* found \0 in string ? */ + " "AHI" %4,1\n" /* include \0 in copy */ + "1:"SLR" %0,%1\n" /* %0 = return length (without \0) */ + " "SLR" %4,%1\n" /* %4 = copy length (including \0) */ + "2: mvcp 0(%4,%2),0(%1),%5\n" + " jz 9f\n" + "3:"AHI" %4,-256\n" + " la %1,256(%1)\n" + " la %2,256(%2)\n" + "4: mvcp 0(%4,%2),0(%1),%5\n" + " jnz 3b\n" + " j 9f\n" + "7: sacf 0\n" + "8:"LHI" %0,%6\n" + "9:\n" + EX_TABLE(0b,7b) EX_TABLE(2b,8b) EX_TABLE(4b,8b) + : "+a" (size), "+a" (src), "+d" (dst), "=a" (tmp1), "=a" (tmp2) + : "d" (reg0), "K" (-EFAULT) : "cc", "memory"); + return size; +} + +#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \ + asm volatile( \ + " sacf 256\n" \ + "0: l %1,0(%6)\n" \ + "1:"insn \ + "2: cs %1,%2,0(%6)\n" \ + "3: jl 1b\n" \ + " lhi %0,0\n" \ + "4: sacf 0\n" \ + EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \ + : "=d" (ret), "=&d" (oldval), "=&d" (newval), \ + "=m" (*uaddr) \ + : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ + "m" (*uaddr) : "cc"); + +int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old) +{ + int oldval = 0, newval, ret; + + inc_preempt_count(); + + switch (op) { + case FUTEX_OP_SET: + __futex_atomic_op("lr %2,%5\n", + ret, oldval, newval, uaddr, oparg); + break; + case FUTEX_OP_ADD: + __futex_atomic_op("lr %2,%1\nar %2,%5\n", + ret, oldval, newval, uaddr, oparg); + break; + case FUTEX_OP_OR: + __futex_atomic_op("lr %2,%1\nor %2,%5\n", + ret, oldval, newval, uaddr, oparg); + break; + case FUTEX_OP_ANDN: + __futex_atomic_op("lr %2,%1\nnr %2,%5\n", + ret, oldval, newval, uaddr, oparg); + break; + case FUTEX_OP_XOR: + __futex_atomic_op("lr %2,%1\nxr %2,%5\n", + ret, oldval, newval, uaddr, oparg); + break; + default: + ret = -ENOSYS; + } + dec_preempt_count(); + *old = oldval; + return ret; +} + +int futex_atomic_cmpxchg(int __user *uaddr, int oldval, int newval) +{ + int ret; + + asm volatile( + " sacf 256\n" + " cs %1,%4,0(%5)\n" + "0: lr %0,%1\n" + "1: sacf 0\n" + EX_TABLE(0b,1b) + : "=d" (ret), "+d" (oldval), "=m" (*uaddr) + : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) + : "cc", "memory" ); + return ret; +} + +struct uaccess_ops uaccess_std = { + .copy_from_user = copy_from_user_std, + .copy_from_user_small = copy_from_user_std_small, + .copy_to_user = copy_to_user_std, + .copy_to_user_small = copy_to_user_std_small, + .copy_in_user =
copy_in_user_std, + .clear_user = clear_user_std, + .strnlen_user = strnlen_user_std, + .strncpy_from_user = strncpy_from_user_std, + .futex_atomic_op = futex_atomic_op, + .futex_atomic_cmpxchg = futex_atomic_cmpxchg, +}; diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c index ceea51cff03b..786a44dba5bf 100644 --- a/arch/s390/mm/cmm.c +++ b/arch/s390/mm/cmm.c @@ -53,22 +53,6 @@ static void cmm_timer_fn(unsigned long); static void cmm_set_timer(void); static long -cmm_strtoul(const char *cp, char **endp) -{ - unsigned int base = 10; - - if (*cp == '0') { - base = 8; - cp++; - if ((*cp == 'x' || *cp == 'X') && isxdigit(cp[1])) { - base = 16; - cp++; - } - } - return simple_strtoul(cp, endp, base); -} - -static long cmm_alloc_pages(long pages, long *counter, struct cmm_page_array **list) { struct cmm_page_array *pa; @@ -276,7 +260,7 @@ cmm_pages_handler(ctl_table *ctl, int write, struct file *filp, return -EFAULT; buf[sizeof(buf) - 1] = '\0'; cmm_skip_blanks(buf, &p); - pages = cmm_strtoul(p, &p); + pages = simple_strtoul(p, &p, 0); if (ctl == &cmm_table[0]) cmm_set_pages(pages); else @@ -317,9 +301,9 @@ cmm_timeout_handler(ctl_table *ctl, int write, struct file *filp, return -EFAULT; buf[sizeof(buf) - 1] = '\0'; cmm_skip_blanks(buf, &p); - pages = cmm_strtoul(p, &p); + pages = simple_strtoul(p, &p, 0); cmm_skip_blanks(p, &p); - seconds = cmm_strtoul(p, &p); + seconds = simple_strtoul(p, &p, 0); cmm_set_timeout(pages, seconds); } else { len = sprintf(buf, "%ld %ld\n", @@ -382,24 +366,24 @@ cmm_smsg_target(char *from, char *msg) if (strncmp(msg, "SHRINK", 6) == 0) { if (!cmm_skip_blanks(msg + 6, &msg)) return; - pages = cmm_strtoul(msg, &msg); + pages = simple_strtoul(msg, &msg, 0); cmm_skip_blanks(msg, &msg); if (*msg == '\0') cmm_set_pages(pages); } else if (strncmp(msg, "RELEASE", 7) == 0) { if (!cmm_skip_blanks(msg + 7, &msg)) return; - pages = cmm_strtoul(msg, &msg); + pages = simple_strtoul(msg, &msg, 0); cmm_skip_blanks(msg, &msg); if (*msg == '\0') cmm_add_timed_pages(pages); } else if (strncmp(msg, "REUSE", 5) == 0) { if (!cmm_skip_blanks(msg + 5, &msg)) return; - pages = cmm_strtoul(msg, &msg); + pages = simple_strtoul(msg, &msg, 0); if (!cmm_skip_blanks(msg, &msg)) return; - seconds = cmm_strtoul(msg, &msg); + seconds = simple_strtoul(msg, &msg, 0); cmm_skip_blanks(msg, &msg); if (*msg == '\0') cmm_set_timeout(pages, seconds); diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 7cd82575813d..44f0cda7e72e 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -25,10 +25,12 @@ #include <linux/console.h> #include <linux/module.h> #include <linux/hardirq.h> +#include <linux/kprobes.h> #include <asm/system.h> #include <asm/uaccess.h> #include <asm/pgtable.h> +#include <asm/kdebug.h> #ifndef CONFIG_64BIT #define __FAIL_ADDR_MASK 0x7ffff000 @@ -48,6 +50,38 @@ extern int sysctl_userprocess_debug; extern void die(const char *,struct pt_regs *,long); +#ifdef CONFIG_KPROBES +ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain); +int register_page_fault_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&notify_page_fault_chain, nb); +} + +int unregister_page_fault_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb); +} + +static inline int notify_page_fault(enum die_val val, const char *str, + struct pt_regs *regs, long err, int trap, int sig) +{ + struct die_args args = { + .regs = regs, + .str = str, + .err = err, + .trapnr = trap, + .signr = sig + }; + return
atomic_notifier_call_chain(&notify_page_fault_chain, val, &args); +} +#else +static inline int notify_page_fault(enum die_val val, const char *str, + struct pt_regs *regs, long err, int trap, int sig) +{ + return NOTIFY_DONE; +} +#endif + extern spinlock_t timerlist_lock; /* @@ -159,7 +193,7 @@ static void do_sigsegv(struct pt_regs *regs, unsigned long error_code, * 11 Page translation -> Not present (nullification) * 3b Region third trans. -> Not present (nullification) */ -static inline void +static inline void __kprobes do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection) { struct task_struct *tsk; @@ -173,6 +207,10 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection) tsk = current; mm = tsk->mm; + if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14, + SIGSEGV) == NOTIFY_STOP) + return; + /* * Check for low-address protection. This needs to be treated * as a special case because the translation exception code diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 6e6b6de77770..cfd9b8f7a523 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -108,16 +108,23 @@ void __init paging_init(void) unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE; static const int ssm_mask = 0x04000000L; unsigned long ro_start_pfn, ro_end_pfn; + unsigned long zones_size[MAX_NR_ZONES]; ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata); ro_end_pfn = PFN_UP((unsigned long)&__end_rodata); + memset(zones_size, 0, sizeof(zones_size)); + zones_size[ZONE_DMA] = max_low_pfn; + free_area_init_node(0, &contig_page_data, zones_size, + __pa(PAGE_OFFSET) >> PAGE_SHIFT, + zholes_size); + /* unmap whole virtual address space */ pg_dir = swapper_pg_dir; - for (i=0;i<KERNEL_PGD_PTRS;i++) - pmd_clear((pmd_t*)pg_dir++); + for (i = 0; i < PTRS_PER_PGD; i++) + pmd_clear((pmd_t *) pg_dir++); /* * map whole physical memory to virtual memory (identity mapping) @@ -131,10 +138,7 @@ void __init paging_init(void) */ pg_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE); - pg_dir->pgd0 = (_PAGE_TABLE | __pa(pg_table)); - pg_dir->pgd1 = (_PAGE_TABLE | (__pa(pg_table)+1024)); - pg_dir->pgd2 = (_PAGE_TABLE | (__pa(pg_table)+2048)); - pg_dir->pgd3 = (_PAGE_TABLE | (__pa(pg_table)+3072)); + pmd_populate_kernel(&init_mm, (pmd_t *) pg_dir, pg_table); pg_dir++; for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) { @@ -143,8 +147,8 @@ void __init paging_init(void) else pte = pfn_pte(pfn, PAGE_KERNEL); if (pfn >= max_low_pfn) - pte_clear(&init_mm, 0, &pte); - set_pte(pg_table, pte); + pte_val(pte) = _PAGE_TYPE_EMPTY; + set_pte(pg_table, pte); pfn++; } } @@ -159,16 +163,6 @@ void __init paging_init(void) : : "m" (pgdir_k), "m" (ssm_mask)); local_flush_tlb(); - - { - unsigned long zones_size[MAX_NR_ZONES]; - - memset(zones_size, 0, sizeof(zones_size)); - zones_size[ZONE_DMA] = max_low_pfn; - free_area_init_node(0, &contig_page_data, zones_size, - __pa(PAGE_OFFSET) >> PAGE_SHIFT, - zholes_size); - } return; } @@ -236,10 +230,8 @@ void __init paging_init(void) pte = pfn_pte(pfn, __pgprot(_PAGE_RO)); else pte = pfn_pte(pfn, PAGE_KERNEL); - if (pfn >= max_low_pfn) { - pte_clear(&init_mm, 0, &pte); - continue; - } + if (pfn >= max_low_pfn) + pte_val(pte) = _PAGE_TYPE_EMPTY; set_pte(pt_dir, pte); pfn++; } diff --git a/arch/x86_64/crypto/Makefile b/arch/x86_64/crypto/Makefile index 426d20f4b72e..15b538a8b7f7 100644 --- a/arch/x86_64/crypto/Makefile +++ b/arch/x86_64/crypto/Makefile @@ -5,5 +5,8 @@ # obj-$(CONFIG_CRYPTO_AES_X86_64) +=
aes-x86_64.o +obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o aes-x86_64-y := aes-x86_64-asm.o aes.o +twofish-x86_64-y := twofish-x86_64-asm.o twofish.o + diff --git a/arch/x86_64/crypto/aes.c b/arch/x86_64/crypto/aes.c index 68866fab37aa..5cdb13ea5cc2 100644 --- a/arch/x86_64/crypto/aes.c +++ b/arch/x86_64/crypto/aes.c @@ -228,13 +228,14 @@ static void __init gen_tabs(void) } static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, - unsigned int key_len, u32 *flags) + unsigned int key_len) { struct aes_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *key = (const __le32 *)in_key; + u32 *flags = &tfm->crt_flags; u32 i, j, t, u, v, w; - if (key_len != 16 && key_len != 24 && key_len != 32) { + if (key_len % 8) { *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } diff --git a/arch/x86_64/crypto/twofish-x86_64-asm.S b/arch/x86_64/crypto/twofish-x86_64-asm.S new file mode 100644 index 000000000000..35974a586615 --- /dev/null +++ b/arch/x86_64/crypto/twofish-x86_64-asm.S @@ -0,0 +1,324 @@ +/*************************************************************************** +* Copyright (C) 2006 by Joachim Fritschi, <jfritschi@freenet.de> * +* * +* This program is free software; you can redistribute it and/or modify * +* it under the terms of the GNU General Public License as published by * +* the Free Software Foundation; either version 2 of the License, or * +* (at your option) any later version. * +* * +* This program is distributed in the hope that it will be useful, * +* but WITHOUT ANY WARRANTY; without even the implied warranty of * +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * +* GNU General Public License for more details. * +* * +* You should have received a copy of the GNU General Public License * +* along with this program; if not, write to the * +* Free Software Foundation, Inc., * +* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
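The aes_set_key() hunk above shows the setkey interface change this series applies to every cipher: the u32 *flags out-parameter is dropped and implementations report bad keys through the transform's own crt_flags field. A sketch with stand-in types (struct tfm_demo and the flag constant are invented); note also that the new key_len % 8 test is weaker than the old explicit 16/24/32 check, so lengths such as 8 or 40 now pass this particular guard:

    #include <errno.h>
    #include <stdio.h>

    #define RES_BAD_KEY_LEN 0x00200000u /* stand-in for CRYPTO_TFM_RES_BAD_KEY_LEN */

    struct tfm_demo {
        unsigned int crt_flags; /* result flags now live in the tfm */
    };

    static int set_key_demo(struct tfm_demo *tfm, const unsigned char *key,
                            unsigned int key_len)
    {
        unsigned int *flags = &tfm->crt_flags; /* was a separate parameter */

        (void)key;
        if (key_len != 16 && key_len != 24 && key_len != 32) {
            *flags |= RES_BAD_KEY_LEN; /* caller inspects the tfm */
            return -EINVAL;
        }
        /* ... expand the key schedule into the context ... */
        return 0;
    }

    int main(void)
    {
        struct tfm_demo tfm = { 0 };
        unsigned char key[20] = { 0 };

        if (set_key_demo(&tfm, key, sizeof(key)))
            printf("rejected, crt_flags=%#x\n", tfm.crt_flags);
        return 0;
    }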
* +***************************************************************************/ + +.file "twofish-x86_64-asm.S" +.text + +#include <asm/asm-offsets.h> + +#define a_offset 0 +#define b_offset 4 +#define c_offset 8 +#define d_offset 12 + +/* Structure of the crypto context struct*/ + +#define s0 0 /* S0 Array 256 Words each */ +#define s1 1024 /* S1 Array */ +#define s2 2048 /* S2 Array */ +#define s3 3072 /* S3 Array */ +#define w 4096 /* 8 whitening keys (word) */ +#define k 4128 /* key 1-32 ( word ) */ + +/* define a few register aliases to allow macro substitution */ + +#define R0 %rax +#define R0D %eax +#define R0B %al +#define R0H %ah + +#define R1 %rbx +#define R1D %ebx +#define R1B %bl +#define R1H %bh + +#define R2 %rcx +#define R2D %ecx +#define R2B %cl +#define R2H %ch + +#define R3 %rdx +#define R3D %edx +#define R3B %dl +#define R3H %dh + + +/* performs input whitening */ +#define input_whitening(src,context,offset)\ + xor w+offset(context), src; + +/* performs output whitening */ +#define output_whitening(src,context,offset)\ + xor w+16+offset(context), src; + + +/* + * a input register containing a (rotated 16) + * b input register containing b + * c input register containing c + * d input register containing d (already rol $1) + * operations on a and b are interleaved to increase performance + */ +#define encrypt_round(a,b,c,d,round)\ + movzx b ## B, %edi;\ + mov s1(%r11,%rdi,4),%r8d;\ + movzx a ## B, %edi;\ + mov s2(%r11,%rdi,4),%r9d;\ + movzx b ## H, %edi;\ + ror $16, b ## D;\ + xor s2(%r11,%rdi,4),%r8d;\ + movzx a ## H, %edi;\ + ror $16, a ## D;\ + xor s3(%r11,%rdi,4),%r9d;\ + movzx b ## B, %edi;\ + xor s3(%r11,%rdi,4),%r8d;\ + movzx a ## B, %edi;\ + xor (%r11,%rdi,4), %r9d;\ + movzx b ## H, %edi;\ + ror $15, b ## D;\ + xor (%r11,%rdi,4), %r8d;\ + movzx a ## H, %edi;\ + xor s1(%r11,%rdi,4),%r9d;\ + add %r8d, %r9d;\ + add %r9d, %r8d;\ + add k+round(%r11), %r9d;\ + xor %r9d, c ## D;\ + rol $15, c ## D;\ + add k+4+round(%r11),%r8d;\ + xor %r8d, d ## D; + +/* + * a input register containing a(rotated 16) + * b input register containing b + * c input register containing c + * d input register containing d (already rol $1) + * operations on a and b are interleaved to increase performance + * during the round a and b are prepared for the output whitening + */ +#define encrypt_last_round(a,b,c,d,round)\ + mov b ## D, %r10d;\ + shl $32, %r10;\ + movzx b ## B, %edi;\ + mov s1(%r11,%rdi,4),%r8d;\ + movzx a ## B, %edi;\ + mov s2(%r11,%rdi,4),%r9d;\ + movzx b ## H, %edi;\ + ror $16, b ## D;\ + xor s2(%r11,%rdi,4),%r8d;\ + movzx a ## H, %edi;\ + ror $16, a ## D;\ + xor s3(%r11,%rdi,4),%r9d;\ + movzx b ## B, %edi;\ + xor s3(%r11,%rdi,4),%r8d;\ + movzx a ## B, %edi;\ + xor (%r11,%rdi,4), %r9d;\ + xor a, %r10;\ + movzx b ## H, %edi;\ + xor (%r11,%rdi,4), %r8d;\ + movzx a ## H, %edi;\ + xor s1(%r11,%rdi,4),%r9d;\ + add %r8d, %r9d;\ + add %r9d, %r8d;\ + add k+round(%r11), %r9d;\ + xor %r9d, c ## D;\ + ror $1, c ## D;\ + add k+4+round(%r11),%r8d;\ + xor %r8d, d ## D + +/* + * a input register containing a + * b input register containing b (rotated 16) + * c input register containing c (already rol $1) + * d input register containing d + * operations on a and b are interleaved to increase performance + */ +#define decrypt_round(a,b,c,d,round)\ + movzx a ## B, %edi;\ + mov (%r11,%rdi,4), %r9d;\ + movzx b ## B, %edi;\ + mov s3(%r11,%rdi,4),%r8d;\ + movzx a ## H, %edi;\ + ror $16, a ## D;\ + xor s1(%r11,%rdi,4),%r9d;\ + movzx b ## H, %edi;\ + ror $16, b ## D;\ + xor (%r11,%rdi,4), %r8d;\ + movzx a
## B, %edi;\ + xor s2(%r11,%rdi,4),%r9d;\ + movzx b ## B, %edi;\ + xor s1(%r11,%rdi,4),%r8d;\ + movzx a ## H, %edi;\ + ror $15, a ## D;\ + xor s3(%r11,%rdi,4),%r9d;\ + movzx b ## H, %edi;\ + xor s2(%r11,%rdi,4),%r8d;\ + add %r8d, %r9d;\ + add %r9d, %r8d;\ + add k+round(%r11), %r9d;\ + xor %r9d, c ## D;\ + add k+4+round(%r11),%r8d;\ + xor %r8d, d ## D;\ + rol $15, d ## D; + +/* + * a input register containing a + * b input register containing b + * c input register containing c (already rol $1) + * d input register containing d + * operations on a and b are interleaved to increase performance + * during the round a and b are prepared for the output whitening + */ +#define decrypt_last_round(a,b,c,d,round)\ + movzx a ## B, %edi;\ + mov (%r11,%rdi,4), %r9d;\ + movzx b ## B, %edi;\ + mov s3(%r11,%rdi,4),%r8d;\ + movzx b ## H, %edi;\ + ror $16, b ## D;\ + xor (%r11,%rdi,4), %r8d;\ + movzx a ## H, %edi;\ + mov b ## D, %r10d;\ + shl $32, %r10;\ + xor a, %r10;\ + ror $16, a ## D;\ + xor s1(%r11,%rdi,4),%r9d;\ + movzx b ## B, %edi;\ + xor s1(%r11,%rdi,4),%r8d;\ + movzx a ## B, %edi;\ + xor s2(%r11,%rdi,4),%r9d;\ + movzx b ## H, %edi;\ + xor s2(%r11,%rdi,4),%r8d;\ + movzx a ## H, %edi;\ + xor s3(%r11,%rdi,4),%r9d;\ + add %r8d, %r9d;\ + add %r9d, %r8d;\ + add k+round(%r11), %r9d;\ + xor %r9d, c ## D;\ + add k+4+round(%r11),%r8d;\ + xor %r8d, d ## D;\ + ror $1, d ## D; + +.align 8 +.global twofish_enc_blk +.global twofish_dec_blk + +twofish_enc_blk: + pushq R1 + + /* %rdi contains the crypto tfm address */ + /* %rsi contains the output address */ + /* %rdx contains the input address */ + add $crypto_tfm_ctx_offset, %rdi /* set ctx address */ + /* ctx address is moved to free one non-rex register + as target for the 8bit high operations */ + mov %rdi, %r11 + + movq (R3), R1 + movq 8(R3), R3 + input_whitening(R1,%r11,a_offset) + input_whitening(R3,%r11,c_offset) + mov R1D, R0D + rol $16, R0D + shr $32, R1 + mov R3D, R2D + shr $32, R3 + rol $1, R3D + + encrypt_round(R0,R1,R2,R3,0); + encrypt_round(R2,R3,R0,R1,8); + encrypt_round(R0,R1,R2,R3,2*8); + encrypt_round(R2,R3,R0,R1,3*8); + encrypt_round(R0,R1,R2,R3,4*8); + encrypt_round(R2,R3,R0,R1,5*8); + encrypt_round(R0,R1,R2,R3,6*8); + encrypt_round(R2,R3,R0,R1,7*8); + encrypt_round(R0,R1,R2,R3,8*8); + encrypt_round(R2,R3,R0,R1,9*8); + encrypt_round(R0,R1,R2,R3,10*8); + encrypt_round(R2,R3,R0,R1,11*8); + encrypt_round(R0,R1,R2,R3,12*8); + encrypt_round(R2,R3,R0,R1,13*8); + encrypt_round(R0,R1,R2,R3,14*8); + encrypt_last_round(R2,R3,R0,R1,15*8); + + + output_whitening(%r10,%r11,a_offset) + movq %r10, (%rsi) + + shl $32, R1 + xor R0, R1 + + output_whitening(R1,%r11,c_offset) + movq R1, 8(%rsi) + + popq R1 + movq $1,%rax + ret + +twofish_dec_blk: + pushq R1 + + /* %rdi contains the crypto tfm address */ + /* %rsi contains the output address */ + /* %rdx contains the input address */ + add $crypto_tfm_ctx_offset, %rdi /* set ctx address */ + /* ctx address is moved to free one non-rex register + as target for the 8bit high operations */ + mov %rdi, %r11 + + movq (R3), R1 + movq 8(R3), R3 + output_whitening(R1,%r11,a_offset) + output_whitening(R3,%r11,c_offset) + mov R1D, R0D + shr $32, R1 + rol $16, R1D + mov R3D, R2D + shr $32, R3 + rol $1, R2D + + decrypt_round(R0,R1,R2,R3,15*8); + decrypt_round(R2,R3,R0,R1,14*8); + decrypt_round(R0,R1,R2,R3,13*8); + decrypt_round(R2,R3,R0,R1,12*8); + decrypt_round(R0,R1,R2,R3,11*8); + decrypt_round(R2,R3,R0,R1,10*8); + decrypt_round(R0,R1,R2,R3,9*8); + decrypt_round(R2,R3,R0,R1,8*8); + decrypt_round(R0,R1,R2,R3,7*8); +
decrypt_round(R2,R3,R0,R1,6*8); + decrypt_round(R0,R1,R2,R3,5*8); + decrypt_round(R2,R3,R0,R1,4*8); + decrypt_round(R0,R1,R2,R3,3*8); + decrypt_round(R2,R3,R0,R1,2*8); + decrypt_round(R0,R1,R2,R3,1*8); + decrypt_last_round(R2,R3,R0,R1,0); + + input_whitening(%r10,%r11,a_offset) + movq %r10, (%rsi) + + shl $32, R1 + xor R0, R1 + + input_whitening(R1,%r11,c_offset) + movq R1, 8(%rsi) + + popq R1 + movq $1,%rax + ret diff --git a/arch/x86_64/crypto/twofish.c b/arch/x86_64/crypto/twofish.c new file mode 100644 index 000000000000..182d91d5cfb9 --- /dev/null +++ b/arch/x86_64/crypto/twofish.c @@ -0,0 +1,97 @@ +/* + * Glue Code for optimized x86_64 assembler version of TWOFISH + * + * Originally Twofish for GPG + * By Matthew Skala <mskala@ansuz.sooke.bc.ca>, July 26, 1998 + * 256-bit key length added March 20, 1999 + * Some modifications to reduce the text size by Werner Koch, April, 1998 + * Ported to the kerneli patch by Marc Mutz <Marc@Mutz.com> + * Ported to CryptoAPI by Colin Slater <hoho@tacomeat.net> + * + * The original author has disclaimed all copyright interest in this + * code and thus put it in the public domain. The subsequent authors + * have put this under the GNU General Public License. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA + * + * This code is a "clean room" implementation, written from the paper + * _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey, + * Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available + * through http://www.counterpane.com/twofish.html + * + * For background information on multiplication in finite fields, used for + * the matrix operations in the key schedule, see the book _Contemporary + * Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the + * Third Edition. 
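The movzx/xor ladders in the round macros above are Twofish's g function: each 32-bit input word indexes four key-dependent 256-entry tables, one table per byte, and the four lookups are XORed together; the paired adds that follow are the PHT mixing of the two g outputs with the round keys. One table pass in C (which table sees which byte varies between the two input words in the real code, and the table here is zeroed only so the example runs):

    #include <stdint.h>
    #include <stdio.h>

    /* One g-function pass: four byte-indexed s-box lookups, XORed. */
    static uint32_t g_demo(const uint32_t sbox[4][256], uint32_t x)
    {
        return sbox[0][x & 0xff] ^
               sbox[1][(x >> 8) & 0xff] ^
               sbox[2][(x >> 16) & 0xff] ^
               sbox[3][(x >> 24) & 0xff];
    }

    int main(void)
    {
        static uint32_t sbox[4][256]; /* key-dependent in the real cipher */
        printf("%#x\n", g_demo(sbox, 0x01020304u));
        return 0;
    }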
+ */ + +#include <crypto/twofish.h> +#include <linux/crypto.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> + +asmlinkage void twofish_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src); +asmlinkage void twofish_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src); + +static void twofish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) +{ + twofish_enc_blk(tfm, dst, src); +} + +static void twofish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) +{ + twofish_dec_blk(tfm, dst, src); +} + +static struct crypto_alg alg = { + .cra_name = "twofish", + .cra_driver_name = "twofish-x86_64", + .cra_priority = 200, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = TF_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct twofish_ctx), + .cra_alignmask = 3, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(alg.cra_list), + .cra_u = { + .cipher = { + .cia_min_keysize = TF_MIN_KEY_SIZE, + .cia_max_keysize = TF_MAX_KEY_SIZE, + .cia_setkey = twofish_setkey, + .cia_encrypt = twofish_encrypt, + .cia_decrypt = twofish_decrypt + } + } +}; + +static int __init init(void) +{ + return crypto_register_alg(&alg); +} + +static void __exit fini(void) +{ + crypto_unregister_alg(&alg); +} + +module_init(init); +module_exit(fini); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION ("Twofish Cipher Algorithm, x86_64 asm optimized"); +MODULE_ALIAS("twofish"); diff --git a/crypto/Kconfig b/crypto/Kconfig index ba133d557045..1e2f39c21180 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -9,47 +9,71 @@ config CRYPTO help This option provides the core Cryptographic API. +if CRYPTO + +config CRYPTO_ALGAPI + tristate + help + This option provides the API for cryptographic algorithms. + +config CRYPTO_BLKCIPHER + tristate + select CRYPTO_ALGAPI + +config CRYPTO_HASH + tristate + select CRYPTO_ALGAPI + +config CRYPTO_MANAGER + tristate "Cryptographic algorithm manager" + select CRYPTO_ALGAPI + default m + help + Create default cryptographic template instantiations such as + cbc(aes). + config CRYPTO_HMAC - bool "HMAC support" - depends on CRYPTO + tristate "HMAC support" + select CRYPTO_HASH help HMAC: Keyed-Hashing for Message Authentication (RFC2104). This is required for IPSec. config CRYPTO_NULL tristate "Null algorithms" - depends on CRYPTO + select CRYPTO_ALGAPI help These are 'Null' algorithms, used by IPsec, which do nothing. config CRYPTO_MD4 tristate "MD4 digest algorithm" - depends on CRYPTO + select CRYPTO_ALGAPI help MD4 message digest algorithm (RFC1320). config CRYPTO_MD5 tristate "MD5 digest algorithm" - depends on CRYPTO + select CRYPTO_ALGAPI help MD5 message digest algorithm (RFC1321). config CRYPTO_SHA1 tristate "SHA1 digest algorithm" - depends on CRYPTO + select CRYPTO_ALGAPI help SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). config CRYPTO_SHA1_S390 tristate "SHA1 digest algorithm (s390)" - depends on CRYPTO && S390 + depends on S390 + select CRYPTO_ALGAPI help This is the s390 hardware accelerated implementation of the SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). config CRYPTO_SHA256 tristate "SHA256 digest algorithm" - depends on CRYPTO + select CRYPTO_ALGAPI help SHA256 secure hash standard (DFIPS 180-2). @@ -58,7 +82,8 @@ config CRYPTO_SHA256 config CRYPTO_SHA256_S390 tristate "SHA256 digest algorithm (s390)" - depends on CRYPTO && S390 + depends on S390 + select CRYPTO_ALGAPI help This is the s390 hardware accelerated implementation of the SHA256 secure hash standard (DFIPS 180-2). 
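The glue module above follows the usual pattern for an accelerated cipher: a statically initialized crypto_alg whose cra_driver_name ("twofish-x86_64") identifies the implementation and whose cra_priority of 200 lets the API prefer it over the generic C module when both are present. A stripped-down model of that priority-based lookup (registry, types, and the generic module's priority are invented for illustration):

    #include <stdio.h>
    #include <string.h>

    struct alg_demo {
        const char *name;        /* cra_name: what users ask for */
        const char *driver_name; /* cra_driver_name: the implementation */
        int priority;            /* higher wins on lookup */
    };

    static struct alg_demo *registry[8];
    static int nalgs;

    static void register_alg(struct alg_demo *a) { registry[nalgs++] = a; }

    static struct alg_demo *lookup(const char *name)
    {
        struct alg_demo *best = NULL;

        for (int i = 0; i < nalgs; i++)
            if (!strcmp(registry[i]->name, name) &&
                (!best || registry[i]->priority > best->priority))
                best = registry[i];
        return best;
    }

    int main(void)
    {
        struct alg_demo generic = { "twofish", "twofish-generic", 100 };
        struct alg_demo asmver  = { "twofish", "twofish-x86_64", 200 };

        register_alg(&generic);
        register_alg(&asmver);
        printf("selected: %s\n", lookup("twofish")->driver_name);
        return 0;
    }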
@@ -68,7 +93,7 @@ config CRYPTO_SHA256_S390 config CRYPTO_SHA512 tristate "SHA384 and SHA512 digest algorithms" - depends on CRYPTO + select CRYPTO_ALGAPI help SHA512 secure hash standard (DFIPS 180-2). @@ -80,7 +105,7 @@ config CRYPTO_SHA512 config CRYPTO_WP512 tristate "Whirlpool digest algorithms" - depends on CRYPTO + select CRYPTO_ALGAPI help Whirlpool hash algorithm 512, 384 and 256-bit hashes @@ -92,7 +117,7 @@ config CRYPTO_WP512 config CRYPTO_TGR192 tristate "Tiger digest algorithms" - depends on CRYPTO + select CRYPTO_ALGAPI help Tiger hash algorithm 192, 160 and 128-bit hashes @@ -103,21 +128,40 @@ config CRYPTO_TGR192 See also: <http://www.cs.technion.ac.il/~biham/Reports/Tiger/>. +config CRYPTO_ECB + tristate "ECB support" + select CRYPTO_BLKCIPHER + default m + help + ECB: Electronic CodeBook mode + This is the simplest block cipher algorithm. It simply encrypts + the input block by block. + +config CRYPTO_CBC + tristate "CBC support" + select CRYPTO_BLKCIPHER + default m + help + CBC: Cipher Block Chaining mode + This block cipher algorithm is required for IPSec. + config CRYPTO_DES tristate "DES and Triple DES EDE cipher algorithms" - depends on CRYPTO + select CRYPTO_ALGAPI help DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). config CRYPTO_DES_S390 tristate "DES and Triple DES cipher algorithms (s390)" - depends on CRYPTO && S390 + depends on S390 + select CRYPTO_ALGAPI + select CRYPTO_BLKCIPHER help DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). config CRYPTO_BLOWFISH tristate "Blowfish cipher algorithm" - depends on CRYPTO + select CRYPTO_ALGAPI help Blowfish cipher algorithm, by Bruce Schneier. @@ -130,7 +174,8 @@ config CRYPTO_BLOWFISH config CRYPTO_TWOFISH tristate "Twofish cipher algorithm" - depends on CRYPTO + select CRYPTO_ALGAPI + select CRYPTO_TWOFISH_COMMON help Twofish cipher algorithm. @@ -142,9 +187,47 @@ config CRYPTO_TWOFISH See also: <http://www.schneier.com/twofish.html> +config CRYPTO_TWOFISH_COMMON + tristate + help + Common parts of the Twofish cipher algorithm shared by the + generic C and the assembler implementations. + +config CRYPTO_TWOFISH_586 + tristate "Twofish cipher algorithms (i586)" + depends on (X86 || UML_X86) && !64BIT + select CRYPTO_ALGAPI + select CRYPTO_TWOFISH_COMMON + help + Twofish cipher algorithm. + + Twofish was submitted as an AES (Advanced Encryption Standard) + candidate cipher by researchers at CounterPane Systems. It is a + 16 round block cipher supporting key sizes of 128, 192, and 256 + bits. + + See also: + <http://www.schneier.com/twofish.html> + +config CRYPTO_TWOFISH_X86_64 + tristate "Twofish cipher algorithm (x86_64)" + depends on (X86 || UML_X86) && 64BIT + select CRYPTO_ALGAPI + select CRYPTO_TWOFISH_COMMON + help + Twofish cipher algorithm (x86_64). + + Twofish was submitted as an AES (Advanced Encryption Standard) + candidate cipher by researchers at CounterPane Systems. It is a + 16 round block cipher supporting key sizes of 128, 192, and 256 + bits. + + See also: + <http://www.schneier.com/twofish.html> + config CRYPTO_SERPENT tristate "Serpent cipher algorithm" - depends on CRYPTO + select CRYPTO_ALGAPI help Serpent cipher algorithm, by Anderson, Biham & Knudsen. @@ -157,7 +240,7 @@ config CRYPTO_SERPENT config CRYPTO_AES tristate "AES cipher algorithms" - depends on CRYPTO + select CRYPTO_ALGAPI help AES cipher algorithms (FIPS-197). AES uses the Rijndael algorithm.
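Among the new entries above, CRYPTO_ECB and CRYPTO_CBC are the first users of the template mechanism: they wrap any registered block cipher. What the CBC template computes, in miniature, with a toy stand-in for the underlying cipher and fixed 16-byte blocks:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    #define BLK 16

    /* Toy stand-in for the wrapped block cipher (aes, twofish, ...). */
    static void block_cipher(unsigned char *b)
    {
        for (int i = 0; i < BLK; i++)
            b[i] = (unsigned char)(b[i] ^ 0xA5);
    }

    /* CBC: XOR each plaintext block with the previous ciphertext block
     * before encrypting.  ECB would call block_cipher() alone, so equal
     * plaintext blocks would yield equal ciphertext blocks. */
    static void cbc_encrypt(unsigned char *buf, size_t nblocks,
                            const unsigned char iv[BLK])
    {
        unsigned char prev[BLK];

        memcpy(prev, iv, BLK);
        for (size_t n = 0; n < nblocks; n++) {
            unsigned char *blk = buf + n * BLK;

            for (int i = 0; i < BLK; i++)
                blk[i] ^= prev[i]; /* chain in previous ciphertext */
            block_cipher(blk);
            memcpy(prev, blk, BLK);
        }
    }

    int main(void)
    {
        unsigned char iv[BLK] = { 1 };
        unsigned char buf[2 * BLK] = { 0 }; /* two identical blocks */

        cbc_encrypt(buf, 2, iv);
        printf("ciphertext blocks differ: %s\n",
               memcmp(buf, buf + BLK, BLK) ? "yes" : "no");
        return 0;
    }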
@@ -177,7 +260,8 @@ config CRYPTO_AES config CRYPTO_AES_586 tristate "AES cipher algorithms (i586)" - depends on CRYPTO && ((X86 || UML_X86) && !64BIT) + depends on (X86 || UML_X86) && !64BIT + select CRYPTO_ALGAPI help AES cipher algorithms (FIPS-197). AES uses the Rijndael algorithm. @@ -197,7 +281,8 @@ config CRYPTO_AES_586 config CRYPTO_AES_X86_64 tristate "AES cipher algorithms (x86_64)" - depends on CRYPTO && ((X86 || UML_X86) && 64BIT) + depends on (X86 || UML_X86) && 64BIT + select CRYPTO_ALGAPI help AES cipher algorithms (FIPS-197). AES uses the Rijndael algorithm. @@ -217,7 +302,9 @@ config CRYPTO_AES_X86_64 config CRYPTO_AES_S390 tristate "AES cipher algorithms (s390)" - depends on CRYPTO && S390 + depends on S390 + select CRYPTO_ALGAPI + select CRYPTO_BLKCIPHER help This is the s390 hardware accelerated implementation of the AES cipher algorithms (FIPS-197). AES uses the Rijndael @@ -237,21 +324,21 @@ config CRYPTO_AES_S390 config CRYPTO_CAST5 tristate "CAST5 (CAST-128) cipher algorithm" - depends on CRYPTO + select CRYPTO_ALGAPI help The CAST5 encryption algorithm (synonymous with CAST-128) is described in RFC2144. config CRYPTO_CAST6 tristate "CAST6 (CAST-256) cipher algorithm" - depends on CRYPTO + select CRYPTO_ALGAPI help The CAST6 encryption algorithm (synonymous with CAST-256) is described in RFC2612. config CRYPTO_TEA tristate "TEA, XTEA and XETA cipher algorithms" - depends on CRYPTO + select CRYPTO_ALGAPI help TEA cipher algorithm. @@ -268,7 +355,7 @@ config CRYPTO_TEA config CRYPTO_ARC4 tristate "ARC4 cipher algorithm" - depends on CRYPTO + select CRYPTO_ALGAPI help ARC4 cipher algorithm. @@ -279,7 +366,7 @@ config CRYPTO_ARC4 config CRYPTO_KHAZAD tristate "Khazad cipher algorithm" - depends on CRYPTO + select CRYPTO_ALGAPI help Khazad cipher algorithm. @@ -292,7 +379,7 @@ config CRYPTO_KHAZAD config CRYPTO_ANUBIS tristate "Anubis cipher algorithm" - depends on CRYPTO + select CRYPTO_ALGAPI help Anubis cipher algorithm. @@ -307,7 +394,7 @@ config CRYPTO_ANUBIS config CRYPTO_DEFLATE tristate "Deflate compression algorithm" - depends on CRYPTO + select CRYPTO_ALGAPI select ZLIB_INFLATE select ZLIB_DEFLATE help @@ -318,7 +405,7 @@ config CRYPTO_DEFLATE config CRYPTO_MICHAEL_MIC tristate "Michael MIC keyed digest algorithm" - depends on CRYPTO + select CRYPTO_ALGAPI help Michael MIC is used for message integrity protection in TKIP (IEEE 802.11i). This algorithm is required for TKIP, but it @@ -327,7 +414,7 @@ config CRYPTO_MICHAEL_MIC config CRYPTO_CRC32C tristate "CRC32c CRC algorithm" - depends on CRYPTO + select CRYPTO_ALGAPI select LIBCRC32C help Castagnoli, et al Cyclic Redundancy-Check Algorithm. Used @@ -337,10 +424,13 @@ config CRYPTO_CRC32C config CRYPTO_TEST tristate "Testing module" - depends on CRYPTO && m + depends on m + select CRYPTO_ALGAPI help Quick & dirty crypto test module. 
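The CRYPTO_CRC32C entry above now pulls in LIBCRC32C with select instead of relying on the menu-wide CRYPTO dependency. For reference, the Castagnoli checksum itself is small enough to state directly; this is the plain bitwise form over the reflected polynomial 0x82F63B78 (the kernel ships table-driven and hardware-assisted versions instead):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Bitwise CRC32c (Castagnoli), reflected polynomial 0x82F63B78. */
    static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
    {
        const uint8_t *p = buf;

        crc = ~crc;
        while (len--) {
            crc ^= *p++;
            for (int k = 0; k < 8; k++)
                crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1u));
        }
        return ~crc;
    }

    int main(void)
    {
        const char *s = "123456789";

        printf("%#x\n", crc32c(0, s, strlen(s))); /* expect 0xe3069283 */
        return 0;
    }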
 
 source "drivers/crypto/Kconfig"
 
-endmenu
+endif	# if CRYPTO
+
+endmenu
diff --git a/crypto/Makefile b/crypto/Makefile
index d287b9e60c47..72366208e291 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -2,11 +2,18 @@
 # Cryptographic API
 #
 
-proc-crypto-$(CONFIG_PROC_FS) = proc.o
+obj-$(CONFIG_CRYPTO) += api.o scatterwalk.o cipher.o digest.o compress.o
 
-obj-$(CONFIG_CRYPTO) += api.o scatterwalk.o cipher.o digest.o compress.o \
-			$(proc-crypto-y)
+crypto_algapi-$(CONFIG_PROC_FS) += proc.o
+crypto_algapi-objs := algapi.o $(crypto_algapi-y)
+obj-$(CONFIG_CRYPTO_ALGAPI) += crypto_algapi.o
+obj-$(CONFIG_CRYPTO_BLKCIPHER) += blkcipher.o
+
+crypto_hash-objs := hash.o
+obj-$(CONFIG_CRYPTO_HASH) += crypto_hash.o
+
+obj-$(CONFIG_CRYPTO_MANAGER) += cryptomgr.o
 
 obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
 obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
 obj-$(CONFIG_CRYPTO_MD4) += md4.o
@@ -16,9 +23,12 @@ obj-$(CONFIG_CRYPTO_SHA256) += sha256.o
 obj-$(CONFIG_CRYPTO_SHA512) += sha512.o
 obj-$(CONFIG_CRYPTO_WP512) += wp512.o
 obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
+obj-$(CONFIG_CRYPTO_ECB) += ecb.o
+obj-$(CONFIG_CRYPTO_CBC) += cbc.o
 obj-$(CONFIG_CRYPTO_DES) += des.o
 obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o
 obj-$(CONFIG_CRYPTO_TWOFISH) += twofish.o
+obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
 obj-$(CONFIG_CRYPTO_SERPENT) += serpent.o
 obj-$(CONFIG_CRYPTO_AES) += aes.o
 obj-$(CONFIG_CRYPTO_CAST5) += cast5.o
diff --git a/crypto/aes.c b/crypto/aes.c
index a038711831e7..e2440773878c 100644
--- a/crypto/aes.c
+++ b/crypto/aes.c
@@ -249,13 +249,14 @@ gen_tabs (void)
 }
 
 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
-		       unsigned int key_len, u32 *flags)
+		       unsigned int key_len)
 {
 	struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
 	const __le32 *key = (const __le32 *)in_key;
+	u32 *flags = &tfm->crt_flags;
 	u32 i, t, u, v, w;
 
-	if (key_len != 16 && key_len != 24 && key_len != 32) {
+	if (key_len % 8) {
 		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
 		return -EINVAL;
 	}
diff --git a/crypto/algapi.c b/crypto/algapi.c
new file mode 100644
index 000000000000..c91530021e9c
--- /dev/null
+++ b/crypto/algapi.c
@@ -0,0 +1,486 @@
+/*
+ * Cryptographic API for algorithms (i.e., low-level API).
+ *
+ * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ * + */ + +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/rtnetlink.h> +#include <linux/string.h> + +#include "internal.h" + +static LIST_HEAD(crypto_template_list); + +void crypto_larval_error(const char *name, u32 type, u32 mask) +{ + struct crypto_alg *alg; + + down_read(&crypto_alg_sem); + alg = __crypto_alg_lookup(name, type, mask); + up_read(&crypto_alg_sem); + + if (alg) { + if (crypto_is_larval(alg)) { + struct crypto_larval *larval = (void *)alg; + complete(&larval->completion); + } + crypto_mod_put(alg); + } +} +EXPORT_SYMBOL_GPL(crypto_larval_error); + +static inline int crypto_set_driver_name(struct crypto_alg *alg) +{ + static const char suffix[] = "-generic"; + char *driver_name = alg->cra_driver_name; + int len; + + if (*driver_name) + return 0; + + len = strlcpy(driver_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); + if (len + sizeof(suffix) > CRYPTO_MAX_ALG_NAME) + return -ENAMETOOLONG; + + memcpy(driver_name + len, suffix, sizeof(suffix)); + return 0; +} + +static int crypto_check_alg(struct crypto_alg *alg) +{ + if (alg->cra_alignmask & (alg->cra_alignmask + 1)) + return -EINVAL; + + if (alg->cra_alignmask & alg->cra_blocksize) + return -EINVAL; + + if (alg->cra_blocksize > PAGE_SIZE / 8) + return -EINVAL; + + if (alg->cra_priority < 0) + return -EINVAL; + + return crypto_set_driver_name(alg); +} + +static void crypto_destroy_instance(struct crypto_alg *alg) +{ + struct crypto_instance *inst = (void *)alg; + struct crypto_template *tmpl = inst->tmpl; + + tmpl->free(inst); + crypto_tmpl_put(tmpl); +} + +static void crypto_remove_spawns(struct list_head *spawns, + struct list_head *list) +{ + struct crypto_spawn *spawn, *n; + + list_for_each_entry_safe(spawn, n, spawns, list) { + struct crypto_instance *inst = spawn->inst; + struct crypto_template *tmpl = inst->tmpl; + + list_del_init(&spawn->list); + spawn->alg = NULL; + + if (crypto_is_dead(&inst->alg)) + continue; + + inst->alg.cra_flags |= CRYPTO_ALG_DEAD; + if (!tmpl || !crypto_tmpl_get(tmpl)) + continue; + + crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, &inst->alg); + list_move(&inst->alg.cra_list, list); + hlist_del(&inst->list); + inst->alg.cra_destroy = crypto_destroy_instance; + + if (!list_empty(&inst->alg.cra_users)) { + if (&n->list == spawns) + n = list_entry(inst->alg.cra_users.next, + typeof(*n), list); + __list_splice(&inst->alg.cra_users, spawns->prev); + } + } +} + +static int __crypto_register_alg(struct crypto_alg *alg, + struct list_head *list) +{ + struct crypto_alg *q; + int ret = -EAGAIN; + + if (crypto_is_dead(alg)) + goto out; + + INIT_LIST_HEAD(&alg->cra_users); + + ret = -EEXIST; + + atomic_set(&alg->cra_refcnt, 1); + list_for_each_entry(q, &crypto_alg_list, cra_list) { + if (q == alg) + goto out; + + if (crypto_is_moribund(q)) + continue; + + if (crypto_is_larval(q)) { + struct crypto_larval *larval = (void *)q; + + if (strcmp(alg->cra_name, q->cra_name) && + strcmp(alg->cra_driver_name, q->cra_name)) + continue; + + if (larval->adult) + continue; + if ((q->cra_flags ^ alg->cra_flags) & larval->mask) + continue; + if (!crypto_mod_get(alg)) + continue; + + larval->adult = alg; + complete(&larval->completion); + continue; + } + + if (strcmp(alg->cra_name, q->cra_name)) + continue; + + if (strcmp(alg->cra_driver_name, q->cra_driver_name) && + q->cra_priority > alg->cra_priority) + continue; + + crypto_remove_spawns(&q->cra_users, list); + } + + list_add(&alg->cra_list, 
&crypto_alg_list); + + crypto_notify(CRYPTO_MSG_ALG_REGISTER, alg); + ret = 0; + +out: + return ret; +} + +static void crypto_remove_final(struct list_head *list) +{ + struct crypto_alg *alg; + struct crypto_alg *n; + + list_for_each_entry_safe(alg, n, list, cra_list) { + list_del_init(&alg->cra_list); + crypto_alg_put(alg); + } +} + +int crypto_register_alg(struct crypto_alg *alg) +{ + LIST_HEAD(list); + int err; + + err = crypto_check_alg(alg); + if (err) + return err; + + down_write(&crypto_alg_sem); + err = __crypto_register_alg(alg, &list); + up_write(&crypto_alg_sem); + + crypto_remove_final(&list); + return err; +} +EXPORT_SYMBOL_GPL(crypto_register_alg); + +static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list) +{ + if (unlikely(list_empty(&alg->cra_list))) + return -ENOENT; + + alg->cra_flags |= CRYPTO_ALG_DEAD; + + crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, alg); + list_del_init(&alg->cra_list); + crypto_remove_spawns(&alg->cra_users, list); + + return 0; +} + +int crypto_unregister_alg(struct crypto_alg *alg) +{ + int ret; + LIST_HEAD(list); + + down_write(&crypto_alg_sem); + ret = crypto_remove_alg(alg, &list); + up_write(&crypto_alg_sem); + + if (ret) + return ret; + + BUG_ON(atomic_read(&alg->cra_refcnt) != 1); + if (alg->cra_destroy) + alg->cra_destroy(alg); + + crypto_remove_final(&list); + return 0; +} +EXPORT_SYMBOL_GPL(crypto_unregister_alg); + +int crypto_register_template(struct crypto_template *tmpl) +{ + struct crypto_template *q; + int err = -EEXIST; + + down_write(&crypto_alg_sem); + + list_for_each_entry(q, &crypto_template_list, list) { + if (q == tmpl) + goto out; + } + + list_add(&tmpl->list, &crypto_template_list); + crypto_notify(CRYPTO_MSG_TMPL_REGISTER, tmpl); + err = 0; +out: + up_write(&crypto_alg_sem); + return err; +} +EXPORT_SYMBOL_GPL(crypto_register_template); + +void crypto_unregister_template(struct crypto_template *tmpl) +{ + struct crypto_instance *inst; + struct hlist_node *p, *n; + struct hlist_head *list; + LIST_HEAD(users); + + down_write(&crypto_alg_sem); + + BUG_ON(list_empty(&tmpl->list)); + list_del_init(&tmpl->list); + + list = &tmpl->instances; + hlist_for_each_entry(inst, p, list, list) { + int err = crypto_remove_alg(&inst->alg, &users); + BUG_ON(err); + } + + crypto_notify(CRYPTO_MSG_TMPL_UNREGISTER, tmpl); + + up_write(&crypto_alg_sem); + + hlist_for_each_entry_safe(inst, p, n, list, list) { + BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1); + tmpl->free(inst); + } + crypto_remove_final(&users); +} +EXPORT_SYMBOL_GPL(crypto_unregister_template); + +static struct crypto_template *__crypto_lookup_template(const char *name) +{ + struct crypto_template *q, *tmpl = NULL; + + down_read(&crypto_alg_sem); + list_for_each_entry(q, &crypto_template_list, list) { + if (strcmp(q->name, name)) + continue; + if (unlikely(!crypto_tmpl_get(q))) + continue; + + tmpl = q; + break; + } + up_read(&crypto_alg_sem); + + return tmpl; +} + +struct crypto_template *crypto_lookup_template(const char *name) +{ + return try_then_request_module(__crypto_lookup_template(name), name); +} +EXPORT_SYMBOL_GPL(crypto_lookup_template); + +int crypto_register_instance(struct crypto_template *tmpl, + struct crypto_instance *inst) +{ + LIST_HEAD(list); + int err = -EINVAL; + + if (inst->alg.cra_destroy) + goto err; + + err = crypto_check_alg(&inst->alg); + if (err) + goto err; + + inst->alg.cra_module = tmpl->module; + + down_write(&crypto_alg_sem); + + err = __crypto_register_alg(&inst->alg, &list); + if (err) + goto unlock; + + 
hlist_add_head(&inst->list, &tmpl->instances); + inst->tmpl = tmpl; + +unlock: + up_write(&crypto_alg_sem); + + crypto_remove_final(&list); + +err: + return err; +} +EXPORT_SYMBOL_GPL(crypto_register_instance); + +int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, + struct crypto_instance *inst) +{ + int err = -EAGAIN; + + spawn->inst = inst; + + down_write(&crypto_alg_sem); + if (!crypto_is_moribund(alg)) { + list_add(&spawn->list, &alg->cra_users); + spawn->alg = alg; + err = 0; + } + up_write(&crypto_alg_sem); + + return err; +} +EXPORT_SYMBOL_GPL(crypto_init_spawn); + +void crypto_drop_spawn(struct crypto_spawn *spawn) +{ + down_write(&crypto_alg_sem); + list_del(&spawn->list); + up_write(&crypto_alg_sem); +} +EXPORT_SYMBOL_GPL(crypto_drop_spawn); + +struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn) +{ + struct crypto_alg *alg; + struct crypto_alg *alg2; + struct crypto_tfm *tfm; + + down_read(&crypto_alg_sem); + alg = spawn->alg; + alg2 = alg; + if (alg2) + alg2 = crypto_mod_get(alg2); + up_read(&crypto_alg_sem); + + if (!alg2) { + if (alg) + crypto_shoot_alg(alg); + return ERR_PTR(-EAGAIN); + } + + tfm = __crypto_alloc_tfm(alg, 0); + if (IS_ERR(tfm)) + crypto_mod_put(alg); + + return tfm; +} +EXPORT_SYMBOL_GPL(crypto_spawn_tfm); + +int crypto_register_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&crypto_chain, nb); +} +EXPORT_SYMBOL_GPL(crypto_register_notifier); + +int crypto_unregister_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&crypto_chain, nb); +} +EXPORT_SYMBOL_GPL(crypto_unregister_notifier); + +struct crypto_alg *crypto_get_attr_alg(void *param, unsigned int len, + u32 type, u32 mask) +{ + struct rtattr *rta = param; + struct crypto_attr_alg *alga; + + if (!RTA_OK(rta, len)) + return ERR_PTR(-EBADR); + if (rta->rta_type != CRYPTOA_ALG || RTA_PAYLOAD(rta) < sizeof(*alga)) + return ERR_PTR(-EINVAL); + + alga = RTA_DATA(rta); + alga->name[CRYPTO_MAX_ALG_NAME - 1] = 0; + + return crypto_alg_mod_lookup(alga->name, type, mask); +} +EXPORT_SYMBOL_GPL(crypto_get_attr_alg); + +struct crypto_instance *crypto_alloc_instance(const char *name, + struct crypto_alg *alg) +{ + struct crypto_instance *inst; + struct crypto_spawn *spawn; + int err; + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) + return ERR_PTR(-ENOMEM); + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name, + alg->cra_name) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + + if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", + name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + + spawn = crypto_instance_ctx(inst); + err = crypto_init_spawn(spawn, alg, inst); + + if (err) + goto err_free_inst; + + return inst; + +err_free_inst: + kfree(inst); + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(crypto_alloc_instance); + +static int __init crypto_algapi_init(void) +{ + crypto_init_proc(); + return 0; +} + +static void __exit crypto_algapi_exit(void) +{ + crypto_exit_proc(); +} + +module_init(crypto_algapi_init); +module_exit(crypto_algapi_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Cryptographic algorithms API"); diff --git a/crypto/anubis.c b/crypto/anubis.c index 7e2e1a29800e..1c771f7f4dc5 100644 --- a/crypto/anubis.c +++ b/crypto/anubis.c @@ -461,10 +461,11 @@ static const u32 rc[] = { }; static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key, - unsigned int key_len, u32 *flags) + unsigned 
int key_len) { struct anubis_ctx *ctx = crypto_tfm_ctx(tfm); const __be32 *key = (const __be32 *)in_key; + u32 *flags = &tfm->crt_flags; int N, R, i, r; u32 kappa[ANUBIS_MAX_N]; u32 inter[ANUBIS_MAX_N]; diff --git a/crypto/api.c b/crypto/api.c index c11ec1fd4f18..2e84d4b54790 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -15,70 +15,202 @@ * */ -#include <linux/compiler.h> -#include <linux/init.h> -#include <linux/crypto.h> +#include <linux/err.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/kmod.h> -#include <linux/rwsem.h> +#include <linux/module.h> +#include <linux/param.h> +#include <linux/sched.h> #include <linux/slab.h> #include <linux/string.h> #include "internal.h" LIST_HEAD(crypto_alg_list); +EXPORT_SYMBOL_GPL(crypto_alg_list); DECLARE_RWSEM(crypto_alg_sem); +EXPORT_SYMBOL_GPL(crypto_alg_sem); -static inline int crypto_alg_get(struct crypto_alg *alg) +BLOCKING_NOTIFIER_HEAD(crypto_chain); +EXPORT_SYMBOL_GPL(crypto_chain); + +static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg) +{ + atomic_inc(&alg->cra_refcnt); + return alg; +} + +struct crypto_alg *crypto_mod_get(struct crypto_alg *alg) { - return try_module_get(alg->cra_module); + return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL; } +EXPORT_SYMBOL_GPL(crypto_mod_get); -static inline void crypto_alg_put(struct crypto_alg *alg) +void crypto_mod_put(struct crypto_alg *alg) { + crypto_alg_put(alg); module_put(alg->cra_module); } +EXPORT_SYMBOL_GPL(crypto_mod_put); -static struct crypto_alg *crypto_alg_lookup(const char *name) +struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, u32 mask) { struct crypto_alg *q, *alg = NULL; - int best = -1; + int best = -2; - if (!name) - return NULL; - - down_read(&crypto_alg_sem); - list_for_each_entry(q, &crypto_alg_list, cra_list) { int exact, fuzzy; + if (crypto_is_moribund(q)) + continue; + + if ((q->cra_flags ^ type) & mask) + continue; + + if (crypto_is_larval(q) && + ((struct crypto_larval *)q)->mask != mask) + continue; + exact = !strcmp(q->cra_driver_name, name); fuzzy = !strcmp(q->cra_name, name); if (!exact && !(fuzzy && q->cra_priority > best)) continue; - if (unlikely(!crypto_alg_get(q))) + if (unlikely(!crypto_mod_get(q))) continue; best = q->cra_priority; if (alg) - crypto_alg_put(alg); + crypto_mod_put(alg); alg = q; if (exact) break; } - + + return alg; +} +EXPORT_SYMBOL_GPL(__crypto_alg_lookup); + +static void crypto_larval_destroy(struct crypto_alg *alg) +{ + struct crypto_larval *larval = (void *)alg; + + BUG_ON(!crypto_is_larval(alg)); + if (larval->adult) + crypto_mod_put(larval->adult); + kfree(larval); +} + +static struct crypto_alg *crypto_larval_alloc(const char *name, u32 type, + u32 mask) +{ + struct crypto_alg *alg; + struct crypto_larval *larval; + + larval = kzalloc(sizeof(*larval), GFP_KERNEL); + if (!larval) + return ERR_PTR(-ENOMEM); + + larval->mask = mask; + larval->alg.cra_flags = CRYPTO_ALG_LARVAL | type; + larval->alg.cra_priority = -1; + larval->alg.cra_destroy = crypto_larval_destroy; + + atomic_set(&larval->alg.cra_refcnt, 2); + strlcpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME); + init_completion(&larval->completion); + + down_write(&crypto_alg_sem); + alg = __crypto_alg_lookup(name, type, mask); + if (!alg) { + alg = &larval->alg; + list_add(&alg->cra_list, &crypto_alg_list); + } + up_write(&crypto_alg_sem); + + if (alg != &larval->alg) + kfree(larval); + + return alg; +} + +static void crypto_larval_kill(struct crypto_alg *alg) +{ + struct crypto_larval *larval = (void 
*)alg; + + down_write(&crypto_alg_sem); + list_del(&alg->cra_list); + up_write(&crypto_alg_sem); + complete(&larval->completion); + crypto_alg_put(alg); +} + +static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) +{ + struct crypto_larval *larval = (void *)alg; + + wait_for_completion_interruptible_timeout(&larval->completion, 60 * HZ); + alg = larval->adult; + if (alg) { + if (!crypto_mod_get(alg)) + alg = ERR_PTR(-EAGAIN); + } else + alg = ERR_PTR(-ENOENT); + crypto_mod_put(&larval->alg); + + return alg; +} + +static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, + u32 mask) +{ + struct crypto_alg *alg; + + down_read(&crypto_alg_sem); + alg = __crypto_alg_lookup(name, type, mask); up_read(&crypto_alg_sem); + return alg; } -/* A far more intelligent version of this is planned. For now, just - * try an exact match on the name of the algorithm. */ -static inline struct crypto_alg *crypto_alg_mod_lookup(const char *name) +struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask) { - return try_then_request_module(crypto_alg_lookup(name), name); + struct crypto_alg *alg; + struct crypto_alg *larval; + int ok; + + if (!name) + return ERR_PTR(-ENOENT); + + mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD); + type &= mask; + + alg = try_then_request_module(crypto_alg_lookup(name, type, mask), + name); + if (alg) + return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg; + + larval = crypto_larval_alloc(name, type, mask); + if (IS_ERR(larval) || !crypto_is_larval(larval)) + return larval; + + ok = crypto_notify(CRYPTO_MSG_ALG_REQUEST, larval); + if (ok == NOTIFY_DONE) { + request_module("cryptomgr"); + ok = crypto_notify(CRYPTO_MSG_ALG_REQUEST, larval); + } + + if (ok == NOTIFY_STOP) + alg = crypto_larval_wait(larval); + else { + crypto_mod_put(larval); + alg = ERR_PTR(-ENOENT); + } + crypto_larval_kill(larval); + return alg; } +EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup); static int crypto_init_flags(struct crypto_tfm *tfm, u32 flags) { @@ -94,17 +226,18 @@ static int crypto_init_flags(struct crypto_tfm *tfm, u32 flags) case CRYPTO_ALG_TYPE_COMPRESS: return crypto_init_compress_flags(tfm, flags); - - default: - break; } - BUG(); - return -EINVAL; + return 0; } static int crypto_init_ops(struct crypto_tfm *tfm) { + const struct crypto_type *type = tfm->__crt_alg->cra_type; + + if (type) + return type->init(tfm); + switch (crypto_tfm_alg_type(tfm)) { case CRYPTO_ALG_TYPE_CIPHER: return crypto_init_cipher_ops(tfm); @@ -125,6 +258,14 @@ static int crypto_init_ops(struct crypto_tfm *tfm) static void crypto_exit_ops(struct crypto_tfm *tfm) { + const struct crypto_type *type = tfm->__crt_alg->cra_type; + + if (type) { + if (type->exit) + type->exit(tfm); + return; + } + switch (crypto_tfm_alg_type(tfm)) { case CRYPTO_ALG_TYPE_CIPHER: crypto_exit_cipher_ops(tfm); @@ -146,53 +287,67 @@ static void crypto_exit_ops(struct crypto_tfm *tfm) static unsigned int crypto_ctxsize(struct crypto_alg *alg, int flags) { + const struct crypto_type *type = alg->cra_type; unsigned int len; + len = alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1); + if (type) + return len + type->ctxsize(alg); + switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { default: BUG(); case CRYPTO_ALG_TYPE_CIPHER: - len = crypto_cipher_ctxsize(alg, flags); + len += crypto_cipher_ctxsize(alg, flags); break; case CRYPTO_ALG_TYPE_DIGEST: - len = crypto_digest_ctxsize(alg, flags); + len += crypto_digest_ctxsize(alg, flags); break; case CRYPTO_ALG_TYPE_COMPRESS: - len = 
crypto_compress_ctxsize(alg, flags); + len += crypto_compress_ctxsize(alg, flags); break; } - return len + (alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1)); + return len; } -struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags) +void crypto_shoot_alg(struct crypto_alg *alg) +{ + down_write(&crypto_alg_sem); + alg->cra_flags |= CRYPTO_ALG_DYING; + up_write(&crypto_alg_sem); +} +EXPORT_SYMBOL_GPL(crypto_shoot_alg); + +struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 flags) { struct crypto_tfm *tfm = NULL; - struct crypto_alg *alg; unsigned int tfm_size; - - alg = crypto_alg_mod_lookup(name); - if (alg == NULL) - goto out; + int err = -ENOMEM; tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, flags); tfm = kzalloc(tfm_size, GFP_KERNEL); if (tfm == NULL) - goto out_put; + goto out; tfm->__crt_alg = alg; - - if (crypto_init_flags(tfm, flags)) + + err = crypto_init_flags(tfm, flags); + if (err) goto out_free_tfm; - if (crypto_init_ops(tfm)) + err = crypto_init_ops(tfm); + if (err) goto out_free_tfm; - if (alg->cra_init && alg->cra_init(tfm)) + if (alg->cra_init && (err = alg->cra_init(tfm))) { + if (err == -EAGAIN) + crypto_shoot_alg(alg); goto cra_init_failed; + } goto out; @@ -200,13 +355,97 @@ cra_init_failed: crypto_exit_ops(tfm); out_free_tfm: kfree(tfm); - tfm = NULL; -out_put: - crypto_alg_put(alg); + tfm = ERR_PTR(err); out: return tfm; } +EXPORT_SYMBOL_GPL(__crypto_alloc_tfm); + +struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags) +{ + struct crypto_tfm *tfm = NULL; + int err; + + do { + struct crypto_alg *alg; + + alg = crypto_alg_mod_lookup(name, 0, CRYPTO_ALG_ASYNC); + err = PTR_ERR(alg); + if (IS_ERR(alg)) + continue; + + tfm = __crypto_alloc_tfm(alg, flags); + err = 0; + if (IS_ERR(tfm)) { + crypto_mod_put(alg); + err = PTR_ERR(tfm); + tfm = NULL; + } + } while (err == -EAGAIN && !signal_pending(current)); + + return tfm; +} + +/* + * crypto_alloc_base - Locate algorithm and allocate transform + * @alg_name: Name of algorithm + * @type: Type of algorithm + * @mask: Mask for type comparison + * + * crypto_alloc_base() will first attempt to locate an already loaded + * algorithm. If that fails and the kernel supports dynamically loadable + * modules, it will then attempt to load a module of the same name or + * alias. If that fails it will send a query to any loaded crypto manager + * to construct an algorithm on the fly. A refcount is grabbed on the + * algorithm which is then associated with the new transform. + * + * The returned transform is of a non-determinate type. Most people + * should use one of the more specific allocation functions such as + * crypto_alloc_blkcipher. + * + * In case of error the return value is an error pointer. + */ +struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask) +{ + struct crypto_tfm *tfm; + int err; + + for (;;) { + struct crypto_alg *alg; + + alg = crypto_alg_mod_lookup(alg_name, type, mask); + err = PTR_ERR(alg); + tfm = ERR_PTR(err); + if (IS_ERR(alg)) + goto err; + + tfm = __crypto_alloc_tfm(alg, 0); + if (!IS_ERR(tfm)) + break; + + crypto_mod_put(alg); + err = PTR_ERR(tfm); +err: + if (err != -EAGAIN) + break; + if (signal_pending(current)) { + err = -EINTR; + break; + } + }; + + return tfm; +} +EXPORT_SYMBOL_GPL(crypto_alloc_base); + +/* + * crypto_free_tfm - Free crypto transform + * @tfm: Transform to free + * + * crypto_free_tfm() frees up the transform and any associated resources, + * then drops the refcount on the associated algorithm. 
+ */ void crypto_free_tfm(struct crypto_tfm *tfm) { struct crypto_alg *alg; @@ -221,108 +460,39 @@ void crypto_free_tfm(struct crypto_tfm *tfm) if (alg->cra_exit) alg->cra_exit(tfm); crypto_exit_ops(tfm); - crypto_alg_put(alg); + crypto_mod_put(alg); memset(tfm, 0, size); kfree(tfm); } -static inline int crypto_set_driver_name(struct crypto_alg *alg) -{ - static const char suffix[] = "-generic"; - char *driver_name = alg->cra_driver_name; - int len; - - if (*driver_name) - return 0; - - len = strlcpy(driver_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); - if (len + sizeof(suffix) > CRYPTO_MAX_ALG_NAME) - return -ENAMETOOLONG; - - memcpy(driver_name + len, suffix, sizeof(suffix)); - return 0; -} - -int crypto_register_alg(struct crypto_alg *alg) +int crypto_alg_available(const char *name, u32 flags) { - int ret; - struct crypto_alg *q; - - if (alg->cra_alignmask & (alg->cra_alignmask + 1)) - return -EINVAL; - - if (alg->cra_alignmask & alg->cra_blocksize) - return -EINVAL; - - if (alg->cra_blocksize > PAGE_SIZE / 8) - return -EINVAL; - - if (alg->cra_priority < 0) - return -EINVAL; - - ret = crypto_set_driver_name(alg); - if (unlikely(ret)) - return ret; - - down_write(&crypto_alg_sem); + int ret = 0; + struct crypto_alg *alg = crypto_alg_mod_lookup(name, 0, + CRYPTO_ALG_ASYNC); - list_for_each_entry(q, &crypto_alg_list, cra_list) { - if (q == alg) { - ret = -EEXIST; - goto out; - } + if (!IS_ERR(alg)) { + crypto_mod_put(alg); + ret = 1; } - list_add(&alg->cra_list, &crypto_alg_list); -out: - up_write(&crypto_alg_sem); return ret; } -int crypto_unregister_alg(struct crypto_alg *alg) -{ - int ret = -ENOENT; - struct crypto_alg *q; - - BUG_ON(!alg->cra_module); - - down_write(&crypto_alg_sem); - list_for_each_entry(q, &crypto_alg_list, cra_list) { - if (alg == q) { - list_del(&alg->cra_list); - ret = 0; - goto out; - } - } -out: - up_write(&crypto_alg_sem); - return ret; -} +EXPORT_SYMBOL_GPL(crypto_alloc_tfm); +EXPORT_SYMBOL_GPL(crypto_free_tfm); +EXPORT_SYMBOL_GPL(crypto_alg_available); -int crypto_alg_available(const char *name, u32 flags) +int crypto_has_alg(const char *name, u32 type, u32 mask) { int ret = 0; - struct crypto_alg *alg = crypto_alg_mod_lookup(name); + struct crypto_alg *alg = crypto_alg_mod_lookup(name, type, mask); - if (alg) { - crypto_alg_put(alg); + if (!IS_ERR(alg)) { + crypto_mod_put(alg); ret = 1; } return ret; } - -static int __init init_crypto(void) -{ - printk(KERN_INFO "Initializing Cryptographic API\n"); - crypto_init_proc(); - return 0; -} - -__initcall(init_crypto); - -EXPORT_SYMBOL_GPL(crypto_register_alg); -EXPORT_SYMBOL_GPL(crypto_unregister_alg); -EXPORT_SYMBOL_GPL(crypto_alloc_tfm); -EXPORT_SYMBOL_GPL(crypto_free_tfm); -EXPORT_SYMBOL_GPL(crypto_alg_available); +EXPORT_SYMBOL_GPL(crypto_has_alg); diff --git a/crypto/arc4.c b/crypto/arc4.c index 5edc6a65b987..8be47e13a9e3 100644 --- a/crypto/arc4.c +++ b/crypto/arc4.c @@ -25,7 +25,7 @@ struct arc4_ctx { }; static int arc4_set_key(struct crypto_tfm *tfm, const u8 *in_key, - unsigned int key_len, u32 *flags) + unsigned int key_len) { struct arc4_ctx *ctx = crypto_tfm_ctx(tfm); int i, j = 0, k = 0; diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c new file mode 100644 index 000000000000..034c939bf91a --- /dev/null +++ b/crypto/blkcipher.c @@ -0,0 +1,405 @@ +/* + * Block chaining cipher operations. + * + * Generic encrypt/decrypt wrapper for ciphers, handles operations across + * multiple page boundaries by using temporary blocks. In user context, + * the kernel is given a chance to schedule us once per page. 
+ * + * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ + +#include <linux/crypto.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/scatterlist.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/string.h> + +#include "internal.h" +#include "scatterwalk.h" + +enum { + BLKCIPHER_WALK_PHYS = 1 << 0, + BLKCIPHER_WALK_SLOW = 1 << 1, + BLKCIPHER_WALK_COPY = 1 << 2, + BLKCIPHER_WALK_DIFF = 1 << 3, +}; + +static int blkcipher_walk_next(struct blkcipher_desc *desc, + struct blkcipher_walk *walk); +static int blkcipher_walk_first(struct blkcipher_desc *desc, + struct blkcipher_walk *walk); + +static inline void blkcipher_map_src(struct blkcipher_walk *walk) +{ + walk->src.virt.addr = scatterwalk_map(&walk->in, 0); +} + +static inline void blkcipher_map_dst(struct blkcipher_walk *walk) +{ + walk->dst.virt.addr = scatterwalk_map(&walk->out, 1); +} + +static inline void blkcipher_unmap_src(struct blkcipher_walk *walk) +{ + scatterwalk_unmap(walk->src.virt.addr, 0); +} + +static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk) +{ + scatterwalk_unmap(walk->dst.virt.addr, 1); +} + +static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len) +{ + if (offset_in_page(start + len) < len) + return (u8 *)((unsigned long)(start + len) & PAGE_MASK); + return start; +} + +static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm, + struct blkcipher_walk *walk, + unsigned int bsize) +{ + u8 *addr; + unsigned int alignmask = crypto_blkcipher_alignmask(tfm); + + addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1); + addr = blkcipher_get_spot(addr, bsize); + scatterwalk_copychunks(addr, &walk->out, bsize, 1); + return bsize; +} + +static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk, + unsigned int n) +{ + n = walk->nbytes - n; + + if (walk->flags & BLKCIPHER_WALK_COPY) { + blkcipher_map_dst(walk); + memcpy(walk->dst.virt.addr, walk->page, n); + blkcipher_unmap_dst(walk); + } else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) { + blkcipher_unmap_src(walk); + if (walk->flags & BLKCIPHER_WALK_DIFF) + blkcipher_unmap_dst(walk); + } + + scatterwalk_advance(&walk->in, n); + scatterwalk_advance(&walk->out, n); + + return n; +} + +int blkcipher_walk_done(struct blkcipher_desc *desc, + struct blkcipher_walk *walk, int err) +{ + struct crypto_blkcipher *tfm = desc->tfm; + unsigned int nbytes = 0; + + if (likely(err >= 0)) { + unsigned int bsize = crypto_blkcipher_blocksize(tfm); + unsigned int n; + + if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) + n = blkcipher_done_fast(walk, err); + else + n = blkcipher_done_slow(tfm, walk, bsize); + + nbytes = walk->total - n; + err = 0; + } + + scatterwalk_done(&walk->in, 0, nbytes); + scatterwalk_done(&walk->out, 1, nbytes); + + walk->total = nbytes; + walk->nbytes = nbytes; + + if (nbytes) { + crypto_yield(desc->flags); + return blkcipher_walk_next(desc, walk); + } + + if (walk->iv != desc->info) + memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm)); + if (walk->buffer != walk->page) + kfree(walk->buffer); + if (walk->page) + free_page((unsigned long)walk->page); + + return err; +} +EXPORT_SYMBOL_GPL(blkcipher_walk_done); + +static inline int 
blkcipher_next_slow(struct blkcipher_desc *desc, + struct blkcipher_walk *walk, + unsigned int bsize, + unsigned int alignmask) +{ + unsigned int n; + + if (walk->buffer) + goto ok; + + walk->buffer = walk->page; + if (walk->buffer) + goto ok; + + n = bsize * 2 + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); + walk->buffer = kmalloc(n, GFP_ATOMIC); + if (!walk->buffer) + return blkcipher_walk_done(desc, walk, -ENOMEM); + +ok: + walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer, + alignmask + 1); + walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize); + walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr + bsize, + bsize); + + scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0); + + walk->nbytes = bsize; + walk->flags |= BLKCIPHER_WALK_SLOW; + + return 0; +} + +static inline int blkcipher_next_copy(struct blkcipher_walk *walk) +{ + u8 *tmp = walk->page; + + blkcipher_map_src(walk); + memcpy(tmp, walk->src.virt.addr, walk->nbytes); + blkcipher_unmap_src(walk); + + walk->src.virt.addr = tmp; + walk->dst.virt.addr = tmp; + + return 0; +} + +static inline int blkcipher_next_fast(struct blkcipher_desc *desc, + struct blkcipher_walk *walk) +{ + unsigned long diff; + + walk->src.phys.page = scatterwalk_page(&walk->in); + walk->src.phys.offset = offset_in_page(walk->in.offset); + walk->dst.phys.page = scatterwalk_page(&walk->out); + walk->dst.phys.offset = offset_in_page(walk->out.offset); + + if (walk->flags & BLKCIPHER_WALK_PHYS) + return 0; + + diff = walk->src.phys.offset - walk->dst.phys.offset; + diff |= walk->src.virt.page - walk->dst.virt.page; + + blkcipher_map_src(walk); + walk->dst.virt.addr = walk->src.virt.addr; + + if (diff) { + walk->flags |= BLKCIPHER_WALK_DIFF; + blkcipher_map_dst(walk); + } + + return 0; +} + +static int blkcipher_walk_next(struct blkcipher_desc *desc, + struct blkcipher_walk *walk) +{ + struct crypto_blkcipher *tfm = desc->tfm; + unsigned int alignmask = crypto_blkcipher_alignmask(tfm); + unsigned int bsize = crypto_blkcipher_blocksize(tfm); + unsigned int n; + int err; + + n = walk->total; + if (unlikely(n < bsize)) { + desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; + return blkcipher_walk_done(desc, walk, -EINVAL); + } + + walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY | + BLKCIPHER_WALK_DIFF); + if (!scatterwalk_aligned(&walk->in, alignmask) || + !scatterwalk_aligned(&walk->out, alignmask)) { + walk->flags |= BLKCIPHER_WALK_COPY; + if (!walk->page) { + walk->page = (void *)__get_free_page(GFP_ATOMIC); + if (!walk->page) + n = 0; + } + } + + n = scatterwalk_clamp(&walk->in, n); + n = scatterwalk_clamp(&walk->out, n); + + if (unlikely(n < bsize)) { + err = blkcipher_next_slow(desc, walk, bsize, alignmask); + goto set_phys_lowmem; + } + + walk->nbytes = n; + if (walk->flags & BLKCIPHER_WALK_COPY) { + err = blkcipher_next_copy(walk); + goto set_phys_lowmem; + } + + return blkcipher_next_fast(desc, walk); + +set_phys_lowmem: + if (walk->flags & BLKCIPHER_WALK_PHYS) { + walk->src.phys.page = virt_to_page(walk->src.virt.addr); + walk->dst.phys.page = virt_to_page(walk->dst.virt.addr); + walk->src.phys.offset &= PAGE_SIZE - 1; + walk->dst.phys.offset &= PAGE_SIZE - 1; + } + return err; +} + +static inline int blkcipher_copy_iv(struct blkcipher_walk *walk, + struct crypto_blkcipher *tfm, + unsigned int alignmask) +{ + unsigned bs = crypto_blkcipher_blocksize(tfm); + unsigned int ivsize = crypto_blkcipher_ivsize(tfm); + unsigned int size = bs * 2 + ivsize + max(bs, ivsize) - (alignmask + 1); + u8 *iv; + + 
size += alignmask & ~(crypto_tfm_ctx_alignment() - 1); + walk->buffer = kmalloc(size, GFP_ATOMIC); + if (!walk->buffer) + return -ENOMEM; + + iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1); + iv = blkcipher_get_spot(iv, bs) + bs; + iv = blkcipher_get_spot(iv, bs) + bs; + iv = blkcipher_get_spot(iv, ivsize); + + walk->iv = memcpy(iv, walk->iv, ivsize); + return 0; +} + +int blkcipher_walk_virt(struct blkcipher_desc *desc, + struct blkcipher_walk *walk) +{ + walk->flags &= ~BLKCIPHER_WALK_PHYS; + return blkcipher_walk_first(desc, walk); +} +EXPORT_SYMBOL_GPL(blkcipher_walk_virt); + +int blkcipher_walk_phys(struct blkcipher_desc *desc, + struct blkcipher_walk *walk) +{ + walk->flags |= BLKCIPHER_WALK_PHYS; + return blkcipher_walk_first(desc, walk); +} +EXPORT_SYMBOL_GPL(blkcipher_walk_phys); + +static int blkcipher_walk_first(struct blkcipher_desc *desc, + struct blkcipher_walk *walk) +{ + struct crypto_blkcipher *tfm = desc->tfm; + unsigned int alignmask = crypto_blkcipher_alignmask(tfm); + + walk->nbytes = walk->total; + if (unlikely(!walk->total)) + return 0; + + walk->buffer = NULL; + walk->iv = desc->info; + if (unlikely(((unsigned long)walk->iv & alignmask))) { + int err = blkcipher_copy_iv(walk, tfm, alignmask); + if (err) + return err; + } + + scatterwalk_start(&walk->in, walk->in.sg); + scatterwalk_start(&walk->out, walk->out.sg); + walk->page = NULL; + + return blkcipher_walk_next(desc, walk); +} + +static int setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen) +{ + struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher; + + if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) { + tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + + return cipher->setkey(tfm, key, keylen); +} + +static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg) +{ + struct blkcipher_alg *cipher = &alg->cra_blkcipher; + unsigned int len = alg->cra_ctxsize; + + if (cipher->ivsize) { + len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1); + len += cipher->ivsize; + } + + return len; +} + +static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm) +{ + struct blkcipher_tfm *crt = &tfm->crt_blkcipher; + struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher; + unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1; + unsigned long addr; + + if (alg->ivsize > PAGE_SIZE / 8) + return -EINVAL; + + crt->setkey = setkey; + crt->encrypt = alg->encrypt; + crt->decrypt = alg->decrypt; + + addr = (unsigned long)crypto_tfm_ctx(tfm); + addr = ALIGN(addr, align); + addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align); + crt->iv = (void *)addr; + + return 0; +} + +static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg) + __attribute_used__; +static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg) +{ + seq_printf(m, "type : blkcipher\n"); + seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); + seq_printf(m, "min keysize : %u\n", alg->cra_blkcipher.min_keysize); + seq_printf(m, "max keysize : %u\n", alg->cra_blkcipher.max_keysize); + seq_printf(m, "ivsize : %u\n", alg->cra_blkcipher.ivsize); +} + +const struct crypto_type crypto_blkcipher_type = { + .ctxsize = crypto_blkcipher_ctxsize, + .init = crypto_init_blkcipher_ops, +#ifdef CONFIG_PROC_FS + .show = crypto_blkcipher_show, +#endif +}; +EXPORT_SYMBOL_GPL(crypto_blkcipher_type); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Generic block chaining cipher type"); diff --git a/crypto/blowfish.c b/crypto/blowfish.c index 
490265f42b3b..55238c4e37f0 100644 --- a/crypto/blowfish.c +++ b/crypto/blowfish.c @@ -399,8 +399,7 @@ static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) /* * Calculates the blowfish S and P boxes for encryption and decryption. */ -static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen, u32 *flags) +static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { struct bf_ctx *ctx = crypto_tfm_ctx(tfm); u32 *P = ctx->p; diff --git a/crypto/cast5.c b/crypto/cast5.c index 08eef58c1d3d..13ea60abc19a 100644 --- a/crypto/cast5.c +++ b/crypto/cast5.c @@ -769,8 +769,7 @@ static void key_schedule(u32 * x, u32 * z, u32 * k) } -static int cast5_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned key_len, u32 *flags) +static int cast5_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned key_len) { struct cast5_ctx *c = crypto_tfm_ctx(tfm); int i; @@ -778,11 +777,6 @@ static int cast5_setkey(struct crypto_tfm *tfm, const u8 *key, u32 z[4]; u32 k[16]; __be32 p_key[4]; - - if (key_len < 5 || key_len > 16) { - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; - return -EINVAL; - } c->rr = key_len <= 10 ? 1 : 0; diff --git a/crypto/cast6.c b/crypto/cast6.c index 08e33bfc3ad1..136ab6dfe8c5 100644 --- a/crypto/cast6.c +++ b/crypto/cast6.c @@ -382,14 +382,15 @@ static inline void W(u32 *key, unsigned int i) { } static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key, - unsigned key_len, u32 *flags) + unsigned key_len) { int i; u32 key[8]; __be32 p_key[8]; /* padded key */ struct cast6_ctx *c = crypto_tfm_ctx(tfm); + u32 *flags = &tfm->crt_flags; - if (key_len < 16 || key_len > 32 || key_len % 4 != 0) { + if (key_len % 4 != 0) { *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } diff --git a/crypto/cbc.c b/crypto/cbc.c new file mode 100644 index 000000000000..f5542b4db387 --- /dev/null +++ b/crypto/cbc.c @@ -0,0 +1,344 @@ +/* + * CBC: Cipher Block Chaining mode + * + * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + */ + +#include <crypto/algapi.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> + +struct crypto_cbc_ctx { + struct crypto_cipher *child; + void (*xor)(u8 *dst, const u8 *src, unsigned int bs); +}; + +static int crypto_cbc_setkey(struct crypto_tfm *parent, const u8 *key, + unsigned int keylen) +{ + struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(parent); + struct crypto_cipher *child = ctx->child; + int err; + + crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); + crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); + err = crypto_cipher_setkey(child, key, keylen); + crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & + CRYPTO_TFM_RES_MASK); + return err; +} + +static int crypto_cbc_encrypt_segment(struct blkcipher_desc *desc, + struct blkcipher_walk *walk, + struct crypto_cipher *tfm, + void (*xor)(u8 *, const u8 *, + unsigned int)) +{ + void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = + crypto_cipher_alg(tfm)->cia_encrypt; + int bsize = crypto_cipher_blocksize(tfm); + unsigned int nbytes = walk->nbytes; + u8 *src = walk->src.virt.addr; + u8 *dst = walk->dst.virt.addr; + u8 *iv = walk->iv; + + do { + xor(iv, src, bsize); + fn(crypto_cipher_tfm(tfm), dst, iv); + memcpy(iv, dst, bsize); + + src += bsize; + dst += bsize; + } while ((nbytes -= bsize) >= bsize); + + return nbytes; +} + +static int crypto_cbc_encrypt_inplace(struct blkcipher_desc *desc, + struct blkcipher_walk *walk, + struct crypto_cipher *tfm, + void (*xor)(u8 *, const u8 *, + unsigned int)) +{ + void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = + crypto_cipher_alg(tfm)->cia_encrypt; + int bsize = crypto_cipher_blocksize(tfm); + unsigned int nbytes = walk->nbytes; + u8 *src = walk->src.virt.addr; + u8 *iv = walk->iv; + + do { + xor(src, iv, bsize); + fn(crypto_cipher_tfm(tfm), src, src); + iv = src; + + src += bsize; + } while ((nbytes -= bsize) >= bsize); + + memcpy(walk->iv, iv, bsize); + + return nbytes; +} + +static int crypto_cbc_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct blkcipher_walk walk; + struct crypto_blkcipher *tfm = desc->tfm; + struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm); + struct crypto_cipher *child = ctx->child; + void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor; + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + while ((nbytes = walk.nbytes)) { + if (walk.src.virt.addr == walk.dst.virt.addr) + nbytes = crypto_cbc_encrypt_inplace(desc, &walk, child, + xor); + else + nbytes = crypto_cbc_encrypt_segment(desc, &walk, child, + xor); + err = blkcipher_walk_done(desc, &walk, nbytes); + } + + return err; +} + +static int crypto_cbc_decrypt_segment(struct blkcipher_desc *desc, + struct blkcipher_walk *walk, + struct crypto_cipher *tfm, + void (*xor)(u8 *, const u8 *, + unsigned int)) +{ + void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = + crypto_cipher_alg(tfm)->cia_decrypt; + int bsize = crypto_cipher_blocksize(tfm); + unsigned int nbytes = walk->nbytes; + u8 *src = walk->src.virt.addr; + u8 *dst = walk->dst.virt.addr; + u8 *iv = walk->iv; + + do { + fn(crypto_cipher_tfm(tfm), dst, src); + xor(dst, iv, bsize); + iv = src; + + src += bsize; + dst += bsize; + } while ((nbytes -= bsize) >= bsize); + + memcpy(walk->iv, iv, bsize); + + return nbytes; +} + +static int 
crypto_cbc_decrypt_inplace(struct blkcipher_desc *desc, + struct blkcipher_walk *walk, + struct crypto_cipher *tfm, + void (*xor)(u8 *, const u8 *, + unsigned int)) +{ + void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = + crypto_cipher_alg(tfm)->cia_decrypt; + int bsize = crypto_cipher_blocksize(tfm); + unsigned long alignmask = crypto_cipher_alignmask(tfm); + unsigned int nbytes = walk->nbytes; + u8 *src = walk->src.virt.addr; + u8 stack[bsize + alignmask]; + u8 *first_iv = (u8 *)ALIGN((unsigned long)stack, alignmask + 1); + + memcpy(first_iv, walk->iv, bsize); + + /* Start of the last block. */ + src += nbytes - nbytes % bsize - bsize; + memcpy(walk->iv, src, bsize); + + for (;;) { + fn(crypto_cipher_tfm(tfm), src, src); + if ((nbytes -= bsize) < bsize) + break; + xor(src, src - bsize, bsize); + src -= bsize; + } + + xor(src, first_iv, bsize); + + return nbytes; +} + +static int crypto_cbc_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct blkcipher_walk walk; + struct crypto_blkcipher *tfm = desc->tfm; + struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm); + struct crypto_cipher *child = ctx->child; + void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor; + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + while ((nbytes = walk.nbytes)) { + if (walk.src.virt.addr == walk.dst.virt.addr) + nbytes = crypto_cbc_decrypt_inplace(desc, &walk, child, + xor); + else + nbytes = crypto_cbc_decrypt_segment(desc, &walk, child, + xor); + err = blkcipher_walk_done(desc, &walk, nbytes); + } + + return err; +} + +static void xor_byte(u8 *a, const u8 *b, unsigned int bs) +{ + do { + *a++ ^= *b++; + } while (--bs); +} + +static void xor_quad(u8 *dst, const u8 *src, unsigned int bs) +{ + u32 *a = (u32 *)dst; + u32 *b = (u32 *)src; + + do { + *a++ ^= *b++; + } while ((bs -= 4)); +} + +static void xor_64(u8 *a, const u8 *b, unsigned int bs) +{ + ((u32 *)a)[0] ^= ((u32 *)b)[0]; + ((u32 *)a)[1] ^= ((u32 *)b)[1]; +} + +static void xor_128(u8 *a, const u8 *b, unsigned int bs) +{ + ((u32 *)a)[0] ^= ((u32 *)b)[0]; + ((u32 *)a)[1] ^= ((u32 *)b)[1]; + ((u32 *)a)[2] ^= ((u32 *)b)[2]; + ((u32 *)a)[3] ^= ((u32 *)b)[3]; +} + +static int crypto_cbc_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_instance *inst = (void *)tfm->__crt_alg; + struct crypto_spawn *spawn = crypto_instance_ctx(inst); + struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm); + + switch (crypto_tfm_alg_blocksize(tfm)) { + case 8: + ctx->xor = xor_64; + break; + + case 16: + ctx->xor = xor_128; + break; + + default: + if (crypto_tfm_alg_blocksize(tfm) % 4) + ctx->xor = xor_byte; + else + ctx->xor = xor_quad; + } + + tfm = crypto_spawn_tfm(spawn); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + ctx->child = crypto_cipher_cast(tfm); + return 0; +} + +static void crypto_cbc_exit_tfm(struct crypto_tfm *tfm) +{ + struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm); + crypto_free_cipher(ctx->child); +} + +static struct crypto_instance *crypto_cbc_alloc(void *param, unsigned int len) +{ + struct crypto_instance *inst; + struct crypto_alg *alg; + + alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER, + CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); + if (IS_ERR(alg)) + return ERR_PTR(PTR_ERR(alg)); + + inst = crypto_alloc_instance("cbc", alg); + if (IS_ERR(inst)) + goto out_put_alg; + + inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; + inst->alg.cra_priority = alg->cra_priority; + inst->alg.cra_blocksize = 
alg->cra_blocksize; + inst->alg.cra_alignmask = alg->cra_alignmask; + inst->alg.cra_type = &crypto_blkcipher_type; + + if (!(alg->cra_blocksize % 4)) + inst->alg.cra_alignmask |= 3; + inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; + inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize; + inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize; + + inst->alg.cra_ctxsize = sizeof(struct crypto_cbc_ctx); + + inst->alg.cra_init = crypto_cbc_init_tfm; + inst->alg.cra_exit = crypto_cbc_exit_tfm; + + inst->alg.cra_blkcipher.setkey = crypto_cbc_setkey; + inst->alg.cra_blkcipher.encrypt = crypto_cbc_encrypt; + inst->alg.cra_blkcipher.decrypt = crypto_cbc_decrypt; + +out_put_alg: + crypto_mod_put(alg); + return inst; +} + +static void crypto_cbc_free(struct crypto_instance *inst) +{ + crypto_drop_spawn(crypto_instance_ctx(inst)); + kfree(inst); +} + +static struct crypto_template crypto_cbc_tmpl = { + .name = "cbc", + .alloc = crypto_cbc_alloc, + .free = crypto_cbc_free, + .module = THIS_MODULE, +}; + +static int __init crypto_cbc_module_init(void) +{ + return crypto_register_template(&crypto_cbc_tmpl); +} + +static void __exit crypto_cbc_module_exit(void) +{ + crypto_unregister_template(&crypto_cbc_tmpl); +} + +module_init(crypto_cbc_module_init); +module_exit(crypto_cbc_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("CBC block cipher algorithm"); diff --git a/crypto/cipher.c b/crypto/cipher.c index b899eb97abd7..9e03701cfdcc 100644 --- a/crypto/cipher.c +++ b/crypto/cipher.c @@ -23,6 +23,28 @@ #include "internal.h" #include "scatterwalk.h" +struct cipher_alg_compat { + unsigned int cia_min_keysize; + unsigned int cia_max_keysize; + int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen); + void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); + void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); + + unsigned int (*cia_encrypt_ecb)(const struct cipher_desc *desc, + u8 *dst, const u8 *src, + unsigned int nbytes); + unsigned int (*cia_decrypt_ecb)(const struct cipher_desc *desc, + u8 *dst, const u8 *src, + unsigned int nbytes); + unsigned int (*cia_encrypt_cbc)(const struct cipher_desc *desc, + u8 *dst, const u8 *src, + unsigned int nbytes); + unsigned int (*cia_decrypt_cbc)(const struct cipher_desc *desc, + u8 *dst, const u8 *src, + unsigned int nbytes); +}; + static inline void xor_64(u8 *a, const u8 *b) { ((u32 *)a)[0] ^= ((u32 *)b)[0]; @@ -45,15 +67,10 @@ static unsigned int crypt_slow(const struct cipher_desc *desc, u8 buffer[bsize * 2 + alignmask]; u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); u8 *dst = src + bsize; - unsigned int n; - - n = scatterwalk_copychunks(src, in, bsize, 0); - scatterwalk_advance(in, n); + scatterwalk_copychunks(src, in, bsize, 0); desc->prfn(desc, dst, src, bsize); - - n = scatterwalk_copychunks(dst, out, bsize, 1); - scatterwalk_advance(out, n); + scatterwalk_copychunks(dst, out, bsize, 1); return bsize; } @@ -64,12 +81,16 @@ static inline unsigned int crypt_fast(const struct cipher_desc *desc, unsigned int nbytes, u8 *tmp) { u8 *src, *dst; + u8 *real_src, *real_dst; + + real_src = scatterwalk_map(in, 0); + real_dst = scatterwalk_map(out, 1); - src = in->data; - dst = scatterwalk_samebuf(in, out) ? src : out->data; + src = real_src; + dst = scatterwalk_samebuf(in, out) ? 
src : real_dst; if (tmp) { - memcpy(tmp, in->data, nbytes); + memcpy(tmp, src, nbytes); src = tmp; dst = tmp; } @@ -77,7 +98,10 @@ static inline unsigned int crypt_fast(const struct cipher_desc *desc, nbytes = desc->prfn(desc, dst, src, nbytes); if (tmp) - memcpy(out->data, tmp, nbytes); + memcpy(real_dst, tmp, nbytes); + + scatterwalk_unmap(real_src, 0); + scatterwalk_unmap(real_dst, 1); scatterwalk_advance(in, nbytes); scatterwalk_advance(out, nbytes); @@ -126,9 +150,6 @@ static int crypt(const struct cipher_desc *desc, tmp = (u8 *)buffer; } - scatterwalk_map(&walk_in, 0); - scatterwalk_map(&walk_out, 1); - n = scatterwalk_clamp(&walk_in, n); n = scatterwalk_clamp(&walk_out, n); @@ -145,7 +166,7 @@ static int crypt(const struct cipher_desc *desc, if (!nbytes) break; - crypto_yield(tfm); + crypto_yield(tfm->crt_flags); } if (buffer) @@ -264,12 +285,12 @@ static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher; + tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) { tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } else - return cia->cia_setkey(tfm, key, keylen, - &tfm->crt_flags); + return cia->cia_setkey(tfm, key, keylen); } static int ecb_encrypt(struct crypto_tfm *tfm, @@ -277,7 +298,7 @@ static int ecb_encrypt(struct crypto_tfm *tfm, struct scatterlist *src, unsigned int nbytes) { struct cipher_desc desc; - struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; + struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher; desc.tfm = tfm; desc.crfn = cipher->cia_encrypt; @@ -292,7 +313,7 @@ static int ecb_decrypt(struct crypto_tfm *tfm, unsigned int nbytes) { struct cipher_desc desc; - struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; + struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher; desc.tfm = tfm; desc.crfn = cipher->cia_decrypt; @@ -307,7 +328,7 @@ static int cbc_encrypt(struct crypto_tfm *tfm, unsigned int nbytes) { struct cipher_desc desc; - struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; + struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher; desc.tfm = tfm; desc.crfn = cipher->cia_encrypt; @@ -323,7 +344,7 @@ static int cbc_encrypt_iv(struct crypto_tfm *tfm, unsigned int nbytes, u8 *iv) { struct cipher_desc desc; - struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; + struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher; desc.tfm = tfm; desc.crfn = cipher->cia_encrypt; @@ -339,7 +360,7 @@ static int cbc_decrypt(struct crypto_tfm *tfm, unsigned int nbytes) { struct cipher_desc desc; - struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; + struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher; desc.tfm = tfm; desc.crfn = cipher->cia_decrypt; @@ -355,7 +376,7 @@ static int cbc_decrypt_iv(struct crypto_tfm *tfm, unsigned int nbytes, u8 *iv) { struct cipher_desc desc; - struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; + struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher; desc.tfm = tfm; desc.crfn = cipher->cia_decrypt; @@ -388,17 +409,67 @@ int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags) return 0; } +static void cipher_crypt_unaligned(void (*fn)(struct crypto_tfm *, u8 *, + const u8 *), + struct crypto_tfm *tfm, + u8 *dst, const u8 *src) +{ + unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); + unsigned int size = crypto_tfm_alg_blocksize(tfm); + u8 buffer[size + alignmask]; + 
u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); + + memcpy(tmp, src, size); + fn(tfm, tmp, tmp); + memcpy(dst, tmp, size); +} + +static void cipher_encrypt_unaligned(struct crypto_tfm *tfm, + u8 *dst, const u8 *src) +{ + unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); + struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; + + if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) { + cipher_crypt_unaligned(cipher->cia_encrypt, tfm, dst, src); + return; + } + + cipher->cia_encrypt(tfm, dst, src); +} + +static void cipher_decrypt_unaligned(struct crypto_tfm *tfm, + u8 *dst, const u8 *src) +{ + unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); + struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; + + if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) { + cipher_crypt_unaligned(cipher->cia_decrypt, tfm, dst, src); + return; + } + + cipher->cia_decrypt(tfm, dst, src); +} + int crypto_init_cipher_ops(struct crypto_tfm *tfm) { int ret = 0; struct cipher_tfm *ops = &tfm->crt_cipher; + struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; ops->cit_setkey = setkey; + ops->cit_encrypt_one = crypto_tfm_alg_alignmask(tfm) ? + cipher_encrypt_unaligned : cipher->cia_encrypt; + ops->cit_decrypt_one = crypto_tfm_alg_alignmask(tfm) ? + cipher_decrypt_unaligned : cipher->cia_decrypt; switch (tfm->crt_cipher.cit_mode) { case CRYPTO_TFM_MODE_ECB: ops->cit_encrypt = ecb_encrypt; ops->cit_decrypt = ecb_decrypt; + ops->cit_encrypt_iv = nocrypt_iv; + ops->cit_decrypt_iv = nocrypt_iv; break; case CRYPTO_TFM_MODE_CBC: diff --git a/crypto/crc32c.c b/crypto/crc32c.c index f2660123aeb4..0fa744392a4c 100644 --- a/crypto/crc32c.c +++ b/crypto/crc32c.c @@ -16,14 +16,14 @@ #include <linux/string.h> #include <linux/crypto.h> #include <linux/crc32c.h> -#include <linux/types.h> -#include <asm/byteorder.h> +#include <linux/kernel.h> #define CHKSUM_BLOCK_SIZE 32 #define CHKSUM_DIGEST_SIZE 4 struct chksum_ctx { u32 crc; + u32 key; }; /* @@ -35,7 +35,7 @@ static void chksum_init(struct crypto_tfm *tfm) { struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); - mctx->crc = ~(u32)0; /* common usage */ + mctx->crc = mctx->key; } /* @@ -44,16 +44,15 @@ static void chksum_init(struct crypto_tfm *tfm) * the seed. 
*/ static int chksum_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen, u32 *flags) + unsigned int keylen) { struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); if (keylen != sizeof(mctx->crc)) { - if (flags) - *flags = CRYPTO_TFM_RES_BAD_KEY_LEN; + tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } - mctx->crc = __cpu_to_le32(*(u32 *)key); + mctx->key = le32_to_cpu(*(__le32 *)key); return 0; } @@ -61,19 +60,23 @@ static void chksum_update(struct crypto_tfm *tfm, const u8 *data, unsigned int length) { struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); - u32 mcrc; - mcrc = crc32c(mctx->crc, data, (size_t)length); - - mctx->crc = mcrc; + mctx->crc = crc32c(mctx->crc, data, length); } static void chksum_final(struct crypto_tfm *tfm, u8 *out) { struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); - u32 mcrc = (mctx->crc ^ ~(u32)0); - *(u32 *)out = __le32_to_cpu(mcrc); + *(__le32 *)out = ~cpu_to_le32(mctx->crc); +} + +static int crc32c_cra_init(struct crypto_tfm *tfm) +{ + struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); + + mctx->key = ~0; + return 0; } static struct crypto_alg alg = { @@ -83,6 +86,7 @@ static struct crypto_alg alg = { .cra_ctxsize = sizeof(struct chksum_ctx), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(alg.cra_list), + .cra_init = crc32c_cra_init, .cra_u = { .digest = { .dia_digestsize= CHKSUM_DIGEST_SIZE, diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c index a0d956b52949..24dbb5d8617e 100644 --- a/crypto/crypto_null.c +++ b/crypto/crypto_null.c @@ -48,7 +48,7 @@ static void null_final(struct crypto_tfm *tfm, u8 *out) { } static int null_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen, u32 *flags) + unsigned int keylen) { return 0; } static void null_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) diff --git a/crypto/cryptomgr.c b/crypto/cryptomgr.c new file mode 100644 index 000000000000..9b5b15601068 --- /dev/null +++ b/crypto/cryptomgr.c @@ -0,0 +1,156 @@ +/* + * Create default crypto algorithm instances. + * + * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + */ + +#include <linux/crypto.h> +#include <linux/ctype.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/notifier.h> +#include <linux/rtnetlink.h> +#include <linux/sched.h> +#include <linux/string.h> +#include <linux/workqueue.h> + +#include "internal.h" + +struct cryptomgr_param { + struct work_struct work; + + struct { + struct rtattr attr; + struct crypto_attr_alg data; + } alg; + + struct { + u32 type; + u32 mask; + char name[CRYPTO_MAX_ALG_NAME]; + } larval; + + char template[CRYPTO_MAX_ALG_NAME]; +}; + +static void cryptomgr_probe(void *data) +{ + struct cryptomgr_param *param = data; + struct crypto_template *tmpl; + struct crypto_instance *inst; + int err; + + tmpl = crypto_lookup_template(param->template); + if (!tmpl) + goto err; + + do { + inst = tmpl->alloc(&param->alg, sizeof(param->alg)); + if (IS_ERR(inst)) + err = PTR_ERR(inst); + else if ((err = crypto_register_instance(tmpl, inst))) + tmpl->free(inst); + } while (err == -EAGAIN && !signal_pending(current)); + + crypto_tmpl_put(tmpl); + + if (err) + goto err; + +out: + kfree(param); + return; + +err: + crypto_larval_error(param->larval.name, param->larval.type, + param->larval.mask); + goto out; +} + +static int cryptomgr_schedule_probe(struct crypto_larval *larval) +{ + struct cryptomgr_param *param; + const char *name = larval->alg.cra_name; + const char *p; + unsigned int len; + + param = kmalloc(sizeof(*param), GFP_KERNEL); + if (!param) + goto err; + + for (p = name; isalnum(*p) || *p == '-' || *p == '_'; p++) + ; + + len = p - name; + if (!len || *p != '(') + goto err_free_param; + + memcpy(param->template, name, len); + param->template[len] = 0; + + name = p + 1; + for (p = name; isalnum(*p) || *p == '-' || *p == '_'; p++) + ; + + len = p - name; + if (!len || *p != ')' || p[1]) + goto err_free_param; + + param->alg.attr.rta_len = sizeof(param->alg); + param->alg.attr.rta_type = CRYPTOA_ALG; + memcpy(param->alg.data.name, name, len); + param->alg.data.name[len] = 0; + + memcpy(param->larval.name, larval->alg.cra_name, CRYPTO_MAX_ALG_NAME); + param->larval.type = larval->alg.cra_flags; + param->larval.mask = larval->mask; + + INIT_WORK(&param->work, cryptomgr_probe, param); + schedule_work(&param->work); + + return NOTIFY_STOP; + +err_free_param: + kfree(param); +err: + return NOTIFY_OK; +} + +static int cryptomgr_notify(struct notifier_block *this, unsigned long msg, + void *data) +{ + switch (msg) { + case CRYPTO_MSG_ALG_REQUEST: + return cryptomgr_schedule_probe(data); + } + + return NOTIFY_DONE; +} + +static struct notifier_block cryptomgr_notifier = { + .notifier_call = cryptomgr_notify, +}; + +static int __init cryptomgr_init(void) +{ + return crypto_register_notifier(&cryptomgr_notifier); +} + +static void __exit cryptomgr_exit(void) +{ + int err = crypto_unregister_notifier(&cryptomgr_notifier); + BUG_ON(err); +} + +module_init(cryptomgr_init); +module_exit(cryptomgr_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Crypto Algorithm Manager"); diff --git a/crypto/des.c b/crypto/des.c index a9d3c235a6af..1df3a714fa47 100644 --- a/crypto/des.c +++ b/crypto/des.c @@ -784,9 +784,10 @@ static void dkey(u32 *pe, const u8 *k) } static int des_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen, u32 *flags) + unsigned int keylen) { struct des_ctx *dctx = crypto_tfm_ctx(tfm); + u32 *flags = &tfm->crt_flags; u32 tmp[DES_EXPKEY_WORDS]; int ret; @@ -864,11 +865,12 @@ static void des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) * */ static int
des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen, u32 *flags) + unsigned int keylen) { const u32 *K = (const u32 *)key; struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm); u32 *expkey = dctx->expkey; + u32 *flags = &tfm->crt_flags; if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || !((K[2] ^ K[4]) | (K[3] ^ K[5])))) diff --git a/crypto/digest.c b/crypto/digest.c index 603006a7bef2..0155a94e4b15 100644 --- a/crypto/digest.c +++ b/crypto/digest.c @@ -11,29 +11,89 @@ * any later version. * */ -#include <linux/crypto.h> + #include <linux/mm.h> #include <linux/errno.h> #include <linux/highmem.h> -#include <asm/scatterlist.h> +#include <linux/module.h> +#include <linux/scatterlist.h> + #include "internal.h" +#include "scatterwalk.h" -static void init(struct crypto_tfm *tfm) +void crypto_digest_init(struct crypto_tfm *tfm) { - tfm->__crt_alg->cra_digest.dia_init(tfm); + struct crypto_hash *hash = crypto_hash_cast(tfm); + struct hash_desc desc = { .tfm = hash, .flags = tfm->crt_flags }; + + crypto_hash_init(&desc); +} +EXPORT_SYMBOL_GPL(crypto_digest_init); + +void crypto_digest_update(struct crypto_tfm *tfm, + struct scatterlist *sg, unsigned int nsg) +{ + struct crypto_hash *hash = crypto_hash_cast(tfm); + struct hash_desc desc = { .tfm = hash, .flags = tfm->crt_flags }; + unsigned int nbytes = 0; + unsigned int i; + + for (i = 0; i < nsg; i++) + nbytes += sg[i].length; + + crypto_hash_update(&desc, sg, nbytes); +} +EXPORT_SYMBOL_GPL(crypto_digest_update); + +void crypto_digest_final(struct crypto_tfm *tfm, u8 *out) +{ + struct crypto_hash *hash = crypto_hash_cast(tfm); + struct hash_desc desc = { .tfm = hash, .flags = tfm->crt_flags }; + + crypto_hash_final(&desc, out); } +EXPORT_SYMBOL_GPL(crypto_digest_final); -static void update(struct crypto_tfm *tfm, - struct scatterlist *sg, unsigned int nsg) +void crypto_digest_digest(struct crypto_tfm *tfm, + struct scatterlist *sg, unsigned int nsg, u8 *out) { + struct crypto_hash *hash = crypto_hash_cast(tfm); + struct hash_desc desc = { .tfm = hash, .flags = tfm->crt_flags }; + unsigned int nbytes = 0; unsigned int i; + + for (i = 0; i < nsg; i++) + nbytes += sg[i].length; + + crypto_hash_digest(&desc, sg, nbytes, out); +} +EXPORT_SYMBOL_GPL(crypto_digest_digest); + +static int init(struct hash_desc *desc) +{ + struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm); + + tfm->__crt_alg->cra_digest.dia_init(tfm); + return 0; +} + +static int update(struct hash_desc *desc, + struct scatterlist *sg, unsigned int nbytes) +{ + struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm); unsigned int alignmask = crypto_tfm_alg_alignmask(tfm); - for (i = 0; i < nsg; i++) { + if (!nbytes) + return 0; + + for (;;) { + struct page *pg = sg->page; + unsigned int offset = sg->offset; + unsigned int l = sg->length; - struct page *pg = sg[i].page; - unsigned int offset = sg[i].offset; - unsigned int l = sg[i].length; + if (unlikely(l > nbytes)) + l = nbytes; + nbytes -= l; do { unsigned int bytes_from_page = min(l, ((unsigned int) @@ -55,41 +115,60 @@ static void update(struct crypto_tfm *tfm, tfm->__crt_alg->cra_digest.dia_update(tfm, p, bytes_from_page); crypto_kunmap(src, 0); - crypto_yield(tfm); + crypto_yield(desc->flags); offset = 0; pg++; l -= bytes_from_page; } while (l > 0); + + if (!nbytes) + break; + sg = sg_next(sg); } + + return 0; } -static void final(struct crypto_tfm *tfm, u8 *out) +static int final(struct hash_desc *desc, u8 *out) { + struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm); unsigned long alignmask = 
crypto_tfm_alg_alignmask(tfm); + struct digest_alg *digest = &tfm->__crt_alg->cra_digest; + if (unlikely((unsigned long)out & alignmask)) { - unsigned int size = crypto_tfm_alg_digestsize(tfm); - u8 buffer[size + alignmask]; - u8 *dst = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); - tfm->__crt_alg->cra_digest.dia_final(tfm, dst); - memcpy(out, dst, size); + unsigned long align = alignmask + 1; + unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm); + u8 *dst = (u8 *)ALIGN(addr, align) + + ALIGN(tfm->__crt_alg->cra_ctxsize, align); + + digest->dia_final(tfm, dst); + memcpy(out, dst, digest->dia_digestsize); } else - tfm->__crt_alg->cra_digest.dia_final(tfm, out); + digest->dia_final(tfm, out); + + return 0; +} + +static int nosetkey(struct crypto_hash *tfm, const u8 *key, unsigned int keylen) +{ + crypto_hash_clear_flags(tfm, CRYPTO_TFM_RES_MASK); + return -ENOSYS; } -static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) +static int setkey(struct crypto_hash *hash, const u8 *key, unsigned int keylen) { - u32 flags; - if (tfm->__crt_alg->cra_digest.dia_setkey == NULL) - return -ENOSYS; - return tfm->__crt_alg->cra_digest.dia_setkey(tfm, key, keylen, &flags); + struct crypto_tfm *tfm = crypto_hash_tfm(hash); + + crypto_hash_clear_flags(hash, CRYPTO_TFM_RES_MASK); + return tfm->__crt_alg->cra_digest.dia_setkey(tfm, key, keylen); } -static void digest(struct crypto_tfm *tfm, - struct scatterlist *sg, unsigned int nsg, u8 *out) +static int digest(struct hash_desc *desc, + struct scatterlist *sg, unsigned int nbytes, u8 *out) { - init(tfm); - update(tfm, sg, nsg); - final(tfm, out); + init(desc); + update(desc, sg, nbytes); + return final(desc, out); } int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags) @@ -99,18 +178,22 @@ int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags) int crypto_init_digest_ops(struct crypto_tfm *tfm) { - struct digest_tfm *ops = &tfm->crt_digest; + struct hash_tfm *ops = &tfm->crt_hash; + struct digest_alg *dalg = &tfm->__crt_alg->cra_digest; + + if (dalg->dia_digestsize > crypto_tfm_alg_blocksize(tfm)) + return -EINVAL; - ops->dit_init = init; - ops->dit_update = update; - ops->dit_final = final; - ops->dit_digest = digest; - ops->dit_setkey = setkey; + ops->init = init; + ops->update = update; + ops->final = final; + ops->digest = digest; + ops->setkey = dalg->dia_setkey ? setkey : nosetkey; + ops->digestsize = dalg->dia_digestsize; - return crypto_alloc_hmac_block(tfm); + return 0; } void crypto_exit_digest_ops(struct crypto_tfm *tfm) { - crypto_free_hmac_block(tfm); } diff --git a/crypto/ecb.c b/crypto/ecb.c new file mode 100644 index 000000000000..f239aa9c4017 --- /dev/null +++ b/crypto/ecb.c @@ -0,0 +1,181 @@ +/* + * ECB: Electronic CodeBook mode + * + * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
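Before the ecb.c template code that follows, a minimal sketch of what it enables: wrapping any plain cipher in ECB through the instance machinery, driven by the blkcipher interface exactly as the converted tcrypt.c does later in this patch. The key and data buffers are illustrative, and fail() stands in for error handling:

    struct crypto_blkcipher *tfm;
    struct blkcipher_desc desc;
    struct scatterlist sg[1];

    tfm = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
    if (IS_ERR(tfm))
            fail();

    desc.tfm = tfm;
    desc.flags = 0;

    if (crypto_blkcipher_setkey(tfm, key, 16))  /* key: 16-byte AES key */
            fail();

    sg_set_buf(sg, data, 16);                   /* data: one block */
    if (crypto_blkcipher_encrypt(&desc, sg, sg, 16))
            fail();

    crypto_free_blkcipher(tfm);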
+ * + */ + +#include <crypto/algapi.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> + +struct crypto_ecb_ctx { + struct crypto_cipher *child; +}; + +static int crypto_ecb_setkey(struct crypto_tfm *parent, const u8 *key, + unsigned int keylen) +{ + struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(parent); + struct crypto_cipher *child = ctx->child; + int err; + + crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); + crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & + CRYPTO_TFM_REQ_MASK); + err = crypto_cipher_setkey(child, key, keylen); + crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & + CRYPTO_TFM_RES_MASK); + return err; +} + +static int crypto_ecb_crypt(struct blkcipher_desc *desc, + struct blkcipher_walk *walk, + struct crypto_cipher *tfm, + void (*fn)(struct crypto_tfm *, u8 *, const u8 *)) +{ + int bsize = crypto_cipher_blocksize(tfm); + unsigned int nbytes; + int err; + + err = blkcipher_walk_virt(desc, walk); + + while ((nbytes = walk->nbytes)) { + u8 *wsrc = walk->src.virt.addr; + u8 *wdst = walk->dst.virt.addr; + + do { + fn(crypto_cipher_tfm(tfm), wdst, wsrc); + + wsrc += bsize; + wdst += bsize; + } while ((nbytes -= bsize) >= bsize); + + err = blkcipher_walk_done(desc, walk, nbytes); + } + + return err; +} + +static int crypto_ecb_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct blkcipher_walk walk; + struct crypto_blkcipher *tfm = desc->tfm; + struct crypto_ecb_ctx *ctx = crypto_blkcipher_ctx(tfm); + struct crypto_cipher *child = ctx->child; + + blkcipher_walk_init(&walk, dst, src, nbytes); + return crypto_ecb_crypt(desc, &walk, child, + crypto_cipher_alg(child)->cia_encrypt); +} + +static int crypto_ecb_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct blkcipher_walk walk; + struct crypto_blkcipher *tfm = desc->tfm; + struct crypto_ecb_ctx *ctx = crypto_blkcipher_ctx(tfm); + struct crypto_cipher *child = ctx->child; + + blkcipher_walk_init(&walk, dst, src, nbytes); + return crypto_ecb_crypt(desc, &walk, child, + crypto_cipher_alg(child)->cia_decrypt); +} + +static int crypto_ecb_init_tfm(struct crypto_tfm *tfm) +{ + struct crypto_instance *inst = (void *)tfm->__crt_alg; + struct crypto_spawn *spawn = crypto_instance_ctx(inst); + struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(tfm); + + tfm = crypto_spawn_tfm(spawn); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + ctx->child = crypto_cipher_cast(tfm); + return 0; +} + +static void crypto_ecb_exit_tfm(struct crypto_tfm *tfm) +{ + struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(tfm); + crypto_free_cipher(ctx->child); +} + +static struct crypto_instance *crypto_ecb_alloc(void *param, unsigned int len) +{ + struct crypto_instance *inst; + struct crypto_alg *alg; + + alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER, + CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); + if (IS_ERR(alg)) + return ERR_PTR(PTR_ERR(alg)); + + inst = crypto_alloc_instance("ecb", alg); + if (IS_ERR(inst)) + goto out_put_alg; + + inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; + inst->alg.cra_priority = alg->cra_priority; + inst->alg.cra_blocksize = alg->cra_blocksize; + inst->alg.cra_alignmask = alg->cra_alignmask; + inst->alg.cra_type = &crypto_blkcipher_type; + + inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize; + inst->alg.cra_blkcipher.max_keysize = 
alg->cra_cipher.cia_max_keysize; + + inst->alg.cra_ctxsize = sizeof(struct crypto_ecb_ctx); + + inst->alg.cra_init = crypto_ecb_init_tfm; + inst->alg.cra_exit = crypto_ecb_exit_tfm; + + inst->alg.cra_blkcipher.setkey = crypto_ecb_setkey; + inst->alg.cra_blkcipher.encrypt = crypto_ecb_encrypt; + inst->alg.cra_blkcipher.decrypt = crypto_ecb_decrypt; + +out_put_alg: + crypto_mod_put(alg); + return inst; +} + +static void crypto_ecb_free(struct crypto_instance *inst) +{ + crypto_drop_spawn(crypto_instance_ctx(inst)); + kfree(inst); +} + +static struct crypto_template crypto_ecb_tmpl = { + .name = "ecb", + .alloc = crypto_ecb_alloc, + .free = crypto_ecb_free, + .module = THIS_MODULE, +}; + +static int __init crypto_ecb_module_init(void) +{ + return crypto_register_template(&crypto_ecb_tmpl); +} + +static void __exit crypto_ecb_module_exit(void) +{ + crypto_unregister_template(&crypto_ecb_tmpl); +} + +module_init(crypto_ecb_module_init); +module_exit(crypto_ecb_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("ECB block cipher algorithm"); diff --git a/crypto/hash.c b/crypto/hash.c new file mode 100644 index 000000000000..cdec23d885fe --- /dev/null +++ b/crypto/hash.c @@ -0,0 +1,61 @@ +/* + * Cryptographic Hash operations. + * + * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/seq_file.h> + +#include "internal.h" + +static unsigned int crypto_hash_ctxsize(struct crypto_alg *alg) +{ + return alg->cra_ctxsize; +} + +static int crypto_init_hash_ops(struct crypto_tfm *tfm) +{ + struct hash_tfm *crt = &tfm->crt_hash; + struct hash_alg *alg = &tfm->__crt_alg->cra_hash; + + if (alg->digestsize > crypto_tfm_alg_blocksize(tfm)) + return -EINVAL; + + crt->init = alg->init; + crt->update = alg->update; + crt->final = alg->final; + crt->digest = alg->digest; + crt->setkey = alg->setkey; + crt->digestsize = alg->digestsize; + + return 0; +} + +static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg) + __attribute_used__; +static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg) +{ + seq_printf(m, "type : hash\n"); + seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); + seq_printf(m, "digestsize : %u\n", alg->cra_hash.digestsize); +} + +const struct crypto_type crypto_hash_type = { + .ctxsize = crypto_hash_ctxsize, + .init = crypto_init_hash_ops, +#ifdef CONFIG_PROC_FS + .show = crypto_hash_show, +#endif +}; +EXPORT_SYMBOL_GPL(crypto_hash_type); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Generic cryptographic hash type"); diff --git a/crypto/hmac.c b/crypto/hmac.c index 46120dee5ada..f403b6946047 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c @@ -4,121 +4,249 @@ * HMAC: Keyed-Hashing for Message Authentication (RFC2104). * * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> + * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> * * The HMAC implementation is derived from USAGI. 
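The rewrite below turns HMAC into a template instantiated as "hmac(<hash>)". A minimal caller-side sketch against the new hash interface, mirroring the converted test_hash() later in this patch; the key, data and length variables are assumed, and mac is sized for SHA-1:

    struct crypto_hash *tfm;
    struct hash_desc desc;
    struct scatterlist sg[1];
    u8 mac[20];                 /* SHA-1 digest size */

    tfm = crypto_alloc_hash("hmac(sha1)", 0, CRYPTO_ALG_ASYNC);
    if (IS_ERR(tfm))
            fail();

    desc.tfm = tfm;
    desc.flags = 0;

    if (crypto_hash_setkey(tfm, key, keylen))
            fail();

    sg_set_buf(sg, data, datalen);
    if (crypto_hash_digest(&desc, sg, datalen, mac))
            fail();

    crypto_free_hash(tfm);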
* Copyright (c) 2002 Kazunori Miyazawa <miyazawa@linux-ipv6.org> / USAGI * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) + * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ -#include <linux/crypto.h> -#include <linux/mm.h> -#include <linux/highmem.h> -#include <linux/slab.h> + +#include <crypto/algapi.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> #include <linux/scatterlist.h> -#include "internal.h" +#include <linux/slab.h> +#include <linux/string.h> -static void hash_key(struct crypto_tfm *tfm, u8 *key, unsigned int keylen) +struct hmac_ctx { + struct crypto_hash *child; +}; + +static inline void *align_ptr(void *p, unsigned int align) { - struct scatterlist tmp; - - sg_set_buf(&tmp, key, keylen); - crypto_digest_digest(tfm, &tmp, 1, key); + return (void *)ALIGN((unsigned long)p, align); } -int crypto_alloc_hmac_block(struct crypto_tfm *tfm) +static inline struct hmac_ctx *hmac_ctx(struct crypto_hash *tfm) { - int ret = 0; + return align_ptr(crypto_hash_ctx_aligned(tfm) + + crypto_hash_blocksize(tfm) * 2 + + crypto_hash_digestsize(tfm), sizeof(void *)); +} + +static int hmac_setkey(struct crypto_hash *parent, + const u8 *inkey, unsigned int keylen) +{ + int bs = crypto_hash_blocksize(parent); + int ds = crypto_hash_digestsize(parent); + char *ipad = crypto_hash_ctx_aligned(parent); + char *opad = ipad + bs; + char *digest = opad + bs; + struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *)); + struct crypto_hash *tfm = ctx->child; + unsigned int i; + + if (keylen > bs) { + struct hash_desc desc; + struct scatterlist tmp; + int err; + + desc.tfm = tfm; + desc.flags = crypto_hash_get_flags(parent); + desc.flags &= CRYPTO_TFM_REQ_MAY_SLEEP; + sg_set_buf(&tmp, inkey, keylen); - BUG_ON(!crypto_tfm_alg_blocksize(tfm)); - - tfm->crt_digest.dit_hmac_block = kmalloc(crypto_tfm_alg_blocksize(tfm), - GFP_KERNEL); - if (tfm->crt_digest.dit_hmac_block == NULL) - ret = -ENOMEM; + err = crypto_hash_digest(&desc, &tmp, keylen, digest); + if (err) + return err; - return ret; - + inkey = digest; + keylen = ds; + } + + memcpy(ipad, inkey, keylen); + memset(ipad + keylen, 0, bs - keylen); + memcpy(opad, ipad, bs); + + for (i = 0; i < bs; i++) { + ipad[i] ^= 0x36; + opad[i] ^= 0x5c; + } + + return 0; } -void crypto_free_hmac_block(struct crypto_tfm *tfm) +static int hmac_init(struct hash_desc *pdesc) { - kfree(tfm->crt_digest.dit_hmac_block); + struct crypto_hash *parent = pdesc->tfm; + int bs = crypto_hash_blocksize(parent); + int ds = crypto_hash_digestsize(parent); + char *ipad = crypto_hash_ctx_aligned(parent); + struct hmac_ctx *ctx = align_ptr(ipad + bs * 2 + ds, sizeof(void *)); + struct hash_desc desc; + struct scatterlist tmp; + + desc.tfm = ctx->child; + desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; + sg_set_buf(&tmp, ipad, bs); + + return unlikely(crypto_hash_init(&desc)) ?: + crypto_hash_update(&desc, &tmp, 1); } -void crypto_hmac_init(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen) +static int hmac_update(struct hash_desc *pdesc, + struct scatterlist *sg, unsigned int nbytes) { - unsigned int i; + struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm); + struct hash_desc desc; + + desc.tfm = ctx->child; + desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; + + return crypto_hash_update(&desc, 
sg, nbytes); +} + +static int hmac_final(struct hash_desc *pdesc, u8 *out) +{ + struct crypto_hash *parent = pdesc->tfm; + int bs = crypto_hash_blocksize(parent); + int ds = crypto_hash_digestsize(parent); + char *opad = crypto_hash_ctx_aligned(parent) + bs; + char *digest = opad + bs; + struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *)); + struct hash_desc desc; struct scatterlist tmp; - char *ipad = tfm->crt_digest.dit_hmac_block; - - if (*keylen > crypto_tfm_alg_blocksize(tfm)) { - hash_key(tfm, key, *keylen); - *keylen = crypto_tfm_alg_digestsize(tfm); - } - memset(ipad, 0, crypto_tfm_alg_blocksize(tfm)); - memcpy(ipad, key, *keylen); + desc.tfm = ctx->child; + desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; + sg_set_buf(&tmp, opad, bs + ds); - for (i = 0; i < crypto_tfm_alg_blocksize(tfm); i++) - ipad[i] ^= 0x36; + return unlikely(crypto_hash_final(&desc, digest)) ?: + crypto_hash_digest(&desc, &tmp, bs + ds, out); +} - sg_set_buf(&tmp, ipad, crypto_tfm_alg_blocksize(tfm)); - - crypto_digest_init(tfm); - crypto_digest_update(tfm, &tmp, 1); +static int hmac_digest(struct hash_desc *pdesc, struct scatterlist *sg, + unsigned int nbytes, u8 *out) +{ + struct crypto_hash *parent = pdesc->tfm; + int bs = crypto_hash_blocksize(parent); + int ds = crypto_hash_digestsize(parent); + char *ipad = crypto_hash_ctx_aligned(parent); + char *opad = ipad + bs; + char *digest = opad + bs; + struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *)); + struct hash_desc desc; + struct scatterlist sg1[2]; + struct scatterlist sg2[1]; + + desc.tfm = ctx->child; + desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; + + sg_set_buf(sg1, ipad, bs); + sg1[1].page = (void *)sg; + sg1[1].length = 0; + sg_set_buf(sg2, opad, bs + ds); + + return unlikely(crypto_hash_digest(&desc, sg1, nbytes + bs, digest)) ?: + crypto_hash_digest(&desc, sg2, bs + ds, out); } -void crypto_hmac_update(struct crypto_tfm *tfm, - struct scatterlist *sg, unsigned int nsg) +static int hmac_init_tfm(struct crypto_tfm *tfm) { - crypto_digest_update(tfm, sg, nsg); + struct crypto_instance *inst = (void *)tfm->__crt_alg; + struct crypto_spawn *spawn = crypto_instance_ctx(inst); + struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm)); + + tfm = crypto_spawn_tfm(spawn); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + ctx->child = crypto_hash_cast(tfm); + return 0; } -void crypto_hmac_final(struct crypto_tfm *tfm, u8 *key, - unsigned int *keylen, u8 *out) +static void hmac_exit_tfm(struct crypto_tfm *tfm) { - unsigned int i; - struct scatterlist tmp; - char *opad = tfm->crt_digest.dit_hmac_block; - - if (*keylen > crypto_tfm_alg_blocksize(tfm)) { - hash_key(tfm, key, *keylen); - *keylen = crypto_tfm_alg_digestsize(tfm); - } + struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm)); + crypto_free_hash(ctx->child); +} - crypto_digest_final(tfm, out); +static void hmac_free(struct crypto_instance *inst) +{ + crypto_drop_spawn(crypto_instance_ctx(inst)); + kfree(inst); +} - memset(opad, 0, crypto_tfm_alg_blocksize(tfm)); - memcpy(opad, key, *keylen); - - for (i = 0; i < crypto_tfm_alg_blocksize(tfm); i++) - opad[i] ^= 0x5c; +static struct crypto_instance *hmac_alloc(void *param, unsigned int len) +{ + struct crypto_instance *inst; + struct crypto_alg *alg; + + alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_HASH, + CRYPTO_ALG_TYPE_HASH_MASK | CRYPTO_ALG_ASYNC); + if (IS_ERR(alg)) + return ERR_PTR(PTR_ERR(alg)); + + inst = crypto_alloc_instance("hmac", alg); + if (IS_ERR(inst)) + goto out_put_alg; + + inst->alg.cra_flags 
= CRYPTO_ALG_TYPE_HASH; + inst->alg.cra_priority = alg->cra_priority; + inst->alg.cra_blocksize = alg->cra_blocksize; + inst->alg.cra_alignmask = alg->cra_alignmask; + inst->alg.cra_type = &crypto_hash_type; + + inst->alg.cra_hash.digestsize = + (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == + CRYPTO_ALG_TYPE_HASH ? alg->cra_hash.digestsize : + alg->cra_digest.dia_digestsize; + + inst->alg.cra_ctxsize = sizeof(struct hmac_ctx) + + ALIGN(inst->alg.cra_blocksize * 2 + + inst->alg.cra_hash.digestsize, + sizeof(void *)); + + inst->alg.cra_init = hmac_init_tfm; + inst->alg.cra_exit = hmac_exit_tfm; + + inst->alg.cra_hash.init = hmac_init; + inst->alg.cra_hash.update = hmac_update; + inst->alg.cra_hash.final = hmac_final; + inst->alg.cra_hash.digest = hmac_digest; + inst->alg.cra_hash.setkey = hmac_setkey; + +out_put_alg: + crypto_mod_put(alg); + return inst; +} - sg_set_buf(&tmp, opad, crypto_tfm_alg_blocksize(tfm)); +static struct crypto_template hmac_tmpl = { + .name = "hmac", + .alloc = hmac_alloc, + .free = hmac_free, + .module = THIS_MODULE, +}; - crypto_digest_init(tfm); - crypto_digest_update(tfm, &tmp, 1); - - sg_set_buf(&tmp, out, crypto_tfm_alg_digestsize(tfm)); - - crypto_digest_update(tfm, &tmp, 1); - crypto_digest_final(tfm, out); +static int __init hmac_module_init(void) +{ + return crypto_register_template(&hmac_tmpl); } -void crypto_hmac(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen, - struct scatterlist *sg, unsigned int nsg, u8 *out) +static void __exit hmac_module_exit(void) { - crypto_hmac_init(tfm, key, keylen); - crypto_hmac_update(tfm, sg, nsg); - crypto_hmac_final(tfm, key, keylen, out); + crypto_unregister_template(&hmac_tmpl); } -EXPORT_SYMBOL_GPL(crypto_hmac_init); -EXPORT_SYMBOL_GPL(crypto_hmac_update); -EXPORT_SYMBOL_GPL(crypto_hmac_final); -EXPORT_SYMBOL_GPL(crypto_hmac); +module_init(hmac_module_init); +module_exit(hmac_module_exit); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("HMAC hash algorithm"); diff --git a/crypto/internal.h b/crypto/internal.h index 959e602909a6..2da6ad4f3593 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -12,19 +12,43 @@ */ #ifndef _CRYPTO_INTERNAL_H #define _CRYPTO_INTERNAL_H -#include <linux/crypto.h> + +#include <crypto/algapi.h> +#include <linux/completion.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/list.h> +#include <linux/module.h> #include <linux/kernel.h> +#include <linux/notifier.h> #include <linux/rwsem.h> #include <linux/slab.h> #include <asm/kmap_types.h> +/* Crypto notification events. 
*/ +enum { + CRYPTO_MSG_ALG_REQUEST, + CRYPTO_MSG_ALG_REGISTER, + CRYPTO_MSG_ALG_UNREGISTER, + CRYPTO_MSG_TMPL_REGISTER, + CRYPTO_MSG_TMPL_UNREGISTER, +}; + +struct crypto_instance; +struct crypto_template; + +struct crypto_larval { + struct crypto_alg alg; + struct crypto_alg *adult; + struct completion completion; + u32 mask; +}; + extern struct list_head crypto_alg_list; extern struct rw_semaphore crypto_alg_sem; +extern struct blocking_notifier_head crypto_chain; extern enum km_type crypto_km_types[]; @@ -43,36 +67,33 @@ static inline void crypto_kunmap(void *vaddr, int out) kunmap_atomic(vaddr, crypto_kmap_type(out)); } -static inline void crypto_yield(struct crypto_tfm *tfm) +static inline void crypto_yield(u32 flags) { - if (tfm->crt_flags & CRYPTO_TFM_REQ_MAY_SLEEP) + if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) cond_resched(); } -#ifdef CONFIG_CRYPTO_HMAC -int crypto_alloc_hmac_block(struct crypto_tfm *tfm); -void crypto_free_hmac_block(struct crypto_tfm *tfm); -#else -static inline int crypto_alloc_hmac_block(struct crypto_tfm *tfm) -{ - return 0; -} - -static inline void crypto_free_hmac_block(struct crypto_tfm *tfm) -{ } -#endif - #ifdef CONFIG_PROC_FS void __init crypto_init_proc(void); +void __exit crypto_exit_proc(void); #else static inline void crypto_init_proc(void) { } +static inline void crypto_exit_proc(void) +{ } #endif static inline unsigned int crypto_digest_ctxsize(struct crypto_alg *alg, int flags) { - return alg->cra_ctxsize; + unsigned int len = alg->cra_ctxsize; + + if (alg->cra_alignmask) { + len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1); + len += alg->cra_digest.dia_digestsize; + } + + return len; } static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg, @@ -96,6 +117,10 @@ static inline unsigned int crypto_compress_ctxsize(struct crypto_alg *alg, return alg->cra_ctxsize; } +struct crypto_alg *crypto_mod_get(struct crypto_alg *alg); +struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, u32 mask); +struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask); + int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags); int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags); int crypto_init_compress_flags(struct crypto_tfm *tfm, u32 flags); @@ -108,5 +133,52 @@ void crypto_exit_digest_ops(struct crypto_tfm *tfm); void crypto_exit_cipher_ops(struct crypto_tfm *tfm); void crypto_exit_compress_ops(struct crypto_tfm *tfm); +void crypto_larval_error(const char *name, u32 type, u32 mask); + +void crypto_shoot_alg(struct crypto_alg *alg); +struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 flags); + +int crypto_register_instance(struct crypto_template *tmpl, + struct crypto_instance *inst); + +int crypto_register_notifier(struct notifier_block *nb); +int crypto_unregister_notifier(struct notifier_block *nb); + +static inline void crypto_alg_put(struct crypto_alg *alg) +{ + if (atomic_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy) + alg->cra_destroy(alg); +} + +static inline int crypto_tmpl_get(struct crypto_template *tmpl) +{ + return try_module_get(tmpl->module); +} + +static inline void crypto_tmpl_put(struct crypto_template *tmpl) +{ + module_put(tmpl->module); +} + +static inline int crypto_is_larval(struct crypto_alg *alg) +{ + return alg->cra_flags & CRYPTO_ALG_LARVAL; +} + +static inline int crypto_is_dead(struct crypto_alg *alg) +{ + return alg->cra_flags & CRYPTO_ALG_DEAD; +} + +static inline int crypto_is_moribund(struct crypto_alg *alg) +{ + return alg->cra_flags & 
(CRYPTO_ALG_DEAD | CRYPTO_ALG_DYING); +} + +static inline int crypto_notify(unsigned long val, void *v) +{ + return blocking_notifier_call_chain(&crypto_chain, val, v); +} + #endif /* _CRYPTO_INTERNAL_H */ diff --git a/crypto/khazad.c b/crypto/khazad.c index d4c9d3657b36..9fa24a2dd6ff 100644 --- a/crypto/khazad.c +++ b/crypto/khazad.c @@ -755,19 +755,13 @@ static const u64 c[KHAZAD_ROUNDS + 1] = { }; static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key, - unsigned int key_len, u32 *flags) + unsigned int key_len) { struct khazad_ctx *ctx = crypto_tfm_ctx(tfm); const __be32 *key = (const __be32 *)in_key; int r; const u64 *S = T7; u64 K2, K1; - - if (key_len != 16) - { - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; - return -EINVAL; - } /* key is supposed to be 32-bit aligned */ K2 = ((u64)be32_to_cpu(key[0]) << 32) | be32_to_cpu(key[1]); diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c index d061da21cfda..094397b48849 100644 --- a/crypto/michael_mic.c +++ b/crypto/michael_mic.c @@ -123,14 +123,13 @@ static void michael_final(struct crypto_tfm *tfm, u8 *out) static int michael_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen, u32 *flags) + unsigned int keylen) { struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm); const __le32 *data = (const __le32 *)key; if (keylen != 8) { - if (flags) - *flags = CRYPTO_TFM_RES_BAD_KEY_LEN; + tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } diff --git a/crypto/proc.c b/crypto/proc.c index c0a5dd7ce2cc..dabce0676f63 100644 --- a/crypto/proc.c +++ b/crypto/proc.c @@ -12,6 +12,8 @@ * any later version. * */ + +#include <asm/atomic.h> #include <linux/init.h> #include <linux/crypto.h> #include <linux/rwsem.h> @@ -54,6 +56,7 @@ static int c_show(struct seq_file *m, void *p) seq_printf(m, "driver : %s\n", alg->cra_driver_name); seq_printf(m, "module : %s\n", module_name(alg->cra_module)); seq_printf(m, "priority : %d\n", alg->cra_priority); + seq_printf(m, "refcnt : %d\n", atomic_read(&alg->cra_refcnt)); switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_CIPHER: @@ -75,7 +78,10 @@ static int c_show(struct seq_file *m, void *p) seq_printf(m, "type : compression\n"); break; default: - seq_printf(m, "type : unknown\n"); + if (alg->cra_type && alg->cra_type->show) + alg->cra_type->show(m, alg); + else + seq_printf(m, "type : unknown\n"); break; } @@ -110,3 +116,8 @@ void __init crypto_init_proc(void) if (proc) proc->proc_fops = &proc_crypto_ops; } + +void __exit crypto_exit_proc(void) +{ + remove_proc_entry("crypto", NULL); +} diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index 2953e2cc56f0..35172d3f043b 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c @@ -15,9 +15,11 @@ */ #include <linux/kernel.h> #include <linux/mm.h> +#include <linux/module.h> #include <linux/pagemap.h> #include <linux/highmem.h> -#include <asm/scatterlist.h> +#include <linux/scatterlist.h> + #include "internal.h" #include "scatterwalk.h" @@ -27,88 +29,77 @@ enum km_type crypto_km_types[] = { KM_SOFTIRQ0, KM_SOFTIRQ1, }; +EXPORT_SYMBOL_GPL(crypto_km_types); -static void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out) +static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out) { - if (out) - memcpy(sgdata, buf, nbytes); - else - memcpy(buf, sgdata, nbytes); + void *src = out ? buf : sgdata; + void *dst = out ? 
sgdata : buf; + + memcpy(dst, src, nbytes); } void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg) { - unsigned int rest_of_page; - walk->sg = sg; - walk->page = sg->page; - walk->len_this_segment = sg->length; - BUG_ON(!sg->length); - rest_of_page = PAGE_CACHE_SIZE - (sg->offset & (PAGE_CACHE_SIZE - 1)); - walk->len_this_page = min(sg->length, rest_of_page); walk->offset = sg->offset; } +EXPORT_SYMBOL_GPL(scatterwalk_start); -void scatterwalk_map(struct scatter_walk *walk, int out) -{ - walk->data = crypto_kmap(walk->page, out) + walk->offset; -} - -static inline void scatterwalk_unmap(struct scatter_walk *walk, int out) +void *scatterwalk_map(struct scatter_walk *walk, int out) { - /* walk->data may be pointing the first byte of the next page; - however, we know we transfered at least one byte. So, - walk->data - 1 will be a virtual address in the mapped page. */ - crypto_kunmap(walk->data - 1, out); + return crypto_kmap(scatterwalk_page(walk), out) + + offset_in_page(walk->offset); } +EXPORT_SYMBOL_GPL(scatterwalk_map); static void scatterwalk_pagedone(struct scatter_walk *walk, int out, unsigned int more) { if (out) - flush_dcache_page(walk->page); + flush_dcache_page(scatterwalk_page(walk)); if (more) { - walk->len_this_segment -= walk->len_this_page; - - if (walk->len_this_segment) { - walk->page++; - walk->len_this_page = min(walk->len_this_segment, - (unsigned)PAGE_CACHE_SIZE); - walk->offset = 0; - } - else + walk->offset += PAGE_SIZE - 1; + walk->offset &= PAGE_MASK; + if (walk->offset >= walk->sg->offset + walk->sg->length) scatterwalk_start(walk, sg_next(walk->sg)); } } void scatterwalk_done(struct scatter_walk *walk, int out, int more) { - scatterwalk_unmap(walk, out); - if (walk->len_this_page == 0 || !more) + if (!offset_in_page(walk->offset) || !more) scatterwalk_pagedone(walk, out, more); } +EXPORT_SYMBOL_GPL(scatterwalk_done); -/* - * Do not call this unless the total length of all of the fragments - * has been verified as multiple of the block size. 
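The warning being deleted just above no longer applies: the rewritten scatterwalk_copychunks() below maps and unmaps each page itself and clamps to the page boundary, so arbitrary lengths are fine. A typical in-kernel use, linearizing the head of a scatterlist (the buffer size is illustrative):

    struct scatter_walk walk;
    u8 buf[64];

    scatterwalk_start(&walk, sg);
    scatterwalk_copychunks(buf, &walk, sizeof(buf), 0); /* 0: sg -> buf */
    scatterwalk_done(&walk, 0, 0);                      /* no more data */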
- */ -int scatterwalk_copychunks(void *buf, struct scatter_walk *walk, - size_t nbytes, int out) +void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, + size_t nbytes, int out) { - while (nbytes > walk->len_this_page) { - memcpy_dir(buf, walk->data, walk->len_this_page, out); - buf += walk->len_this_page; - nbytes -= walk->len_this_page; + for (;;) { + unsigned int len_this_page = scatterwalk_pagelen(walk); + u8 *vaddr; + + if (len_this_page > nbytes) + len_this_page = nbytes; + + vaddr = scatterwalk_map(walk, out); + memcpy_dir(buf, vaddr, len_this_page, out); + scatterwalk_unmap(vaddr, out); + + if (nbytes == len_this_page) + break; + + buf += len_this_page; + nbytes -= len_this_page; - scatterwalk_unmap(walk, out); scatterwalk_pagedone(walk, out, 1); - scatterwalk_map(walk, out); } - memcpy_dir(buf, walk->data, nbytes, out); - return nbytes; + scatterwalk_advance(walk, nbytes); } +EXPORT_SYMBOL_GPL(scatterwalk_copychunks); diff --git a/crypto/scatterwalk.h b/crypto/scatterwalk.h index e79925c474a3..f1592cc2d0f4 100644 --- a/crypto/scatterwalk.h +++ b/crypto/scatterwalk.h @@ -14,45 +14,42 @@ #ifndef _CRYPTO_SCATTERWALK_H #define _CRYPTO_SCATTERWALK_H + #include <linux/mm.h> -#include <asm/scatterlist.h> +#include <linux/scatterlist.h> -struct scatter_walk { - struct scatterlist *sg; - struct page *page; - void *data; - unsigned int len_this_page; - unsigned int len_this_segment; - unsigned int offset; -}; +#include "internal.h" -/* Define sg_next is an inline routine now in case we want to change - scatterlist to a linked list later. */ static inline struct scatterlist *sg_next(struct scatterlist *sg) { - return sg + 1; + return (++sg)->length ? sg : (void *)sg->page; } -static inline int scatterwalk_samebuf(struct scatter_walk *walk_in, - struct scatter_walk *walk_out) +static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in, + struct scatter_walk *walk_out) { - return walk_in->page == walk_out->page && - walk_in->offset == walk_out->offset; + return !(((walk_in->sg->page - walk_out->sg->page) << PAGE_SHIFT) + + (int)(walk_in->offset - walk_out->offset)); +} + +static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk) +{ + unsigned int len = walk->sg->offset + walk->sg->length - walk->offset; + unsigned int len_this_page = offset_in_page(~walk->offset) + 1; + return len_this_page > len ? len : len_this_page; } static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk, unsigned int nbytes) { - return nbytes > walk->len_this_page ? walk->len_this_page : nbytes; + unsigned int len_this_page = scatterwalk_pagelen(walk); + return nbytes > len_this_page ? 
len_this_page : nbytes; } static inline void scatterwalk_advance(struct scatter_walk *walk, unsigned int nbytes) { - walk->data += nbytes; walk->offset += nbytes; - walk->len_this_page -= nbytes; - walk->len_this_segment -= nbytes; } static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk, @@ -61,9 +58,20 @@ static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk, return !(walk->offset & alignmask); } +static inline struct page *scatterwalk_page(struct scatter_walk *walk) +{ + return walk->sg->page + (walk->offset >> PAGE_SHIFT); +} + +static inline void scatterwalk_unmap(void *vaddr, int out) +{ + crypto_kunmap(vaddr, out); +} + void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg); -int scatterwalk_copychunks(void *buf, struct scatter_walk *walk, size_t nbytes, int out); -void scatterwalk_map(struct scatter_walk *walk, int out); +void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, + size_t nbytes, int out); +void *scatterwalk_map(struct scatter_walk *walk, int out); void scatterwalk_done(struct scatter_walk *walk, int out, int more); #endif /* _CRYPTO_SCATTERWALK_H */ diff --git a/crypto/serpent.c b/crypto/serpent.c index de60cdddbf4a..465d091cd3ec 100644 --- a/crypto/serpent.c +++ b/crypto/serpent.c @@ -216,7 +216,7 @@ struct serpent_ctx { static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen, u32 *flags) + unsigned int keylen) { struct serpent_ctx *ctx = crypto_tfm_ctx(tfm); u32 *k = ctx->expkey; @@ -224,13 +224,6 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, u32 r0,r1,r2,r3,r4; int i; - if ((keylen < SERPENT_MIN_KEY_SIZE) - || (keylen > SERPENT_MAX_KEY_SIZE)) - { - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; - return -EINVAL; - } - /* Copy key, add padding */ for (i = 0; i < keylen; ++i) @@ -497,21 +490,15 @@ static struct crypto_alg serpent_alg = { }; static int tnepres_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen, u32 *flags) + unsigned int keylen) { u8 rev_key[SERPENT_MAX_KEY_SIZE]; int i; - if ((keylen < SERPENT_MIN_KEY_SIZE) - || (keylen > SERPENT_MAX_KEY_SIZE)) { - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; - return -EINVAL; - } - for (i = 0; i < keylen; ++i) rev_key[keylen - i - 1] = key[i]; - return serpent_setkey(tfm, rev_key, keylen, flags); + return serpent_setkey(tfm, rev_key, keylen); } static void tnepres_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) diff --git a/crypto/sha1.c b/crypto/sha1.c index 6c77b689f87e..1bba551e5b45 100644 --- a/crypto/sha1.c +++ b/crypto/sha1.c @@ -109,6 +109,7 @@ static void sha1_final(struct crypto_tfm *tfm, u8 *out) static struct crypto_alg alg = { .cra_name = "sha1", + .cra_driver_name= "sha1-generic", .cra_flags = CRYPTO_ALG_TYPE_DIGEST, .cra_blocksize = SHA1_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sha1_ctx), @@ -137,3 +138,5 @@ module_exit(fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm"); + +MODULE_ALIAS("sha1-generic"); diff --git a/crypto/sha256.c b/crypto/sha256.c index bc71d85a7d02..716195bb54f2 100644 --- a/crypto/sha256.c +++ b/crypto/sha256.c @@ -309,6 +309,7 @@ static void sha256_final(struct crypto_tfm *tfm, u8 *out) static struct crypto_alg alg = { .cra_name = "sha256", + .cra_driver_name= "sha256-generic", .cra_flags = CRYPTO_ALG_TYPE_DIGEST, .cra_blocksize = SHA256_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sha256_ctx), @@ -337,3 +338,5 @@ module_exit(fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm"); + 
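The cra_driver_name and MODULE_ALIAS additions in these sha1.c/sha256.c hunks let a caller bind to this specific implementation rather than whatever wins on cra_priority for the generic name. A hypothetical caller:

    tfm = crypto_alloc_hash("sha1-generic", 0, CRYPTO_ALG_ASYNC);

requests the C implementation by driver name even when an accelerated "sha1" is registered with higher priority, and the alias lets module loading resolve that name.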
+MODULE_ALIAS("sha256-generic"); diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index e52f56c5bd5e..83307420d31c 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -17,6 +17,7 @@ * */ +#include <linux/err.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> @@ -54,8 +55,6 @@ */ #define ENCRYPT 1 #define DECRYPT 0 -#define MODE_ECB 1 -#define MODE_CBC 0 static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 }; @@ -89,9 +88,11 @@ static void test_hash(char *algo, struct hash_testvec *template, unsigned int i, j, k, temp; struct scatterlist sg[8]; char result[64]; - struct crypto_tfm *tfm; + struct crypto_hash *tfm; + struct hash_desc desc; struct hash_testvec *hash_tv; unsigned int tsize; + int ret; printk("\ntesting %s\n", algo); @@ -105,30 +106,42 @@ static void test_hash(char *algo, struct hash_testvec *template, memcpy(tvmem, template, tsize); hash_tv = (void *)tvmem; - tfm = crypto_alloc_tfm(algo, 0); - if (tfm == NULL) { - printk("failed to load transform for %s\n", algo); + + tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) { + printk("failed to load transform for %s: %ld\n", algo, + PTR_ERR(tfm)); return; } + desc.tfm = tfm; + desc.flags = 0; + for (i = 0; i < tcount; i++) { printk("test %u:\n", i + 1); memset(result, 0, 64); sg_set_buf(&sg[0], hash_tv[i].plaintext, hash_tv[i].psize); - crypto_digest_init(tfm); - if (tfm->crt_u.digest.dit_setkey) { - crypto_digest_setkey(tfm, hash_tv[i].key, - hash_tv[i].ksize); + if (hash_tv[i].ksize) { + ret = crypto_hash_setkey(tfm, hash_tv[i].key, + hash_tv[i].ksize); + if (ret) { + printk("setkey() failed ret=%d\n", ret); + goto out; + } + } + + ret = crypto_hash_digest(&desc, sg, hash_tv[i].psize, result); + if (ret) { + printk("digest () failed ret=%d\n", ret); + goto out; } - crypto_digest_update(tfm, sg, 1); - crypto_digest_final(tfm, result); - hexdump(result, crypto_tfm_alg_digestsize(tfm)); + hexdump(result, crypto_hash_digestsize(tfm)); printk("%s\n", memcmp(result, hash_tv[i].digest, - crypto_tfm_alg_digestsize(tfm)) ? + crypto_hash_digestsize(tfm)) ? "fail" : "pass"); } @@ -154,127 +167,56 @@ static void test_hash(char *algo, struct hash_testvec *template, hash_tv[i].tap[k]); } - crypto_digest_digest(tfm, sg, hash_tv[i].np, result); - - hexdump(result, crypto_tfm_alg_digestsize(tfm)); - printk("%s\n", - memcmp(result, hash_tv[i].digest, - crypto_tfm_alg_digestsize(tfm)) ? 
- "fail" : "pass"); - } - } - - crypto_free_tfm(tfm); -} - - -#ifdef CONFIG_CRYPTO_HMAC - -static void test_hmac(char *algo, struct hmac_testvec *template, - unsigned int tcount) -{ - unsigned int i, j, k, temp; - struct scatterlist sg[8]; - char result[64]; - struct crypto_tfm *tfm; - struct hmac_testvec *hmac_tv; - unsigned int tsize, klen; - - tfm = crypto_alloc_tfm(algo, 0); - if (tfm == NULL) { - printk("failed to load transform for %s\n", algo); - return; - } - - printk("\ntesting hmac_%s\n", algo); - - tsize = sizeof(struct hmac_testvec); - tsize *= tcount; - if (tsize > TVMEMSIZE) { - printk("template (%u) too big for tvmem (%u)\n", tsize, - TVMEMSIZE); - goto out; - } - - memcpy(tvmem, template, tsize); - hmac_tv = (void *)tvmem; - - for (i = 0; i < tcount; i++) { - printk("test %u:\n", i + 1); - memset(result, 0, sizeof (result)); - - klen = hmac_tv[i].ksize; - sg_set_buf(&sg[0], hmac_tv[i].plaintext, hmac_tv[i].psize); - - crypto_hmac(tfm, hmac_tv[i].key, &klen, sg, 1, result); + if (hash_tv[i].ksize) { + ret = crypto_hash_setkey(tfm, hash_tv[i].key, + hash_tv[i].ksize); - hexdump(result, crypto_tfm_alg_digestsize(tfm)); - printk("%s\n", - memcmp(result, hmac_tv[i].digest, - crypto_tfm_alg_digestsize(tfm)) ? "fail" : - "pass"); - } - - printk("\ntesting hmac_%s across pages\n", algo); - - memset(xbuf, 0, XBUFSIZE); - - j = 0; - for (i = 0; i < tcount; i++) { - if (hmac_tv[i].np) { - j++; - printk("test %u:\n",j); - memset(result, 0, 64); - - temp = 0; - klen = hmac_tv[i].ksize; - for (k = 0; k < hmac_tv[i].np; k++) { - memcpy(&xbuf[IDX[k]], - hmac_tv[i].plaintext + temp, - hmac_tv[i].tap[k]); - temp += hmac_tv[i].tap[k]; - sg_set_buf(&sg[k], &xbuf[IDX[k]], - hmac_tv[i].tap[k]); + if (ret) { + printk("setkey() failed ret=%d\n", ret); + goto out; + } } - crypto_hmac(tfm, hmac_tv[i].key, &klen, sg, - hmac_tv[i].np, result); - hexdump(result, crypto_tfm_alg_digestsize(tfm)); + ret = crypto_hash_digest(&desc, sg, hash_tv[i].psize, + result); + if (ret) { + printk("digest () failed ret=%d\n", ret); + goto out; + } + hexdump(result, crypto_hash_digestsize(tfm)); printk("%s\n", - memcmp(result, hmac_tv[i].digest, - crypto_tfm_alg_digestsize(tfm)) ? + memcmp(result, hash_tv[i].digest, + crypto_hash_digestsize(tfm)) ? 
"fail" : "pass"); } } + out: - crypto_free_tfm(tfm); + crypto_free_hash(tfm); } -#endif /* CONFIG_CRYPTO_HMAC */ - -static void test_cipher(char *algo, int mode, int enc, +static void test_cipher(char *algo, int enc, struct cipher_testvec *template, unsigned int tcount) { unsigned int ret, i, j, k, temp; unsigned int tsize; + unsigned int iv_len; + unsigned int len; char *q; - struct crypto_tfm *tfm; + struct crypto_blkcipher *tfm; char *key; struct cipher_testvec *cipher_tv; + struct blkcipher_desc desc; struct scatterlist sg[8]; - const char *e, *m; + const char *e; if (enc == ENCRYPT) e = "encryption"; else e = "decryption"; - if (mode == MODE_ECB) - m = "ECB"; - else - m = "CBC"; - printk("\ntesting %s %s %s\n", algo, m, e); + printk("\ntesting %s %s\n", algo, e); tsize = sizeof (struct cipher_testvec); tsize *= tcount; @@ -288,15 +230,15 @@ static void test_cipher(char *algo, int mode, int enc, memcpy(tvmem, template, tsize); cipher_tv = (void *)tvmem; - if (mode) - tfm = crypto_alloc_tfm(algo, 0); - else - tfm = crypto_alloc_tfm(algo, CRYPTO_TFM_MODE_CBC); + tfm = crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC); - if (tfm == NULL) { - printk("failed to load transform for %s %s\n", algo, m); + if (IS_ERR(tfm)) { + printk("failed to load transform for %s: %ld\n", algo, + PTR_ERR(tfm)); return; } + desc.tfm = tfm; + desc.flags = 0; j = 0; for (i = 0; i < tcount; i++) { @@ -305,14 +247,17 @@ static void test_cipher(char *algo, int mode, int enc, printk("test %u (%d bit key):\n", j, cipher_tv[i].klen * 8); - tfm->crt_flags = 0; + crypto_blkcipher_clear_flags(tfm, ~0); if (cipher_tv[i].wk) - tfm->crt_flags |= CRYPTO_TFM_REQ_WEAK_KEY; + crypto_blkcipher_set_flags( + tfm, CRYPTO_TFM_REQ_WEAK_KEY); key = cipher_tv[i].key; - ret = crypto_cipher_setkey(tfm, key, cipher_tv[i].klen); + ret = crypto_blkcipher_setkey(tfm, key, + cipher_tv[i].klen); if (ret) { - printk("setkey() failed flags=%x\n", tfm->crt_flags); + printk("setkey() failed flags=%x\n", + crypto_blkcipher_get_flags(tfm)); if (!cipher_tv[i].fail) goto out; @@ -321,19 +266,19 @@ static void test_cipher(char *algo, int mode, int enc, sg_set_buf(&sg[0], cipher_tv[i].input, cipher_tv[i].ilen); - if (!mode) { - crypto_cipher_set_iv(tfm, cipher_tv[i].iv, - crypto_tfm_alg_ivsize(tfm)); - } - - if (enc) - ret = crypto_cipher_encrypt(tfm, sg, sg, cipher_tv[i].ilen); - else - ret = crypto_cipher_decrypt(tfm, sg, sg, cipher_tv[i].ilen); + iv_len = crypto_blkcipher_ivsize(tfm); + if (iv_len) + crypto_blkcipher_set_iv(tfm, cipher_tv[i].iv, + iv_len); + len = cipher_tv[i].ilen; + ret = enc ? 
+ crypto_blkcipher_encrypt(&desc, sg, sg, len) : + crypto_blkcipher_decrypt(&desc, sg, sg, len); if (ret) { - printk("%s () failed flags=%x\n", e, tfm->crt_flags); + printk("%s () failed flags=%x\n", e, + desc.flags); goto out; } @@ -346,7 +291,7 @@ static void test_cipher(char *algo, int mode, int enc, } } - printk("\ntesting %s %s %s across pages (chunking)\n", algo, m, e); + printk("\ntesting %s %s across pages (chunking)\n", algo, e); memset(xbuf, 0, XBUFSIZE); j = 0; @@ -356,14 +301,17 @@ static void test_cipher(char *algo, int mode, int enc, printk("test %u (%d bit key):\n", j, cipher_tv[i].klen * 8); - tfm->crt_flags = 0; + crypto_blkcipher_clear_flags(tfm, ~0); if (cipher_tv[i].wk) - tfm->crt_flags |= CRYPTO_TFM_REQ_WEAK_KEY; + crypto_blkcipher_set_flags( + tfm, CRYPTO_TFM_REQ_WEAK_KEY); key = cipher_tv[i].key; - ret = crypto_cipher_setkey(tfm, key, cipher_tv[i].klen); + ret = crypto_blkcipher_setkey(tfm, key, + cipher_tv[i].klen); if (ret) { - printk("setkey() failed flags=%x\n", tfm->crt_flags); + printk("setkey() failed flags=%x\n", + crypto_blkcipher_get_flags(tfm)); if (!cipher_tv[i].fail) goto out; @@ -379,18 +327,19 @@ static void test_cipher(char *algo, int mode, int enc, cipher_tv[i].tap[k]); } - if (!mode) { - crypto_cipher_set_iv(tfm, cipher_tv[i].iv, - crypto_tfm_alg_ivsize(tfm)); - } + iv_len = crypto_blkcipher_ivsize(tfm); + if (iv_len) + crypto_blkcipher_set_iv(tfm, cipher_tv[i].iv, + iv_len); - if (enc) - ret = crypto_cipher_encrypt(tfm, sg, sg, cipher_tv[i].ilen); - else - ret = crypto_cipher_decrypt(tfm, sg, sg, cipher_tv[i].ilen); + len = cipher_tv[i].ilen; + ret = enc ? + crypto_blkcipher_encrypt(&desc, sg, sg, len) : + crypto_blkcipher_decrypt(&desc, sg, sg, len); if (ret) { - printk("%s () failed flags=%x\n", e, tfm->crt_flags); + printk("%s () failed flags=%x\n", e, + desc.flags); goto out; } @@ -409,10 +358,10 @@ static void test_cipher(char *algo, int mode, int enc, } out: - crypto_free_tfm(tfm); + crypto_free_blkcipher(tfm); } -static int test_cipher_jiffies(struct crypto_tfm *tfm, int enc, char *p, +static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc, char *p, int blen, int sec) { struct scatterlist sg[1]; @@ -425,9 +374,9 @@ static int test_cipher_jiffies(struct crypto_tfm *tfm, int enc, char *p, for (start = jiffies, end = start + sec * HZ, bcount = 0; time_before(jiffies, end); bcount++) { if (enc) - ret = crypto_cipher_encrypt(tfm, sg, sg, blen); + ret = crypto_blkcipher_encrypt(desc, sg, sg, blen); else - ret = crypto_cipher_decrypt(tfm, sg, sg, blen); + ret = crypto_blkcipher_decrypt(desc, sg, sg, blen); if (ret) return ret; @@ -438,7 +387,7 @@ static int test_cipher_jiffies(struct crypto_tfm *tfm, int enc, char *p, return 0; } -static int test_cipher_cycles(struct crypto_tfm *tfm, int enc, char *p, +static int test_cipher_cycles(struct blkcipher_desc *desc, int enc, char *p, int blen) { struct scatterlist sg[1]; @@ -454,9 +403,9 @@ static int test_cipher_cycles(struct crypto_tfm *tfm, int enc, char *p, /* Warm-up run. 
*/ for (i = 0; i < 4; i++) { if (enc) - ret = crypto_cipher_encrypt(tfm, sg, sg, blen); + ret = crypto_blkcipher_encrypt(desc, sg, sg, blen); else - ret = crypto_cipher_decrypt(tfm, sg, sg, blen); + ret = crypto_blkcipher_decrypt(desc, sg, sg, blen); if (ret) goto out; @@ -468,9 +417,9 @@ static int test_cipher_cycles(struct crypto_tfm *tfm, int enc, char *p, start = get_cycles(); if (enc) - ret = crypto_cipher_encrypt(tfm, sg, sg, blen); + ret = crypto_blkcipher_encrypt(desc, sg, sg, blen); else - ret = crypto_cipher_decrypt(tfm, sg, sg, blen); + ret = crypto_blkcipher_decrypt(desc, sg, sg, blen); end = get_cycles(); if (ret) @@ -490,35 +439,32 @@ out: return ret; } -static void test_cipher_speed(char *algo, int mode, int enc, unsigned int sec, +static void test_cipher_speed(char *algo, int enc, unsigned int sec, struct cipher_testvec *template, unsigned int tcount, struct cipher_speed *speed) { unsigned int ret, i, j, iv_len; unsigned char *key, *p, iv[128]; - struct crypto_tfm *tfm; - const char *e, *m; + struct crypto_blkcipher *tfm; + struct blkcipher_desc desc; + const char *e; if (enc == ENCRYPT) e = "encryption"; else e = "decryption"; - if (mode == MODE_ECB) - m = "ECB"; - else - m = "CBC"; - printk("\ntesting speed of %s %s %s\n", algo, m, e); + printk("\ntesting speed of %s %s\n", algo, e); - if (mode) - tfm = crypto_alloc_tfm(algo, 0); - else - tfm = crypto_alloc_tfm(algo, CRYPTO_TFM_MODE_CBC); + tfm = crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC); - if (tfm == NULL) { - printk("failed to load transform for %s %s\n", algo, m); + if (IS_ERR(tfm)) { + printk("failed to load transform for %s: %ld\n", algo, + PTR_ERR(tfm)); return; } + desc.tfm = tfm; + desc.flags = 0; for (i = 0; speed[i].klen != 0; i++) { if ((speed[i].blen + speed[i].klen) > TVMEMSIZE) { @@ -542,125 +488,231 @@ static void test_cipher_speed(char *algo, int mode, int enc, unsigned int sec, } p = (unsigned char *)tvmem + speed[i].klen; - ret = crypto_cipher_setkey(tfm, key, speed[i].klen); + ret = crypto_blkcipher_setkey(tfm, key, speed[i].klen); if (ret) { - printk("setkey() failed flags=%x\n", tfm->crt_flags); + printk("setkey() failed flags=%x\n", + crypto_blkcipher_get_flags(tfm)); goto out; } - if (!mode) { - iv_len = crypto_tfm_alg_ivsize(tfm); + iv_len = crypto_blkcipher_ivsize(tfm); + if (iv_len) { memset(&iv, 0xff, iv_len); - crypto_cipher_set_iv(tfm, iv, iv_len); + crypto_blkcipher_set_iv(tfm, iv, iv_len); } if (sec) - ret = test_cipher_jiffies(tfm, enc, p, speed[i].blen, + ret = test_cipher_jiffies(&desc, enc, p, speed[i].blen, sec); else - ret = test_cipher_cycles(tfm, enc, p, speed[i].blen); + ret = test_cipher_cycles(&desc, enc, p, speed[i].blen); if (ret) { - printk("%s() failed flags=%x\n", e, tfm->crt_flags); + printk("%s() failed flags=%x\n", e, desc.flags); break; } } out: - crypto_free_tfm(tfm); + crypto_free_blkcipher(tfm); } -static void test_digest_jiffies(struct crypto_tfm *tfm, char *p, int blen, - int plen, char *out, int sec) +static int test_hash_jiffies_digest(struct hash_desc *desc, char *p, int blen, + char *out, int sec) +{ + struct scatterlist sg[1]; + unsigned long start, end; + int bcount; + int ret; + + for (start = jiffies, end = start + sec * HZ, bcount = 0; + time_before(jiffies, end); bcount++) { + sg_set_buf(sg, p, blen); + ret = crypto_hash_digest(desc, sg, blen, out); + if (ret) + return ret; + } + + printk("%6u opers/sec, %9lu bytes/sec\n", + bcount / sec, ((long)bcount * blen) / sec); + + return 0; +} + +static int test_hash_jiffies(struct hash_desc *desc, char *p, 
int blen, + int plen, char *out, int sec) { struct scatterlist sg[1]; unsigned long start, end; int bcount, pcount; + int ret; + + if (plen == blen) + return test_hash_jiffies_digest(desc, p, blen, out, sec); for (start = jiffies, end = start + sec * HZ, bcount = 0; time_before(jiffies, end); bcount++) { - crypto_digest_init(tfm); + ret = crypto_hash_init(desc); + if (ret) + return ret; for (pcount = 0; pcount < blen; pcount += plen) { sg_set_buf(sg, p + pcount, plen); - crypto_digest_update(tfm, sg, 1); + ret = crypto_hash_update(desc, sg, plen); + if (ret) + return ret; } /* we assume there is enough space in 'out' for the result */ - crypto_digest_final(tfm, out); + ret = crypto_hash_final(desc, out); + if (ret) + return ret; } printk("%6u opers/sec, %9lu bytes/sec\n", bcount / sec, ((long)bcount * blen) / sec); - return; + return 0; +} + +static int test_hash_cycles_digest(struct hash_desc *desc, char *p, int blen, + char *out) +{ + struct scatterlist sg[1]; + unsigned long cycles = 0; + int i; + int ret; + + local_bh_disable(); + local_irq_disable(); + + /* Warm-up run. */ + for (i = 0; i < 4; i++) { + sg_set_buf(sg, p, blen); + ret = crypto_hash_digest(desc, sg, blen, out); + if (ret) + goto out; + } + + /* The real thing. */ + for (i = 0; i < 8; i++) { + cycles_t start, end; + + start = get_cycles(); + + sg_set_buf(sg, p, blen); + ret = crypto_hash_digest(desc, sg, blen, out); + if (ret) + goto out; + + end = get_cycles(); + + cycles += end - start; + } + +out: + local_irq_enable(); + local_bh_enable(); + + if (ret) + return ret; + + printk("%6lu cycles/operation, %4lu cycles/byte\n", + cycles / 8, cycles / (8 * blen)); + + return 0; } -static void test_digest_cycles(struct crypto_tfm *tfm, char *p, int blen, - int plen, char *out) +static int test_hash_cycles(struct hash_desc *desc, char *p, int blen, + int plen, char *out) { struct scatterlist sg[1]; unsigned long cycles = 0; int i, pcount; + int ret; + + if (plen == blen) + return test_hash_cycles_digest(desc, p, blen, out); local_bh_disable(); local_irq_disable(); /* Warm-up run. */ for (i = 0; i < 4; i++) { - crypto_digest_init(tfm); + ret = crypto_hash_init(desc); + if (ret) + goto out; for (pcount = 0; pcount < blen; pcount += plen) { sg_set_buf(sg, p + pcount, plen); - crypto_digest_update(tfm, sg, 1); + ret = crypto_hash_update(desc, sg, plen); + if (ret) + goto out; } - crypto_digest_final(tfm, out); + crypto_hash_final(desc, out); + if (ret) + goto out; } /* The real thing. 
*/ for (i = 0; i < 8; i++) { cycles_t start, end; - crypto_digest_init(tfm); - start = get_cycles(); + ret = crypto_hash_init(desc); + if (ret) + goto out; for (pcount = 0; pcount < blen; pcount += plen) { sg_set_buf(sg, p + pcount, plen); - crypto_digest_update(tfm, sg, 1); + ret = crypto_hash_update(desc, sg, plen); + if (ret) + goto out; } - crypto_digest_final(tfm, out); + ret = crypto_hash_final(desc, out); + if (ret) + goto out; end = get_cycles(); cycles += end - start; } +out: local_irq_enable(); local_bh_enable(); + if (ret) + return ret; + printk("%6lu cycles/operation, %4lu cycles/byte\n", cycles / 8, cycles / (8 * blen)); - return; + return 0; } -static void test_digest_speed(char *algo, unsigned int sec, - struct digest_speed *speed) +static void test_hash_speed(char *algo, unsigned int sec, + struct hash_speed *speed) { - struct crypto_tfm *tfm; + struct crypto_hash *tfm; + struct hash_desc desc; char output[1024]; int i; + int ret; printk("\ntesting speed of %s\n", algo); - tfm = crypto_alloc_tfm(algo, 0); + tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC); - if (tfm == NULL) { - printk("failed to load transform for %s\n", algo); + if (IS_ERR(tfm)) { + printk("failed to load transform for %s: %ld\n", algo, + PTR_ERR(tfm)); return; } - if (crypto_tfm_alg_digestsize(tfm) > sizeof(output)) { + desc.tfm = tfm; + desc.flags = 0; + + if (crypto_hash_digestsize(tfm) > sizeof(output)) { printk("digestsize(%u) > outputbuffer(%zu)\n", - crypto_tfm_alg_digestsize(tfm), sizeof(output)); + crypto_hash_digestsize(tfm), sizeof(output)); goto out; } @@ -677,20 +729,27 @@ static void test_digest_speed(char *algo, unsigned int sec, memset(tvmem, 0xff, speed[i].blen); if (sec) - test_digest_jiffies(tfm, tvmem, speed[i].blen, speed[i].plen, output, sec); + ret = test_hash_jiffies(&desc, tvmem, speed[i].blen, + speed[i].plen, output, sec); else - test_digest_cycles(tfm, tvmem, speed[i].blen, speed[i].plen, output); + ret = test_hash_cycles(&desc, tvmem, speed[i].blen, + speed[i].plen, output); + + if (ret) { + printk("hashing failed ret=%d\n", ret); + break; + } } out: - crypto_free_tfm(tfm); + crypto_free_hash(tfm); } static void test_deflate(void) { unsigned int i; char result[COMP_BUF_SIZE]; - struct crypto_tfm *tfm; + struct crypto_comp *tfm; struct comp_testvec *tv; unsigned int tsize; @@ -762,105 +821,7 @@ static void test_deflate(void) ilen, dlen); } out: - crypto_free_tfm(tfm); -} - -static void test_crc32c(void) -{ -#define NUMVEC 6 -#define VECSIZE 40 - - int i, j, pass; - u32 crc; - u8 b, test_vec[NUMVEC][VECSIZE]; - static u32 vec_results[NUMVEC] = { - 0x0e2c157f, 0xe980ebf6, 0xde74bded, - 0xd579c862, 0xba979ad0, 0x2b29d913 - }; - static u32 tot_vec_results = 0x24c5d375; - - struct scatterlist sg[NUMVEC]; - struct crypto_tfm *tfm; - char *fmtdata = "testing crc32c initialized to %08x: %s\n"; -#define SEEDTESTVAL 0xedcba987 - u32 seed; - - printk("\ntesting crc32c\n"); - - tfm = crypto_alloc_tfm("crc32c", 0); - if (tfm == NULL) { - printk("failed to load transform for crc32c\n"); - return; - } - - crypto_digest_init(tfm); - crypto_digest_final(tfm, (u8*)&crc); - printk(fmtdata, crc, (crc == 0) ? "pass" : "ERROR"); - - /* - * stuff test_vec with known values, simple incrementing - * byte values. 
- */ - b = 0; - for (i = 0; i < NUMVEC; i++) { - for (j = 0; j < VECSIZE; j++) - test_vec[i][j] = ++b; - sg_set_buf(&sg[i], test_vec[i], VECSIZE); - } - - seed = SEEDTESTVAL; - (void)crypto_digest_setkey(tfm, (const u8*)&seed, sizeof(u32)); - crypto_digest_final(tfm, (u8*)&crc); - printk("testing crc32c setkey returns %08x : %s\n", crc, (crc == (SEEDTESTVAL ^ ~(u32)0)) ? - "pass" : "ERROR"); - - printk("testing crc32c using update/final:\n"); - - pass = 1; /* assume all is well */ - - for (i = 0; i < NUMVEC; i++) { - seed = ~(u32)0; - (void)crypto_digest_setkey(tfm, (const u8*)&seed, sizeof(u32)); - crypto_digest_update(tfm, &sg[i], 1); - crypto_digest_final(tfm, (u8*)&crc); - if (crc == vec_results[i]) { - printk(" %08x:OK", crc); - } else { - printk(" %08x:BAD, wanted %08x\n", crc, vec_results[i]); - pass = 0; - } - } - - printk("\ntesting crc32c using incremental accumulator:\n"); - crc = 0; - for (i = 0; i < NUMVEC; i++) { - seed = (crc ^ ~(u32)0); - (void)crypto_digest_setkey(tfm, (const u8*)&seed, sizeof(u32)); - crypto_digest_update(tfm, &sg[i], 1); - crypto_digest_final(tfm, (u8*)&crc); - } - if (crc == tot_vec_results) { - printk(" %08x:OK", crc); - } else { - printk(" %08x:BAD, wanted %08x\n", crc, tot_vec_results); - pass = 0; - } - - printk("\ntesting crc32c using digest:\n"); - seed = ~(u32)0; - (void)crypto_digest_setkey(tfm, (const u8*)&seed, sizeof(u32)); - crypto_digest_digest(tfm, sg, NUMVEC, (u8*)&crc); - if (crc == tot_vec_results) { - printk(" %08x:OK", crc); - } else { - printk(" %08x:BAD, wanted %08x\n", crc, tot_vec_results); - pass = 0; - } - - printk("\n%s\n", pass ? "pass" : "ERROR"); - - crypto_free_tfm(tfm); - printk("crc32c test complete\n"); + crypto_free_comp(tfm); } static void test_available(void) @@ -869,8 +830,8 @@ static void test_available(void) while (*name) { printk("alg %s ", *name); - printk((crypto_alg_available(*name, 0)) ? - "found\n" : "not found\n"); + printk(crypto_has_alg(*name, 0, CRYPTO_ALG_ASYNC) ? 
+ "found\n" : "not found\n"); name++; } } @@ -885,79 +846,119 @@ static void do_test(void) test_hash("sha1", sha1_tv_template, SHA1_TEST_VECTORS); //DES - test_cipher ("des", MODE_ECB, ENCRYPT, des_enc_tv_template, DES_ENC_TEST_VECTORS); - test_cipher ("des", MODE_ECB, DECRYPT, des_dec_tv_template, DES_DEC_TEST_VECTORS); - test_cipher ("des", MODE_CBC, ENCRYPT, des_cbc_enc_tv_template, DES_CBC_ENC_TEST_VECTORS); - test_cipher ("des", MODE_CBC, DECRYPT, des_cbc_dec_tv_template, DES_CBC_DEC_TEST_VECTORS); + test_cipher("ecb(des)", ENCRYPT, des_enc_tv_template, + DES_ENC_TEST_VECTORS); + test_cipher("ecb(des)", DECRYPT, des_dec_tv_template, + DES_DEC_TEST_VECTORS); + test_cipher("cbc(des)", ENCRYPT, des_cbc_enc_tv_template, + DES_CBC_ENC_TEST_VECTORS); + test_cipher("cbc(des)", DECRYPT, des_cbc_dec_tv_template, + DES_CBC_DEC_TEST_VECTORS); //DES3_EDE - test_cipher ("des3_ede", MODE_ECB, ENCRYPT, des3_ede_enc_tv_template, DES3_EDE_ENC_TEST_VECTORS); - test_cipher ("des3_ede", MODE_ECB, DECRYPT, des3_ede_dec_tv_template, DES3_EDE_DEC_TEST_VECTORS); + test_cipher("ecb(des3_ede)", ENCRYPT, des3_ede_enc_tv_template, + DES3_EDE_ENC_TEST_VECTORS); + test_cipher("ecb(des3_ede)", DECRYPT, des3_ede_dec_tv_template, + DES3_EDE_DEC_TEST_VECTORS); test_hash("md4", md4_tv_template, MD4_TEST_VECTORS); test_hash("sha256", sha256_tv_template, SHA256_TEST_VECTORS); //BLOWFISH - test_cipher ("blowfish", MODE_ECB, ENCRYPT, bf_enc_tv_template, BF_ENC_TEST_VECTORS); - test_cipher ("blowfish", MODE_ECB, DECRYPT, bf_dec_tv_template, BF_DEC_TEST_VECTORS); - test_cipher ("blowfish", MODE_CBC, ENCRYPT, bf_cbc_enc_tv_template, BF_CBC_ENC_TEST_VECTORS); - test_cipher ("blowfish", MODE_CBC, DECRYPT, bf_cbc_dec_tv_template, BF_CBC_DEC_TEST_VECTORS); + test_cipher("ecb(blowfish)", ENCRYPT, bf_enc_tv_template, + BF_ENC_TEST_VECTORS); + test_cipher("ecb(blowfish)", DECRYPT, bf_dec_tv_template, + BF_DEC_TEST_VECTORS); + test_cipher("cbc(blowfish)", ENCRYPT, bf_cbc_enc_tv_template, + BF_CBC_ENC_TEST_VECTORS); + test_cipher("cbc(blowfish)", DECRYPT, bf_cbc_dec_tv_template, + BF_CBC_DEC_TEST_VECTORS); //TWOFISH - test_cipher ("twofish", MODE_ECB, ENCRYPT, tf_enc_tv_template, TF_ENC_TEST_VECTORS); - test_cipher ("twofish", MODE_ECB, DECRYPT, tf_dec_tv_template, TF_DEC_TEST_VECTORS); - test_cipher ("twofish", MODE_CBC, ENCRYPT, tf_cbc_enc_tv_template, TF_CBC_ENC_TEST_VECTORS); - test_cipher ("twofish", MODE_CBC, DECRYPT, tf_cbc_dec_tv_template, TF_CBC_DEC_TEST_VECTORS); + test_cipher("ecb(twofish)", ENCRYPT, tf_enc_tv_template, + TF_ENC_TEST_VECTORS); + test_cipher("ecb(twofish)", DECRYPT, tf_dec_tv_template, + TF_DEC_TEST_VECTORS); + test_cipher("cbc(twofish)", ENCRYPT, tf_cbc_enc_tv_template, + TF_CBC_ENC_TEST_VECTORS); + test_cipher("cbc(twofish)", DECRYPT, tf_cbc_dec_tv_template, + TF_CBC_DEC_TEST_VECTORS); //SERPENT - test_cipher ("serpent", MODE_ECB, ENCRYPT, serpent_enc_tv_template, SERPENT_ENC_TEST_VECTORS); - test_cipher ("serpent", MODE_ECB, DECRYPT, serpent_dec_tv_template, SERPENT_DEC_TEST_VECTORS); + test_cipher("ecb(serpent)", ENCRYPT, serpent_enc_tv_template, + SERPENT_ENC_TEST_VECTORS); + test_cipher("ecb(serpent)", DECRYPT, serpent_dec_tv_template, + SERPENT_DEC_TEST_VECTORS); //TNEPRES - test_cipher ("tnepres", MODE_ECB, ENCRYPT, tnepres_enc_tv_template, TNEPRES_ENC_TEST_VECTORS); - test_cipher ("tnepres", MODE_ECB, DECRYPT, tnepres_dec_tv_template, TNEPRES_DEC_TEST_VECTORS); + test_cipher("ecb(tnepres)", ENCRYPT, tnepres_enc_tv_template, + TNEPRES_ENC_TEST_VECTORS); + test_cipher("ecb(tnepres)", DECRYPT, 
tnepres_dec_tv_template, + TNEPRES_DEC_TEST_VECTORS); //AES - test_cipher ("aes", MODE_ECB, ENCRYPT, aes_enc_tv_template, AES_ENC_TEST_VECTORS); - test_cipher ("aes", MODE_ECB, DECRYPT, aes_dec_tv_template, AES_DEC_TEST_VECTORS); - test_cipher ("aes", MODE_CBC, ENCRYPT, aes_cbc_enc_tv_template, AES_CBC_ENC_TEST_VECTORS); - test_cipher ("aes", MODE_CBC, DECRYPT, aes_cbc_dec_tv_template, AES_CBC_DEC_TEST_VECTORS); + test_cipher("ecb(aes)", ENCRYPT, aes_enc_tv_template, + AES_ENC_TEST_VECTORS); + test_cipher("ecb(aes)", DECRYPT, aes_dec_tv_template, + AES_DEC_TEST_VECTORS); + test_cipher("cbc(aes)", ENCRYPT, aes_cbc_enc_tv_template, + AES_CBC_ENC_TEST_VECTORS); + test_cipher("cbc(aes)", DECRYPT, aes_cbc_dec_tv_template, + AES_CBC_DEC_TEST_VECTORS); //CAST5 - test_cipher ("cast5", MODE_ECB, ENCRYPT, cast5_enc_tv_template, CAST5_ENC_TEST_VECTORS); - test_cipher ("cast5", MODE_ECB, DECRYPT, cast5_dec_tv_template, CAST5_DEC_TEST_VECTORS); + test_cipher("ecb(cast5)", ENCRYPT, cast5_enc_tv_template, + CAST5_ENC_TEST_VECTORS); + test_cipher("ecb(cast5)", DECRYPT, cast5_dec_tv_template, + CAST5_DEC_TEST_VECTORS); //CAST6 - test_cipher ("cast6", MODE_ECB, ENCRYPT, cast6_enc_tv_template, CAST6_ENC_TEST_VECTORS); - test_cipher ("cast6", MODE_ECB, DECRYPT, cast6_dec_tv_template, CAST6_DEC_TEST_VECTORS); + test_cipher("ecb(cast6)", ENCRYPT, cast6_enc_tv_template, + CAST6_ENC_TEST_VECTORS); + test_cipher("ecb(cast6)", DECRYPT, cast6_dec_tv_template, + CAST6_DEC_TEST_VECTORS); //ARC4 - test_cipher ("arc4", MODE_ECB, ENCRYPT, arc4_enc_tv_template, ARC4_ENC_TEST_VECTORS); - test_cipher ("arc4", MODE_ECB, DECRYPT, arc4_dec_tv_template, ARC4_DEC_TEST_VECTORS); + test_cipher("ecb(arc4)", ENCRYPT, arc4_enc_tv_template, + ARC4_ENC_TEST_VECTORS); + test_cipher("ecb(arc4)", DECRYPT, arc4_dec_tv_template, + ARC4_DEC_TEST_VECTORS); //TEA - test_cipher ("tea", MODE_ECB, ENCRYPT, tea_enc_tv_template, TEA_ENC_TEST_VECTORS); - test_cipher ("tea", MODE_ECB, DECRYPT, tea_dec_tv_template, TEA_DEC_TEST_VECTORS); + test_cipher("ecb(tea)", ENCRYPT, tea_enc_tv_template, + TEA_ENC_TEST_VECTORS); + test_cipher("ecb(tea)", DECRYPT, tea_dec_tv_template, + TEA_DEC_TEST_VECTORS); //XTEA - test_cipher ("xtea", MODE_ECB, ENCRYPT, xtea_enc_tv_template, XTEA_ENC_TEST_VECTORS); - test_cipher ("xtea", MODE_ECB, DECRYPT, xtea_dec_tv_template, XTEA_DEC_TEST_VECTORS); + test_cipher("ecb(xtea)", ENCRYPT, xtea_enc_tv_template, + XTEA_ENC_TEST_VECTORS); + test_cipher("ecb(xtea)", DECRYPT, xtea_dec_tv_template, + XTEA_DEC_TEST_VECTORS); //KHAZAD - test_cipher ("khazad", MODE_ECB, ENCRYPT, khazad_enc_tv_template, KHAZAD_ENC_TEST_VECTORS); - test_cipher ("khazad", MODE_ECB, DECRYPT, khazad_dec_tv_template, KHAZAD_DEC_TEST_VECTORS); + test_cipher("ecb(khazad)", ENCRYPT, khazad_enc_tv_template, + KHAZAD_ENC_TEST_VECTORS); + test_cipher("ecb(khazad)", DECRYPT, khazad_dec_tv_template, + KHAZAD_DEC_TEST_VECTORS); //ANUBIS - test_cipher ("anubis", MODE_ECB, ENCRYPT, anubis_enc_tv_template, ANUBIS_ENC_TEST_VECTORS); - test_cipher ("anubis", MODE_ECB, DECRYPT, anubis_dec_tv_template, ANUBIS_DEC_TEST_VECTORS); - test_cipher ("anubis", MODE_CBC, ENCRYPT, anubis_cbc_enc_tv_template, ANUBIS_CBC_ENC_TEST_VECTORS); - test_cipher ("anubis", MODE_CBC, DECRYPT, anubis_cbc_dec_tv_template, ANUBIS_CBC_ENC_TEST_VECTORS); + test_cipher("ecb(anubis)", ENCRYPT, anubis_enc_tv_template, + ANUBIS_ENC_TEST_VECTORS); + test_cipher("ecb(anubis)", DECRYPT, anubis_dec_tv_template, + ANUBIS_DEC_TEST_VECTORS); + test_cipher("cbc(anubis)", ENCRYPT, anubis_cbc_enc_tv_template, + 
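The renamed test entries reflect that the chaining mode is no longer a transform flag but part of the algorithm name, instantiated through a template at allocation time; a minimal sketch (error handling omitted, "twofish" chosen arbitrarily):

    struct crypto_blkcipher *ecb, *cbc;

    /* the "ecb"/"cbc" templates wrap the bare "twofish" cipher */
    ecb = crypto_alloc_blkcipher("ecb(twofish)", 0, CRYPTO_ALG_ASYNC);
    cbc = crypto_alloc_blkcipher("cbc(twofish)", 0, CRYPTO_ALG_ASYNC);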
ANUBIS_CBC_ENC_TEST_VECTORS); + test_cipher("cbc(anubis)", DECRYPT, anubis_cbc_dec_tv_template, + ANUBIS_CBC_ENC_TEST_VECTORS); //XETA - test_cipher ("xeta", MODE_ECB, ENCRYPT, xeta_enc_tv_template, XETA_ENC_TEST_VECTORS); - test_cipher ("xeta", MODE_ECB, DECRYPT, xeta_dec_tv_template, XETA_DEC_TEST_VECTORS); + test_cipher("ecb(xeta)", ENCRYPT, xeta_enc_tv_template, + XETA_ENC_TEST_VECTORS); + test_cipher("ecb(xeta)", DECRYPT, xeta_dec_tv_template, + XETA_DEC_TEST_VECTORS); test_hash("sha384", sha384_tv_template, SHA384_TEST_VECTORS); test_hash("sha512", sha512_tv_template, SHA512_TEST_VECTORS); @@ -968,12 +969,13 @@ static void do_test(void) test_hash("tgr160", tgr160_tv_template, TGR160_TEST_VECTORS); test_hash("tgr128", tgr128_tv_template, TGR128_TEST_VECTORS); test_deflate(); - test_crc32c(); -#ifdef CONFIG_CRYPTO_HMAC - test_hmac("md5", hmac_md5_tv_template, HMAC_MD5_TEST_VECTORS); - test_hmac("sha1", hmac_sha1_tv_template, HMAC_SHA1_TEST_VECTORS); - test_hmac("sha256", hmac_sha256_tv_template, HMAC_SHA256_TEST_VECTORS); -#endif + test_hash("crc32c", crc32c_tv_template, CRC32C_TEST_VECTORS); + test_hash("hmac(md5)", hmac_md5_tv_template, + HMAC_MD5_TEST_VECTORS); + test_hash("hmac(sha1)", hmac_sha1_tv_template, + HMAC_SHA1_TEST_VECTORS); + test_hash("hmac(sha256)", hmac_sha256_tv_template, + HMAC_SHA256_TEST_VECTORS); test_hash("michael_mic", michael_mic_tv_template, MICHAEL_MIC_TEST_VECTORS); break; @@ -987,15 +989,21 @@ static void do_test(void) break; case 3: - test_cipher ("des", MODE_ECB, ENCRYPT, des_enc_tv_template, DES_ENC_TEST_VECTORS); - test_cipher ("des", MODE_ECB, DECRYPT, des_dec_tv_template, DES_DEC_TEST_VECTORS); - test_cipher ("des", MODE_CBC, ENCRYPT, des_cbc_enc_tv_template, DES_CBC_ENC_TEST_VECTORS); - test_cipher ("des", MODE_CBC, DECRYPT, des_cbc_dec_tv_template, DES_CBC_DEC_TEST_VECTORS); + test_cipher("ecb(des)", ENCRYPT, des_enc_tv_template, + DES_ENC_TEST_VECTORS); + test_cipher("ecb(des)", DECRYPT, des_dec_tv_template, + DES_DEC_TEST_VECTORS); + test_cipher("cbc(des)", ENCRYPT, des_cbc_enc_tv_template, + DES_CBC_ENC_TEST_VECTORS); + test_cipher("cbc(des)", DECRYPT, des_cbc_dec_tv_template, + DES_CBC_DEC_TEST_VECTORS); break; case 4: - test_cipher ("des3_ede", MODE_ECB, ENCRYPT, des3_ede_enc_tv_template, DES3_EDE_ENC_TEST_VECTORS); - test_cipher ("des3_ede", MODE_ECB, DECRYPT, des3_ede_dec_tv_template, DES3_EDE_DEC_TEST_VECTORS); + test_cipher("ecb(des3_ede)", ENCRYPT, des3_ede_enc_tv_template, + DES3_EDE_ENC_TEST_VECTORS); + test_cipher("ecb(des3_ede)", DECRYPT, des3_ede_dec_tv_template, + DES3_EDE_DEC_TEST_VECTORS); break; case 5: @@ -1007,29 +1015,43 @@ static void do_test(void) break; case 7: - test_cipher ("blowfish", MODE_ECB, ENCRYPT, bf_enc_tv_template, BF_ENC_TEST_VECTORS); - test_cipher ("blowfish", MODE_ECB, DECRYPT, bf_dec_tv_template, BF_DEC_TEST_VECTORS); - test_cipher ("blowfish", MODE_CBC, ENCRYPT, bf_cbc_enc_tv_template, BF_CBC_ENC_TEST_VECTORS); - test_cipher ("blowfish", MODE_CBC, DECRYPT, bf_cbc_dec_tv_template, BF_CBC_DEC_TEST_VECTORS); + test_cipher("ecb(blowfish)", ENCRYPT, bf_enc_tv_template, + BF_ENC_TEST_VECTORS); + test_cipher("ecb(blowfish)", DECRYPT, bf_dec_tv_template, + BF_DEC_TEST_VECTORS); + test_cipher("cbc(blowfish)", ENCRYPT, bf_cbc_enc_tv_template, + BF_CBC_ENC_TEST_VECTORS); + test_cipher("cbc(blowfish)", DECRYPT, bf_cbc_dec_tv_template, + BF_CBC_DEC_TEST_VECTORS); break; case 8: - test_cipher ("twofish", MODE_ECB, ENCRYPT, tf_enc_tv_template, TF_ENC_TEST_VECTORS); - test_cipher ("twofish", MODE_ECB, DECRYPT, 
tf_dec_tv_template, TF_DEC_TEST_VECTORS); - test_cipher ("twofish", MODE_CBC, ENCRYPT, tf_cbc_enc_tv_template, TF_CBC_ENC_TEST_VECTORS); - test_cipher ("twofish", MODE_CBC, DECRYPT, tf_cbc_dec_tv_template, TF_CBC_DEC_TEST_VECTORS); + test_cipher("ecb(twofish)", ENCRYPT, tf_enc_tv_template, + TF_ENC_TEST_VECTORS); + test_cipher("ecb(twofish)", DECRYPT, tf_dec_tv_template, + TF_DEC_TEST_VECTORS); + test_cipher("cbc(twofish)", ENCRYPT, tf_cbc_enc_tv_template, + TF_CBC_ENC_TEST_VECTORS); + test_cipher("cbc(twofish)", DECRYPT, tf_cbc_dec_tv_template, + TF_CBC_DEC_TEST_VECTORS); break; case 9: - test_cipher ("serpent", MODE_ECB, ENCRYPT, serpent_enc_tv_template, SERPENT_ENC_TEST_VECTORS); - test_cipher ("serpent", MODE_ECB, DECRYPT, serpent_dec_tv_template, SERPENT_DEC_TEST_VECTORS); + test_cipher("ecb(serpent)", ENCRYPT, serpent_enc_tv_template, + SERPENT_ENC_TEST_VECTORS); + test_cipher("ecb(serpent)", DECRYPT, serpent_dec_tv_template, + SERPENT_DEC_TEST_VECTORS); break; case 10: - test_cipher ("aes", MODE_ECB, ENCRYPT, aes_enc_tv_template, AES_ENC_TEST_VECTORS); - test_cipher ("aes", MODE_ECB, DECRYPT, aes_dec_tv_template, AES_DEC_TEST_VECTORS); - test_cipher ("aes", MODE_CBC, ENCRYPT, aes_cbc_enc_tv_template, AES_CBC_ENC_TEST_VECTORS); - test_cipher ("aes", MODE_CBC, DECRYPT, aes_cbc_dec_tv_template, AES_CBC_DEC_TEST_VECTORS); + test_cipher("ecb(aes)", ENCRYPT, aes_enc_tv_template, + AES_ENC_TEST_VECTORS); + test_cipher("ecb(aes)", DECRYPT, aes_dec_tv_template, + AES_DEC_TEST_VECTORS); + test_cipher("cbc(aes)", ENCRYPT, aes_cbc_enc_tv_template, + AES_CBC_ENC_TEST_VECTORS); + test_cipher("cbc(aes)", DECRYPT, aes_cbc_dec_tv_template, + AES_CBC_DEC_TEST_VECTORS); break; case 11: @@ -1045,18 +1067,24 @@ static void do_test(void) break; case 14: - test_cipher ("cast5", MODE_ECB, ENCRYPT, cast5_enc_tv_template, CAST5_ENC_TEST_VECTORS); - test_cipher ("cast5", MODE_ECB, DECRYPT, cast5_dec_tv_template, CAST5_DEC_TEST_VECTORS); + test_cipher("ecb(cast5)", ENCRYPT, cast5_enc_tv_template, + CAST5_ENC_TEST_VECTORS); + test_cipher("ecb(cast5)", DECRYPT, cast5_dec_tv_template, + CAST5_DEC_TEST_VECTORS); break; case 15: - test_cipher ("cast6", MODE_ECB, ENCRYPT, cast6_enc_tv_template, CAST6_ENC_TEST_VECTORS); - test_cipher ("cast6", MODE_ECB, DECRYPT, cast6_dec_tv_template, CAST6_DEC_TEST_VECTORS); + test_cipher("ecb(cast6)", ENCRYPT, cast6_enc_tv_template, + CAST6_ENC_TEST_VECTORS); + test_cipher("ecb(cast6)", DECRYPT, cast6_dec_tv_template, + CAST6_DEC_TEST_VECTORS); break; case 16: - test_cipher ("arc4", MODE_ECB, ENCRYPT, arc4_enc_tv_template, ARC4_ENC_TEST_VECTORS); - test_cipher ("arc4", MODE_ECB, DECRYPT, arc4_dec_tv_template, ARC4_DEC_TEST_VECTORS); + test_cipher("ecb(arc4)", ENCRYPT, arc4_enc_tv_template, + ARC4_ENC_TEST_VECTORS); + test_cipher("ecb(arc4)", DECRYPT, arc4_dec_tv_template, + ARC4_DEC_TEST_VECTORS); break; case 17: @@ -1064,22 +1092,28 @@ static void do_test(void) break; case 18: - test_crc32c(); + test_hash("crc32c", crc32c_tv_template, CRC32C_TEST_VECTORS); break; case 19: - test_cipher ("tea", MODE_ECB, ENCRYPT, tea_enc_tv_template, TEA_ENC_TEST_VECTORS); - test_cipher ("tea", MODE_ECB, DECRYPT, tea_dec_tv_template, TEA_DEC_TEST_VECTORS); + test_cipher("ecb(tea)", ENCRYPT, tea_enc_tv_template, + TEA_ENC_TEST_VECTORS); + test_cipher("ecb(tea)", DECRYPT, tea_dec_tv_template, + TEA_DEC_TEST_VECTORS); break; case 20: - test_cipher ("xtea", MODE_ECB, ENCRYPT, xtea_enc_tv_template, XTEA_ENC_TEST_VECTORS); - test_cipher ("xtea", MODE_ECB, DECRYPT, xtea_dec_tv_template, 
XTEA_DEC_TEST_VECTORS); + test_cipher("ecb(xtea)", ENCRYPT, xtea_enc_tv_template, + XTEA_ENC_TEST_VECTORS); + test_cipher("ecb(xtea)", DECRYPT, xtea_dec_tv_template, + XTEA_DEC_TEST_VECTORS); break; case 21: - test_cipher ("khazad", MODE_ECB, ENCRYPT, khazad_enc_tv_template, KHAZAD_ENC_TEST_VECTORS); - test_cipher ("khazad", MODE_ECB, DECRYPT, khazad_dec_tv_template, KHAZAD_DEC_TEST_VECTORS); + test_cipher("ecb(khazad)", ENCRYPT, khazad_enc_tv_template, + KHAZAD_ENC_TEST_VECTORS); + test_cipher("ecb(khazad)", DECRYPT, khazad_dec_tv_template, + KHAZAD_DEC_TEST_VECTORS); break; case 22: @@ -1095,15 +1129,21 @@ static void do_test(void) break; case 25: - test_cipher ("tnepres", MODE_ECB, ENCRYPT, tnepres_enc_tv_template, TNEPRES_ENC_TEST_VECTORS); - test_cipher ("tnepres", MODE_ECB, DECRYPT, tnepres_dec_tv_template, TNEPRES_DEC_TEST_VECTORS); + test_cipher("ecb(tnepres)", ENCRYPT, tnepres_enc_tv_template, + TNEPRES_ENC_TEST_VECTORS); + test_cipher("ecb(tnepres)", DECRYPT, tnepres_dec_tv_template, + TNEPRES_DEC_TEST_VECTORS); break; case 26: - test_cipher ("anubis", MODE_ECB, ENCRYPT, anubis_enc_tv_template, ANUBIS_ENC_TEST_VECTORS); - test_cipher ("anubis", MODE_ECB, DECRYPT, anubis_dec_tv_template, ANUBIS_DEC_TEST_VECTORS); - test_cipher ("anubis", MODE_CBC, ENCRYPT, anubis_cbc_enc_tv_template, ANUBIS_CBC_ENC_TEST_VECTORS); - test_cipher ("anubis", MODE_CBC, DECRYPT, anubis_cbc_dec_tv_template, ANUBIS_CBC_ENC_TEST_VECTORS); + test_cipher("ecb(anubis)", ENCRYPT, anubis_enc_tv_template, + ANUBIS_ENC_TEST_VECTORS); + test_cipher("ecb(anubis)", DECRYPT, anubis_dec_tv_template, + ANUBIS_DEC_TEST_VECTORS); + test_cipher("cbc(anubis)", ENCRYPT, anubis_cbc_enc_tv_template, + ANUBIS_CBC_ENC_TEST_VECTORS); + test_cipher("cbc(anubis)", DECRYPT, anubis_cbc_dec_tv_template, + ANUBIS_CBC_ENC_TEST_VECTORS); break; case 27: @@ -1120,85 +1160,88 @@ static void do_test(void) break; case 30: - test_cipher ("xeta", MODE_ECB, ENCRYPT, xeta_enc_tv_template, XETA_ENC_TEST_VECTORS); - test_cipher ("xeta", MODE_ECB, DECRYPT, xeta_dec_tv_template, XETA_DEC_TEST_VECTORS); + test_cipher("ecb(xeta)", ENCRYPT, xeta_enc_tv_template, + XETA_ENC_TEST_VECTORS); + test_cipher("ecb(xeta)", DECRYPT, xeta_dec_tv_template, + XETA_DEC_TEST_VECTORS); break; -#ifdef CONFIG_CRYPTO_HMAC case 100: - test_hmac("md5", hmac_md5_tv_template, HMAC_MD5_TEST_VECTORS); + test_hash("hmac(md5)", hmac_md5_tv_template, + HMAC_MD5_TEST_VECTORS); break; case 101: - test_hmac("sha1", hmac_sha1_tv_template, HMAC_SHA1_TEST_VECTORS); + test_hash("hmac(sha1)", hmac_sha1_tv_template, + HMAC_SHA1_TEST_VECTORS); break; case 102: - test_hmac("sha256", hmac_sha256_tv_template, HMAC_SHA256_TEST_VECTORS); + test_hash("hmac(sha256)", hmac_sha256_tv_template, + HMAC_SHA256_TEST_VECTORS); break; -#endif case 200: - test_cipher_speed("aes", MODE_ECB, ENCRYPT, sec, NULL, 0, + test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0, aes_speed_template); - test_cipher_speed("aes", MODE_ECB, DECRYPT, sec, NULL, 0, + test_cipher_speed("ecb(aes)", DECRYPT, sec, NULL, 0, aes_speed_template); - test_cipher_speed("aes", MODE_CBC, ENCRYPT, sec, NULL, 0, + test_cipher_speed("cbc(aes)", ENCRYPT, sec, NULL, 0, aes_speed_template); - test_cipher_speed("aes", MODE_CBC, DECRYPT, sec, NULL, 0, + test_cipher_speed("cbc(aes)", DECRYPT, sec, NULL, 0, aes_speed_template); break; case 201: - test_cipher_speed("des3_ede", MODE_ECB, ENCRYPT, sec, + test_cipher_speed("ecb(des3_ede)", ENCRYPT, sec, des3_ede_enc_tv_template, DES3_EDE_ENC_TEST_VECTORS, des3_ede_speed_template); - 
test_cipher_speed("des3_ede", MODE_ECB, DECRYPT, sec, + test_cipher_speed("ecb(des3_ede)", DECRYPT, sec, des3_ede_dec_tv_template, DES3_EDE_DEC_TEST_VECTORS, des3_ede_speed_template); - test_cipher_speed("des3_ede", MODE_CBC, ENCRYPT, sec, + test_cipher_speed("cbc(des3_ede)", ENCRYPT, sec, des3_ede_enc_tv_template, DES3_EDE_ENC_TEST_VECTORS, des3_ede_speed_template); - test_cipher_speed("des3_ede", MODE_CBC, DECRYPT, sec, + test_cipher_speed("cbc(des3_ede)", DECRYPT, sec, des3_ede_dec_tv_template, DES3_EDE_DEC_TEST_VECTORS, des3_ede_speed_template); break; case 202: - test_cipher_speed("twofish", MODE_ECB, ENCRYPT, sec, NULL, 0, + test_cipher_speed("ecb(twofish)", ENCRYPT, sec, NULL, 0, twofish_speed_template); - test_cipher_speed("twofish", MODE_ECB, DECRYPT, sec, NULL, 0, + test_cipher_speed("ecb(twofish)", DECRYPT, sec, NULL, 0, twofish_speed_template); - test_cipher_speed("twofish", MODE_CBC, ENCRYPT, sec, NULL, 0, + test_cipher_speed("cbc(twofish)", ENCRYPT, sec, NULL, 0, twofish_speed_template); - test_cipher_speed("twofish", MODE_CBC, DECRYPT, sec, NULL, 0, + test_cipher_speed("cbc(twofish)", DECRYPT, sec, NULL, 0, twofish_speed_template); break; case 203: - test_cipher_speed("blowfish", MODE_ECB, ENCRYPT, sec, NULL, 0, + test_cipher_speed("ecb(blowfish)", ENCRYPT, sec, NULL, 0, blowfish_speed_template); - test_cipher_speed("blowfish", MODE_ECB, DECRYPT, sec, NULL, 0, + test_cipher_speed("ecb(blowfish)", DECRYPT, sec, NULL, 0, blowfish_speed_template); - test_cipher_speed("blowfish", MODE_CBC, ENCRYPT, sec, NULL, 0, + test_cipher_speed("cbc(blowfish)", ENCRYPT, sec, NULL, 0, blowfish_speed_template); - test_cipher_speed("blowfish", MODE_CBC, DECRYPT, sec, NULL, 0, + test_cipher_speed("cbc(blowfish)", DECRYPT, sec, NULL, 0, blowfish_speed_template); break; case 204: - test_cipher_speed("des", MODE_ECB, ENCRYPT, sec, NULL, 0, + test_cipher_speed("ecb(des)", ENCRYPT, sec, NULL, 0, des_speed_template); - test_cipher_speed("des", MODE_ECB, DECRYPT, sec, NULL, 0, + test_cipher_speed("ecb(des)", DECRYPT, sec, NULL, 0, des_speed_template); - test_cipher_speed("des", MODE_CBC, ENCRYPT, sec, NULL, 0, + test_cipher_speed("cbc(des)", ENCRYPT, sec, NULL, 0, des_speed_template); - test_cipher_speed("des", MODE_CBC, DECRYPT, sec, NULL, 0, + test_cipher_speed("cbc(des)", DECRYPT, sec, NULL, 0, des_speed_template); break; @@ -1206,51 +1249,51 @@ static void do_test(void) /* fall through */ case 301: - test_digest_speed("md4", sec, generic_digest_speed_template); + test_hash_speed("md4", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; case 302: - test_digest_speed("md5", sec, generic_digest_speed_template); + test_hash_speed("md5", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; case 303: - test_digest_speed("sha1", sec, generic_digest_speed_template); + test_hash_speed("sha1", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; case 304: - test_digest_speed("sha256", sec, generic_digest_speed_template); + test_hash_speed("sha256", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; case 305: - test_digest_speed("sha384", sec, generic_digest_speed_template); + test_hash_speed("sha384", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; case 306: - test_digest_speed("sha512", sec, generic_digest_speed_template); + test_hash_speed("sha512", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; case 307: - test_digest_speed("wp256", sec, generic_digest_speed_template); + 
test_hash_speed("wp256", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; case 308: - test_digest_speed("wp384", sec, generic_digest_speed_template); + test_hash_speed("wp384", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; case 309: - test_digest_speed("wp512", sec, generic_digest_speed_template); + test_hash_speed("wp512", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; case 310: - test_digest_speed("tgr128", sec, generic_digest_speed_template); + test_hash_speed("tgr128", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; case 311: - test_digest_speed("tgr160", sec, generic_digest_speed_template); + test_hash_speed("tgr160", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; case 312: - test_digest_speed("tgr192", sec, generic_digest_speed_template); + test_hash_speed("tgr192", sec, generic_hash_speed_template); if (mode > 300 && mode < 400) break; case 399: diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index 1fac5602f633..a40c4411729e 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h @@ -28,7 +28,7 @@ struct hash_testvec { /* only used with keyed hash algorithms */ char key[128] __attribute__ ((__aligned__(4))); - char plaintext[128]; + char plaintext[240]; char digest[MAX_DIGEST_SIZE]; unsigned char tap[MAX_TAP]; unsigned char psize; @@ -36,16 +36,6 @@ struct hash_testvec { unsigned char ksize; }; -struct hmac_testvec { - char key[128]; - char plaintext[128]; - char digest[MAX_DIGEST_SIZE]; - unsigned char tap[MAX_TAP]; - unsigned char ksize; - unsigned char psize; - unsigned char np; -}; - struct cipher_testvec { char key[MAX_KEYLEN] __attribute__ ((__aligned__(4))); char iv[MAX_IVLEN]; @@ -65,7 +55,7 @@ struct cipher_speed { unsigned int blen; }; -struct digest_speed { +struct hash_speed { unsigned int blen; /* buffer length */ unsigned int plen; /* per-update length */ }; @@ -697,14 +687,13 @@ static struct hash_testvec tgr128_tv_template[] = { }, }; -#ifdef CONFIG_CRYPTO_HMAC /* * HMAC-MD5 test vectors from RFC2202 * (These need to be fixed to not use strlen). */ #define HMAC_MD5_TEST_VECTORS 7 -static struct hmac_testvec hmac_md5_tv_template[] = +static struct hash_testvec hmac_md5_tv_template[] = { { .key = { [0 ... 15] = 0x0b }, @@ -768,7 +757,7 @@ static struct hmac_testvec hmac_md5_tv_template[] = */ #define HMAC_SHA1_TEST_VECTORS 7 -static struct hmac_testvec hmac_sha1_tv_template[] = { +static struct hash_testvec hmac_sha1_tv_template[] = { { .key = { [0 ... 19] = 0x0b }, .ksize = 20, @@ -833,7 +822,7 @@ static struct hmac_testvec hmac_sha1_tv_template[] = { */ #define HMAC_SHA256_TEST_VECTORS 10 -static struct hmac_testvec hmac_sha256_tv_template[] = { +static struct hash_testvec hmac_sha256_tv_template[] = { { .key = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, @@ -944,8 +933,6 @@ static struct hmac_testvec hmac_sha256_tv_template[] = { }, }; -#endif /* CONFIG_CRYPTO_HMAC */ - /* * DES test vectors. 
*/ @@ -2897,6 +2884,183 @@ static struct hash_testvec michael_mic_tv_template[] = { }; /* + * CRC32C test vectors + */ +#define CRC32C_TEST_VECTORS 14 + +static struct hash_testvec crc32c_tv_template[] = { + { + .psize = 0, + .digest = { 0x00, 0x00, 0x00, 0x00 } + }, + { + .key = { 0x87, 0xa9, 0xcb, 0xed }, + .ksize = 4, + .psize = 0, + .digest = { 0x78, 0x56, 0x34, 0x12 }, + }, + { + .key = { 0xff, 0xff, 0xff, 0xff }, + .ksize = 4, + .plaintext = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28 }, + .psize = 40, + .digest = { 0x7f, 0x15, 0x2c, 0x0e } + }, + { + .key = { 0xff, 0xff, 0xff, 0xff }, + .ksize = 4, + .plaintext = { 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, + 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, + 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50 }, + .psize = 40, + .digest = { 0xf6, 0xeb, 0x80, 0xe9 } + }, + { + .key = { 0xff, 0xff, 0xff, 0xff }, + .ksize = 4, + .plaintext = { 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, + 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, + 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, + 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, + 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78 }, + .psize = 40, + .digest = { 0xed, 0xbd, 0x74, 0xde } + }, + { + .key = { 0xff, 0xff, 0xff, 0xff }, + .ksize = 4, + .plaintext = { 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, + 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, + 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, + 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, + 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0 }, + .psize = 40, + .digest = { 0x62, 0xc8, 0x79, 0xd5 } + }, + { + .key = { 0xff, 0xff, 0xff, 0xff }, + .ksize = 4, + .plaintext = { 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, + 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, + 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, + 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, + 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8 }, + .psize = 40, + .digest = { 0xd0, 0x9a, 0x97, 0xba } + }, + { + .key = { 0xff, 0xff, 0xff, 0xff }, + .ksize = 4, + .plaintext = { 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, + 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, + 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, + 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, + 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0 }, + .psize = 40, + .digest = { 0x13, 0xd9, 0x29, 0x2b } + }, + { + .key = { 0x80, 0xea, 0xd3, 0xf1 }, + .ksize = 4, + .plaintext = { 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, + 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, + 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50 }, + .psize = 40, + .digest = { 0x0c, 0xb5, 0xe2, 0xa2 } + }, + { + .key = { 0xf3, 0x4a, 0x1d, 0x5d }, + .ksize = 4, + .plaintext = { 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, + 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, + 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, + 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, + 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78 }, + .psize = 40, + .digest = { 0xd1, 0x7f, 0xfb, 0xa6 } + }, + { + .key = { 0x2e, 0x80, 0x04, 0x59 }, + .ksize = 4, + .plaintext = { 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, + 
0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, + 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, + 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, + 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0 }, + .psize = 40, + .digest = { 0x59, 0x33, 0xe6, 0x7a } + }, + { + .key = { 0xa6, 0xcc, 0x19, 0x85 }, + .ksize = 4, + .plaintext = { 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, + 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, + 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, + 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, + 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8 }, + .psize = 40, + .digest = { 0xbe, 0x03, 0x01, 0xd2 } + }, + { + .key = { 0x41, 0xfc, 0xfe, 0x2d }, + .ksize = 4, + .plaintext = { 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, + 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, + 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, + 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, + 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0 }, + .psize = 40, + .digest = { 0x75, 0xd3, 0xc5, 0x24 } + }, + { + .key = { 0xff, 0xff, 0xff, 0xff }, + .ksize = 4, + .plaintext = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, + 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, + 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, + 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, + 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, + 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, + 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, + 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, + 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, + 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, + 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, + 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, + 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, + 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, + 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, + 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, + 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, + 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, + 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, + 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, + 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, + 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, + 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0 }, + .psize = 240, + .digest = { 0x75, 0xd3, 0xc5, 0x24 }, + .np = 2, + .tap = { 31, 209 } + }, +}; + +/* * Cipher speed tests */ static struct cipher_speed aes_speed_template[] = { @@ -2983,7 +3147,7 @@ static struct cipher_speed des_speed_template[] = { /* * Digest speed tests */ -static struct digest_speed generic_digest_speed_template[] = { +static struct hash_speed generic_hash_speed_template[] = { { .blen = 16, .plen = 16, }, { .blen = 64, .plen = 16, }, { .blen = 64, .plen = 64, }, diff --git a/crypto/tea.c b/crypto/tea.c index 5367adc82fc9..1c54e26fa529 100644 --- a/crypto/tea.c +++ b/crypto/tea.c @@ -46,16 +46,10 @@ struct xtea_ctx { }; static int tea_setkey(struct crypto_tfm *tfm, const u8 *in_key, - unsigned int key_len, u32 *flags) + unsigned int key_len) { struct tea_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *key = (const __le32 *)in_key; - - if (key_len != 16) - { - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; - return -EINVAL; - } 
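The dropped length check is deliberate: setkey() no longer receives a flags out-parameter, implementations that need the flags read tfm->crt_flags instead, and a fixed key size such as TEA's 16 bytes is expected to be enforced against cra_min_keysize/cra_max_keysize before setkey() is reached. A sketch of the new shape (example_setkey and example_weak_key are hypothetical, not from the patch):

    static int example_setkey(struct crypto_tfm *tfm, const u8 *in_key,
                              unsigned int key_len)
    {
            /* replaces the old u32 *flags argument */
            u32 *flags = &tfm->crt_flags;

            if (example_weak_key(in_key)) {  /* hypothetical check */
                    *flags |= CRYPTO_TFM_RES_WEAK_KEY;
                    return -EINVAL;
            }
            return 0;
    }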
ctx->KEY[0] = le32_to_cpu(key[0]); ctx->KEY[1] = le32_to_cpu(key[1]); @@ -125,16 +119,10 @@ static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) } static int xtea_setkey(struct crypto_tfm *tfm, const u8 *in_key, - unsigned int key_len, u32 *flags) + unsigned int key_len) { struct xtea_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *key = (const __le32 *)in_key; - - if (key_len != 16) - { - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; - return -EINVAL; - } ctx->KEY[0] = le32_to_cpu(key[0]); ctx->KEY[1] = le32_to_cpu(key[1]); diff --git a/crypto/twofish.c b/crypto/twofish.c index ec2488242e2d..4979a2be48a9 100644 --- a/crypto/twofish.c +++ b/crypto/twofish.c @@ -39,6 +39,7 @@ */ #include <asm/byteorder.h> +#include <crypto/twofish.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> @@ -46,534 +47,6 @@ #include <linux/crypto.h> #include <linux/bitops.h> - -/* The large precomputed tables for the Twofish cipher (twofish.c) - * Taken from the same source as twofish.c - * Marc Mutz <Marc@Mutz.com> - */ - -/* These two tables are the q0 and q1 permutations, exactly as described in - * the Twofish paper. */ - -static const u8 q0[256] = { - 0xA9, 0x67, 0xB3, 0xE8, 0x04, 0xFD, 0xA3, 0x76, 0x9A, 0x92, 0x80, 0x78, - 0xE4, 0xDD, 0xD1, 0x38, 0x0D, 0xC6, 0x35, 0x98, 0x18, 0xF7, 0xEC, 0x6C, - 0x43, 0x75, 0x37, 0x26, 0xFA, 0x13, 0x94, 0x48, 0xF2, 0xD0, 0x8B, 0x30, - 0x84, 0x54, 0xDF, 0x23, 0x19, 0x5B, 0x3D, 0x59, 0xF3, 0xAE, 0xA2, 0x82, - 0x63, 0x01, 0x83, 0x2E, 0xD9, 0x51, 0x9B, 0x7C, 0xA6, 0xEB, 0xA5, 0xBE, - 0x16, 0x0C, 0xE3, 0x61, 0xC0, 0x8C, 0x3A, 0xF5, 0x73, 0x2C, 0x25, 0x0B, - 0xBB, 0x4E, 0x89, 0x6B, 0x53, 0x6A, 0xB4, 0xF1, 0xE1, 0xE6, 0xBD, 0x45, - 0xE2, 0xF4, 0xB6, 0x66, 0xCC, 0x95, 0x03, 0x56, 0xD4, 0x1C, 0x1E, 0xD7, - 0xFB, 0xC3, 0x8E, 0xB5, 0xE9, 0xCF, 0xBF, 0xBA, 0xEA, 0x77, 0x39, 0xAF, - 0x33, 0xC9, 0x62, 0x71, 0x81, 0x79, 0x09, 0xAD, 0x24, 0xCD, 0xF9, 0xD8, - 0xE5, 0xC5, 0xB9, 0x4D, 0x44, 0x08, 0x86, 0xE7, 0xA1, 0x1D, 0xAA, 0xED, - 0x06, 0x70, 0xB2, 0xD2, 0x41, 0x7B, 0xA0, 0x11, 0x31, 0xC2, 0x27, 0x90, - 0x20, 0xF6, 0x60, 0xFF, 0x96, 0x5C, 0xB1, 0xAB, 0x9E, 0x9C, 0x52, 0x1B, - 0x5F, 0x93, 0x0A, 0xEF, 0x91, 0x85, 0x49, 0xEE, 0x2D, 0x4F, 0x8F, 0x3B, - 0x47, 0x87, 0x6D, 0x46, 0xD6, 0x3E, 0x69, 0x64, 0x2A, 0xCE, 0xCB, 0x2F, - 0xFC, 0x97, 0x05, 0x7A, 0xAC, 0x7F, 0xD5, 0x1A, 0x4B, 0x0E, 0xA7, 0x5A, - 0x28, 0x14, 0x3F, 0x29, 0x88, 0x3C, 0x4C, 0x02, 0xB8, 0xDA, 0xB0, 0x17, - 0x55, 0x1F, 0x8A, 0x7D, 0x57, 0xC7, 0x8D, 0x74, 0xB7, 0xC4, 0x9F, 0x72, - 0x7E, 0x15, 0x22, 0x12, 0x58, 0x07, 0x99, 0x34, 0x6E, 0x50, 0xDE, 0x68, - 0x65, 0xBC, 0xDB, 0xF8, 0xC8, 0xA8, 0x2B, 0x40, 0xDC, 0xFE, 0x32, 0xA4, - 0xCA, 0x10, 0x21, 0xF0, 0xD3, 0x5D, 0x0F, 0x00, 0x6F, 0x9D, 0x36, 0x42, - 0x4A, 0x5E, 0xC1, 0xE0 -}; - -static const u8 q1[256] = { - 0x75, 0xF3, 0xC6, 0xF4, 0xDB, 0x7B, 0xFB, 0xC8, 0x4A, 0xD3, 0xE6, 0x6B, - 0x45, 0x7D, 0xE8, 0x4B, 0xD6, 0x32, 0xD8, 0xFD, 0x37, 0x71, 0xF1, 0xE1, - 0x30, 0x0F, 0xF8, 0x1B, 0x87, 0xFA, 0x06, 0x3F, 0x5E, 0xBA, 0xAE, 0x5B, - 0x8A, 0x00, 0xBC, 0x9D, 0x6D, 0xC1, 0xB1, 0x0E, 0x80, 0x5D, 0xD2, 0xD5, - 0xA0, 0x84, 0x07, 0x14, 0xB5, 0x90, 0x2C, 0xA3, 0xB2, 0x73, 0x4C, 0x54, - 0x92, 0x74, 0x36, 0x51, 0x38, 0xB0, 0xBD, 0x5A, 0xFC, 0x60, 0x62, 0x96, - 0x6C, 0x42, 0xF7, 0x10, 0x7C, 0x28, 0x27, 0x8C, 0x13, 0x95, 0x9C, 0xC7, - 0x24, 0x46, 0x3B, 0x70, 0xCA, 0xE3, 0x85, 0xCB, 0x11, 0xD0, 0x93, 0xB8, - 0xA6, 0x83, 0x20, 0xFF, 0x9F, 0x77, 0xC3, 0xCC, 0x03, 0x6F, 0x08, 0xBF, - 0x40, 0xE7, 0x2B, 0xE2, 0x79, 0x0C, 0xAA, 0x82, 0x41, 0x3A, 0xEA, 0xB9, - 0xE4, 0x9A, 
0xA4, 0x97, 0x7E, 0xDA, 0x7A, 0x17, 0x66, 0x94, 0xA1, 0x1D, - 0x3D, 0xF0, 0xDE, 0xB3, 0x0B, 0x72, 0xA7, 0x1C, 0xEF, 0xD1, 0x53, 0x3E, - 0x8F, 0x33, 0x26, 0x5F, 0xEC, 0x76, 0x2A, 0x49, 0x81, 0x88, 0xEE, 0x21, - 0xC4, 0x1A, 0xEB, 0xD9, 0xC5, 0x39, 0x99, 0xCD, 0xAD, 0x31, 0x8B, 0x01, - 0x18, 0x23, 0xDD, 0x1F, 0x4E, 0x2D, 0xF9, 0x48, 0x4F, 0xF2, 0x65, 0x8E, - 0x78, 0x5C, 0x58, 0x19, 0x8D, 0xE5, 0x98, 0x57, 0x67, 0x7F, 0x05, 0x64, - 0xAF, 0x63, 0xB6, 0xFE, 0xF5, 0xB7, 0x3C, 0xA5, 0xCE, 0xE9, 0x68, 0x44, - 0xE0, 0x4D, 0x43, 0x69, 0x29, 0x2E, 0xAC, 0x15, 0x59, 0xA8, 0x0A, 0x9E, - 0x6E, 0x47, 0xDF, 0x34, 0x35, 0x6A, 0xCF, 0xDC, 0x22, 0xC9, 0xC0, 0x9B, - 0x89, 0xD4, 0xED, 0xAB, 0x12, 0xA2, 0x0D, 0x52, 0xBB, 0x02, 0x2F, 0xA9, - 0xD7, 0x61, 0x1E, 0xB4, 0x50, 0x04, 0xF6, 0xC2, 0x16, 0x25, 0x86, 0x56, - 0x55, 0x09, 0xBE, 0x91 -}; - -/* These MDS tables are actually tables of MDS composed with q0 and q1, - * because it is only ever used that way and we can save some time by - * precomputing. Of course the main saving comes from precomputing the - * GF(2^8) multiplication involved in the MDS matrix multiply; by looking - * things up in these tables we reduce the matrix multiply to four lookups - * and three XORs. Semi-formally, the definition of these tables is: - * mds[0][i] = MDS (q1[i] 0 0 0)^T mds[1][i] = MDS (0 q0[i] 0 0)^T - * mds[2][i] = MDS (0 0 q1[i] 0)^T mds[3][i] = MDS (0 0 0 q0[i])^T - * where ^T means "transpose", the matrix multiply is performed in GF(2^8) - * represented as GF(2)[x]/v(x) where v(x)=x^8+x^6+x^5+x^3+1 as described - * by Schneier et al, and I'm casually glossing over the byte/word - * conversion issues. */ - -static const u32 mds[4][256] = { - {0xBCBC3275, 0xECEC21F3, 0x202043C6, 0xB3B3C9F4, 0xDADA03DB, 0x02028B7B, - 0xE2E22BFB, 0x9E9EFAC8, 0xC9C9EC4A, 0xD4D409D3, 0x18186BE6, 0x1E1E9F6B, - 0x98980E45, 0xB2B2387D, 0xA6A6D2E8, 0x2626B74B, 0x3C3C57D6, 0x93938A32, - 0x8282EED8, 0x525298FD, 0x7B7BD437, 0xBBBB3771, 0x5B5B97F1, 0x474783E1, - 0x24243C30, 0x5151E20F, 0xBABAC6F8, 0x4A4AF31B, 0xBFBF4887, 0x0D0D70FA, - 0xB0B0B306, 0x7575DE3F, 0xD2D2FD5E, 0x7D7D20BA, 0x666631AE, 0x3A3AA35B, - 0x59591C8A, 0x00000000, 0xCDCD93BC, 0x1A1AE09D, 0xAEAE2C6D, 0x7F7FABC1, - 0x2B2BC7B1, 0xBEBEB90E, 0xE0E0A080, 0x8A8A105D, 0x3B3B52D2, 0x6464BAD5, - 0xD8D888A0, 0xE7E7A584, 0x5F5FE807, 0x1B1B1114, 0x2C2CC2B5, 0xFCFCB490, - 0x3131272C, 0x808065A3, 0x73732AB2, 0x0C0C8173, 0x79795F4C, 0x6B6B4154, - 0x4B4B0292, 0x53536974, 0x94948F36, 0x83831F51, 0x2A2A3638, 0xC4C49CB0, - 0x2222C8BD, 0xD5D5F85A, 0xBDBDC3FC, 0x48487860, 0xFFFFCE62, 0x4C4C0796, - 0x4141776C, 0xC7C7E642, 0xEBEB24F7, 0x1C1C1410, 0x5D5D637C, 0x36362228, - 0x6767C027, 0xE9E9AF8C, 0x4444F913, 0x1414EA95, 0xF5F5BB9C, 0xCFCF18C7, - 0x3F3F2D24, 0xC0C0E346, 0x7272DB3B, 0x54546C70, 0x29294CCA, 0xF0F035E3, - 0x0808FE85, 0xC6C617CB, 0xF3F34F11, 0x8C8CE4D0, 0xA4A45993, 0xCACA96B8, - 0x68683BA6, 0xB8B84D83, 0x38382820, 0xE5E52EFF, 0xADAD569F, 0x0B0B8477, - 0xC8C81DC3, 0x9999FFCC, 0x5858ED03, 0x19199A6F, 0x0E0E0A08, 0x95957EBF, - 0x70705040, 0xF7F730E7, 0x6E6ECF2B, 0x1F1F6EE2, 0xB5B53D79, 0x09090F0C, - 0x616134AA, 0x57571682, 0x9F9F0B41, 0x9D9D803A, 0x111164EA, 0x2525CDB9, - 0xAFAFDDE4, 0x4545089A, 0xDFDF8DA4, 0xA3A35C97, 0xEAEAD57E, 0x353558DA, - 0xEDEDD07A, 0x4343FC17, 0xF8F8CB66, 0xFBFBB194, 0x3737D3A1, 0xFAFA401D, - 0xC2C2683D, 0xB4B4CCF0, 0x32325DDE, 0x9C9C71B3, 0x5656E70B, 0xE3E3DA72, - 0x878760A7, 0x15151B1C, 0xF9F93AEF, 0x6363BFD1, 0x3434A953, 0x9A9A853E, - 0xB1B1428F, 0x7C7CD133, 0x88889B26, 0x3D3DA65F, 0xA1A1D7EC, 0xE4E4DF76, - 0x8181942A, 
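For reference, the reduction described in the comment above amounts to the following per 32-bit column (mds_column_mul is an illustrative helper, not code from the patch):

    /* MDS matrix multiply over GF(2^8): four lookups, three XORs */
    static u32 mds_column_mul(u8 b0, u8 b1, u8 b2, u8 b3)
    {
            return mds[0][b0] ^ mds[1][b1] ^ mds[2][b2] ^ mds[3][b3];
    }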
0x91910149, 0x0F0FFB81, 0xEEEEAA88, 0x161661EE, 0xD7D77321, - 0x9797F5C4, 0xA5A5A81A, 0xFEFE3FEB, 0x6D6DB5D9, 0x7878AEC5, 0xC5C56D39, - 0x1D1DE599, 0x7676A4CD, 0x3E3EDCAD, 0xCBCB6731, 0xB6B6478B, 0xEFEF5B01, - 0x12121E18, 0x6060C523, 0x6A6AB0DD, 0x4D4DF61F, 0xCECEE94E, 0xDEDE7C2D, - 0x55559DF9, 0x7E7E5A48, 0x2121B24F, 0x03037AF2, 0xA0A02665, 0x5E5E198E, - 0x5A5A6678, 0x65654B5C, 0x62624E58, 0xFDFD4519, 0x0606F48D, 0x404086E5, - 0xF2F2BE98, 0x3333AC57, 0x17179067, 0x05058E7F, 0xE8E85E05, 0x4F4F7D64, - 0x89896AAF, 0x10109563, 0x74742FB6, 0x0A0A75FE, 0x5C5C92F5, 0x9B9B74B7, - 0x2D2D333C, 0x3030D6A5, 0x2E2E49CE, 0x494989E9, 0x46467268, 0x77775544, - 0xA8A8D8E0, 0x9696044D, 0x2828BD43, 0xA9A92969, 0xD9D97929, 0x8686912E, - 0xD1D187AC, 0xF4F44A15, 0x8D8D1559, 0xD6D682A8, 0xB9B9BC0A, 0x42420D9E, - 0xF6F6C16E, 0x2F2FB847, 0xDDDD06DF, 0x23233934, 0xCCCC6235, 0xF1F1C46A, - 0xC1C112CF, 0x8585EBDC, 0x8F8F9E22, 0x7171A1C9, 0x9090F0C0, 0xAAAA539B, - 0x0101F189, 0x8B8BE1D4, 0x4E4E8CED, 0x8E8E6FAB, 0xABABA212, 0x6F6F3EA2, - 0xE6E6540D, 0xDBDBF252, 0x92927BBB, 0xB7B7B602, 0x6969CA2F, 0x3939D9A9, - 0xD3D30CD7, 0xA7A72361, 0xA2A2AD1E, 0xC3C399B4, 0x6C6C4450, 0x07070504, - 0x04047FF6, 0x272746C2, 0xACACA716, 0xD0D07625, 0x50501386, 0xDCDCF756, - 0x84841A55, 0xE1E15109, 0x7A7A25BE, 0x1313EF91}, - - {0xA9D93939, 0x67901717, 0xB3719C9C, 0xE8D2A6A6, 0x04050707, 0xFD985252, - 0xA3658080, 0x76DFE4E4, 0x9A084545, 0x92024B4B, 0x80A0E0E0, 0x78665A5A, - 0xE4DDAFAF, 0xDDB06A6A, 0xD1BF6363, 0x38362A2A, 0x0D54E6E6, 0xC6432020, - 0x3562CCCC, 0x98BEF2F2, 0x181E1212, 0xF724EBEB, 0xECD7A1A1, 0x6C774141, - 0x43BD2828, 0x7532BCBC, 0x37D47B7B, 0x269B8888, 0xFA700D0D, 0x13F94444, - 0x94B1FBFB, 0x485A7E7E, 0xF27A0303, 0xD0E48C8C, 0x8B47B6B6, 0x303C2424, - 0x84A5E7E7, 0x54416B6B, 0xDF06DDDD, 0x23C56060, 0x1945FDFD, 0x5BA33A3A, - 0x3D68C2C2, 0x59158D8D, 0xF321ECEC, 0xAE316666, 0xA23E6F6F, 0x82165757, - 0x63951010, 0x015BEFEF, 0x834DB8B8, 0x2E918686, 0xD9B56D6D, 0x511F8383, - 0x9B53AAAA, 0x7C635D5D, 0xA63B6868, 0xEB3FFEFE, 0xA5D63030, 0xBE257A7A, - 0x16A7ACAC, 0x0C0F0909, 0xE335F0F0, 0x6123A7A7, 0xC0F09090, 0x8CAFE9E9, - 0x3A809D9D, 0xF5925C5C, 0x73810C0C, 0x2C273131, 0x2576D0D0, 0x0BE75656, - 0xBB7B9292, 0x4EE9CECE, 0x89F10101, 0x6B9F1E1E, 0x53A93434, 0x6AC4F1F1, - 0xB499C3C3, 0xF1975B5B, 0xE1834747, 0xE66B1818, 0xBDC82222, 0x450E9898, - 0xE26E1F1F, 0xF4C9B3B3, 0xB62F7474, 0x66CBF8F8, 0xCCFF9999, 0x95EA1414, - 0x03ED5858, 0x56F7DCDC, 0xD4E18B8B, 0x1C1B1515, 0x1EADA2A2, 0xD70CD3D3, - 0xFB2BE2E2, 0xC31DC8C8, 0x8E195E5E, 0xB5C22C2C, 0xE9894949, 0xCF12C1C1, - 0xBF7E9595, 0xBA207D7D, 0xEA641111, 0x77840B0B, 0x396DC5C5, 0xAF6A8989, - 0x33D17C7C, 0xC9A17171, 0x62CEFFFF, 0x7137BBBB, 0x81FB0F0F, 0x793DB5B5, - 0x0951E1E1, 0xADDC3E3E, 0x242D3F3F, 0xCDA47676, 0xF99D5555, 0xD8EE8282, - 0xE5864040, 0xC5AE7878, 0xB9CD2525, 0x4D049696, 0x44557777, 0x080A0E0E, - 0x86135050, 0xE730F7F7, 0xA1D33737, 0x1D40FAFA, 0xAA346161, 0xED8C4E4E, - 0x06B3B0B0, 0x706C5454, 0xB22A7373, 0xD2523B3B, 0x410B9F9F, 0x7B8B0202, - 0xA088D8D8, 0x114FF3F3, 0x3167CBCB, 0xC2462727, 0x27C06767, 0x90B4FCFC, - 0x20283838, 0xF67F0404, 0x60784848, 0xFF2EE5E5, 0x96074C4C, 0x5C4B6565, - 0xB1C72B2B, 0xAB6F8E8E, 0x9E0D4242, 0x9CBBF5F5, 0x52F2DBDB, 0x1BF34A4A, - 0x5FA63D3D, 0x9359A4A4, 0x0ABCB9B9, 0xEF3AF9F9, 0x91EF1313, 0x85FE0808, - 0x49019191, 0xEE611616, 0x2D7CDEDE, 0x4FB22121, 0x8F42B1B1, 0x3BDB7272, - 0x47B82F2F, 0x8748BFBF, 0x6D2CAEAE, 0x46E3C0C0, 0xD6573C3C, 0x3E859A9A, - 0x6929A9A9, 0x647D4F4F, 0x2A948181, 0xCE492E2E, 0xCB17C6C6, 0x2FCA6969, - 0xFCC3BDBD, 0x975CA3A3, 
0x055EE8E8, 0x7AD0EDED, 0xAC87D1D1, 0x7F8E0505, - 0xD5BA6464, 0x1AA8A5A5, 0x4BB72626, 0x0EB9BEBE, 0xA7608787, 0x5AF8D5D5, - 0x28223636, 0x14111B1B, 0x3FDE7575, 0x2979D9D9, 0x88AAEEEE, 0x3C332D2D, - 0x4C5F7979, 0x02B6B7B7, 0xB896CACA, 0xDA583535, 0xB09CC4C4, 0x17FC4343, - 0x551A8484, 0x1FF64D4D, 0x8A1C5959, 0x7D38B2B2, 0x57AC3333, 0xC718CFCF, - 0x8DF40606, 0x74695353, 0xB7749B9B, 0xC4F59797, 0x9F56ADAD, 0x72DAE3E3, - 0x7ED5EAEA, 0x154AF4F4, 0x229E8F8F, 0x12A2ABAB, 0x584E6262, 0x07E85F5F, - 0x99E51D1D, 0x34392323, 0x6EC1F6F6, 0x50446C6C, 0xDE5D3232, 0x68724646, - 0x6526A0A0, 0xBC93CDCD, 0xDB03DADA, 0xF8C6BABA, 0xC8FA9E9E, 0xA882D6D6, - 0x2BCF6E6E, 0x40507070, 0xDCEB8585, 0xFE750A0A, 0x328A9393, 0xA48DDFDF, - 0xCA4C2929, 0x10141C1C, 0x2173D7D7, 0xF0CCB4B4, 0xD309D4D4, 0x5D108A8A, - 0x0FE25151, 0x00000000, 0x6F9A1919, 0x9DE01A1A, 0x368F9494, 0x42E6C7C7, - 0x4AECC9C9, 0x5EFDD2D2, 0xC1AB7F7F, 0xE0D8A8A8}, - - {0xBC75BC32, 0xECF3EC21, 0x20C62043, 0xB3F4B3C9, 0xDADBDA03, 0x027B028B, - 0xE2FBE22B, 0x9EC89EFA, 0xC94AC9EC, 0xD4D3D409, 0x18E6186B, 0x1E6B1E9F, - 0x9845980E, 0xB27DB238, 0xA6E8A6D2, 0x264B26B7, 0x3CD63C57, 0x9332938A, - 0x82D882EE, 0x52FD5298, 0x7B377BD4, 0xBB71BB37, 0x5BF15B97, 0x47E14783, - 0x2430243C, 0x510F51E2, 0xBAF8BAC6, 0x4A1B4AF3, 0xBF87BF48, 0x0DFA0D70, - 0xB006B0B3, 0x753F75DE, 0xD25ED2FD, 0x7DBA7D20, 0x66AE6631, 0x3A5B3AA3, - 0x598A591C, 0x00000000, 0xCDBCCD93, 0x1A9D1AE0, 0xAE6DAE2C, 0x7FC17FAB, - 0x2BB12BC7, 0xBE0EBEB9, 0xE080E0A0, 0x8A5D8A10, 0x3BD23B52, 0x64D564BA, - 0xD8A0D888, 0xE784E7A5, 0x5F075FE8, 0x1B141B11, 0x2CB52CC2, 0xFC90FCB4, - 0x312C3127, 0x80A38065, 0x73B2732A, 0x0C730C81, 0x794C795F, 0x6B546B41, - 0x4B924B02, 0x53745369, 0x9436948F, 0x8351831F, 0x2A382A36, 0xC4B0C49C, - 0x22BD22C8, 0xD55AD5F8, 0xBDFCBDC3, 0x48604878, 0xFF62FFCE, 0x4C964C07, - 0x416C4177, 0xC742C7E6, 0xEBF7EB24, 0x1C101C14, 0x5D7C5D63, 0x36283622, - 0x672767C0, 0xE98CE9AF, 0x441344F9, 0x149514EA, 0xF59CF5BB, 0xCFC7CF18, - 0x3F243F2D, 0xC046C0E3, 0x723B72DB, 0x5470546C, 0x29CA294C, 0xF0E3F035, - 0x088508FE, 0xC6CBC617, 0xF311F34F, 0x8CD08CE4, 0xA493A459, 0xCAB8CA96, - 0x68A6683B, 0xB883B84D, 0x38203828, 0xE5FFE52E, 0xAD9FAD56, 0x0B770B84, - 0xC8C3C81D, 0x99CC99FF, 0x580358ED, 0x196F199A, 0x0E080E0A, 0x95BF957E, - 0x70407050, 0xF7E7F730, 0x6E2B6ECF, 0x1FE21F6E, 0xB579B53D, 0x090C090F, - 0x61AA6134, 0x57825716, 0x9F419F0B, 0x9D3A9D80, 0x11EA1164, 0x25B925CD, - 0xAFE4AFDD, 0x459A4508, 0xDFA4DF8D, 0xA397A35C, 0xEA7EEAD5, 0x35DA3558, - 0xED7AEDD0, 0x431743FC, 0xF866F8CB, 0xFB94FBB1, 0x37A137D3, 0xFA1DFA40, - 0xC23DC268, 0xB4F0B4CC, 0x32DE325D, 0x9CB39C71, 0x560B56E7, 0xE372E3DA, - 0x87A78760, 0x151C151B, 0xF9EFF93A, 0x63D163BF, 0x345334A9, 0x9A3E9A85, - 0xB18FB142, 0x7C337CD1, 0x8826889B, 0x3D5F3DA6, 0xA1ECA1D7, 0xE476E4DF, - 0x812A8194, 0x91499101, 0x0F810FFB, 0xEE88EEAA, 0x16EE1661, 0xD721D773, - 0x97C497F5, 0xA51AA5A8, 0xFEEBFE3F, 0x6DD96DB5, 0x78C578AE, 0xC539C56D, - 0x1D991DE5, 0x76CD76A4, 0x3EAD3EDC, 0xCB31CB67, 0xB68BB647, 0xEF01EF5B, - 0x1218121E, 0x602360C5, 0x6ADD6AB0, 0x4D1F4DF6, 0xCE4ECEE9, 0xDE2DDE7C, - 0x55F9559D, 0x7E487E5A, 0x214F21B2, 0x03F2037A, 0xA065A026, 0x5E8E5E19, - 0x5A785A66, 0x655C654B, 0x6258624E, 0xFD19FD45, 0x068D06F4, 0x40E54086, - 0xF298F2BE, 0x335733AC, 0x17671790, 0x057F058E, 0xE805E85E, 0x4F644F7D, - 0x89AF896A, 0x10631095, 0x74B6742F, 0x0AFE0A75, 0x5CF55C92, 0x9BB79B74, - 0x2D3C2D33, 0x30A530D6, 0x2ECE2E49, 0x49E94989, 0x46684672, 0x77447755, - 0xA8E0A8D8, 0x964D9604, 0x284328BD, 0xA969A929, 0xD929D979, 0x862E8691, - 0xD1ACD187, 0xF415F44A, 0x8D598D15, 
0xD6A8D682, 0xB90AB9BC, 0x429E420D, - 0xF66EF6C1, 0x2F472FB8, 0xDDDFDD06, 0x23342339, 0xCC35CC62, 0xF16AF1C4, - 0xC1CFC112, 0x85DC85EB, 0x8F228F9E, 0x71C971A1, 0x90C090F0, 0xAA9BAA53, - 0x018901F1, 0x8BD48BE1, 0x4EED4E8C, 0x8EAB8E6F, 0xAB12ABA2, 0x6FA26F3E, - 0xE60DE654, 0xDB52DBF2, 0x92BB927B, 0xB702B7B6, 0x692F69CA, 0x39A939D9, - 0xD3D7D30C, 0xA761A723, 0xA21EA2AD, 0xC3B4C399, 0x6C506C44, 0x07040705, - 0x04F6047F, 0x27C22746, 0xAC16ACA7, 0xD025D076, 0x50865013, 0xDC56DCF7, - 0x8455841A, 0xE109E151, 0x7ABE7A25, 0x139113EF}, - - {0xD939A9D9, 0x90176790, 0x719CB371, 0xD2A6E8D2, 0x05070405, 0x9852FD98, - 0x6580A365, 0xDFE476DF, 0x08459A08, 0x024B9202, 0xA0E080A0, 0x665A7866, - 0xDDAFE4DD, 0xB06ADDB0, 0xBF63D1BF, 0x362A3836, 0x54E60D54, 0x4320C643, - 0x62CC3562, 0xBEF298BE, 0x1E12181E, 0x24EBF724, 0xD7A1ECD7, 0x77416C77, - 0xBD2843BD, 0x32BC7532, 0xD47B37D4, 0x9B88269B, 0x700DFA70, 0xF94413F9, - 0xB1FB94B1, 0x5A7E485A, 0x7A03F27A, 0xE48CD0E4, 0x47B68B47, 0x3C24303C, - 0xA5E784A5, 0x416B5441, 0x06DDDF06, 0xC56023C5, 0x45FD1945, 0xA33A5BA3, - 0x68C23D68, 0x158D5915, 0x21ECF321, 0x3166AE31, 0x3E6FA23E, 0x16578216, - 0x95106395, 0x5BEF015B, 0x4DB8834D, 0x91862E91, 0xB56DD9B5, 0x1F83511F, - 0x53AA9B53, 0x635D7C63, 0x3B68A63B, 0x3FFEEB3F, 0xD630A5D6, 0x257ABE25, - 0xA7AC16A7, 0x0F090C0F, 0x35F0E335, 0x23A76123, 0xF090C0F0, 0xAFE98CAF, - 0x809D3A80, 0x925CF592, 0x810C7381, 0x27312C27, 0x76D02576, 0xE7560BE7, - 0x7B92BB7B, 0xE9CE4EE9, 0xF10189F1, 0x9F1E6B9F, 0xA93453A9, 0xC4F16AC4, - 0x99C3B499, 0x975BF197, 0x8347E183, 0x6B18E66B, 0xC822BDC8, 0x0E98450E, - 0x6E1FE26E, 0xC9B3F4C9, 0x2F74B62F, 0xCBF866CB, 0xFF99CCFF, 0xEA1495EA, - 0xED5803ED, 0xF7DC56F7, 0xE18BD4E1, 0x1B151C1B, 0xADA21EAD, 0x0CD3D70C, - 0x2BE2FB2B, 0x1DC8C31D, 0x195E8E19, 0xC22CB5C2, 0x8949E989, 0x12C1CF12, - 0x7E95BF7E, 0x207DBA20, 0x6411EA64, 0x840B7784, 0x6DC5396D, 0x6A89AF6A, - 0xD17C33D1, 0xA171C9A1, 0xCEFF62CE, 0x37BB7137, 0xFB0F81FB, 0x3DB5793D, - 0x51E10951, 0xDC3EADDC, 0x2D3F242D, 0xA476CDA4, 0x9D55F99D, 0xEE82D8EE, - 0x8640E586, 0xAE78C5AE, 0xCD25B9CD, 0x04964D04, 0x55774455, 0x0A0E080A, - 0x13508613, 0x30F7E730, 0xD337A1D3, 0x40FA1D40, 0x3461AA34, 0x8C4EED8C, - 0xB3B006B3, 0x6C54706C, 0x2A73B22A, 0x523BD252, 0x0B9F410B, 0x8B027B8B, - 0x88D8A088, 0x4FF3114F, 0x67CB3167, 0x4627C246, 0xC06727C0, 0xB4FC90B4, - 0x28382028, 0x7F04F67F, 0x78486078, 0x2EE5FF2E, 0x074C9607, 0x4B655C4B, - 0xC72BB1C7, 0x6F8EAB6F, 0x0D429E0D, 0xBBF59CBB, 0xF2DB52F2, 0xF34A1BF3, - 0xA63D5FA6, 0x59A49359, 0xBCB90ABC, 0x3AF9EF3A, 0xEF1391EF, 0xFE0885FE, - 0x01914901, 0x6116EE61, 0x7CDE2D7C, 0xB2214FB2, 0x42B18F42, 0xDB723BDB, - 0xB82F47B8, 0x48BF8748, 0x2CAE6D2C, 0xE3C046E3, 0x573CD657, 0x859A3E85, - 0x29A96929, 0x7D4F647D, 0x94812A94, 0x492ECE49, 0x17C6CB17, 0xCA692FCA, - 0xC3BDFCC3, 0x5CA3975C, 0x5EE8055E, 0xD0ED7AD0, 0x87D1AC87, 0x8E057F8E, - 0xBA64D5BA, 0xA8A51AA8, 0xB7264BB7, 0xB9BE0EB9, 0x6087A760, 0xF8D55AF8, - 0x22362822, 0x111B1411, 0xDE753FDE, 0x79D92979, 0xAAEE88AA, 0x332D3C33, - 0x5F794C5F, 0xB6B702B6, 0x96CAB896, 0x5835DA58, 0x9CC4B09C, 0xFC4317FC, - 0x1A84551A, 0xF64D1FF6, 0x1C598A1C, 0x38B27D38, 0xAC3357AC, 0x18CFC718, - 0xF4068DF4, 0x69537469, 0x749BB774, 0xF597C4F5, 0x56AD9F56, 0xDAE372DA, - 0xD5EA7ED5, 0x4AF4154A, 0x9E8F229E, 0xA2AB12A2, 0x4E62584E, 0xE85F07E8, - 0xE51D99E5, 0x39233439, 0xC1F66EC1, 0x446C5044, 0x5D32DE5D, 0x72466872, - 0x26A06526, 0x93CDBC93, 0x03DADB03, 0xC6BAF8C6, 0xFA9EC8FA, 0x82D6A882, - 0xCF6E2BCF, 0x50704050, 0xEB85DCEB, 0x750AFE75, 0x8A93328A, 0x8DDFA48D, - 0x4C29CA4C, 0x141C1014, 0x73D72173, 0xCCB4F0CC, 
0x09D4D309, 0x108A5D10, - 0xE2510FE2, 0x00000000, 0x9A196F9A, 0xE01A9DE0, 0x8F94368F, 0xE6C742E6, - 0xECC94AEC, 0xFDD25EFD, 0xAB7FC1AB, 0xD8A8E0D8} -}; - -/* The exp_to_poly and poly_to_exp tables are used to perform efficient - * operations in GF(2^8) represented as GF(2)[x]/w(x) where - * w(x)=x^8+x^6+x^3+x^2+1. We care about doing that because it's part of the - * definition of the RS matrix in the key schedule. Elements of that field - * are polynomials of degree not greater than 7 and all coefficients 0 or 1, - * which can be represented naturally by bytes (just substitute x=2). In that - * form, GF(2^8) addition is the same as bitwise XOR, but GF(2^8) - * multiplication is inefficient without hardware support. To multiply - * faster, I make use of the fact x is a generator for the nonzero elements, - * so that every element p of GF(2)[x]/w(x) is either 0 or equal to (x)^n for - * some n in 0..254. Note that that caret is exponentiation in GF(2^8), - * *not* polynomial notation. So if I want to compute pq where p and q are - * in GF(2^8), I can just say: - * 1. if p=0 or q=0 then pq=0 - * 2. otherwise, find m and n such that p=x^m and q=x^n - * 3. pq=(x^m)(x^n)=x^(m+n), so add m and n and find pq - * The translations in steps 2 and 3 are looked up in the tables - * poly_to_exp (for step 2) and exp_to_poly (for step 3). To see this - * in action, look at the CALC_S macro. As additional wrinkles, note that - * one of my operands is always a constant, so the poly_to_exp lookup on it - * is done in advance; I included the original values in the comments so - * readers can have some chance of recognizing that this *is* the RS matrix - * from the Twofish paper. I've only included the table entries I actually - * need; I never do a lookup on a variable input of zero and the biggest - * exponents I'll ever see are 254 (variable) and 237 (constant), so they'll - * never sum to more than 491. I'm repeating part of the exp_to_poly table - * so that I don't have to do mod-255 reduction in the exponent arithmetic. - * Since I know my constant operands are never zero, I only have to worry - * about zero values in the variable operand, and I do it with a simple - * conditional branch. I know conditionals are expensive, but I couldn't - * see a non-horrible way of avoiding them, and I did manage to group the - * statements so that each if covers four group multiplications. 
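Concretely, the multiply described above, for the case used here where one operand is a constant whose exponent wexp = poly_to_exp[c - 1] has been looked up in advance, can be sketched as (gf256_mul_by_exp is an illustrative name, not from the patch):

    /* Multiply p by the constant x^wexp in the RS field; exp_to_poly
     * repeats its first entries, so wexp + poly_to_exp[p - 1] (at most
     * 491) needs no explicit mod-255 reduction. */
    static u8 gf256_mul_by_exp(u8 p, u8 wexp)
    {
            return p ? exp_to_poly[poly_to_exp[p - 1] + wexp] : 0;
    }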
*/ - -static const u8 poly_to_exp[255] = { - 0x00, 0x01, 0x17, 0x02, 0x2E, 0x18, 0x53, 0x03, 0x6A, 0x2F, 0x93, 0x19, - 0x34, 0x54, 0x45, 0x04, 0x5C, 0x6B, 0xB6, 0x30, 0xA6, 0x94, 0x4B, 0x1A, - 0x8C, 0x35, 0x81, 0x55, 0xAA, 0x46, 0x0D, 0x05, 0x24, 0x5D, 0x87, 0x6C, - 0x9B, 0xB7, 0xC1, 0x31, 0x2B, 0xA7, 0xA3, 0x95, 0x98, 0x4C, 0xCA, 0x1B, - 0xE6, 0x8D, 0x73, 0x36, 0xCD, 0x82, 0x12, 0x56, 0x62, 0xAB, 0xF0, 0x47, - 0x4F, 0x0E, 0xBD, 0x06, 0xD4, 0x25, 0xD2, 0x5E, 0x27, 0x88, 0x66, 0x6D, - 0xD6, 0x9C, 0x79, 0xB8, 0x08, 0xC2, 0xDF, 0x32, 0x68, 0x2C, 0xFD, 0xA8, - 0x8A, 0xA4, 0x5A, 0x96, 0x29, 0x99, 0x22, 0x4D, 0x60, 0xCB, 0xE4, 0x1C, - 0x7B, 0xE7, 0x3B, 0x8E, 0x9E, 0x74, 0xF4, 0x37, 0xD8, 0xCE, 0xF9, 0x83, - 0x6F, 0x13, 0xB2, 0x57, 0xE1, 0x63, 0xDC, 0xAC, 0xC4, 0xF1, 0xAF, 0x48, - 0x0A, 0x50, 0x42, 0x0F, 0xBA, 0xBE, 0xC7, 0x07, 0xDE, 0xD5, 0x78, 0x26, - 0x65, 0xD3, 0xD1, 0x5F, 0xE3, 0x28, 0x21, 0x89, 0x59, 0x67, 0xFC, 0x6E, - 0xB1, 0xD7, 0xF8, 0x9D, 0xF3, 0x7A, 0x3A, 0xB9, 0xC6, 0x09, 0x41, 0xC3, - 0xAE, 0xE0, 0xDB, 0x33, 0x44, 0x69, 0x92, 0x2D, 0x52, 0xFE, 0x16, 0xA9, - 0x0C, 0x8B, 0x80, 0xA5, 0x4A, 0x5B, 0xB5, 0x97, 0xC9, 0x2A, 0xA2, 0x9A, - 0xC0, 0x23, 0x86, 0x4E, 0xBC, 0x61, 0xEF, 0xCC, 0x11, 0xE5, 0x72, 0x1D, - 0x3D, 0x7C, 0xEB, 0xE8, 0xE9, 0x3C, 0xEA, 0x8F, 0x7D, 0x9F, 0xEC, 0x75, - 0x1E, 0xF5, 0x3E, 0x38, 0xF6, 0xD9, 0x3F, 0xCF, 0x76, 0xFA, 0x1F, 0x84, - 0xA0, 0x70, 0xED, 0x14, 0x90, 0xB3, 0x7E, 0x58, 0xFB, 0xE2, 0x20, 0x64, - 0xD0, 0xDD, 0x77, 0xAD, 0xDA, 0xC5, 0x40, 0xF2, 0x39, 0xB0, 0xF7, 0x49, - 0xB4, 0x0B, 0x7F, 0x51, 0x15, 0x43, 0x91, 0x10, 0x71, 0xBB, 0xEE, 0xBF, - 0x85, 0xC8, 0xA1 -}; - -static const u8 exp_to_poly[492] = { - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x4D, 0x9A, 0x79, 0xF2, - 0xA9, 0x1F, 0x3E, 0x7C, 0xF8, 0xBD, 0x37, 0x6E, 0xDC, 0xF5, 0xA7, 0x03, - 0x06, 0x0C, 0x18, 0x30, 0x60, 0xC0, 0xCD, 0xD7, 0xE3, 0x8B, 0x5B, 0xB6, - 0x21, 0x42, 0x84, 0x45, 0x8A, 0x59, 0xB2, 0x29, 0x52, 0xA4, 0x05, 0x0A, - 0x14, 0x28, 0x50, 0xA0, 0x0D, 0x1A, 0x34, 0x68, 0xD0, 0xED, 0x97, 0x63, - 0xC6, 0xC1, 0xCF, 0xD3, 0xEB, 0x9B, 0x7B, 0xF6, 0xA1, 0x0F, 0x1E, 0x3C, - 0x78, 0xF0, 0xAD, 0x17, 0x2E, 0x5C, 0xB8, 0x3D, 0x7A, 0xF4, 0xA5, 0x07, - 0x0E, 0x1C, 0x38, 0x70, 0xE0, 0x8D, 0x57, 0xAE, 0x11, 0x22, 0x44, 0x88, - 0x5D, 0xBA, 0x39, 0x72, 0xE4, 0x85, 0x47, 0x8E, 0x51, 0xA2, 0x09, 0x12, - 0x24, 0x48, 0x90, 0x6D, 0xDA, 0xF9, 0xBF, 0x33, 0x66, 0xCC, 0xD5, 0xE7, - 0x83, 0x4B, 0x96, 0x61, 0xC2, 0xC9, 0xDF, 0xF3, 0xAB, 0x1B, 0x36, 0x6C, - 0xD8, 0xFD, 0xB7, 0x23, 0x46, 0x8C, 0x55, 0xAA, 0x19, 0x32, 0x64, 0xC8, - 0xDD, 0xF7, 0xA3, 0x0B, 0x16, 0x2C, 0x58, 0xB0, 0x2D, 0x5A, 0xB4, 0x25, - 0x4A, 0x94, 0x65, 0xCA, 0xD9, 0xFF, 0xB3, 0x2B, 0x56, 0xAC, 0x15, 0x2A, - 0x54, 0xA8, 0x1D, 0x3A, 0x74, 0xE8, 0x9D, 0x77, 0xEE, 0x91, 0x6F, 0xDE, - 0xF1, 0xAF, 0x13, 0x26, 0x4C, 0x98, 0x7D, 0xFA, 0xB9, 0x3F, 0x7E, 0xFC, - 0xB5, 0x27, 0x4E, 0x9C, 0x75, 0xEA, 0x99, 0x7F, 0xFE, 0xB1, 0x2F, 0x5E, - 0xBC, 0x35, 0x6A, 0xD4, 0xE5, 0x87, 0x43, 0x86, 0x41, 0x82, 0x49, 0x92, - 0x69, 0xD2, 0xE9, 0x9F, 0x73, 0xE6, 0x81, 0x4F, 0x9E, 0x71, 0xE2, 0x89, - 0x5F, 0xBE, 0x31, 0x62, 0xC4, 0xC5, 0xC7, 0xC3, 0xCB, 0xDB, 0xFB, 0xBB, - 0x3B, 0x76, 0xEC, 0x95, 0x67, 0xCE, 0xD1, 0xEF, 0x93, 0x6B, 0xD6, 0xE1, - 0x8F, 0x53, 0xA6, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x4D, - 0x9A, 0x79, 0xF2, 0xA9, 0x1F, 0x3E, 0x7C, 0xF8, 0xBD, 0x37, 0x6E, 0xDC, - 0xF5, 0xA7, 0x03, 0x06, 0x0C, 0x18, 0x30, 0x60, 0xC0, 0xCD, 0xD7, 0xE3, - 0x8B, 0x5B, 0xB6, 0x21, 0x42, 0x84, 0x45, 0x8A, 0x59, 0xB2, 0x29, 0x52, - 0xA4, 0x05, 0x0A, 0x14, 0x28, 0x50, 0xA0, 
0x0D, 0x1A, 0x34, 0x68, 0xD0, - 0xED, 0x97, 0x63, 0xC6, 0xC1, 0xCF, 0xD3, 0xEB, 0x9B, 0x7B, 0xF6, 0xA1, - 0x0F, 0x1E, 0x3C, 0x78, 0xF0, 0xAD, 0x17, 0x2E, 0x5C, 0xB8, 0x3D, 0x7A, - 0xF4, 0xA5, 0x07, 0x0E, 0x1C, 0x38, 0x70, 0xE0, 0x8D, 0x57, 0xAE, 0x11, - 0x22, 0x44, 0x88, 0x5D, 0xBA, 0x39, 0x72, 0xE4, 0x85, 0x47, 0x8E, 0x51, - 0xA2, 0x09, 0x12, 0x24, 0x48, 0x90, 0x6D, 0xDA, 0xF9, 0xBF, 0x33, 0x66, - 0xCC, 0xD5, 0xE7, 0x83, 0x4B, 0x96, 0x61, 0xC2, 0xC9, 0xDF, 0xF3, 0xAB, - 0x1B, 0x36, 0x6C, 0xD8, 0xFD, 0xB7, 0x23, 0x46, 0x8C, 0x55, 0xAA, 0x19, - 0x32, 0x64, 0xC8, 0xDD, 0xF7, 0xA3, 0x0B, 0x16, 0x2C, 0x58, 0xB0, 0x2D, - 0x5A, 0xB4, 0x25, 0x4A, 0x94, 0x65, 0xCA, 0xD9, 0xFF, 0xB3, 0x2B, 0x56, - 0xAC, 0x15, 0x2A, 0x54, 0xA8, 0x1D, 0x3A, 0x74, 0xE8, 0x9D, 0x77, 0xEE, - 0x91, 0x6F, 0xDE, 0xF1, 0xAF, 0x13, 0x26, 0x4C, 0x98, 0x7D, 0xFA, 0xB9, - 0x3F, 0x7E, 0xFC, 0xB5, 0x27, 0x4E, 0x9C, 0x75, 0xEA, 0x99, 0x7F, 0xFE, - 0xB1, 0x2F, 0x5E, 0xBC, 0x35, 0x6A, 0xD4, 0xE5, 0x87, 0x43, 0x86, 0x41, - 0x82, 0x49, 0x92, 0x69, 0xD2, 0xE9, 0x9F, 0x73, 0xE6, 0x81, 0x4F, 0x9E, - 0x71, 0xE2, 0x89, 0x5F, 0xBE, 0x31, 0x62, 0xC4, 0xC5, 0xC7, 0xC3, 0xCB -}; - - -/* The table constants are indices of - * S-box entries, preprocessed through q0 and q1. */ -static const u8 calc_sb_tbl[512] = { - 0xA9, 0x75, 0x67, 0xF3, 0xB3, 0xC6, 0xE8, 0xF4, - 0x04, 0xDB, 0xFD, 0x7B, 0xA3, 0xFB, 0x76, 0xC8, - 0x9A, 0x4A, 0x92, 0xD3, 0x80, 0xE6, 0x78, 0x6B, - 0xE4, 0x45, 0xDD, 0x7D, 0xD1, 0xE8, 0x38, 0x4B, - 0x0D, 0xD6, 0xC6, 0x32, 0x35, 0xD8, 0x98, 0xFD, - 0x18, 0x37, 0xF7, 0x71, 0xEC, 0xF1, 0x6C, 0xE1, - 0x43, 0x30, 0x75, 0x0F, 0x37, 0xF8, 0x26, 0x1B, - 0xFA, 0x87, 0x13, 0xFA, 0x94, 0x06, 0x48, 0x3F, - 0xF2, 0x5E, 0xD0, 0xBA, 0x8B, 0xAE, 0x30, 0x5B, - 0x84, 0x8A, 0x54, 0x00, 0xDF, 0xBC, 0x23, 0x9D, - 0x19, 0x6D, 0x5B, 0xC1, 0x3D, 0xB1, 0x59, 0x0E, - 0xF3, 0x80, 0xAE, 0x5D, 0xA2, 0xD2, 0x82, 0xD5, - 0x63, 0xA0, 0x01, 0x84, 0x83, 0x07, 0x2E, 0x14, - 0xD9, 0xB5, 0x51, 0x90, 0x9B, 0x2C, 0x7C, 0xA3, - 0xA6, 0xB2, 0xEB, 0x73, 0xA5, 0x4C, 0xBE, 0x54, - 0x16, 0x92, 0x0C, 0x74, 0xE3, 0x36, 0x61, 0x51, - 0xC0, 0x38, 0x8C, 0xB0, 0x3A, 0xBD, 0xF5, 0x5A, - 0x73, 0xFC, 0x2C, 0x60, 0x25, 0x62, 0x0B, 0x96, - 0xBB, 0x6C, 0x4E, 0x42, 0x89, 0xF7, 0x6B, 0x10, - 0x53, 0x7C, 0x6A, 0x28, 0xB4, 0x27, 0xF1, 0x8C, - 0xE1, 0x13, 0xE6, 0x95, 0xBD, 0x9C, 0x45, 0xC7, - 0xE2, 0x24, 0xF4, 0x46, 0xB6, 0x3B, 0x66, 0x70, - 0xCC, 0xCA, 0x95, 0xE3, 0x03, 0x85, 0x56, 0xCB, - 0xD4, 0x11, 0x1C, 0xD0, 0x1E, 0x93, 0xD7, 0xB8, - 0xFB, 0xA6, 0xC3, 0x83, 0x8E, 0x20, 0xB5, 0xFF, - 0xE9, 0x9F, 0xCF, 0x77, 0xBF, 0xC3, 0xBA, 0xCC, - 0xEA, 0x03, 0x77, 0x6F, 0x39, 0x08, 0xAF, 0xBF, - 0x33, 0x40, 0xC9, 0xE7, 0x62, 0x2B, 0x71, 0xE2, - 0x81, 0x79, 0x79, 0x0C, 0x09, 0xAA, 0xAD, 0x82, - 0x24, 0x41, 0xCD, 0x3A, 0xF9, 0xEA, 0xD8, 0xB9, - 0xE5, 0xE4, 0xC5, 0x9A, 0xB9, 0xA4, 0x4D, 0x97, - 0x44, 0x7E, 0x08, 0xDA, 0x86, 0x7A, 0xE7, 0x17, - 0xA1, 0x66, 0x1D, 0x94, 0xAA, 0xA1, 0xED, 0x1D, - 0x06, 0x3D, 0x70, 0xF0, 0xB2, 0xDE, 0xD2, 0xB3, - 0x41, 0x0B, 0x7B, 0x72, 0xA0, 0xA7, 0x11, 0x1C, - 0x31, 0xEF, 0xC2, 0xD1, 0x27, 0x53, 0x90, 0x3E, - 0x20, 0x8F, 0xF6, 0x33, 0x60, 0x26, 0xFF, 0x5F, - 0x96, 0xEC, 0x5C, 0x76, 0xB1, 0x2A, 0xAB, 0x49, - 0x9E, 0x81, 0x9C, 0x88, 0x52, 0xEE, 0x1B, 0x21, - 0x5F, 0xC4, 0x93, 0x1A, 0x0A, 0xEB, 0xEF, 0xD9, - 0x91, 0xC5, 0x85, 0x39, 0x49, 0x99, 0xEE, 0xCD, - 0x2D, 0xAD, 0x4F, 0x31, 0x8F, 0x8B, 0x3B, 0x01, - 0x47, 0x18, 0x87, 0x23, 0x6D, 0xDD, 0x46, 0x1F, - 0xD6, 0x4E, 0x3E, 0x2D, 0x69, 0xF9, 0x64, 0x48, - 0x2A, 0x4F, 0xCE, 0xF2, 0xCB, 0x65, 0x2F, 0x8E, - 0xFC, 0x78, 0x97, 0x5C, 
0x05, 0x58, 0x7A, 0x19, - 0xAC, 0x8D, 0x7F, 0xE5, 0xD5, 0x98, 0x1A, 0x57, - 0x4B, 0x67, 0x0E, 0x7F, 0xA7, 0x05, 0x5A, 0x64, - 0x28, 0xAF, 0x14, 0x63, 0x3F, 0xB6, 0x29, 0xFE, - 0x88, 0xF5, 0x3C, 0xB7, 0x4C, 0x3C, 0x02, 0xA5, - 0xB8, 0xCE, 0xDA, 0xE9, 0xB0, 0x68, 0x17, 0x44, - 0x55, 0xE0, 0x1F, 0x4D, 0x8A, 0x43, 0x7D, 0x69, - 0x57, 0x29, 0xC7, 0x2E, 0x8D, 0xAC, 0x74, 0x15, - 0xB7, 0x59, 0xC4, 0xA8, 0x9F, 0x0A, 0x72, 0x9E, - 0x7E, 0x6E, 0x15, 0x47, 0x22, 0xDF, 0x12, 0x34, - 0x58, 0x35, 0x07, 0x6A, 0x99, 0xCF, 0x34, 0xDC, - 0x6E, 0x22, 0x50, 0xC9, 0xDE, 0xC0, 0x68, 0x9B, - 0x65, 0x89, 0xBC, 0xD4, 0xDB, 0xED, 0xF8, 0xAB, - 0xC8, 0x12, 0xA8, 0xA2, 0x2B, 0x0D, 0x40, 0x52, - 0xDC, 0xBB, 0xFE, 0x02, 0x32, 0x2F, 0xA4, 0xA9, - 0xCA, 0xD7, 0x10, 0x61, 0x21, 0x1E, 0xF0, 0xB4, - 0xD3, 0x50, 0x5D, 0x04, 0x0F, 0xF6, 0x00, 0xC2, - 0x6F, 0x16, 0x9D, 0x25, 0x36, 0x86, 0x42, 0x56, - 0x4A, 0x55, 0x5E, 0x09, 0xC1, 0xBE, 0xE0, 0x91 -}; - -/* Macro to perform one column of the RS matrix multiplication. The - * parameters a, b, c, and d are the four bytes of output; i is the index - * of the key bytes, and w, x, y, and z, are the column of constants from - * the RS matrix, preprocessed through the poly_to_exp table. */ - -#define CALC_S(a, b, c, d, i, w, x, y, z) \ - if (key[i]) { \ - tmp = poly_to_exp[key[i] - 1]; \ - (a) ^= exp_to_poly[tmp + (w)]; \ - (b) ^= exp_to_poly[tmp + (x)]; \ - (c) ^= exp_to_poly[tmp + (y)]; \ - (d) ^= exp_to_poly[tmp + (z)]; \ - } - -/* Macros to calculate the key-dependent S-boxes for a 128-bit key using - * the S vector from CALC_S. CALC_SB_2 computes a single entry in all - * four S-boxes, where i is the index of the entry to compute, and a and b - * are the index numbers preprocessed through the q0 and q1 tables - * respectively. */ - -#define CALC_SB_2(i, a, b) \ - ctx->s[0][i] = mds[0][q0[(a) ^ sa] ^ se]; \ - ctx->s[1][i] = mds[1][q0[(b) ^ sb] ^ sf]; \ - ctx->s[2][i] = mds[2][q1[(a) ^ sc] ^ sg]; \ - ctx->s[3][i] = mds[3][q1[(b) ^ sd] ^ sh] - -/* Macro exactly like CALC_SB_2, but for 192-bit keys. */ - -#define CALC_SB192_2(i, a, b) \ - ctx->s[0][i] = mds[0][q0[q0[(b) ^ sa] ^ se] ^ si]; \ - ctx->s[1][i] = mds[1][q0[q1[(b) ^ sb] ^ sf] ^ sj]; \ - ctx->s[2][i] = mds[2][q1[q0[(a) ^ sc] ^ sg] ^ sk]; \ - ctx->s[3][i] = mds[3][q1[q1[(a) ^ sd] ^ sh] ^ sl]; - -/* Macro exactly like CALC_SB_2, but for 256-bit keys. */ - -#define CALC_SB256_2(i, a, b) \ - ctx->s[0][i] = mds[0][q0[q0[q1[(b) ^ sa] ^ se] ^ si] ^ sm]; \ - ctx->s[1][i] = mds[1][q0[q1[q1[(a) ^ sb] ^ sf] ^ sj] ^ sn]; \ - ctx->s[2][i] = mds[2][q1[q0[q0[(a) ^ sc] ^ sg] ^ sk] ^ so]; \ - ctx->s[3][i] = mds[3][q1[q1[q0[(b) ^ sd] ^ sh] ^ sl] ^ sp]; - -/* Macros to calculate the whitening and round subkeys. CALC_K_2 computes the - * last two stages of the h() function for a given index (either 2i or 2i+1). - * a, b, c, and d are the four bytes going into the last two stages. For - * 128-bit keys, this is the entire h() function and a and c are the index - * preprocessed through q0 and q1 respectively; for longer keys they are the - * output of previous stages. j is the index of the first key byte to use. - * CALC_K computes a pair of subkeys for 128-bit Twofish, by calling CALC_K_2 - * twice, doing the Pseudo-Hadamard Transform, and doing the necessary - * rotations. Its parameters are: a, the array to write the results into, - * j, the index of the first output entry, k and l, the preprocessed indices - * for index 2i, and m and n, the preprocessed indices for index 2i+1. 
- * CALC_K192_2 expands CALC_K_2 to handle 192-bit keys, by doing an - * additional lookup-and-XOR stage. The parameters a, b, c and d are the - * four bytes going into the last three stages. For 192-bit keys, c = d - * are the index preprocessed through q0, and a = b are the index - * preprocessed through q1; j is the index of the first key byte to use. - * CALC_K192 is identical to CALC_K but for using the CALC_K192_2 macro - * instead of CALC_K_2. - * CALC_K256_2 expands CALC_K192_2 to handle 256-bit keys, by doing an - * additional lookup-and-XOR stage. The parameters a and b are the index - * preprocessed through q0 and q1 respectively; j is the index of the first - * key byte to use. CALC_K256 is identical to CALC_K but for using the - * CALC_K256_2 macro instead of CALC_K_2. */ - -#define CALC_K_2(a, b, c, d, j) \ - mds[0][q0[a ^ key[(j) + 8]] ^ key[j]] \ - ^ mds[1][q0[b ^ key[(j) + 9]] ^ key[(j) + 1]] \ - ^ mds[2][q1[c ^ key[(j) + 10]] ^ key[(j) + 2]] \ - ^ mds[3][q1[d ^ key[(j) + 11]] ^ key[(j) + 3]] - -#define CALC_K(a, j, k, l, m, n) \ - x = CALC_K_2 (k, l, k, l, 0); \ - y = CALC_K_2 (m, n, m, n, 4); \ - y = rol32(y, 8); \ - x += y; y += x; ctx->a[j] = x; \ - ctx->a[(j) + 1] = rol32(y, 9) - -#define CALC_K192_2(a, b, c, d, j) \ - CALC_K_2 (q0[a ^ key[(j) + 16]], \ - q1[b ^ key[(j) + 17]], \ - q0[c ^ key[(j) + 18]], \ - q1[d ^ key[(j) + 19]], j) - -#define CALC_K192(a, j, k, l, m, n) \ - x = CALC_K192_2 (l, l, k, k, 0); \ - y = CALC_K192_2 (n, n, m, m, 4); \ - y = rol32(y, 8); \ - x += y; y += x; ctx->a[j] = x; \ - ctx->a[(j) + 1] = rol32(y, 9) - -#define CALC_K256_2(a, b, j) \ - CALC_K192_2 (q1[b ^ key[(j) + 24]], \ - q1[a ^ key[(j) + 25]], \ - q0[a ^ key[(j) + 26]], \ - q0[b ^ key[(j) + 27]], j) - -#define CALC_K256(a, j, k, l, m, n) \ - x = CALC_K256_2 (k, l, 0); \ - y = CALC_K256_2 (m, n, 4); \ - y = rol32(y, 8); \ - x += y; y += x; ctx->a[j] = x; \ - ctx->a[(j) + 1] = rol32(y, 9) - - /* Macros to compute the g() function in the encryption and decryption * rounds. G1 is the straight g() function; G2 includes the 8-bit * rotation for the high 32-bit word. */ @@ -630,176 +103,7 @@ static const u8 calc_sb_tbl[512] = { x ^= ctx->w[m]; \ dst[n] = cpu_to_le32(x) -#define TF_MIN_KEY_SIZE 16 -#define TF_MAX_KEY_SIZE 32 -#define TF_BLOCK_SIZE 16 - -/* Structure for an expanded Twofish key. s contains the key-dependent - * S-boxes composed with the MDS matrix; w contains the eight "whitening" - * subkeys, K[0] through K[7]. k holds the remaining, "round" subkeys. Note - * that k[i] corresponds to what the Twofish paper calls K[i+8]. */ -struct twofish_ctx { - u32 s[4][256], w[8], k[32]; -}; - -/* Perform the key setup. */ -static int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int key_len, u32 *flags) -{ - - struct twofish_ctx *ctx = crypto_tfm_ctx(tfm); - int i, j, k; - - /* Temporaries for CALC_K. */ - u32 x, y; - - /* The S vector used to key the S-boxes, split up into individual bytes. - * 128-bit keys use only sa through sh; 256-bit use all of them. */ - u8 sa = 0, sb = 0, sc = 0, sd = 0, se = 0, sf = 0, sg = 0, sh = 0; - u8 si = 0, sj = 0, sk = 0, sl = 0, sm = 0, sn = 0, so = 0, sp = 0; - - /* Temporary for CALC_S. */ - u8 tmp; - - /* Check key length. */ - if (key_len != 16 && key_len != 24 && key_len != 32) - { - *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; - return -EINVAL; /* unsupported key length */ - } - - /* Compute the first two words of the S vector. The magic numbers are - * the entries of the RS matrix, preprocessed through poly_to_exp. 
The - * numbers in the comments are the original (polynomial form) matrix - * entries. */ - CALC_S (sa, sb, sc, sd, 0, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */ - CALC_S (sa, sb, sc, sd, 1, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */ - CALC_S (sa, sb, sc, sd, 2, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */ - CALC_S (sa, sb, sc, sd, 3, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */ - CALC_S (sa, sb, sc, sd, 4, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */ - CALC_S (sa, sb, sc, sd, 5, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */ - CALC_S (sa, sb, sc, sd, 6, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */ - CALC_S (sa, sb, sc, sd, 7, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */ - CALC_S (se, sf, sg, sh, 8, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */ - CALC_S (se, sf, sg, sh, 9, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */ - CALC_S (se, sf, sg, sh, 10, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */ - CALC_S (se, sf, sg, sh, 11, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */ - CALC_S (se, sf, sg, sh, 12, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */ - CALC_S (se, sf, sg, sh, 13, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */ - CALC_S (se, sf, sg, sh, 14, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */ - CALC_S (se, sf, sg, sh, 15, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */ - - if (key_len == 24 || key_len == 32) { /* 192- or 256-bit key */ - /* Calculate the third word of the S vector */ - CALC_S (si, sj, sk, sl, 16, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */ - CALC_S (si, sj, sk, sl, 17, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */ - CALC_S (si, sj, sk, sl, 18, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */ - CALC_S (si, sj, sk, sl, 19, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */ - CALC_S (si, sj, sk, sl, 20, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */ - CALC_S (si, sj, sk, sl, 21, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */ - CALC_S (si, sj, sk, sl, 22, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */ - CALC_S (si, sj, sk, sl, 23, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */ - } - - if (key_len == 32) { /* 256-bit key */ - /* Calculate the fourth word of the S vector */ - CALC_S (sm, sn, so, sp, 24, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */ - CALC_S (sm, sn, so, sp, 25, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */ - CALC_S (sm, sn, so, sp, 26, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */ - CALC_S (sm, sn, so, sp, 27, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */ - CALC_S (sm, sn, so, sp, 28, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */ - CALC_S (sm, sn, so, sp, 29, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */ - CALC_S (sm, sn, so, sp, 30, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */ - CALC_S (sm, sn, so, sp, 31, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */ - - /* Compute the S-boxes. */ - for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) { - CALC_SB256_2( i, calc_sb_tbl[j], calc_sb_tbl[k] ); - } - - /* Calculate whitening and round subkeys. The constants are - * indices of subkeys, preprocessed through q0 and q1. 
*/ - CALC_K256 (w, 0, 0xA9, 0x75, 0x67, 0xF3); - CALC_K256 (w, 2, 0xB3, 0xC6, 0xE8, 0xF4); - CALC_K256 (w, 4, 0x04, 0xDB, 0xFD, 0x7B); - CALC_K256 (w, 6, 0xA3, 0xFB, 0x76, 0xC8); - CALC_K256 (k, 0, 0x9A, 0x4A, 0x92, 0xD3); - CALC_K256 (k, 2, 0x80, 0xE6, 0x78, 0x6B); - CALC_K256 (k, 4, 0xE4, 0x45, 0xDD, 0x7D); - CALC_K256 (k, 6, 0xD1, 0xE8, 0x38, 0x4B); - CALC_K256 (k, 8, 0x0D, 0xD6, 0xC6, 0x32); - CALC_K256 (k, 10, 0x35, 0xD8, 0x98, 0xFD); - CALC_K256 (k, 12, 0x18, 0x37, 0xF7, 0x71); - CALC_K256 (k, 14, 0xEC, 0xF1, 0x6C, 0xE1); - CALC_K256 (k, 16, 0x43, 0x30, 0x75, 0x0F); - CALC_K256 (k, 18, 0x37, 0xF8, 0x26, 0x1B); - CALC_K256 (k, 20, 0xFA, 0x87, 0x13, 0xFA); - CALC_K256 (k, 22, 0x94, 0x06, 0x48, 0x3F); - CALC_K256 (k, 24, 0xF2, 0x5E, 0xD0, 0xBA); - CALC_K256 (k, 26, 0x8B, 0xAE, 0x30, 0x5B); - CALC_K256 (k, 28, 0x84, 0x8A, 0x54, 0x00); - CALC_K256 (k, 30, 0xDF, 0xBC, 0x23, 0x9D); - } else if (key_len == 24) { /* 192-bit key */ - /* Compute the S-boxes. */ - for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) { - CALC_SB192_2( i, calc_sb_tbl[j], calc_sb_tbl[k] ); - } - - /* Calculate whitening and round subkeys. The constants are - * indices of subkeys, preprocessed through q0 and q1. */ - CALC_K192 (w, 0, 0xA9, 0x75, 0x67, 0xF3); - CALC_K192 (w, 2, 0xB3, 0xC6, 0xE8, 0xF4); - CALC_K192 (w, 4, 0x04, 0xDB, 0xFD, 0x7B); - CALC_K192 (w, 6, 0xA3, 0xFB, 0x76, 0xC8); - CALC_K192 (k, 0, 0x9A, 0x4A, 0x92, 0xD3); - CALC_K192 (k, 2, 0x80, 0xE6, 0x78, 0x6B); - CALC_K192 (k, 4, 0xE4, 0x45, 0xDD, 0x7D); - CALC_K192 (k, 6, 0xD1, 0xE8, 0x38, 0x4B); - CALC_K192 (k, 8, 0x0D, 0xD6, 0xC6, 0x32); - CALC_K192 (k, 10, 0x35, 0xD8, 0x98, 0xFD); - CALC_K192 (k, 12, 0x18, 0x37, 0xF7, 0x71); - CALC_K192 (k, 14, 0xEC, 0xF1, 0x6C, 0xE1); - CALC_K192 (k, 16, 0x43, 0x30, 0x75, 0x0F); - CALC_K192 (k, 18, 0x37, 0xF8, 0x26, 0x1B); - CALC_K192 (k, 20, 0xFA, 0x87, 0x13, 0xFA); - CALC_K192 (k, 22, 0x94, 0x06, 0x48, 0x3F); - CALC_K192 (k, 24, 0xF2, 0x5E, 0xD0, 0xBA); - CALC_K192 (k, 26, 0x8B, 0xAE, 0x30, 0x5B); - CALC_K192 (k, 28, 0x84, 0x8A, 0x54, 0x00); - CALC_K192 (k, 30, 0xDF, 0xBC, 0x23, 0x9D); - } else { /* 128-bit key */ - /* Compute the S-boxes. */ - for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) { - CALC_SB_2( i, calc_sb_tbl[j], calc_sb_tbl[k] ); - } - - /* Calculate whitening and round subkeys. The constants are - * indices of subkeys, preprocessed through q0 and q1. */ - CALC_K (w, 0, 0xA9, 0x75, 0x67, 0xF3); - CALC_K (w, 2, 0xB3, 0xC6, 0xE8, 0xF4); - CALC_K (w, 4, 0x04, 0xDB, 0xFD, 0x7B); - CALC_K (w, 6, 0xA3, 0xFB, 0x76, 0xC8); - CALC_K (k, 0, 0x9A, 0x4A, 0x92, 0xD3); - CALC_K (k, 2, 0x80, 0xE6, 0x78, 0x6B); - CALC_K (k, 4, 0xE4, 0x45, 0xDD, 0x7D); - CALC_K (k, 6, 0xD1, 0xE8, 0x38, 0x4B); - CALC_K (k, 8, 0x0D, 0xD6, 0xC6, 0x32); - CALC_K (k, 10, 0x35, 0xD8, 0x98, 0xFD); - CALC_K (k, 12, 0x18, 0x37, 0xF7, 0x71); - CALC_K (k, 14, 0xEC, 0xF1, 0x6C, 0xE1); - CALC_K (k, 16, 0x43, 0x30, 0x75, 0x0F); - CALC_K (k, 18, 0x37, 0xF8, 0x26, 0x1B); - CALC_K (k, 20, 0xFA, 0x87, 0x13, 0xFA); - CALC_K (k, 22, 0x94, 0x06, 0x48, 0x3F); - CALC_K (k, 24, 0xF2, 0x5E, 0xD0, 0xBA); - CALC_K (k, 26, 0x8B, 0xAE, 0x30, 0x5B); - CALC_K (k, 28, 0x84, 0x8A, 0x54, 0x00); - CALC_K (k, 30, 0xDF, 0xBC, 0x23, 0x9D); - } - - return 0; -} /* Encrypt one block. in and out may be the same. 
*/ static void twofish_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) @@ -877,6 +181,8 @@ static void twofish_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) static struct crypto_alg alg = { .cra_name = "twofish", + .cra_driver_name = "twofish-generic", + .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = TF_BLOCK_SIZE, .cra_ctxsize = sizeof(struct twofish_ctx), diff --git a/crypto/twofish_common.c b/crypto/twofish_common.c new file mode 100644 index 000000000000..b4b9c0c3f4ae --- /dev/null +++ b/crypto/twofish_common.c @@ -0,0 +1,744 @@ +/* + * Common Twofish algorithm parts shared between the c and assembler + * implementations + * + * Originally Twofish for GPG + * By Matthew Skala <mskala@ansuz.sooke.bc.ca>, July 26, 1998 + * 256-bit key length added March 20, 1999 + * Some modifications to reduce the text size by Werner Koch, April, 1998 + * Ported to the kerneli patch by Marc Mutz <Marc@Mutz.com> + * Ported to CryptoAPI by Colin Slater <hoho@tacomeat.net> + * + * The original author has disclaimed all copyright interest in this + * code and thus put it in the public domain. The subsequent authors + * have put this under the GNU General Public License. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA + * + * This code is a "clean room" implementation, written from the paper + * _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey, + * Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available + * through http://www.counterpane.com/twofish.html + * + * For background information on multiplication in finite fields, used for + * the matrix operations in the key schedule, see the book _Contemporary + * Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the + * Third Edition. + */ + +#include <crypto/twofish.h> +#include <linux/bitops.h> +#include <linux/crypto.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> + + +/* The large precomputed tables for the Twofish cipher (twofish.c) + * Taken from the same source as twofish.c + * Marc Mutz <Marc@Mutz.com> + */ + +/* These two tables are the q0 and q1 permutations, exactly as described in + * the Twofish paper. 
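 */

/*
 * A minimal sketch (not part of this patch) of what the new
 * .cra_driver_name/.cra_priority fields in the crypto/twofish.c hunk
 * above enable: an optimized driver registers under the same
 * .cra_name with a higher priority, so lookups for "twofish" prefer
 * it over twofish-generic (priority 100), while "twofish-generic"
 * still selects the C implementation explicitly.  The driver name,
 * priority value, and encrypt/decrypt helpers here are assumptions.
 */
#include <crypto/twofish.h>
#include <linux/crypto.h>
#include <linux/module.h>

static void example_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
static void example_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src);

static struct crypto_alg example_alg = {
	.cra_name		=	"twofish",
	.cra_driver_name	=	"twofish-example",	/* hypothetical */
	.cra_priority		=	200,	/* beats the generic's 100 */
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		=	TF_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct twofish_ctx),
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(example_alg.cra_list),
	.cra_u			=	{ .cipher = {
	.cia_min_keysize	=	TF_MIN_KEY_SIZE,
	.cia_max_keysize	=	TF_MAX_KEY_SIZE,
	.cia_setkey		=	twofish_setkey,	/* the shared code below */
	.cia_encrypt		=	example_encrypt,
	.cia_decrypt		=	example_decrypt } }
};

static int __init example_init(void)
{
	return crypto_register_alg(&example_alg);
}

static void __exit example_fini(void)
{
	crypto_unregister_alg(&example_alg);
}

/* The q0 and q1 permutation tables follow: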
*/ + +static const u8 q0[256] = { + 0xA9, 0x67, 0xB3, 0xE8, 0x04, 0xFD, 0xA3, 0x76, 0x9A, 0x92, 0x80, 0x78, + 0xE4, 0xDD, 0xD1, 0x38, 0x0D, 0xC6, 0x35, 0x98, 0x18, 0xF7, 0xEC, 0x6C, + 0x43, 0x75, 0x37, 0x26, 0xFA, 0x13, 0x94, 0x48, 0xF2, 0xD0, 0x8B, 0x30, + 0x84, 0x54, 0xDF, 0x23, 0x19, 0x5B, 0x3D, 0x59, 0xF3, 0xAE, 0xA2, 0x82, + 0x63, 0x01, 0x83, 0x2E, 0xD9, 0x51, 0x9B, 0x7C, 0xA6, 0xEB, 0xA5, 0xBE, + 0x16, 0x0C, 0xE3, 0x61, 0xC0, 0x8C, 0x3A, 0xF5, 0x73, 0x2C, 0x25, 0x0B, + 0xBB, 0x4E, 0x89, 0x6B, 0x53, 0x6A, 0xB4, 0xF1, 0xE1, 0xE6, 0xBD, 0x45, + 0xE2, 0xF4, 0xB6, 0x66, 0xCC, 0x95, 0x03, 0x56, 0xD4, 0x1C, 0x1E, 0xD7, + 0xFB, 0xC3, 0x8E, 0xB5, 0xE9, 0xCF, 0xBF, 0xBA, 0xEA, 0x77, 0x39, 0xAF, + 0x33, 0xC9, 0x62, 0x71, 0x81, 0x79, 0x09, 0xAD, 0x24, 0xCD, 0xF9, 0xD8, + 0xE5, 0xC5, 0xB9, 0x4D, 0x44, 0x08, 0x86, 0xE7, 0xA1, 0x1D, 0xAA, 0xED, + 0x06, 0x70, 0xB2, 0xD2, 0x41, 0x7B, 0xA0, 0x11, 0x31, 0xC2, 0x27, 0x90, + 0x20, 0xF6, 0x60, 0xFF, 0x96, 0x5C, 0xB1, 0xAB, 0x9E, 0x9C, 0x52, 0x1B, + 0x5F, 0x93, 0x0A, 0xEF, 0x91, 0x85, 0x49, 0xEE, 0x2D, 0x4F, 0x8F, 0x3B, + 0x47, 0x87, 0x6D, 0x46, 0xD6, 0x3E, 0x69, 0x64, 0x2A, 0xCE, 0xCB, 0x2F, + 0xFC, 0x97, 0x05, 0x7A, 0xAC, 0x7F, 0xD5, 0x1A, 0x4B, 0x0E, 0xA7, 0x5A, + 0x28, 0x14, 0x3F, 0x29, 0x88, 0x3C, 0x4C, 0x02, 0xB8, 0xDA, 0xB0, 0x17, + 0x55, 0x1F, 0x8A, 0x7D, 0x57, 0xC7, 0x8D, 0x74, 0xB7, 0xC4, 0x9F, 0x72, + 0x7E, 0x15, 0x22, 0x12, 0x58, 0x07, 0x99, 0x34, 0x6E, 0x50, 0xDE, 0x68, + 0x65, 0xBC, 0xDB, 0xF8, 0xC8, 0xA8, 0x2B, 0x40, 0xDC, 0xFE, 0x32, 0xA4, + 0xCA, 0x10, 0x21, 0xF0, 0xD3, 0x5D, 0x0F, 0x00, 0x6F, 0x9D, 0x36, 0x42, + 0x4A, 0x5E, 0xC1, 0xE0 +}; + +static const u8 q1[256] = { + 0x75, 0xF3, 0xC6, 0xF4, 0xDB, 0x7B, 0xFB, 0xC8, 0x4A, 0xD3, 0xE6, 0x6B, + 0x45, 0x7D, 0xE8, 0x4B, 0xD6, 0x32, 0xD8, 0xFD, 0x37, 0x71, 0xF1, 0xE1, + 0x30, 0x0F, 0xF8, 0x1B, 0x87, 0xFA, 0x06, 0x3F, 0x5E, 0xBA, 0xAE, 0x5B, + 0x8A, 0x00, 0xBC, 0x9D, 0x6D, 0xC1, 0xB1, 0x0E, 0x80, 0x5D, 0xD2, 0xD5, + 0xA0, 0x84, 0x07, 0x14, 0xB5, 0x90, 0x2C, 0xA3, 0xB2, 0x73, 0x4C, 0x54, + 0x92, 0x74, 0x36, 0x51, 0x38, 0xB0, 0xBD, 0x5A, 0xFC, 0x60, 0x62, 0x96, + 0x6C, 0x42, 0xF7, 0x10, 0x7C, 0x28, 0x27, 0x8C, 0x13, 0x95, 0x9C, 0xC7, + 0x24, 0x46, 0x3B, 0x70, 0xCA, 0xE3, 0x85, 0xCB, 0x11, 0xD0, 0x93, 0xB8, + 0xA6, 0x83, 0x20, 0xFF, 0x9F, 0x77, 0xC3, 0xCC, 0x03, 0x6F, 0x08, 0xBF, + 0x40, 0xE7, 0x2B, 0xE2, 0x79, 0x0C, 0xAA, 0x82, 0x41, 0x3A, 0xEA, 0xB9, + 0xE4, 0x9A, 0xA4, 0x97, 0x7E, 0xDA, 0x7A, 0x17, 0x66, 0x94, 0xA1, 0x1D, + 0x3D, 0xF0, 0xDE, 0xB3, 0x0B, 0x72, 0xA7, 0x1C, 0xEF, 0xD1, 0x53, 0x3E, + 0x8F, 0x33, 0x26, 0x5F, 0xEC, 0x76, 0x2A, 0x49, 0x81, 0x88, 0xEE, 0x21, + 0xC4, 0x1A, 0xEB, 0xD9, 0xC5, 0x39, 0x99, 0xCD, 0xAD, 0x31, 0x8B, 0x01, + 0x18, 0x23, 0xDD, 0x1F, 0x4E, 0x2D, 0xF9, 0x48, 0x4F, 0xF2, 0x65, 0x8E, + 0x78, 0x5C, 0x58, 0x19, 0x8D, 0xE5, 0x98, 0x57, 0x67, 0x7F, 0x05, 0x64, + 0xAF, 0x63, 0xB6, 0xFE, 0xF5, 0xB7, 0x3C, 0xA5, 0xCE, 0xE9, 0x68, 0x44, + 0xE0, 0x4D, 0x43, 0x69, 0x29, 0x2E, 0xAC, 0x15, 0x59, 0xA8, 0x0A, 0x9E, + 0x6E, 0x47, 0xDF, 0x34, 0x35, 0x6A, 0xCF, 0xDC, 0x22, 0xC9, 0xC0, 0x9B, + 0x89, 0xD4, 0xED, 0xAB, 0x12, 0xA2, 0x0D, 0x52, 0xBB, 0x02, 0x2F, 0xA9, + 0xD7, 0x61, 0x1E, 0xB4, 0x50, 0x04, 0xF6, 0xC2, 0x16, 0x25, 0x86, 0x56, + 0x55, 0x09, 0xBE, 0x91 +}; + +/* These MDS tables are actually tables of MDS composed with q0 and q1, + * because it is only ever used that way and we can save some time by + * precomputing. 
Of course the main saving comes from precomputing the + * GF(2^8) multiplication involved in the MDS matrix multiply; by looking + * things up in these tables we reduce the matrix multiply to four lookups + * and three XORs. Semi-formally, the definition of these tables is: + * mds[0][i] = MDS (q1[i] 0 0 0)^T mds[1][i] = MDS (0 q0[i] 0 0)^T + * mds[2][i] = MDS (0 0 q1[i] 0)^T mds[3][i] = MDS (0 0 0 q0[i])^T + * where ^T means "transpose", the matrix multiply is performed in GF(2^8) + * represented as GF(2)[x]/v(x) where v(x)=x^8+x^6+x^5+x^3+1 as described + * by Schneier et al, and I'm casually glossing over the byte/word + * conversion issues. */ + +static const u32 mds[4][256] = { + { + 0xBCBC3275, 0xECEC21F3, 0x202043C6, 0xB3B3C9F4, 0xDADA03DB, 0x02028B7B, + 0xE2E22BFB, 0x9E9EFAC8, 0xC9C9EC4A, 0xD4D409D3, 0x18186BE6, 0x1E1E9F6B, + 0x98980E45, 0xB2B2387D, 0xA6A6D2E8, 0x2626B74B, 0x3C3C57D6, 0x93938A32, + 0x8282EED8, 0x525298FD, 0x7B7BD437, 0xBBBB3771, 0x5B5B97F1, 0x474783E1, + 0x24243C30, 0x5151E20F, 0xBABAC6F8, 0x4A4AF31B, 0xBFBF4887, 0x0D0D70FA, + 0xB0B0B306, 0x7575DE3F, 0xD2D2FD5E, 0x7D7D20BA, 0x666631AE, 0x3A3AA35B, + 0x59591C8A, 0x00000000, 0xCDCD93BC, 0x1A1AE09D, 0xAEAE2C6D, 0x7F7FABC1, + 0x2B2BC7B1, 0xBEBEB90E, 0xE0E0A080, 0x8A8A105D, 0x3B3B52D2, 0x6464BAD5, + 0xD8D888A0, 0xE7E7A584, 0x5F5FE807, 0x1B1B1114, 0x2C2CC2B5, 0xFCFCB490, + 0x3131272C, 0x808065A3, 0x73732AB2, 0x0C0C8173, 0x79795F4C, 0x6B6B4154, + 0x4B4B0292, 0x53536974, 0x94948F36, 0x83831F51, 0x2A2A3638, 0xC4C49CB0, + 0x2222C8BD, 0xD5D5F85A, 0xBDBDC3FC, 0x48487860, 0xFFFFCE62, 0x4C4C0796, + 0x4141776C, 0xC7C7E642, 0xEBEB24F7, 0x1C1C1410, 0x5D5D637C, 0x36362228, + 0x6767C027, 0xE9E9AF8C, 0x4444F913, 0x1414EA95, 0xF5F5BB9C, 0xCFCF18C7, + 0x3F3F2D24, 0xC0C0E346, 0x7272DB3B, 0x54546C70, 0x29294CCA, 0xF0F035E3, + 0x0808FE85, 0xC6C617CB, 0xF3F34F11, 0x8C8CE4D0, 0xA4A45993, 0xCACA96B8, + 0x68683BA6, 0xB8B84D83, 0x38382820, 0xE5E52EFF, 0xADAD569F, 0x0B0B8477, + 0xC8C81DC3, 0x9999FFCC, 0x5858ED03, 0x19199A6F, 0x0E0E0A08, 0x95957EBF, + 0x70705040, 0xF7F730E7, 0x6E6ECF2B, 0x1F1F6EE2, 0xB5B53D79, 0x09090F0C, + 0x616134AA, 0x57571682, 0x9F9F0B41, 0x9D9D803A, 0x111164EA, 0x2525CDB9, + 0xAFAFDDE4, 0x4545089A, 0xDFDF8DA4, 0xA3A35C97, 0xEAEAD57E, 0x353558DA, + 0xEDEDD07A, 0x4343FC17, 0xF8F8CB66, 0xFBFBB194, 0x3737D3A1, 0xFAFA401D, + 0xC2C2683D, 0xB4B4CCF0, 0x32325DDE, 0x9C9C71B3, 0x5656E70B, 0xE3E3DA72, + 0x878760A7, 0x15151B1C, 0xF9F93AEF, 0x6363BFD1, 0x3434A953, 0x9A9A853E, + 0xB1B1428F, 0x7C7CD133, 0x88889B26, 0x3D3DA65F, 0xA1A1D7EC, 0xE4E4DF76, + 0x8181942A, 0x91910149, 0x0F0FFB81, 0xEEEEAA88, 0x161661EE, 0xD7D77321, + 0x9797F5C4, 0xA5A5A81A, 0xFEFE3FEB, 0x6D6DB5D9, 0x7878AEC5, 0xC5C56D39, + 0x1D1DE599, 0x7676A4CD, 0x3E3EDCAD, 0xCBCB6731, 0xB6B6478B, 0xEFEF5B01, + 0x12121E18, 0x6060C523, 0x6A6AB0DD, 0x4D4DF61F, 0xCECEE94E, 0xDEDE7C2D, + 0x55559DF9, 0x7E7E5A48, 0x2121B24F, 0x03037AF2, 0xA0A02665, 0x5E5E198E, + 0x5A5A6678, 0x65654B5C, 0x62624E58, 0xFDFD4519, 0x0606F48D, 0x404086E5, + 0xF2F2BE98, 0x3333AC57, 0x17179067, 0x05058E7F, 0xE8E85E05, 0x4F4F7D64, + 0x89896AAF, 0x10109563, 0x74742FB6, 0x0A0A75FE, 0x5C5C92F5, 0x9B9B74B7, + 0x2D2D333C, 0x3030D6A5, 0x2E2E49CE, 0x494989E9, 0x46467268, 0x77775544, + 0xA8A8D8E0, 0x9696044D, 0x2828BD43, 0xA9A92969, 0xD9D97929, 0x8686912E, + 0xD1D187AC, 0xF4F44A15, 0x8D8D1559, 0xD6D682A8, 0xB9B9BC0A, 0x42420D9E, + 0xF6F6C16E, 0x2F2FB847, 0xDDDD06DF, 0x23233934, 0xCCCC6235, 0xF1F1C46A, + 0xC1C112CF, 0x8585EBDC, 0x8F8F9E22, 0x7171A1C9, 0x9090F0C0, 0xAAAA539B, + 0x0101F189, 0x8B8BE1D4, 0x4E4E8CED, 
0x8E8E6FAB, 0xABABA212, 0x6F6F3EA2, + 0xE6E6540D, 0xDBDBF252, 0x92927BBB, 0xB7B7B602, 0x6969CA2F, 0x3939D9A9, + 0xD3D30CD7, 0xA7A72361, 0xA2A2AD1E, 0xC3C399B4, 0x6C6C4450, 0x07070504, + 0x04047FF6, 0x272746C2, 0xACACA716, 0xD0D07625, 0x50501386, 0xDCDCF756, + 0x84841A55, 0xE1E15109, 0x7A7A25BE, 0x1313EF91}, + + { + 0xA9D93939, 0x67901717, 0xB3719C9C, 0xE8D2A6A6, 0x04050707, 0xFD985252, + 0xA3658080, 0x76DFE4E4, 0x9A084545, 0x92024B4B, 0x80A0E0E0, 0x78665A5A, + 0xE4DDAFAF, 0xDDB06A6A, 0xD1BF6363, 0x38362A2A, 0x0D54E6E6, 0xC6432020, + 0x3562CCCC, 0x98BEF2F2, 0x181E1212, 0xF724EBEB, 0xECD7A1A1, 0x6C774141, + 0x43BD2828, 0x7532BCBC, 0x37D47B7B, 0x269B8888, 0xFA700D0D, 0x13F94444, + 0x94B1FBFB, 0x485A7E7E, 0xF27A0303, 0xD0E48C8C, 0x8B47B6B6, 0x303C2424, + 0x84A5E7E7, 0x54416B6B, 0xDF06DDDD, 0x23C56060, 0x1945FDFD, 0x5BA33A3A, + 0x3D68C2C2, 0x59158D8D, 0xF321ECEC, 0xAE316666, 0xA23E6F6F, 0x82165757, + 0x63951010, 0x015BEFEF, 0x834DB8B8, 0x2E918686, 0xD9B56D6D, 0x511F8383, + 0x9B53AAAA, 0x7C635D5D, 0xA63B6868, 0xEB3FFEFE, 0xA5D63030, 0xBE257A7A, + 0x16A7ACAC, 0x0C0F0909, 0xE335F0F0, 0x6123A7A7, 0xC0F09090, 0x8CAFE9E9, + 0x3A809D9D, 0xF5925C5C, 0x73810C0C, 0x2C273131, 0x2576D0D0, 0x0BE75656, + 0xBB7B9292, 0x4EE9CECE, 0x89F10101, 0x6B9F1E1E, 0x53A93434, 0x6AC4F1F1, + 0xB499C3C3, 0xF1975B5B, 0xE1834747, 0xE66B1818, 0xBDC82222, 0x450E9898, + 0xE26E1F1F, 0xF4C9B3B3, 0xB62F7474, 0x66CBF8F8, 0xCCFF9999, 0x95EA1414, + 0x03ED5858, 0x56F7DCDC, 0xD4E18B8B, 0x1C1B1515, 0x1EADA2A2, 0xD70CD3D3, + 0xFB2BE2E2, 0xC31DC8C8, 0x8E195E5E, 0xB5C22C2C, 0xE9894949, 0xCF12C1C1, + 0xBF7E9595, 0xBA207D7D, 0xEA641111, 0x77840B0B, 0x396DC5C5, 0xAF6A8989, + 0x33D17C7C, 0xC9A17171, 0x62CEFFFF, 0x7137BBBB, 0x81FB0F0F, 0x793DB5B5, + 0x0951E1E1, 0xADDC3E3E, 0x242D3F3F, 0xCDA47676, 0xF99D5555, 0xD8EE8282, + 0xE5864040, 0xC5AE7878, 0xB9CD2525, 0x4D049696, 0x44557777, 0x080A0E0E, + 0x86135050, 0xE730F7F7, 0xA1D33737, 0x1D40FAFA, 0xAA346161, 0xED8C4E4E, + 0x06B3B0B0, 0x706C5454, 0xB22A7373, 0xD2523B3B, 0x410B9F9F, 0x7B8B0202, + 0xA088D8D8, 0x114FF3F3, 0x3167CBCB, 0xC2462727, 0x27C06767, 0x90B4FCFC, + 0x20283838, 0xF67F0404, 0x60784848, 0xFF2EE5E5, 0x96074C4C, 0x5C4B6565, + 0xB1C72B2B, 0xAB6F8E8E, 0x9E0D4242, 0x9CBBF5F5, 0x52F2DBDB, 0x1BF34A4A, + 0x5FA63D3D, 0x9359A4A4, 0x0ABCB9B9, 0xEF3AF9F9, 0x91EF1313, 0x85FE0808, + 0x49019191, 0xEE611616, 0x2D7CDEDE, 0x4FB22121, 0x8F42B1B1, 0x3BDB7272, + 0x47B82F2F, 0x8748BFBF, 0x6D2CAEAE, 0x46E3C0C0, 0xD6573C3C, 0x3E859A9A, + 0x6929A9A9, 0x647D4F4F, 0x2A948181, 0xCE492E2E, 0xCB17C6C6, 0x2FCA6969, + 0xFCC3BDBD, 0x975CA3A3, 0x055EE8E8, 0x7AD0EDED, 0xAC87D1D1, 0x7F8E0505, + 0xD5BA6464, 0x1AA8A5A5, 0x4BB72626, 0x0EB9BEBE, 0xA7608787, 0x5AF8D5D5, + 0x28223636, 0x14111B1B, 0x3FDE7575, 0x2979D9D9, 0x88AAEEEE, 0x3C332D2D, + 0x4C5F7979, 0x02B6B7B7, 0xB896CACA, 0xDA583535, 0xB09CC4C4, 0x17FC4343, + 0x551A8484, 0x1FF64D4D, 0x8A1C5959, 0x7D38B2B2, 0x57AC3333, 0xC718CFCF, + 0x8DF40606, 0x74695353, 0xB7749B9B, 0xC4F59797, 0x9F56ADAD, 0x72DAE3E3, + 0x7ED5EAEA, 0x154AF4F4, 0x229E8F8F, 0x12A2ABAB, 0x584E6262, 0x07E85F5F, + 0x99E51D1D, 0x34392323, 0x6EC1F6F6, 0x50446C6C, 0xDE5D3232, 0x68724646, + 0x6526A0A0, 0xBC93CDCD, 0xDB03DADA, 0xF8C6BABA, 0xC8FA9E9E, 0xA882D6D6, + 0x2BCF6E6E, 0x40507070, 0xDCEB8585, 0xFE750A0A, 0x328A9393, 0xA48DDFDF, + 0xCA4C2929, 0x10141C1C, 0x2173D7D7, 0xF0CCB4B4, 0xD309D4D4, 0x5D108A8A, + 0x0FE25151, 0x00000000, 0x6F9A1919, 0x9DE01A1A, 0x368F9494, 0x42E6C7C7, + 0x4AECC9C9, 0x5EFDD2D2, 0xC1AB7F7F, 0xE0D8A8A8}, + + { + 0xBC75BC32, 0xECF3EC21, 0x20C62043, 0xB3F4B3C9, 0xDADBDA03, 0x027B028B, 
+ 0xE2FBE22B, 0x9EC89EFA, 0xC94AC9EC, 0xD4D3D409, 0x18E6186B, 0x1E6B1E9F, + 0x9845980E, 0xB27DB238, 0xA6E8A6D2, 0x264B26B7, 0x3CD63C57, 0x9332938A, + 0x82D882EE, 0x52FD5298, 0x7B377BD4, 0xBB71BB37, 0x5BF15B97, 0x47E14783, + 0x2430243C, 0x510F51E2, 0xBAF8BAC6, 0x4A1B4AF3, 0xBF87BF48, 0x0DFA0D70, + 0xB006B0B3, 0x753F75DE, 0xD25ED2FD, 0x7DBA7D20, 0x66AE6631, 0x3A5B3AA3, + 0x598A591C, 0x00000000, 0xCDBCCD93, 0x1A9D1AE0, 0xAE6DAE2C, 0x7FC17FAB, + 0x2BB12BC7, 0xBE0EBEB9, 0xE080E0A0, 0x8A5D8A10, 0x3BD23B52, 0x64D564BA, + 0xD8A0D888, 0xE784E7A5, 0x5F075FE8, 0x1B141B11, 0x2CB52CC2, 0xFC90FCB4, + 0x312C3127, 0x80A38065, 0x73B2732A, 0x0C730C81, 0x794C795F, 0x6B546B41, + 0x4B924B02, 0x53745369, 0x9436948F, 0x8351831F, 0x2A382A36, 0xC4B0C49C, + 0x22BD22C8, 0xD55AD5F8, 0xBDFCBDC3, 0x48604878, 0xFF62FFCE, 0x4C964C07, + 0x416C4177, 0xC742C7E6, 0xEBF7EB24, 0x1C101C14, 0x5D7C5D63, 0x36283622, + 0x672767C0, 0xE98CE9AF, 0x441344F9, 0x149514EA, 0xF59CF5BB, 0xCFC7CF18, + 0x3F243F2D, 0xC046C0E3, 0x723B72DB, 0x5470546C, 0x29CA294C, 0xF0E3F035, + 0x088508FE, 0xC6CBC617, 0xF311F34F, 0x8CD08CE4, 0xA493A459, 0xCAB8CA96, + 0x68A6683B, 0xB883B84D, 0x38203828, 0xE5FFE52E, 0xAD9FAD56, 0x0B770B84, + 0xC8C3C81D, 0x99CC99FF, 0x580358ED, 0x196F199A, 0x0E080E0A, 0x95BF957E, + 0x70407050, 0xF7E7F730, 0x6E2B6ECF, 0x1FE21F6E, 0xB579B53D, 0x090C090F, + 0x61AA6134, 0x57825716, 0x9F419F0B, 0x9D3A9D80, 0x11EA1164, 0x25B925CD, + 0xAFE4AFDD, 0x459A4508, 0xDFA4DF8D, 0xA397A35C, 0xEA7EEAD5, 0x35DA3558, + 0xED7AEDD0, 0x431743FC, 0xF866F8CB, 0xFB94FBB1, 0x37A137D3, 0xFA1DFA40, + 0xC23DC268, 0xB4F0B4CC, 0x32DE325D, 0x9CB39C71, 0x560B56E7, 0xE372E3DA, + 0x87A78760, 0x151C151B, 0xF9EFF93A, 0x63D163BF, 0x345334A9, 0x9A3E9A85, + 0xB18FB142, 0x7C337CD1, 0x8826889B, 0x3D5F3DA6, 0xA1ECA1D7, 0xE476E4DF, + 0x812A8194, 0x91499101, 0x0F810FFB, 0xEE88EEAA, 0x16EE1661, 0xD721D773, + 0x97C497F5, 0xA51AA5A8, 0xFEEBFE3F, 0x6DD96DB5, 0x78C578AE, 0xC539C56D, + 0x1D991DE5, 0x76CD76A4, 0x3EAD3EDC, 0xCB31CB67, 0xB68BB647, 0xEF01EF5B, + 0x1218121E, 0x602360C5, 0x6ADD6AB0, 0x4D1F4DF6, 0xCE4ECEE9, 0xDE2DDE7C, + 0x55F9559D, 0x7E487E5A, 0x214F21B2, 0x03F2037A, 0xA065A026, 0x5E8E5E19, + 0x5A785A66, 0x655C654B, 0x6258624E, 0xFD19FD45, 0x068D06F4, 0x40E54086, + 0xF298F2BE, 0x335733AC, 0x17671790, 0x057F058E, 0xE805E85E, 0x4F644F7D, + 0x89AF896A, 0x10631095, 0x74B6742F, 0x0AFE0A75, 0x5CF55C92, 0x9BB79B74, + 0x2D3C2D33, 0x30A530D6, 0x2ECE2E49, 0x49E94989, 0x46684672, 0x77447755, + 0xA8E0A8D8, 0x964D9604, 0x284328BD, 0xA969A929, 0xD929D979, 0x862E8691, + 0xD1ACD187, 0xF415F44A, 0x8D598D15, 0xD6A8D682, 0xB90AB9BC, 0x429E420D, + 0xF66EF6C1, 0x2F472FB8, 0xDDDFDD06, 0x23342339, 0xCC35CC62, 0xF16AF1C4, + 0xC1CFC112, 0x85DC85EB, 0x8F228F9E, 0x71C971A1, 0x90C090F0, 0xAA9BAA53, + 0x018901F1, 0x8BD48BE1, 0x4EED4E8C, 0x8EAB8E6F, 0xAB12ABA2, 0x6FA26F3E, + 0xE60DE654, 0xDB52DBF2, 0x92BB927B, 0xB702B7B6, 0x692F69CA, 0x39A939D9, + 0xD3D7D30C, 0xA761A723, 0xA21EA2AD, 0xC3B4C399, 0x6C506C44, 0x07040705, + 0x04F6047F, 0x27C22746, 0xAC16ACA7, 0xD025D076, 0x50865013, 0xDC56DCF7, + 0x8455841A, 0xE109E151, 0x7ABE7A25, 0x139113EF}, + + { + 0xD939A9D9, 0x90176790, 0x719CB371, 0xD2A6E8D2, 0x05070405, 0x9852FD98, + 0x6580A365, 0xDFE476DF, 0x08459A08, 0x024B9202, 0xA0E080A0, 0x665A7866, + 0xDDAFE4DD, 0xB06ADDB0, 0xBF63D1BF, 0x362A3836, 0x54E60D54, 0x4320C643, + 0x62CC3562, 0xBEF298BE, 0x1E12181E, 0x24EBF724, 0xD7A1ECD7, 0x77416C77, + 0xBD2843BD, 0x32BC7532, 0xD47B37D4, 0x9B88269B, 0x700DFA70, 0xF94413F9, + 0xB1FB94B1, 0x5A7E485A, 0x7A03F27A, 0xE48CD0E4, 0x47B68B47, 0x3C24303C, + 0xA5E784A5, 
0x416B5441, 0x06DDDF06, 0xC56023C5, 0x45FD1945, 0xA33A5BA3, + 0x68C23D68, 0x158D5915, 0x21ECF321, 0x3166AE31, 0x3E6FA23E, 0x16578216, + 0x95106395, 0x5BEF015B, 0x4DB8834D, 0x91862E91, 0xB56DD9B5, 0x1F83511F, + 0x53AA9B53, 0x635D7C63, 0x3B68A63B, 0x3FFEEB3F, 0xD630A5D6, 0x257ABE25, + 0xA7AC16A7, 0x0F090C0F, 0x35F0E335, 0x23A76123, 0xF090C0F0, 0xAFE98CAF, + 0x809D3A80, 0x925CF592, 0x810C7381, 0x27312C27, 0x76D02576, 0xE7560BE7, + 0x7B92BB7B, 0xE9CE4EE9, 0xF10189F1, 0x9F1E6B9F, 0xA93453A9, 0xC4F16AC4, + 0x99C3B499, 0x975BF197, 0x8347E183, 0x6B18E66B, 0xC822BDC8, 0x0E98450E, + 0x6E1FE26E, 0xC9B3F4C9, 0x2F74B62F, 0xCBF866CB, 0xFF99CCFF, 0xEA1495EA, + 0xED5803ED, 0xF7DC56F7, 0xE18BD4E1, 0x1B151C1B, 0xADA21EAD, 0x0CD3D70C, + 0x2BE2FB2B, 0x1DC8C31D, 0x195E8E19, 0xC22CB5C2, 0x8949E989, 0x12C1CF12, + 0x7E95BF7E, 0x207DBA20, 0x6411EA64, 0x840B7784, 0x6DC5396D, 0x6A89AF6A, + 0xD17C33D1, 0xA171C9A1, 0xCEFF62CE, 0x37BB7137, 0xFB0F81FB, 0x3DB5793D, + 0x51E10951, 0xDC3EADDC, 0x2D3F242D, 0xA476CDA4, 0x9D55F99D, 0xEE82D8EE, + 0x8640E586, 0xAE78C5AE, 0xCD25B9CD, 0x04964D04, 0x55774455, 0x0A0E080A, + 0x13508613, 0x30F7E730, 0xD337A1D3, 0x40FA1D40, 0x3461AA34, 0x8C4EED8C, + 0xB3B006B3, 0x6C54706C, 0x2A73B22A, 0x523BD252, 0x0B9F410B, 0x8B027B8B, + 0x88D8A088, 0x4FF3114F, 0x67CB3167, 0x4627C246, 0xC06727C0, 0xB4FC90B4, + 0x28382028, 0x7F04F67F, 0x78486078, 0x2EE5FF2E, 0x074C9607, 0x4B655C4B, + 0xC72BB1C7, 0x6F8EAB6F, 0x0D429E0D, 0xBBF59CBB, 0xF2DB52F2, 0xF34A1BF3, + 0xA63D5FA6, 0x59A49359, 0xBCB90ABC, 0x3AF9EF3A, 0xEF1391EF, 0xFE0885FE, + 0x01914901, 0x6116EE61, 0x7CDE2D7C, 0xB2214FB2, 0x42B18F42, 0xDB723BDB, + 0xB82F47B8, 0x48BF8748, 0x2CAE6D2C, 0xE3C046E3, 0x573CD657, 0x859A3E85, + 0x29A96929, 0x7D4F647D, 0x94812A94, 0x492ECE49, 0x17C6CB17, 0xCA692FCA, + 0xC3BDFCC3, 0x5CA3975C, 0x5EE8055E, 0xD0ED7AD0, 0x87D1AC87, 0x8E057F8E, + 0xBA64D5BA, 0xA8A51AA8, 0xB7264BB7, 0xB9BE0EB9, 0x6087A760, 0xF8D55AF8, + 0x22362822, 0x111B1411, 0xDE753FDE, 0x79D92979, 0xAAEE88AA, 0x332D3C33, + 0x5F794C5F, 0xB6B702B6, 0x96CAB896, 0x5835DA58, 0x9CC4B09C, 0xFC4317FC, + 0x1A84551A, 0xF64D1FF6, 0x1C598A1C, 0x38B27D38, 0xAC3357AC, 0x18CFC718, + 0xF4068DF4, 0x69537469, 0x749BB774, 0xF597C4F5, 0x56AD9F56, 0xDAE372DA, + 0xD5EA7ED5, 0x4AF4154A, 0x9E8F229E, 0xA2AB12A2, 0x4E62584E, 0xE85F07E8, + 0xE51D99E5, 0x39233439, 0xC1F66EC1, 0x446C5044, 0x5D32DE5D, 0x72466872, + 0x26A06526, 0x93CDBC93, 0x03DADB03, 0xC6BAF8C6, 0xFA9EC8FA, 0x82D6A882, + 0xCF6E2BCF, 0x50704050, 0xEB85DCEB, 0x750AFE75, 0x8A93328A, 0x8DDFA48D, + 0x4C29CA4C, 0x141C1014, 0x73D72173, 0xCCB4F0CC, 0x09D4D309, 0x108A5D10, + 0xE2510FE2, 0x00000000, 0x9A196F9A, 0xE01A9DE0, 0x8F94368F, 0xE6C742E6, + 0xECC94AEC, 0xFDD25EFD, 0xAB7FC1AB, 0xD8A8E0D8} +}; + +/* The exp_to_poly and poly_to_exp tables are used to perform efficient + * operations in GF(2^8) represented as GF(2)[x]/w(x) where + * w(x)=x^8+x^6+x^3+x^2+1. We care about doing that because it's part of the + * definition of the RS matrix in the key schedule. Elements of that field + * are polynomials of degree not greater than 7 and all coefficients 0 or 1, + * which can be represented naturally by bytes (just substitute x=2). In that + * form, GF(2^8) addition is the same as bitwise XOR, but GF(2^8) + * multiplication is inefficient without hardware support. To multiply + * faster, I make use of the fact x is a generator for the nonzero elements, + * so that every element p of GF(2)[x]/w(x) is either 0 or equal to (x)^n for + * some n in 0..254. Note that that caret is exponentiation in GF(2^8), + * *not* polynomial notation. 
So if I want to compute pq where p and q are + * in GF(2^8), I can just say: + * 1. if p=0 or q=0 then pq=0 + * 2. otherwise, find m and n such that p=x^m and q=x^n + * 3. pq=(x^m)(x^n)=x^(m+n), so add m and n and find pq + * The translations in steps 2 and 3 are looked up in the tables + * poly_to_exp (for step 2) and exp_to_poly (for step 3). To see this + * in action, look at the CALC_S macro. As additional wrinkles, note that + * one of my operands is always a constant, so the poly_to_exp lookup on it + * is done in advance; I included the original values in the comments so + * readers can have some chance of recognizing that this *is* the RS matrix + * from the Twofish paper. I've only included the table entries I actually + * need; I never do a lookup on a variable input of zero and the biggest + * exponents I'll ever see are 254 (variable) and 237 (constant), so they'll + * never sum to more than 491. I'm repeating part of the exp_to_poly table + * so that I don't have to do mod-255 reduction in the exponent arithmetic. + * Since I know my constant operands are never zero, I only have to worry + * about zero values in the variable operand, and I do it with a simple + * conditional branch. I know conditionals are expensive, but I couldn't + * see a non-horrible way of avoiding them, and I did manage to group the + * statements so that each if covers four group multiplications. */ + +static const u8 poly_to_exp[255] = { + 0x00, 0x01, 0x17, 0x02, 0x2E, 0x18, 0x53, 0x03, 0x6A, 0x2F, 0x93, 0x19, + 0x34, 0x54, 0x45, 0x04, 0x5C, 0x6B, 0xB6, 0x30, 0xA6, 0x94, 0x4B, 0x1A, + 0x8C, 0x35, 0x81, 0x55, 0xAA, 0x46, 0x0D, 0x05, 0x24, 0x5D, 0x87, 0x6C, + 0x9B, 0xB7, 0xC1, 0x31, 0x2B, 0xA7, 0xA3, 0x95, 0x98, 0x4C, 0xCA, 0x1B, + 0xE6, 0x8D, 0x73, 0x36, 0xCD, 0x82, 0x12, 0x56, 0x62, 0xAB, 0xF0, 0x47, + 0x4F, 0x0E, 0xBD, 0x06, 0xD4, 0x25, 0xD2, 0x5E, 0x27, 0x88, 0x66, 0x6D, + 0xD6, 0x9C, 0x79, 0xB8, 0x08, 0xC2, 0xDF, 0x32, 0x68, 0x2C, 0xFD, 0xA8, + 0x8A, 0xA4, 0x5A, 0x96, 0x29, 0x99, 0x22, 0x4D, 0x60, 0xCB, 0xE4, 0x1C, + 0x7B, 0xE7, 0x3B, 0x8E, 0x9E, 0x74, 0xF4, 0x37, 0xD8, 0xCE, 0xF9, 0x83, + 0x6F, 0x13, 0xB2, 0x57, 0xE1, 0x63, 0xDC, 0xAC, 0xC4, 0xF1, 0xAF, 0x48, + 0x0A, 0x50, 0x42, 0x0F, 0xBA, 0xBE, 0xC7, 0x07, 0xDE, 0xD5, 0x78, 0x26, + 0x65, 0xD3, 0xD1, 0x5F, 0xE3, 0x28, 0x21, 0x89, 0x59, 0x67, 0xFC, 0x6E, + 0xB1, 0xD7, 0xF8, 0x9D, 0xF3, 0x7A, 0x3A, 0xB9, 0xC6, 0x09, 0x41, 0xC3, + 0xAE, 0xE0, 0xDB, 0x33, 0x44, 0x69, 0x92, 0x2D, 0x52, 0xFE, 0x16, 0xA9, + 0x0C, 0x8B, 0x80, 0xA5, 0x4A, 0x5B, 0xB5, 0x97, 0xC9, 0x2A, 0xA2, 0x9A, + 0xC0, 0x23, 0x86, 0x4E, 0xBC, 0x61, 0xEF, 0xCC, 0x11, 0xE5, 0x72, 0x1D, + 0x3D, 0x7C, 0xEB, 0xE8, 0xE9, 0x3C, 0xEA, 0x8F, 0x7D, 0x9F, 0xEC, 0x75, + 0x1E, 0xF5, 0x3E, 0x38, 0xF6, 0xD9, 0x3F, 0xCF, 0x76, 0xFA, 0x1F, 0x84, + 0xA0, 0x70, 0xED, 0x14, 0x90, 0xB3, 0x7E, 0x58, 0xFB, 0xE2, 0x20, 0x64, + 0xD0, 0xDD, 0x77, 0xAD, 0xDA, 0xC5, 0x40, 0xF2, 0x39, 0xB0, 0xF7, 0x49, + 0xB4, 0x0B, 0x7F, 0x51, 0x15, 0x43, 0x91, 0x10, 0x71, 0xBB, 0xEE, 0xBF, + 0x85, 0xC8, 0xA1 +}; + +static const u8 exp_to_poly[492] = { + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x4D, 0x9A, 0x79, 0xF2, + 0xA9, 0x1F, 0x3E, 0x7C, 0xF8, 0xBD, 0x37, 0x6E, 0xDC, 0xF5, 0xA7, 0x03, + 0x06, 0x0C, 0x18, 0x30, 0x60, 0xC0, 0xCD, 0xD7, 0xE3, 0x8B, 0x5B, 0xB6, + 0x21, 0x42, 0x84, 0x45, 0x8A, 0x59, 0xB2, 0x29, 0x52, 0xA4, 0x05, 0x0A, + 0x14, 0x28, 0x50, 0xA0, 0x0D, 0x1A, 0x34, 0x68, 0xD0, 0xED, 0x97, 0x63, + 0xC6, 0xC1, 0xCF, 0xD3, 0xEB, 0x9B, 0x7B, 0xF6, 0xA1, 0x0F, 0x1E, 0x3C, + 0x78, 0xF0, 0xAD, 0x17, 0x2E, 0x5C, 0xB8, 
0x3D, 0x7A, 0xF4, 0xA5, 0x07, + 0x0E, 0x1C, 0x38, 0x70, 0xE0, 0x8D, 0x57, 0xAE, 0x11, 0x22, 0x44, 0x88, + 0x5D, 0xBA, 0x39, 0x72, 0xE4, 0x85, 0x47, 0x8E, 0x51, 0xA2, 0x09, 0x12, + 0x24, 0x48, 0x90, 0x6D, 0xDA, 0xF9, 0xBF, 0x33, 0x66, 0xCC, 0xD5, 0xE7, + 0x83, 0x4B, 0x96, 0x61, 0xC2, 0xC9, 0xDF, 0xF3, 0xAB, 0x1B, 0x36, 0x6C, + 0xD8, 0xFD, 0xB7, 0x23, 0x46, 0x8C, 0x55, 0xAA, 0x19, 0x32, 0x64, 0xC8, + 0xDD, 0xF7, 0xA3, 0x0B, 0x16, 0x2C, 0x58, 0xB0, 0x2D, 0x5A, 0xB4, 0x25, + 0x4A, 0x94, 0x65, 0xCA, 0xD9, 0xFF, 0xB3, 0x2B, 0x56, 0xAC, 0x15, 0x2A, + 0x54, 0xA8, 0x1D, 0x3A, 0x74, 0xE8, 0x9D, 0x77, 0xEE, 0x91, 0x6F, 0xDE, + 0xF1, 0xAF, 0x13, 0x26, 0x4C, 0x98, 0x7D, 0xFA, 0xB9, 0x3F, 0x7E, 0xFC, + 0xB5, 0x27, 0x4E, 0x9C, 0x75, 0xEA, 0x99, 0x7F, 0xFE, 0xB1, 0x2F, 0x5E, + 0xBC, 0x35, 0x6A, 0xD4, 0xE5, 0x87, 0x43, 0x86, 0x41, 0x82, 0x49, 0x92, + 0x69, 0xD2, 0xE9, 0x9F, 0x73, 0xE6, 0x81, 0x4F, 0x9E, 0x71, 0xE2, 0x89, + 0x5F, 0xBE, 0x31, 0x62, 0xC4, 0xC5, 0xC7, 0xC3, 0xCB, 0xDB, 0xFB, 0xBB, + 0x3B, 0x76, 0xEC, 0x95, 0x67, 0xCE, 0xD1, 0xEF, 0x93, 0x6B, 0xD6, 0xE1, + 0x8F, 0x53, 0xA6, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x4D, + 0x9A, 0x79, 0xF2, 0xA9, 0x1F, 0x3E, 0x7C, 0xF8, 0xBD, 0x37, 0x6E, 0xDC, + 0xF5, 0xA7, 0x03, 0x06, 0x0C, 0x18, 0x30, 0x60, 0xC0, 0xCD, 0xD7, 0xE3, + 0x8B, 0x5B, 0xB6, 0x21, 0x42, 0x84, 0x45, 0x8A, 0x59, 0xB2, 0x29, 0x52, + 0xA4, 0x05, 0x0A, 0x14, 0x28, 0x50, 0xA0, 0x0D, 0x1A, 0x34, 0x68, 0xD0, + 0xED, 0x97, 0x63, 0xC6, 0xC1, 0xCF, 0xD3, 0xEB, 0x9B, 0x7B, 0xF6, 0xA1, + 0x0F, 0x1E, 0x3C, 0x78, 0xF0, 0xAD, 0x17, 0x2E, 0x5C, 0xB8, 0x3D, 0x7A, + 0xF4, 0xA5, 0x07, 0x0E, 0x1C, 0x38, 0x70, 0xE0, 0x8D, 0x57, 0xAE, 0x11, + 0x22, 0x44, 0x88, 0x5D, 0xBA, 0x39, 0x72, 0xE4, 0x85, 0x47, 0x8E, 0x51, + 0xA2, 0x09, 0x12, 0x24, 0x48, 0x90, 0x6D, 0xDA, 0xF9, 0xBF, 0x33, 0x66, + 0xCC, 0xD5, 0xE7, 0x83, 0x4B, 0x96, 0x61, 0xC2, 0xC9, 0xDF, 0xF3, 0xAB, + 0x1B, 0x36, 0x6C, 0xD8, 0xFD, 0xB7, 0x23, 0x46, 0x8C, 0x55, 0xAA, 0x19, + 0x32, 0x64, 0xC8, 0xDD, 0xF7, 0xA3, 0x0B, 0x16, 0x2C, 0x58, 0xB0, 0x2D, + 0x5A, 0xB4, 0x25, 0x4A, 0x94, 0x65, 0xCA, 0xD9, 0xFF, 0xB3, 0x2B, 0x56, + 0xAC, 0x15, 0x2A, 0x54, 0xA8, 0x1D, 0x3A, 0x74, 0xE8, 0x9D, 0x77, 0xEE, + 0x91, 0x6F, 0xDE, 0xF1, 0xAF, 0x13, 0x26, 0x4C, 0x98, 0x7D, 0xFA, 0xB9, + 0x3F, 0x7E, 0xFC, 0xB5, 0x27, 0x4E, 0x9C, 0x75, 0xEA, 0x99, 0x7F, 0xFE, + 0xB1, 0x2F, 0x5E, 0xBC, 0x35, 0x6A, 0xD4, 0xE5, 0x87, 0x43, 0x86, 0x41, + 0x82, 0x49, 0x92, 0x69, 0xD2, 0xE9, 0x9F, 0x73, 0xE6, 0x81, 0x4F, 0x9E, + 0x71, 0xE2, 0x89, 0x5F, 0xBE, 0x31, 0x62, 0xC4, 0xC5, 0xC7, 0xC3, 0xCB +}; + + +/* The table constants are indices of + * S-box entries, preprocessed through q0 and q1. 
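 */

/*
 * A worked sketch (not in the patch) of the log/antilog trick the
 * exp_to_poly comment above describes: multiply a variable byte p by
 * a fixed nonzero constant c in GF(2^8) mod w(x)=x^8+x^6+x^3+x^2+1.
 * log_c = poly_to_exp[c - 1] is precomputed and is at most 237 for
 * the RS constants, so the exponent sum never exceeds 491 and the
 * oversized exp_to_poly table makes mod-255 reduction unnecessary.
 */
static u8 gf_mul_by_const(u8 p, u8 log_c)
{
	if (!p)		/* step 1: 0 times anything is 0 */
		return 0;
	/* steps 2 and 3: p = x^m, c = x^log_c, so p*c = x^(m + log_c) */
	return exp_to_poly[poly_to_exp[p - 1] + log_c];
}

/* Those preprocessed S-box index constants: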
*/ +static const u8 calc_sb_tbl[512] = { + 0xA9, 0x75, 0x67, 0xF3, 0xB3, 0xC6, 0xE8, 0xF4, + 0x04, 0xDB, 0xFD, 0x7B, 0xA3, 0xFB, 0x76, 0xC8, + 0x9A, 0x4A, 0x92, 0xD3, 0x80, 0xE6, 0x78, 0x6B, + 0xE4, 0x45, 0xDD, 0x7D, 0xD1, 0xE8, 0x38, 0x4B, + 0x0D, 0xD6, 0xC6, 0x32, 0x35, 0xD8, 0x98, 0xFD, + 0x18, 0x37, 0xF7, 0x71, 0xEC, 0xF1, 0x6C, 0xE1, + 0x43, 0x30, 0x75, 0x0F, 0x37, 0xF8, 0x26, 0x1B, + 0xFA, 0x87, 0x13, 0xFA, 0x94, 0x06, 0x48, 0x3F, + 0xF2, 0x5E, 0xD0, 0xBA, 0x8B, 0xAE, 0x30, 0x5B, + 0x84, 0x8A, 0x54, 0x00, 0xDF, 0xBC, 0x23, 0x9D, + 0x19, 0x6D, 0x5B, 0xC1, 0x3D, 0xB1, 0x59, 0x0E, + 0xF3, 0x80, 0xAE, 0x5D, 0xA2, 0xD2, 0x82, 0xD5, + 0x63, 0xA0, 0x01, 0x84, 0x83, 0x07, 0x2E, 0x14, + 0xD9, 0xB5, 0x51, 0x90, 0x9B, 0x2C, 0x7C, 0xA3, + 0xA6, 0xB2, 0xEB, 0x73, 0xA5, 0x4C, 0xBE, 0x54, + 0x16, 0x92, 0x0C, 0x74, 0xE3, 0x36, 0x61, 0x51, + 0xC0, 0x38, 0x8C, 0xB0, 0x3A, 0xBD, 0xF5, 0x5A, + 0x73, 0xFC, 0x2C, 0x60, 0x25, 0x62, 0x0B, 0x96, + 0xBB, 0x6C, 0x4E, 0x42, 0x89, 0xF7, 0x6B, 0x10, + 0x53, 0x7C, 0x6A, 0x28, 0xB4, 0x27, 0xF1, 0x8C, + 0xE1, 0x13, 0xE6, 0x95, 0xBD, 0x9C, 0x45, 0xC7, + 0xE2, 0x24, 0xF4, 0x46, 0xB6, 0x3B, 0x66, 0x70, + 0xCC, 0xCA, 0x95, 0xE3, 0x03, 0x85, 0x56, 0xCB, + 0xD4, 0x11, 0x1C, 0xD0, 0x1E, 0x93, 0xD7, 0xB8, + 0xFB, 0xA6, 0xC3, 0x83, 0x8E, 0x20, 0xB5, 0xFF, + 0xE9, 0x9F, 0xCF, 0x77, 0xBF, 0xC3, 0xBA, 0xCC, + 0xEA, 0x03, 0x77, 0x6F, 0x39, 0x08, 0xAF, 0xBF, + 0x33, 0x40, 0xC9, 0xE7, 0x62, 0x2B, 0x71, 0xE2, + 0x81, 0x79, 0x79, 0x0C, 0x09, 0xAA, 0xAD, 0x82, + 0x24, 0x41, 0xCD, 0x3A, 0xF9, 0xEA, 0xD8, 0xB9, + 0xE5, 0xE4, 0xC5, 0x9A, 0xB9, 0xA4, 0x4D, 0x97, + 0x44, 0x7E, 0x08, 0xDA, 0x86, 0x7A, 0xE7, 0x17, + 0xA1, 0x66, 0x1D, 0x94, 0xAA, 0xA1, 0xED, 0x1D, + 0x06, 0x3D, 0x70, 0xF0, 0xB2, 0xDE, 0xD2, 0xB3, + 0x41, 0x0B, 0x7B, 0x72, 0xA0, 0xA7, 0x11, 0x1C, + 0x31, 0xEF, 0xC2, 0xD1, 0x27, 0x53, 0x90, 0x3E, + 0x20, 0x8F, 0xF6, 0x33, 0x60, 0x26, 0xFF, 0x5F, + 0x96, 0xEC, 0x5C, 0x76, 0xB1, 0x2A, 0xAB, 0x49, + 0x9E, 0x81, 0x9C, 0x88, 0x52, 0xEE, 0x1B, 0x21, + 0x5F, 0xC4, 0x93, 0x1A, 0x0A, 0xEB, 0xEF, 0xD9, + 0x91, 0xC5, 0x85, 0x39, 0x49, 0x99, 0xEE, 0xCD, + 0x2D, 0xAD, 0x4F, 0x31, 0x8F, 0x8B, 0x3B, 0x01, + 0x47, 0x18, 0x87, 0x23, 0x6D, 0xDD, 0x46, 0x1F, + 0xD6, 0x4E, 0x3E, 0x2D, 0x69, 0xF9, 0x64, 0x48, + 0x2A, 0x4F, 0xCE, 0xF2, 0xCB, 0x65, 0x2F, 0x8E, + 0xFC, 0x78, 0x97, 0x5C, 0x05, 0x58, 0x7A, 0x19, + 0xAC, 0x8D, 0x7F, 0xE5, 0xD5, 0x98, 0x1A, 0x57, + 0x4B, 0x67, 0x0E, 0x7F, 0xA7, 0x05, 0x5A, 0x64, + 0x28, 0xAF, 0x14, 0x63, 0x3F, 0xB6, 0x29, 0xFE, + 0x88, 0xF5, 0x3C, 0xB7, 0x4C, 0x3C, 0x02, 0xA5, + 0xB8, 0xCE, 0xDA, 0xE9, 0xB0, 0x68, 0x17, 0x44, + 0x55, 0xE0, 0x1F, 0x4D, 0x8A, 0x43, 0x7D, 0x69, + 0x57, 0x29, 0xC7, 0x2E, 0x8D, 0xAC, 0x74, 0x15, + 0xB7, 0x59, 0xC4, 0xA8, 0x9F, 0x0A, 0x72, 0x9E, + 0x7E, 0x6E, 0x15, 0x47, 0x22, 0xDF, 0x12, 0x34, + 0x58, 0x35, 0x07, 0x6A, 0x99, 0xCF, 0x34, 0xDC, + 0x6E, 0x22, 0x50, 0xC9, 0xDE, 0xC0, 0x68, 0x9B, + 0x65, 0x89, 0xBC, 0xD4, 0xDB, 0xED, 0xF8, 0xAB, + 0xC8, 0x12, 0xA8, 0xA2, 0x2B, 0x0D, 0x40, 0x52, + 0xDC, 0xBB, 0xFE, 0x02, 0x32, 0x2F, 0xA4, 0xA9, + 0xCA, 0xD7, 0x10, 0x61, 0x21, 0x1E, 0xF0, 0xB4, + 0xD3, 0x50, 0x5D, 0x04, 0x0F, 0xF6, 0x00, 0xC2, + 0x6F, 0x16, 0x9D, 0x25, 0x36, 0x86, 0x42, 0x56, + 0x4A, 0x55, 0x5E, 0x09, 0xC1, 0xBE, 0xE0, 0x91 +}; + +/* Macro to perform one column of the RS matrix multiplication. The + * parameters a, b, c, and d are the four bytes of output; i is the index + * of the key bytes, and w, x, y, and z, are the column of constants from + * the RS matrix, preprocessed through the poly_to_exp table. 
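 */

/*
 * For concreteness, a sketch of what the macro below does for the
 * first RS column in twofish_setkey.  The constants 0x00, 0x2D,
 * 0x01, 0x2D are the poly_to_exp translations of the RS entries
 * 01, A4, 02, A4 quoted in the call-site comments; the helper name
 * is illustrative only.
 */
static void calc_s_column0(const u8 *key, u8 *sa, u8 *sb, u8 *sc, u8 *sd)
{
	u8 tmp;

	if (key[0]) {			/* skip the multiply for a zero byte */
		tmp = poly_to_exp[key[0] - 1];
		*sa ^= exp_to_poly[tmp + 0x00];	/* key[0] * 01 */
		*sb ^= exp_to_poly[tmp + 0x2D];	/* key[0] * A4 */
		*sc ^= exp_to_poly[tmp + 0x01];	/* key[0] * 02 */
		*sd ^= exp_to_poly[tmp + 0x2D];	/* key[0] * A4 */
	}
}

/* The macro itself: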
*/ + +#define CALC_S(a, b, c, d, i, w, x, y, z) \ + if (key[i]) { \ + tmp = poly_to_exp[key[i] - 1]; \ + (a) ^= exp_to_poly[tmp + (w)]; \ + (b) ^= exp_to_poly[tmp + (x)]; \ + (c) ^= exp_to_poly[tmp + (y)]; \ + (d) ^= exp_to_poly[tmp + (z)]; \ + } + +/* Macros to calculate the key-dependent S-boxes for a 128-bit key using + * the S vector from CALC_S. CALC_SB_2 computes a single entry in all + * four S-boxes, where i is the index of the entry to compute, and a and b + * are the index numbers preprocessed through the q0 and q1 tables + * respectively. */ + +#define CALC_SB_2(i, a, b) \ + ctx->s[0][i] = mds[0][q0[(a) ^ sa] ^ se]; \ + ctx->s[1][i] = mds[1][q0[(b) ^ sb] ^ sf]; \ + ctx->s[2][i] = mds[2][q1[(a) ^ sc] ^ sg]; \ + ctx->s[3][i] = mds[3][q1[(b) ^ sd] ^ sh] + +/* Macro exactly like CALC_SB_2, but for 192-bit keys. */ + +#define CALC_SB192_2(i, a, b) \ + ctx->s[0][i] = mds[0][q0[q0[(b) ^ sa] ^ se] ^ si]; \ + ctx->s[1][i] = mds[1][q0[q1[(b) ^ sb] ^ sf] ^ sj]; \ + ctx->s[2][i] = mds[2][q1[q0[(a) ^ sc] ^ sg] ^ sk]; \ + ctx->s[3][i] = mds[3][q1[q1[(a) ^ sd] ^ sh] ^ sl]; + +/* Macro exactly like CALC_SB_2, but for 256-bit keys. */ + +#define CALC_SB256_2(i, a, b) \ + ctx->s[0][i] = mds[0][q0[q0[q1[(b) ^ sa] ^ se] ^ si] ^ sm]; \ + ctx->s[1][i] = mds[1][q0[q1[q1[(a) ^ sb] ^ sf] ^ sj] ^ sn]; \ + ctx->s[2][i] = mds[2][q1[q0[q0[(a) ^ sc] ^ sg] ^ sk] ^ so]; \ + ctx->s[3][i] = mds[3][q1[q1[q0[(b) ^ sd] ^ sh] ^ sl] ^ sp]; + +/* Macros to calculate the whitening and round subkeys. CALC_K_2 computes the + * last two stages of the h() function for a given index (either 2i or 2i+1). + * a, b, c, and d are the four bytes going into the last two stages. For + * 128-bit keys, this is the entire h() function and a and c are the index + * preprocessed through q0 and q1 respectively; for longer keys they are the + * output of previous stages. j is the index of the first key byte to use. + * CALC_K computes a pair of subkeys for 128-bit Twofish, by calling CALC_K_2 + * twice, doing the Pseudo-Hadamard Transform, and doing the necessary + * rotations. Its parameters are: a, the array to write the results into, + * j, the index of the first output entry, k and l, the preprocessed indices + * for index 2i, and m and n, the preprocessed indices for index 2i+1. + * CALC_K192_2 expands CALC_K_2 to handle 192-bit keys, by doing an + * additional lookup-and-XOR stage. The parameters a, b, c and d are the + * four bytes going into the last three stages. For 192-bit keys, c = d + * are the index preprocessed through q0, and a = b are the index + * preprocessed through q1; j is the index of the first key byte to use. + * CALC_K192 is identical to CALC_K but for using the CALC_K192_2 macro + * instead of CALC_K_2. + * CALC_K256_2 expands CALC_K192_2 to handle 256-bit keys, by doing an + * additional lookup-and-XOR stage. The parameters a and b are the index + * preprocessed through q0 and q1 respectively; j is the index of the first + * key byte to use. CALC_K256 is identical to CALC_K but for using the + * CALC_K256_2 macro instead of CALC_K_2. 
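 */

/*
 * In the paper's notation, each CALC_K(a, j, k, l, m, n) below pairs
 * up as follows (a sketch; x and y are the temporaries in
 * twofish_setkey):
 *
 *	A = h(2i*rho, Me)                  x = CALC_K_2(k, l, k, l, 0)
 *	B = rol32(h((2i+1)*rho, Mo), 8)    y = rol32(CALC_K_2(m, n, m, n, 4), 8)
 *	subkey 2i     = A + B              x += y;  ctx->a[j] = x
 *	subkey 2i + 1 = rol32(A + 2*B, 9)  y += x;  ctx->a[j + 1] = rol32(y, 9)
 *
 * The additions are plain 32-bit adds (the Pseudo-Hadamard
 * Transform), in contrast to the XORs used inside h() itself.
 */

/* The RS column, S-box, and subkey macros: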
*/ + +#define CALC_K_2(a, b, c, d, j) \ + mds[0][q0[a ^ key[(j) + 8]] ^ key[j]] \ + ^ mds[1][q0[b ^ key[(j) + 9]] ^ key[(j) + 1]] \ + ^ mds[2][q1[c ^ key[(j) + 10]] ^ key[(j) + 2]] \ + ^ mds[3][q1[d ^ key[(j) + 11]] ^ key[(j) + 3]] + +#define CALC_K(a, j, k, l, m, n) \ + x = CALC_K_2 (k, l, k, l, 0); \ + y = CALC_K_2 (m, n, m, n, 4); \ + y = rol32(y, 8); \ + x += y; y += x; ctx->a[j] = x; \ + ctx->a[(j) + 1] = rol32(y, 9) + +#define CALC_K192_2(a, b, c, d, j) \ + CALC_K_2 (q0[a ^ key[(j) + 16]], \ + q1[b ^ key[(j) + 17]], \ + q0[c ^ key[(j) + 18]], \ + q1[d ^ key[(j) + 19]], j) + +#define CALC_K192(a, j, k, l, m, n) \ + x = CALC_K192_2 (l, l, k, k, 0); \ + y = CALC_K192_2 (n, n, m, m, 4); \ + y = rol32(y, 8); \ + x += y; y += x; ctx->a[j] = x; \ + ctx->a[(j) + 1] = rol32(y, 9) + +#define CALC_K256_2(a, b, j) \ + CALC_K192_2 (q1[b ^ key[(j) + 24]], \ + q1[a ^ key[(j) + 25]], \ + q0[a ^ key[(j) + 26]], \ + q0[b ^ key[(j) + 27]], j) + +#define CALC_K256(a, j, k, l, m, n) \ + x = CALC_K256_2 (k, l, 0); \ + y = CALC_K256_2 (m, n, 4); \ + y = rol32(y, 8); \ + x += y; y += x; ctx->a[j] = x; \ + ctx->a[(j) + 1] = rol32(y, 9) + +/* Perform the key setup. */ +int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len) +{ + + struct twofish_ctx *ctx = crypto_tfm_ctx(tfm); + u32 *flags = &tfm->crt_flags; + + int i, j, k; + + /* Temporaries for CALC_K. */ + u32 x, y; + + /* The S vector used to key the S-boxes, split up into individual bytes. + * 128-bit keys use only sa through sh; 256-bit use all of them. */ + u8 sa = 0, sb = 0, sc = 0, sd = 0, se = 0, sf = 0, sg = 0, sh = 0; + u8 si = 0, sj = 0, sk = 0, sl = 0, sm = 0, sn = 0, so = 0, sp = 0; + + /* Temporary for CALC_S. */ + u8 tmp; + + /* Check key length. */ + if (key_len % 8) + { + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; /* unsupported key length */ + } + + /* Compute the first two words of the S vector. The magic numbers are + * the entries of the RS matrix, preprocessed through poly_to_exp. The + * numbers in the comments are the original (polynomial form) matrix + * entries. 
*/ + CALC_S (sa, sb, sc, sd, 0, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */ + CALC_S (sa, sb, sc, sd, 1, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */ + CALC_S (sa, sb, sc, sd, 2, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */ + CALC_S (sa, sb, sc, sd, 3, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */ + CALC_S (sa, sb, sc, sd, 4, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */ + CALC_S (sa, sb, sc, sd, 5, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */ + CALC_S (sa, sb, sc, sd, 6, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */ + CALC_S (sa, sb, sc, sd, 7, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */ + CALC_S (se, sf, sg, sh, 8, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */ + CALC_S (se, sf, sg, sh, 9, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */ + CALC_S (se, sf, sg, sh, 10, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */ + CALC_S (se, sf, sg, sh, 11, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */ + CALC_S (se, sf, sg, sh, 12, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */ + CALC_S (se, sf, sg, sh, 13, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */ + CALC_S (se, sf, sg, sh, 14, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */ + CALC_S (se, sf, sg, sh, 15, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */ + + if (key_len == 24 || key_len == 32) { /* 192- or 256-bit key */ + /* Calculate the third word of the S vector */ + CALC_S (si, sj, sk, sl, 16, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */ + CALC_S (si, sj, sk, sl, 17, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */ + CALC_S (si, sj, sk, sl, 18, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */ + CALC_S (si, sj, sk, sl, 19, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */ + CALC_S (si, sj, sk, sl, 20, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */ + CALC_S (si, sj, sk, sl, 21, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */ + CALC_S (si, sj, sk, sl, 22, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */ + CALC_S (si, sj, sk, sl, 23, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */ + } + + if (key_len == 32) { /* 256-bit key */ + /* Calculate the fourth word of the S vector */ + CALC_S (sm, sn, so, sp, 24, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */ + CALC_S (sm, sn, so, sp, 25, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */ + CALC_S (sm, sn, so, sp, 26, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */ + CALC_S (sm, sn, so, sp, 27, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */ + CALC_S (sm, sn, so, sp, 28, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */ + CALC_S (sm, sn, so, sp, 29, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */ + CALC_S (sm, sn, so, sp, 30, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */ + CALC_S (sm, sn, so, sp, 31, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */ + + /* Compute the S-boxes. */ + for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) { + CALC_SB256_2( i, calc_sb_tbl[j], calc_sb_tbl[k] ); + } + + /* Calculate whitening and round subkeys. The constants are + * indices of subkeys, preprocessed through q0 and q1. 
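 */

/*
 * Once the loop above has filled ctx->s, the round function's g()
 * really is the "four lookups and three XORs" promised in the mds
 * table comment.  As a sketch (the in-tree code does this through
 * the G1/G2 macros rather than a helper function):
 *
 *	static u32 g(const struct twofish_ctx *ctx, u32 x)
 *	{
 *		return ctx->s[0][x & 0xFF] ^
 *		       ctx->s[1][(x >> 8) & 0xFF] ^
 *		       ctx->s[2][(x >> 16) & 0xFF] ^
 *		       ctx->s[3][x >> 24];
 *	}
 */

/* The 256-bit whitening and round subkeys: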
*/ + CALC_K256 (w, 0, 0xA9, 0x75, 0x67, 0xF3); + CALC_K256 (w, 2, 0xB3, 0xC6, 0xE8, 0xF4); + CALC_K256 (w, 4, 0x04, 0xDB, 0xFD, 0x7B); + CALC_K256 (w, 6, 0xA3, 0xFB, 0x76, 0xC8); + CALC_K256 (k, 0, 0x9A, 0x4A, 0x92, 0xD3); + CALC_K256 (k, 2, 0x80, 0xE6, 0x78, 0x6B); + CALC_K256 (k, 4, 0xE4, 0x45, 0xDD, 0x7D); + CALC_K256 (k, 6, 0xD1, 0xE8, 0x38, 0x4B); + CALC_K256 (k, 8, 0x0D, 0xD6, 0xC6, 0x32); + CALC_K256 (k, 10, 0x35, 0xD8, 0x98, 0xFD); + CALC_K256 (k, 12, 0x18, 0x37, 0xF7, 0x71); + CALC_K256 (k, 14, 0xEC, 0xF1, 0x6C, 0xE1); + CALC_K256 (k, 16, 0x43, 0x30, 0x75, 0x0F); + CALC_K256 (k, 18, 0x37, 0xF8, 0x26, 0x1B); + CALC_K256 (k, 20, 0xFA, 0x87, 0x13, 0xFA); + CALC_K256 (k, 22, 0x94, 0x06, 0x48, 0x3F); + CALC_K256 (k, 24, 0xF2, 0x5E, 0xD0, 0xBA); + CALC_K256 (k, 26, 0x8B, 0xAE, 0x30, 0x5B); + CALC_K256 (k, 28, 0x84, 0x8A, 0x54, 0x00); + CALC_K256 (k, 30, 0xDF, 0xBC, 0x23, 0x9D); + } else if (key_len == 24) { /* 192-bit key */ + /* Compute the S-boxes. */ + for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) { + CALC_SB192_2( i, calc_sb_tbl[j], calc_sb_tbl[k] ); + } + + /* Calculate whitening and round subkeys. The constants are + * indices of subkeys, preprocessed through q0 and q1. */ + CALC_K192 (w, 0, 0xA9, 0x75, 0x67, 0xF3); + CALC_K192 (w, 2, 0xB3, 0xC6, 0xE8, 0xF4); + CALC_K192 (w, 4, 0x04, 0xDB, 0xFD, 0x7B); + CALC_K192 (w, 6, 0xA3, 0xFB, 0x76, 0xC8); + CALC_K192 (k, 0, 0x9A, 0x4A, 0x92, 0xD3); + CALC_K192 (k, 2, 0x80, 0xE6, 0x78, 0x6B); + CALC_K192 (k, 4, 0xE4, 0x45, 0xDD, 0x7D); + CALC_K192 (k, 6, 0xD1, 0xE8, 0x38, 0x4B); + CALC_K192 (k, 8, 0x0D, 0xD6, 0xC6, 0x32); + CALC_K192 (k, 10, 0x35, 0xD8, 0x98, 0xFD); + CALC_K192 (k, 12, 0x18, 0x37, 0xF7, 0x71); + CALC_K192 (k, 14, 0xEC, 0xF1, 0x6C, 0xE1); + CALC_K192 (k, 16, 0x43, 0x30, 0x75, 0x0F); + CALC_K192 (k, 18, 0x37, 0xF8, 0x26, 0x1B); + CALC_K192 (k, 20, 0xFA, 0x87, 0x13, 0xFA); + CALC_K192 (k, 22, 0x94, 0x06, 0x48, 0x3F); + CALC_K192 (k, 24, 0xF2, 0x5E, 0xD0, 0xBA); + CALC_K192 (k, 26, 0x8B, 0xAE, 0x30, 0x5B); + CALC_K192 (k, 28, 0x84, 0x8A, 0x54, 0x00); + CALC_K192 (k, 30, 0xDF, 0xBC, 0x23, 0x9D); + } else { /* 128-bit key */ + /* Compute the S-boxes. */ + for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) { + CALC_SB_2( i, calc_sb_tbl[j], calc_sb_tbl[k] ); + } + + /* Calculate whitening and round subkeys. The constants are + * indices of subkeys, preprocessed through q0 and q1. 
*/ + CALC_K (w, 0, 0xA9, 0x75, 0x67, 0xF3); + CALC_K (w, 2, 0xB3, 0xC6, 0xE8, 0xF4); + CALC_K (w, 4, 0x04, 0xDB, 0xFD, 0x7B); + CALC_K (w, 6, 0xA3, 0xFB, 0x76, 0xC8); + CALC_K (k, 0, 0x9A, 0x4A, 0x92, 0xD3); + CALC_K (k, 2, 0x80, 0xE6, 0x78, 0x6B); + CALC_K (k, 4, 0xE4, 0x45, 0xDD, 0x7D); + CALC_K (k, 6, 0xD1, 0xE8, 0x38, 0x4B); + CALC_K (k, 8, 0x0D, 0xD6, 0xC6, 0x32); + CALC_K (k, 10, 0x35, 0xD8, 0x98, 0xFD); + CALC_K (k, 12, 0x18, 0x37, 0xF7, 0x71); + CALC_K (k, 14, 0xEC, 0xF1, 0x6C, 0xE1); + CALC_K (k, 16, 0x43, 0x30, 0x75, 0x0F); + CALC_K (k, 18, 0x37, 0xF8, 0x26, 0x1B); + CALC_K (k, 20, 0xFA, 0x87, 0x13, 0xFA); + CALC_K (k, 22, 0x94, 0x06, 0x48, 0x3F); + CALC_K (k, 24, 0xF2, 0x5E, 0xD0, 0xBA); + CALC_K (k, 26, 0x8B, 0xAE, 0x30, 0x5B); + CALC_K (k, 28, 0x84, 0x8A, 0x54, 0x00); + CALC_K (k, 30, 0xDF, 0xBC, 0x23, 0x9D); + } + + return 0; +} + +EXPORT_SYMBOL_GPL(twofish_setkey); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Twofish cipher common functions"); diff --git a/drivers/base/hypervisor.c b/drivers/base/hypervisor.c index 0c85e9d6a448..7080b413ddc9 100644 --- a/drivers/base/hypervisor.c +++ b/drivers/base/hypervisor.c @@ -1,8 +1,9 @@ /* * hypervisor.c - /sys/hypervisor subsystem. * - * This file is released under the GPLv2 + * Copyright (C) IBM Corp. 2006 * + * This file is released under the GPLv2 */ #include <linux/kobject.h> diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c index 3d4261c39f16..40535036e893 100644 --- a/drivers/block/cryptoloop.c +++ b/drivers/block/cryptoloop.c @@ -40,11 +40,13 @@ static int cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info) { int err = -EINVAL; + int cipher_len; + int mode_len; char cms[LO_NAME_SIZE]; /* cipher-mode string */ char *cipher; char *mode; char *cmsp = cms; /* c-m string pointer */ - struct crypto_tfm *tfm = NULL; + struct crypto_blkcipher *tfm; /* encryption breaks for non sector aligned offsets */ @@ -53,20 +55,39 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info) strncpy(cms, info->lo_crypt_name, LO_NAME_SIZE); cms[LO_NAME_SIZE - 1] = 0; - cipher = strsep(&cmsp, "-"); - mode = strsep(&cmsp, "-"); - - if (mode == NULL || strcmp(mode, "cbc") == 0) - tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_CBC | - CRYPTO_TFM_REQ_MAY_SLEEP); - else if (strcmp(mode, "ecb") == 0) - tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_ECB | - CRYPTO_TFM_REQ_MAY_SLEEP); - if (tfm == NULL) + + cipher = cmsp; + cipher_len = strcspn(cmsp, "-"); + + mode = cmsp + cipher_len; + mode_len = 0; + if (*mode) { + mode++; + mode_len = strcspn(mode, "-"); + } + + if (!mode_len) { + mode = "cbc"; + mode_len = 3; + } + + if (cipher_len + mode_len + 3 > LO_NAME_SIZE) return -EINVAL; - err = tfm->crt_u.cipher.cit_setkey(tfm, info->lo_encrypt_key, - info->lo_encrypt_key_size); + memmove(cms, mode, mode_len); + cmsp = cms + mode_len; + *cmsp++ = '('; + memcpy(cmsp, info->lo_crypt_name, cipher_len); + cmsp += cipher_len; + *cmsp++ = ')'; + *cmsp = 0; + + tfm = crypto_alloc_blkcipher(cms, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + err = crypto_blkcipher_setkey(tfm, info->lo_encrypt_key, + info->lo_encrypt_key_size); if (err != 0) goto out_free_tfm; @@ -75,99 +96,49 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info) return 0; out_free_tfm: - crypto_free_tfm(tfm); + crypto_free_blkcipher(tfm); out: return err; } -typedef int (*encdec_ecb_t)(struct crypto_tfm *tfm, +typedef int (*encdec_cbc_t)(struct blkcipher_desc *desc, struct scatterlist *sg_out, struct 
scatterlist *sg_in, unsigned int nsg); - -static int -cryptoloop_transfer_ecb(struct loop_device *lo, int cmd, - struct page *raw_page, unsigned raw_off, - struct page *loop_page, unsigned loop_off, - int size, sector_t IV) -{ - struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data; - struct scatterlist sg_out = { NULL, }; - struct scatterlist sg_in = { NULL, }; - - encdec_ecb_t encdecfunc; - struct page *in_page, *out_page; - unsigned in_offs, out_offs; - - if (cmd == READ) { - in_page = raw_page; - in_offs = raw_off; - out_page = loop_page; - out_offs = loop_off; - encdecfunc = tfm->crt_u.cipher.cit_decrypt; - } else { - in_page = loop_page; - in_offs = loop_off; - out_page = raw_page; - out_offs = raw_off; - encdecfunc = tfm->crt_u.cipher.cit_encrypt; - } - - while (size > 0) { - const int sz = min(size, LOOP_IV_SECTOR_SIZE); - - sg_in.page = in_page; - sg_in.offset = in_offs; - sg_in.length = sz; - - sg_out.page = out_page; - sg_out.offset = out_offs; - sg_out.length = sz; - - encdecfunc(tfm, &sg_out, &sg_in, sz); - - size -= sz; - in_offs += sz; - out_offs += sz; - } - - return 0; -} - -typedef int (*encdec_cbc_t)(struct crypto_tfm *tfm, - struct scatterlist *sg_out, - struct scatterlist *sg_in, - unsigned int nsg, u8 *iv); - static int -cryptoloop_transfer_cbc(struct loop_device *lo, int cmd, - struct page *raw_page, unsigned raw_off, - struct page *loop_page, unsigned loop_off, - int size, sector_t IV) +cryptoloop_transfer(struct loop_device *lo, int cmd, + struct page *raw_page, unsigned raw_off, + struct page *loop_page, unsigned loop_off, + int size, sector_t IV) { - struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data; + struct crypto_blkcipher *tfm = lo->key_data; + struct blkcipher_desc desc = { + .tfm = tfm, + .flags = CRYPTO_TFM_REQ_MAY_SLEEP, + }; struct scatterlist sg_out = { NULL, }; struct scatterlist sg_in = { NULL, }; encdec_cbc_t encdecfunc; struct page *in_page, *out_page; unsigned in_offs, out_offs; + int err; if (cmd == READ) { in_page = raw_page; in_offs = raw_off; out_page = loop_page; out_offs = loop_off; - encdecfunc = tfm->crt_u.cipher.cit_decrypt_iv; + encdecfunc = crypto_blkcipher_crt(tfm)->decrypt; } else { in_page = loop_page; in_offs = loop_off; out_page = raw_page; out_offs = raw_off; - encdecfunc = tfm->crt_u.cipher.cit_encrypt_iv; + encdecfunc = crypto_blkcipher_crt(tfm)->encrypt; } while (size > 0) { @@ -183,7 +154,10 @@ cryptoloop_transfer_cbc(struct loop_device *lo, int cmd, sg_out.offset = out_offs; sg_out.length = sz; - encdecfunc(tfm, &sg_out, &sg_in, sz, (u8 *)iv); + desc.info = iv; + err = encdecfunc(&desc, &sg_out, &sg_in, sz); + if (err) + return err; IV++; size -= sz; @@ -195,32 +169,6 @@ cryptoloop_transfer_cbc(struct loop_device *lo, int cmd, } static int -cryptoloop_transfer(struct loop_device *lo, int cmd, - struct page *raw_page, unsigned raw_off, - struct page *loop_page, unsigned loop_off, - int size, sector_t IV) -{ - struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data; - if(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_ECB) - { - lo->transfer = cryptoloop_transfer_ecb; - return cryptoloop_transfer_ecb(lo, cmd, raw_page, raw_off, - loop_page, loop_off, size, IV); - } - if(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_CBC) - { - lo->transfer = cryptoloop_transfer_cbc; - return cryptoloop_transfer_cbc(lo, cmd, raw_page, raw_off, - loop_page, loop_off, size, IV); - } - - /* This is not supposed to happen */ - - printk( KERN_ERR "cryptoloop: unsupported cipher mode in cryptoloop_transfer!\n"); - return -EINVAL; -} - 
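For reference, the new cryptoloop_transfer() above drives the blkcipher interface one sector at a time, with the sector number as the CBC initialization vector. Below is a minimal sketch of that calling pattern reduced to a single sector; the helper name and the hard-coded 512-byte size are illustrative assumptions, not part of the patch.

    /* Hypothetical helper: encrypt one 512-byte sector through a
     * crypto_blkcipher, using the little-endian sector number as IV,
     * mirroring the per-sector loop in cryptoloop_transfer(). */
    static int encrypt_one_sector(struct crypto_blkcipher *tfm,
                                  struct scatterlist *sg_out,
                                  struct scatterlist *sg_in,
                                  sector_t sector)
    {
            struct blkcipher_desc desc = {
                    .tfm    = tfm,
                    .flags  = CRYPTO_TFM_REQ_MAY_SLEEP,
            };
            u32 iv[4] = { cpu_to_le32(sector & 0xffffffff), 0, 0, 0 };

            desc.info = iv;         /* per-sector IV for the CBC mode */
            return crypto_blkcipher_crt(tfm)->encrypt(&desc, sg_out,
                                                      sg_in, 512);
    }

Deriving the IV from the sector number is what lets the loop device encrypt and decrypt each sector independently of its neighbours.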
-static int
 cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
 {
 	return -EINVAL;
@@ -229,9 +177,9 @@ cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
 static int
 cryptoloop_release(struct loop_device *lo)
 {
-	struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
+	struct crypto_blkcipher *tfm = lo->key_data;
 	if (tfm != NULL) {
-		crypto_free_tfm(tfm);
+		crypto_free_blkcipher(tfm);
 		lo->key_data = NULL;
 		return 0;
 	}
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index c40e487d9f5c..52ea94b891f5 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -495,6 +495,21 @@ config LEGACY_PTY_COUNT
 	  When not in use, each legacy PTY occupies 12 bytes on 32-bit
 	  architectures and 24 bytes on 64-bit architectures.
 
+config BRIQ_PANEL
+	tristate 'Total Impact briQ front panel driver'
+	depends on PPC_CHRP
+	---help---
+	  The briQ is a small-footprint CHRP computer with a front-panel
+	  VFD, a tri-state LED and two switches. It is the size of a
+	  CD-ROM drive.
+
+	  If you have one and want anything shown on the VFD, you must
+	  answer Y here.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called briq_panel.
+
+	  It's safe to say N here.
+
 config PRINTER
 	tristate "Parallel printer support"
 	depends on PARPORT
@@ -596,6 +611,13 @@ config HVC_CONSOLE
 	  console. This driver allows each pSeries partition to have a console
 	  which is accessed via the HMC.
 
+config HVC_ISERIES
+	bool "iSeries Hypervisor Virtual Console support"
+	depends on PPC_ISERIES && !VIOCONS
+	select HVC_DRIVER
+	help
+	  iSeries machines support a hypervisor virtual console.
+
 config HVC_RTAS
 	bool "IBM RTAS Console support"
 	depends on PPC_RTAS
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 6e0f4469d8bb..8c6dfc621520 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
 obj-$(CONFIG_SX)		+= sx.o generic_serial.o
 obj-$(CONFIG_RIO)		+= rio/ generic_serial.o
 obj-$(CONFIG_HVC_CONSOLE)	+= hvc_vio.o hvsi.o
+obj-$(CONFIG_HVC_ISERIES)	+= hvc_iseries.o
 obj-$(CONFIG_HVC_RTAS)		+= hvc_rtas.o
 obj-$(CONFIG_HVC_DRIVER)	+= hvc_console.o
 obj-$(CONFIG_RAW_DRIVER)	+= raw.o
@@ -51,6 +52,7 @@ obj-$(CONFIG_VIOCONS) += viocons.o
 obj-$(CONFIG_VIOTAPE)		+= viotape.o
 obj-$(CONFIG_HVCS)		+= hvcs.o
 obj-$(CONFIG_SGI_MBCS)		+= mbcs.o
+obj-$(CONFIG_BRIQ_PANEL)	+= briq_panel.o
 obj-$(CONFIG_PRINTER)		+= lp.o
 obj-$(CONFIG_TIPAR)		+= tipar.o
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index 1de1b12043bf..91b71e750ee1 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -601,8 +601,8 @@ static int __devinit agp_uninorth_probe(struct pci_dev *pdev,
 		uninorth_node = of_find_node_by_name(NULL, "u3");
 	}
 	if (uninorth_node) {
-		int *revprop = (int *)
-			get_property(uninorth_node, "device-rev", NULL);
+		const int *revprop = get_property(uninorth_node,
+				"device-rev", NULL);
 		if (revprop != NULL)
 			uninorth_rev = *revprop & 0x3f;
 		of_node_put(uninorth_node);
diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
new file mode 100644
index 000000000000..a0e5eac5f33a
--- /dev/null
+++ b/drivers/char/briq_panel.c
@@ -0,0 +1,268 @@
+/*
+ * Drivers for the Total Impact PPC based computer "BRIQ"
+ * by Dr.
Karsten Jeppesen + * + */ + +#include <linux/module.h> + +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/tty.h> +#include <linux/timer.h> +#include <linux/config.h> +#include <linux/kernel.h> +#include <linux/wait.h> +#include <linux/string.h> +#include <linux/slab.h> +#include <linux/ioport.h> +#include <linux/delay.h> +#include <linux/miscdevice.h> +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/init.h> + +#include <asm/uaccess.h> +#include <asm/io.h> +#include <asm/prom.h> + +#define BRIQ_PANEL_MINOR 156 +#define BRIQ_PANEL_VFD_IOPORT 0x0390 +#define BRIQ_PANEL_LED_IOPORT 0x0398 +#define BRIQ_PANEL_VER "1.1 (04/20/2002)" +#define BRIQ_PANEL_MSG0 "Loading Linux" + +static int vfd_is_open; +static unsigned char vfd[40]; +static int vfd_cursor; +static unsigned char ledpb, led; + +static void update_vfd(void) +{ + int i; + + /* cursor home */ + outb(0x02, BRIQ_PANEL_VFD_IOPORT); + for (i=0; i<20; i++) + outb(vfd[i], BRIQ_PANEL_VFD_IOPORT + 1); + + /* cursor to next line */ + outb(0xc0, BRIQ_PANEL_VFD_IOPORT); + for (i=20; i<40; i++) + outb(vfd[i], BRIQ_PANEL_VFD_IOPORT + 1); + +} + +static void set_led(char state) +{ + if (state == 'R') + led = 0x01; + else if (state == 'G') + led = 0x02; + else if (state == 'Y') + led = 0x03; + else if (state == 'X') + led = 0x00; + outb(led, BRIQ_PANEL_LED_IOPORT); +} + +static int briq_panel_open(struct inode *ino, struct file *filep) +{ + /* enforce single access */ + if (vfd_is_open) + return -EBUSY; + vfd_is_open = 1; + + return 0; +} + +static int briq_panel_release(struct inode *ino, struct file *filep) +{ + if (!vfd_is_open) + return -ENODEV; + + vfd_is_open = 0; + + return 0; +} + +static ssize_t briq_panel_read(struct file *file, char *buf, size_t count, + loff_t *ppos) +{ + unsigned short c; + unsigned char cp; + +#if 0 /* Can't seek (pread) on this device */ + if (ppos != &file->f_pos) + return -ESPIPE; +#endif + + if (!vfd_is_open) + return -ENODEV; + + c = (inb(BRIQ_PANEL_LED_IOPORT) & 0x000c) | (ledpb & 0x0003); + set_led(' '); + /* upper button released */ + if ((!(ledpb & 0x0004)) && (c & 0x0004)) { + cp = ' '; + ledpb = c; + if (copy_to_user(buf, &cp, 1)) + return -EFAULT; + return 1; + } + /* lower button released */ + else if ((!(ledpb & 0x0008)) && (c & 0x0008)) { + cp = '\r'; + ledpb = c; + if (copy_to_user(buf, &cp, 1)) + return -EFAULT; + return 1; + } else { + ledpb = c; + return 0; + } +} + +static void scroll_vfd( void ) +{ + int i; + + for (i=0; i<20; i++) { + vfd[i] = vfd[i+20]; + vfd[i+20] = ' '; + } + vfd_cursor = 20; +} + +static ssize_t briq_panel_write(struct file *file, const char *buf, size_t len, + loff_t *ppos) +{ + size_t indx = len; + int i, esc = 0; + +#if 0 /* Can't seek (pwrite) on this device */ + if (ppos != &file->f_pos) + return -ESPIPE; +#endif + + if (!vfd_is_open) + return -EBUSY; + + for (;;) { + if (!indx) + break; + if (esc) { + set_led(*buf); + esc = 0; + } else if (*buf == 27) { + esc = 1; + } else if (*buf == 12) { + /* do a form feed */ + for (i=0; i<40; i++) + vfd[i] = ' '; + vfd_cursor = 0; + } else if (*buf == 10) { + if (vfd_cursor < 20) + vfd_cursor = 20; + else if (vfd_cursor < 40) + vfd_cursor = 40; + else if (vfd_cursor < 60) + vfd_cursor = 60; + if (vfd_cursor > 59) + scroll_vfd(); + } else { + /* just a character */ + if (vfd_cursor > 39) + scroll_vfd(); + vfd[vfd_cursor++] = *buf; + } + indx--; + buf++; + } + update_vfd(); + + return len; +} + +static struct file_operations briq_panel_fops = { + .owner = THIS_MODULE, + .read = 
briq_panel_read, + .write = briq_panel_write, + .open = briq_panel_open, + .release = briq_panel_release, +}; + +static struct miscdevice briq_panel_miscdev = { + BRIQ_PANEL_MINOR, + "briq_panel", + &briq_panel_fops +}; + +static int __init briq_panel_init(void) +{ + struct device_node *root = find_path_device("/"); + char *machine; + int i; + + machine = get_property(root, "model", NULL); + if (!machine || strncmp(machine, "TotalImpact,BRIQ-1", 18) != 0) + return -ENODEV; + + printk(KERN_INFO + "briq_panel: v%s Dr. Karsten Jeppesen (kj@totalimpact.com)\n", + BRIQ_PANEL_VER); + + if (!request_region(BRIQ_PANEL_VFD_IOPORT, 4, "BRIQ Front Panel")) + return -EBUSY; + + if (!request_region(BRIQ_PANEL_LED_IOPORT, 2, "BRIQ Front Panel")) { + release_region(BRIQ_PANEL_VFD_IOPORT, 4); + return -EBUSY; + } + ledpb = inb(BRIQ_PANEL_LED_IOPORT) & 0x000c; + + if (misc_register(&briq_panel_miscdev) < 0) { + release_region(BRIQ_PANEL_VFD_IOPORT, 4); + release_region(BRIQ_PANEL_LED_IOPORT, 2); + return -EBUSY; + } + + outb(0x38, BRIQ_PANEL_VFD_IOPORT); /* Function set */ + outb(0x01, BRIQ_PANEL_VFD_IOPORT); /* Clear display */ + outb(0x0c, BRIQ_PANEL_VFD_IOPORT); /* Display on */ + outb(0x06, BRIQ_PANEL_VFD_IOPORT); /* Entry normal */ + for (i=0; i<40; i++) + vfd[i]=' '; +#ifndef MODULE + vfd[0] = 'L'; + vfd[1] = 'o'; + vfd[2] = 'a'; + vfd[3] = 'd'; + vfd[4] = 'i'; + vfd[5] = 'n'; + vfd[6] = 'g'; + vfd[7] = ' '; + vfd[8] = '.'; + vfd[9] = '.'; + vfd[10] = '.'; +#endif /* !MODULE */ + + update_vfd(); + + return 0; +} + +static void __exit briq_panel_exit(void) +{ + misc_deregister(&briq_panel_miscdev); + release_region(BRIQ_PANEL_VFD_IOPORT, 4); + release_region(BRIQ_PANEL_LED_IOPORT, 2); +} + +module_init(briq_panel_init); +module_exit(briq_panel_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Karsten Jeppesen <karsten@jeppesens.com>"); +MODULE_DESCRIPTION("Driver for the Total Impact briQ front panel"); diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c index 613d67f1c7f0..a76d2c40dd5e 100644 --- a/drivers/char/hvc_console.c +++ b/drivers/char/hvc_console.c @@ -80,7 +80,8 @@ struct hvc_struct { struct tty_struct *tty; unsigned int count; int do_wakeup; - char outbuf[N_OUTBUF] __ALIGNED__; + char *outbuf; + int outbuf_size; int n_outbuf; uint32_t vtermno; struct hv_ops *ops; @@ -319,10 +320,8 @@ static int hvc_open(struct tty_struct *tty, struct file * filp) struct kobject *kobjp; /* Auto increments kobject reference if found. */ - if (!(hp = hvc_get_by_index(tty->index))) { - printk(KERN_WARNING "hvc_console: tty open failed, no vty associated with tty.\n"); + if (!(hp = hvc_get_by_index(tty->index))) return -ENODEV; - } spin_lock_irqsave(&hp->lock, flags); /* Check and then increment for fast path open. 
*/ @@ -505,7 +504,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count if (hp->n_outbuf > 0) hvc_push(hp); - while (count > 0 && (rsize = N_OUTBUF - hp->n_outbuf) > 0) { + while (count > 0 && (rsize = hp->outbuf_size - hp->n_outbuf) > 0) { if (rsize > count) rsize = count; memcpy(hp->outbuf + hp->n_outbuf, buf, rsize); @@ -538,7 +537,7 @@ static int hvc_write_room(struct tty_struct *tty) if (!hp) return -1; - return N_OUTBUF - hp->n_outbuf; + return hp->outbuf_size - hp->n_outbuf; } static int hvc_chars_in_buffer(struct tty_struct *tty) @@ -729,12 +728,13 @@ static struct kobj_type hvc_kobj_type = { }; struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int irq, - struct hv_ops *ops) + struct hv_ops *ops, int outbuf_size) { struct hvc_struct *hp; int i; - hp = kmalloc(sizeof(*hp), GFP_KERNEL); + hp = kmalloc(ALIGN(sizeof(*hp), sizeof(long)) + outbuf_size, + GFP_KERNEL); if (!hp) return ERR_PTR(-ENOMEM); @@ -743,6 +743,8 @@ struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int irq, hp->vtermno = vtermno; hp->irq = irq; hp->ops = ops; + hp->outbuf_size = outbuf_size; + hp->outbuf = &((char *)hp)[ALIGN(sizeof(*hp), sizeof(long))]; kobject_init(&hp->kobj); hp->kobj.ktype = &hvc_kobj_type; diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h index 96b7401319c1..8c59818050e6 100644 --- a/drivers/char/hvc_console.h +++ b/drivers/char/hvc_console.h @@ -56,7 +56,7 @@ extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops); /* register a vterm for hvc tty operation (module_init or hotplug add) */ extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int irq, - struct hv_ops *ops); + struct hv_ops *ops, int outbuf_size); /* remove a vterm from hvc tty operation (modele_exit or hotplug remove) */ extern int __devexit hvc_remove(struct hvc_struct *hp); diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c new file mode 100644 index 000000000000..4747729459c7 --- /dev/null +++ b/drivers/char/hvc_iseries.c @@ -0,0 +1,594 @@ +/* + * iSeries vio driver interface to hvc_console.c + * + * This code is based heavily on hvc_vio.c and viocons.c + * + * Copyright (C) 2006 Stephen Rothwell, IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include <stdarg.h> +#include <linux/types.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <linux/console.h> + +#include <asm/hvconsole.h> +#include <asm/vio.h> +#include <asm/prom.h> +#include <asm/iseries/vio.h> +#include <asm/iseries/hv_call.h> +#include <asm/iseries/hv_lp_config.h> +#include <asm/iseries/hv_lp_event.h> + +#include "hvc_console.h" + +#define VTTY_PORTS 10 + +static DEFINE_SPINLOCK(consolelock); +static DEFINE_SPINLOCK(consoleloglock); + +static const char hvc_driver_name[] = "hvc_console"; + +#define IN_BUF_SIZE 200 + +/* + * Our port information. + */ +static struct port_info { + HvLpIndex lp; + u64 seq; /* sequence number of last HV send */ + u64 ack; /* last ack from HV */ + struct hvc_struct *hp; + int in_start; + int in_end; + unsigned char in_buf[IN_BUF_SIZE]; +} port_info[VTTY_PORTS] = { + [ 0 ... VTTY_PORTS - 1 ] = { + .lp = HvLpIndexInvalid + } +}; + +#define viochar_is_console(pi) ((pi) == &port_info[0]) + +static struct vio_device_id hvc_driver_table[] __devinitdata = { + {"serial", "IBM,iSeries-vty"}, + { "", "" } +}; +MODULE_DEVICE_TABLE(vio, hvc_driver_table); + +static void hvlog(char *fmt, ...) +{ + int i; + unsigned long flags; + va_list args; + static char buf[256]; + + spin_lock_irqsave(&consoleloglock, flags); + va_start(args, fmt); + i = vscnprintf(buf, sizeof(buf) - 1, fmt, args); + va_end(args); + buf[i++] = '\r'; + HvCall_writeLogBuffer(buf, i); + spin_unlock_irqrestore(&consoleloglock, flags); +} + +/* + * Initialize the common fields in a charLpEvent + */ +static void init_data_event(struct viocharlpevent *viochar, HvLpIndex lp) +{ + struct HvLpEvent *hev = &viochar->event; + + memset(viochar, 0, sizeof(struct viocharlpevent)); + + hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DEFERRED_ACK | + HV_LP_EVENT_INT; + hev->xType = HvLpEvent_Type_VirtualIo; + hev->xSubtype = viomajorsubtype_chario | viochardata; + hev->xSourceLp = HvLpConfig_getLpIndex(); + hev->xTargetLp = lp; + hev->xSizeMinus1 = sizeof(struct viocharlpevent); + hev->xSourceInstanceId = viopath_sourceinst(lp); + hev->xTargetInstanceId = viopath_targetinst(lp); +} + +static int get_chars(uint32_t vtermno, char *buf, int count) +{ + struct port_info *pi; + int n = 0; + unsigned long flags; + + if (vtermno >= VTTY_PORTS) + return -EINVAL; + if (count == 0) + return 0; + + pi = &port_info[vtermno]; + spin_lock_irqsave(&consolelock, flags); + + if (pi->in_end == 0) + goto done; + + n = pi->in_end - pi->in_start; + if (n > count) + n = count; + memcpy(buf, &pi->in_buf[pi->in_start], n); + pi->in_start += n; + if (pi->in_start == pi->in_end) { + pi->in_start = 0; + pi->in_end = 0; + } +done: + spin_unlock_irqrestore(&consolelock, flags); + return n; +} + +static int put_chars(uint32_t vtermno, const char *buf, int count) +{ + struct viocharlpevent *viochar; + struct port_info *pi; + HvLpEvent_Rc hvrc; + unsigned long flags; + int sent = 0; + + if (vtermno >= VTTY_PORTS) + return -EINVAL; + + pi = &port_info[vtermno]; + + spin_lock_irqsave(&consolelock, flags); + + if (viochar_is_console(pi) && !viopath_isactive(pi->lp)) { + spin_lock_irqsave(&consoleloglock, flags); + HvCall_writeLogBuffer(buf, count); + spin_unlock_irqrestore(&consoleloglock, flags); + sent = count; + goto done; + } + + viochar = 
vio_get_event_buffer(viomajorsubtype_chario); + if (viochar == NULL) { + hvlog("\n\rviocons: Can't get viochar buffer."); + goto done; + } + + while ((count > 0) && ((pi->seq - pi->ack) < VIOCHAR_WINDOW)) { + int len; + + len = (count > VIOCHAR_MAX_DATA) ? VIOCHAR_MAX_DATA : count; + + if (viochar_is_console(pi)) { + spin_lock_irqsave(&consoleloglock, flags); + HvCall_writeLogBuffer(buf, len); + spin_unlock_irqrestore(&consoleloglock, flags); + } + + init_data_event(viochar, pi->lp); + + viochar->len = len; + viochar->event.xCorrelationToken = pi->seq++; + viochar->event.xSizeMinus1 = + offsetof(struct viocharlpevent, data) + len; + + memcpy(viochar->data, buf, len); + + hvrc = HvCallEvent_signalLpEvent(&viochar->event); + if (hvrc) + hvlog("\n\rerror sending event! return code %d\n\r", + (int)hvrc); + sent += len; + count -= len; + buf += len; + } + + vio_free_event_buffer(viomajorsubtype_chario, viochar); +done: + spin_unlock_irqrestore(&consolelock, flags); + return sent; +} + +static struct hv_ops hvc_get_put_ops = { + .get_chars = get_chars, + .put_chars = put_chars, +}; + +static int __devinit hvc_vio_probe(struct vio_dev *vdev, + const struct vio_device_id *id) +{ + struct hvc_struct *hp; + struct port_info *pi; + + /* probed with invalid parameters. */ + if (!vdev || !id) + return -EPERM; + + if (vdev->unit_address >= VTTY_PORTS) + return -ENODEV; + + pi = &port_info[vdev->unit_address]; + + hp = hvc_alloc(vdev->unit_address, vdev->irq, &hvc_get_put_ops, + VIOCHAR_MAX_DATA); + if (IS_ERR(hp)) + return PTR_ERR(hp); + pi->hp = hp; + dev_set_drvdata(&vdev->dev, pi); + + return 0; +} + +static int __devexit hvc_vio_remove(struct vio_dev *vdev) +{ + struct port_info *pi = dev_get_drvdata(&vdev->dev); + struct hvc_struct *hp = pi->hp; + + return hvc_remove(hp); +} + +static struct vio_driver hvc_vio_driver = { + .id_table = hvc_driver_table, + .probe = hvc_vio_probe, + .remove = hvc_vio_remove, + .driver = { + .name = hvc_driver_name, + .owner = THIS_MODULE, + } +}; + +static void hvc_open_event(struct HvLpEvent *event) +{ + unsigned long flags; + struct viocharlpevent *cevent = (struct viocharlpevent *)event; + u8 port = cevent->virtual_device; + struct port_info *pi; + int reject = 0; + + if (hvlpevent_is_ack(event)) { + if (port >= VTTY_PORTS) + return; + + spin_lock_irqsave(&consolelock, flags); + + pi = &port_info[port]; + if (event->xRc == HvLpEvent_Rc_Good) { + pi->seq = pi->ack = 0; + /* + * This line allows connections from the primary + * partition but once one is connected from the + * primary partition nothing short of a reboot + * of linux will allow access from the hosting + * partition again without a required iSeries fix. 
+			 */
+			pi->lp = event->xTargetLp;
+		}
+
+		spin_unlock_irqrestore(&consolelock, flags);
+		if (event->xRc != HvLpEvent_Rc_Good)
+			printk(KERN_WARNING
+			       "hvc: handle_open_event: event->xRc == (%d).\n",
+			       event->xRc);
+
+		if (event->xCorrelationToken != 0) {
+			atomic_t *aptr = (atomic_t *)event->xCorrelationToken;
+			atomic_set(aptr, 1);
+		} else
+			printk(KERN_WARNING
+			       "hvc: weird...got open ack without atomic\n");
+		return;
+	}
+
+	/* This had better require an ack, otherwise complain */
+	if (!hvlpevent_need_ack(event)) {
+		printk(KERN_WARNING "hvc: viocharopen without ack bit!\n");
+		return;
+	}
+
+	spin_lock_irqsave(&consolelock, flags);
+
+	/* Make sure this is a good virtual tty */
+	if (port >= VTTY_PORTS) {
+		event->xRc = HvLpEvent_Rc_SubtypeError;
+		cevent->subtype_result_code = viorc_openRejected;
+		/*
+		 * Flag state here since we can't printk while holding
+		 * the consolelock spinlock.
+		 */
+		reject = 1;
+	} else {
+		pi = &port_info[port];
+		if ((pi->lp != HvLpIndexInvalid) &&
+				(pi->lp != event->xSourceLp)) {
+			/*
+			 * If this tty is already connected to a different
+			 * partition, fail.
+			 */
+			event->xRc = HvLpEvent_Rc_SubtypeError;
+			cevent->subtype_result_code = viorc_openRejected;
+			reject = 2;
+		} else {
+			pi->lp = event->xSourceLp;
+			event->xRc = HvLpEvent_Rc_Good;
+			cevent->subtype_result_code = viorc_good;
+			pi->seq = pi->ack = 0;
+		}
+	}
+
+	spin_unlock_irqrestore(&consolelock, flags);
+
+	if (reject == 1)
+		printk(KERN_WARNING "hvc: open rejected: bad virtual tty.\n");
+	else if (reject == 2)
+		printk(KERN_WARNING "hvc: open rejected: console in exclusive "
+				"use by another partition.\n");
+
+	/* Return the acknowledgement */
+	HvCallEvent_ackLpEvent(event);
+}
+
+/*
+ * Handle a close charLpEvent. This should ONLY be an Interrupt because the
+ * virtual console should never actually issue a close event to the hypervisor
+ * because the virtual console never goes away. A close event coming from the
+ * hypervisor simply means that there are no client consoles connected to the
+ * virtual console.
+ */
+static void hvc_close_event(struct HvLpEvent *event)
+{
+	unsigned long flags;
+	struct viocharlpevent *cevent = (struct viocharlpevent *)event;
+	u8 port = cevent->virtual_device;
+
+	if (!hvlpevent_is_int(event)) {
+		printk(KERN_WARNING
+			"hvc: got unexpected close acknowledgement\n");
+		return;
+	}
+
+	if (port >= VTTY_PORTS) {
+		printk(KERN_WARNING
+			"hvc: close message from invalid virtual device.\n");
+		return;
+	}
+
+	/* For closes, just mark the console partition invalid */
+	spin_lock_irqsave(&consolelock, flags);
+
+	if (port_info[port].lp == event->xSourceLp)
+		port_info[port].lp = HvLpIndexInvalid;
+
+	spin_unlock_irqrestore(&consolelock, flags);
+}
+
+static void hvc_data_event(struct HvLpEvent *event)
+{
+	unsigned long flags;
+	struct viocharlpevent *cevent = (struct viocharlpevent *)event;
+	struct port_info *pi;
+	int n;
+	u8 port = cevent->virtual_device;
+
+	if (port >= VTTY_PORTS) {
+		printk(KERN_WARNING "hvc: data on invalid virtual device %d\n",
+				port);
+		return;
+	}
+	if (cevent->len == 0)
+		return;
+
+	/*
+	 * Change 05/01/2003 - Ryan Arnold: If a partition other than
+	 * the current exclusive partition tries to send us data
+	 * events then just drop them on the floor because we don't
+	 * want his stinking data. He isn't authorized to receive
+	 * data because he wasn't the first one to get the console,
+	 * therefore he shouldn't be allowed to send data either.
+	 * This will work without an iSeries fix.
+	 */
+	pi = &port_info[port];
+	if (pi->lp != event->xSourceLp)
+		return;
+
+	spin_lock_irqsave(&consolelock, flags);
+
+	n = IN_BUF_SIZE - pi->in_end;
+	if (n > cevent->len)
+		n = cevent->len;
+	if (n > 0) {
+		memcpy(&pi->in_buf[pi->in_end], cevent->data, n);
+		pi->in_end += n;
+	}
+	spin_unlock_irqrestore(&consolelock, flags);
+	if (n == 0)
+		printk(KERN_WARNING "hvc: input buffer overflow\n");
+}
+
+static void hvc_ack_event(struct HvLpEvent *event)
+{
+	struct viocharlpevent *cevent = (struct viocharlpevent *)event;
+	unsigned long flags;
+	u8 port = cevent->virtual_device;
+
+	if (port >= VTTY_PORTS) {
+		printk(KERN_WARNING "hvc: data on invalid virtual device\n");
+		return;
+	}
+
+	spin_lock_irqsave(&consolelock, flags);
+	port_info[port].ack = event->xCorrelationToken;
+	spin_unlock_irqrestore(&consolelock, flags);
+}
+
+static void hvc_config_event(struct HvLpEvent *event)
+{
+	struct viocharlpevent *cevent = (struct viocharlpevent *)event;
+
+	if (cevent->data[0] == 0x01)
+		printk(KERN_INFO "hvc: window resized to %d: %d: %d: %d\n",
+			cevent->data[1], cevent->data[2],
+			cevent->data[3], cevent->data[4]);
+	else
+		printk(KERN_WARNING "hvc: unknown config event\n");
+}
+
+static void hvc_handle_event(struct HvLpEvent *event)
+{
+	int charminor;
+
+	if (event == NULL)
+		return;
+
+	charminor = event->xSubtype & VIOMINOR_SUBTYPE_MASK;
+	switch (charminor) {
+	case viocharopen:
+		hvc_open_event(event);
+		break;
+	case viocharclose:
+		hvc_close_event(event);
+		break;
+	case viochardata:
+		hvc_data_event(event);
+		break;
+	case viocharack:
+		hvc_ack_event(event);
+		break;
+	case viocharconfig:
+		hvc_config_event(event);
+		break;
+	default:
+		if (hvlpevent_is_int(event) && hvlpevent_need_ack(event)) {
+			event->xRc = HvLpEvent_Rc_InvalidSubtype;
+			HvCallEvent_ackLpEvent(event);
+		}
+	}
+}
+
+static int send_open(HvLpIndex remoteLp, void *sem)
+{
+	return HvCallEvent_signalLpEventFast(remoteLp,
+			HvLpEvent_Type_VirtualIo,
+			viomajorsubtype_chario | viocharopen,
+			HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
+			viopath_sourceinst(remoteLp),
+			viopath_targetinst(remoteLp),
+			(u64)(unsigned long)sem, VIOVERSION << 16,
+			0, 0, 0, 0);
+}
+
+static int hvc_vio_init(void)
+{
+	atomic_t wait_flag;
+	int rc;
+
+	/* +2 for fudge */
+	rc = viopath_open(HvLpConfig_getPrimaryLpIndex(),
+			viomajorsubtype_chario, VIOCHAR_WINDOW + 2);
+	if (rc)
+		printk(KERN_WARNING "hvc: error opening to primary %d\n", rc);
+
+	if (viopath_hostLp == HvLpIndexInvalid)
+		vio_set_hostlp();
+
+	/*
+	 * And if the primary is not the same as the hosting LP, open to the
+	 * hosting lp
+	 */
+	if ((viopath_hostLp != HvLpIndexInvalid) &&
+	    (viopath_hostLp != HvLpConfig_getPrimaryLpIndex())) {
+		printk(KERN_INFO "hvc: open path to hosting (%d)\n",
+				viopath_hostLp);
+		rc = viopath_open(viopath_hostLp, viomajorsubtype_chario,
+				VIOCHAR_WINDOW + 2);	/* +2 for fudge */
+		if (rc)
+			printk(KERN_WARNING
+				"error opening to partition %d: %d\n",
+				viopath_hostLp, rc);
+	}
+
+	if (vio_setHandler(viomajorsubtype_chario, hvc_handle_event) < 0)
+		printk(KERN_WARNING
+			"hvc: error setting handler for console events!\n");
+
+	/*
+	 * First, try to open the console to the hosting lp.
+	 * Wait on a semaphore for the response.
+ */ + atomic_set(&wait_flag, 0); + if ((viopath_isactive(viopath_hostLp)) && + (send_open(viopath_hostLp, &wait_flag) == 0)) { + printk(KERN_INFO "hvc: hosting partition %d\n", viopath_hostLp); + while (atomic_read(&wait_flag) == 0) + mb(); + atomic_set(&wait_flag, 0); + } + + /* + * If we don't have an active console, try the primary + */ + if ((!viopath_isactive(port_info[0].lp)) && + (viopath_isactive(HvLpConfig_getPrimaryLpIndex())) && + (send_open(HvLpConfig_getPrimaryLpIndex(), &wait_flag) == 0)) { + printk(KERN_INFO "hvc: opening console to primary partition\n"); + while (atomic_read(&wait_flag) == 0) + mb(); + } + + /* Register as a vio device to receive callbacks */ + rc = vio_register_driver(&hvc_vio_driver); + + return rc; +} +module_init(hvc_vio_init); /* after drivers/char/hvc_console.c */ + +static void hvc_vio_exit(void) +{ + vio_unregister_driver(&hvc_vio_driver); +} +module_exit(hvc_vio_exit); + +/* the device tree order defines our numbering */ +static int hvc_find_vtys(void) +{ + struct device_node *vty; + int num_found = 0; + + for (vty = of_find_node_by_name(NULL, "vty"); vty != NULL; + vty = of_find_node_by_name(vty, "vty")) { + uint32_t *vtermno; + + /* We have statically defined space for only a certain number + * of console adapters. + */ + if ((num_found >= MAX_NR_HVC_CONSOLES) || + (num_found >= VTTY_PORTS)) + break; + + vtermno = (uint32_t *)get_property(vty, "reg", NULL); + if (!vtermno) + continue; + + if (!device_is_compatible(vty, "IBM,iSeries-vty")) + continue; + + if (num_found == 0) + add_preferred_console("hvc", 0, NULL); + hvc_instantiate(*vtermno, num_found, &hvc_get_put_ops); + ++num_found; + } + + return num_found; +} +console_initcall(hvc_find_vtys); diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c index 57106e02fd2e..4b97eaf18602 100644 --- a/drivers/char/hvc_rtas.c +++ b/drivers/char/hvc_rtas.c @@ -94,7 +94,7 @@ static int hvc_rtas_init(void) /* Allocate an hvc_struct for the console device we instantiated * earlier. Save off hp so that we can return it on exit */ - hp = hvc_alloc(hvc_rtas_cookie, NO_IRQ, &hvc_rtas_get_put_ops); + hp = hvc_alloc(hvc_rtas_cookie, NO_IRQ, &hvc_rtas_get_put_ops, 16); if (IS_ERR(hp)) return PTR_ERR(hp); diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c index 9add81ceb440..cc95941148fb 100644 --- a/drivers/char/hvc_vio.c +++ b/drivers/char/hvc_vio.c @@ -90,7 +90,8 @@ static int __devinit hvc_vio_probe(struct vio_dev *vdev, if (!vdev || !id) return -EPERM; - hp = hvc_alloc(vdev->unit_address, vdev->irq, &hvc_get_put_ops); + hp = hvc_alloc(vdev->unit_address, vdev->irq, &hvc_get_put_ops, + MAX_VIO_PUT_CHARS); if (IS_ERR(hp)) return PTR_ERR(hp); dev_set_drvdata(&vdev->dev, hp); @@ -140,7 +141,7 @@ static int hvc_find_vtys(void) for (vty = of_find_node_by_name(NULL, "vty"); vty != NULL; vty = of_find_node_by_name(vty, "vty")) { - uint32_t *vtermno; + const uint32_t *vtermno; /* We have statically defined space for only a certain number * of console adapters. 
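The hvc_alloc() change threaded through the hunks above co-allocates the output buffer with the hvc_struct, so every backend now declares how large a write its transport accepts (16 bytes for RTAS, VIOCHAR_MAX_DATA for the vio and iSeries backends). A minimal sketch of a backend registering against the new four-argument signature; the my_* names and the 16-byte size are illustrative only, not from the patch.

    /* Hypothetical backend: trivial get/put hooks plus registration
     * with the new four-argument hvc_alloc(). */
    static int my_get_chars(uint32_t vtermno, char *buf, int count)
    {
            return 0;       /* fetch up to count bytes from the hypervisor */
    }

    static int my_put_chars(uint32_t vtermno, const char *buf, int count)
    {
            return count;   /* push count bytes to the hypervisor */
    }

    static struct hv_ops my_hv_ops = {
            .get_chars = my_get_chars,
            .put_chars = my_put_chars,
    };

    static int __init my_hvc_init(void)
    {
            /* 16-byte output buffer, matching what hvc_rtas.c asks for */
            struct hvc_struct *hp = hvc_alloc(0, NO_IRQ, &my_hv_ops, 16);

            return IS_ERR(hp) ? PTR_ERR(hp) : 0;
    }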
@@ -148,7 +149,7 @@ static int hvc_find_vtys(void) if (num_found >= MAX_NR_HVC_CONSOLES) break; - vtermno = (uint32_t *)get_property(vty, "reg", NULL); + vtermno = get_property(vty, "reg", NULL); if (!vtermno) continue; diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c index 017f755632a3..a89a95fb5e40 100644 --- a/drivers/char/hvsi.c +++ b/drivers/char/hvsi.c @@ -1274,11 +1274,10 @@ static int __init hvsi_console_init(void) vty != NULL; vty = of_find_compatible_node(vty, "serial", "hvterm-protocol")) { struct hvsi_struct *hp; - uint32_t *vtermno; - uint32_t *irq; + const uint32_t *vtermno, *irq; - vtermno = (uint32_t *)get_property(vty, "reg", NULL); - irq = (uint32_t *)get_property(vty, "interrupts", NULL); + vtermno = get_property(vty, "reg", NULL); + irq = get_property(vty, "interrupts", NULL); if (!vtermno || !irq) continue; diff --git a/drivers/char/tpm/tpm_atmel.h b/drivers/char/tpm/tpm_atmel.h index 2e68eeb8a2cd..aefd683c60b7 100644 --- a/drivers/char/tpm/tpm_atmel.h +++ b/drivers/char/tpm/tpm_atmel.h @@ -37,7 +37,7 @@ static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size) { struct device_node *dn; unsigned long address, size; - unsigned int *reg; + const unsigned int *reg; int reglen; int naddrc; int nsizec; @@ -52,7 +52,7 @@ static void __iomem * atmel_get_base_addr(unsigned long *base, int *region_size) return NULL; } - reg = (unsigned int *) get_property(dn, "reg", ®len); + reg = get_property(dn, "reg", ®len); naddrc = prom_n_addr_cells(dn); nsizec = prom_n_size_cells(dn); diff --git a/drivers/char/viocons.c b/drivers/char/viocons.c index 766f7864c6c6..f3efeaf2826e 100644 --- a/drivers/char/viocons.c +++ b/drivers/char/viocons.c @@ -43,7 +43,6 @@ #include <linux/sysrq.h> #include <asm/iseries/vio.h> - #include <asm/iseries/hv_lp_event.h> #include <asm/iseries/hv_call_event.h> #include <asm/iseries/hv_lp_config.h> @@ -67,35 +66,6 @@ static int vio_sysrq_pressed; extern int sysrq_enabled; #endif -/* - * The structure of the events that flow between us and OS/400. 
You can't
- * mess with this unless the OS/400 side changes too
- */
-struct viocharlpevent {
-	struct HvLpEvent event;
-	u32 reserved;
-	u16 version;
-	u16 subtype_result_code;
-	u8 virtual_device;
-	u8 len;
-	u8 data[VIOCHAR_MAX_DATA];
-};
-
-#define VIOCHAR_WINDOW		10
-#define VIOCHAR_HIGHWATERMARK	3
-
-enum viocharsubtype {
-	viocharopen = 0x0001,
-	viocharclose = 0x0002,
-	viochardata = 0x0003,
-	viocharack = 0x0004,
-	viocharconfig = 0x0005
-};
-
-enum viochar_rc {
-	viochar_rc_ebusy = 1
-};
-
 #define VIOCHAR_NUM_BUF		16
 
 /*
@@ -1183,6 +1153,7 @@ static int __init viocons_init(void)
 		port_info[i].magic = VIOTTY_MAGIC;
 	}
 	HvCall_setLogBufferFormatAndCodepage(HvCall_LogBuffer_ASCII, 437);
+	add_preferred_console("viocons", 0, NULL);
 	register_console(&viocons_early);
 	return 0;
 }
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c
index b72b2049aaae..73c78bf75d7f 100644
--- a/drivers/char/viotape.c
+++ b/drivers/char/viotape.c
@@ -940,7 +940,6 @@ static void vioHandleTapeEvent(struct HvLpEvent *event)
 
 static int viotape_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 {
-	char tapename[32];
 	int i = vdev->unit_address;
 	int j;
@@ -956,10 +955,9 @@ static int viotape_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 			"iseries!vt%d", i);
 	class_device_create(tape_class, NULL, MKDEV(VIOTAPE_MAJOR, i | 0x80),
 			NULL, "iseries!nvt%d", i);
-	sprintf(tapename, "iseries/vt%d", i);
-	printk(VIOTAPE_KERN_INFO "tape %s is iSeries "
+	printk(VIOTAPE_KERN_INFO "tape iseries/vt%d is iSeries "
 			"resource %10.10s type %4.4s, model %3.3s\n",
-			tapename, viotape_unitinfo[i].rsrcname,
+			i, viotape_unitinfo[i].rsrcname,
 			viotape_unitinfo[i].type, viotape_unitinfo[i].model);
 	return 0;
 }
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 4263935443cc..adb554153f67 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -2,22 +2,53 @@ menu "Hardware crypto devices"
 
 config CRYPTO_DEV_PADLOCK
 	tristate "Support for VIA PadLock ACE"
-	depends on CRYPTO && X86_32
+	depends on X86_32
+	select CRYPTO_ALGAPI
+	default m
 	help
 	  Some VIA processors come with an integrated crypto engine
 	  (so called VIA PadLock ACE, Advanced Cryptography Engine)
-	  that provides instructions for very fast {en,de}cryption
-	  with some algorithms.
+	  that provides instructions for very fast cryptographic
+	  operations with supported algorithms.
 
 	  The instructions are used only when the CPU supports them.
-	  Otherwise software encryption is used. If you are unsure,
-	  say Y.
+	  Otherwise software encryption is used.
+
+	  Selecting M for this option builds a helper module, padlock.ko,
+	  that autoloads all of the PadLock algorithm drivers configured
+	  below. Don't worry if your hardware does not support some or
+	  all of them; in that case padlock.ko simply writes a single
+	  line to the kernel log noting the failure, and everything
+	  keeps working fine.
+
+	  If you are unsure, say M. The compiled module will be
+	  called padlock.ko.
 
 config CRYPTO_DEV_PADLOCK_AES
-	bool "Support for AES in VIA PadLock"
+	tristate "PadLock driver for AES algorithm"
 	depends on CRYPTO_DEV_PADLOCK
-	default y
+	select CRYPTO_BLKCIPHER
+	default m
 	help
 	  Use VIA PadLock for AES algorithm.
+
+	  Available in VIA C3 and newer CPUs.
+
+	  If unsure, say M. The compiled module will be
+	  called padlock-aes.ko.
+
+config CRYPTO_DEV_PADLOCK_SHA
+	tristate "PadLock driver for SHA1 and SHA256 algorithms"
+	depends on CRYPTO_DEV_PADLOCK
+	select CRYPTO_SHA1
+	select CRYPTO_SHA256
+	default m
+	help
+	  Use VIA PadLock for SHA1/SHA256 algorithms.
+ + Available in VIA C7 and newer processors. + + If unsure say M. The compiled module will be + called padlock-sha.ko + endmenu diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index 45426ca19a23..4c3d0ec1cf80 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -1,7 +1,3 @@ - obj-$(CONFIG_CRYPTO_DEV_PADLOCK) += padlock.o - -padlock-objs-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o - -padlock-objs := padlock-generic.o $(padlock-objs-y) - +obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o +obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index b643d71298a9..d4501dc7e650 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -43,11 +43,11 @@ * --------------------------------------------------------------------------- */ +#include <crypto/algapi.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/errno.h> -#include <linux/crypto.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <asm/byteorder.h> @@ -59,6 +59,17 @@ #define AES_EXTENDED_KEY_SIZE 64 /* in uint32_t units */ #define AES_EXTENDED_KEY_SIZE_B (AES_EXTENDED_KEY_SIZE * sizeof(uint32_t)) +/* Control word. */ +struct cword { + unsigned int __attribute__ ((__packed__)) + rounds:4, + algo:3, + keygen:1, + interm:1, + encdec:1, + ksize:2; +} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT))); + /* Whenever making any changes to the following * structure *make sure* you keep E, d_data * and cword aligned on 16 Bytes boundaries!!! */ @@ -286,9 +297,9 @@ aes_hw_extkey_available(uint8_t key_len) return 0; } -static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm) +static inline struct aes_ctx *aes_ctx_common(void *ctx) { - unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm); + unsigned long addr = (unsigned long)ctx; unsigned long align = PADLOCK_ALIGNMENT; if (align <= crypto_tfm_ctx_alignment()) @@ -296,16 +307,27 @@ static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm) return (struct aes_ctx *)ALIGN(addr, align); } +static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm) +{ + return aes_ctx_common(crypto_tfm_ctx(tfm)); +} + +static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm) +{ + return aes_ctx_common(crypto_blkcipher_ctx(tfm)); +} + static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, - unsigned int key_len, u32 *flags) + unsigned int key_len) { struct aes_ctx *ctx = aes_ctx(tfm); const __le32 *key = (const __le32 *)in_key; + u32 *flags = &tfm->crt_flags; uint32_t i, t, u, v, w; uint32_t P[AES_EXTENDED_KEY_SIZE]; uint32_t rounds; - if (key_len != 16 && key_len != 24 && key_len != 32) { + if (key_len % 8) { *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } @@ -430,80 +452,212 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, 1); } -static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out, - const u8 *in, unsigned int nbytes) +static struct crypto_alg aes_alg = { + .cra_name = "aes", + .cra_driver_name = "aes-padlock", + .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aes_ctx), + .cra_alignmask = PADLOCK_ALIGNMENT - 1, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), + .cra_u = { + .cipher = { + .cia_min_keysize = AES_MIN_KEY_SIZE, + .cia_max_keysize = 
AES_MAX_KEY_SIZE, + .cia_setkey = aes_set_key, + .cia_encrypt = aes_encrypt, + .cia_decrypt = aes_decrypt, + } + } +}; + +static int ecb_aes_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) { - struct aes_ctx *ctx = aes_ctx(desc->tfm); - padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt, - nbytes / AES_BLOCK_SIZE); - return nbytes & ~(AES_BLOCK_SIZE - 1); + struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); + struct blkcipher_walk walk; + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + while ((nbytes = walk.nbytes)) { + padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, + ctx->E, &ctx->cword.encrypt, + nbytes / AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + + return err; } -static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out, - const u8 *in, unsigned int nbytes) +static int ecb_aes_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) { - struct aes_ctx *ctx = aes_ctx(desc->tfm); - padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, - nbytes / AES_BLOCK_SIZE); - return nbytes & ~(AES_BLOCK_SIZE - 1); + struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); + struct blkcipher_walk walk; + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + while ((nbytes = walk.nbytes)) { + padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, + ctx->D, &ctx->cword.decrypt, + nbytes / AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + + return err; } -static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out, - const u8 *in, unsigned int nbytes) -{ - struct aes_ctx *ctx = aes_ctx(desc->tfm); - u8 *iv; +static struct crypto_alg ecb_aes_alg = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-padlock", + .cra_priority = PADLOCK_COMPOSITE_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aes_ctx), + .cra_alignmask = PADLOCK_ALIGNMENT - 1, + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list), + .cra_u = { + .blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aes_set_key, + .encrypt = ecb_aes_encrypt, + .decrypt = ecb_aes_decrypt, + } + } +}; - iv = padlock_xcrypt_cbc(in, out, ctx->E, desc->info, - &ctx->cword.encrypt, nbytes / AES_BLOCK_SIZE); - memcpy(desc->info, iv, AES_BLOCK_SIZE); +static int cbc_aes_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); + struct blkcipher_walk walk; + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + while ((nbytes = walk.nbytes)) { + u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr, + walk.dst.virt.addr, ctx->E, + walk.iv, &ctx->cword.encrypt, + nbytes / AES_BLOCK_SIZE); + memcpy(walk.iv, iv, AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } - return nbytes & ~(AES_BLOCK_SIZE - 1); + return err; } -static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out, - const u8 *in, unsigned int nbytes) +static int cbc_aes_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, 
struct scatterlist *src, + unsigned int nbytes) { - struct aes_ctx *ctx = aes_ctx(desc->tfm); - padlock_xcrypt_cbc(in, out, ctx->D, desc->info, &ctx->cword.decrypt, - nbytes / AES_BLOCK_SIZE); - return nbytes & ~(AES_BLOCK_SIZE - 1); + struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); + struct blkcipher_walk walk; + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + while ((nbytes = walk.nbytes)) { + padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr, + ctx->D, walk.iv, &ctx->cword.decrypt, + nbytes / AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + + return err; } -static struct crypto_alg aes_alg = { - .cra_name = "aes", - .cra_driver_name = "aes-padlock", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, +static struct crypto_alg cbc_aes_alg = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc-aes-padlock", + .cra_priority = PADLOCK_COMPOSITE_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct aes_ctx), .cra_alignmask = PADLOCK_ALIGNMENT - 1, + .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, - .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), + .cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list), .cra_u = { - .cipher = { - .cia_min_keysize = AES_MIN_KEY_SIZE, - .cia_max_keysize = AES_MAX_KEY_SIZE, - .cia_setkey = aes_set_key, - .cia_encrypt = aes_encrypt, - .cia_decrypt = aes_decrypt, - .cia_encrypt_ecb = aes_encrypt_ecb, - .cia_decrypt_ecb = aes_decrypt_ecb, - .cia_encrypt_cbc = aes_encrypt_cbc, - .cia_decrypt_cbc = aes_decrypt_cbc, + .blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = aes_set_key, + .encrypt = cbc_aes_encrypt, + .decrypt = cbc_aes_decrypt, } } }; -int __init padlock_init_aes(void) +static int __init padlock_init(void) { - printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n"); + int ret; + + if (!cpu_has_xcrypt) { + printk(KERN_ERR PFX "VIA PadLock not detected.\n"); + return -ENODEV; + } + + if (!cpu_has_xcrypt_enabled) { + printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n"); + return -ENODEV; + } gen_tabs(); - return crypto_register_alg(&aes_alg); + if ((ret = crypto_register_alg(&aes_alg))) + goto aes_err; + + if ((ret = crypto_register_alg(&ecb_aes_alg))) + goto ecb_aes_err; + + if ((ret = crypto_register_alg(&cbc_aes_alg))) + goto cbc_aes_err; + + printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n"); + +out: + return ret; + +cbc_aes_err: + crypto_unregister_alg(&ecb_aes_alg); +ecb_aes_err: + crypto_unregister_alg(&aes_alg); +aes_err: + printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n"); + goto out; } -void __exit padlock_fini_aes(void) +static void __exit padlock_fini(void) { + crypto_unregister_alg(&cbc_aes_alg); + crypto_unregister_alg(&ecb_aes_alg); crypto_unregister_alg(&aes_alg); } + +module_init(padlock_init); +module_exit(padlock_fini); + +MODULE_DESCRIPTION("VIA PadLock AES algorithm support"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Michal Ludvig"); + +MODULE_ALIAS("aes-padlock"); diff --git a/drivers/crypto/padlock-generic.c b/drivers/crypto/padlock-generic.c deleted file mode 100644 index 18cf0e8274a7..000000000000 --- a/drivers/crypto/padlock-generic.c +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Cryptographic API. - * - * Support for VIA PadLock hardware crypto engine. 
- * - * Copyright (c) 2004 Michal Ludvig <michal@logix.cz> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/crypto.h> -#include <asm/byteorder.h> -#include "padlock.h" - -static int __init -padlock_init(void) -{ - int ret = -ENOSYS; - - if (!cpu_has_xcrypt) { - printk(KERN_ERR PFX "VIA PadLock not detected.\n"); - return -ENODEV; - } - - if (!cpu_has_xcrypt_enabled) { - printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n"); - return -ENODEV; - } - -#ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES - if ((ret = padlock_init_aes())) { - printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n"); - return ret; - } -#endif - - if (ret == -ENOSYS) - printk(KERN_ERR PFX "Hmm, VIA PadLock was compiled without any algorithm.\n"); - - return ret; -} - -static void __exit -padlock_fini(void) -{ -#ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES - padlock_fini_aes(); -#endif -} - -module_init(padlock_init); -module_exit(padlock_fini); - -MODULE_DESCRIPTION("VIA PadLock crypto engine support."); -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_AUTHOR("Michal Ludvig"); diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c new file mode 100644 index 000000000000..a781fd23b607 --- /dev/null +++ b/drivers/crypto/padlock-sha.c @@ -0,0 +1,318 @@ +/* + * Cryptographic API. + * + * Support for VIA PadLock hardware crypto engine. + * + * Copyright (c) 2006 Michal Ludvig <michal@logix.cz> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + */ + +#include <crypto/algapi.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/cryptohash.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/scatterlist.h> +#include "padlock.h" + +#define SHA1_DEFAULT_FALLBACK "sha1-generic" +#define SHA1_DIGEST_SIZE 20 +#define SHA1_HMAC_BLOCK_SIZE 64 + +#define SHA256_DEFAULT_FALLBACK "sha256-generic" +#define SHA256_DIGEST_SIZE 32 +#define SHA256_HMAC_BLOCK_SIZE 64 + +struct padlock_sha_ctx { + char *data; + size_t used; + int bypass; + void (*f_sha_padlock)(const char *in, char *out, int count); + struct hash_desc fallback; +}; + +static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm) +{ + return crypto_tfm_ctx(tfm); +} + +/* We'll need aligned address on the stack */ +#define NEAREST_ALIGNED(ptr) \ + ((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT)) + +static struct crypto_alg sha1_alg, sha256_alg; + +static void padlock_sha_bypass(struct crypto_tfm *tfm) +{ + if (ctx(tfm)->bypass) + return; + + crypto_hash_init(&ctx(tfm)->fallback); + if (ctx(tfm)->data && ctx(tfm)->used) { + struct scatterlist sg; + + sg_set_buf(&sg, ctx(tfm)->data, ctx(tfm)->used); + crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length); + } + + ctx(tfm)->used = 0; + ctx(tfm)->bypass = 1; +} + +static void padlock_sha_init(struct crypto_tfm *tfm) +{ + ctx(tfm)->used = 0; + ctx(tfm)->bypass = 0; +} + +static void padlock_sha_update(struct crypto_tfm *tfm, + const uint8_t *data, unsigned int length) +{ + /* Our buffer is always one page. */ + if (unlikely(!ctx(tfm)->bypass && + (ctx(tfm)->used + length > PAGE_SIZE))) + padlock_sha_bypass(tfm); + + if (unlikely(ctx(tfm)->bypass)) { + struct scatterlist sg; + sg_set_buf(&sg, (uint8_t *)data, length); + crypto_hash_update(&ctx(tfm)->fallback, &sg, length); + return; + } + + memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length); + ctx(tfm)->used += length; +} + +static inline void padlock_output_block(uint32_t *src, + uint32_t *dst, size_t count) +{ + while (count--) + *dst++ = swab32(*src++); +} + +static void padlock_do_sha1(const char *in, char *out, int count) +{ + /* We can't store directly to *out as it may be unaligned. */ + /* BTW Don't reduce the buffer size below 128 Bytes! + * PadLock microcode needs it that big. */ + char buf[128+16]; + char *result = NEAREST_ALIGNED(buf); + + ((uint32_t *)result)[0] = 0x67452301; + ((uint32_t *)result)[1] = 0xEFCDAB89; + ((uint32_t *)result)[2] = 0x98BADCFE; + ((uint32_t *)result)[3] = 0x10325476; + ((uint32_t *)result)[4] = 0xC3D2E1F0; + + asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */ + : "+S"(in), "+D"(result) + : "c"(count), "a"(0)); + + padlock_output_block((uint32_t *)result, (uint32_t *)out, 5); +} + +static void padlock_do_sha256(const char *in, char *out, int count) +{ + /* We can't store directly to *out as it may be unaligned. */ + /* BTW Don't reduce the buffer size below 128 Bytes! + * PadLock microcode needs it that big. 
*/ + char buf[128+16]; + char *result = NEAREST_ALIGNED(buf); + + ((uint32_t *)result)[0] = 0x6A09E667; + ((uint32_t *)result)[1] = 0xBB67AE85; + ((uint32_t *)result)[2] = 0x3C6EF372; + ((uint32_t *)result)[3] = 0xA54FF53A; + ((uint32_t *)result)[4] = 0x510E527F; + ((uint32_t *)result)[5] = 0x9B05688C; + ((uint32_t *)result)[6] = 0x1F83D9AB; + ((uint32_t *)result)[7] = 0x5BE0CD19; + + asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */ + : "+S"(in), "+D"(result) + : "c"(count), "a"(0)); + + padlock_output_block((uint32_t *)result, (uint32_t *)out, 8); +} + +static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out) +{ + if (unlikely(ctx(tfm)->bypass)) { + crypto_hash_final(&ctx(tfm)->fallback, out); + ctx(tfm)->bypass = 0; + return; + } + + /* Pass the input buffer to PadLock microcode... */ + ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used); + + ctx(tfm)->used = 0; +} + +static int padlock_cra_init(struct crypto_tfm *tfm) +{ + const char *fallback_driver_name = tfm->__crt_alg->cra_name; + struct crypto_hash *fallback_tfm; + + /* For now we'll allocate one page. This + * could eventually be configurable one day. */ + ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL); + if (!ctx(tfm)->data) + return -ENOMEM; + + /* Allocate a fallback and abort if it failed. */ + fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0, + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(fallback_tfm)) { + printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n", + fallback_driver_name); + free_page((unsigned long)(ctx(tfm)->data)); + return PTR_ERR(fallback_tfm); + } + + ctx(tfm)->fallback.tfm = fallback_tfm; + return 0; +} + +static int padlock_sha1_cra_init(struct crypto_tfm *tfm) +{ + ctx(tfm)->f_sha_padlock = padlock_do_sha1; + + return padlock_cra_init(tfm); +} + +static int padlock_sha256_cra_init(struct crypto_tfm *tfm) +{ + ctx(tfm)->f_sha_padlock = padlock_do_sha256; + + return padlock_cra_init(tfm); +} + +static void padlock_cra_exit(struct crypto_tfm *tfm) +{ + if (ctx(tfm)->data) { + free_page((unsigned long)(ctx(tfm)->data)); + ctx(tfm)->data = NULL; + } + + crypto_free_hash(ctx(tfm)->fallback.tfm); + ctx(tfm)->fallback.tfm = NULL; +} + +static struct crypto_alg sha1_alg = { + .cra_name = "sha1", + .cra_driver_name = "sha1-padlock", + .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_DIGEST | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA1_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct padlock_sha_ctx), + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(sha1_alg.cra_list), + .cra_init = padlock_sha1_cra_init, + .cra_exit = padlock_cra_exit, + .cra_u = { + .digest = { + .dia_digestsize = SHA1_DIGEST_SIZE, + .dia_init = padlock_sha_init, + .dia_update = padlock_sha_update, + .dia_final = padlock_sha_final, + } + } +}; + +static struct crypto_alg sha256_alg = { + .cra_name = "sha256", + .cra_driver_name = "sha256-padlock", + .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_DIGEST | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA256_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct padlock_sha_ctx), + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(sha256_alg.cra_list), + .cra_init = padlock_sha256_cra_init, + .cra_exit = padlock_cra_exit, + .cra_u = { + .digest = { + .dia_digestsize = SHA256_DIGEST_SIZE, + .dia_init = padlock_sha_init, + .dia_update = padlock_sha_update, + .dia_final = padlock_sha_final, + } + } +}; + +static void __init padlock_sha_check_fallbacks(void) +{ + if 
(!crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK)) + printk(KERN_WARNING PFX + "Couldn't load fallback module for sha1.\n"); + + if (!crypto_has_hash("sha256", 0, CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK)) + printk(KERN_WARNING PFX + "Couldn't load fallback module for sha256.\n"); +} + +static int __init padlock_init(void) +{ + int rc = -ENODEV; + + if (!cpu_has_phe) { + printk(KERN_ERR PFX "VIA PadLock Hash Engine not detected.\n"); + return -ENODEV; + } + + if (!cpu_has_phe_enabled) { + printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n"); + return -ENODEV; + } + + padlock_sha_check_fallbacks(); + + rc = crypto_register_alg(&sha1_alg); + if (rc) + goto out; + + rc = crypto_register_alg(&sha256_alg); + if (rc) + goto out_unreg1; + + printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n"); + + return 0; + +out_unreg1: + crypto_unregister_alg(&sha1_alg); +out: + printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n"); + return rc; +} + +static void __exit padlock_fini(void) +{ + crypto_unregister_alg(&sha1_alg); + crypto_unregister_alg(&sha256_alg); +} + +module_init(padlock_init); +module_exit(padlock_fini); + +MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support."); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Michal Ludvig"); + +MODULE_ALIAS("sha1-padlock"); +MODULE_ALIAS("sha256-padlock"); diff --git a/drivers/crypto/padlock.c b/drivers/crypto/padlock.c new file mode 100644 index 000000000000..d6d7dd5bb98c --- /dev/null +++ b/drivers/crypto/padlock.c @@ -0,0 +1,58 @@ +/* + * Cryptographic API. + * + * Support for VIA PadLock hardware crypto engine. + * + * Copyright (c) 2006 Michal Ludvig <michal@logix.cz> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/crypto.h> +#include <linux/cryptohash.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/scatterlist.h> +#include "padlock.h" + +static int __init padlock_init(void) +{ + int success = 0; + + if (crypto_has_cipher("aes-padlock", 0, 0)) + success++; + + if (crypto_has_hash("sha1-padlock", 0, 0)) + success++; + + if (crypto_has_hash("sha256-padlock", 0, 0)) + success++; + + if (!success) { + printk(KERN_WARNING PFX "No VIA PadLock drivers have been loaded.\n"); + return -ENODEV; + } + + printk(KERN_NOTICE PFX "%d drivers are available.\n", success); + + return 0; +} + +static void __exit padlock_fini(void) +{ +} + +module_init(padlock_init); +module_exit(padlock_fini); + +MODULE_DESCRIPTION("Load all configured PadLock algorithms."); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Michal Ludvig"); + diff --git a/drivers/crypto/padlock.h b/drivers/crypto/padlock.h index b78489bc298a..b728e4518bd1 100644 --- a/drivers/crypto/padlock.h +++ b/drivers/crypto/padlock.h @@ -15,22 +15,9 @@ #define PADLOCK_ALIGNMENT 16 -/* Control word. 
*/ -struct cword { - unsigned int __attribute__ ((__packed__)) - rounds:4, - algo:3, - keygen:1, - interm:1, - encdec:1, - ksize:2; -} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT))); - #define PFX "padlock: " -#ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES -int padlock_init_aes(void); -void padlock_fini_aes(void); -#endif +#define PADLOCK_CRA_PRIORITY 300 +#define PADLOCK_COMPOSITE_PRIORITY 400 #endif /* _CRYPTO_PADLOCK_H */ diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c index 53bb43593863..d658d9107955 100644 --- a/drivers/i2c/busses/i2c-powermac.c +++ b/drivers/i2c/busses/i2c-powermac.c @@ -207,7 +207,8 @@ static int i2c_powermac_probe(struct device *dev) struct pmac_i2c_bus *bus = dev->platform_data; struct device_node *parent = NULL; struct i2c_adapter *adapter; - char name[32], *basename; + char name[32]; + const char *basename; int rc; if (bus == NULL) diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c index ebf961f1718d..996c694341bc 100644 --- a/drivers/ide/ppc/pmac.c +++ b/drivers/ide/ppc/pmac.c @@ -1154,7 +1154,7 @@ static int pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif) { struct device_node *np = pmif->node; - int *bidp; + const int *bidp; pmif->cable_80 = 0; pmif->broken_dma = pmif->broken_dma_warn = 0; @@ -1176,14 +1176,14 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif) pmif->broken_dma = 1; } - bidp = (int *)get_property(np, "AAPL,bus-id", NULL); + bidp = get_property(np, "AAPL,bus-id", NULL); pmif->aapl_bus_id = bidp ? *bidp : 0; /* Get cable type from device-tree */ if (pmif->kind == controller_kl_ata4 || pmif->kind == controller_un_ata6 || pmif->kind == controller_k2_ata6 || pmif->kind == controller_sh_ata6) { - char* cable = get_property(np, "cable-type", NULL); + const char* cable = get_property(np, "cable-type", NULL); if (cable && !strncmp(cable, "80-", 3)) pmif->cable_80 = 1; } @@ -1326,7 +1326,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match) if (macio_irq_count(mdev) == 0) { printk(KERN_WARNING "ide%d: no intrs for device %s, using 13\n", i, mdev->ofdev.node->full_name); - irq = 13; + irq = irq_create_mapping(NULL, 13); } else irq = macio_irq(mdev, 0); diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index 69a53d476b5b..9edfacee7d84 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig @@ -14,7 +14,7 @@ config INFINIBAND_USER_MAD ---help--- Userspace InfiniBand Management Datagram (MAD) support. This is the kernel side of the userspace MAD support, which allows - userspace processes to send and receive MADs. You will also + userspace processes to send and receive MADs. You will also need libibumad from <http://www.openib.org>. 
config INFINIBAND_USER_ACCESS @@ -36,6 +36,8 @@ config INFINIBAND_ADDR_TRANS source "drivers/infiniband/hw/mthca/Kconfig" source "drivers/infiniband/hw/ipath/Kconfig" +source "drivers/infiniband/hw/ehca/Kconfig" +source "drivers/infiniband/hw/amso1100/Kconfig" source "drivers/infiniband/ulp/ipoib/Kconfig" diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile index c7ff58c1d0e5..2b5d1098ef45 100644 --- a/drivers/infiniband/Makefile +++ b/drivers/infiniband/Makefile @@ -1,6 +1,8 @@ obj-$(CONFIG_INFINIBAND) += core/ obj-$(CONFIG_INFINIBAND_MTHCA) += hw/mthca/ -obj-$(CONFIG_IPATH_CORE) += hw/ipath/ +obj-$(CONFIG_INFINIBAND_IPATH) += hw/ipath/ +obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/ +obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/ obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/ obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/ obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/ diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile index 68e73ec2d1f8..163d991eb8c9 100644 --- a/drivers/infiniband/core/Makefile +++ b/drivers/infiniband/core/Makefile @@ -1,7 +1,7 @@ infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := ib_addr.o rdma_cm.o obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \ - ib_cm.o $(infiniband-y) + ib_cm.o iw_cm.o $(infiniband-y) obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o @@ -14,6 +14,8 @@ ib_sa-y := sa_query.o ib_cm-y := cm.o +iw_cm-y := iwcm.o + rdma_cm-y := cma.o ib_addr-y := addr.o diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 1205e8027829..9cbf09e2052f 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -61,12 +61,15 @@ static LIST_HEAD(req_list); static DECLARE_WORK(work, process_req, NULL); static struct workqueue_struct *addr_wq; -static int copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev, - unsigned char *dst_dev_addr) +int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev, + const unsigned char *dst_dev_addr) { switch (dev->type) { case ARPHRD_INFINIBAND: - dev_addr->dev_type = IB_NODE_CA; + dev_addr->dev_type = RDMA_NODE_IB_CA; + break; + case ARPHRD_ETHER: + dev_addr->dev_type = RDMA_NODE_RNIC; break; default: return -EADDRNOTAVAIL; @@ -78,6 +81,7 @@ static int copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev, memcpy(dev_addr->dst_dev_addr, dst_dev_addr, MAX_ADDR_LEN); return 0; } +EXPORT_SYMBOL(rdma_copy_addr); int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr) { @@ -89,7 +93,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr) if (!dev) return -EADDRNOTAVAIL; - ret = copy_addr(dev_addr, dev, NULL); + ret = rdma_copy_addr(dev_addr, dev, NULL); dev_put(dev); return ret; } @@ -161,7 +165,7 @@ static int addr_resolve_remote(struct sockaddr_in *src_in, /* If the device does ARP internally, return 'done' */ if (rt->idev->dev->flags & IFF_NOARP) { - copy_addr(addr, rt->idev->dev, NULL); + rdma_copy_addr(addr, rt->idev->dev, NULL); goto put; } @@ -181,7 +185,7 @@ static int addr_resolve_remote(struct sockaddr_in *src_in, src_in->sin_addr.s_addr = rt->rt_src; } - ret = copy_addr(addr, neigh->dev, neigh->ha); + ret = rdma_copy_addr(addr, neigh->dev, neigh->ha); release: neigh_release(neigh); put: @@ -245,7 +249,7 @@ static int addr_resolve_local(struct sockaddr_in *src_in, if (ZERONET(src_ip)) { src_in->sin_family = dst_in->sin_family; src_in->sin_addr.s_addr = dst_ip; - ret = copy_addr(addr, dev, dev->dev_addr); 
+ ret = rdma_copy_addr(addr, dev, dev->dev_addr); } else if (LOOPBACK(src_ip)) { ret = rdma_translate_ip((struct sockaddr *)dst_in, addr); if (!ret) @@ -327,10 +331,10 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr) } EXPORT_SYMBOL(rdma_addr_cancel); -static int netevent_callback(struct notifier_block *self, unsigned long event, +static int netevent_callback(struct notifier_block *self, unsigned long event, void *ctx) { - if (event == NETEVENT_NEIGH_UPDATE) { + if (event == NETEVENT_NEIGH_UPDATE) { struct neighbour *neigh = ctx; if (neigh->dev->type == ARPHRD_INFINIBAND && diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 75313ade2e0d..20e9f64e67a6 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -62,12 +62,13 @@ struct ib_update_work { static inline int start_port(struct ib_device *device) { - return device->node_type == IB_NODE_SWITCH ? 0 : 1; + return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1; } static inline int end_port(struct ib_device *device) { - return device->node_type == IB_NODE_SWITCH ? 0 : device->phys_port_cnt; + return (device->node_type == RDMA_NODE_IB_SWITCH) ? + 0 : device->phys_port_cnt; } int ib_get_cached_gid(struct ib_device *device, diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 0de335b7bfc2..f35fcc4c0638 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved. + * Copyright (c) 2004-2006 Intel Corporation. All rights reserved. * Copyright (c) 2004 Topspin Corporation. All rights reserved. * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 
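The cm.c hunks that follow stop handing out raw, sequential IDR keys as wire-visible communication IDs: a random operand is drawn once at module init and XORed into every local_id on allocation, then XORed back out on lookup and removal. A minimal sketch of the scheme, assuming an idr-backed table as in this file (helper names are illustrative, not from the patch):

	static u32 random_id_operand;	/* set once via get_random_bytes() */

	static u32 publish_local_id(int idr_key)
	{
		/* value advertised on the wire */
		return (u32) idr_key ^ random_id_operand;
	}

	static int idr_key_of(u32 local_id)
	{
		/* recover the key for idr_find()/idr_remove() */
		return (int) (local_id ^ random_id_operand);
	}

The IDR bookkeeping itself is unchanged; only the externally visible IDs become hard to guess.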
@@ -41,6 +41,7 @@ #include <linux/idr.h> #include <linux/interrupt.h> #include <linux/pci.h> +#include <linux/random.h> #include <linux/rbtree.h> #include <linux/spinlock.h> #include <linux/workqueue.h> @@ -73,6 +74,7 @@ static struct ib_cm { struct rb_root remote_id_table; struct rb_root remote_sidr_table; struct idr local_id_table; + __be32 random_id_operand; struct workqueue_struct *wq; } cm; @@ -177,7 +179,7 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv, if (IS_ERR(ah)) return PTR_ERR(ah); - m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, + m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, cm_id_priv->av.pkey_index, 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, GFP_ATOMIC); @@ -299,15 +301,17 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av) static int cm_alloc_id(struct cm_id_private *cm_id_priv) { unsigned long flags; - int ret; + int ret, id; static int next_id; do { spin_lock_irqsave(&cm.lock, flags); - ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, next_id++, - (__force int *) &cm_id_priv->id.local_id); + ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, + next_id++, &id); spin_unlock_irqrestore(&cm.lock, flags); } while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) ); + + cm_id_priv->id.local_id = (__force __be32) (id ^ cm.random_id_operand); return ret; } @@ -316,7 +320,8 @@ static void cm_free_id(__be32 local_id) unsigned long flags; spin_lock_irqsave(&cm.lock, flags); - idr_remove(&cm.local_id_table, (__force int) local_id); + idr_remove(&cm.local_id_table, + (__force int) (local_id ^ cm.random_id_operand)); spin_unlock_irqrestore(&cm.lock, flags); } @@ -324,7 +329,8 @@ static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id) { struct cm_id_private *cm_id_priv; - cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id); + cm_id_priv = idr_find(&cm.local_id_table, + (__force int) (local_id ^ cm.random_id_operand)); if (cm_id_priv) { if (cm_id_priv->id.remote_id == remote_id) atomic_inc(&cm_id_priv->refcount); @@ -679,6 +685,8 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv) { int wait_time; + cm_cleanup_timewait(cm_id_priv->timewait_info); + /* * The cm_id could be destroyed by the user before we exit timewait. 
* To protect against this, we search for the cm_id after exiting @@ -1354,7 +1362,7 @@ static int cm_req_handler(struct cm_work *work) id.local_id); if (IS_ERR(cm_id_priv->timewait_info)) { ret = PTR_ERR(cm_id_priv->timewait_info); - goto error1; + goto destroy; } cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id; cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid; @@ -1363,7 +1371,8 @@ static int cm_req_handler(struct cm_work *work) listen_cm_id_priv = cm_match_req(work, cm_id_priv); if (!listen_cm_id_priv) { ret = -EINVAL; - goto error2; + kfree(cm_id_priv->timewait_info); + goto destroy; } cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler; @@ -1373,12 +1382,22 @@ static int cm_req_handler(struct cm_work *work) cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]); ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av); - if (ret) - goto error3; + if (ret) { + ib_get_cached_gid(work->port->cm_dev->device, + work->port->port_num, 0, &work->path[0].sgid); + ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID, + &work->path[0].sgid, sizeof work->path[0].sgid, + NULL, 0); + goto rejected; + } if (req_msg->alt_local_lid) { ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av); - if (ret) - goto error3; + if (ret) { + ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID, + &work->path[0].sgid, + sizeof work->path[0].sgid, NULL, 0); + goto rejected; + } } cm_id_priv->tid = req_msg->hdr.tid; cm_id_priv->timeout_ms = cm_convert_to_ms( @@ -1400,12 +1419,11 @@ static int cm_req_handler(struct cm_work *work) cm_deref_id(listen_cm_id_priv); return 0; -error3: atomic_dec(&cm_id_priv->refcount); +rejected: + atomic_dec(&cm_id_priv->refcount); cm_deref_id(listen_cm_id_priv); - cm_cleanup_timewait(cm_id_priv->timewait_info); -error2: kfree(cm_id_priv->timewait_info); - cm_id_priv->timewait_info = NULL; -error1: ib_destroy_cm_id(&cm_id_priv->id); +destroy: + ib_destroy_cm_id(cm_id); return ret; } @@ -2072,8 +2090,9 @@ static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg) spin_unlock_irqrestore(&cm.lock, flags); return NULL; } - cm_id_priv = idr_find(&cm.local_id_table, - (__force int) timewait_info->work.local_id); + cm_id_priv = idr_find(&cm.local_id_table, (__force int) + (timewait_info->work.local_id ^ + cm.random_id_operand)); if (cm_id_priv) { if (cm_id_priv->id.remote_id == remote_id) atomic_inc(&cm_id_priv->refcount); @@ -3125,7 +3144,8 @@ static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv, qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE; if (cm_id_priv->responder_resources) - qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ; + qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ | + IB_ACCESS_REMOTE_ATOMIC; qp_attr->pkey_index = cm_id_priv->av.pkey_index; qp_attr->port_num = cm_id_priv->av.port->port_num; ret = 0; @@ -3262,6 +3282,9 @@ static void cm_add_one(struct ib_device *device) int ret; u8 i; + if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) + return; + cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) * device->phys_port_cnt, GFP_KERNEL); if (!cm_dev) @@ -3349,6 +3372,7 @@ static int __init ib_cm_init(void) cm.remote_qp_table = RB_ROOT; cm.remote_sidr_table = RB_ROOT; idr_init(&cm.local_id_table); + get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand); idr_pre_get(&cm.local_id_table, GFP_KERNEL); cm.wq = create_workqueue("ib_cm"); diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 5d625a81193f..1178bd434d1b 
100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -35,6 +35,7 @@ #include <linux/mutex.h> #include <linux/random.h> #include <linux/idr.h> +#include <linux/inetdevice.h> #include <net/tcp.h> @@ -43,6 +44,7 @@ #include <rdma/ib_cache.h> #include <rdma/ib_cm.h> #include <rdma/ib_sa.h> +#include <rdma/iw_cm.h> MODULE_AUTHOR("Sean Hefty"); MODULE_DESCRIPTION("Generic RDMA CM Agent"); @@ -60,6 +62,7 @@ static struct ib_client cma_client = { .remove = cma_remove_one }; +static struct ib_sa_client sa_client; static LIST_HEAD(dev_list); static LIST_HEAD(listen_any_list); static DEFINE_MUTEX(lock); @@ -124,6 +127,7 @@ struct rdma_id_private { int query_id; union { struct ib_cm_id *ib; + struct iw_cm_id *iw; } cm_id; u32 seq_num; @@ -259,15 +263,24 @@ static void cma_detach_from_dev(struct rdma_id_private *id_priv) id_priv->cma_dev = NULL; } -static int cma_acquire_ib_dev(struct rdma_id_private *id_priv) +static int cma_acquire_dev(struct rdma_id_private *id_priv) { + enum rdma_node_type dev_type = id_priv->id.route.addr.dev_addr.dev_type; struct cma_device *cma_dev; union ib_gid gid; int ret = -ENODEV; - ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid), + switch (rdma_node_get_transport(dev_type)) { + case RDMA_TRANSPORT_IB: + ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); + break; + case RDMA_TRANSPORT_IWARP: + iw_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); + break; + default: + return -ENODEV; + } - mutex_lock(&lock); list_for_each_entry(cma_dev, &dev_list, list) { ret = ib_find_cached_gid(cma_dev->device, &gid, &id_priv->id.port_num, NULL); @@ -276,20 +289,9 @@ static int cma_acquire_ib_dev(struct rdma_id_private *id_priv) break; } } - mutex_unlock(&lock); return ret; } -static int cma_acquire_dev(struct rdma_id_private *id_priv) -{ - switch (id_priv->id.route.addr.dev_addr.dev_type) { - case IB_NODE_CA: - return cma_acquire_ib_dev(id_priv); - default: - return -ENODEV; - } -} - static void cma_deref_id(struct rdma_id_private *id_priv) { if (atomic_dec_and_test(&id_priv->refcount)) @@ -347,6 +349,16 @@ static int cma_init_ib_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) IB_QP_PKEY_INDEX | IB_QP_PORT); } +static int cma_init_iw_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) +{ + struct ib_qp_attr qp_attr; + + qp_attr.qp_state = IB_QPS_INIT; + qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE; + + return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS); +} + int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd, struct ib_qp_init_attr *qp_init_attr) { @@ -362,10 +374,13 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd, if (IS_ERR(qp)) return PTR_ERR(qp); - switch (id->device->node_type) { - case IB_NODE_CA: + switch (rdma_node_get_transport(id->device->node_type)) { + case RDMA_TRANSPORT_IB: ret = cma_init_ib_qp(id_priv, qp); break; + case RDMA_TRANSPORT_IWARP: + ret = cma_init_iw_qp(id_priv, qp); + break; default: ret = -ENOSYS; break; @@ -451,13 +466,17 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, int ret; id_priv = container_of(id, struct rdma_id_private, id); - switch (id_priv->id.device->node_type) { - case IB_NODE_CA: + switch (rdma_node_get_transport(id_priv->id.device->node_type)) { + case RDMA_TRANSPORT_IB: ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, qp_attr_mask); if (qp_attr->qp_state == IB_QPS_RTR) qp_attr->rq_psn = id_priv->seq_num; break; + case RDMA_TRANSPORT_IWARP: + ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, + qp_attr_mask); + 
break; default: ret = -ENOSYS; break; @@ -590,8 +609,8 @@ static int cma_notify_user(struct rdma_id_private *id_priv, static void cma_cancel_route(struct rdma_id_private *id_priv) { - switch (id_priv->id.device->node_type) { - case IB_NODE_CA: + switch (rdma_node_get_transport(id_priv->id.device->node_type)) { + case RDMA_TRANSPORT_IB: if (id_priv->query) ib_sa_cancel_query(id_priv->query_id, id_priv->query); break; @@ -611,11 +630,15 @@ static void cma_destroy_listen(struct rdma_id_private *id_priv) cma_exch(id_priv, CMA_DESTROYING); if (id_priv->cma_dev) { - switch (id_priv->id.device->node_type) { - case IB_NODE_CA: - if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib)) + switch (rdma_node_get_transport(id_priv->id.device->node_type)) { + case RDMA_TRANSPORT_IB: + if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib)) ib_destroy_cm_id(id_priv->cm_id.ib); break; + case RDMA_TRANSPORT_IWARP: + if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw)) + iw_destroy_cm_id(id_priv->cm_id.iw); + break; default: break; } @@ -689,19 +712,25 @@ void rdma_destroy_id(struct rdma_cm_id *id) state = cma_exch(id_priv, CMA_DESTROYING); cma_cancel_operation(id_priv, state); + mutex_lock(&lock); if (id_priv->cma_dev) { - switch (id->device->node_type) { - case IB_NODE_CA: - if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib)) + mutex_unlock(&lock); + switch (rdma_node_get_transport(id->device->node_type)) { + case RDMA_TRANSPORT_IB: + if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib)) ib_destroy_cm_id(id_priv->cm_id.ib); break; + case RDMA_TRANSPORT_IWARP: + if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw)) + iw_destroy_cm_id(id_priv->cm_id.iw); + break; default: break; } - mutex_lock(&lock); + mutex_lock(&lock); cma_detach_from_dev(id_priv); - mutex_unlock(&lock); } + mutex_unlock(&lock); cma_release_port(id_priv); cma_deref_id(id_priv); @@ -869,7 +898,7 @@ static struct rdma_id_private *cma_new_id(struct rdma_cm_id *listen_id, ib_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid); ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey)); - rt->addr.dev_addr.dev_type = IB_NODE_CA; + rt->addr.dev_addr.dev_type = RDMA_NODE_IB_CA; id_priv = container_of(id, struct rdma_id_private, id); id_priv->state = CMA_CONNECT; @@ -898,7 +927,9 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) } atomic_inc(&conn_id->dev_remove); - ret = cma_acquire_ib_dev(conn_id); + mutex_lock(&lock); + ret = cma_acquire_dev(conn_id); + mutex_unlock(&lock); if (ret) { ret = -ENODEV; cma_release_remove(conn_id); @@ -982,6 +1013,130 @@ static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr, } } +static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) +{ + struct rdma_id_private *id_priv = iw_id->context; + enum rdma_cm_event_type event = 0; + struct sockaddr_in *sin; + int ret = 0; + + atomic_inc(&id_priv->dev_remove); + + switch (iw_event->event) { + case IW_CM_EVENT_CLOSE: + event = RDMA_CM_EVENT_DISCONNECTED; + break; + case IW_CM_EVENT_CONNECT_REPLY: + sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr; + *sin = iw_event->local_addr; + sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr; + *sin = iw_event->remote_addr; + if (iw_event->status) + event = RDMA_CM_EVENT_REJECTED; + else + event = RDMA_CM_EVENT_ESTABLISHED; + break; + case IW_CM_EVENT_ESTABLISHED: + event = RDMA_CM_EVENT_ESTABLISHED; + break; + default: + BUG_ON(1); + } + + ret = 
cma_notify_user(id_priv, event, iw_event->status, + iw_event->private_data, + iw_event->private_data_len); + if (ret) { + /* Destroy the CM ID by returning a non-zero value. */ + id_priv->cm_id.iw = NULL; + cma_exch(id_priv, CMA_DESTROYING); + cma_release_remove(id_priv); + rdma_destroy_id(&id_priv->id); + return ret; + } + + cma_release_remove(id_priv); + return ret; +} + +static int iw_conn_req_handler(struct iw_cm_id *cm_id, + struct iw_cm_event *iw_event) +{ + struct rdma_cm_id *new_cm_id; + struct rdma_id_private *listen_id, *conn_id; + struct sockaddr_in *sin; + struct net_device *dev = NULL; + int ret; + + listen_id = cm_id->context; + atomic_inc(&listen_id->dev_remove); + if (!cma_comp(listen_id, CMA_LISTEN)) { + ret = -ECONNABORTED; + goto out; + } + + /* Create a new RDMA id for the new IW CM ID */ + new_cm_id = rdma_create_id(listen_id->id.event_handler, + listen_id->id.context, + RDMA_PS_TCP); + if (!new_cm_id) { + ret = -ENOMEM; + goto out; + } + conn_id = container_of(new_cm_id, struct rdma_id_private, id); + atomic_inc(&conn_id->dev_remove); + conn_id->state = CMA_CONNECT; + + dev = ip_dev_find(iw_event->local_addr.sin_addr.s_addr); + if (!dev) { + ret = -EADDRNOTAVAIL; + cma_release_remove(conn_id); + rdma_destroy_id(new_cm_id); + goto out; + } + ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL); + if (ret) { + cma_release_remove(conn_id); + rdma_destroy_id(new_cm_id); + goto out; + } + + mutex_lock(&lock); + ret = cma_acquire_dev(conn_id); + mutex_unlock(&lock); + if (ret) { + cma_release_remove(conn_id); + rdma_destroy_id(new_cm_id); + goto out; + } + + conn_id->cm_id.iw = cm_id; + cm_id->context = conn_id; + cm_id->cm_handler = cma_iw_handler; + + sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr; + *sin = iw_event->local_addr; + sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr; + *sin = iw_event->remote_addr; + + ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0, + iw_event->private_data, + iw_event->private_data_len); + if (ret) { + /* User wants to destroy the CM ID */ + conn_id->cm_id.iw = NULL; + cma_exch(conn_id, CMA_DESTROYING); + cma_release_remove(conn_id); + rdma_destroy_id(&conn_id->id); + } + +out: + if (dev) + dev_put(dev); + cma_release_remove(listen_id); + return ret; +} + static int cma_ib_listen(struct rdma_id_private *id_priv) { struct ib_cm_compare_data compare_data; @@ -1011,6 +1166,30 @@ static int cma_ib_listen(struct rdma_id_private *id_priv) return ret; } +static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog) +{ + int ret; + struct sockaddr_in *sin; + + id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device, + iw_conn_req_handler, + id_priv); + if (IS_ERR(id_priv->cm_id.iw)) + return PTR_ERR(id_priv->cm_id.iw); + + sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr; + id_priv->cm_id.iw->local_addr = *sin; + + ret = iw_cm_listen(id_priv->cm_id.iw, backlog); + + if (ret) { + iw_destroy_cm_id(id_priv->cm_id.iw); + id_priv->cm_id.iw = NULL; + } + + return ret; +} + static int cma_listen_handler(struct rdma_cm_id *id, struct rdma_cm_event *event) { @@ -1087,12 +1266,17 @@ int rdma_listen(struct rdma_cm_id *id, int backlog) id_priv->backlog = backlog; if (id->device) { - switch (id->device->node_type) { - case IB_NODE_CA: + switch (rdma_node_get_transport(id->device->node_type)) { + case RDMA_TRANSPORT_IB: ret = cma_ib_listen(id_priv); if (ret) goto err; break; + case RDMA_TRANSPORT_IWARP: + ret = cma_iw_listen(id_priv, backlog); + if (ret) + goto err; + break; default: ret 
= -ENOSYS; goto err; @@ -1140,7 +1324,7 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms, path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr)); path_rec.numb_path = 1; - id_priv->query_id = ib_sa_path_rec_get(id_priv->id.device, + id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device, id_priv->id.port_num, &path_rec, IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH, @@ -1231,6 +1415,23 @@ err: } EXPORT_SYMBOL(rdma_set_ib_paths); +static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms) +{ + struct cma_work *work; + + work = kzalloc(sizeof *work, GFP_KERNEL); + if (!work) + return -ENOMEM; + + work->id = id_priv; + INIT_WORK(&work->work, cma_work_handler, work); + work->old_state = CMA_ROUTE_QUERY; + work->new_state = CMA_ROUTE_RESOLVED; + work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; + queue_work(cma_wq, &work->work); + return 0; +} + int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms) { struct rdma_id_private *id_priv; @@ -1241,10 +1442,13 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms) return -EINVAL; atomic_inc(&id_priv->refcount); - switch (id->device->node_type) { - case IB_NODE_CA: + switch (rdma_node_get_transport(id->device->node_type)) { + case RDMA_TRANSPORT_IB: ret = cma_resolve_ib_route(id_priv, timeout_ms); break; + case RDMA_TRANSPORT_IWARP: + ret = cma_resolve_iw_route(id_priv, timeout_ms); + break; default: ret = -ENOSYS; break; @@ -1309,16 +1513,26 @@ static void addr_handler(int status, struct sockaddr *src_addr, enum rdma_cm_event_type event; atomic_inc(&id_priv->dev_remove); - if (!id_priv->cma_dev && !status) + + /* + * Grab mutex to block rdma_destroy_id() from removing the device while + * we're trying to acquire it. 
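+ * (Illustrative note: rdma_destroy_id() takes this same mutex before
+ * detaching id_priv from its cma_dev, so holding it across
+ * cma_acquire_dev() serializes device acquisition against a
+ * concurrent destroy.)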
+ */ + mutex_lock(&lock); + if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) { + mutex_unlock(&lock); + goto out; + } + + if (!status && !id_priv->cma_dev) status = cma_acquire_dev(id_priv); + mutex_unlock(&lock); if (status) { - if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND)) + if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND)) goto out; event = RDMA_CM_EVENT_ADDR_ERROR; } else { - if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) - goto out; memcpy(&id_priv->id.route.addr.src_addr, src_addr, ip_addr_size(src_addr)); event = RDMA_CM_EVENT_ADDR_RESOLVED; @@ -1492,7 +1706,7 @@ static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv) hlist_for_each_entry(cur_id, node, &bind_list->owners, node) { if (cma_any_addr(&cur_id->id.route.addr.src_addr)) return -EADDRNOTAVAIL; - + cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr; if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr) return -EADDRINUSE; @@ -1542,8 +1756,11 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) if (!cma_any_addr(addr)) { ret = rdma_translate_ip(addr, &id->route.addr.dev_addr); - if (!ret) + if (!ret) { + mutex_lock(&lock); ret = cma_acquire_dev(id_priv); + mutex_unlock(&lock); + } if (ret) goto err; } @@ -1649,6 +1866,47 @@ out: return ret; } +static int cma_connect_iw(struct rdma_id_private *id_priv, + struct rdma_conn_param *conn_param) +{ + struct iw_cm_id *cm_id; + struct sockaddr_in* sin; + int ret; + struct iw_cm_conn_param iw_param; + + cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv); + if (IS_ERR(cm_id)) { + ret = PTR_ERR(cm_id); + goto out; + } + + id_priv->cm_id.iw = cm_id; + + sin = (struct sockaddr_in*) &id_priv->id.route.addr.src_addr; + cm_id->local_addr = *sin; + + sin = (struct sockaddr_in*) &id_priv->id.route.addr.dst_addr; + cm_id->remote_addr = *sin; + + ret = cma_modify_qp_rtr(&id_priv->id); + if (ret) { + iw_destroy_cm_id(cm_id); + return ret; + } + + iw_param.ord = conn_param->initiator_depth; + iw_param.ird = conn_param->responder_resources; + iw_param.private_data = conn_param->private_data; + iw_param.private_data_len = conn_param->private_data_len; + if (id_priv->id.qp) + iw_param.qpn = id_priv->qp_num; + else + iw_param.qpn = conn_param->qp_num; + ret = iw_cm_connect(cm_id, &iw_param); +out: + return ret; +} + int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) { struct rdma_id_private *id_priv; @@ -1664,10 +1922,13 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) id_priv->srq = conn_param->srq; } - switch (id->device->node_type) { - case IB_NODE_CA: + switch (rdma_node_get_transport(id->device->node_type)) { + case RDMA_TRANSPORT_IB: ret = cma_connect_ib(id_priv, conn_param); break; + case RDMA_TRANSPORT_IWARP: + ret = cma_connect_iw(id_priv, conn_param); + break; default: ret = -ENOSYS; break; @@ -1708,6 +1969,28 @@ static int cma_accept_ib(struct rdma_id_private *id_priv, return ib_send_cm_rep(id_priv->cm_id.ib, &rep); } +static int cma_accept_iw(struct rdma_id_private *id_priv, + struct rdma_conn_param *conn_param) +{ + struct iw_cm_conn_param iw_param; + int ret; + + ret = cma_modify_qp_rtr(&id_priv->id); + if (ret) + return ret; + + iw_param.ord = conn_param->initiator_depth; + iw_param.ird = conn_param->responder_resources; + iw_param.private_data = conn_param->private_data; + iw_param.private_data_len = conn_param->private_data_len; + if (id_priv->id.qp) { + iw_param.qpn = id_priv->qp_num; + } else + iw_param.qpn 
= conn_param->qp_num; + + return iw_cm_accept(id_priv->cm_id.iw, &iw_param); +} + int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) { struct rdma_id_private *id_priv; @@ -1723,13 +2006,16 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) id_priv->srq = conn_param->srq; } - switch (id->device->node_type) { - case IB_NODE_CA: + switch (rdma_node_get_transport(id->device->node_type)) { + case RDMA_TRANSPORT_IB: if (conn_param) ret = cma_accept_ib(id_priv, conn_param); else ret = cma_rep_recv(id_priv); break; + case RDMA_TRANSPORT_IWARP: + ret = cma_accept_iw(id_priv, conn_param); + break; default: ret = -ENOSYS; break; @@ -1756,12 +2042,16 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data, if (!cma_comp(id_priv, CMA_CONNECT)) return -EINVAL; - switch (id->device->node_type) { - case IB_NODE_CA: + switch (rdma_node_get_transport(id->device->node_type)) { + case RDMA_TRANSPORT_IB: ret = ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, private_data, private_data_len); break; + case RDMA_TRANSPORT_IWARP: + ret = iw_cm_reject(id_priv->cm_id.iw, + private_data, private_data_len); + break; default: ret = -ENOSYS; break; @@ -1780,17 +2070,20 @@ int rdma_disconnect(struct rdma_cm_id *id) !cma_comp(id_priv, CMA_DISCONNECT)) return -EINVAL; - ret = cma_modify_qp_err(id); - if (ret) - goto out; - - switch (id->device->node_type) { - case IB_NODE_CA: + switch (rdma_node_get_transport(id->device->node_type)) { + case RDMA_TRANSPORT_IB: + ret = cma_modify_qp_err(id); + if (ret) + goto out; /* Initiate or respond to a disconnect. */ if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0); break; + case RDMA_TRANSPORT_IWARP: + ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); + break; default: + ret = -EINVAL; break; } out: @@ -1907,12 +2200,15 @@ static int cma_init(void) if (!cma_wq) return -ENOMEM; + ib_sa_register_client(&sa_client); + ret = ib_register_client(&cma_client); if (ret) goto err; return 0; err: + ib_sa_unregister_client(&sa_client); destroy_workqueue(cma_wq); return ret; } @@ -1920,6 +2216,7 @@ err: static void cma_cleanup(void) { ib_unregister_client(&cma_client); + ib_sa_unregister_client(&sa_client); destroy_workqueue(cma_wq); idr_destroy(&sdp_ps); idr_destroy(&tcp_ps); diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index b2f3cb91d9bc..63d2a39fb82c 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -385,7 +385,7 @@ void *ib_get_client_data(struct ib_device *device, struct ib_client *client) EXPORT_SYMBOL(ib_get_client_data); /** - * ib_set_client_data - Get IB client context + * ib_set_client_data - Set IB client context * @device:Device to set context for * @client:Client to set context for * @data:Context to set @@ -505,7 +505,7 @@ int ib_query_port(struct ib_device *device, u8 port_num, struct ib_port_attr *port_attr) { - if (device->node_type == IB_NODE_SWITCH) { + if (device->node_type == RDMA_NODE_IB_SWITCH) { if (port_num) return -EINVAL; } else if (port_num < 1 || port_num > device->phys_port_cnt) @@ -580,7 +580,7 @@ int ib_modify_port(struct ib_device *device, u8 port_num, int port_modify_mask, struct ib_port_modify *port_modify) { - if (device->node_type == IB_NODE_SWITCH) { + if (device->node_type == RDMA_NODE_IB_SWITCH) { if (port_num) return -EINVAL; } else if (port_num < 1 || port_num > device->phys_port_cnt) diff --git a/drivers/infiniband/core/iwcm.c 
b/drivers/infiniband/core/iwcm.c
new file mode 100644
index 000000000000..c3fb304a4e86
--- /dev/null
+++ b/drivers/infiniband/core/iwcm.c
@@ -0,0 +1,1019 @@
+/*
+ * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
+ * Copyright (c) 2004 Topspin Corporation. All rights reserved.
+ * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+
+#include <rdma/iw_cm.h>
+#include <rdma/ib_addr.h>
+
+#include "iwcm.h"
+
+MODULE_AUTHOR("Tom Tucker");
+MODULE_DESCRIPTION("iWARP CM");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static struct workqueue_struct *iwcm_wq;
+struct iwcm_work {
+	struct work_struct work;
+	struct iwcm_id_private *cm_id;
+	struct list_head list;
+	struct iw_cm_event event;
+	struct list_head free_list;
+};
+
+/*
+ * The following services provide a mechanism for pre-allocating iwcm_work
+ * elements. The design pre-allocates them based on the cm_id type:
+ *	LISTENING IDS:	Get enough elements preallocated to handle the
+ *			listen backlog.
+ *	ACTIVE IDS:	4: CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE
+ *	PASSIVE IDS:	3: ESTABLISHED, DISCONNECT, CLOSE
+ *
+ * Allocating them in connect and listen avoids having to deal
+ * with allocation failures on the event upcall from the provider (which
+ * is called in the interrupt context).
+ *
+ * One exception is when creating the cm_id for incoming connection requests.
+ * There are two cases:
+ * 1) in the event upcall, cm_event_handler(), for a listening cm_id. If
+ *    the backlog is exceeded, then no more connection request events will
+ *    be processed. cm_event_handler() returns -ENOMEM in this case. It's up
+ *    to the provider to reject the connection request.
+ * 2) in the connection request workqueue handler, cm_conn_req_handler().
+ * If work elements cannot be allocated for the new connect request cm_id, + * then IWCM will call the provider reject method. This is ok since + * cm_conn_req_handler() runs in the workqueue thread context. + */ + +static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv) +{ + struct iwcm_work *work; + + if (list_empty(&cm_id_priv->work_free_list)) + return NULL; + work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work, + free_list); + list_del_init(&work->free_list); + return work; +} + +static void put_work(struct iwcm_work *work) +{ + list_add(&work->free_list, &work->cm_id->work_free_list); +} + +static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv) +{ + struct list_head *e, *tmp; + + list_for_each_safe(e, tmp, &cm_id_priv->work_free_list) + kfree(list_entry(e, struct iwcm_work, free_list)); +} + +static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count) +{ + struct iwcm_work *work; + + BUG_ON(!list_empty(&cm_id_priv->work_free_list)); + while (count--) { + work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL); + if (!work) { + dealloc_work_entries(cm_id_priv); + return -ENOMEM; + } + work->cm_id = cm_id_priv; + INIT_LIST_HEAD(&work->list); + put_work(work); + } + return 0; +} + +/* + * Save private data from incoming connection requests in the + * cm_id_priv so the low level driver doesn't have to. Adjust + * the event ptr to point to the local copy. + */ +static int copy_private_data(struct iwcm_id_private *cm_id_priv, + struct iw_cm_event *event) +{ + void *p; + + p = kmalloc(event->private_data_len, GFP_ATOMIC); + if (!p) + return -ENOMEM; + memcpy(p, event->private_data, event->private_data_len); + event->private_data = p; + return 0; +} + +/* + * Release a reference on cm_id. If the last reference is being removed + * and iw_destroy_cm_id is waiting, wake up the waiting thread. 
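+ *
+ * A minimal usage sketch (illustrative; it mirrors the pattern the
+ * event path later in this file follows, not a new API):
+ *
+ *	atomic_inc(&cm_id_priv->refcount);	take a ref per queued event
+ *	...					deliver/process the event
+ *	if (iwcm_deref_id(cm_id_priv))
+ *		return;				last ref dropped; cm_id gone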
+ */
+static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
+{
+	int ret = 0;
+
+	BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
+	if (atomic_dec_and_test(&cm_id_priv->refcount)) {
+		BUG_ON(!list_empty(&cm_id_priv->work_list));
+		if (waitqueue_active(&cm_id_priv->destroy_comp.wait)) {
+			BUG_ON(cm_id_priv->state != IW_CM_STATE_DESTROYING);
+			BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY,
+					&cm_id_priv->flags));
+			ret = 1;
+		}
+		complete(&cm_id_priv->destroy_comp);
+	}
+
+	return ret;
+}
+
+static void add_ref(struct iw_cm_id *cm_id)
+{
+	struct iwcm_id_private *cm_id_priv;
+	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+	atomic_inc(&cm_id_priv->refcount);
+}
+
+static void rem_ref(struct iw_cm_id *cm_id)
+{
+	struct iwcm_id_private *cm_id_priv;
+	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+	iwcm_deref_id(cm_id_priv);
+}
+
+static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);
+
+struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
+				 iw_cm_handler cm_handler,
+				 void *context)
+{
+	struct iwcm_id_private *cm_id_priv;
+
+	cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL);
+	if (!cm_id_priv)
+		return ERR_PTR(-ENOMEM);
+
+	cm_id_priv->state = IW_CM_STATE_IDLE;
+	cm_id_priv->id.device = device;
+	cm_id_priv->id.cm_handler = cm_handler;
+	cm_id_priv->id.context = context;
+	cm_id_priv->id.event_handler = cm_event_handler;
+	cm_id_priv->id.add_ref = add_ref;
+	cm_id_priv->id.rem_ref = rem_ref;
+	spin_lock_init(&cm_id_priv->lock);
+	atomic_set(&cm_id_priv->refcount, 1);
+	init_waitqueue_head(&cm_id_priv->connect_wait);
+	init_completion(&cm_id_priv->destroy_comp);
+	INIT_LIST_HEAD(&cm_id_priv->work_list);
+	INIT_LIST_HEAD(&cm_id_priv->work_free_list);
+
+	return &cm_id_priv->id;
+}
+EXPORT_SYMBOL(iw_create_cm_id);
+
+
+static int iwcm_modify_qp_err(struct ib_qp *qp)
+{
+	struct ib_qp_attr qp_attr;
+
+	if (!qp)
+		return -EINVAL;
+
+	qp_attr.qp_state = IB_QPS_ERR;
+	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
+}
+
+/*
+ * This is really the RDMAC CLOSING state. It is most similar to the
+ * IB SQD QP state.
+ */
+static int iwcm_modify_qp_sqd(struct ib_qp *qp)
+{
+	struct ib_qp_attr qp_attr;
+
+	BUG_ON(qp == NULL);
+	qp_attr.qp_state = IB_QPS_SQD;
+	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
+}
+
+/*
+ * CM_ID <-- CLOSING
+ *
+ * Block if a passive or active connection is currently being processed.
Then + * process the event as follows: + * - If we are ESTABLISHED, move to CLOSING and modify the QP state + * based on the abrupt flag + * - If the connection is already in the CLOSING or IDLE state, the peer is + * disconnecting concurrently with us and we've already seen the + * DISCONNECT event -- ignore the request and return 0 + * - Disconnect on a listening endpoint returns -EINVAL + */ +int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt) +{ + struct iwcm_id_private *cm_id_priv; + unsigned long flags; + int ret = 0; + struct ib_qp *qp = NULL; + + cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); + /* Wait if we're currently in a connect or accept downcall */ + wait_event(cm_id_priv->connect_wait, + !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags)); + + spin_lock_irqsave(&cm_id_priv->lock, flags); + switch (cm_id_priv->state) { + case IW_CM_STATE_ESTABLISHED: + cm_id_priv->state = IW_CM_STATE_CLOSING; + + /* QP could be <nul> for user-mode client */ + if (cm_id_priv->qp) + qp = cm_id_priv->qp; + else + ret = -EINVAL; + break; + case IW_CM_STATE_LISTEN: + ret = -EINVAL; + break; + case IW_CM_STATE_CLOSING: + /* remote peer closed first */ + case IW_CM_STATE_IDLE: + /* accept or connect returned !0 */ + break; + case IW_CM_STATE_CONN_RECV: + /* + * App called disconnect before/without calling accept after + * connect_request event delivered. + */ + break; + case IW_CM_STATE_CONN_SENT: + /* Can only get here if wait above fails */ + default: + BUG(); + } + spin_unlock_irqrestore(&cm_id_priv->lock, flags); + + if (qp) { + if (abrupt) + ret = iwcm_modify_qp_err(qp); + else + ret = iwcm_modify_qp_sqd(qp); + + /* + * If both sides are disconnecting the QP could + * already be in ERR or SQD states + */ + ret = 0; + } + + return ret; +} +EXPORT_SYMBOL(iw_cm_disconnect); + +/* + * CM_ID <-- DESTROYING + * + * Clean up all resources associated with the connection and release + * the initial reference taken by iw_create_cm_id. + */ +static void destroy_cm_id(struct iw_cm_id *cm_id) +{ + struct iwcm_id_private *cm_id_priv; + unsigned long flags; + int ret; + + cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); + /* + * Wait if we're currently in a connect or accept downcall. A + * listening endpoint should never block here. + */ + wait_event(cm_id_priv->connect_wait, + !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags)); + + spin_lock_irqsave(&cm_id_priv->lock, flags); + switch (cm_id_priv->state) { + case IW_CM_STATE_LISTEN: + cm_id_priv->state = IW_CM_STATE_DESTROYING; + spin_unlock_irqrestore(&cm_id_priv->lock, flags); + /* destroy the listening endpoint */ + ret = cm_id->device->iwcm->destroy_listen(cm_id); + spin_lock_irqsave(&cm_id_priv->lock, flags); + break; + case IW_CM_STATE_ESTABLISHED: + cm_id_priv->state = IW_CM_STATE_DESTROYING; + spin_unlock_irqrestore(&cm_id_priv->lock, flags); + /* Abrupt close of the connection */ + (void)iwcm_modify_qp_err(cm_id_priv->qp); + spin_lock_irqsave(&cm_id_priv->lock, flags); + break; + case IW_CM_STATE_IDLE: + case IW_CM_STATE_CLOSING: + cm_id_priv->state = IW_CM_STATE_DESTROYING; + break; + case IW_CM_STATE_CONN_RECV: + /* + * App called destroy before/without calling accept after + * receiving connection request event notification. 
+ */ + cm_id_priv->state = IW_CM_STATE_DESTROYING; + break; + case IW_CM_STATE_CONN_SENT: + case IW_CM_STATE_DESTROYING: + default: + BUG(); + break; + } + if (cm_id_priv->qp) { + cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp); + cm_id_priv->qp = NULL; + } + spin_unlock_irqrestore(&cm_id_priv->lock, flags); + + (void)iwcm_deref_id(cm_id_priv); +} + +/* + * This function is only called by the application thread and cannot + * be called by the event thread. The function will wait for all + * references to be released on the cm_id and then kfree the cm_id + * object. + */ +void iw_destroy_cm_id(struct iw_cm_id *cm_id) +{ + struct iwcm_id_private *cm_id_priv; + + cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); + BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)); + + destroy_cm_id(cm_id); + + wait_for_completion(&cm_id_priv->destroy_comp); + + dealloc_work_entries(cm_id_priv); + + kfree(cm_id_priv); +} +EXPORT_SYMBOL(iw_destroy_cm_id); + +/* + * CM_ID <-- LISTEN + * + * Start listening for connect requests. Generates one CONNECT_REQUEST + * event for each inbound connect request. + */ +int iw_cm_listen(struct iw_cm_id *cm_id, int backlog) +{ + struct iwcm_id_private *cm_id_priv; + unsigned long flags; + int ret = 0; + + cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); + + ret = alloc_work_entries(cm_id_priv, backlog); + if (ret) + return ret; + + spin_lock_irqsave(&cm_id_priv->lock, flags); + switch (cm_id_priv->state) { + case IW_CM_STATE_IDLE: + cm_id_priv->state = IW_CM_STATE_LISTEN; + spin_unlock_irqrestore(&cm_id_priv->lock, flags); + ret = cm_id->device->iwcm->create_listen(cm_id, backlog); + if (ret) + cm_id_priv->state = IW_CM_STATE_IDLE; + spin_lock_irqsave(&cm_id_priv->lock, flags); + break; + default: + ret = -EINVAL; + } + spin_unlock_irqrestore(&cm_id_priv->lock, flags); + + return ret; +} +EXPORT_SYMBOL(iw_cm_listen); + +/* + * CM_ID <-- IDLE + * + * Rejects an inbound connection request. No events are generated. + */ +int iw_cm_reject(struct iw_cm_id *cm_id, + const void *private_data, + u8 private_data_len) +{ + struct iwcm_id_private *cm_id_priv; + unsigned long flags; + int ret; + + cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); + set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); + + spin_lock_irqsave(&cm_id_priv->lock, flags); + if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) { + spin_unlock_irqrestore(&cm_id_priv->lock, flags); + clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); + wake_up_all(&cm_id_priv->connect_wait); + return -EINVAL; + } + cm_id_priv->state = IW_CM_STATE_IDLE; + spin_unlock_irqrestore(&cm_id_priv->lock, flags); + + ret = cm_id->device->iwcm->reject(cm_id, private_data, + private_data_len); + + clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); + wake_up_all(&cm_id_priv->connect_wait); + + return ret; +} +EXPORT_SYMBOL(iw_cm_reject); + +/* + * CM_ID <-- ESTABLISHED + * + * Accepts an inbound connection request and generates an ESTABLISHED + * event. Callers of iw_cm_disconnect and iw_destroy_cm_id will block + * until the ESTABLISHED event is received from the provider. 
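+ *
+ * A usage sketch (illustrative; it mirrors how cma_accept_iw() in
+ * cma.c drives this call from a connection request handler):
+ *
+ *	struct iw_cm_conn_param param = {
+ *		.ord			= initiator_depth,
+ *		.ird			= responder_resources,
+ *		.private_data		= pdata,
+ *		.private_data_len	= pdata_len,
+ *		.qpn			= qp_num,
+ *	};
+ *	ret = iw_cm_accept(cm_id, &param);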
+ */ +int iw_cm_accept(struct iw_cm_id *cm_id, + struct iw_cm_conn_param *iw_param) +{ + struct iwcm_id_private *cm_id_priv; + struct ib_qp *qp; + unsigned long flags; + int ret; + + cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); + set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); + + spin_lock_irqsave(&cm_id_priv->lock, flags); + if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) { + spin_unlock_irqrestore(&cm_id_priv->lock, flags); + clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); + wake_up_all(&cm_id_priv->connect_wait); + return -EINVAL; + } + /* Get the ib_qp given the QPN */ + qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn); + if (!qp) { + spin_unlock_irqrestore(&cm_id_priv->lock, flags); + return -EINVAL; + } + cm_id->device->iwcm->add_ref(qp); + cm_id_priv->qp = qp; + spin_unlock_irqrestore(&cm_id_priv->lock, flags); + + ret = cm_id->device->iwcm->accept(cm_id, iw_param); + if (ret) { + /* An error on accept precludes provider events */ + BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV); + cm_id_priv->state = IW_CM_STATE_IDLE; + spin_lock_irqsave(&cm_id_priv->lock, flags); + if (cm_id_priv->qp) { + cm_id->device->iwcm->rem_ref(qp); + cm_id_priv->qp = NULL; + } + spin_unlock_irqrestore(&cm_id_priv->lock, flags); + clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); + wake_up_all(&cm_id_priv->connect_wait); + } + + return ret; +} +EXPORT_SYMBOL(iw_cm_accept); + +/* + * Active Side: CM_ID <-- CONN_SENT + * + * If successful, results in the generation of a CONNECT_REPLY + * event. iw_cm_disconnect and iw_cm_destroy will block until the + * CONNECT_REPLY event is received from the provider. + */ +int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param) +{ + struct iwcm_id_private *cm_id_priv; + int ret = 0; + unsigned long flags; + struct ib_qp *qp; + + cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); + + ret = alloc_work_entries(cm_id_priv, 4); + if (ret) + return ret; + + set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); + spin_lock_irqsave(&cm_id_priv->lock, flags); + + if (cm_id_priv->state != IW_CM_STATE_IDLE) { + spin_unlock_irqrestore(&cm_id_priv->lock, flags); + clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); + wake_up_all(&cm_id_priv->connect_wait); + return -EINVAL; + } + + /* Get the ib_qp given the QPN */ + qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn); + if (!qp) { + spin_unlock_irqrestore(&cm_id_priv->lock, flags); + return -EINVAL; + } + cm_id->device->iwcm->add_ref(qp); + cm_id_priv->qp = qp; + cm_id_priv->state = IW_CM_STATE_CONN_SENT; + spin_unlock_irqrestore(&cm_id_priv->lock, flags); + + ret = cm_id->device->iwcm->connect(cm_id, iw_param); + if (ret) { + spin_lock_irqsave(&cm_id_priv->lock, flags); + if (cm_id_priv->qp) { + cm_id->device->iwcm->rem_ref(qp); + cm_id_priv->qp = NULL; + } + spin_unlock_irqrestore(&cm_id_priv->lock, flags); + BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT); + cm_id_priv->state = IW_CM_STATE_IDLE; + clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); + wake_up_all(&cm_id_priv->connect_wait); + } + + return ret; +} +EXPORT_SYMBOL(iw_cm_connect); + +/* + * Passive Side: new CM_ID <-- CONN_RECV + * + * Handles an inbound connect request. The function creates a new + * iw_cm_id to represent the new connection and inherits the client + * callback function and other attributes from the listening parent. + * + * The work item contains a pointer to the listen_cm_id and the event. The + * listen_cm_id contains the client cm_handler, context and + * device. 
These are copied when the device is cloned. The event
+ * contains the new four tuple.
+ *
+ * An error on the child should not affect the parent, so this
+ * function does not return a value.
+ */
+static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
+				struct iw_cm_event *iw_event)
+{
+	unsigned long flags;
+	struct iw_cm_id *cm_id;
+	struct iwcm_id_private *cm_id_priv;
+	int ret;
+
+	/*
+	 * The provider should never generate a connection request
+	 * event with a bad status.
+	 */
+	BUG_ON(iw_event->status);
+
+	/*
+	 * We could be destroying the listening id. If so, ignore this
+	 * upcall.
+	 */
+	spin_lock_irqsave(&listen_id_priv->lock, flags);
+	if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
+		spin_unlock_irqrestore(&listen_id_priv->lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&listen_id_priv->lock, flags);
+
+	cm_id = iw_create_cm_id(listen_id_priv->id.device,
+				listen_id_priv->id.cm_handler,
+				listen_id_priv->id.context);
+	/* If the cm_id could not be created, ignore the request */
+	if (IS_ERR(cm_id))
+		return;
+
+	cm_id->provider_data = iw_event->provider_data;
+	cm_id->local_addr = iw_event->local_addr;
+	cm_id->remote_addr = iw_event->remote_addr;
+
+	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+	cm_id_priv->state = IW_CM_STATE_CONN_RECV;
+
+	ret = alloc_work_entries(cm_id_priv, 3);
+	if (ret) {
+		iw_cm_reject(cm_id, NULL, 0);
+		iw_destroy_cm_id(cm_id);
+		return;
+	}
+
+	/* Call the client CM handler */
+	ret = cm_id->cm_handler(cm_id, iw_event);
+	if (ret) {
+		set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
+		destroy_cm_id(cm_id);
+		if (atomic_read(&cm_id_priv->refcount)==0)
+			kfree(cm_id);
+	}
+
+	if (iw_event->private_data_len)
+		kfree(iw_event->private_data);
+}
+
+/*
+ * Passive Side: CM_ID <-- ESTABLISHED
+ *
+ * The provider generated an ESTABLISHED event which means that
+ * the MPA negotiation has completed successfully and we are now in MPA
+ * FPDU mode.
+ *
+ * This event can only be received in the CONN_RECV state. If the
+ * remote peer closed, the ESTABLISHED event would be received followed
+ * by the CLOSE event. If the app closes, it will block until we wake
+ * it up after processing this event.
+ */
+static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
+			       struct iw_cm_event *iw_event)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&cm_id_priv->lock, flags);
+
+	/*
+	 * We clear the CONNECT_WAIT bit here to allow the callback
+	 * function to call iw_cm_disconnect. Calling iw_destroy_cm_id
+	 * from a callback handler is not allowed.
+	 */
+	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
+	cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
+	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
+	wake_up_all(&cm_id_priv->connect_wait);
+
+	return ret;
+}
+
+/*
+ * Active Side: CM_ID <-- ESTABLISHED
+ *
+ * The app has called connect and is waiting for the established event to
+ * post its requests to the server. This event will wake up anyone
+ * blocked in iw_cm_disconnect or iw_destroy_id.
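+ *
+ * Outcome sketch (an illustrative summary of the handler below):
+ *
+ *	status == IW_CM_EVENT_STATUS_ACCEPTED:
+ *		record local/remote addresses, CM_ID <-- ESTABLISHED
+ *	any other status (REJECTED or RESET):
+ *		drop the QP reference, CM_ID <-- IDLE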
+
+/*
+ * Active Side: CM_ID <-- ESTABLISHED
+ *
+ * The app has called connect and is waiting for the established event to
+ * post its requests to the server. This event will wake up anyone
+ * blocked in iw_cm_disconnect or iw_destroy_cm_id.
+ */
+static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
+			       struct iw_cm_event *iw_event)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	/*
+	 * Clear the connect wait bit so a callback function calling
+	 * iw_cm_disconnect will not wait and deadlock this thread
+	 */
+	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
+	if (iw_event->status == IW_CM_EVENT_STATUS_ACCEPTED) {
+		cm_id_priv->id.local_addr = iw_event->local_addr;
+		cm_id_priv->id.remote_addr = iw_event->remote_addr;
+		cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
+	} else {
+		/* REJECTED or RESET */
+		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
+		cm_id_priv->qp = NULL;
+		cm_id_priv->state = IW_CM_STATE_IDLE;
+	}
+	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
+
+	if (iw_event->private_data_len)
+		kfree(iw_event->private_data);
+
+	/* Wake up waiters on connect complete */
+	wake_up_all(&cm_id_priv->connect_wait);
+
+	return ret;
+}
+
+/*
+ * CM_ID <-- CLOSING
+ *
+ * If in the ESTABLISHED state, move to CLOSING.
+ */
+static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
+				  struct iw_cm_event *iw_event)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)
+		cm_id_priv->state = IW_CM_STATE_CLOSING;
+	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+}
+
+/*
+ * CM_ID <-- IDLE
+ *
+ * If in the ESTABLISHED or CLOSING states, the QP will have been
+ * moved by the provider to the ERR state. Disassociate the CM_ID from
+ * the QP, move to IDLE, and remove the 'connected' reference.
+ *
+ * If in some other state, the cm_id was destroyed asynchronously.
+ * This is the last reference that will result in waking up
+ * the app thread blocked in iw_destroy_cm_id.
+ */
+static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
+			    struct iw_cm_event *iw_event)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&cm_id_priv->lock, flags);
+
+	if (cm_id_priv->qp) {
+		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
+		cm_id_priv->qp = NULL;
+	}
+	switch (cm_id_priv->state) {
+	case IW_CM_STATE_ESTABLISHED:
+	case IW_CM_STATE_CLOSING:
+		cm_id_priv->state = IW_CM_STATE_IDLE;
+		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
+		spin_lock_irqsave(&cm_id_priv->lock, flags);
+		break;
+	case IW_CM_STATE_DESTROYING:
+		break;
+	default:
+		BUG();
+	}
+	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+	return ret;
+}
+
+static int process_event(struct iwcm_id_private *cm_id_priv,
+			 struct iw_cm_event *iw_event)
+{
+	int ret = 0;
+
+	switch (iw_event->event) {
+	case IW_CM_EVENT_CONNECT_REQUEST:
+		cm_conn_req_handler(cm_id_priv, iw_event);
+		break;
+	case IW_CM_EVENT_CONNECT_REPLY:
+		ret = cm_conn_rep_handler(cm_id_priv, iw_event);
+		break;
+	case IW_CM_EVENT_ESTABLISHED:
+		ret = cm_conn_est_handler(cm_id_priv, iw_event);
+		break;
+	case IW_CM_EVENT_DISCONNECT:
+		cm_disconnect_handler(cm_id_priv, iw_event);
+		break;
+	case IW_CM_EVENT_CLOSE:
+		ret = cm_close_handler(cm_id_priv, iw_event);
+		break;
+	default:
+		BUG();
+	}
+
+	return ret;
+}
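[Editor's example -- not part of the patch. The active-side calls that
drive cm_conn_rep_handler() above; my_qp, laddr, raddr and
example_active_handler are hypothetical and error handling is elided:]

	struct iw_cm_conn_param param = {
		.ord = 1,
		.ird = 1,
		.qpn = my_qp->qp_num,		/* hypothetical QP */
	};
	struct iw_cm_id *cm_id;

	cm_id = iw_create_cm_id(device, example_active_handler, NULL);
	cm_id->local_addr = laddr;
	cm_id->remote_addr = raddr;
	if (iw_cm_connect(cm_id, &param))
		iw_destroy_cm_id(cm_id);
	/*
	 * The handler's IW_CM_EVENT_CONNECT_REPLY callback then checks
	 * event->status for IW_CM_EVENT_STATUS_ACCEPTED.
	 */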
+
+/*
+ * Process events on the work_list for the cm_id. If the callback
+ * function requests that the cm_id be deleted, a flag is set in the
+ * cm_id flags to indicate that when the last reference is
+ * removed, the cm_id is to be destroyed. This is necessary to
+ * distinguish between an object that will be destroyed by the app
+ * thread asleep on the destroy_comp list vs. an object destroyed
+ * here synchronously when the last reference is removed.
+ */
+static void cm_work_handler(void *arg)
+{
+	struct iwcm_work *work = arg, lwork;
+	struct iwcm_id_private *cm_id_priv = work->cm_id;
+	unsigned long flags;
+	int empty;
+	int ret = 0;
+
+	spin_lock_irqsave(&cm_id_priv->lock, flags);
+	empty = list_empty(&cm_id_priv->work_list);
+	while (!empty) {
+		work = list_entry(cm_id_priv->work_list.next,
+				  struct iwcm_work, list);
+		list_del_init(&work->list);
+		empty = list_empty(&cm_id_priv->work_list);
+		lwork = *work;
+		put_work(work);
+		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+		/* put_work() recycled the element; use the local copy */
+		ret = process_event(cm_id_priv, &lwork.event);
+		if (ret) {
+			set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
+			destroy_cm_id(&cm_id_priv->id);
+		}
+		BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
+		if (iwcm_deref_id(cm_id_priv))
+			return;
+
+		if (atomic_read(&cm_id_priv->refcount)==0 &&
+		    test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) {
+			dealloc_work_entries(cm_id_priv);
+			kfree(cm_id_priv);
+			return;
+		}
+		spin_lock_irqsave(&cm_id_priv->lock, flags);
+	}
+	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+}
+
+/*
+ * This function is called in interrupt context. Schedule events on
+ * the iwcm_wq thread to allow callback functions to downcall into
+ * the CM and/or block. Events are queued to a per-CM_ID
+ * work_list. If this is the first event on the work_list, the work
+ * element is also queued on the iwcm_wq thread.
+ *
+ * Each event holds a reference on the cm_id. Until the last posted
+ * event has been delivered and processed, the cm_id cannot be
+ * deleted.
+ *
+ * Returns:
+ * 0	- the event was handled.
+ * -ENOMEM	- the event was not handled due to lack of resources.
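 *
 * (Editor's sketch, not part of the patch: a provider is expected to
 * deliver events through the cm_id's event_handler hook, which the
 * IWCM presumably points at this function when the cm_id is created --
 * an assumption, since the allocation path is not in this hunk:)
 *
 *	struct iw_cm_event event;
 *
 *	memset(&event, 0, sizeof event);
 *	event.event = IW_CM_EVENT_DISCONNECT;
 *	if (cm_id->event_handler(cm_id, &event))
 *		pr_debug("iw_cm event dropped: out of work elements\n");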
+ */ +static int cm_event_handler(struct iw_cm_id *cm_id, + struct iw_cm_event *iw_event) +{ + struct iwcm_work *work; + struct iwcm_id_private *cm_id_priv; + unsigned long flags; + int ret = 0; + + cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); + + spin_lock_irqsave(&cm_id_priv->lock, flags); + work = get_work(cm_id_priv); + if (!work) { + ret = -ENOMEM; + goto out; + } + + INIT_WORK(&work->work, cm_work_handler, work); + work->cm_id = cm_id_priv; + work->event = *iw_event; + + if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST || + work->event.event == IW_CM_EVENT_CONNECT_REPLY) && + work->event.private_data_len) { + ret = copy_private_data(cm_id_priv, &work->event); + if (ret) { + put_work(work); + goto out; + } + } + + atomic_inc(&cm_id_priv->refcount); + if (list_empty(&cm_id_priv->work_list)) { + list_add_tail(&work->list, &cm_id_priv->work_list); + queue_work(iwcm_wq, &work->work); + } else + list_add_tail(&work->list, &cm_id_priv->work_list); +out: + spin_unlock_irqrestore(&cm_id_priv->lock, flags); + return ret; +} + +static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv, + struct ib_qp_attr *qp_attr, + int *qp_attr_mask) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&cm_id_priv->lock, flags); + switch (cm_id_priv->state) { + case IW_CM_STATE_IDLE: + case IW_CM_STATE_CONN_SENT: + case IW_CM_STATE_CONN_RECV: + case IW_CM_STATE_ESTABLISHED: + *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS; + qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_WRITE| + IB_ACCESS_REMOTE_READ; + ret = 0; + break; + default: + ret = -EINVAL; + break; + } + spin_unlock_irqrestore(&cm_id_priv->lock, flags); + return ret; +} + +static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv, + struct ib_qp_attr *qp_attr, + int *qp_attr_mask) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&cm_id_priv->lock, flags); + switch (cm_id_priv->state) { + case IW_CM_STATE_IDLE: + case IW_CM_STATE_CONN_SENT: + case IW_CM_STATE_CONN_RECV: + case IW_CM_STATE_ESTABLISHED: + *qp_attr_mask = 0; + ret = 0; + break; + default: + ret = -EINVAL; + break; + } + spin_unlock_irqrestore(&cm_id_priv->lock, flags); + return ret; +} + +int iw_cm_init_qp_attr(struct iw_cm_id *cm_id, + struct ib_qp_attr *qp_attr, + int *qp_attr_mask) +{ + struct iwcm_id_private *cm_id_priv; + int ret; + + cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); + switch (qp_attr->qp_state) { + case IB_QPS_INIT: + case IB_QPS_RTR: + ret = iwcm_init_qp_init_attr(cm_id_priv, + qp_attr, qp_attr_mask); + break; + case IB_QPS_RTS: + ret = iwcm_init_qp_rts_attr(cm_id_priv, + qp_attr, qp_attr_mask); + break; + default: + ret = -EINVAL; + break; + } + return ret; +} +EXPORT_SYMBOL(iw_cm_init_qp_attr); + +static int __init iw_cm_init(void) +{ + iwcm_wq = create_singlethread_workqueue("iw_cm_wq"); + if (!iwcm_wq) + return -ENOMEM; + + return 0; +} + +static void __exit iw_cm_cleanup(void) +{ + destroy_workqueue(iwcm_wq); +} + +module_init(iw_cm_init); +module_exit(iw_cm_cleanup); diff --git a/drivers/infiniband/core/iwcm.h b/drivers/infiniband/core/iwcm.h new file mode 100644 index 000000000000..3f6cc82564c8 --- /dev/null +++ b/drivers/infiniband/core/iwcm.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2005 Network Appliance, Inc. All rights reserved. + * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef IWCM_H +#define IWCM_H + +enum iw_cm_state { + IW_CM_STATE_IDLE, /* unbound, inactive */ + IW_CM_STATE_LISTEN, /* listen waiting for connect */ + IW_CM_STATE_CONN_RECV, /* inbound waiting for user accept */ + IW_CM_STATE_CONN_SENT, /* outbound waiting for peer accept */ + IW_CM_STATE_ESTABLISHED, /* established */ + IW_CM_STATE_CLOSING, /* disconnect */ + IW_CM_STATE_DESTROYING /* object being deleted */ +}; + +struct iwcm_id_private { + struct iw_cm_id id; + enum iw_cm_state state; + unsigned long flags; + struct ib_qp *qp; + struct completion destroy_comp; + wait_queue_head_t connect_wait; + struct list_head work_list; + spinlock_t lock; + atomic_t refcount; + struct list_head work_free_list; +}; + +#define IWCM_F_CALLBACK_DESTROY 1 +#define IWCM_F_CONNECT_WAIT 2 + +#endif /* IWCM_H */ diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 1c3cfbbe6a97..082f03c158f0 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -1246,8 +1246,8 @@ static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class, int i; for (i = 0; i < MAX_MGMT_OUI; i++) - /* Is there matching OUI for this vendor class ? */ - if (!memcmp(vendor_class->oui[i], oui, 3)) + /* Is there matching OUI for this vendor class ? 
*/ + if (!memcmp(vendor_class->oui[i], oui, 3)) return i; return -1; @@ -2237,7 +2237,7 @@ static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv) list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, &mad_agent_priv->send_list, agent_list) { if (mad_send_wr->status == IB_WC_SUCCESS) { - mad_send_wr->status = IB_WC_WR_FLUSH_ERR; + mad_send_wr->status = IB_WC_WR_FLUSH_ERR; mad_send_wr->refcount -= (mad_send_wr->timeout > 0); } } @@ -2528,10 +2528,10 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, } } sg_list.addr = dma_map_single(qp_info->port_priv-> - device->dma_device, + device->dma_device, &mad_priv->grh, sizeof *mad_priv - - sizeof mad_priv->header, + sizeof mad_priv->header, DMA_FROM_DEVICE); pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr); recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list; @@ -2606,7 +2606,7 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv) struct ib_qp *qp; attr = kmalloc(sizeof *attr, GFP_KERNEL); - if (!attr) { + if (!attr) { printk(KERN_ERR PFX "Couldn't kmalloc ib_qp_attr\n"); return -ENOMEM; } @@ -2876,7 +2876,10 @@ static void ib_mad_init_device(struct ib_device *device) { int start, end, i; - if (device->node_type == IB_NODE_SWITCH) { + if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) + return; + + if (device->node_type == RDMA_NODE_IB_SWITCH) { start = 0; end = 0; } else { @@ -2923,7 +2926,7 @@ static void ib_mad_remove_device(struct ib_device *device) { int i, num_ports, cur_port; - if (device->node_type == IB_NODE_SWITCH) { + if (device->node_type == RDMA_NODE_IB_SWITCH) { num_ports = 1; cur_port = 0; } else { diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h index d147f3bad2ce..1da9adbccaec 100644 --- a/drivers/infiniband/core/mad_priv.h +++ b/drivers/infiniband/core/mad_priv.h @@ -39,7 +39,6 @@ #include <linux/completion.h> #include <linux/pci.h> -#include <linux/kthread.h> #include <linux/workqueue.h> #include <rdma/ib_mad.h> #include <rdma/ib_smi.h> diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c index ebcd5b181770..1ef79d015a1e 100644 --- a/drivers/infiniband/core/mad_rmpp.c +++ b/drivers/infiniband/core/mad_rmpp.c @@ -33,8 +33,6 @@ * $Id: mad_rmpp.c 1921 2005-03-02 22:58:44Z sean.hefty $ */ -#include <linux/dma-mapping.h> - #include "mad_priv.h" #include "mad_rmpp.h" @@ -60,6 +58,7 @@ struct mad_rmpp_recv { int last_ack; int seg_num; int newwin; + int repwin; __be64 tid; u32 src_qp; @@ -170,6 +169,32 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent, return msg; } +static void ack_ds_ack(struct ib_mad_agent_private *agent, + struct ib_mad_recv_wc *recv_wc) +{ + struct ib_mad_send_buf *msg; + struct ib_rmpp_mad *rmpp_mad; + int ret; + + msg = alloc_response_msg(&agent->agent, recv_wc); + if (IS_ERR(msg)) + return; + + rmpp_mad = msg->mad; + memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len); + + rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP; + ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); + rmpp_mad->rmpp_hdr.seg_num = 0; + rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(1); + + ret = ib_post_send_mad(msg, NULL); + if (ret) { + ib_destroy_ah(msg->ah); + ib_free_send_mad(msg); + } +} + void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc) { struct ib_rmpp_mad *rmpp_mad = mad_send_wc->send_buf->mad; @@ -271,6 +296,7 @@ create_rmpp_recv(struct ib_mad_agent_private *agent, rmpp_recv->newwin = 1; rmpp_recv->seg_num = 1; 
rmpp_recv->last_ack = 0; + rmpp_recv->repwin = 1; mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr; rmpp_recv->tid = mad_hdr->tid; @@ -365,7 +391,7 @@ static inline int window_size(struct ib_mad_agent_private *agent) static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list, int seg_num) { - struct ib_mad_recv_buf *seg_buf; + struct ib_mad_recv_buf *seg_buf; int cur_seg_num; list_for_each_entry_reverse(seg_buf, rmpp_list, list) { @@ -591,6 +617,16 @@ static inline void adjust_last_ack(struct ib_mad_send_wr_private *wr, break; } +static void process_ds_ack(struct ib_mad_agent_private *agent, + struct ib_mad_recv_wc *mad_recv_wc, int newwin) +{ + struct mad_rmpp_recv *rmpp_recv; + + rmpp_recv = find_rmpp_recv(agent, mad_recv_wc); + if (rmpp_recv && rmpp_recv->state == RMPP_STATE_COMPLETE) + rmpp_recv->repwin = newwin; +} + static void process_rmpp_ack(struct ib_mad_agent_private *agent, struct ib_mad_recv_wc *mad_recv_wc) { @@ -616,8 +652,18 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent, spin_lock_irqsave(&agent->lock, flags); mad_send_wr = ib_find_send_mad(agent, mad_recv_wc); - if (!mad_send_wr) - goto out; /* Unmatched ACK */ + if (!mad_send_wr) { + if (!seg_num) + process_ds_ack(agent, mad_recv_wc, newwin); + goto out; /* Unmatched or DS RMPP ACK */ + } + + if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) && + (mad_send_wr->timeout)) { + spin_unlock_irqrestore(&agent->lock, flags); + ack_ds_ack(agent, mad_recv_wc); + return; /* Repeated ACK for DS RMPP transaction */ + } if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) || (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS)) @@ -656,6 +702,9 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent, if (mad_send_wr->refcount == 1) ib_reset_mad_timeout(mad_send_wr, mad_send_wr->send_buf.timeout_ms); + spin_unlock_irqrestore(&agent->lock, flags); + ack_ds_ack(agent, mad_recv_wc); + return; } else if (mad_send_wr->refcount == 1 && mad_send_wr->seg_num < mad_send_wr->newwin && mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) { @@ -772,6 +821,39 @@ out: return NULL; } +static int init_newwin(struct ib_mad_send_wr_private *mad_send_wr) +{ + struct ib_mad_agent_private *agent = mad_send_wr->mad_agent_priv; + struct ib_mad_hdr *mad_hdr = mad_send_wr->send_buf.mad; + struct mad_rmpp_recv *rmpp_recv; + struct ib_ah_attr ah_attr; + unsigned long flags; + int newwin = 1; + + if (!(mad_hdr->method & IB_MGMT_METHOD_RESP)) + goto out; + + spin_lock_irqsave(&agent->lock, flags); + list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) { + if (rmpp_recv->tid != mad_hdr->tid || + rmpp_recv->mgmt_class != mad_hdr->mgmt_class || + rmpp_recv->class_version != mad_hdr->class_version || + (rmpp_recv->method & IB_MGMT_METHOD_RESP)) + continue; + + if (ib_query_ah(mad_send_wr->send_buf.ah, &ah_attr)) + continue; + + if (rmpp_recv->slid == ah_attr.dlid) { + newwin = rmpp_recv->repwin; + break; + } + } + spin_unlock_irqrestore(&agent->lock, flags); +out: + return newwin; +} + int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr) { struct ib_rmpp_mad *rmpp_mad; @@ -787,7 +869,7 @@ int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr) return IB_RMPP_RESULT_INTERNAL; } - mad_send_wr->newwin = 1; + mad_send_wr->newwin = init_newwin(mad_send_wr); /* We need to wait for the final ACK even if there isn't a response */ mad_send_wr->refcount += (mad_send_wr->timeout == 0); diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c 
index d6b84226bba7..1706d3c7e95e 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -1,6 +1,7 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Voltaire, Inc. All rights reserved. + * Copyright (c) 2006 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -75,6 +76,7 @@ struct ib_sa_device { struct ib_sa_query { void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *); void (*release)(struct ib_sa_query *); + struct ib_sa_client *client; struct ib_sa_port *port; struct ib_mad_send_buf *mad_buf; struct ib_sa_sm_ah *sm_ah; @@ -415,6 +417,31 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event } } +void ib_sa_register_client(struct ib_sa_client *client) +{ + atomic_set(&client->users, 1); + init_completion(&client->comp); +} +EXPORT_SYMBOL(ib_sa_register_client); + +static inline void ib_sa_client_get(struct ib_sa_client *client) +{ + atomic_inc(&client->users); +} + +static inline void ib_sa_client_put(struct ib_sa_client *client) +{ + if (atomic_dec_and_test(&client->users)) + complete(&client->comp); +} + +void ib_sa_unregister_client(struct ib_sa_client *client) +{ + ib_sa_client_put(client); + wait_for_completion(&client->comp); +} +EXPORT_SYMBOL(ib_sa_unregister_client); + /** * ib_sa_cancel_query - try to cancel an SA query * @id:ID of query to cancel @@ -557,6 +584,7 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query) /** * ib_sa_path_rec_get - Start a Path get query + * @client:SA client * @device:device to send query on * @port_num: port number to send query on * @rec:Path Record to send in query @@ -579,7 +607,8 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query) * error code. Otherwise it is a query ID that can be used to cancel * the query. */ -int ib_sa_path_rec_get(struct ib_device *device, u8 port_num, +int ib_sa_path_rec_get(struct ib_sa_client *client, + struct ib_device *device, u8 port_num, struct ib_sa_path_rec *rec, ib_sa_comp_mask comp_mask, int timeout_ms, gfp_t gfp_mask, @@ -614,8 +643,10 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num, goto err1; } - query->callback = callback; - query->context = context; + ib_sa_client_get(client); + query->sa_query.client = client; + query->callback = callback; + query->context = context; mad = query->sa_query.mad_buf->mad; init_mad(mad, agent); @@ -639,6 +670,7 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num, err2: *sa_query = NULL; + ib_sa_client_put(query->sa_query.client); ib_free_send_mad(query->sa_query.mad_buf); err1: @@ -671,6 +703,7 @@ static void ib_sa_service_rec_release(struct ib_sa_query *sa_query) /** * ib_sa_service_rec_query - Start Service Record operation + * @client:SA client * @device:device to send request on * @port_num: port number to send request on * @method:SA method - should be get, set, or delete @@ -695,7 +728,8 @@ static void ib_sa_service_rec_release(struct ib_sa_query *sa_query) * error code. Otherwise it is a request ID that can be used to cancel * the query. 
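 *
 * (Editor's sketch, not part of the patch: the ib_sa_client
 * registration pattern that this and the other SA query functions now
 * require; my_client, my_cb and my_ctx are hypothetical and error
 * handling is elided:)
 *
 *	static struct ib_sa_client my_client;
 *	struct ib_sa_query *query;
 *	int id;
 *
 *	ib_sa_register_client(&my_client);
 *	id = ib_sa_path_rec_get(&my_client, device, port_num, &rec,
 *				comp_mask, timeout_ms, GFP_KERNEL,
 *				my_cb, my_ctx, &query);
 *	...
 *	ib_sa_unregister_client(&my_client);	(blocks until callbacks drain)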
*/ -int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method, +int ib_sa_service_rec_query(struct ib_sa_client *client, + struct ib_device *device, u8 port_num, u8 method, struct ib_sa_service_rec *rec, ib_sa_comp_mask comp_mask, int timeout_ms, gfp_t gfp_mask, @@ -735,8 +769,10 @@ int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method, goto err1; } - query->callback = callback; - query->context = context; + ib_sa_client_get(client); + query->sa_query.client = client; + query->callback = callback; + query->context = context; mad = query->sa_query.mad_buf->mad; init_mad(mad, agent); @@ -761,6 +797,7 @@ int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method, err2: *sa_query = NULL; + ib_sa_client_put(query->sa_query.client); ib_free_send_mad(query->sa_query.mad_buf); err1: @@ -791,7 +828,8 @@ static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query) kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query)); } -int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num, +int ib_sa_mcmember_rec_query(struct ib_sa_client *client, + struct ib_device *device, u8 port_num, u8 method, struct ib_sa_mcmember_rec *rec, ib_sa_comp_mask comp_mask, @@ -827,8 +865,10 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num, goto err1; } - query->callback = callback; - query->context = context; + ib_sa_client_get(client); + query->sa_query.client = client; + query->callback = callback; + query->context = context; mad = query->sa_query.mad_buf->mad; init_mad(mad, agent); @@ -853,6 +893,7 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num, err2: *sa_query = NULL; + ib_sa_client_put(query->sa_query.client); ib_free_send_mad(query->sa_query.mad_buf); err1: @@ -887,8 +928,9 @@ static void send_handler(struct ib_mad_agent *agent, idr_remove(&query_idr, query->id); spin_unlock_irqrestore(&idr_lock, flags); - ib_free_send_mad(mad_send_wc->send_buf); + ib_free_send_mad(mad_send_wc->send_buf); kref_put(&query->sm_ah->ref, free_sm_ah); + ib_sa_client_put(query->client); query->release(query); } @@ -919,7 +961,10 @@ static void ib_sa_add_one(struct ib_device *device) struct ib_sa_device *sa_dev; int s, e, i; - if (device->node_type == IB_NODE_SWITCH) + if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) + return; + + if (device->node_type == RDMA_NODE_IB_SWITCH) s = e = 0; else { s = 1; diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c index 35852e794e26..54b81e17ad50 100644 --- a/drivers/infiniband/core/smi.c +++ b/drivers/infiniband/core/smi.c @@ -64,7 +64,7 @@ int smi_handle_dr_smp_send(struct ib_smp *smp, /* C14-9:2 */ if (hop_ptr && hop_ptr < hop_cnt) { - if (node_type != IB_NODE_SWITCH) + if (node_type != RDMA_NODE_IB_SWITCH) return 0; /* smp->return_path set when received */ @@ -77,7 +77,7 @@ int smi_handle_dr_smp_send(struct ib_smp *smp, if (hop_ptr == hop_cnt) { /* smp->return_path set when received */ smp->hop_ptr++; - return (node_type == IB_NODE_SWITCH || + return (node_type == RDMA_NODE_IB_SWITCH || smp->dr_dlid == IB_LID_PERMISSIVE); } @@ -95,7 +95,7 @@ int smi_handle_dr_smp_send(struct ib_smp *smp, /* C14-13:2 */ if (2 <= hop_ptr && hop_ptr <= hop_cnt) { - if (node_type != IB_NODE_SWITCH) + if (node_type != RDMA_NODE_IB_SWITCH) return 0; smp->hop_ptr--; @@ -107,7 +107,7 @@ int smi_handle_dr_smp_send(struct ib_smp *smp, if (hop_ptr == 1) { smp->hop_ptr--; /* C14-13:3 -- SMPs destined for SM shouldn't be here */ - return (node_type == 
IB_NODE_SWITCH || + return (node_type == RDMA_NODE_IB_SWITCH || smp->dr_slid == IB_LID_PERMISSIVE); } @@ -142,7 +142,7 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp, /* C14-9:2 -- intermediate hop */ if (hop_ptr && hop_ptr < hop_cnt) { - if (node_type != IB_NODE_SWITCH) + if (node_type != RDMA_NODE_IB_SWITCH) return 0; smp->return_path[hop_ptr] = port_num; @@ -156,7 +156,7 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp, smp->return_path[hop_ptr] = port_num; /* smp->hop_ptr updated when sending */ - return (node_type == IB_NODE_SWITCH || + return (node_type == RDMA_NODE_IB_SWITCH || smp->dr_dlid == IB_LID_PERMISSIVE); } @@ -175,7 +175,7 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp, /* C14-13:2 */ if (2 <= hop_ptr && hop_ptr <= hop_cnt) { - if (node_type != IB_NODE_SWITCH) + if (node_type != RDMA_NODE_IB_SWITCH) return 0; /* smp->hop_ptr updated when sending */ @@ -190,7 +190,7 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp, return 1; } /* smp->hop_ptr updated when sending */ - return (node_type == IB_NODE_SWITCH); + return (node_type == RDMA_NODE_IB_SWITCH); } /* C14-13:4 -- hop_ptr = 0 -> give to SM */ diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index 21f9282c1b25..709323c14c5d 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -68,7 +68,7 @@ struct port_table_attribute { int index; }; -static inline int ibdev_is_alive(const struct ib_device *dev) +static inline int ibdev_is_alive(const struct ib_device *dev) { return dev->reg_state == IB_DEV_REGISTERED; } @@ -589,10 +589,11 @@ static ssize_t show_node_type(struct class_device *cdev, char *buf) return -ENODEV; switch (dev->node_type) { - case IB_NODE_CA: return sprintf(buf, "%d: CA\n", dev->node_type); - case IB_NODE_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type); - case IB_NODE_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type); - default: return sprintf(buf, "%d: <unknown>\n", dev->node_type); + case RDMA_NODE_IB_CA: return sprintf(buf, "%d: CA\n", dev->node_type); + case RDMA_NODE_RNIC: return sprintf(buf, "%d: RNIC\n", dev->node_type); + case RDMA_NODE_IB_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type); + case RDMA_NODE_IB_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type); + default: return sprintf(buf, "%d: <unknown>\n", dev->node_type); } } @@ -708,7 +709,7 @@ int ib_device_register_sysfs(struct ib_device *device) if (ret) goto err_put; - if (device->node_type == IB_NODE_SWITCH) { + if (device->node_type == RDMA_NODE_IB_SWITCH) { ret = add_port(device, 0); if (ret) goto err_put; diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index c1c6fda9452c..ad4f4d5c2924 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c @@ -309,9 +309,9 @@ static int ib_ucm_event_process(struct ib_cm_event *evt, info = evt->param.apr_rcvd.apr_info; break; case IB_CM_SIDR_REQ_RECEIVED: - uvt->resp.u.sidr_req_resp.pkey = + uvt->resp.u.sidr_req_resp.pkey = evt->param.sidr_req_rcvd.pkey; - uvt->resp.u.sidr_req_resp.port = + uvt->resp.u.sidr_req_resp.port = evt->param.sidr_req_rcvd.port; uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE; break; @@ -1237,7 +1237,7 @@ static struct class ucm_class = { static ssize_t show_ibdev(struct class_device *class_dev, char *buf) { struct ib_ucm_device *dev; - + dev = container_of(class_dev, struct ib_ucm_device, class_dev); return sprintf(buf, "%s\n", dev->ib_dev->name); } @@ -1247,7 +1247,8 @@ static void ib_ucm_add_one(struct ib_device *device) { 
struct ib_ucm_device *ucm_dev; - if (!device->alloc_ucontext) + if (!device->alloc_ucontext || + rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) return; ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL); diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 1273f8807e84..807fbd6b8414 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. - * Copyright (c) 2005 Voltaire, Inc. All rights reserved. + * Copyright (c) 2005 Voltaire, Inc. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * * This software is available to you under a choice of one of two @@ -1032,7 +1032,10 @@ static void ib_umad_add_one(struct ib_device *device) struct ib_umad_device *umad_dev; int s, e, i; - if (device->node_type == IB_NODE_SWITCH) + if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) + return; + + if (device->node_type == RDMA_NODE_IB_SWITCH) s = e = 0; else { s = 1; diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 30923eb68ec7..b72c7f69ca90 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -155,7 +155,7 @@ static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id, } static struct ib_uobject *idr_read_uobj(struct idr *idr, int id, - struct ib_ucontext *context) + struct ib_ucontext *context, int nested) { struct ib_uobject *uobj; @@ -163,7 +163,10 @@ static struct ib_uobject *idr_read_uobj(struct idr *idr, int id, if (!uobj) return NULL; - down_read(&uobj->mutex); + if (nested) + down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING); + else + down_read(&uobj->mutex); if (!uobj->live) { put_uobj_read(uobj); return NULL; @@ -190,17 +193,18 @@ static struct ib_uobject *idr_write_uobj(struct idr *idr, int id, return uobj; } -static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context) +static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context, + int nested) { struct ib_uobject *uobj; - uobj = idr_read_uobj(idr, id, context); + uobj = idr_read_uobj(idr, id, context, nested); return uobj ? 
uobj->object : NULL; } static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context) { - return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context); + return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0); } static void put_pd_read(struct ib_pd *pd) @@ -208,9 +212,9 @@ static void put_pd_read(struct ib_pd *pd) put_uobj_read(pd->uobject); } -static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context) +static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested) { - return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context); + return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested); } static void put_cq_read(struct ib_cq *cq) @@ -220,7 +224,7 @@ static void put_cq_read(struct ib_cq *cq) static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context) { - return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context); + return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0); } static void put_ah_read(struct ib_ah *ah) @@ -230,7 +234,7 @@ static void put_ah_read(struct ib_ah *ah) static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context) { - return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context); + return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0); } static void put_qp_read(struct ib_qp *qp) @@ -240,7 +244,7 @@ static void put_qp_read(struct ib_qp *qp) static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context) { - return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context); + return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0); } static void put_srq_read(struct ib_srq *srq) @@ -837,7 +841,6 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file, err_copy: idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject); - err_free: ib_destroy_cq(cq); @@ -867,7 +870,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file, (unsigned long) cmd.response + sizeof resp, in_len - sizeof cmd, out_len - sizeof resp); - cq = idr_read_cq(cmd.cq_handle, file->ucontext); + cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0); if (!cq) return -EINVAL; @@ -875,11 +878,10 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file, if (ret) goto out; - memset(&resp, 0, sizeof resp); resp.cqe = cq->cqe; if (copy_to_user((void __user *) (unsigned long) cmd.response, - &resp, sizeof resp)) + &resp, sizeof resp.cqe)) ret = -EFAULT; out: @@ -894,7 +896,6 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file, { struct ib_uverbs_poll_cq cmd; struct ib_uverbs_poll_cq_resp *resp; - struct ib_uobject *uobj; struct ib_cq *cq; struct ib_wc *wc; int ret = 0; @@ -915,16 +916,15 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file, goto out_wc; } - uobj = idr_read_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext); - if (!uobj) { + cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0); + if (!cq) { ret = -EINVAL; goto out; } - cq = uobj->object; resp->count = ib_poll_cq(cq, cmd.ne, wc); - put_uobj_read(uobj); + put_cq_read(cq); for (i = 0; i < resp->count; i++) { resp->wc[i].wr_id = wc[i].wr_id; @@ -959,21 +959,19 @@ ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file, int out_len) { struct ib_uverbs_req_notify_cq cmd; - struct ib_uobject *uobj; struct ib_cq *cq; if (copy_from_user(&cmd, buf, sizeof cmd)) return -EFAULT; - uobj = idr_read_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext); - if (!uobj) + cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0); + if (!cq) return -EINVAL; - cq = uobj->object; ib_req_notify_cq(cq, cmd.solicited_only ? 
IB_CQ_SOLICITED : IB_CQ_NEXT_COMP); - put_uobj_read(uobj); + put_cq_read(cq); return in_len; } @@ -1064,9 +1062,9 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file, srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL; pd = idr_read_pd(cmd.pd_handle, file->ucontext); - scq = idr_read_cq(cmd.send_cq_handle, file->ucontext); + scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, 0); rcq = cmd.recv_cq_handle == cmd.send_cq_handle ? - scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext); + scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext, 1); if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) { ret = -EINVAL; @@ -1274,6 +1272,7 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, int out_len) { struct ib_uverbs_modify_qp cmd; + struct ib_udata udata; struct ib_qp *qp; struct ib_qp_attr *attr; int ret; @@ -1281,6 +1280,9 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, if (copy_from_user(&cmd, buf, sizeof cmd)) return -EFAULT; + INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd, + out_len); + attr = kmalloc(sizeof *attr, GFP_KERNEL); if (!attr) return -ENOMEM; @@ -1337,7 +1339,7 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0; attr->alt_ah_attr.port_num = cmd.alt_dest.port_num; - ret = ib_modify_qp(qp, attr, cmd.attr_mask); + ret = qp->device->modify_qp(qp, attr, cmd.attr_mask, &udata); put_qp_read(qp); @@ -1674,7 +1676,6 @@ ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file, break; } - if (copy_to_user((void __user *) (unsigned long) cmd.response, &resp, sizeof resp)) ret = -EFAULT; @@ -1724,7 +1725,6 @@ ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file, break; } - if (copy_to_user((void __user *) (unsigned long) cmd.response, &resp, sizeof resp)) ret = -EFAULT; @@ -2055,6 +2055,7 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, int out_len) { struct ib_uverbs_modify_srq cmd; + struct ib_udata udata; struct ib_srq *srq; struct ib_srq_attr attr; int ret; @@ -2062,6 +2063,9 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, if (copy_from_user(&cmd, buf, sizeof cmd)) return -EFAULT; + INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd, + out_len); + srq = idr_read_srq(cmd.srq_handle, file->ucontext); if (!srq) return -EINVAL; @@ -2069,7 +2073,7 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, attr.max_wr = cmd.max_wr; attr.srq_limit = cmd.srq_limit; - ret = ib_modify_srq(srq, &attr, cmd.attr_mask); + ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata); put_srq_read(srq); diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 468999c38803..8b5dd3649bbf 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -79,6 +79,23 @@ enum ib_rate mult_to_ib_rate(int mult) } EXPORT_SYMBOL(mult_to_ib_rate); +enum rdma_transport_type +rdma_node_get_transport(enum rdma_node_type node_type) +{ + switch (node_type) { + case RDMA_NODE_IB_CA: + case RDMA_NODE_IB_SWITCH: + case RDMA_NODE_IB_ROUTER: + return RDMA_TRANSPORT_IB; + case RDMA_NODE_RNIC: + return RDMA_TRANSPORT_IWARP; + default: + BUG(); + return 0; + } +} +EXPORT_SYMBOL(rdma_node_get_transport); + /* Protection domains */ struct ib_pd *ib_alloc_pd(struct ib_device *device) @@ -231,7 +248,7 @@ int ib_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr, enum ib_srq_attr_mask srq_attr_mask) { - return srq->device->modify_srq(srq, srq_attr, srq_attr_mask); + return 
srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL); } EXPORT_SYMBOL(ib_modify_srq); @@ -547,7 +564,7 @@ int ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask) { - return qp->device->modify_qp(qp, qp_attr, qp_attr_mask); + return qp->device->modify_qp(qp, qp_attr, qp_attr_mask, NULL); } EXPORT_SYMBOL(ib_modify_qp); diff --git a/drivers/infiniband/hw/amso1100/Kbuild b/drivers/infiniband/hw/amso1100/Kbuild new file mode 100644 index 000000000000..06964c4af849 --- /dev/null +++ b/drivers/infiniband/hw/amso1100/Kbuild @@ -0,0 +1,8 @@ +ifdef CONFIG_INFINIBAND_AMSO1100_DEBUG +EXTRA_CFLAGS += -DDEBUG +endif + +obj-$(CONFIG_INFINIBAND_AMSO1100) += iw_c2.o + +iw_c2-y := c2.o c2_provider.o c2_rnic.o c2_alloc.o c2_mq.o c2_ae.o c2_vq.o \ + c2_intr.o c2_cq.o c2_qp.o c2_cm.o c2_mm.o c2_pd.o diff --git a/drivers/infiniband/hw/amso1100/Kconfig b/drivers/infiniband/hw/amso1100/Kconfig new file mode 100644 index 000000000000..809cb14ac6de --- /dev/null +++ b/drivers/infiniband/hw/amso1100/Kconfig @@ -0,0 +1,15 @@ +config INFINIBAND_AMSO1100 + tristate "Ammasso 1100 HCA support" + depends on PCI && INET && INFINIBAND + ---help--- + This is a low-level driver for the Ammasso 1100 host + channel adapter (HCA). + +config INFINIBAND_AMSO1100_DEBUG + bool "Verbose debugging output" + depends on INFINIBAND_AMSO1100 + default n + ---help--- + This option causes the amso1100 driver to produce a bunch of + debug messages. Select this if you are developing the driver + or trying to diagnose a problem. diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c new file mode 100644 index 000000000000..9e9120f36019 --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2.c @@ -0,0 +1,1255 @@ +/* + * Copyright (c) 2005 Ammasso, Inc. All rights reserved. + * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/inetdevice.h> +#include <linux/delay.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/if_vlan.h> +#include <linux/crc32.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/init.h> +#include <linux/dma-mapping.h> + +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/byteorder.h> + +#include <rdma/ib_smi.h> +#include "c2.h" +#include "c2_provider.h" + +MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>"); +MODULE_DESCRIPTION("Ammasso AMSO1100 Low-level iWARP Driver"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRV_VERSION); + +static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK + | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN; + +static int debug = -1; /* defaults above */ +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +static int c2_up(struct net_device *netdev); +static int c2_down(struct net_device *netdev); +static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev); +static void c2_tx_interrupt(struct net_device *netdev); +static void c2_rx_interrupt(struct net_device *netdev); +static irqreturn_t c2_interrupt(int irq, void *dev_id, struct pt_regs *regs); +static void c2_tx_timeout(struct net_device *netdev); +static int c2_change_mtu(struct net_device *netdev, int new_mtu); +static void c2_reset(struct c2_port *c2_port); +static struct net_device_stats *c2_get_stats(struct net_device *netdev); + +static struct pci_device_id c2_pci_table[] = { + { PCI_DEVICE(0x18b8, 0xb001) }, + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, c2_pci_table); + +static void c2_print_macaddr(struct net_device *netdev) +{ + pr_debug("%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, " + "IRQ %u\n", netdev->name, + netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], + netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5], + netdev->irq); +} + +static void c2_set_rxbufsize(struct c2_port *c2_port) +{ + struct net_device *netdev = c2_port->netdev; + + if (netdev->mtu > RX_BUF_SIZE) + c2_port->rx_buf_size = + netdev->mtu + ETH_HLEN + sizeof(struct c2_rxp_hdr) + + NET_IP_ALIGN; + else + c2_port->rx_buf_size = sizeof(struct c2_rxp_hdr) + RX_BUF_SIZE; +} + +/* + * Allocate TX ring elements and chain them together. + * One-to-one association of adapter descriptors with ring elements. 
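 *
 * (Editor's note, not part of the patch: both rings are circular, the
 * last element's next pointing back at start, so the fill and clean
 * helpers below all walk them with the same idiom:)
 *
 *	elem = ring->start;
 *	do {
 *		... work on elem->ht_desc / elem->hw_desc ...
 *	} while ((elem = elem->next) != ring->start);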
+ */ +static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr, + dma_addr_t base, void __iomem * mmio_txp_ring) +{ + struct c2_tx_desc *tx_desc; + struct c2_txp_desc __iomem *txp_desc; + struct c2_element *elem; + int i; + + tx_ring->start = kmalloc(sizeof(*elem) * tx_ring->count, GFP_KERNEL); + if (!tx_ring->start) + return -ENOMEM; + + elem = tx_ring->start; + tx_desc = vaddr; + txp_desc = mmio_txp_ring; + for (i = 0; i < tx_ring->count; i++, elem++, tx_desc++, txp_desc++) { + tx_desc->len = 0; + tx_desc->status = 0; + + /* Set TXP_HTXD_UNINIT */ + __raw_writeq(cpu_to_be64(0x1122334455667788ULL), + (void __iomem *) txp_desc + C2_TXP_ADDR); + __raw_writew(0, (void __iomem *) txp_desc + C2_TXP_LEN); + __raw_writew(cpu_to_be16(TXP_HTXD_UNINIT), + (void __iomem *) txp_desc + C2_TXP_FLAGS); + + elem->skb = NULL; + elem->ht_desc = tx_desc; + elem->hw_desc = txp_desc; + + if (i == tx_ring->count - 1) { + elem->next = tx_ring->start; + tx_desc->next_offset = base; + } else { + elem->next = elem + 1; + tx_desc->next_offset = + base + (i + 1) * sizeof(*tx_desc); + } + } + + tx_ring->to_use = tx_ring->to_clean = tx_ring->start; + + return 0; +} + +/* + * Allocate RX ring elements and chain them together. + * One-to-one association of adapter descriptors with ring elements. + */ +static int c2_rx_ring_alloc(struct c2_ring *rx_ring, void *vaddr, + dma_addr_t base, void __iomem * mmio_rxp_ring) +{ + struct c2_rx_desc *rx_desc; + struct c2_rxp_desc __iomem *rxp_desc; + struct c2_element *elem; + int i; + + rx_ring->start = kmalloc(sizeof(*elem) * rx_ring->count, GFP_KERNEL); + if (!rx_ring->start) + return -ENOMEM; + + elem = rx_ring->start; + rx_desc = vaddr; + rxp_desc = mmio_rxp_ring; + for (i = 0; i < rx_ring->count; i++, elem++, rx_desc++, rxp_desc++) { + rx_desc->len = 0; + rx_desc->status = 0; + + /* Set RXP_HRXD_UNINIT */ + __raw_writew(cpu_to_be16(RXP_HRXD_OK), + (void __iomem *) rxp_desc + C2_RXP_STATUS); + __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_COUNT); + __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_LEN); + __raw_writeq(cpu_to_be64(0x99aabbccddeeffULL), + (void __iomem *) rxp_desc + C2_RXP_ADDR); + __raw_writew(cpu_to_be16(RXP_HRXD_UNINIT), + (void __iomem *) rxp_desc + C2_RXP_FLAGS); + + elem->skb = NULL; + elem->ht_desc = rx_desc; + elem->hw_desc = rxp_desc; + + if (i == rx_ring->count - 1) { + elem->next = rx_ring->start; + rx_desc->next_offset = base; + } else { + elem->next = elem + 1; + rx_desc->next_offset = + base + (i + 1) * sizeof(*rx_desc); + } + } + + rx_ring->to_use = rx_ring->to_clean = rx_ring->start; + + return 0; +} + +/* Setup buffer for receiving */ +static inline int c2_rx_alloc(struct c2_port *c2_port, struct c2_element *elem) +{ + struct c2_dev *c2dev = c2_port->c2dev; + struct c2_rx_desc *rx_desc = elem->ht_desc; + struct sk_buff *skb; + dma_addr_t mapaddr; + u32 maplen; + struct c2_rxp_hdr *rxp_hdr; + + skb = dev_alloc_skb(c2_port->rx_buf_size); + if (unlikely(!skb)) { + pr_debug("%s: out of memory for receive\n", + c2_port->netdev->name); + return -ENOMEM; + } + + /* Zero out the rxp hdr in the sk_buff */ + memset(skb->data, 0, sizeof(*rxp_hdr)); + + skb->dev = c2_port->netdev; + + maplen = c2_port->rx_buf_size; + mapaddr = + pci_map_single(c2dev->pcidev, skb->data, maplen, + PCI_DMA_FROMDEVICE); + + /* Set the sk_buff RXP_header to RXP_HRXD_READY */ + rxp_hdr = (struct c2_rxp_hdr *) skb->data; + rxp_hdr->flags = RXP_HRXD_READY; + + __raw_writew(0, elem->hw_desc + C2_RXP_STATUS); + __raw_writew(cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)), + 
elem->hw_desc + C2_RXP_LEN); + __raw_writeq(cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR); + __raw_writew(cpu_to_be16(RXP_HRXD_READY), elem->hw_desc + C2_RXP_FLAGS); + + elem->skb = skb; + elem->mapaddr = mapaddr; + elem->maplen = maplen; + rx_desc->len = maplen; + + return 0; +} + +/* + * Allocate buffers for the Rx ring + * For receive: rx_ring.to_clean is next received frame + */ +static int c2_rx_fill(struct c2_port *c2_port) +{ + struct c2_ring *rx_ring = &c2_port->rx_ring; + struct c2_element *elem; + int ret = 0; + + elem = rx_ring->start; + do { + if (c2_rx_alloc(c2_port, elem)) { + ret = 1; + break; + } + } while ((elem = elem->next) != rx_ring->start); + + rx_ring->to_clean = rx_ring->start; + return ret; +} + +/* Free all buffers in RX ring, assumes receiver stopped */ +static void c2_rx_clean(struct c2_port *c2_port) +{ + struct c2_dev *c2dev = c2_port->c2dev; + struct c2_ring *rx_ring = &c2_port->rx_ring; + struct c2_element *elem; + struct c2_rx_desc *rx_desc; + + elem = rx_ring->start; + do { + rx_desc = elem->ht_desc; + rx_desc->len = 0; + + __raw_writew(0, elem->hw_desc + C2_RXP_STATUS); + __raw_writew(0, elem->hw_desc + C2_RXP_COUNT); + __raw_writew(0, elem->hw_desc + C2_RXP_LEN); + __raw_writeq(cpu_to_be64(0x99aabbccddeeffULL), + elem->hw_desc + C2_RXP_ADDR); + __raw_writew(cpu_to_be16(RXP_HRXD_UNINIT), + elem->hw_desc + C2_RXP_FLAGS); + + if (elem->skb) { + pci_unmap_single(c2dev->pcidev, elem->mapaddr, + elem->maplen, PCI_DMA_FROMDEVICE); + dev_kfree_skb(elem->skb); + elem->skb = NULL; + } + } while ((elem = elem->next) != rx_ring->start); +} + +static inline int c2_tx_free(struct c2_dev *c2dev, struct c2_element *elem) +{ + struct c2_tx_desc *tx_desc = elem->ht_desc; + + tx_desc->len = 0; + + pci_unmap_single(c2dev->pcidev, elem->mapaddr, elem->maplen, + PCI_DMA_TODEVICE); + + if (elem->skb) { + dev_kfree_skb_any(elem->skb); + elem->skb = NULL; + } + + return 0; +} + +/* Free all buffers in TX ring, assumes transmitter stopped */ +static void c2_tx_clean(struct c2_port *c2_port) +{ + struct c2_ring *tx_ring = &c2_port->tx_ring; + struct c2_element *elem; + struct c2_txp_desc txp_htxd; + int retry; + unsigned long flags; + + spin_lock_irqsave(&c2_port->tx_lock, flags); + + elem = tx_ring->start; + + do { + retry = 0; + do { + txp_htxd.flags = + readw(elem->hw_desc + C2_TXP_FLAGS); + + if (txp_htxd.flags == TXP_HTXD_READY) { + retry = 1; + __raw_writew(0, + elem->hw_desc + C2_TXP_LEN); + __raw_writeq(0, + elem->hw_desc + C2_TXP_ADDR); + __raw_writew(cpu_to_be16(TXP_HTXD_DONE), + elem->hw_desc + C2_TXP_FLAGS); + c2_port->netstats.tx_dropped++; + break; + } else { + __raw_writew(0, + elem->hw_desc + C2_TXP_LEN); + __raw_writeq(cpu_to_be64(0x1122334455667788ULL), + elem->hw_desc + C2_TXP_ADDR); + __raw_writew(cpu_to_be16(TXP_HTXD_UNINIT), + elem->hw_desc + C2_TXP_FLAGS); + } + + c2_tx_free(c2_port->c2dev, elem); + + } while ((elem = elem->next) != tx_ring->start); + } while (retry); + + c2_port->tx_avail = c2_port->tx_ring.count - 1; + c2_port->c2dev->cur_tx = tx_ring->to_use - tx_ring->start; + + if (c2_port->tx_avail > MAX_SKB_FRAGS + 1) + netif_wake_queue(c2_port->netdev); + + spin_unlock_irqrestore(&c2_port->tx_lock, flags); +} + +/* + * Process transmit descriptors marked 'DONE' by the firmware, + * freeing up their unneeded sk_buffs. 
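 *
 * (Editor's note, not part of the patch: completion is detected by
 * polling the flags word the adapter writes back into each TXP
 * descriptor, as the loop below does:)
 *
 *	txp_htxd.flags = be16_to_cpu(readw(elem->hw_desc + C2_TXP_FLAGS));
 *	if (txp_htxd.flags != TXP_HTXD_DONE)
 *		break;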
+ */ +static void c2_tx_interrupt(struct net_device *netdev) +{ + struct c2_port *c2_port = netdev_priv(netdev); + struct c2_dev *c2dev = c2_port->c2dev; + struct c2_ring *tx_ring = &c2_port->tx_ring; + struct c2_element *elem; + struct c2_txp_desc txp_htxd; + + spin_lock(&c2_port->tx_lock); + + for (elem = tx_ring->to_clean; elem != tx_ring->to_use; + elem = elem->next) { + txp_htxd.flags = + be16_to_cpu(readw(elem->hw_desc + C2_TXP_FLAGS)); + + if (txp_htxd.flags != TXP_HTXD_DONE) + break; + + if (netif_msg_tx_done(c2_port)) { + /* PCI reads are expensive in fast path */ + txp_htxd.len = + be16_to_cpu(readw(elem->hw_desc + C2_TXP_LEN)); + pr_debug("%s: tx done slot %3Zu status 0x%x len " + "%5u bytes\n", + netdev->name, elem - tx_ring->start, + txp_htxd.flags, txp_htxd.len); + } + + c2_tx_free(c2dev, elem); + ++(c2_port->tx_avail); + } + + tx_ring->to_clean = elem; + + if (netif_queue_stopped(netdev) + && c2_port->tx_avail > MAX_SKB_FRAGS + 1) + netif_wake_queue(netdev); + + spin_unlock(&c2_port->tx_lock); +} + +static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem) +{ + struct c2_rx_desc *rx_desc = elem->ht_desc; + struct c2_rxp_hdr *rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data; + + if (rxp_hdr->status != RXP_HRXD_OK || + rxp_hdr->len > (rx_desc->len - sizeof(*rxp_hdr))) { + pr_debug("BAD RXP_HRXD\n"); + pr_debug(" rx_desc : %p\n", rx_desc); + pr_debug(" index : %Zu\n", + elem - c2_port->rx_ring.start); + pr_debug(" len : %u\n", rx_desc->len); + pr_debug(" rxp_hdr : %p [PA %p]\n", rxp_hdr, + (void *) __pa((unsigned long) rxp_hdr)); + pr_debug(" flags : 0x%x\n", rxp_hdr->flags); + pr_debug(" status: 0x%x\n", rxp_hdr->status); + pr_debug(" len : %u\n", rxp_hdr->len); + pr_debug(" rsvd : 0x%x\n", rxp_hdr->rsvd); + } + + /* Setup the skb for reuse since we're dropping this pkt */ + elem->skb->tail = elem->skb->data = elem->skb->head; + + /* Zero out the rxp hdr in the sk_buff */ + memset(elem->skb->data, 0, sizeof(*rxp_hdr)); + + /* Write the descriptor to the adapter's rx ring */ + __raw_writew(0, elem->hw_desc + C2_RXP_STATUS); + __raw_writew(0, elem->hw_desc + C2_RXP_COUNT); + __raw_writew(cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)), + elem->hw_desc + C2_RXP_LEN); + __raw_writeq(cpu_to_be64(elem->mapaddr), elem->hw_desc + C2_RXP_ADDR); + __raw_writew(cpu_to_be16(RXP_HRXD_READY), elem->hw_desc + C2_RXP_FLAGS); + + pr_debug("packet dropped\n"); + c2_port->netstats.rx_dropped++; +} + +static void c2_rx_interrupt(struct net_device *netdev) +{ + struct c2_port *c2_port = netdev_priv(netdev); + struct c2_dev *c2dev = c2_port->c2dev; + struct c2_ring *rx_ring = &c2_port->rx_ring; + struct c2_element *elem; + struct c2_rx_desc *rx_desc; + struct c2_rxp_hdr *rxp_hdr; + struct sk_buff *skb; + dma_addr_t mapaddr; + u32 maplen, buflen; + unsigned long flags; + + spin_lock_irqsave(&c2dev->lock, flags); + + /* Begin where we left off */ + rx_ring->to_clean = rx_ring->start + c2dev->cur_rx; + + for (elem = rx_ring->to_clean; elem->next != rx_ring->to_clean; + elem = elem->next) { + rx_desc = elem->ht_desc; + mapaddr = elem->mapaddr; + maplen = elem->maplen; + skb = elem->skb; + rxp_hdr = (struct c2_rxp_hdr *) skb->data; + + if (rxp_hdr->flags != RXP_HRXD_DONE) + break; + buflen = rxp_hdr->len; + + /* Sanity check the RXP header */ + if (rxp_hdr->status != RXP_HRXD_OK || + buflen > (rx_desc->len - sizeof(*rxp_hdr))) { + c2_rx_error(c2_port, elem); + continue; + } + + /* + * Allocate and map a new skb for replenishing the host + * RX desc + */ + if (c2_rx_alloc(c2_port, 
elem)) { + c2_rx_error(c2_port, elem); + continue; + } + + /* Unmap the old skb */ + pci_unmap_single(c2dev->pcidev, mapaddr, maplen, + PCI_DMA_FROMDEVICE); + + prefetch(skb->data); + + /* + * Skip past the leading 8 bytes comprising of the + * "struct c2_rxp_hdr", prepended by the adapter + * to the usual Ethernet header ("struct ethhdr"), + * to the start of the raw Ethernet packet. + * + * Fix up the various fields in the sk_buff before + * passing it up to netif_rx(). The transfer size + * (in bytes) specified by the adapter len field of + * the "struct rxp_hdr_t" does NOT include the + * "sizeof(struct c2_rxp_hdr)". + */ + skb->data += sizeof(*rxp_hdr); + skb->tail = skb->data + buflen; + skb->len = buflen; + skb->dev = netdev; + skb->protocol = eth_type_trans(skb, netdev); + + netif_rx(skb); + + netdev->last_rx = jiffies; + c2_port->netstats.rx_packets++; + c2_port->netstats.rx_bytes += buflen; + } + + /* Save where we left off */ + rx_ring->to_clean = elem; + c2dev->cur_rx = elem - rx_ring->start; + C2_SET_CUR_RX(c2dev, c2dev->cur_rx); + + spin_unlock_irqrestore(&c2dev->lock, flags); +} + +/* + * Handle netisr0 TX & RX interrupts. + */ +static irqreturn_t c2_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + unsigned int netisr0, dmaisr; + int handled = 0; + struct c2_dev *c2dev = (struct c2_dev *) dev_id; + + /* Process CCILNET interrupts */ + netisr0 = readl(c2dev->regs + C2_NISR0); + if (netisr0) { + + /* + * There is an issue with the firmware that always + * provides the status of RX for both TX & RX + * interrupts. So process both queues here. + */ + c2_rx_interrupt(c2dev->netdev); + c2_tx_interrupt(c2dev->netdev); + + /* Clear the interrupt */ + writel(netisr0, c2dev->regs + C2_NISR0); + handled++; + } + + /* Process RNIC interrupts */ + dmaisr = readl(c2dev->regs + C2_DISR); + if (dmaisr) { + writel(dmaisr, c2dev->regs + C2_DISR); + c2_rnic_interrupt(c2dev); + handled++; + } + + if (handled) { + return IRQ_HANDLED; + } else { + return IRQ_NONE; + } +} + +static int c2_up(struct net_device *netdev) +{ + struct c2_port *c2_port = netdev_priv(netdev); + struct c2_dev *c2dev = c2_port->c2dev; + struct c2_element *elem; + struct c2_rxp_hdr *rxp_hdr; + struct in_device *in_dev; + size_t rx_size, tx_size; + int ret, i; + unsigned int netimr0; + + if (netif_msg_ifup(c2_port)) + pr_debug("%s: enabling interface\n", netdev->name); + + /* Set the Rx buffer size based on MTU */ + c2_set_rxbufsize(c2_port); + + /* Allocate DMA'able memory for Tx/Rx host descriptor rings */ + rx_size = c2_port->rx_ring.count * sizeof(struct c2_rx_desc); + tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc); + + c2_port->mem_size = tx_size + rx_size; + c2_port->mem = pci_alloc_consistent(c2dev->pcidev, c2_port->mem_size, + &c2_port->dma); + if (c2_port->mem == NULL) { + pr_debug("Unable to allocate memory for " + "host descriptor rings\n"); + return -ENOMEM; + } + + memset(c2_port->mem, 0, c2_port->mem_size); + + /* Create the Rx host descriptor ring */ + if ((ret = + c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma, + c2dev->mmio_rxp_ring))) { + pr_debug("Unable to create RX ring\n"); + goto bail0; + } + + /* Allocate Rx buffers for the host descriptor ring */ + if (c2_rx_fill(c2_port)) { + pr_debug("Unable to fill RX ring\n"); + goto bail1; + } + + /* Create the Tx host descriptor ring */ + if ((ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size, + c2_port->dma + rx_size, + c2dev->mmio_txp_ring))) { + pr_debug("Unable to create TX ring\n"); + goto bail1; + } 
+ + /* Set the TX pointer to where we left off */ + c2_port->tx_avail = c2_port->tx_ring.count - 1; + c2_port->tx_ring.to_use = c2_port->tx_ring.to_clean = + c2_port->tx_ring.start + c2dev->cur_tx; + + /* missing: Initialize MAC */ + + BUG_ON(c2_port->tx_ring.to_use != c2_port->tx_ring.to_clean); + + /* Reset the adapter, ensures the driver is in sync with the RXP */ + c2_reset(c2_port); + + /* Reset the READY bit in the sk_buff RXP headers & adapter HRXDQ */ + for (i = 0, elem = c2_port->rx_ring.start; i < c2_port->rx_ring.count; + i++, elem++) { + rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data; + rxp_hdr->flags = 0; + __raw_writew(cpu_to_be16(RXP_HRXD_READY), + elem->hw_desc + C2_RXP_FLAGS); + } + + /* Enable network packets */ + netif_start_queue(netdev); + + /* Enable IRQ */ + writel(0, c2dev->regs + C2_IDIS); + netimr0 = readl(c2dev->regs + C2_NIMR0); + netimr0 &= ~(C2_PCI_HTX_INT | C2_PCI_HRX_INT); + writel(netimr0, c2dev->regs + C2_NIMR0); + + /* Tell the stack to ignore arp requests for ipaddrs bound to + * other interfaces. This is needed to prevent the host stack + * from responding to arp requests to the ipaddr bound on the + * rdma interface. + */ + in_dev = in_dev_get(netdev); + in_dev->cnf.arp_ignore = 1; + in_dev_put(in_dev); + + return 0; + + bail1: + c2_rx_clean(c2_port); + kfree(c2_port->rx_ring.start); + + bail0: + pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem, + c2_port->dma); + + return ret; +} + +static int c2_down(struct net_device *netdev) +{ + struct c2_port *c2_port = netdev_priv(netdev); + struct c2_dev *c2dev = c2_port->c2dev; + + if (netif_msg_ifdown(c2_port)) + pr_debug("%s: disabling interface\n", + netdev->name); + + /* Wait for all the queued packets to get sent */ + c2_tx_interrupt(netdev); + + /* Disable network packets */ + netif_stop_queue(netdev); + + /* Disable IRQs by clearing the interrupt mask */ + writel(1, c2dev->regs + C2_IDIS); + writel(0, c2dev->regs + C2_NIMR0); + + /* missing: Stop transmitter */ + + /* missing: Stop receiver */ + + /* Reset the adapter, ensures the driver is in sync with the RXP */ + c2_reset(c2_port); + + /* missing: Turn off LEDs here */ + + /* Free all buffers in the host descriptor rings */ + c2_tx_clean(c2_port); + c2_rx_clean(c2_port); + + /* Free the host descriptor rings */ + kfree(c2_port->rx_ring.start); + kfree(c2_port->tx_ring.start); + pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem, + c2_port->dma); + + return 0; +} + +static void c2_reset(struct c2_port *c2_port) +{ + struct c2_dev *c2dev = c2_port->c2dev; + unsigned int cur_rx = c2dev->cur_rx; + + /* Tell the hardware to quiesce */ + C2_SET_CUR_RX(c2dev, cur_rx | C2_PCI_HRX_QUI); + + /* + * The hardware will reset the C2_PCI_HRX_QUI bit once + * the RXP is quiesced. Wait 2 seconds for this. 
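+ * If the bit is still set after the sleep, the failure is only logged below and the reset path continues regardless.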
+ */ + ssleep(2); + + cur_rx = C2_GET_CUR_RX(c2dev); + + if (cur_rx & C2_PCI_HRX_QUI) + pr_debug("c2_reset: failed to quiesce the hardware!\n"); + + cur_rx &= ~C2_PCI_HRX_QUI; + + c2dev->cur_rx = cur_rx; + + pr_debug("Current RX: %u\n", c2dev->cur_rx); +} + +static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct c2_port *c2_port = netdev_priv(netdev); + struct c2_dev *c2dev = c2_port->c2dev; + struct c2_ring *tx_ring = &c2_port->tx_ring; + struct c2_element *elem; + dma_addr_t mapaddr; + u32 maplen; + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&c2_port->tx_lock, flags); + + if (unlikely(c2_port->tx_avail < (skb_shinfo(skb)->nr_frags + 1))) { + netif_stop_queue(netdev); + spin_unlock_irqrestore(&c2_port->tx_lock, flags); + + pr_debug("%s: Tx ring full when queue awake!\n", + netdev->name); + return NETDEV_TX_BUSY; + } + + maplen = skb_headlen(skb); + mapaddr = + pci_map_single(c2dev->pcidev, skb->data, maplen, PCI_DMA_TODEVICE); + + elem = tx_ring->to_use; + elem->skb = skb; + elem->mapaddr = mapaddr; + elem->maplen = maplen; + + /* Tell HW to xmit */ + __raw_writeq(cpu_to_be64(mapaddr), elem->hw_desc + C2_TXP_ADDR); + __raw_writew(cpu_to_be16(maplen), elem->hw_desc + C2_TXP_LEN); + __raw_writew(cpu_to_be16(TXP_HTXD_READY), elem->hw_desc + C2_TXP_FLAGS); + + c2_port->netstats.tx_packets++; + c2_port->netstats.tx_bytes += maplen; + + /* Loop thru additional data fragments and queue them */ + if (skb_shinfo(skb)->nr_frags) { + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + maplen = frag->size; + mapaddr = + pci_map_page(c2dev->pcidev, frag->page, + frag->page_offset, maplen, + PCI_DMA_TODEVICE); + + elem = elem->next; + elem->skb = NULL; + elem->mapaddr = mapaddr; + elem->maplen = maplen; + + /* Tell HW to xmit */ + __raw_writeq(cpu_to_be64(mapaddr), + elem->hw_desc + C2_TXP_ADDR); + __raw_writew(cpu_to_be16(maplen), + elem->hw_desc + C2_TXP_LEN); + __raw_writew(cpu_to_be16(TXP_HTXD_READY), + elem->hw_desc + C2_TXP_FLAGS); + + c2_port->netstats.tx_packets++; + c2_port->netstats.tx_bytes += maplen; + } + } + + tx_ring->to_use = elem->next; + c2_port->tx_avail -= (skb_shinfo(skb)->nr_frags + 1); + + if (c2_port->tx_avail <= MAX_SKB_FRAGS + 1) { + netif_stop_queue(netdev); + if (netif_msg_tx_queued(c2_port)) + pr_debug("%s: transmit queue full\n", + netdev->name); + } + + spin_unlock_irqrestore(&c2_port->tx_lock, flags); + + netdev->trans_start = jiffies; + + return NETDEV_TX_OK; +} + +static struct net_device_stats *c2_get_stats(struct net_device *netdev) +{ + struct c2_port *c2_port = netdev_priv(netdev); + + return &c2_port->netstats; +} + +static void c2_tx_timeout(struct net_device *netdev) +{ + struct c2_port *c2_port = netdev_priv(netdev); + + if (netif_msg_timer(c2_port)) + pr_debug("%s: tx timeout\n", netdev->name); + + c2_tx_clean(c2_port); +} + +static int c2_change_mtu(struct net_device *netdev, int new_mtu) +{ + int ret = 0; + + if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) + return -EINVAL; + + netdev->mtu = new_mtu; + + if (netif_running(netdev)) { + c2_down(netdev); + + c2_up(netdev); + } + + return ret; +} + +/* Initialize network device */ +static struct net_device *c2_devinit(struct c2_dev *c2dev, + void __iomem * mmio_addr) +{ + struct c2_port *c2_port = NULL; + struct net_device *netdev = alloc_etherdev(sizeof(*c2_port)); + + if (!netdev) { + pr_debug("c2_port etherdev alloc failed"); + return NULL; + } + + SET_MODULE_OWNER(netdev); + SET_NETDEV_DEV(netdev, 
&c2dev->pcidev->dev); + + netdev->open = c2_up; + netdev->stop = c2_down; + netdev->hard_start_xmit = c2_xmit_frame; + netdev->get_stats = c2_get_stats; + netdev->tx_timeout = c2_tx_timeout; + netdev->change_mtu = c2_change_mtu; + netdev->watchdog_timeo = C2_TX_TIMEOUT; + netdev->irq = c2dev->pcidev->irq; + + c2_port = netdev_priv(netdev); + c2_port->netdev = netdev; + c2_port->c2dev = c2dev; + c2_port->msg_enable = netif_msg_init(debug, default_msg); + c2_port->tx_ring.count = C2_NUM_TX_DESC; + c2_port->rx_ring.count = C2_NUM_RX_DESC; + + spin_lock_init(&c2_port->tx_lock); + + /* Copy our 48-bit ethernet hardware address */ + memcpy_fromio(netdev->dev_addr, mmio_addr + C2_REGS_ENADDR, 6); + + /* Validate the MAC address */ + if (!is_valid_ether_addr(netdev->dev_addr)) { + pr_debug("Invalid MAC Address\n"); + c2_print_macaddr(netdev); + free_netdev(netdev); + return NULL; + } + + c2dev->netdev = netdev; + + return netdev; +} + +static int __devinit c2_probe(struct pci_dev *pcidev, + const struct pci_device_id *ent) +{ + int ret = 0, i; + unsigned long reg0_start, reg0_flags, reg0_len; + unsigned long reg2_start, reg2_flags, reg2_len; + unsigned long reg4_start, reg4_flags, reg4_len; + unsigned kva_map_size; + struct net_device *netdev = NULL; + struct c2_dev *c2dev = NULL; + void __iomem *mmio_regs = NULL; + + printk(KERN_INFO PFX "AMSO1100 Gigabit Ethernet driver v%s loaded\n", + DRV_VERSION); + + /* Enable PCI device */ + ret = pci_enable_device(pcidev); + if (ret) { + printk(KERN_ERR PFX "%s: Unable to enable PCI device\n", + pci_name(pcidev)); + goto bail0; + } + + reg0_start = pci_resource_start(pcidev, BAR_0); + reg0_len = pci_resource_len(pcidev, BAR_0); + reg0_flags = pci_resource_flags(pcidev, BAR_0); + + reg2_start = pci_resource_start(pcidev, BAR_2); + reg2_len = pci_resource_len(pcidev, BAR_2); + reg2_flags = pci_resource_flags(pcidev, BAR_2); + + reg4_start = pci_resource_start(pcidev, BAR_4); + reg4_len = pci_resource_len(pcidev, BAR_4); + reg4_flags = pci_resource_flags(pcidev, BAR_4); + + pr_debug("BAR0 size = 0x%lX bytes\n", reg0_len); + pr_debug("BAR2 size = 0x%lX bytes\n", reg2_len); + pr_debug("BAR4 size = 0x%lX bytes\n", reg4_len); + + /* Make sure PCI base addr are MMIO */ + if (!(reg0_flags & IORESOURCE_MEM) || + !(reg2_flags & IORESOURCE_MEM) || !(reg4_flags & IORESOURCE_MEM)) { + printk(KERN_ERR PFX "PCI regions not an MMIO resource\n"); + ret = -ENODEV; + goto bail1; + } + + /* Check for weird/broken PCI region reporting */ + if ((reg0_len < C2_REG0_SIZE) || + (reg2_len < C2_REG2_SIZE) || (reg4_len < C2_REG4_SIZE)) { + printk(KERN_ERR PFX "Invalid PCI region sizes\n"); + ret = -ENODEV; + goto bail1; + } + + /* Reserve PCI I/O and memory resources */ + ret = pci_request_regions(pcidev, DRV_NAME); + if (ret) { + printk(KERN_ERR PFX "%s: Unable to request regions\n", + pci_name(pcidev)); + goto bail1; + } + + if ((sizeof(dma_addr_t) > 4)) { + ret = pci_set_dma_mask(pcidev, DMA_64BIT_MASK); + if (ret < 0) { + printk(KERN_ERR PFX "64b DMA configuration failed\n"); + goto bail2; + } + } else { + ret = pci_set_dma_mask(pcidev, DMA_32BIT_MASK); + if (ret < 0) { + printk(KERN_ERR PFX "32b DMA configuration failed\n"); + goto bail2; + } + } + + /* Enables bus-mastering on the device */ + pci_set_master(pcidev); + + /* Remap the adapter PCI registers in BAR4 */ + mmio_regs = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET, + sizeof(struct c2_adapter_pci_regs)); + if (mmio_regs == 0UL) { + printk(KERN_ERR PFX + "Unable to remap adapter PCI registers in BAR4\n"); + ret = -EIO; 
+ goto bail2; + } + + /* Validate PCI regs magic */ + for (i = 0; i < sizeof(c2_magic); i++) { + if (c2_magic[i] != readb(mmio_regs + C2_REGS_MAGIC + i)) { + printk(KERN_ERR PFX "Downlevel firmware boot loader " + "[%d/%Zd: got 0x%x, exp 0x%x]. Use the cc_flash " + "utility to update your boot loader\n", + i + 1, sizeof(c2_magic), + readb(mmio_regs + C2_REGS_MAGIC + i), + c2_magic[i]); + printk(KERN_ERR PFX "Adapter not claimed\n"); + iounmap(mmio_regs); + ret = -EIO; + goto bail2; + } + } + + /* Validate the adapter version */ + if (be32_to_cpu(readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) { + printk(KERN_ERR PFX "Version mismatch " + "[fw=%u, c2=%u], Adapter not claimed\n", + be32_to_cpu(readl(mmio_regs + C2_REGS_VERS)), + C2_VERSION); + ret = -EINVAL; + iounmap(mmio_regs); + goto bail2; + } + + /* Validate the adapter IVN */ + if (be32_to_cpu(readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) { + printk(KERN_ERR PFX "Downlevel firmware level. You should be using " + "the OpenIB device support kit. " + "[fw=0x%x, c2=0x%x], Adapter not claimed\n", + be32_to_cpu(readl(mmio_regs + C2_REGS_IVN)), + C2_IVN); + ret = -EINVAL; + iounmap(mmio_regs); + goto bail2; + } + + /* Allocate hardware structure */ + c2dev = (struct c2_dev *) ib_alloc_device(sizeof(*c2dev)); + if (!c2dev) { + printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n", + pci_name(pcidev)); + ret = -ENOMEM; + iounmap(mmio_regs); + goto bail2; + } + + memset(c2dev, 0, sizeof(*c2dev)); + spin_lock_init(&c2dev->lock); + c2dev->pcidev = pcidev; + c2dev->cur_tx = 0; + + /* Get the last RX index */ + c2dev->cur_rx = + (be32_to_cpu(readl(mmio_regs + C2_REGS_HRX_CUR)) - + 0xffffc000) / sizeof(struct c2_rxp_desc); + + /* Request an interrupt line for the driver */ + ret = request_irq(pcidev->irq, c2_interrupt, SA_SHIRQ, DRV_NAME, c2dev); + if (ret) { + printk(KERN_ERR PFX "%s: requested IRQ %u is busy\n", + pci_name(pcidev), pcidev->irq); + iounmap(mmio_regs); + goto bail3; + } + + /* Set driver specific data */ + pci_set_drvdata(pcidev, c2dev); + + /* Initialize network device */ + if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) { + ret = -ENODEV; + iounmap(mmio_regs); + goto bail4; + } + + /* Save off the actual size prior to unmapping mmio_regs */ + kva_map_size = be32_to_cpu(readl(mmio_regs + C2_REGS_PCI_WINSIZE)); + + /* Unmap the adapter PCI registers in BAR4 */ + iounmap(mmio_regs); + + /* Register network device */ + ret = register_netdev(netdev); + if (ret) { + printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n", + ret); + goto bail5; + } + + /* Disable network packets */ + netif_stop_queue(netdev); + + /* Remap the adapter HRXDQ PA space to kernel VA space */ + c2dev->mmio_rxp_ring = ioremap_nocache(reg4_start + C2_RXP_HRXDQ_OFFSET, + C2_RXP_HRXDQ_SIZE); + if (c2dev->mmio_rxp_ring == 0UL) { + printk(KERN_ERR PFX "Unable to remap MMIO HRXDQ region\n"); + ret = -EIO; + goto bail6; + } + + /* Remap the adapter HTXDQ PA space to kernel VA space */ + c2dev->mmio_txp_ring = ioremap_nocache(reg4_start + C2_TXP_HTXDQ_OFFSET, + C2_TXP_HTXDQ_SIZE); + if (c2dev->mmio_txp_ring == 0UL) { + printk(KERN_ERR PFX "Unable to remap MMIO HTXDQ region\n"); + ret = -EIO; + goto bail7; + } + + /* Save off the current RX index in the last 4 bytes of the TXP Ring */ + C2_SET_CUR_RX(c2dev, c2dev->cur_rx); + + /* Remap the PCI registers in adapter BAR0 to kernel VA space */ + c2dev->regs = ioremap_nocache(reg0_start, reg0_len); + if (c2dev->regs == 0UL) { + printk(KERN_ERR PFX "Unable to remap BAR0\n"); + ret = -EIO; + goto bail8; + } + + /* Remap the PCI
registers in adapter BAR4 to kernel VA space */ + c2dev->pa = reg4_start + C2_PCI_REGS_OFFSET; + c2dev->kva = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET, + kva_map_size); + if (c2dev->kva == 0UL) { + printk(KERN_ERR PFX "Unable to remap BAR4\n"); + ret = -EIO; + goto bail9; + } + + /* Print out the MAC address */ + c2_print_macaddr(netdev); + + ret = c2_rnic_init(c2dev); + if (ret) { + printk(KERN_ERR PFX "c2_rnic_init failed: %d\n", ret); + goto bail10; + } + + c2_register_device(c2dev); + + return 0; + + bail10: + iounmap(c2dev->kva); + + bail9: + iounmap(c2dev->regs); + + bail8: + iounmap(c2dev->mmio_txp_ring); + + bail7: + iounmap(c2dev->mmio_rxp_ring); + + bail6: + unregister_netdev(netdev); + + bail5: + free_netdev(netdev); + + bail4: + free_irq(pcidev->irq, c2dev); + + bail3: + ib_dealloc_device(&c2dev->ibdev); + + bail2: + pci_release_regions(pcidev); + + bail1: + pci_disable_device(pcidev); + + bail0: + return ret; +} + +static void __devexit c2_remove(struct pci_dev *pcidev) +{ + struct c2_dev *c2dev = pci_get_drvdata(pcidev); + struct net_device *netdev = c2dev->netdev; + + /* Unregister with OpenIB */ + c2_unregister_device(c2dev); + + /* Clean up the RNIC resources */ + c2_rnic_term(c2dev); + + /* Remove network device from the kernel */ + unregister_netdev(netdev); + + /* Free network device */ + free_netdev(netdev); + + /* Free the interrupt line */ + free_irq(pcidev->irq, c2dev); + + /* missing: Turn LEDs off here */ + + /* Unmap adapter PA space */ + iounmap(c2dev->kva); + iounmap(c2dev->regs); + iounmap(c2dev->mmio_txp_ring); + iounmap(c2dev->mmio_rxp_ring); + + /* Free the hardware structure */ + ib_dealloc_device(&c2dev->ibdev); + + /* Release reserved PCI I/O and memory resources */ + pci_release_regions(pcidev); + + /* Disable PCI device */ + pci_disable_device(pcidev); + + /* Clear driver specific data */ + pci_set_drvdata(pcidev, NULL); +} + +static struct pci_driver c2_pci_driver = { + .name = DRV_NAME, + .id_table = c2_pci_table, + .probe = c2_probe, + .remove = __devexit_p(c2_remove), +}; + +static int __init c2_init_module(void) +{ + return pci_module_init(&c2_pci_driver); +} + +static void __exit c2_exit_module(void) +{ + pci_unregister_driver(&c2_pci_driver); +} + +module_init(c2_init_module); +module_exit(c2_exit_module); diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h new file mode 100644 index 000000000000..1b17dcdd0505 --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2.h @@ -0,0 +1,551 @@ +/* + * Copyright (c) 2005 Ammasso, Inc. All rights reserved. + * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __C2_H +#define __C2_H + +#include <linux/netdevice.h> +#include <linux/spinlock.h> +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/idr.h> +#include <asm/semaphore.h> + +#include "c2_provider.h" +#include "c2_mq.h" +#include "c2_status.h" + +#define DRV_NAME "c2" +#define DRV_VERSION "1.1" +#define PFX DRV_NAME ": " + +#define BAR_0 0 +#define BAR_2 2 +#define BAR_4 4 + +#define RX_BUF_SIZE (1536 + 8) +#define ETH_JUMBO_MTU 9000 +#define C2_MAGIC "CEPHEUS" +#define C2_VERSION 4 +#define C2_IVN (18 & 0x7fffffff) + +#define C2_REG0_SIZE (16 * 1024) +#define C2_REG2_SIZE (2 * 1024 * 1024) +#define C2_REG4_SIZE (256 * 1024 * 1024) +#define C2_NUM_TX_DESC 341 +#define C2_NUM_RX_DESC 256 +#define C2_PCI_REGS_OFFSET (0x10000) +#define C2_RXP_HRXDQ_OFFSET (((C2_REG4_SIZE)/2)) +#define C2_RXP_HRXDQ_SIZE (4096) +#define C2_TXP_HTXDQ_OFFSET (((C2_REG4_SIZE)/2) + C2_RXP_HRXDQ_SIZE) +#define C2_TXP_HTXDQ_SIZE (4096) +#define C2_TX_TIMEOUT (6*HZ) + +/* CEPHEUS */ +static const u8 c2_magic[] = { + 0x43, 0x45, 0x50, 0x48, 0x45, 0x55, 0x53 +}; + +enum adapter_pci_regs { + C2_REGS_MAGIC = 0x0000, + C2_REGS_VERS = 0x0008, + C2_REGS_IVN = 0x000C, + C2_REGS_PCI_WINSIZE = 0x0010, + C2_REGS_Q0_QSIZE = 0x0014, + C2_REGS_Q0_MSGSIZE = 0x0018, + C2_REGS_Q0_POOLSTART = 0x001C, + C2_REGS_Q0_SHARED = 0x0020, + C2_REGS_Q1_QSIZE = 0x0024, + C2_REGS_Q1_MSGSIZE = 0x0028, + C2_REGS_Q1_SHARED = 0x0030, + C2_REGS_Q2_QSIZE = 0x0034, + C2_REGS_Q2_MSGSIZE = 0x0038, + C2_REGS_Q2_SHARED = 0x0040, + C2_REGS_ENADDR = 0x004C, + C2_REGS_RDMA_ENADDR = 0x0054, + C2_REGS_HRX_CUR = 0x006C, +}; + +struct c2_adapter_pci_regs { + char reg_magic[8]; + u32 version; + u32 ivn; + u32 pci_window_size; + u32 q0_q_size; + u32 q0_msg_size; + u32 q0_pool_start; + u32 q0_shared; + u32 q1_q_size; + u32 q1_msg_size; + u32 q1_pool_start; + u32 q1_shared; + u32 q2_q_size; + u32 q2_msg_size; + u32 q2_pool_start; + u32 q2_shared; + u32 log_start; + u32 log_size; + u8 host_enaddr[8]; + u8 rdma_enaddr[8]; + u32 crash_entry; + u32 crash_ready[2]; + u32 fw_txd_cur; + u32 fw_hrxd_cur; + u32 fw_rxd_cur; +}; + +enum pci_regs { + C2_HISR = 0x0000, + C2_DISR = 0x0004, + C2_HIMR = 0x0008, + C2_DIMR = 0x000C, + C2_NISR0 = 0x0010, + C2_NISR1 = 0x0014, + C2_NIMR0 = 0x0018, + C2_NIMR1 = 0x001C, + C2_IDIS = 0x0020, +}; + +enum { + C2_PCI_HRX_INT = 1 << 8, + C2_PCI_HTX_INT = 1 << 17, + C2_PCI_HRX_QUI = 1 << 31, +}; + +/* + * Cepheus registers in BAR0. 
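+ * The struct below mirrors the pci_regs enum above: one u32 field per register, in offset order (C2_HISR at 0x0000 through C2_IDIS at 0x0020).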
+ */ +struct c2_pci_regs { + u32 hostisr; + u32 dmaisr; + u32 hostimr; + u32 dmaimr; + u32 netisr0; + u32 netisr1; + u32 netimr0; + u32 netimr1; + u32 int_disable; +}; + +/* TXP flags */ +enum c2_txp_flags { + TXP_HTXD_DONE = 0, + TXP_HTXD_READY = 1 << 0, + TXP_HTXD_UNINIT = 1 << 1, +}; + +/* RXP flags */ +enum c2_rxp_flags { + RXP_HRXD_UNINIT = 0, + RXP_HRXD_READY = 1 << 0, + RXP_HRXD_DONE = 1 << 1, +}; + +/* RXP status */ +enum c2_rxp_status { + RXP_HRXD_ZERO = 0, + RXP_HRXD_OK = 1 << 0, + RXP_HRXD_BUF_OV = 1 << 1, +}; + +/* TXP descriptor fields */ +enum txp_desc { + C2_TXP_FLAGS = 0x0000, + C2_TXP_LEN = 0x0002, + C2_TXP_ADDR = 0x0004, +}; + +/* RXP descriptor fields */ +enum rxp_desc { + C2_RXP_FLAGS = 0x0000, + C2_RXP_STATUS = 0x0002, + C2_RXP_COUNT = 0x0004, + C2_RXP_LEN = 0x0006, + C2_RXP_ADDR = 0x0008, +}; + +struct c2_txp_desc { + u16 flags; + u16 len; + u64 addr; +} __attribute__ ((packed)); + +struct c2_rxp_desc { + u16 flags; + u16 status; + u16 count; + u16 len; + u64 addr; +} __attribute__ ((packed)); + +struct c2_rxp_hdr { + u16 flags; + u16 status; + u16 len; + u16 rsvd; +} __attribute__ ((packed)); + +struct c2_tx_desc { + u32 len; + u32 status; + dma_addr_t next_offset; +}; + +struct c2_rx_desc { + u32 len; + u32 status; + dma_addr_t next_offset; +}; + +struct c2_alloc { + u32 last; + u32 max; + spinlock_t lock; + unsigned long *table; +}; + +struct c2_array { + struct { + void **page; + int used; + } *page_list; +}; + +/* + * The MQ shared pointer pool is organized as a linked list of + * chunks. Each chunk contains a linked list of free shared pointers + * that can be allocated to a given user mode client. + * + */ +struct sp_chunk { + struct sp_chunk *next; + dma_addr_t dma_addr; + DECLARE_PCI_UNMAP_ADDR(mapping); + u16 head; + u16 shared_ptr[0]; +}; + +struct c2_pd_table { + u32 last; + u32 max; + spinlock_t lock; + unsigned long *table; +}; + +struct c2_qp_table { + struct idr idr; + spinlock_t lock; + int last; +}; + +struct c2_element { + struct c2_element *next; + void *ht_desc; /* host descriptor */ + void __iomem *hw_desc; /* hardware descriptor */ + struct sk_buff *skb; + dma_addr_t mapaddr; + u32 maplen; +}; + +struct c2_ring { + struct c2_element *to_clean; + struct c2_element *to_use; + struct c2_element *start; + unsigned long count; +}; + +struct c2_dev { + struct ib_device ibdev; + void __iomem *regs; + void __iomem *mmio_txp_ring; /* remapped adapter memory for hw rings */ + void __iomem *mmio_rxp_ring; + spinlock_t lock; + struct pci_dev *pcidev; + struct net_device *netdev; + struct net_device *pseudo_netdev; + unsigned int cur_tx; + unsigned int cur_rx; + u32 adapter_handle; + int device_cap_flags; + void __iomem *kva; /* KVA device memory */ + unsigned long pa; /* PA device memory */ + void **qptr_array; + + kmem_cache_t *host_msg_cache; + + struct list_head cca_link; /* adapter list */ + struct list_head eh_wakeup_list; /* event wakeup list */ + wait_queue_head_t req_vq_wo; + + /* Cached RNIC properties */ + struct ib_device_attr props; + + struct c2_pd_table pd_table; + struct c2_qp_table qp_table; + int ports; /* num of GigE ports */ + int devnum; + spinlock_t vqlock; /* sync vbs req MQ */ + + /* Verbs Queues */ + struct c2_mq req_vq; /* Verbs Request MQ */ + struct c2_mq rep_vq; /* Verbs Reply MQ */ + struct c2_mq aeq; /* Async Events MQ */ + + /* Kernel client MQs */ + struct sp_chunk *kern_mqsp_pool; + + /* Device updates these values when posting messages to a host + * target queue */ + u16 req_vq_shared; + u16 rep_vq_shared; + u16 aeq_shared; 
+ u16 irq_claimed; + + /* + * Shared host target pages for user-accessible MQs. + */ + int hthead; /* index of first free entry */ + void *htpages; /* kernel vaddr */ + int htlen; /* length of htpages memory */ + void *htuva; /* user mapped vaddr */ + spinlock_t htlock; /* serialize allocation */ + + u64 adapter_hint_uva; /* access to the activity FIFO */ + + // spinlock_t aeq_lock; + // spinlock_t rnic_lock; + + u16 *hint_count; + dma_addr_t hint_count_dma; + u16 hints_read; + + int init; /* TRUE if it's ready */ + char ae_cache_name[16]; + char vq_cache_name[16]; +}; + +struct c2_port { + u32 msg_enable; + struct c2_dev *c2dev; + struct net_device *netdev; + + spinlock_t tx_lock; + u32 tx_avail; + struct c2_ring tx_ring; + struct c2_ring rx_ring; + + void *mem; /* PCI memory for host rings */ + dma_addr_t dma; + unsigned long mem_size; + + u32 rx_buf_size; + + struct net_device_stats netstats; +}; + +/* + * Activity FIFO registers in BAR0. + */ +#define PCI_BAR0_HOST_HINT 0x100 +#define PCI_BAR0_ADAPTER_HINT 0x2000 + +/* + * Ammasso PCI vendor id and Cepheus PCI device id. + */ +#define CQ_ARMED 0x01 +#define CQ_WAIT_FOR_DMA 0x80 + +/* + * The format of a hint is as follows: + * Lower 16 bits are the count of hints for the queue. + * Next 15 bits are the qp_index + * Upper most bit depends on who reads it: + * If read by producer, then it means Full (1) or Not-Full (0) + * If read by consumer, then it means Empty (1) or Not-Empty (0) + */ +#define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | hint_count) +#define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16) +#define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF) + + +/* + * The following defines the offset in SDRAM for the c2_adapter_pci_regs_t + * struct. + */ +#define C2_ADAPTER_PCI_REGS_OFFSET 0x10000 + +#ifndef readq +static inline u64 readq(const void __iomem * addr) +{ + u64 ret = readl(addr + 4); + ret <<= 32; + ret |= readl(addr); + + return ret; +} +#endif + +#ifndef writeq +static inline void __raw_writeq(u64 val, void __iomem * addr) +{ + __raw_writel((u32) (val), addr); + __raw_writel((u32) (val >> 32), (addr + 4)); +} +#endif + +#define C2_SET_CUR_RX(c2dev, cur_rx) \ + __raw_writel(cpu_to_be32(cur_rx), c2dev->mmio_txp_ring + 4092) + +#define C2_GET_CUR_RX(c2dev) \ + be32_to_cpu(readl(c2dev->mmio_txp_ring + 4092)) + +static inline struct c2_dev *to_c2dev(struct ib_device *ibdev) +{ + return container_of(ibdev, struct c2_dev, ibdev); +} + +static inline int c2_errno(void *reply) +{ + switch (c2_wr_get_result(reply)) { + case C2_OK: + return 0; + case CCERR_NO_BUFS: + case CCERR_INSUFFICIENT_RESOURCES: + case CCERR_ZERO_RDMA_READ_RESOURCES: + return -ENOMEM; + case CCERR_MR_IN_USE: + case CCERR_QP_IN_USE: + return -EBUSY; + case CCERR_ADDR_IN_USE: + return -EADDRINUSE; + case CCERR_ADDR_NOT_AVAIL: + return -EADDRNOTAVAIL; + case CCERR_CONN_RESET: + return -ECONNRESET; + case CCERR_NOT_IMPLEMENTED: + case CCERR_INVALID_WQE: + return -ENOSYS; + case CCERR_QP_NOT_PRIVILEGED: + return -EPERM; + case CCERR_STACK_ERROR: + return -EPROTO; + case CCERR_ACCESS_VIOLATION: + case CCERR_BASE_AND_BOUNDS_VIOLATION: + return -EFAULT; + case CCERR_STAG_STATE_NOT_INVALID: + case CCERR_INVALID_ADDRESS: + case CCERR_INVALID_CQ: + case CCERR_INVALID_EP: + case CCERR_INVALID_MODIFIER: + case CCERR_INVALID_MTU: + case CCERR_INVALID_PD_ID: + case CCERR_INVALID_QP: + case CCERR_INVALID_RNIC: + case CCERR_INVALID_STAG: + return -EINVAL; + default: + return -EAGAIN; + } +} + +/* Device */ +extern int c2_register_device(struct c2_dev 
*c2dev); +extern void c2_unregister_device(struct c2_dev *c2dev); +extern int c2_rnic_init(struct c2_dev *c2dev); +extern void c2_rnic_term(struct c2_dev *c2dev); +extern void c2_rnic_interrupt(struct c2_dev *c2dev); +extern int c2_del_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask); +extern int c2_add_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask); + +/* QPs */ +extern int c2_alloc_qp(struct c2_dev *c2dev, struct c2_pd *pd, + struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp); +extern void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp); +extern struct ib_qp *c2_get_qp(struct ib_device *device, int qpn); +extern int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp, + struct ib_qp_attr *attr, int attr_mask); +extern int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp, + int ord, int ird); +extern int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr, + struct ib_send_wr **bad_wr); +extern int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr, + struct ib_recv_wr **bad_wr); +extern void __devinit c2_init_qp_table(struct c2_dev *c2dev); +extern void __devexit c2_cleanup_qp_table(struct c2_dev *c2dev); +extern void c2_set_qp_state(struct c2_qp *, int); +extern struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn); + +/* PDs */ +extern int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd); +extern void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd); +extern int __devinit c2_init_pd_table(struct c2_dev *c2dev); +extern void __devexit c2_cleanup_pd_table(struct c2_dev *c2dev); + +/* CQs */ +extern int c2_init_cq(struct c2_dev *c2dev, int entries, + struct c2_ucontext *ctx, struct c2_cq *cq); +extern void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq); +extern void c2_cq_event(struct c2_dev *c2dev, u32 mq_index); +extern void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index); +extern int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry); +extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify); + +/* CM */ +extern int c2_llp_connect(struct iw_cm_id *cm_id, + struct iw_cm_conn_param *iw_param); +extern int c2_llp_accept(struct iw_cm_id *cm_id, + struct iw_cm_conn_param *iw_param); +extern int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata, + u8 pdata_len); +extern int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog); +extern int c2_llp_service_destroy(struct iw_cm_id *cm_id); + +/* MM */ +extern int c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list, + int page_size, int pbl_depth, u32 length, + u32 off, u64 *va, enum c2_acf acf, + struct c2_mr *mr); +extern int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index); + +/* AE */ +extern void c2_ae_event(struct c2_dev *c2dev, u32 mq_index); + +/* MQSP Allocator */ +extern int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask, + struct sp_chunk **root); +extern void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root); +extern u16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head, + dma_addr_t *dma_addr, gfp_t gfp_mask); +extern void c2_free_mqsp(u16 * mqsp); +#endif diff --git a/drivers/infiniband/hw/amso1100/c2_ae.c b/drivers/infiniband/hw/amso1100/c2_ae.c new file mode 100644 index 000000000000..08f46c83a3a4 --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_ae.c @@ -0,0 +1,321 @@ +/* + * Copyright (c) 2005 Ammasso, Inc. All rights reserved. + * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. 
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "c2.h" +#include <rdma/iw_cm.h> +#include "c2_status.h" +#include "c2_ae.h" + +static int c2_convert_cm_status(u32 c2_status) +{ + switch (c2_status) { + case C2_CONN_STATUS_SUCCESS: + return 0; + case C2_CONN_STATUS_REJECTED: + return -ENETRESET; + case C2_CONN_STATUS_REFUSED: + return -ECONNREFUSED; + case C2_CONN_STATUS_TIMEDOUT: + return -ETIMEDOUT; + case C2_CONN_STATUS_NETUNREACH: + return -ENETUNREACH; + case C2_CONN_STATUS_HOSTUNREACH: + return -EHOSTUNREACH; + case C2_CONN_STATUS_INVALID_RNIC: + return -EINVAL; + case C2_CONN_STATUS_INVALID_QP: + return -EINVAL; + case C2_CONN_STATUS_INVALID_QP_STATE: + return -EINVAL; + case C2_CONN_STATUS_ADDR_NOT_AVAIL: + return -EADDRNOTAVAIL; + default: + printk(KERN_ERR PFX + "%s - Unable to convert CM status: %d\n", + __FUNCTION__, c2_status); + return -EIO; + } +} + +#ifdef DEBUG +static const char* to_event_str(int event) +{ + static const char* event_str[] = { + "CCAE_REMOTE_SHUTDOWN", + "CCAE_ACTIVE_CONNECT_RESULTS", + "CCAE_CONNECTION_REQUEST", + "CCAE_LLP_CLOSE_COMPLETE", + "CCAE_TERMINATE_MESSAGE_RECEIVED", + "CCAE_LLP_CONNECTION_RESET", + "CCAE_LLP_CONNECTION_LOST", + "CCAE_LLP_SEGMENT_SIZE_INVALID", + "CCAE_LLP_INVALID_CRC", + "CCAE_LLP_BAD_FPDU", + "CCAE_INVALID_DDP_VERSION", + "CCAE_INVALID_RDMA_VERSION", + "CCAE_UNEXPECTED_OPCODE", + "CCAE_INVALID_DDP_QUEUE_NUMBER", + "CCAE_RDMA_READ_NOT_ENABLED", + "CCAE_RDMA_WRITE_NOT_ENABLED", + "CCAE_RDMA_READ_TOO_SMALL", + "CCAE_NO_L_BIT", + "CCAE_TAGGED_INVALID_STAG", + "CCAE_TAGGED_BASE_BOUNDS_VIOLATION", + "CCAE_TAGGED_ACCESS_RIGHTS_VIOLATION", + "CCAE_TAGGED_INVALID_PD", + "CCAE_WRAP_ERROR", + "CCAE_BAD_CLOSE", + "CCAE_BAD_LLP_CLOSE", + "CCAE_INVALID_MSN_RANGE", + "CCAE_INVALID_MSN_GAP", + "CCAE_IRRQ_OVERFLOW", + "CCAE_IRRQ_MSN_GAP", + "CCAE_IRRQ_MSN_RANGE", + "CCAE_IRRQ_INVALID_STAG", + "CCAE_IRRQ_BASE_BOUNDS_VIOLATION", + "CCAE_IRRQ_ACCESS_RIGHTS_VIOLATION", + "CCAE_IRRQ_INVALID_PD", + "CCAE_IRRQ_WRAP_ERROR", + "CCAE_CQ_SQ_COMPLETION_OVERFLOW", + "CCAE_CQ_RQ_COMPLETION_ERROR", + "CCAE_QP_SRQ_WQE_ERROR", + "CCAE_QP_LOCAL_CATASTROPHIC_ERROR", + "CCAE_CQ_OVERFLOW", + "CCAE_CQ_OPERATION_ERROR", + "CCAE_SRQ_LIMIT_REACHED", + "CCAE_QP_RQ_LIMIT_REACHED", + 
"CCAE_SRQ_CATASTROPHIC_ERROR", + "CCAE_RNIC_CATASTROPHIC_ERROR" + }; + + if (event < CCAE_REMOTE_SHUTDOWN || + event > CCAE_RNIC_CATASTROPHIC_ERROR) + return "<invalid event>"; + + event -= CCAE_REMOTE_SHUTDOWN; + return event_str[event]; +} + +static const char *to_qp_state_str(int state) +{ + switch (state) { + case C2_QP_STATE_IDLE: + return "C2_QP_STATE_IDLE"; + case C2_QP_STATE_CONNECTING: + return "C2_QP_STATE_CONNECTING"; + case C2_QP_STATE_RTS: + return "C2_QP_STATE_RTS"; + case C2_QP_STATE_CLOSING: + return "C2_QP_STATE_CLOSING"; + case C2_QP_STATE_TERMINATE: + return "C2_QP_STATE_TERMINATE"; + case C2_QP_STATE_ERROR: + return "C2_QP_STATE_ERROR"; + default: + return "<invalid QP state>"; + }; +} +#endif + +void c2_ae_event(struct c2_dev *c2dev, u32 mq_index) +{ + struct c2_mq *mq = c2dev->qptr_array[mq_index]; + union c2wr *wr; + void *resource_user_context; + struct iw_cm_event cm_event; + struct ib_event ib_event; + enum c2_resource_indicator resource_indicator; + enum c2_event_id event_id; + unsigned long flags; + int status; + + /* + * retreive the message + */ + wr = c2_mq_consume(mq); + if (!wr) + return; + + memset(&ib_event, 0, sizeof(ib_event)); + memset(&cm_event, 0, sizeof(cm_event)); + + event_id = c2_wr_get_id(wr); + resource_indicator = be32_to_cpu(wr->ae.ae_generic.resource_type); + resource_user_context = + (void *) (unsigned long) wr->ae.ae_generic.user_context; + + status = cm_event.status = c2_convert_cm_status(c2_wr_get_result(wr)); + + pr_debug("event received c2_dev=%p, event_id=%d, " + "resource_indicator=%d, user_context=%p, status = %d\n", + c2dev, event_id, resource_indicator, resource_user_context, + status); + + switch (resource_indicator) { + case C2_RES_IND_QP:{ + + struct c2_qp *qp = (struct c2_qp *)resource_user_context; + struct iw_cm_id *cm_id = qp->cm_id; + struct c2wr_ae_active_connect_results *res; + + if (!cm_id) { + pr_debug("event received, but cm_id is <nul>, qp=%p!\n", + qp); + goto ignore_it; + } + pr_debug("%s: event = %s, user_context=%llx, " + "resource_type=%x, " + "resource=%x, qp_state=%s\n", + __FUNCTION__, + to_event_str(event_id), + be64_to_cpu(wr->ae.ae_generic.user_context), + be32_to_cpu(wr->ae.ae_generic.resource_type), + be32_to_cpu(wr->ae.ae_generic.resource), + to_qp_state_str(be32_to_cpu(wr->ae.ae_generic.qp_state))); + + c2_set_qp_state(qp, be32_to_cpu(wr->ae.ae_generic.qp_state)); + + switch (event_id) { + case CCAE_ACTIVE_CONNECT_RESULTS: + res = &wr->ae.ae_active_connect_results; + cm_event.event = IW_CM_EVENT_CONNECT_REPLY; + cm_event.local_addr.sin_addr.s_addr = res->laddr; + cm_event.remote_addr.sin_addr.s_addr = res->raddr; + cm_event.local_addr.sin_port = res->lport; + cm_event.remote_addr.sin_port = res->rport; + if (status == 0) { + cm_event.private_data_len = + be32_to_cpu(res->private_data_length); + cm_event.private_data = res->private_data; + } else { + spin_lock_irqsave(&qp->lock, flags); + if (qp->cm_id) { + qp->cm_id->rem_ref(qp->cm_id); + qp->cm_id = NULL; + } + spin_unlock_irqrestore(&qp->lock, flags); + cm_event.private_data_len = 0; + cm_event.private_data = NULL; + } + if (cm_id->event_handler) + cm_id->event_handler(cm_id, &cm_event); + break; + case CCAE_TERMINATE_MESSAGE_RECEIVED: + case CCAE_CQ_SQ_COMPLETION_OVERFLOW: + ib_event.device = &c2dev->ibdev; + ib_event.element.qp = &qp->ibqp; + ib_event.event = IB_EVENT_QP_REQ_ERR; + + if (qp->ibqp.event_handler) + qp->ibqp.event_handler(&ib_event, + qp->ibqp. 
+ qp_context); + break; + case CCAE_BAD_CLOSE: + case CCAE_LLP_CLOSE_COMPLETE: + case CCAE_LLP_CONNECTION_RESET: + case CCAE_LLP_CONNECTION_LOST: + /* catch a use-after-free of the cm_id (0x6b is the slab poison byte) */ + BUG_ON(cm_id->event_handler == (void*)0x6b6b6b6b); + + spin_lock_irqsave(&qp->lock, flags); + if (qp->cm_id) { + qp->cm_id->rem_ref(qp->cm_id); + qp->cm_id = NULL; + } + spin_unlock_irqrestore(&qp->lock, flags); + cm_event.event = IW_CM_EVENT_CLOSE; + cm_event.status = 0; + if (cm_id->event_handler) + cm_id->event_handler(cm_id, &cm_event); + break; + default: + pr_debug("%s:%d Unexpected event_id=%d on QP=%p, " + "CM_ID=%p\n", + __FUNCTION__, __LINE__, + event_id, qp, cm_id); + BUG_ON(1); + break; + } + break; + } + + case C2_RES_IND_EP:{ + + struct c2wr_ae_connection_request *req = + &wr->ae.ae_connection_request; + struct iw_cm_id *cm_id = + (struct iw_cm_id *)resource_user_context; + + pr_debug("C2_RES_IND_EP event_id=%d\n", event_id); + if (event_id != CCAE_CONNECTION_REQUEST) { + pr_debug("%s: Invalid event_id: %d\n", + __FUNCTION__, event_id); + break; + } + cm_event.event = IW_CM_EVENT_CONNECT_REQUEST; + cm_event.provider_data = (void*)(unsigned long)req->cr_handle; + cm_event.local_addr.sin_addr.s_addr = req->laddr; + cm_event.remote_addr.sin_addr.s_addr = req->raddr; + cm_event.local_addr.sin_port = req->lport; + cm_event.remote_addr.sin_port = req->rport; + cm_event.private_data_len = + be32_to_cpu(req->private_data_length); + cm_event.private_data = req->private_data; + + if (cm_id->event_handler) + cm_id->event_handler(cm_id, &cm_event); + break; + } + + case C2_RES_IND_CQ:{ + struct c2_cq *cq = + (struct c2_cq *) resource_user_context; + + pr_debug("IB_EVENT_CQ_ERR\n"); + ib_event.device = &c2dev->ibdev; + ib_event.element.cq = &cq->ibcq; + ib_event.event = IB_EVENT_CQ_ERR; + + if (cq->ibcq.event_handler) + cq->ibcq.event_handler(&ib_event, + cq->ibcq.cq_context); + break; + } + + default: + printk(KERN_ERR PFX "Bad resource indicator = %d\n", + resource_indicator); + break; + } + + ignore_it: + c2_mq_free(mq); +} diff --git a/drivers/infiniband/hw/amso1100/c2_ae.h b/drivers/infiniband/hw/amso1100/c2_ae.h new file mode 100644 index 000000000000..3a065c33b83b --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_ae.h @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2005 Ammasso, Inc. All rights reserved. + * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _C2_AE_H_ +#define _C2_AE_H_ + +/* + * WARNING: If you change this file, also bump C2_IVN_BASE + * in common/include/clustercore/c2_ivn.h. + */ + +/* + * Asynchronous Event Identifiers + * + * These start at 0x80 only so it's obvious from inspection that + * they are not work-request statuses. This isn't critical. + * + * NOTE: these event id's must fit in eight bits. + */ +enum c2_event_id { + CCAE_REMOTE_SHUTDOWN = 0x80, + CCAE_ACTIVE_CONNECT_RESULTS, + CCAE_CONNECTION_REQUEST, + CCAE_LLP_CLOSE_COMPLETE, + CCAE_TERMINATE_MESSAGE_RECEIVED, + CCAE_LLP_CONNECTION_RESET, + CCAE_LLP_CONNECTION_LOST, + CCAE_LLP_SEGMENT_SIZE_INVALID, + CCAE_LLP_INVALID_CRC, + CCAE_LLP_BAD_FPDU, + CCAE_INVALID_DDP_VERSION, + CCAE_INVALID_RDMA_VERSION, + CCAE_UNEXPECTED_OPCODE, + CCAE_INVALID_DDP_QUEUE_NUMBER, + CCAE_RDMA_READ_NOT_ENABLED, + CCAE_RDMA_WRITE_NOT_ENABLED, + CCAE_RDMA_READ_TOO_SMALL, + CCAE_NO_L_BIT, + CCAE_TAGGED_INVALID_STAG, + CCAE_TAGGED_BASE_BOUNDS_VIOLATION, + CCAE_TAGGED_ACCESS_RIGHTS_VIOLATION, + CCAE_TAGGED_INVALID_PD, + CCAE_WRAP_ERROR, + CCAE_BAD_CLOSE, + CCAE_BAD_LLP_CLOSE, + CCAE_INVALID_MSN_RANGE, + CCAE_INVALID_MSN_GAP, + CCAE_IRRQ_OVERFLOW, + CCAE_IRRQ_MSN_GAP, + CCAE_IRRQ_MSN_RANGE, + CCAE_IRRQ_INVALID_STAG, + CCAE_IRRQ_BASE_BOUNDS_VIOLATION, + CCAE_IRRQ_ACCESS_RIGHTS_VIOLATION, + CCAE_IRRQ_INVALID_PD, + CCAE_IRRQ_WRAP_ERROR, + CCAE_CQ_SQ_COMPLETION_OVERFLOW, + CCAE_CQ_RQ_COMPLETION_ERROR, + CCAE_QP_SRQ_WQE_ERROR, + CCAE_QP_LOCAL_CATASTROPHIC_ERROR, + CCAE_CQ_OVERFLOW, + CCAE_CQ_OPERATION_ERROR, + CCAE_SRQ_LIMIT_REACHED, + CCAE_QP_RQ_LIMIT_REACHED, + CCAE_SRQ_CATASTROPHIC_ERROR, + CCAE_RNIC_CATASTROPHIC_ERROR +/* WARNING If you add more id's, make sure their values fit in eight bits. */ +}; + +/* + * Resource Indicators and Identifiers + */ +enum c2_resource_indicator { + C2_RES_IND_QP = 1, + C2_RES_IND_EP, + C2_RES_IND_CQ, + C2_RES_IND_SRQ, +}; + +#endif /* _C2_AE_H_ */ diff --git a/drivers/infiniband/hw/amso1100/c2_alloc.c b/drivers/infiniband/hw/amso1100/c2_alloc.c new file mode 100644 index 000000000000..1d2529992c0c --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_alloc.c @@ -0,0 +1,144 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/bitmap.h> + +#include "c2.h" + +static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask, + struct sp_chunk **head) +{ + int i; + struct sp_chunk *new_head; + + new_head = (struct sp_chunk *) __get_free_page(gfp_mask); + if (new_head == NULL) + return -ENOMEM; + + new_head->dma_addr = dma_map_single(c2dev->ibdev.dma_device, new_head, + PAGE_SIZE, DMA_FROM_DEVICE); + pci_unmap_addr_set(new_head, mapping, new_head->dma_addr); + + new_head->next = NULL; + new_head->head = 0; + + /* build list where each index is the next free slot */ + for (i = 0; + i < (PAGE_SIZE - sizeof(struct sp_chunk) - + sizeof(u16)) / sizeof(u16) - 1; + i++) { + new_head->shared_ptr[i] = i + 1; + } + /* terminate list */ + new_head->shared_ptr[i] = 0xFFFF; + + *head = new_head; + return 0; +} + +int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask, + struct sp_chunk **root) +{ + return c2_alloc_mqsp_chunk(c2dev, gfp_mask, root); +} + +void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root) +{ + struct sp_chunk *next; + + while (root) { + next = root->next; + dma_unmap_single(c2dev->ibdev.dma_device, + pci_unmap_addr(root, mapping), PAGE_SIZE, + DMA_FROM_DEVICE); + __free_page((struct page *) root); + root = next; + } +} + +u16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head, + dma_addr_t *dma_addr, gfp_t gfp_mask) +{ + u16 mqsp; + + while (head) { + mqsp = head->head; + if (mqsp != 0xFFFF) { + head->head = head->shared_ptr[mqsp]; + break; + } else if (head->next == NULL) { + if (c2_alloc_mqsp_chunk(c2dev, gfp_mask, &head->next) == + 0) { + head = head->next; + mqsp = head->head; + head->head = head->shared_ptr[mqsp]; + break; + } else + return NULL; + } else + head = head->next; + } + if (head) { + *dma_addr = head->dma_addr + + ((unsigned long) &(head->shared_ptr[mqsp]) - + (unsigned long) head); + pr_debug("%s addr %p dma_addr %llx\n", __FUNCTION__, + &(head->shared_ptr[mqsp]), (u64)*dma_addr); + return &(head->shared_ptr[mqsp]); + } + return NULL; +} + +void c2_free_mqsp(u16 * mqsp) +{ + struct sp_chunk *head; + u16 idx; + + /* The chunk containing this ptr begins at the page boundary */ + head = (struct sp_chunk *) ((unsigned long) mqsp & PAGE_MASK); + + /* Link head to new mqsp */ + *mqsp = head->head; + + /* Compute the shared_ptr index */ + idx = ((unsigned long) mqsp & ~PAGE_MASK) >> 1; + idx -= (unsigned long) &(((struct sp_chunk *) 0)->shared_ptr[0]) >> 1; + + /* Point this index at the head */ + head->shared_ptr[idx] = head->head; + + /* Point head at this index */ + head->head = idx; +} diff --git a/drivers/infiniband/hw/amso1100/c2_cm.c b/drivers/infiniband/hw/amso1100/c2_cm.c new file mode 100644 index 000000000000..485254efdd1e --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_cm.c @@ -0,0 +1,452 @@ +/* + * Copyright (c) 2005 Ammasso, Inc. All rights reserved. + * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. 
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ +#include "c2.h" +#include "c2_wr.h" +#include "c2_vq.h" +#include <rdma/iw_cm.h> + +int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param) +{ + struct c2_dev *c2dev = to_c2dev(cm_id->device); + struct ib_qp *ibqp; + struct c2_qp *qp; + struct c2wr_qp_connect_req *wr; /* variable size needs a malloc. */ + struct c2_vq_req *vq_req; + int err; + + ibqp = c2_get_qp(cm_id->device, iw_param->qpn); + if (!ibqp) + return -EINVAL; + qp = to_c2qp(ibqp); + + /* Associate QP <--> CM_ID */ + cm_id->provider_data = qp; + cm_id->add_ref(cm_id); + qp->cm_id = cm_id; + + /* + * Reject private data that exceeds the supported maximum length + */ + if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) { + err = -EINVAL; + goto bail0; + } + /* + * Set the rdma read limits + */ + err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird); + if (err) + goto bail0; + + /* + * Create and send a WR_QP_CONNECT... + */ + wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL); + if (!wr) { + err = -ENOMEM; + goto bail0; + } + + vq_req = vq_req_alloc(c2dev); + if (!vq_req) { + err = -ENOMEM; + goto bail1; + } + + c2_wr_set_id(wr, CCWR_QP_CONNECT); + wr->hdr.context = 0; + wr->rnic_handle = c2dev->adapter_handle; + wr->qp_handle = qp->adapter_handle; + + wr->remote_addr = cm_id->remote_addr.sin_addr.s_addr; + wr->remote_port = cm_id->remote_addr.sin_port; + + /* + * Move any private data from the caller's buf into + * the WR. + */ + if (iw_param->private_data) { + wr->private_data_length = + cpu_to_be32(iw_param->private_data_len); + memcpy(&wr->private_data[0], iw_param->private_data, + iw_param->private_data_len); + } else + wr->private_data_length = 0; + + /* + * Send WR to adapter. NOTE: There is no synch reply from + * the adapter.
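+ * The connect result arrives later as a CCAE_ACTIVE_CONNECT_RESULTS asynchronous event (handled in c2_ae_event()), which is why the vq_req is released right after the send below.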
+ */ + err = vq_send_wr(c2dev, (union c2wr *) wr); + vq_req_free(c2dev, vq_req); + + bail1: + kfree(wr); + bail0: + if (err) { + /* + * If we fail, release reference on QP and + * disassociate QP from CM_ID + */ + cm_id->provider_data = NULL; + qp->cm_id = NULL; + cm_id->rem_ref(cm_id); + } + return err; +} + +int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog) +{ + struct c2_dev *c2dev; + struct c2wr_ep_listen_create_req wr; + struct c2wr_ep_listen_create_rep *reply; + struct c2_vq_req *vq_req; + int err; + + c2dev = to_c2dev(cm_id->device); + if (c2dev == NULL) + return -EINVAL; + + /* + * Allocate verbs request. + */ + vq_req = vq_req_alloc(c2dev); + if (!vq_req) + return -ENOMEM; + + /* + * Build the WR + */ + c2_wr_set_id(&wr, CCWR_EP_LISTEN_CREATE); + wr.hdr.context = (u64) (unsigned long) vq_req; + wr.rnic_handle = c2dev->adapter_handle; + wr.local_addr = cm_id->local_addr.sin_addr.s_addr; + wr.local_port = cm_id->local_addr.sin_port; + wr.backlog = cpu_to_be32(backlog); + wr.user_context = (u64) (unsigned long) cm_id; + + /* + * Reference the request struct. Dereferenced in the int handler. + */ + vq_req_get(c2dev, vq_req); + + /* + * Send WR to adapter + */ + err = vq_send_wr(c2dev, (union c2wr *) & wr); + if (err) { + vq_req_put(c2dev, vq_req); + goto bail0; + } + + /* + * Wait for reply from adapter + */ + err = vq_wait_for_reply(c2dev, vq_req); + if (err) + goto bail0; + + /* + * Process reply + */ + reply = + (struct c2wr_ep_listen_create_rep *) (unsigned long) vq_req->reply_msg; + if (!reply) { + err = -ENOMEM; + goto bail1; + } + + if ((err = c2_errno(reply)) != 0) + goto bail1; + + /* + * Keep the adapter handle. Used in subsequent destroy + */ + cm_id->provider_data = (void*)(unsigned long) reply->ep_handle; + + /* + * free vq stuff + */ + vq_repbuf_free(c2dev, reply); + vq_req_free(c2dev, vq_req); + + return 0; + + bail1: + vq_repbuf_free(c2dev, reply); + bail0: + vq_req_free(c2dev, vq_req); + return err; +} + + +int c2_llp_service_destroy(struct iw_cm_id *cm_id) +{ + + struct c2_dev *c2dev; + struct c2wr_ep_listen_destroy_req wr; + struct c2wr_ep_listen_destroy_rep *reply; + struct c2_vq_req *vq_req; + int err; + + c2dev = to_c2dev(cm_id->device); + if (c2dev == NULL) + return -EINVAL; + + /* + * Allocate verbs request. + */ + vq_req = vq_req_alloc(c2dev); + if (!vq_req) + return -ENOMEM; + + /* + * Build the WR + */ + c2_wr_set_id(&wr, CCWR_EP_LISTEN_DESTROY); + wr.hdr.context = (unsigned long) vq_req; + wr.rnic_handle = c2dev->adapter_handle; + wr.ep_handle = (u32)(unsigned long)cm_id->provider_data; + + /* + * reference the request struct. dereferenced in the int handler. 
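+ * vq_wait_for_reply() below then sleeps until the interrupt handler has posted the reply and dropped that reference.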
+ */ + vq_req_get(c2dev, vq_req); + + /* + * Send WR to adapter + */ + err = vq_send_wr(c2dev, (union c2wr *) & wr); + if (err) { + vq_req_put(c2dev, vq_req); + goto bail0; + } + + /* + * Wait for reply from adapter + */ + err = vq_wait_for_reply(c2dev, vq_req); + if (err) + goto bail0; + + /* + * Process reply + */ + reply = (struct c2wr_ep_listen_destroy_rep *)(unsigned long)vq_req->reply_msg; + if (!reply) { + err = -ENOMEM; + goto bail0; + } + if ((err = c2_errno(reply)) != 0) + goto bail1; + + bail1: + vq_repbuf_free(c2dev, reply); + bail0: + vq_req_free(c2dev, vq_req); + return err; +} + +int c2_llp_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param) +{ + struct c2_dev *c2dev = to_c2dev(cm_id->device); + struct c2_qp *qp; + struct ib_qp *ibqp; + struct c2wr_cr_accept_req *wr; /* variable length WR */ + struct c2_vq_req *vq_req; + struct c2wr_cr_accept_rep *reply; /* VQ Reply msg ptr. */ + int err; + + ibqp = c2_get_qp(cm_id->device, iw_param->qpn); + if (!ibqp) + return -EINVAL; + qp = to_c2qp(ibqp); + + /* Set the RDMA read limits */ + err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird); + if (err) + goto bail0; + + /* Allocate verbs request. */ + vq_req = vq_req_alloc(c2dev); + if (!vq_req) { + err = -ENOMEM; + goto bail1; + } + vq_req->qp = qp; + vq_req->cm_id = cm_id; + vq_req->event = IW_CM_EVENT_ESTABLISHED; + + wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL); + if (!wr) { + err = -ENOMEM; + goto bail2; + } + + /* Build the WR */ + c2_wr_set_id(wr, CCWR_CR_ACCEPT); + wr->hdr.context = (unsigned long) vq_req; + wr->rnic_handle = c2dev->adapter_handle; + wr->ep_handle = (u32) (unsigned long) cm_id->provider_data; + wr->qp_handle = qp->adapter_handle; + + /* Replace the cr_handle with the QP after accept */ + cm_id->provider_data = qp; + cm_id->add_ref(cm_id); + qp->cm_id = cm_id; + + /* Validate private_data length */ + if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) { + err = -EINVAL; + goto bail2; + } + + if (iw_param->private_data) { + wr->private_data_length = cpu_to_be32(iw_param->private_data_len); + memcpy(&wr->private_data[0], + iw_param->private_data, iw_param->private_data_len); + } else + wr->private_data_length = 0; + + /* Reference the request struct. Dereferenced in the int handler. */ + vq_req_get(c2dev, vq_req); + + /* Send WR to adapter */ + err = vq_send_wr(c2dev, (union c2wr *) wr); + if (err) { + vq_req_put(c2dev, vq_req); + goto bail2; + } + + /* Wait for reply from adapter */ + err = vq_wait_for_reply(c2dev, vq_req); + if (err) + goto bail2; + + /* Check that reply is present */ + reply = (struct c2wr_cr_accept_rep *) (unsigned long) vq_req->reply_msg; + if (!reply) { + err = -ENOMEM; + goto bail2; + } + + err = c2_errno(reply); + vq_repbuf_free(c2dev, reply); + + if (!err) + c2_set_qp_state(qp, C2_QP_STATE_RTS); + bail2: + kfree(wr); + bail1: + vq_req_free(c2dev, vq_req); + bail0: + if (err) { + /* + * If we fail, release reference on QP and + * disassociate QP from CM_ID + */ + cm_id->provider_data = NULL; + qp->cm_id = NULL; + cm_id->rem_ref(cm_id); + } + return err; +} + +int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) +{ + struct c2_dev *c2dev; + struct c2wr_cr_reject_req wr; + struct c2_vq_req *vq_req; + struct c2wr_cr_reject_rep *reply; + int err; + + c2dev = to_c2dev(cm_id->device); + + /* + * Allocate verbs request.
+ */ + vq_req = vq_req_alloc(c2dev); + if (!vq_req) + return -ENOMEM; + + /* + * Build the WR + */ + c2_wr_set_id(&wr, CCWR_CR_REJECT); + wr.hdr.context = (unsigned long) vq_req; + wr.rnic_handle = c2dev->adapter_handle; + wr.ep_handle = (u32) (unsigned long) cm_id->provider_data; + + /* + * reference the request struct. dereferenced in the int handler. + */ + vq_req_get(c2dev, vq_req); + + /* + * Send WR to adapter + */ + err = vq_send_wr(c2dev, (union c2wr *) & wr); + if (err) { + vq_req_put(c2dev, vq_req); + goto bail0; + } + + /* + * Wait for reply from adapter + */ + err = vq_wait_for_reply(c2dev, vq_req); + if (err) + goto bail0; + + /* + * Process reply + */ + reply = (struct c2wr_cr_reject_rep *) (unsigned long) + vq_req->reply_msg; + if (!reply) { + err = -ENOMEM; + goto bail0; + } + err = c2_errno(reply); + /* + * free vq stuff + */ + vq_repbuf_free(c2dev, reply); + + bail0: + vq_req_free(c2dev, vq_req); + return err; +} diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c new file mode 100644 index 000000000000..9d7bcc5ade93 --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_cq.c @@ -0,0 +1,433 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. + * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ */
+#include "c2.h"
+#include "c2_vq.h"
+#include "c2_status.h"
+
+#define C2_CQ_MSG_SIZE ((sizeof(struct c2wr_ce) + 32-1) & ~(32-1))
+
+static struct c2_cq *c2_cq_get(struct c2_dev *c2dev, int cqn)
+{
+	struct c2_cq *cq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&c2dev->lock, flags);
+	cq = c2dev->qptr_array[cqn];
+	if (!cq) {
+		spin_unlock_irqrestore(&c2dev->lock, flags);
+		return NULL;
+	}
+	atomic_inc(&cq->refcount);
+	spin_unlock_irqrestore(&c2dev->lock, flags);
+	return cq;
+}
+
+static void c2_cq_put(struct c2_cq *cq)
+{
+	if (atomic_dec_and_test(&cq->refcount))
+		wake_up(&cq->wait);
+}
+
+void c2_cq_event(struct c2_dev *c2dev, u32 mq_index)
+{
+	struct c2_cq *cq;
+
+	cq = c2_cq_get(c2dev, mq_index);
+	if (!cq) {
+		pr_debug("discarding events on destroyed CQN=%d\n", mq_index);
+		return;
+	}
+
+	(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
+	c2_cq_put(cq);
+}
+
+void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index)
+{
+	struct c2_cq *cq;
+	struct c2_mq *q;
+
+	cq = c2_cq_get(c2dev, mq_index);
+	if (!cq)
+		return;
+
+	spin_lock_irq(&cq->lock);
+	q = &cq->mq;
+	if (q && !c2_mq_empty(q)) {
+		u16 priv = q->priv;
+		struct c2wr_ce *msg;
+
+		while (priv != be16_to_cpu(*q->shared)) {
+			msg = (struct c2wr_ce *)
+				(q->msg_pool.host + priv * q->msg_size);
+			if (msg->qp_user_context == (u64) (unsigned long) qp) {
+				msg->qp_user_context = (u64) 0;
+			}
+			priv = (priv + 1) % q->q_size;
+		}
+	}
+	spin_unlock_irq(&cq->lock);
+	c2_cq_put(cq);
+}
+
+static inline enum ib_wc_status c2_cqe_status_to_openib(u8 status)
+{
+	switch (status) {
+	case C2_OK:
+		return IB_WC_SUCCESS;
+	case CCERR_FLUSHED:
+		return IB_WC_WR_FLUSH_ERR;
+	case CCERR_BASE_AND_BOUNDS_VIOLATION:
+		return IB_WC_LOC_PROT_ERR;
+	case CCERR_ACCESS_VIOLATION:
+		return IB_WC_LOC_ACCESS_ERR;
+	case CCERR_TOTAL_LENGTH_TOO_BIG:
+		return IB_WC_LOC_LEN_ERR;
+	case CCERR_INVALID_WINDOW:
+		return IB_WC_MW_BIND_ERR;
+	default:
+		return IB_WC_GENERAL_ERR;
+	}
+}
+
+
+static inline int c2_poll_one(struct c2_dev *c2dev,
+			      struct c2_cq *cq, struct ib_wc *entry)
+{
+	struct c2wr_ce *ce;
+	struct c2_qp *qp;
+	int is_recv = 0;
+
+	ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq);
+	if (!ce) {
+		return -EAGAIN;
+	}
+
+	/*
+	 * if the qp returned is null then this qp has already
+	 * been freed and we are unable to process the completion.
+ * try pulling the next message + */ + while ((qp = + (struct c2_qp *) (unsigned long) ce->qp_user_context) == NULL) { + c2_mq_free(&cq->mq); + ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq); + if (!ce) + return -EAGAIN; + } + + entry->status = c2_cqe_status_to_openib(c2_wr_get_result(ce)); + entry->wr_id = ce->hdr.context; + entry->qp_num = ce->handle; + entry->wc_flags = 0; + entry->slid = 0; + entry->sl = 0; + entry->src_qp = 0; + entry->dlid_path_bits = 0; + entry->pkey_index = 0; + + switch (c2_wr_get_id(ce)) { + case C2_WR_TYPE_SEND: + entry->opcode = IB_WC_SEND; + break; + case C2_WR_TYPE_RDMA_WRITE: + entry->opcode = IB_WC_RDMA_WRITE; + break; + case C2_WR_TYPE_RDMA_READ: + entry->opcode = IB_WC_RDMA_READ; + break; + case C2_WR_TYPE_BIND_MW: + entry->opcode = IB_WC_BIND_MW; + break; + case C2_WR_TYPE_RECV: + entry->byte_len = be32_to_cpu(ce->bytes_rcvd); + entry->opcode = IB_WC_RECV; + is_recv = 1; + break; + default: + break; + } + + /* consume the WQEs */ + if (is_recv) + c2_mq_lconsume(&qp->rq_mq, 1); + else + c2_mq_lconsume(&qp->sq_mq, + be32_to_cpu(c2_wr_get_wqe_count(ce)) + 1); + + /* free the message */ + c2_mq_free(&cq->mq); + + return 0; +} + +int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) +{ + struct c2_dev *c2dev = to_c2dev(ibcq->device); + struct c2_cq *cq = to_c2cq(ibcq); + unsigned long flags; + int npolled, err; + + spin_lock_irqsave(&cq->lock, flags); + + for (npolled = 0; npolled < num_entries; ++npolled) { + + err = c2_poll_one(c2dev, cq, entry + npolled); + if (err) + break; + } + + spin_unlock_irqrestore(&cq->lock, flags); + + return npolled; +} + +int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) +{ + struct c2_mq_shared __iomem *shared; + struct c2_cq *cq; + + cq = to_c2cq(ibcq); + shared = cq->mq.peer; + + if (notify == IB_CQ_NEXT_COMP) + writeb(C2_CQ_NOTIFICATION_TYPE_NEXT, &shared->notification_type); + else if (notify == IB_CQ_SOLICITED) + writeb(C2_CQ_NOTIFICATION_TYPE_NEXT_SE, &shared->notification_type); + else + return -EINVAL; + + writeb(CQ_WAIT_FOR_DMA | CQ_ARMED, &shared->armed); + + /* + * Now read back shared->armed to make the PCI + * write synchronous. This is necessary for + * correct cq notification semantics. 
+ */ + readb(&shared->armed); + + return 0; +} + +static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq) +{ + + dma_unmap_single(c2dev->ibdev.dma_device, pci_unmap_addr(mq, mapping), + mq->q_size * mq->msg_size, DMA_FROM_DEVICE); + free_pages((unsigned long) mq->msg_pool.host, + get_order(mq->q_size * mq->msg_size)); +} + +static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size, + int msg_size) +{ + unsigned long pool_start; + + pool_start = __get_free_pages(GFP_KERNEL, + get_order(q_size * msg_size)); + if (!pool_start) + return -ENOMEM; + + c2_mq_rep_init(mq, + 0, /* index (currently unknown) */ + q_size, + msg_size, + (u8 *) pool_start, + NULL, /* peer (currently unknown) */ + C2_MQ_HOST_TARGET); + + mq->host_dma = dma_map_single(c2dev->ibdev.dma_device, + (void *)pool_start, + q_size * msg_size, DMA_FROM_DEVICE); + pci_unmap_addr_set(mq, mapping, mq->host_dma); + + return 0; +} + +int c2_init_cq(struct c2_dev *c2dev, int entries, + struct c2_ucontext *ctx, struct c2_cq *cq) +{ + struct c2wr_cq_create_req wr; + struct c2wr_cq_create_rep *reply; + unsigned long peer_pa; + struct c2_vq_req *vq_req; + int err; + + might_sleep(); + + cq->ibcq.cqe = entries - 1; + cq->is_kernel = !ctx; + + /* Allocate a shared pointer */ + cq->mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool, + &cq->mq.shared_dma, GFP_KERNEL); + if (!cq->mq.shared) + return -ENOMEM; + + /* Allocate pages for the message pool */ + err = c2_alloc_cq_buf(c2dev, &cq->mq, entries + 1, C2_CQ_MSG_SIZE); + if (err) + goto bail0; + + vq_req = vq_req_alloc(c2dev); + if (!vq_req) { + err = -ENOMEM; + goto bail1; + } + + memset(&wr, 0, sizeof(wr)); + c2_wr_set_id(&wr, CCWR_CQ_CREATE); + wr.hdr.context = (unsigned long) vq_req; + wr.rnic_handle = c2dev->adapter_handle; + wr.msg_size = cpu_to_be32(cq->mq.msg_size); + wr.depth = cpu_to_be32(cq->mq.q_size); + wr.shared_ht = cpu_to_be64(cq->mq.shared_dma); + wr.msg_pool = cpu_to_be64(cq->mq.host_dma); + wr.user_context = (u64) (unsigned long) (cq); + + vq_req_get(c2dev, vq_req); + + err = vq_send_wr(c2dev, (union c2wr *) & wr); + if (err) { + vq_req_put(c2dev, vq_req); + goto bail2; + } + + err = vq_wait_for_reply(c2dev, vq_req); + if (err) + goto bail2; + + reply = (struct c2wr_cq_create_rep *) (unsigned long) (vq_req->reply_msg); + if (!reply) { + err = -ENOMEM; + goto bail2; + } + + if ((err = c2_errno(reply)) != 0) + goto bail3; + + cq->adapter_handle = reply->cq_handle; + cq->mq.index = be32_to_cpu(reply->mq_index); + + peer_pa = c2dev->pa + be32_to_cpu(reply->adapter_shared); + cq->mq.peer = ioremap_nocache(peer_pa, PAGE_SIZE); + if (!cq->mq.peer) { + err = -ENOMEM; + goto bail3; + } + + vq_repbuf_free(c2dev, reply); + vq_req_free(c2dev, vq_req); + + spin_lock_init(&cq->lock); + atomic_set(&cq->refcount, 1); + init_waitqueue_head(&cq->wait); + + /* + * Use the MQ index allocated by the adapter to + * store the CQ in the qptr_array + */ + cq->cqn = cq->mq.index; + c2dev->qptr_array[cq->cqn] = cq; + + return 0; + + bail3: + vq_repbuf_free(c2dev, reply); + bail2: + vq_req_free(c2dev, vq_req); + bail1: + c2_free_cq_buf(c2dev, &cq->mq); + bail0: + c2_free_mqsp(cq->mq.shared); + + return err; +} + +void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq) +{ + int err; + struct c2_vq_req *vq_req; + struct c2wr_cq_destroy_req wr; + struct c2wr_cq_destroy_rep *reply; + + might_sleep(); + + /* Clear CQ from the qptr array */ + spin_lock_irq(&c2dev->lock); + c2dev->qptr_array[cq->mq.index] = NULL; + atomic_dec(&cq->refcount); + spin_unlock_irq(&c2dev->lock); 
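+	/*
+	 * The CQ is now unreachable: c2_cq_get() looks it up in
+	 * qptr_array under c2dev->lock, so after the lines above no
+	 * new references can be taken.  We have dropped our initial
+	 * reference, and waiting for the count to hit zero lets any
+	 * in-flight c2_cq_event()/c2_cq_clean() callers finish before
+	 * the CQ resources are destroyed below.
+	 */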
+
+	wait_event(cq->wait, !atomic_read(&cq->refcount));
+
+	vq_req = vq_req_alloc(c2dev);
+	if (!vq_req) {
+		goto bail0;
+	}
+
+	memset(&wr, 0, sizeof(wr));
+	c2_wr_set_id(&wr, CCWR_CQ_DESTROY);
+	wr.hdr.context = (unsigned long) vq_req;
+	wr.rnic_handle = c2dev->adapter_handle;
+	wr.cq_handle = cq->adapter_handle;
+
+	vq_req_get(c2dev, vq_req);
+
+	err = vq_send_wr(c2dev, (union c2wr *) & wr);
+	if (err) {
+		vq_req_put(c2dev, vq_req);
+		goto bail1;
+	}
+
+	err = vq_wait_for_reply(c2dev, vq_req);
+	if (err)
+		goto bail1;
+
+	reply = (struct c2wr_cq_destroy_rep *) (unsigned long) (vq_req->reply_msg);
+
+	vq_repbuf_free(c2dev, reply);
+      bail1:
+	vq_req_free(c2dev, vq_req);
+      bail0:
+	if (cq->is_kernel) {
+		c2_free_cq_buf(c2dev, &cq->mq);
+	}
+
+	return;
+}
diff --git a/drivers/infiniband/hw/amso1100/c2_intr.c b/drivers/infiniband/hw/amso1100/c2_intr.c
new file mode 100644
index 000000000000..0d0bc33ca30a
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_intr.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials
+ *   provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "c2.h"
+#include <rdma/iw_cm.h>
+#include "c2_vq.h"
+
+static void handle_mq(struct c2_dev *c2dev, u32 mq_index);
+static void handle_vq(struct c2_dev *c2dev, u32 mq_index);
+
+/*
+ * Handle RNIC interrupts
+ */
+void c2_rnic_interrupt(struct c2_dev *c2dev)
+{
+	unsigned int mq_index;
+
+	while (c2dev->hints_read != be16_to_cpu(*c2dev->hint_count)) {
+		mq_index = readl(c2dev->regs + PCI_BAR0_HOST_HINT);
+		if (mq_index & 0x80000000) {
+			break;
+		}
+
+		c2dev->hints_read++;
+		handle_mq(c2dev, mq_index);
+	}
+}
+
+/*
+ * Top level MQ handler
+ */
+static void handle_mq(struct c2_dev *c2dev, u32 mq_index)
+{
+	if (c2dev->qptr_array[mq_index] == NULL) {
+		pr_debug("handle_mq: stray activity for mq_index=%d\n",
+			 mq_index);
+		return;
+	}
+
+	switch (mq_index) {
+	case (0):
+		/*
+		 * An index of 0 in the activity queue
+		 * indicates the req vq now has messages
+		 * available...
+		 *
+		 * Wake up any waiters waiting on req VQ
+		 * message availability.
+		 */
+		wake_up(&c2dev->req_vq_wo);
+		break;
+	case (1):
+		handle_vq(c2dev, mq_index);
+		break;
+	case (2):
+		/* We have to purge the VQ in case there are pending
+		 * accept reply requests that would result in the
+		 * generation of an ESTABLISHED event. If we don't
+		 * generate these first, a CLOSE event could end up
+		 * being delivered before the ESTABLISHED event.
+		 */
+		handle_vq(c2dev, 1);
+
+		c2_ae_event(c2dev, mq_index);
+		break;
+	default:
+		/* There is no event synchronization between CQ events
+		 * and AE or CM events. In fact, CQEs could be
+		 * delivered for all of the I/O up to and including the
+		 * FLUSH for a peer disconnect prior to the ESTABLISHED
+		 * event being delivered to the app. The reason for this
+		 * is that CM events are delivered on a thread, while AE
+		 * and CQ events are delivered in interrupt context.
+		 */
+		c2_cq_event(c2dev, mq_index);
+		break;
+	}
+
+	return;
+}
+
+/*
+ * Handles verbs WR replies.
+ */
+static void handle_vq(struct c2_dev *c2dev, u32 mq_index)
+{
+	void *adapter_msg, *reply_msg;
+	struct c2wr_hdr *host_msg;
+	struct c2wr_hdr tmp;
+	struct c2_mq *reply_vq;
+	struct c2_vq_req *req;
+	struct iw_cm_event cm_event;
+	int err;
+
+	reply_vq = (struct c2_mq *) c2dev->qptr_array[mq_index];
+
+	/*
+	 * get next msg from mq_index into adapter_msg.
+	 * don't free it yet.
+	 */
+	adapter_msg = c2_mq_consume(reply_vq);
+	if (adapter_msg == NULL) {
+		return;
+	}
+
+	host_msg = vq_repbuf_alloc(c2dev);
+
+	/*
+	 * If we can't get a host buffer, then we'll still
+	 * wake up the waiter; we just won't give it the msg.
+	 * It is assumed the waiter will deal with this...
+	 */
+	if (!host_msg) {
+		pr_debug("handle_vq: no repbufs!\n");
+
+		/*
+		 * just copy the WR header into a local variable.
+		 * this allows us to still demux on the context
+		 */
+		host_msg = &tmp;
+		memcpy(host_msg, adapter_msg, sizeof(tmp));
+		reply_msg = NULL;
+	} else {
+		memcpy(host_msg, adapter_msg, reply_vq->msg_size);
+		reply_msg = host_msg;
+	}
+
+	/*
+	 * consume the msg from the MQ
+	 */
+	c2_mq_free(reply_vq);
+
+	/*
+	 * wake up the waiter.
+	 */
+	req = (struct c2_vq_req *) (unsigned long) host_msg->context;
+	if (req == NULL) {
+		/*
+		 * We should never get here, as the adapter should
+		 * never send us a reply that we're not expecting.
+		 */
+		vq_repbuf_free(c2dev, host_msg);
+		pr_debug("handle_vq: UNEXPECTEDLY got NULL req\n");
+		return;
+	}
+
+	err = c2_errno(reply_msg);
+	if (!err) switch (req->event) {
+	case IW_CM_EVENT_ESTABLISHED:
+		/*
+		 * Move the QP to RTS if this is
+		 * the established event.
+		 */
+		c2_set_qp_state(req->qp,
+				C2_QP_STATE_RTS);
+		/* fall through - the CM event is delivered either way */
+	case IW_CM_EVENT_CLOSE:
+		cm_event.event = req->event;
+		cm_event.status = 0;
+		cm_event.local_addr = req->cm_id->local_addr;
+		cm_event.remote_addr = req->cm_id->remote_addr;
+		cm_event.private_data = NULL;
+		cm_event.private_data_len = 0;
+		req->cm_id->event_handler(req->cm_id, &cm_event);
+		break;
+	default:
+		break;
+	}
+
+	req->reply_msg = (u64) (unsigned long) (reply_msg);
+	atomic_set(&req->reply_ready, 1);
+	wake_up(&req->wait_object);
+
+	/*
+	 * If the request was cancelled, then this put will
+	 * free the vq_req memory...and reply_msg!!!
+	 */
+	vq_req_put(c2dev, req);
+}
diff --git a/drivers/infiniband/hw/amso1100/c2_mm.c b/drivers/infiniband/hw/amso1100/c2_mm.c
new file mode 100644
index 000000000000..1e4f46493fcb
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_mm.c
@@ -0,0 +1,375 @@
+/*
+ * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "c2.h" +#include "c2_vq.h" + +#define PBL_VIRT 1 +#define PBL_PHYS 2 + +/* + * Send all the PBL messages to convey the remainder of the PBL + * Wait for the adapter's reply on the last one. + * This is indicated by setting the MEM_PBL_COMPLETE in the flags. + * + * NOTE: vq_req is _not_ freed by this function. The VQ Host + * Reply buffer _is_ freed by this function. + */ +static int +send_pbl_messages(struct c2_dev *c2dev, u32 stag_index, + unsigned long va, u32 pbl_depth, + struct c2_vq_req *vq_req, int pbl_type) +{ + u32 pbe_count; /* amt that fits in a PBL msg */ + u32 count; /* amt in this PBL MSG. */ + struct c2wr_nsmr_pbl_req *wr; /* PBL WR ptr */ + struct c2wr_nsmr_pbl_rep *reply; /* reply ptr */ + int err, pbl_virt, pbl_index, i; + + switch (pbl_type) { + case PBL_VIRT: + pbl_virt = 1; + break; + case PBL_PHYS: + pbl_virt = 0; + break; + default: + return -EINVAL; + break; + } + + pbe_count = (c2dev->req_vq.msg_size - + sizeof(struct c2wr_nsmr_pbl_req)) / sizeof(u64); + wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL); + if (!wr) { + return -ENOMEM; + } + c2_wr_set_id(wr, CCWR_NSMR_PBL); + + /* + * Only the last PBL message will generate a reply from the verbs, + * so we set the context to 0 indicating there is no kernel verbs + * handler blocked awaiting this reply. + */ + wr->hdr.context = 0; + wr->rnic_handle = c2dev->adapter_handle; + wr->stag_index = stag_index; /* already swapped */ + wr->flags = 0; + pbl_index = 0; + while (pbl_depth) { + count = min(pbe_count, pbl_depth); + wr->addrs_length = cpu_to_be32(count); + + /* + * If this is the last message, then reference the + * vq request struct cuz we're gonna wait for a reply. + * also make this PBL msg as the last one. + */ + if (count == pbl_depth) { + /* + * reference the request struct. dereferenced in the + * int handler. + */ + vq_req_get(c2dev, vq_req); + wr->flags = cpu_to_be32(MEM_PBL_COMPLETE); + + /* + * This is the last PBL message. + * Set the context to our VQ Request Object so we can + * wait for the reply. + */ + wr->hdr.context = (unsigned long) vq_req; + } + + /* + * If pbl_virt is set then va is a virtual address + * that describes a virtually contiguous memory + * allocation. 
The wr needs the start of each virtual page + * to be converted to the corresponding physical address + * of the page. If pbl_virt is not set then va is an array + * of physical addresses and there is no conversion to do. + * Just fill in the wr with what is in the array. + */ + for (i = 0; i < count; i++) { + if (pbl_virt) { + va += PAGE_SIZE; + } else { + wr->paddrs[i] = + cpu_to_be64(((u64 *)va)[pbl_index + i]); + } + } + + /* + * Send WR to adapter + */ + err = vq_send_wr(c2dev, (union c2wr *) wr); + if (err) { + if (count <= pbe_count) { + vq_req_put(c2dev, vq_req); + } + goto bail0; + } + pbl_depth -= count; + pbl_index += count; + } + + /* + * Now wait for the reply... + */ + err = vq_wait_for_reply(c2dev, vq_req); + if (err) { + goto bail0; + } + + /* + * Process reply + */ + reply = (struct c2wr_nsmr_pbl_rep *) (unsigned long) vq_req->reply_msg; + if (!reply) { + err = -ENOMEM; + goto bail0; + } + + err = c2_errno(reply); + + vq_repbuf_free(c2dev, reply); + bail0: + kfree(wr); + return err; +} + +#define C2_PBL_MAX_DEPTH 131072 +int +c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list, + int page_size, int pbl_depth, u32 length, + u32 offset, u64 *va, enum c2_acf acf, + struct c2_mr *mr) +{ + struct c2_vq_req *vq_req; + struct c2wr_nsmr_register_req *wr; + struct c2wr_nsmr_register_rep *reply; + u16 flags; + int i, pbe_count, count; + int err; + + if (!va || !length || !addr_list || !pbl_depth) + return -EINTR; + + /* + * Verify PBL depth is within rnic max + */ + if (pbl_depth > C2_PBL_MAX_DEPTH) { + return -EINTR; + } + + /* + * allocate verbs request object + */ + vq_req = vq_req_alloc(c2dev); + if (!vq_req) + return -ENOMEM; + + wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL); + if (!wr) { + err = -ENOMEM; + goto bail0; + } + + /* + * build the WR + */ + c2_wr_set_id(wr, CCWR_NSMR_REGISTER); + wr->hdr.context = (unsigned long) vq_req; + wr->rnic_handle = c2dev->adapter_handle; + + flags = (acf | MEM_VA_BASED | MEM_REMOTE); + + /* + * compute how many pbes can fit in the message + */ + pbe_count = (c2dev->req_vq.msg_size - + sizeof(struct c2wr_nsmr_register_req)) / sizeof(u64); + + if (pbl_depth <= pbe_count) { + flags |= MEM_PBL_COMPLETE; + } + wr->flags = cpu_to_be16(flags); + wr->stag_key = 0; //stag_key; + wr->va = cpu_to_be64(*va); + wr->pd_id = mr->pd->pd_id; + wr->pbe_size = cpu_to_be32(page_size); + wr->length = cpu_to_be32(length); + wr->pbl_depth = cpu_to_be32(pbl_depth); + wr->fbo = cpu_to_be32(offset); + count = min(pbl_depth, pbe_count); + wr->addrs_length = cpu_to_be32(count); + + /* + * fill out the PBL for this message + */ + for (i = 0; i < count; i++) { + wr->paddrs[i] = cpu_to_be64(addr_list[i]); + } + + /* + * regerence the request struct + */ + vq_req_get(c2dev, vq_req); + + /* + * send the WR to the adapter + */ + err = vq_send_wr(c2dev, (union c2wr *) wr); + if (err) { + vq_req_put(c2dev, vq_req); + goto bail1; + } + + /* + * wait for reply from adapter + */ + err = vq_wait_for_reply(c2dev, vq_req); + if (err) { + goto bail1; + } + + /* + * process reply + */ + reply = + (struct c2wr_nsmr_register_rep *) (unsigned long) (vq_req->reply_msg); + if (!reply) { + err = -ENOMEM; + goto bail1; + } + if ((err = c2_errno(reply))) { + goto bail2; + } + //*p_pb_entries = be32_to_cpu(reply->pbl_depth); + mr->ibmr.lkey = mr->ibmr.rkey = be32_to_cpu(reply->stag_index); + vq_repbuf_free(c2dev, reply); + + /* + * if there are still more PBEs we need to send them to + * the adapter and wait for a reply on the final one. + * reuse vq_req for this purpose. 
+ */ + pbl_depth -= count; + if (pbl_depth) { + + vq_req->reply_msg = (unsigned long) NULL; + atomic_set(&vq_req->reply_ready, 0); + err = send_pbl_messages(c2dev, + cpu_to_be32(mr->ibmr.lkey), + (unsigned long) &addr_list[i], + pbl_depth, vq_req, PBL_PHYS); + if (err) { + goto bail1; + } + } + + vq_req_free(c2dev, vq_req); + kfree(wr); + + return err; + + bail2: + vq_repbuf_free(c2dev, reply); + bail1: + kfree(wr); + bail0: + vq_req_free(c2dev, vq_req); + return err; +} + +int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index) +{ + struct c2_vq_req *vq_req; /* verbs request object */ + struct c2wr_stag_dealloc_req wr; /* work request */ + struct c2wr_stag_dealloc_rep *reply; /* WR reply */ + int err; + + + /* + * allocate verbs request object + */ + vq_req = vq_req_alloc(c2dev); + if (!vq_req) { + return -ENOMEM; + } + + /* + * Build the WR + */ + c2_wr_set_id(&wr, CCWR_STAG_DEALLOC); + wr.hdr.context = (u64) (unsigned long) vq_req; + wr.rnic_handle = c2dev->adapter_handle; + wr.stag_index = cpu_to_be32(stag_index); + + /* + * reference the request struct. dereferenced in the int handler. + */ + vq_req_get(c2dev, vq_req); + + /* + * Send WR to adapter + */ + err = vq_send_wr(c2dev, (union c2wr *) & wr); + if (err) { + vq_req_put(c2dev, vq_req); + goto bail0; + } + + /* + * Wait for reply from adapter + */ + err = vq_wait_for_reply(c2dev, vq_req); + if (err) { + goto bail0; + } + + /* + * Process reply + */ + reply = (struct c2wr_stag_dealloc_rep *) (unsigned long) vq_req->reply_msg; + if (!reply) { + err = -ENOMEM; + goto bail0; + } + + err = c2_errno(reply); + + vq_repbuf_free(c2dev, reply); + bail0: + vq_req_free(c2dev, vq_req); + return err; +} diff --git a/drivers/infiniband/hw/amso1100/c2_mq.c b/drivers/infiniband/hw/amso1100/c2_mq.c new file mode 100644 index 000000000000..b88a75592102 --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_mq.c @@ -0,0 +1,174 @@ +/* + * Copyright (c) 2005 Ammasso, Inc. All rights reserved. + * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include "c2.h" +#include "c2_mq.h" + +void *c2_mq_alloc(struct c2_mq *q) +{ + BUG_ON(q->magic != C2_MQ_MAGIC); + BUG_ON(q->type != C2_MQ_ADAPTER_TARGET); + + if (c2_mq_full(q)) { + return NULL; + } else { +#ifdef DEBUG + struct c2wr_hdr *m = + (struct c2wr_hdr *) (q->msg_pool.host + q->priv * q->msg_size); +#ifdef CCMSGMAGIC + BUG_ON(m->magic != be32_to_cpu(~CCWR_MAGIC)); + m->magic = cpu_to_be32(CCWR_MAGIC); +#endif + return m; +#else + return q->msg_pool.host + q->priv * q->msg_size; +#endif + } +} + +void c2_mq_produce(struct c2_mq *q) +{ + BUG_ON(q->magic != C2_MQ_MAGIC); + BUG_ON(q->type != C2_MQ_ADAPTER_TARGET); + + if (!c2_mq_full(q)) { + q->priv = (q->priv + 1) % q->q_size; + q->hint_count++; + /* Update peer's offset. */ + __raw_writew(cpu_to_be16(q->priv), &q->peer->shared); + } +} + +void *c2_mq_consume(struct c2_mq *q) +{ + BUG_ON(q->magic != C2_MQ_MAGIC); + BUG_ON(q->type != C2_MQ_HOST_TARGET); + + if (c2_mq_empty(q)) { + return NULL; + } else { +#ifdef DEBUG + struct c2wr_hdr *m = (struct c2wr_hdr *) + (q->msg_pool.host + q->priv * q->msg_size); +#ifdef CCMSGMAGIC + BUG_ON(m->magic != be32_to_cpu(CCWR_MAGIC)); +#endif + return m; +#else + return q->msg_pool.host + q->priv * q->msg_size; +#endif + } +} + +void c2_mq_free(struct c2_mq *q) +{ + BUG_ON(q->magic != C2_MQ_MAGIC); + BUG_ON(q->type != C2_MQ_HOST_TARGET); + + if (!c2_mq_empty(q)) { + +#ifdef CCMSGMAGIC + { + struct c2wr_hdr __iomem *m = (struct c2wr_hdr __iomem *) + (q->msg_pool.adapter + q->priv * q->msg_size); + __raw_writel(cpu_to_be32(~CCWR_MAGIC), &m->magic); + } +#endif + q->priv = (q->priv + 1) % q->q_size; + /* Update peer's offset. */ + __raw_writew(cpu_to_be16(q->priv), &q->peer->shared); + } +} + + +void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count) +{ + BUG_ON(q->magic != C2_MQ_MAGIC); + BUG_ON(q->type != C2_MQ_ADAPTER_TARGET); + + while (wqe_count--) { + BUG_ON(c2_mq_empty(q)); + *q->shared = cpu_to_be16((be16_to_cpu(*q->shared)+1) % q->q_size); + } +} + +#if 0 +u32 c2_mq_count(struct c2_mq *q) +{ + s32 count; + + if (q->type == C2_MQ_HOST_TARGET) + count = be16_to_cpu(*q->shared) - q->priv; + else + count = q->priv - be16_to_cpu(*q->shared); + + if (count < 0) + count += q->q_size; + + return (u32) count; +} +#endif /* 0 */ + +void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size, + u8 __iomem *pool_start, u16 __iomem *peer, u32 type) +{ + BUG_ON(!q->shared); + + /* This code assumes the byte swapping has already been done! */ + q->index = index; + q->q_size = q_size; + q->msg_size = msg_size; + q->msg_pool.adapter = pool_start; + q->peer = (struct c2_mq_shared __iomem *) peer; + q->magic = C2_MQ_MAGIC; + q->type = type; + q->priv = 0; + q->hint_count = 0; + return; +} +void c2_mq_rep_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size, + u8 *pool_start, u16 __iomem *peer, u32 type) +{ + BUG_ON(!q->shared); + + /* This code assumes the byte swapping has already been done! */ + q->index = index; + q->q_size = q_size; + q->msg_size = msg_size; + q->msg_pool.host = pool_start; + q->peer = (struct c2_mq_shared __iomem *) peer; + q->magic = C2_MQ_MAGIC; + q->type = type; + q->priv = 0; + q->hint_count = 0; + return; +} diff --git a/drivers/infiniband/hw/amso1100/c2_mq.h b/drivers/infiniband/hw/amso1100/c2_mq.h new file mode 100644 index 000000000000..9185bbb21658 --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_mq.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2005 Ammasso, Inc. All rights reserved. + * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. 
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials
+ *   provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _C2_MQ_H_
+#define _C2_MQ_H_
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include "c2_wr.h"
+
+enum c2_shared_regs {
+
+	C2_SHARED_ARMED = 0x10,
+	C2_SHARED_NOTIFY = 0x18,
+	C2_SHARED_SHARED = 0x40,
+};
+
+struct c2_mq_shared {
+	u16 unused1;
+	u8 armed;
+	u8 notification_type;
+	u32 unused2;
+	u16 shared;
+	/* Pad to 64 bytes. */
+	u8 pad[64 - sizeof(u16) - 2 * sizeof(u8) - sizeof(u32) - sizeof(u16)];
+};
+
+enum c2_mq_type {
+	C2_MQ_HOST_TARGET = 1,
+	C2_MQ_ADAPTER_TARGET = 2,
+};
+
+/*
+ * c2_mq_t is for kernel-mode MQs like the VQs and the AEQ.
+ * c2_user_mq_t (which is the same format) is for user-mode MQs...
+ */
+#define C2_MQ_MAGIC 0x4d512020	/* 'MQ  ' */
+struct c2_mq {
+	u32 magic;
+	union {
+		u8 *host;
+		u8 __iomem *adapter;
+	} msg_pool;
+	dma_addr_t host_dma;
+	DECLARE_PCI_UNMAP_ADDR(mapping);
+	u16 hint_count;
+	u16 priv;
+	struct c2_mq_shared __iomem *peer;
+	u16 *shared;
+	dma_addr_t shared_dma;
+	u32 q_size;
+	u32 msg_size;
+	u32 index;
+	enum c2_mq_type type;
+};
+
+static __inline__ int c2_mq_empty(struct c2_mq *q)
+{
+	return q->priv == be16_to_cpu(*q->shared);
+}
+
+static __inline__ int c2_mq_full(struct c2_mq *q)
+{
+	return q->priv == (be16_to_cpu(*q->shared) + q->q_size - 1) % q->q_size;
+}
+
+extern void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count);
+extern void *c2_mq_alloc(struct c2_mq *q);
+extern void c2_mq_produce(struct c2_mq *q);
+extern void *c2_mq_consume(struct c2_mq *q);
+extern void c2_mq_free(struct c2_mq *q);
+extern void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
+			   u8 __iomem *pool_start, u16 __iomem *peer, u32 type);
+extern void c2_mq_rep_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
+			   u8 *pool_start, u16 __iomem *peer, u32 type);
+
+#endif				/* _C2_MQ_H_ */
diff --git a/drivers/infiniband/hw/amso1100/c2_pd.c b/drivers/infiniband/hw/amso1100/c2_pd.c
new file mode 100644
index 000000000000..00c709926c8d
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_pd.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/init.h> +#include <linux/errno.h> + +#include "c2.h" +#include "c2_provider.h" + +int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd) +{ + u32 obj; + int ret = 0; + + spin_lock(&c2dev->pd_table.lock); + obj = find_next_zero_bit(c2dev->pd_table.table, c2dev->pd_table.max, + c2dev->pd_table.last); + if (obj >= c2dev->pd_table.max) + obj = find_first_zero_bit(c2dev->pd_table.table, + c2dev->pd_table.max); + if (obj < c2dev->pd_table.max) { + pd->pd_id = obj; + __set_bit(obj, c2dev->pd_table.table); + c2dev->pd_table.last = obj+1; + if (c2dev->pd_table.last >= c2dev->pd_table.max) + c2dev->pd_table.last = 0; + } else + ret = -ENOMEM; + spin_unlock(&c2dev->pd_table.lock); + return ret; +} + +void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd) +{ + spin_lock(&c2dev->pd_table.lock); + __clear_bit(pd->pd_id, c2dev->pd_table.table); + spin_unlock(&c2dev->pd_table.lock); +} + +int __devinit c2_init_pd_table(struct c2_dev *c2dev) +{ + + c2dev->pd_table.last = 0; + c2dev->pd_table.max = c2dev->props.max_pd; + spin_lock_init(&c2dev->pd_table.lock); + c2dev->pd_table.table = kmalloc(BITS_TO_LONGS(c2dev->props.max_pd) * + sizeof(long), GFP_KERNEL); + if (!c2dev->pd_table.table) + return -ENOMEM; + bitmap_zero(c2dev->pd_table.table, c2dev->props.max_pd); + return 0; +} + +void __devexit c2_cleanup_pd_table(struct c2_dev *c2dev) +{ + kfree(c2dev->pd_table.table); +} diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c new file mode 100644 index 000000000000..8fddc8cccdf3 --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_provider.c @@ -0,0 +1,869 @@ +/* + * Copyright (c) 2005 Ammasso, Inc. All rights reserved. + * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/inetdevice.h> +#include <linux/delay.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/if_vlan.h> +#include <linux/crc32.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/init.h> +#include <linux/dma-mapping.h> +#include <linux/if_arp.h> + +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/byteorder.h> + +#include <rdma/ib_smi.h> +#include <rdma/ib_user_verbs.h> +#include "c2.h" +#include "c2_provider.h" +#include "c2_user.h" + +static int c2_query_device(struct ib_device *ibdev, + struct ib_device_attr *props) +{ + struct c2_dev *c2dev = to_c2dev(ibdev); + + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + + *props = c2dev->props; + return 0; +} + +static int c2_query_port(struct ib_device *ibdev, + u8 port, struct ib_port_attr *props) +{ + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + + props->max_mtu = IB_MTU_4096; + props->lid = 0; + props->lmc = 0; + props->sm_lid = 0; + props->sm_sl = 0; + props->state = IB_PORT_ACTIVE; + props->phys_state = 0; + props->port_cap_flags = + IB_PORT_CM_SUP | + IB_PORT_REINIT_SUP | + IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP; + props->gid_tbl_len = 1; + props->pkey_tbl_len = 1; + props->qkey_viol_cntr = 0; + props->active_width = 1; + props->active_speed = 1; + + return 0; +} + +static int c2_modify_port(struct ib_device *ibdev, + u8 port, int port_modify_mask, + struct ib_port_modify *props) +{ + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + return 0; +} + +static int c2_query_pkey(struct ib_device *ibdev, + u8 port, u16 index, u16 * pkey) +{ + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + *pkey = 0; + return 0; +} + +static int c2_query_gid(struct ib_device *ibdev, u8 port, + int index, union ib_gid *gid) +{ + struct c2_dev *c2dev = to_c2dev(ibdev); + + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + memset(&(gid->raw[0]), 0, sizeof(gid->raw)); + memcpy(&(gid->raw[0]), c2dev->pseudo_netdev->dev_addr, 6); + + return 0; +} + +/* Allocate the user context data structure. This keeps track + * of all objects associated with a particular user-mode client. 
+ */ +static struct ib_ucontext *c2_alloc_ucontext(struct ib_device *ibdev, + struct ib_udata *udata) +{ + struct c2_ucontext *context; + + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + context = kmalloc(sizeof(*context), GFP_KERNEL); + if (!context) + return ERR_PTR(-ENOMEM); + + return &context->ibucontext; +} + +static int c2_dealloc_ucontext(struct ib_ucontext *context) +{ + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + kfree(context); + return 0; +} + +static int c2_mmap_uar(struct ib_ucontext *context, struct vm_area_struct *vma) +{ + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + return -ENOSYS; +} + +static struct ib_pd *c2_alloc_pd(struct ib_device *ibdev, + struct ib_ucontext *context, + struct ib_udata *udata) +{ + struct c2_pd *pd; + int err; + + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + + pd = kmalloc(sizeof(*pd), GFP_KERNEL); + if (!pd) + return ERR_PTR(-ENOMEM); + + err = c2_pd_alloc(to_c2dev(ibdev), !context, pd); + if (err) { + kfree(pd); + return ERR_PTR(err); + } + + if (context) { + if (ib_copy_to_udata(udata, &pd->pd_id, sizeof(__u32))) { + c2_pd_free(to_c2dev(ibdev), pd); + kfree(pd); + return ERR_PTR(-EFAULT); + } + } + + return &pd->ibpd; +} + +static int c2_dealloc_pd(struct ib_pd *pd) +{ + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + c2_pd_free(to_c2dev(pd->device), to_c2pd(pd)); + kfree(pd); + + return 0; +} + +static struct ib_ah *c2_ah_create(struct ib_pd *pd, struct ib_ah_attr *ah_attr) +{ + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + return ERR_PTR(-ENOSYS); +} + +static int c2_ah_destroy(struct ib_ah *ah) +{ + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + return -ENOSYS; +} + +static void c2_add_ref(struct ib_qp *ibqp) +{ + struct c2_qp *qp; + BUG_ON(!ibqp); + qp = to_c2qp(ibqp); + atomic_inc(&qp->refcount); +} + +static void c2_rem_ref(struct ib_qp *ibqp) +{ + struct c2_qp *qp; + BUG_ON(!ibqp); + qp = to_c2qp(ibqp); + if (atomic_dec_and_test(&qp->refcount)) + wake_up(&qp->wait); +} + +struct ib_qp *c2_get_qp(struct ib_device *device, int qpn) +{ + struct c2_dev* c2dev = to_c2dev(device); + struct c2_qp *qp; + + qp = c2_find_qpn(c2dev, qpn); + pr_debug("%s Returning QP=%p for QPN=%d, device=%p, refcount=%d\n", + __FUNCTION__, qp, qpn, device, + (qp?atomic_read(&qp->refcount):0)); + + return (qp?&qp->ibqp:NULL); +} + +static struct ib_qp *c2_create_qp(struct ib_pd *pd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata) +{ + struct c2_qp *qp; + int err; + + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + + switch (init_attr->qp_type) { + case IB_QPT_RC: + qp = kzalloc(sizeof(*qp), GFP_KERNEL); + if (!qp) { + pr_debug("%s: Unable to allocate QP\n", __FUNCTION__); + return ERR_PTR(-ENOMEM); + } + spin_lock_init(&qp->lock); + if (pd->uobject) { + /* userspace specific */ + } + + err = c2_alloc_qp(to_c2dev(pd->device), + to_c2pd(pd), init_attr, qp); + + if (err && pd->uobject) { + /* userspace specific */ + } + + break; + default: + pr_debug("%s: Invalid QP type: %d\n", __FUNCTION__, + init_attr->qp_type); + return ERR_PTR(-EINVAL); + break; + } + + if (err) { + kfree(qp); + return ERR_PTR(err); + } + + return &qp->ibqp; +} + +static int c2_destroy_qp(struct ib_qp *ib_qp) +{ + struct c2_qp *qp = to_c2qp(ib_qp); + + pr_debug("%s:%u qp=%p,qp->state=%d\n", + __FUNCTION__, __LINE__,ib_qp,qp->state); + c2_free_qp(to_c2dev(ib_qp->device), qp); + kfree(qp); + return 0; +} + +static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries, + struct ib_ucontext *context, + struct ib_udata *udata) +{ + struct c2_cq *cq; + int err; + + cq = 
kmalloc(sizeof(*cq), GFP_KERNEL); + if (!cq) { + pr_debug("%s: Unable to allocate CQ\n", __FUNCTION__); + return ERR_PTR(-ENOMEM); + } + + err = c2_init_cq(to_c2dev(ibdev), entries, NULL, cq); + if (err) { + pr_debug("%s: error initializing CQ\n", __FUNCTION__); + kfree(cq); + return ERR_PTR(err); + } + + return &cq->ibcq; +} + +static int c2_destroy_cq(struct ib_cq *ib_cq) +{ + struct c2_cq *cq = to_c2cq(ib_cq); + + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + + c2_free_cq(to_c2dev(ib_cq->device), cq); + kfree(cq); + + return 0; +} + +static inline u32 c2_convert_access(int acc) +{ + return (acc & IB_ACCESS_REMOTE_WRITE ? C2_ACF_REMOTE_WRITE : 0) | + (acc & IB_ACCESS_REMOTE_READ ? C2_ACF_REMOTE_READ : 0) | + (acc & IB_ACCESS_LOCAL_WRITE ? C2_ACF_LOCAL_WRITE : 0) | + C2_ACF_LOCAL_READ | C2_ACF_WINDOW_BIND; +} + +static struct ib_mr *c2_reg_phys_mr(struct ib_pd *ib_pd, + struct ib_phys_buf *buffer_list, + int num_phys_buf, int acc, u64 * iova_start) +{ + struct c2_mr *mr; + u64 *page_list; + u32 total_len; + int err, i, j, k, page_shift, pbl_depth; + + pbl_depth = 0; + total_len = 0; + + page_shift = PAGE_SHIFT; + /* + * If there is only 1 buffer we assume this could + * be a map of all phy mem...use a 32k page_shift. + */ + if (num_phys_buf == 1) + page_shift += 3; + + for (i = 0; i < num_phys_buf; i++) { + + if (buffer_list[i].addr & ~PAGE_MASK) { + pr_debug("Unaligned Memory Buffer: 0x%x\n", + (unsigned int) buffer_list[i].addr); + return ERR_PTR(-EINVAL); + } + + if (!buffer_list[i].size) { + pr_debug("Invalid Buffer Size\n"); + return ERR_PTR(-EINVAL); + } + + total_len += buffer_list[i].size; + pbl_depth += ALIGN(buffer_list[i].size, + (1 << page_shift)) >> page_shift; + } + + page_list = vmalloc(sizeof(u64) * pbl_depth); + if (!page_list) { + pr_debug("couldn't vmalloc page_list of size %zd\n", + (sizeof(u64) * pbl_depth)); + return ERR_PTR(-ENOMEM); + } + + for (i = 0, j = 0; i < num_phys_buf; i++) { + + int naddrs; + + naddrs = ALIGN(buffer_list[i].size, + (1 << page_shift)) >> page_shift; + for (k = 0; k < naddrs; k++) + page_list[j++] = (buffer_list[i].addr + + (k << page_shift)); + } + + mr = kmalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + mr->pd = to_c2pd(ib_pd); + pr_debug("%s - page shift %d, pbl_depth %d, total_len %u, " + "*iova_start %llx, first pa %llx, last pa %llx\n", + __FUNCTION__, page_shift, pbl_depth, total_len, + *iova_start, page_list[0], page_list[pbl_depth-1]); + err = c2_nsmr_register_phys_kern(to_c2dev(ib_pd->device), page_list, + (1 << page_shift), pbl_depth, + total_len, 0, iova_start, + c2_convert_access(acc), mr); + vfree(page_list); + if (err) { + kfree(mr); + return ERR_PTR(err); + } + + return &mr->ibmr; +} + +static struct ib_mr *c2_get_dma_mr(struct ib_pd *pd, int acc) +{ + struct ib_phys_buf bl; + u64 kva = 0; + + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + + /* AMSO1100 limit */ + bl.size = 0xffffffff; + bl.addr = 0; + return c2_reg_phys_mr(pd, &bl, 1, acc, &kva); +} + +static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, struct ib_umem *region, + int acc, struct ib_udata *udata) +{ + u64 *pages; + u64 kva = 0; + int shift, n, len; + int i, j, k; + int err = 0; + struct ib_umem_chunk *chunk; + struct c2_pd *c2pd = to_c2pd(pd); + struct c2_mr *c2mr; + + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + shift = ffs(region->page_size) - 1; + + c2mr = kmalloc(sizeof(*c2mr), GFP_KERNEL); + if (!c2mr) + return ERR_PTR(-ENOMEM); + c2mr->pd = c2pd; + + n = 0; + list_for_each_entry(chunk, ®ion->chunk_list, list) + n += 
chunk->nents; + + pages = kmalloc(n * sizeof(u64), GFP_KERNEL); + if (!pages) { + err = -ENOMEM; + goto err; + } + + i = 0; + list_for_each_entry(chunk, ®ion->chunk_list, list) { + for (j = 0; j < chunk->nmap; ++j) { + len = sg_dma_len(&chunk->page_list[j]) >> shift; + for (k = 0; k < len; ++k) { + pages[i++] = + sg_dma_address(&chunk->page_list[j]) + + (region->page_size * k); + } + } + } + + kva = (u64)region->virt_base; + err = c2_nsmr_register_phys_kern(to_c2dev(pd->device), + pages, + region->page_size, + i, + region->length, + region->offset, + &kva, + c2_convert_access(acc), + c2mr); + kfree(pages); + if (err) { + kfree(c2mr); + return ERR_PTR(err); + } + return &c2mr->ibmr; + +err: + kfree(c2mr); + return ERR_PTR(err); +} + +static int c2_dereg_mr(struct ib_mr *ib_mr) +{ + struct c2_mr *mr = to_c2mr(ib_mr); + int err; + + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + + err = c2_stag_dealloc(to_c2dev(ib_mr->device), ib_mr->lkey); + if (err) + pr_debug("c2_stag_dealloc failed: %d\n", err); + else + kfree(mr); + + return err; +} + +static ssize_t show_rev(struct class_device *cdev, char *buf) +{ + struct c2_dev *dev = container_of(cdev, struct c2_dev, ibdev.class_dev); + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + return sprintf(buf, "%x\n", dev->props.hw_ver); +} + +static ssize_t show_fw_ver(struct class_device *cdev, char *buf) +{ + struct c2_dev *dev = container_of(cdev, struct c2_dev, ibdev.class_dev); + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + return sprintf(buf, "%x.%x.%x\n", + (int) (dev->props.fw_ver >> 32), + (int) (dev->props.fw_ver >> 16) & 0xffff, + (int) (dev->props.fw_ver & 0xffff)); +} + +static ssize_t show_hca(struct class_device *cdev, char *buf) +{ + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + return sprintf(buf, "AMSO1100\n"); +} + +static ssize_t show_board(struct class_device *cdev, char *buf) +{ + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + return sprintf(buf, "%.*s\n", 32, "AMSO1100 Board ID"); +} + +static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); +static CLASS_DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); +static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); +static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL); + +static struct class_device_attribute *c2_class_attributes[] = { + &class_device_attr_hw_rev, + &class_device_attr_fw_ver, + &class_device_attr_hca_type, + &class_device_attr_board_id +}; + +static int c2_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata) +{ + int err; + + err = + c2_qp_modify(to_c2dev(ibqp->device), to_c2qp(ibqp), attr, + attr_mask); + + return err; +} + +static int c2_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) +{ + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + return -ENOSYS; +} + +static int c2_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) +{ + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + return -ENOSYS; +} + +static int c2_process_mad(struct ib_device *ibdev, + int mad_flags, + u8 port_num, + struct ib_wc *in_wc, + struct ib_grh *in_grh, + struct ib_mad *in_mad, struct ib_mad *out_mad) +{ + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + return -ENOSYS; +} + +static int c2_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param) +{ + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + + /* Request a connection */ + return c2_llp_connect(cm_id, iw_param); +} + +static int c2_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param) +{ + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + + 
/* Accept the new connection */ + return c2_llp_accept(cm_id, iw_param); +} + +static int c2_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) +{ + int err; + + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + + err = c2_llp_reject(cm_id, pdata, pdata_len); + return err; +} + +static int c2_service_create(struct iw_cm_id *cm_id, int backlog) +{ + int err; + + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + err = c2_llp_service_create(cm_id, backlog); + pr_debug("%s:%u err=%d\n", + __FUNCTION__, __LINE__, + err); + return err; +} + +static int c2_service_destroy(struct iw_cm_id *cm_id) +{ + int err; + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + + err = c2_llp_service_destroy(cm_id); + + return err; +} + +static int c2_pseudo_up(struct net_device *netdev) +{ + struct in_device *ind; + struct c2_dev *c2dev = netdev->priv; + + ind = in_dev_get(netdev); + if (!ind) + return 0; + + pr_debug("adding...\n"); + for_ifa(ind) { +#ifdef DEBUG + u8 *ip = (u8 *) & ifa->ifa_address; + + pr_debug("%s: %d.%d.%d.%d\n", + ifa->ifa_label, ip[0], ip[1], ip[2], ip[3]); +#endif + c2_add_addr(c2dev, ifa->ifa_address, ifa->ifa_mask); + } + endfor_ifa(ind); + in_dev_put(ind); + + return 0; +} + +static int c2_pseudo_down(struct net_device *netdev) +{ + struct in_device *ind; + struct c2_dev *c2dev = netdev->priv; + + ind = in_dev_get(netdev); + if (!ind) + return 0; + + pr_debug("deleting...\n"); + for_ifa(ind) { +#ifdef DEBUG + u8 *ip = (u8 *) & ifa->ifa_address; + + pr_debug("%s: %d.%d.%d.%d\n", + ifa->ifa_label, ip[0], ip[1], ip[2], ip[3]); +#endif + c2_del_addr(c2dev, ifa->ifa_address, ifa->ifa_mask); + } + endfor_ifa(ind); + in_dev_put(ind); + + return 0; +} + +static int c2_pseudo_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + kfree_skb(skb); + return NETDEV_TX_OK; +} + +static int c2_pseudo_change_mtu(struct net_device *netdev, int new_mtu) +{ + int ret = 0; + + if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) + return -EINVAL; + + netdev->mtu = new_mtu; + + /* TODO: Tell rnic about new rmda interface mtu */ + return ret; +} + +static void setup(struct net_device *netdev) +{ + SET_MODULE_OWNER(netdev); + netdev->open = c2_pseudo_up; + netdev->stop = c2_pseudo_down; + netdev->hard_start_xmit = c2_pseudo_xmit_frame; + netdev->get_stats = NULL; + netdev->tx_timeout = NULL; + netdev->set_mac_address = NULL; + netdev->change_mtu = c2_pseudo_change_mtu; + netdev->watchdog_timeo = 0; + netdev->type = ARPHRD_ETHER; + netdev->mtu = 1500; + netdev->hard_header_len = ETH_HLEN; + netdev->addr_len = ETH_ALEN; + netdev->tx_queue_len = 0; + netdev->flags |= IFF_NOARP; + return; +} + +static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev) +{ + char name[IFNAMSIZ]; + struct net_device *netdev; + + /* change ethxxx to iwxxx */ + strcpy(name, "iw"); + strcat(name, &c2dev->netdev->name[3]); + netdev = alloc_netdev(sizeof(*netdev), name, setup); + if (!netdev) { + printk(KERN_ERR PFX "%s - etherdev alloc failed", + __FUNCTION__); + return NULL; + } + + netdev->priv = c2dev; + + SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev); + + memcpy_fromio(netdev->dev_addr, c2dev->kva + C2_REGS_RDMA_ENADDR, 6); + + /* Print out the MAC address */ + pr_debug("%s: MAC %02X:%02X:%02X:%02X:%02X:%02X\n", + netdev->name, + netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], + netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]); + +#if 0 + /* Disable network packets */ + netif_stop_queue(netdev); +#endif + return netdev; +} + +int c2_register_device(struct c2_dev *dev) +{ + int ret; 
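+
+	/*
+	 * Bring-up order below: the "iwXX" pseudo netdev is registered
+	 * first (it is how IP addresses get bound to the RNIC, see
+	 * c2_pseudo_up() above), then the ib_device itself, then the
+	 * sysfs attribute files.  c2_unregister_device() unwinds the
+	 * netdev and the ib_device in the same order.
+	 */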
+ int i; + + /* Register pseudo network device */ + dev->pseudo_netdev = c2_pseudo_netdev_init(dev); + if (dev->pseudo_netdev) { + ret = register_netdev(dev->pseudo_netdev); + if (ret) { + printk(KERN_ERR PFX + "Unable to register netdev, ret = %d\n", ret); + free_netdev(dev->pseudo_netdev); + return ret; + } + } + + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX); + dev->ibdev.owner = THIS_MODULE; + dev->ibdev.uverbs_cmd_mask = + (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | + (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | + (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | + (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | + (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | + (1ull << IB_USER_VERBS_CMD_REG_MR) | + (1ull << IB_USER_VERBS_CMD_DEREG_MR) | + (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | + (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | + (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | + (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) | + (1ull << IB_USER_VERBS_CMD_CREATE_QP) | + (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | + (1ull << IB_USER_VERBS_CMD_POLL_CQ) | + (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | + (1ull << IB_USER_VERBS_CMD_POST_SEND) | + (1ull << IB_USER_VERBS_CMD_POST_RECV); + + dev->ibdev.node_type = RDMA_NODE_RNIC; + memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid)); + memcpy(&dev->ibdev.node_guid, dev->pseudo_netdev->dev_addr, 6); + dev->ibdev.phys_port_cnt = 1; + dev->ibdev.dma_device = &dev->pcidev->dev; + dev->ibdev.class_dev.dev = &dev->pcidev->dev; + dev->ibdev.query_device = c2_query_device; + dev->ibdev.query_port = c2_query_port; + dev->ibdev.modify_port = c2_modify_port; + dev->ibdev.query_pkey = c2_query_pkey; + dev->ibdev.query_gid = c2_query_gid; + dev->ibdev.alloc_ucontext = c2_alloc_ucontext; + dev->ibdev.dealloc_ucontext = c2_dealloc_ucontext; + dev->ibdev.mmap = c2_mmap_uar; + dev->ibdev.alloc_pd = c2_alloc_pd; + dev->ibdev.dealloc_pd = c2_dealloc_pd; + dev->ibdev.create_ah = c2_ah_create; + dev->ibdev.destroy_ah = c2_ah_destroy; + dev->ibdev.create_qp = c2_create_qp; + dev->ibdev.modify_qp = c2_modify_qp; + dev->ibdev.destroy_qp = c2_destroy_qp; + dev->ibdev.create_cq = c2_create_cq; + dev->ibdev.destroy_cq = c2_destroy_cq; + dev->ibdev.poll_cq = c2_poll_cq; + dev->ibdev.get_dma_mr = c2_get_dma_mr; + dev->ibdev.reg_phys_mr = c2_reg_phys_mr; + dev->ibdev.reg_user_mr = c2_reg_user_mr; + dev->ibdev.dereg_mr = c2_dereg_mr; + + dev->ibdev.alloc_fmr = NULL; + dev->ibdev.unmap_fmr = NULL; + dev->ibdev.dealloc_fmr = NULL; + dev->ibdev.map_phys_fmr = NULL; + + dev->ibdev.attach_mcast = c2_multicast_attach; + dev->ibdev.detach_mcast = c2_multicast_detach; + dev->ibdev.process_mad = c2_process_mad; + + dev->ibdev.req_notify_cq = c2_arm_cq; + dev->ibdev.post_send = c2_post_send; + dev->ibdev.post_recv = c2_post_receive; + + dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL); + dev->ibdev.iwcm->add_ref = c2_add_ref; + dev->ibdev.iwcm->rem_ref = c2_rem_ref; + dev->ibdev.iwcm->get_qp = c2_get_qp; + dev->ibdev.iwcm->connect = c2_connect; + dev->ibdev.iwcm->accept = c2_accept; + dev->ibdev.iwcm->reject = c2_reject; + dev->ibdev.iwcm->create_listen = c2_service_create; + dev->ibdev.iwcm->destroy_listen = c2_service_destroy; + + ret = ib_register_device(&dev->ibdev); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(c2_class_attributes); ++i) { + ret = class_device_create_file(&dev->ibdev.class_dev, + c2_class_attributes[i]); + if (ret) { + unregister_netdev(dev->pseudo_netdev); + free_netdev(dev->pseudo_netdev); + 
ib_unregister_device(&dev->ibdev); + return ret; + } + } + + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + return 0; +} + +void c2_unregister_device(struct c2_dev *dev) +{ + pr_debug("%s:%u\n", __FUNCTION__, __LINE__); + unregister_netdev(dev->pseudo_netdev); + free_netdev(dev->pseudo_netdev); + ib_unregister_device(&dev->ibdev); +} diff --git a/drivers/infiniband/hw/amso1100/c2_provider.h b/drivers/infiniband/hw/amso1100/c2_provider.h new file mode 100644 index 000000000000..fc906223220f --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_provider.h @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2005 Ammasso, Inc. All rights reserved. + * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef C2_PROVIDER_H +#define C2_PROVIDER_H +#include <linux/inetdevice.h> + +#include <rdma/ib_verbs.h> +#include <rdma/ib_pack.h> + +#include "c2_mq.h" +#include <rdma/iw_cm.h> + +#define C2_MPT_FLAG_ATOMIC (1 << 14) +#define C2_MPT_FLAG_REMOTE_WRITE (1 << 13) +#define C2_MPT_FLAG_REMOTE_READ (1 << 12) +#define C2_MPT_FLAG_LOCAL_WRITE (1 << 11) +#define C2_MPT_FLAG_LOCAL_READ (1 << 10) + +struct c2_buf_list { + void *buf; + DECLARE_PCI_UNMAP_ADDR(mapping) +}; + + +/* The user context keeps track of objects allocated for a + * particular user-mode client. */ +struct c2_ucontext { + struct ib_ucontext ibucontext; +}; + +struct c2_mtt; + +/* All objects associated with a PD are kept in the + * associated user context if present. 
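+ *
+ * Consumers holding only a generic ib_* pointer recover these
+ * wrappers with the container_of() helpers defined later in this
+ * header, e.g. (illustrative only):
+ *
+ *	struct c2_pd *pd = to_c2pd(ibpd);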
+ */ +struct c2_pd { + struct ib_pd ibpd; + u32 pd_id; +}; + +struct c2_mr { + struct ib_mr ibmr; + struct c2_pd *pd; +}; + +struct c2_av; + +enum c2_ah_type { + C2_AH_ON_HCA, + C2_AH_PCI_POOL, + C2_AH_KMALLOC +}; + +struct c2_ah { + struct ib_ah ibah; +}; + +struct c2_cq { + struct ib_cq ibcq; + spinlock_t lock; + atomic_t refcount; + int cqn; + int is_kernel; + wait_queue_head_t wait; + + u32 adapter_handle; + struct c2_mq mq; +}; + +struct c2_wq { + spinlock_t lock; +}; +struct iw_cm_id; +struct c2_qp { + struct ib_qp ibqp; + struct iw_cm_id *cm_id; + spinlock_t lock; + atomic_t refcount; + wait_queue_head_t wait; + int qpn; + + u32 adapter_handle; + u32 send_sgl_depth; + u32 recv_sgl_depth; + u32 rdma_write_sgl_depth; + u8 state; + + struct c2_mq sq_mq; + struct c2_mq rq_mq; +}; + +struct c2_cr_query_attrs { + u32 local_addr; + u32 remote_addr; + u16 local_port; + u16 remote_port; +}; + +static inline struct c2_pd *to_c2pd(struct ib_pd *ibpd) +{ + return container_of(ibpd, struct c2_pd, ibpd); +} + +static inline struct c2_ucontext *to_c2ucontext(struct ib_ucontext *ibucontext) +{ + return container_of(ibucontext, struct c2_ucontext, ibucontext); +} + +static inline struct c2_mr *to_c2mr(struct ib_mr *ibmr) +{ + return container_of(ibmr, struct c2_mr, ibmr); +} + + +static inline struct c2_ah *to_c2ah(struct ib_ah *ibah) +{ + return container_of(ibah, struct c2_ah, ibah); +} + +static inline struct c2_cq *to_c2cq(struct ib_cq *ibcq) +{ + return container_of(ibcq, struct c2_cq, ibcq); +} + +static inline struct c2_qp *to_c2qp(struct ib_qp *ibqp) +{ + return container_of(ibqp, struct c2_qp, ibqp); +} + +static inline int is_rnic_addr(struct net_device *netdev, u32 addr) +{ + struct in_device *ind; + int ret = 0; + + ind = in_dev_get(netdev); + if (!ind) + return 0; + + for_ifa(ind) { + if (ifa->ifa_address == addr) { + ret = 1; + break; + } + } + endfor_ifa(ind); + in_dev_put(ind); + return ret; +} +#endif /* C2_PROVIDER_H */ diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c new file mode 100644 index 000000000000..12261132b077 --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_qp.c @@ -0,0 +1,975 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. + * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include "c2.h" +#include "c2_vq.h" +#include "c2_status.h" + +#define C2_MAX_ORD_PER_QP 128 +#define C2_MAX_IRD_PER_QP 128 + +#define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | hint_count) +#define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16) +#define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF) + +#define NO_SUPPORT -1 +static const u8 c2_opcode[] = { + [IB_WR_SEND] = C2_WR_TYPE_SEND, + [IB_WR_SEND_WITH_IMM] = NO_SUPPORT, + [IB_WR_RDMA_WRITE] = C2_WR_TYPE_RDMA_WRITE, + [IB_WR_RDMA_WRITE_WITH_IMM] = NO_SUPPORT, + [IB_WR_RDMA_READ] = C2_WR_TYPE_RDMA_READ, + [IB_WR_ATOMIC_CMP_AND_SWP] = NO_SUPPORT, + [IB_WR_ATOMIC_FETCH_AND_ADD] = NO_SUPPORT, +}; + +static int to_c2_state(enum ib_qp_state ib_state) +{ + switch (ib_state) { + case IB_QPS_RESET: + return C2_QP_STATE_IDLE; + case IB_QPS_RTS: + return C2_QP_STATE_RTS; + case IB_QPS_SQD: + return C2_QP_STATE_CLOSING; + case IB_QPS_SQE: + return C2_QP_STATE_CLOSING; + case IB_QPS_ERR: + return C2_QP_STATE_ERROR; + default: + return -1; + } +} + +static int to_ib_state(enum c2_qp_state c2_state) +{ + switch (c2_state) { + case C2_QP_STATE_IDLE: + return IB_QPS_RESET; + case C2_QP_STATE_CONNECTING: + return IB_QPS_RTR; + case C2_QP_STATE_RTS: + return IB_QPS_RTS; + case C2_QP_STATE_CLOSING: + return IB_QPS_SQD; + case C2_QP_STATE_ERROR: + return IB_QPS_ERR; + case C2_QP_STATE_TERMINATE: + return IB_QPS_SQE; + default: + return -1; + } +} + +static const char *to_ib_state_str(int ib_state) +{ + static const char *state_str[] = { + "IB_QPS_RESET", + "IB_QPS_INIT", + "IB_QPS_RTR", + "IB_QPS_RTS", + "IB_QPS_SQD", + "IB_QPS_SQE", + "IB_QPS_ERR" + }; + if (ib_state < IB_QPS_RESET || + ib_state > IB_QPS_ERR) + return "<invalid IB QP state>"; + + ib_state -= IB_QPS_RESET; + return state_str[ib_state]; +} + +void c2_set_qp_state(struct c2_qp *qp, int c2_state) +{ + int new_state = to_ib_state(c2_state); + + pr_debug("%s: qp[%p] state modify %s --> %s\n", + __FUNCTION__, + qp, + to_ib_state_str(qp->state), + to_ib_state_str(new_state)); + qp->state = new_state; +} + +#define C2_QP_NO_ATTR_CHANGE 0xFFFFFFFF + +int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp, + struct ib_qp_attr *attr, int attr_mask) +{ + struct c2wr_qp_modify_req wr; + struct c2wr_qp_modify_rep *reply; + struct c2_vq_req *vq_req; + unsigned long flags; + u8 next_state; + int err; + + pr_debug("%s:%d qp=%p, %s --> %s\n", + __FUNCTION__, __LINE__, + qp, + to_ib_state_str(qp->state), + to_ib_state_str(attr->qp_state)); + + vq_req = vq_req_alloc(c2dev); + if (!vq_req) + return -ENOMEM; + + c2_wr_set_id(&wr, CCWR_QP_MODIFY); + wr.hdr.context = (unsigned long) vq_req; + wr.rnic_handle = c2dev->adapter_handle; + wr.qp_handle = qp->adapter_handle; + wr.ord = cpu_to_be32(C2_QP_NO_ATTR_CHANGE); + wr.ird = cpu_to_be32(C2_QP_NO_ATTR_CHANGE); + wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE); + wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE); + + if (attr_mask & IB_QP_STATE) { + /* Ensure the state is valid */ + if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR) + return 
-EINVAL; + + wr.next_qp_state = cpu_to_be32(to_c2_state(attr->qp_state)); + + if (attr->qp_state == IB_QPS_ERR) { + spin_lock_irqsave(&qp->lock, flags); + if (qp->cm_id && qp->state == IB_QPS_RTS) { + pr_debug("Generating CLOSE event for QP-->ERR, " + "qp=%p, cm_id=%p\n",qp,qp->cm_id); + /* Generate an CLOSE event */ + vq_req->cm_id = qp->cm_id; + vq_req->event = IW_CM_EVENT_CLOSE; + } + spin_unlock_irqrestore(&qp->lock, flags); + } + next_state = attr->qp_state; + + } else if (attr_mask & IB_QP_CUR_STATE) { + + if (attr->cur_qp_state != IB_QPS_RTR && + attr->cur_qp_state != IB_QPS_RTS && + attr->cur_qp_state != IB_QPS_SQD && + attr->cur_qp_state != IB_QPS_SQE) + return -EINVAL; + else + wr.next_qp_state = + cpu_to_be32(to_c2_state(attr->cur_qp_state)); + + next_state = attr->cur_qp_state; + + } else { + err = 0; + goto bail0; + } + + /* reference the request struct */ + vq_req_get(c2dev, vq_req); + + err = vq_send_wr(c2dev, (union c2wr *) & wr); + if (err) { + vq_req_put(c2dev, vq_req); + goto bail0; + } + + err = vq_wait_for_reply(c2dev, vq_req); + if (err) + goto bail0; + + reply = (struct c2wr_qp_modify_rep *) (unsigned long) vq_req->reply_msg; + if (!reply) { + err = -ENOMEM; + goto bail0; + } + + err = c2_errno(reply); + if (!err) + qp->state = next_state; +#ifdef DEBUG + else + pr_debug("%s: c2_errno=%d\n", __FUNCTION__, err); +#endif + /* + * If we're going to error and generating the event here, then + * we need to remove the reference because there will be no + * close event generated by the adapter + */ + spin_lock_irqsave(&qp->lock, flags); + if (vq_req->event==IW_CM_EVENT_CLOSE && qp->cm_id) { + qp->cm_id->rem_ref(qp->cm_id); + qp->cm_id = NULL; + } + spin_unlock_irqrestore(&qp->lock, flags); + + vq_repbuf_free(c2dev, reply); + bail0: + vq_req_free(c2dev, vq_req); + + pr_debug("%s:%d qp=%p, cur_state=%s\n", + __FUNCTION__, __LINE__, + qp, + to_ib_state_str(qp->state)); + return err; +} + +int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp, + int ord, int ird) +{ + struct c2wr_qp_modify_req wr; + struct c2wr_qp_modify_rep *reply; + struct c2_vq_req *vq_req; + int err; + + vq_req = vq_req_alloc(c2dev); + if (!vq_req) + return -ENOMEM; + + c2_wr_set_id(&wr, CCWR_QP_MODIFY); + wr.hdr.context = (unsigned long) vq_req; + wr.rnic_handle = c2dev->adapter_handle; + wr.qp_handle = qp->adapter_handle; + wr.ord = cpu_to_be32(ord); + wr.ird = cpu_to_be32(ird); + wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE); + wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE); + wr.next_qp_state = cpu_to_be32(C2_QP_NO_ATTR_CHANGE); + + /* reference the request struct */ + vq_req_get(c2dev, vq_req); + + err = vq_send_wr(c2dev, (union c2wr *) & wr); + if (err) { + vq_req_put(c2dev, vq_req); + goto bail0; + } + + err = vq_wait_for_reply(c2dev, vq_req); + if (err) + goto bail0; + + reply = (struct c2wr_qp_modify_rep *) (unsigned long) + vq_req->reply_msg; + if (!reply) { + err = -ENOMEM; + goto bail0; + } + + err = c2_errno(reply); + vq_repbuf_free(c2dev, reply); + bail0: + vq_req_free(c2dev, vq_req); + return err; +} + +static int destroy_qp(struct c2_dev *c2dev, struct c2_qp *qp) +{ + struct c2_vq_req *vq_req; + struct c2wr_qp_destroy_req wr; + struct c2wr_qp_destroy_rep *reply; + unsigned long flags; + int err; + + /* + * Allocate a verb request message + */ + vq_req = vq_req_alloc(c2dev); + if (!vq_req) { + return -ENOMEM; + } + + /* + * Initialize the WR + */ + c2_wr_set_id(&wr, CCWR_QP_DESTROY); + wr.hdr.context = (unsigned long) vq_req; + wr.rnic_handle = c2dev->adapter_handle; + 
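/*
+	 * As with every verbs request, hdr.context carries the vq_req
+	 * pointer so that the reply interrupt path can find and wake
+	 * the caller sleeping in vq_wait_for_reply() below.
+	 */
+	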
wr.qp_handle = qp->adapter_handle; + + /* + * reference the request struct. dereferenced in the int handler. + */ + vq_req_get(c2dev, vq_req); + + spin_lock_irqsave(&qp->lock, flags); + if (qp->cm_id && qp->state == IB_QPS_RTS) { + pr_debug("destroy_qp: generating CLOSE event for QP-->ERR, " + "qp=%p, cm_id=%p\n",qp,qp->cm_id); + /* Generate an CLOSE event */ + vq_req->qp = qp; + vq_req->cm_id = qp->cm_id; + vq_req->event = IW_CM_EVENT_CLOSE; + } + spin_unlock_irqrestore(&qp->lock, flags); + + /* + * Send WR to adapter + */ + err = vq_send_wr(c2dev, (union c2wr *) & wr); + if (err) { + vq_req_put(c2dev, vq_req); + goto bail0; + } + + /* + * Wait for reply from adapter + */ + err = vq_wait_for_reply(c2dev, vq_req); + if (err) { + goto bail0; + } + + /* + * Process reply + */ + reply = (struct c2wr_qp_destroy_rep *) (unsigned long) (vq_req->reply_msg); + if (!reply) { + err = -ENOMEM; + goto bail0; + } + + spin_lock_irqsave(&qp->lock, flags); + if (qp->cm_id) { + qp->cm_id->rem_ref(qp->cm_id); + qp->cm_id = NULL; + } + spin_unlock_irqrestore(&qp->lock, flags); + + vq_repbuf_free(c2dev, reply); + bail0: + vq_req_free(c2dev, vq_req); + return err; +} + +static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp) +{ + int ret; + + do { + spin_lock_irq(&c2dev->qp_table.lock); + ret = idr_get_new_above(&c2dev->qp_table.idr, qp, + c2dev->qp_table.last++, &qp->qpn); + spin_unlock_irq(&c2dev->qp_table.lock); + } while ((ret == -EAGAIN) && + idr_pre_get(&c2dev->qp_table.idr, GFP_KERNEL)); + return ret; +} + +static void c2_free_qpn(struct c2_dev *c2dev, int qpn) +{ + spin_lock_irq(&c2dev->qp_table.lock); + idr_remove(&c2dev->qp_table.idr, qpn); + spin_unlock_irq(&c2dev->qp_table.lock); +} + +struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn) +{ + unsigned long flags; + struct c2_qp *qp; + + spin_lock_irqsave(&c2dev->qp_table.lock, flags); + qp = idr_find(&c2dev->qp_table.idr, qpn); + spin_unlock_irqrestore(&c2dev->qp_table.lock, flags); + return qp; +} + +int c2_alloc_qp(struct c2_dev *c2dev, + struct c2_pd *pd, + struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp) +{ + struct c2wr_qp_create_req wr; + struct c2wr_qp_create_rep *reply; + struct c2_vq_req *vq_req; + struct c2_cq *send_cq = to_c2cq(qp_attrs->send_cq); + struct c2_cq *recv_cq = to_c2cq(qp_attrs->recv_cq); + unsigned long peer_pa; + u32 q_size, msg_size, mmap_size; + void __iomem *mmap; + int err; + + err = c2_alloc_qpn(c2dev, qp); + if (err) + return err; + qp->ibqp.qp_num = qp->qpn; + qp->ibqp.qp_type = IB_QPT_RC; + + /* Allocate the SQ and RQ shared pointers */ + qp->sq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool, + &qp->sq_mq.shared_dma, GFP_KERNEL); + if (!qp->sq_mq.shared) { + err = -ENOMEM; + goto bail0; + } + + qp->rq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool, + &qp->rq_mq.shared_dma, GFP_KERNEL); + if (!qp->rq_mq.shared) { + err = -ENOMEM; + goto bail1; + } + + /* Allocate the verbs request */ + vq_req = vq_req_alloc(c2dev); + if (vq_req == NULL) { + err = -ENOMEM; + goto bail2; + } + + /* Initialize the work request */ + memset(&wr, 0, sizeof(wr)); + c2_wr_set_id(&wr, CCWR_QP_CREATE); + wr.hdr.context = (unsigned long) vq_req; + wr.rnic_handle = c2dev->adapter_handle; + wr.sq_cq_handle = send_cq->adapter_handle; + wr.rq_cq_handle = recv_cq->adapter_handle; + wr.sq_depth = cpu_to_be32(qp_attrs->cap.max_send_wr + 1); + wr.rq_depth = cpu_to_be32(qp_attrs->cap.max_recv_wr + 1); + wr.srq_handle = 0; + wr.flags = cpu_to_be32(QP_RDMA_READ | QP_RDMA_WRITE | QP_MW_BIND | + QP_ZERO_STAG | 
QP_RDMA_READ_RESPONSE); + wr.send_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge); + wr.recv_sgl_depth = cpu_to_be32(qp_attrs->cap.max_recv_sge); + wr.rdma_write_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge); + wr.shared_sq_ht = cpu_to_be64(qp->sq_mq.shared_dma); + wr.shared_rq_ht = cpu_to_be64(qp->rq_mq.shared_dma); + wr.ord = cpu_to_be32(C2_MAX_ORD_PER_QP); + wr.ird = cpu_to_be32(C2_MAX_IRD_PER_QP); + wr.pd_id = pd->pd_id; + wr.user_context = (unsigned long) qp; + + vq_req_get(c2dev, vq_req); + + /* Send the WR to the adapter */ + err = vq_send_wr(c2dev, (union c2wr *) & wr); + if (err) { + vq_req_put(c2dev, vq_req); + goto bail3; + } + + /* Wait for the verb reply */ + err = vq_wait_for_reply(c2dev, vq_req); + if (err) { + goto bail3; + } + + /* Process the reply */ + reply = (struct c2wr_qp_create_rep *) (unsigned long) (vq_req->reply_msg); + if (!reply) { + err = -ENOMEM; + goto bail3; + } + + if ((err = c2_wr_get_result(reply)) != 0) { + goto bail4; + } + + /* Fill in the kernel QP struct */ + atomic_set(&qp->refcount, 1); + qp->adapter_handle = reply->qp_handle; + qp->state = IB_QPS_RESET; + qp->send_sgl_depth = qp_attrs->cap.max_send_sge; + qp->rdma_write_sgl_depth = qp_attrs->cap.max_send_sge; + qp->recv_sgl_depth = qp_attrs->cap.max_recv_sge; + + /* Initialize the SQ MQ */ + q_size = be32_to_cpu(reply->sq_depth); + msg_size = be32_to_cpu(reply->sq_msg_size); + peer_pa = c2dev->pa + be32_to_cpu(reply->sq_mq_start); + mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size); + mmap = ioremap_nocache(peer_pa, mmap_size); + if (!mmap) { + err = -ENOMEM; + goto bail5; + } + + c2_mq_req_init(&qp->sq_mq, + be32_to_cpu(reply->sq_mq_index), + q_size, + msg_size, + mmap + sizeof(struct c2_mq_shared), /* pool start */ + mmap, /* peer */ + C2_MQ_ADAPTER_TARGET); + + /* Initialize the RQ mq */ + q_size = be32_to_cpu(reply->rq_depth); + msg_size = be32_to_cpu(reply->rq_msg_size); + peer_pa = c2dev->pa + be32_to_cpu(reply->rq_mq_start); + mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size); + mmap = ioremap_nocache(peer_pa, mmap_size); + if (!mmap) { + err = -ENOMEM; + goto bail6; + } + + c2_mq_req_init(&qp->rq_mq, + be32_to_cpu(reply->rq_mq_index), + q_size, + msg_size, + mmap + sizeof(struct c2_mq_shared), /* pool start */ + mmap, /* peer */ + C2_MQ_ADAPTER_TARGET); + + vq_repbuf_free(c2dev, reply); + vq_req_free(c2dev, vq_req); + + return 0; + + bail6: + iounmap(qp->sq_mq.peer); + bail5: + destroy_qp(c2dev, qp); + bail4: + vq_repbuf_free(c2dev, reply); + bail3: + vq_req_free(c2dev, vq_req); + bail2: + c2_free_mqsp(qp->rq_mq.shared); + bail1: + c2_free_mqsp(qp->sq_mq.shared); + bail0: + c2_free_qpn(c2dev, qp->qpn); + return err; +} + +void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp) +{ + struct c2_cq *send_cq; + struct c2_cq *recv_cq; + + send_cq = to_c2cq(qp->ibqp.send_cq); + recv_cq = to_c2cq(qp->ibqp.recv_cq); + + /* + * Lock CQs here, so that CQ polling code can do QP lookup + * without taking a lock. + */ + spin_lock_irq(&send_cq->lock); + if (send_cq != recv_cq) + spin_lock(&recv_cq->lock); + + c2_free_qpn(c2dev, qp->qpn); + + if (send_cq != recv_cq) + spin_unlock(&recv_cq->lock); + spin_unlock_irq(&send_cq->lock); + + /* + * Destory qp in the rnic... + */ + destroy_qp(c2dev, qp); + + /* + * Mark any unreaped CQEs as null and void. + */ + c2_cq_clean(c2dev, qp, send_cq->cqn); + if (send_cq != recv_cq) + c2_cq_clean(c2dev, qp, recv_cq->cqn); + /* + * Unmap the MQs and return the shared pointers + * to the message pool. 
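+	 * (This is the reverse of the ioremap_nocache() and
+	 * c2_alloc_mqsp() steps performed in c2_alloc_qp() above.)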
+	 */
+	iounmap(qp->sq_mq.peer);
+	iounmap(qp->rq_mq.peer);
+	c2_free_mqsp(qp->sq_mq.shared);
+	c2_free_mqsp(qp->rq_mq.shared);
+
+	atomic_dec(&qp->refcount);
+	wait_event(qp->wait, !atomic_read(&qp->refcount));
+}
+
+/*
+ * Function: move_sgl
+ *
+ * Description:
+ * Move an SGL from the user's work request struct into a CCIL Work Request
+ * message, swapping to WR byte order and ensuring the total length doesn't
+ * overflow.
+ *
+ * IN:
+ * dst		- ptr to CCIL Work Request message SGL memory.
+ * src		- ptr to the consumer's SGL memory.
+ *
+ * OUT: none
+ *
+ * Return:
+ * CCIL status codes.
+ */
+static int
+move_sgl(struct c2_data_addr * dst, struct ib_sge *src, int count, u32 * p_len,
+	 u8 * actual_count)
+{
+	u32 tot = 0;		/* running total */
+	u8 acount = 0;		/* running total non-0 len sge's */
+
+	while (count > 0) {
+		/*
+		 * If the addition of this SGE causes the
+		 * total SGL length to exceed 2^32-1, then
+		 * fail-n-bail.
+		 *
+		 * If the current total plus the next element length
+		 * wraps, then it will go negative and be less than the
+		 * current total...
+		 */
+		if ((tot + src->length) < tot) {
+			return -EINVAL;
+		}
+		/*
+		 * Bug: 1456 (as well as 1498 & 1643)
+		 * Skip over any sge's supplied with len=0
+		 */
+		if (src->length) {
+			tot += src->length;
+			dst->stag = cpu_to_be32(src->lkey);
+			dst->to = cpu_to_be64(src->addr);
+			dst->length = cpu_to_be32(src->length);
+			dst++;
+			acount++;
+		}
+		src++;
+		count--;
+	}
+
+	if (acount == 0) {
+		/*
+		 * Bug: 1476 (as well as 1498, 1456 and 1643)
+		 * Setup the SGL in the WR to make it easier for the RNIC.
+		 * This way, the FW doesn't have to deal with special cases.
+		 * Setting length=0 should be sufficient.
+		 */
+		dst->stag = 0;
+		dst->to = 0;
+		dst->length = 0;
+	}
+
+	*p_len = tot;
+	*actual_count = acount;
+	return 0;
+}
+
+/*
+ * Function: c2_activity (private function)
+ *
+ * Description:
+ * Post an mq index to the host->adapter activity fifo.
+ *
+ * IN:
+ * c2dev	- ptr to c2dev structure
+ * mq_index	- mq index to post
+ * shared	- value most recently written to shared
+ *
+ * OUT:
+ *
+ * Return:
+ * none
+ */
+static inline void c2_activity(struct c2_dev *c2dev, u32 mq_index, u16 shared)
+{
+	/*
+	 * First read the register to see if the FIFO is full, and if so,
+	 * spin until it's not.  This isn't perfect -- there is no
+	 * synchronization among the clients of the register, but in
+	 * practice it prevents multiple CPUs from hammering the bus
+	 * with PCI RETRY. Note that when this does happen, the card
+	 * cannot get on the bus and the card and system hang in a
+	 * deadlock -- thus the need for this code. [TOT]
+	 */
+	while (readl(c2dev->regs + PCI_BAR0_ADAPTER_HINT) & 0x80000000) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(0);
+	}
+
+	__raw_writel(C2_HINT_MAKE(mq_index, shared),
+		     c2dev->regs + PCI_BAR0_ADAPTER_HINT);
+}
+
+/*
+ * Function: qp_wr_post
+ *
+ * Description:
+ * This inline function allocates an MQ msg, then moves the host-copy of
+ * the completed WR into msg.  Then it posts the message.
+ *
+ * IN:
+ * q		- ptr to user MQ.
+ * wr		- ptr to host-copy of the WR.
+ * qp		- ptr to user qp
+ * size		- Number of bytes to post.  Assumed to be divisible by 4.
+ *
+ * OUT: none
+ *
+ * Return:
+ * CCIL status codes.
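+ *
+ * Illustrative call, as issued from the post paths below (sketch
+ * only; size must cover the entire host-copy of the WR):
+ *
+ *	err = qp_wr_post(&qp->sq_mq, &wr, qp, msg_size);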
+ */ +static int qp_wr_post(struct c2_mq *q, union c2wr * wr, struct c2_qp *qp, u32 size) +{ + union c2wr *msg; + + msg = c2_mq_alloc(q); + if (msg == NULL) { + return -EINVAL; + } +#ifdef CCMSGMAGIC + ((c2wr_hdr_t *) wr)->magic = cpu_to_be32(CCWR_MAGIC); +#endif + + /* + * Since all header fields in the WR are the same as the + * CQE, set the following so the adapter need not. + */ + c2_wr_set_result(wr, CCERR_PENDING); + + /* + * Copy the wr down to the adapter + */ + memcpy((void *) msg, (void *) wr, size); + + c2_mq_produce(q); + return 0; +} + + +int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr, + struct ib_send_wr **bad_wr) +{ + struct c2_dev *c2dev = to_c2dev(ibqp->device); + struct c2_qp *qp = to_c2qp(ibqp); + union c2wr wr; + int err = 0; + + u32 flags; + u32 tot_len; + u8 actual_sge_count; + u32 msg_size; + + if (qp->state > IB_QPS_RTS) + return -EINVAL; + + while (ib_wr) { + + flags = 0; + wr.sqwr.sq_hdr.user_hdr.hdr.context = ib_wr->wr_id; + if (ib_wr->send_flags & IB_SEND_SIGNALED) { + flags |= SQ_SIGNALED; + } + + switch (ib_wr->opcode) { + case IB_WR_SEND: + if (ib_wr->send_flags & IB_SEND_SOLICITED) { + c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE); + msg_size = sizeof(struct c2wr_send_req); + } else { + c2_wr_set_id(&wr, C2_WR_TYPE_SEND); + msg_size = sizeof(struct c2wr_send_req); + } + + wr.sqwr.send.remote_stag = 0; + msg_size += sizeof(struct c2_data_addr) * ib_wr->num_sge; + if (ib_wr->num_sge > qp->send_sgl_depth) { + err = -EINVAL; + break; + } + if (ib_wr->send_flags & IB_SEND_FENCE) { + flags |= SQ_READ_FENCE; + } + err = move_sgl((struct c2_data_addr *) & (wr.sqwr.send.data), + ib_wr->sg_list, + ib_wr->num_sge, + &tot_len, &actual_sge_count); + wr.sqwr.send.sge_len = cpu_to_be32(tot_len); + c2_wr_set_sge_count(&wr, actual_sge_count); + break; + case IB_WR_RDMA_WRITE: + c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_WRITE); + msg_size = sizeof(struct c2wr_rdma_write_req) + + (sizeof(struct c2_data_addr) * ib_wr->num_sge); + if (ib_wr->num_sge > qp->rdma_write_sgl_depth) { + err = -EINVAL; + break; + } + if (ib_wr->send_flags & IB_SEND_FENCE) { + flags |= SQ_READ_FENCE; + } + wr.sqwr.rdma_write.remote_stag = + cpu_to_be32(ib_wr->wr.rdma.rkey); + wr.sqwr.rdma_write.remote_to = + cpu_to_be64(ib_wr->wr.rdma.remote_addr); + err = move_sgl((struct c2_data_addr *) + & (wr.sqwr.rdma_write.data), + ib_wr->sg_list, + ib_wr->num_sge, + &tot_len, &actual_sge_count); + wr.sqwr.rdma_write.sge_len = cpu_to_be32(tot_len); + c2_wr_set_sge_count(&wr, actual_sge_count); + break; + case IB_WR_RDMA_READ: + c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_READ); + msg_size = sizeof(struct c2wr_rdma_read_req); + + /* IWarp only suppots 1 sge for RDMA reads */ + if (ib_wr->num_sge > 1) { + err = -EINVAL; + break; + } + + /* + * Move the local and remote stag/to/len into the WR. + */ + wr.sqwr.rdma_read.local_stag = + cpu_to_be32(ib_wr->sg_list->lkey); + wr.sqwr.rdma_read.local_to = + cpu_to_be64(ib_wr->sg_list->addr); + wr.sqwr.rdma_read.remote_stag = + cpu_to_be32(ib_wr->wr.rdma.rkey); + wr.sqwr.rdma_read.remote_to = + cpu_to_be64(ib_wr->wr.rdma.remote_addr); + wr.sqwr.rdma_read.length = + cpu_to_be32(ib_wr->sg_list->length); + break; + default: + /* error */ + msg_size = 0; + err = -EINVAL; + break; + } + + /* + * If we had an error on the last wr build, then + * break out. Possible errors include bogus WR + * type, and a bogus SGL length... + */ + if (err) { + break; + } + + /* + * Store flags + */ + c2_wr_set_flags(&wr, flags); + + /* + * Post the puppy! 
+ */ + err = qp_wr_post(&qp->sq_mq, &wr, qp, msg_size); + if (err) { + break; + } + + /* + * Enqueue mq index to activity FIFO. + */ + c2_activity(c2dev, qp->sq_mq.index, qp->sq_mq.hint_count); + + ib_wr = ib_wr->next; + } + + if (err) + *bad_wr = ib_wr; + return err; +} + +int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr, + struct ib_recv_wr **bad_wr) +{ + struct c2_dev *c2dev = to_c2dev(ibqp->device); + struct c2_qp *qp = to_c2qp(ibqp); + union c2wr wr; + int err = 0; + + if (qp->state > IB_QPS_RTS) + return -EINVAL; + + /* + * Try and post each work request + */ + while (ib_wr) { + u32 tot_len; + u8 actual_sge_count; + + if (ib_wr->num_sge > qp->recv_sgl_depth) { + err = -EINVAL; + break; + } + + /* + * Create local host-copy of the WR + */ + wr.rqwr.rq_hdr.user_hdr.hdr.context = ib_wr->wr_id; + c2_wr_set_id(&wr, CCWR_RECV); + c2_wr_set_flags(&wr, 0); + + /* sge_count is limited to eight bits. */ + BUG_ON(ib_wr->num_sge >= 256); + err = move_sgl((struct c2_data_addr *) & (wr.rqwr.data), + ib_wr->sg_list, + ib_wr->num_sge, &tot_len, &actual_sge_count); + c2_wr_set_sge_count(&wr, actual_sge_count); + + /* + * If we had an error on the last wr build, then + * break out. Possible errors include bogus WR + * type, and a bogus SGL length... + */ + if (err) { + break; + } + + err = qp_wr_post(&qp->rq_mq, &wr, qp, qp->rq_mq.msg_size); + if (err) { + break; + } + + /* + * Enqueue mq index to activity FIFO + */ + c2_activity(c2dev, qp->rq_mq.index, qp->rq_mq.hint_count); + + ib_wr = ib_wr->next; + } + + if (err) + *bad_wr = ib_wr; + return err; +} + +void __devinit c2_init_qp_table(struct c2_dev *c2dev) +{ + spin_lock_init(&c2dev->qp_table.lock); + idr_init(&c2dev->qp_table.idr); +} + +void __devexit c2_cleanup_qp_table(struct c2_dev *c2dev) +{ + idr_destroy(&c2dev->qp_table.idr); +} diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c new file mode 100644 index 000000000000..1c3c9d65ecea --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_rnic.c @@ -0,0 +1,663 @@ +/* + * Copyright (c) 2005 Ammasso, Inc. All rights reserved. + * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ */
+
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/inet.h>
+
+#include <linux/route.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <rdma/ib_smi.h>
+#include "c2.h"
+#include "c2_vq.h"
+
+/* Device capabilities */
+#define C2_MIN_PAGESIZE 1024
+
+#define C2_MAX_MRS 32768
+#define C2_MAX_QPS 16000
+#define C2_MAX_WQE_SZ 256
+#define C2_MAX_QP_WR ((128*1024)/C2_MAX_WQE_SZ)
+#define C2_MAX_SGES 4
+#define C2_MAX_SGE_RD 1
+#define C2_MAX_CQS 32768
+#define C2_MAX_CQES 4096
+#define C2_MAX_PDS 16384
+
+/*
+ * Send the adapter INIT message to the amso1100
+ */
+static int c2_adapter_init(struct c2_dev *c2dev)
+{
+	struct c2wr_init_req wr;
+	int err;
+
+	memset(&wr, 0, sizeof(wr));
+	c2_wr_set_id(&wr, CCWR_INIT);
+	wr.hdr.context = 0;
+	wr.hint_count = cpu_to_be64(c2dev->hint_count_dma);
+	wr.q0_host_shared = cpu_to_be64(c2dev->req_vq.shared_dma);
+	wr.q1_host_shared = cpu_to_be64(c2dev->rep_vq.shared_dma);
+	wr.q1_host_msg_pool = cpu_to_be64(c2dev->rep_vq.host_dma);
+	wr.q2_host_shared = cpu_to_be64(c2dev->aeq.shared_dma);
+	wr.q2_host_msg_pool = cpu_to_be64(c2dev->aeq.host_dma);
+
+	/* Post the init message */
+	err = vq_send_wr(c2dev, (union c2wr *) & wr);
+
+	return err;
+}
+
+/*
+ * Send the adapter TERM message to the amso1100
+ */
+static void c2_adapter_term(struct c2_dev *c2dev)
+{
+	struct c2wr_init_req wr;
+
+	memset(&wr, 0, sizeof(wr));
+	c2_wr_set_id(&wr, CCWR_TERM);
+	wr.hdr.context = 0;
+
+	/* Post the term message */
+	vq_send_wr(c2dev, (union c2wr *) & wr);
+	c2dev->init = 0;
+
+	return;
+}
+
+/*
+ * Query the adapter
+ */
+static int c2_rnic_query(struct c2_dev *c2dev, struct ib_device_attr *props)
+{
+	struct c2_vq_req *vq_req;
+	struct c2wr_rnic_query_req wr;
+	struct c2wr_rnic_query_rep *reply;
+	int err;
+
+	vq_req = vq_req_alloc(c2dev);
+	if (!vq_req)
+		return -ENOMEM;
+
+	c2_wr_set_id(&wr, CCWR_RNIC_QUERY);
+	wr.hdr.context = (unsigned long) vq_req;
+	wr.rnic_handle = c2dev->adapter_handle;
+
+	vq_req_get(c2dev, vq_req);
+
+	err = vq_send_wr(c2dev, (union c2wr *) &wr);
+	if (err) {
+		vq_req_put(c2dev, vq_req);
+		goto bail1;
+	}
+
+	err = vq_wait_for_reply(c2dev, vq_req);
+	if (err)
+		goto bail1;
+
+	reply =
+	    (struct c2wr_rnic_query_rep *) (unsigned long) (vq_req->reply_msg);
+	if (!reply) {
+		err = -ENOMEM;
+		goto bail1;
+	}
+
+	err = c2_errno(reply);
+	if (err)
+		goto bail2;
+
+	props->fw_ver =
+		((u64)be32_to_cpu(reply->fw_ver_major) << 32) |
+		((be32_to_cpu(reply->fw_ver_minor) & 0xFFFF) << 16) |
+		(be32_to_cpu(reply->fw_ver_patch) & 0xFFFF);
+	memcpy(&props->sys_image_guid, c2dev->netdev->dev_addr, 6);
+	props->max_mr_size = 0xFFFFFFFF;
+	props->page_size_cap = ~(C2_MIN_PAGESIZE-1);
+	props->vendor_id = be32_to_cpu(reply->vendor_id);
+	props->vendor_part_id = be32_to_cpu(reply->part_number);
+	props->hw_ver = be32_to_cpu(reply->hw_version);
+	props->max_qp = be32_to_cpu(reply->max_qps);
+	props->max_qp_wr = be32_to_cpu(reply->max_qp_depth);
+	props->device_cap_flags = c2dev->device_cap_flags;
+	props->max_sge = C2_MAX_SGES;
+	props->max_sge_rd = C2_MAX_SGE_RD;
+	props->max_cq = be32_to_cpu(reply->max_cqs);
+	props->max_cqe =
be32_to_cpu(reply->max_cq_depth); + props->max_mr = be32_to_cpu(reply->max_mrs); + props->max_pd = be32_to_cpu(reply->max_pds); + props->max_qp_rd_atom = be32_to_cpu(reply->max_qp_ird); + props->max_ee_rd_atom = 0; + props->max_res_rd_atom = be32_to_cpu(reply->max_global_ird); + props->max_qp_init_rd_atom = be32_to_cpu(reply->max_qp_ord); + props->max_ee_init_rd_atom = 0; + props->atomic_cap = IB_ATOMIC_NONE; + props->max_ee = 0; + props->max_rdd = 0; + props->max_mw = be32_to_cpu(reply->max_mws); + props->max_raw_ipv6_qp = 0; + props->max_raw_ethy_qp = 0; + props->max_mcast_grp = 0; + props->max_mcast_qp_attach = 0; + props->max_total_mcast_qp_attach = 0; + props->max_ah = 0; + props->max_fmr = 0; + props->max_map_per_fmr = 0; + props->max_srq = 0; + props->max_srq_wr = 0; + props->max_srq_sge = 0; + props->max_pkeys = 0; + props->local_ca_ack_delay = 0; + + bail2: + vq_repbuf_free(c2dev, reply); + + bail1: + vq_req_free(c2dev, vq_req); + return err; +} + +/* + * Add an IP address to the RNIC interface + */ +int c2_add_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask) +{ + struct c2_vq_req *vq_req; + struct c2wr_rnic_setconfig_req *wr; + struct c2wr_rnic_setconfig_rep *reply; + struct c2_netaddr netaddr; + int err, len; + + vq_req = vq_req_alloc(c2dev); + if (!vq_req) + return -ENOMEM; + + len = sizeof(struct c2_netaddr); + wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL); + if (!wr) { + err = -ENOMEM; + goto bail0; + } + + c2_wr_set_id(wr, CCWR_RNIC_SETCONFIG); + wr->hdr.context = (unsigned long) vq_req; + wr->rnic_handle = c2dev->adapter_handle; + wr->option = cpu_to_be32(C2_CFG_ADD_ADDR); + + netaddr.ip_addr = inaddr; + netaddr.netmask = inmask; + netaddr.mtu = 0; + + memcpy(wr->data, &netaddr, len); + + vq_req_get(c2dev, vq_req); + + err = vq_send_wr(c2dev, (union c2wr *) wr); + if (err) { + vq_req_put(c2dev, vq_req); + goto bail1; + } + + err = vq_wait_for_reply(c2dev, vq_req); + if (err) + goto bail1; + + reply = + (struct c2wr_rnic_setconfig_rep *) (unsigned long) (vq_req->reply_msg); + if (!reply) { + err = -ENOMEM; + goto bail1; + } + + err = c2_errno(reply); + vq_repbuf_free(c2dev, reply); + + bail1: + kfree(wr); + bail0: + vq_req_free(c2dev, vq_req); + return err; +} + +/* + * Delete an IP address from the RNIC interface + */ +int c2_del_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask) +{ + struct c2_vq_req *vq_req; + struct c2wr_rnic_setconfig_req *wr; + struct c2wr_rnic_setconfig_rep *reply; + struct c2_netaddr netaddr; + int err, len; + + vq_req = vq_req_alloc(c2dev); + if (!vq_req) + return -ENOMEM; + + len = sizeof(struct c2_netaddr); + wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL); + if (!wr) { + err = -ENOMEM; + goto bail0; + } + + c2_wr_set_id(wr, CCWR_RNIC_SETCONFIG); + wr->hdr.context = (unsigned long) vq_req; + wr->rnic_handle = c2dev->adapter_handle; + wr->option = cpu_to_be32(C2_CFG_DEL_ADDR); + + netaddr.ip_addr = inaddr; + netaddr.netmask = inmask; + netaddr.mtu = 0; + + memcpy(wr->data, &netaddr, len); + + vq_req_get(c2dev, vq_req); + + err = vq_send_wr(c2dev, (union c2wr *) wr); + if (err) { + vq_req_put(c2dev, vq_req); + goto bail1; + } + + err = vq_wait_for_reply(c2dev, vq_req); + if (err) + goto bail1; + + reply = + (struct c2wr_rnic_setconfig_rep *) (unsigned long) (vq_req->reply_msg); + if (!reply) { + err = -ENOMEM; + goto bail1; + } + + err = c2_errno(reply); + vq_repbuf_free(c2dev, reply); + + bail1: + kfree(wr); + bail0: + vq_req_free(c2dev, vq_req); + return err; +} + +/* + * Open a single RNIC instance to use with all + * low level openib calls 
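+ * (the adapter handle returned in the reply is cached in
+ * c2dev->adapter_handle and quoted in every subsequent verbs
+ * request)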
+ */
+static int c2_rnic_open(struct c2_dev *c2dev)
+{
+	struct c2_vq_req *vq_req;
+	union c2wr wr;
+	struct c2wr_rnic_open_rep *reply;
+	int err;
+
+	vq_req = vq_req_alloc(c2dev);
+	if (vq_req == NULL) {
+		return -ENOMEM;
+	}
+
+	memset(&wr, 0, sizeof(wr));
+	c2_wr_set_id(&wr, CCWR_RNIC_OPEN);
+	wr.rnic_open.req.hdr.context = (unsigned long) (vq_req);
+	wr.rnic_open.req.flags = cpu_to_be16(RNIC_PRIV_MODE);
+	wr.rnic_open.req.port_num = cpu_to_be16(0);
+	wr.rnic_open.req.user_context = (unsigned long) c2dev;
+
+	vq_req_get(c2dev, vq_req);
+
+	err = vq_send_wr(c2dev, &wr);
+	if (err) {
+		vq_req_put(c2dev, vq_req);
+		goto bail0;
+	}
+
+	err = vq_wait_for_reply(c2dev, vq_req);
+	if (err) {
+		goto bail0;
+	}
+
+	reply = (struct c2wr_rnic_open_rep *) (unsigned long) (vq_req->reply_msg);
+	if (!reply) {
+		err = -ENOMEM;
+		goto bail0;
+	}
+
+	if ((err = c2_errno(reply)) != 0) {
+		goto bail1;
+	}
+
+	c2dev->adapter_handle = reply->rnic_handle;
+
+      bail1:
+	vq_repbuf_free(c2dev, reply);
+      bail0:
+	vq_req_free(c2dev, vq_req);
+	return err;
+}
+
+/*
+ * Close the RNIC instance
+ */
+static int c2_rnic_close(struct c2_dev *c2dev)
+{
+	struct c2_vq_req *vq_req;
+	union c2wr wr;
+	struct c2wr_rnic_close_rep *reply;
+	int err;
+
+	vq_req = vq_req_alloc(c2dev);
+	if (vq_req == NULL) {
+		return -ENOMEM;
+	}
+
+	memset(&wr, 0, sizeof(wr));
+	c2_wr_set_id(&wr, CCWR_RNIC_CLOSE);
+	wr.rnic_close.req.hdr.context = (unsigned long) vq_req;
+	wr.rnic_close.req.rnic_handle = c2dev->adapter_handle;
+
+	vq_req_get(c2dev, vq_req);
+
+	err = vq_send_wr(c2dev, &wr);
+	if (err) {
+		vq_req_put(c2dev, vq_req);
+		goto bail0;
+	}
+
+	err = vq_wait_for_reply(c2dev, vq_req);
+	if (err) {
+		goto bail0;
+	}
+
+	reply = (struct c2wr_rnic_close_rep *) (unsigned long) (vq_req->reply_msg);
+	if (!reply) {
+		err = -ENOMEM;
+		goto bail0;
+	}
+
+	if ((err = c2_errno(reply)) != 0) {
+		goto bail1;
+	}
+
+	c2dev->adapter_handle = 0;
+
+      bail1:
+	vq_repbuf_free(c2dev, reply);
+      bail0:
+	vq_req_free(c2dev, vq_req);
+	return err;
+}
+
+/*
+ * Called by c2_probe to initialize the RNIC. This principally
+ * involves initializing the various limits and resource pools that
+ * comprise the RNIC instance.
+ */
+int c2_rnic_init(struct c2_dev *c2dev)
+{
+	int err;
+	u32 qsize, msgsize;
+	void *q1_pages;
+	void *q2_pages;
+	void __iomem *mmio_regs;
+
+	/* Device capabilities */
+	c2dev->device_cap_flags =
+	    (IB_DEVICE_RESIZE_MAX_WR |
+	     IB_DEVICE_CURR_QP_STATE_MOD |
+	     IB_DEVICE_SYS_IMAGE_GUID |
+	     IB_DEVICE_ZERO_STAG |
+	     IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW);
+
+	/* Allocate the qptr_array */
+	c2dev->qptr_array = vmalloc(C2_MAX_CQS * sizeof(void *));
+	if (!c2dev->qptr_array) {
+		return -ENOMEM;
+	}
+
+	/* Initialize the qptr_array */
+	memset(c2dev->qptr_array, 0, C2_MAX_CQS * sizeof(void *));
+	c2dev->qptr_array[0] = (void *) &c2dev->req_vq;
+	c2dev->qptr_array[1] = (void *) &c2dev->rep_vq;
+	c2dev->qptr_array[2] = (void *) &c2dev->aeq;
+
+	/* Initialize data structures */
+	init_waitqueue_head(&c2dev->req_vq_wo);
+	spin_lock_init(&c2dev->vqlock);
+	spin_lock_init(&c2dev->lock);
+
+	/* Allocate MQ shared pointer pool for kernel clients. User
+	 * mode client pools are hung off the user context
+	 */
+	err = c2_init_mqsp_pool(c2dev, GFP_KERNEL, &c2dev->kern_mqsp_pool);
+	if (err) {
+		goto bail0;
+	}
+
+	/* Allocate shared pointers for Q0, Q1, and Q2 from
+	 * the shared pointer pool.
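+	 * (Q0 is the verbs request queue, Q1 the verbs reply queue and
+	 * Q2 the asynchronous event queue -- the same order as the
+	 * qptr_array slots filled in above.)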
+	 */
+
+	c2dev->hint_count = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
+					  &c2dev->hint_count_dma,
+					  GFP_KERNEL);
+	c2dev->req_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
+					     &c2dev->req_vq.shared_dma,
+					     GFP_KERNEL);
+	c2dev->rep_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
+					     &c2dev->rep_vq.shared_dma,
+					     GFP_KERNEL);
+	c2dev->aeq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
+					  &c2dev->aeq.shared_dma, GFP_KERNEL);
+	if (!c2dev->hint_count || !c2dev->req_vq.shared ||
+	    !c2dev->rep_vq.shared || !c2dev->aeq.shared) {
+		err = -ENOMEM;
+		goto bail1;
+	}
+
+	mmio_regs = c2dev->kva;
+	/* Initialize the Verbs Request Queue */
+	c2_mq_req_init(&c2dev->req_vq, 0,
+		       be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_QSIZE)),
+		       be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_MSGSIZE)),
+		       mmio_regs +
+		       be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_POOLSTART)),
+		       mmio_regs +
+		       be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_SHARED)),
+		       C2_MQ_ADAPTER_TARGET);
+
+	/* Initialize the Verbs Reply Queue */
+	qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_QSIZE));
+	msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
+	q1_pages = kmalloc(qsize * msgsize, GFP_KERNEL);
+	if (!q1_pages) {
+		err = -ENOMEM;
+		goto bail1;
+	}
+	c2dev->rep_vq.host_dma = dma_map_single(c2dev->ibdev.dma_device,
+						(void *)q1_pages, qsize * msgsize,
+						DMA_FROM_DEVICE);
+	pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
+	pr_debug("%s rep_vq va %p dma %llx\n", __FUNCTION__, q1_pages,
+		 (u64)c2dev->rep_vq.host_dma);
+	c2_mq_rep_init(&c2dev->rep_vq,
+		       1,
+		       qsize,
+		       msgsize,
+		       q1_pages,
+		       mmio_regs +
+		       be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_SHARED)),
+		       C2_MQ_HOST_TARGET);
+
+	/* Initialize the Asynchronous Event Queue */
+	qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_QSIZE));
+	msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
+	q2_pages = kmalloc(qsize * msgsize, GFP_KERNEL);
+	if (!q2_pages) {
+		err = -ENOMEM;
+		goto bail2;
+	}
+	c2dev->aeq.host_dma = dma_map_single(c2dev->ibdev.dma_device,
+					     (void *)q2_pages, qsize * msgsize,
+					     DMA_FROM_DEVICE);
+	pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
+	pr_debug("%s aeq va %p dma %llx\n", __FUNCTION__, q2_pages,
+		 (u64)c2dev->aeq.host_dma);
+	c2_mq_rep_init(&c2dev->aeq,
+		       2,
+		       qsize,
+		       msgsize,
+		       q2_pages,
+		       mmio_regs +
+		       be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_SHARED)),
+		       C2_MQ_HOST_TARGET);
+
+	/* Initialize the verbs request allocator */
+	err = vq_init(c2dev);
+	if (err)
+		goto bail3;
+
+	/* Enable interrupts on the adapter */
+	writel(0, c2dev->regs + C2_IDIS);
+
+	/* create the WR init message */
+	err = c2_adapter_init(c2dev);
+	if (err)
+		goto bail4;
+	c2dev->init++;
+
+	/* open an adapter instance */
+	err = c2_rnic_open(c2dev);
+	if (err)
+		goto bail4;
+
+	/* Initialize the cached adapter limits */
+	err = c2_rnic_query(c2dev, &c2dev->props);
+	if (err)
+		goto bail5;
+
+	/* Initialize the PD pool */
+	err = c2_init_pd_table(c2dev);
+	if (err)
+		goto bail5;
+
+	/* Initialize the QP pool */
+	c2_init_qp_table(c2dev);
+	return 0;
+
+      bail5:
+	c2_rnic_close(c2dev);
+      bail4:
+	vq_term(c2dev);
+      bail3:
+	dma_unmap_single(c2dev->ibdev.dma_device,
+			 pci_unmap_addr(&c2dev->aeq, mapping),
+			 c2dev->aeq.q_size * c2dev->aeq.msg_size,
+			 DMA_FROM_DEVICE);
+	kfree(q2_pages);
+      bail2:
+	dma_unmap_single(c2dev->ibdev.dma_device,
+			 pci_unmap_addr(&c2dev->rep_vq, mapping),
+			 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
+			 DMA_FROM_DEVICE);
+	kfree(q1_pages);
+      bail1:
+	c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
+      bail0:
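+	/* Last unwind step: release the qptr_array allocated first. */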
+	vfree(c2dev->qptr_array);
+
+	return err;
+}
+
+/*
+ * Called by c2_remove to clean up the RNIC resources.
+ */
+void c2_rnic_term(struct c2_dev *c2dev)
+{
+
+	/* Close the open adapter instance */
+	c2_rnic_close(c2dev);
+
+	/* Send the TERM message to the adapter */
+	c2_adapter_term(c2dev);
+
+	/* Disable interrupts on the adapter */
+	writel(1, c2dev->regs + C2_IDIS);
+
+	/* Free the QP pool */
+	c2_cleanup_qp_table(c2dev);
+
+	/* Free the PD pool */
+	c2_cleanup_pd_table(c2dev);
+
+	/* Free the verbs request allocator */
+	vq_term(c2dev);
+
+	/* Unmap and free the asynchronous event queue */
+	dma_unmap_single(c2dev->ibdev.dma_device,
+			 pci_unmap_addr(&c2dev->aeq, mapping),
+			 c2dev->aeq.q_size * c2dev->aeq.msg_size,
+			 DMA_FROM_DEVICE);
+	kfree(c2dev->aeq.msg_pool.host);
+
+	/* Unmap and free the verbs reply queue */
+	dma_unmap_single(c2dev->ibdev.dma_device,
+			 pci_unmap_addr(&c2dev->rep_vq, mapping),
+			 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
+			 DMA_FROM_DEVICE);
+	kfree(c2dev->rep_vq.msg_pool.host);
+
+	/* Free the MQ shared pointer pool */
+	c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
+
+	/* Free the qptr_array */
+	vfree(c2dev->qptr_array);
+
+	return;
+}
diff --git a/drivers/infiniband/hw/amso1100/c2_status.h b/drivers/infiniband/hw/amso1100/c2_status.h
new file mode 100644
index 000000000000..6ee4aa92d875
--- /dev/null
+++ b/drivers/infiniband/hw/amso1100/c2_status.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */ +#ifndef _C2_STATUS_H_ +#define _C2_STATUS_H_ + +/* + * Verbs Status Codes + */ +enum c2_status { + C2_OK = 0, /* This must be zero */ + CCERR_INSUFFICIENT_RESOURCES = 1, + CCERR_INVALID_MODIFIER = 2, + CCERR_INVALID_MODE = 3, + CCERR_IN_USE = 4, + CCERR_INVALID_RNIC = 5, + CCERR_INTERRUPTED_OPERATION = 6, + CCERR_INVALID_EH = 7, + CCERR_INVALID_CQ = 8, + CCERR_CQ_EMPTY = 9, + CCERR_NOT_IMPLEMENTED = 10, + CCERR_CQ_DEPTH_TOO_SMALL = 11, + CCERR_PD_IN_USE = 12, + CCERR_INVALID_PD = 13, + CCERR_INVALID_SRQ = 14, + CCERR_INVALID_ADDRESS = 15, + CCERR_INVALID_NETMASK = 16, + CCERR_INVALID_QP = 17, + CCERR_INVALID_QP_STATE = 18, + CCERR_TOO_MANY_WRS_POSTED = 19, + CCERR_INVALID_WR_TYPE = 20, + CCERR_INVALID_SGL_LENGTH = 21, + CCERR_INVALID_SQ_DEPTH = 22, + CCERR_INVALID_RQ_DEPTH = 23, + CCERR_INVALID_ORD = 24, + CCERR_INVALID_IRD = 25, + CCERR_QP_ATTR_CANNOT_CHANGE = 26, + CCERR_INVALID_STAG = 27, + CCERR_QP_IN_USE = 28, + CCERR_OUTSTANDING_WRS = 29, + CCERR_STAG_IN_USE = 30, + CCERR_INVALID_STAG_INDEX = 31, + CCERR_INVALID_SGL_FORMAT = 32, + CCERR_ADAPTER_TIMEOUT = 33, + CCERR_INVALID_CQ_DEPTH = 34, + CCERR_INVALID_PRIVATE_DATA_LENGTH = 35, + CCERR_INVALID_EP = 36, + CCERR_MR_IN_USE = CCERR_STAG_IN_USE, + CCERR_FLUSHED = 38, + CCERR_INVALID_WQE = 39, + CCERR_LOCAL_QP_CATASTROPHIC_ERROR = 40, + CCERR_REMOTE_TERMINATION_ERROR = 41, + CCERR_BASE_AND_BOUNDS_VIOLATION = 42, + CCERR_ACCESS_VIOLATION = 43, + CCERR_INVALID_PD_ID = 44, + CCERR_WRAP_ERROR = 45, + CCERR_INV_STAG_ACCESS_ERROR = 46, + CCERR_ZERO_RDMA_READ_RESOURCES = 47, + CCERR_QP_NOT_PRIVILEGED = 48, + CCERR_STAG_STATE_NOT_INVALID = 49, + CCERR_INVALID_PAGE_SIZE = 50, + CCERR_INVALID_BUFFER_SIZE = 51, + CCERR_INVALID_PBE = 52, + CCERR_INVALID_FBO = 53, + CCERR_INVALID_LENGTH = 54, + CCERR_INVALID_ACCESS_RIGHTS = 55, + CCERR_PBL_TOO_BIG = 56, + CCERR_INVALID_VA = 57, + CCERR_INVALID_REGION = 58, + CCERR_INVALID_WINDOW = 59, + CCERR_TOTAL_LENGTH_TOO_BIG = 60, + CCERR_INVALID_QP_ID = 61, + CCERR_ADDR_IN_USE = 62, + CCERR_ADDR_NOT_AVAIL = 63, + CCERR_NET_DOWN = 64, + CCERR_NET_UNREACHABLE = 65, + CCERR_CONN_ABORTED = 66, + CCERR_CONN_RESET = 67, + CCERR_NO_BUFS = 68, + CCERR_CONN_TIMEDOUT = 69, + CCERR_CONN_REFUSED = 70, + CCERR_HOST_UNREACHABLE = 71, + CCERR_INVALID_SEND_SGL_DEPTH = 72, + CCERR_INVALID_RECV_SGL_DEPTH = 73, + CCERR_INVALID_RDMA_WRITE_SGL_DEPTH = 74, + CCERR_INSUFFICIENT_PRIVILEGES = 75, + CCERR_STACK_ERROR = 76, + CCERR_INVALID_VERSION = 77, + CCERR_INVALID_MTU = 78, + CCERR_INVALID_IMAGE = 79, + CCERR_PENDING = 98, /* not an error; user internally by adapter */ + CCERR_DEFER = 99, /* not an error; used internally by adapter */ + CCERR_FAILED_WRITE = 100, + CCERR_FAILED_ERASE = 101, + CCERR_FAILED_VERIFICATION = 102, + CCERR_NOT_FOUND = 103, + +}; + +/* + * CCAE_ACTIVE_CONNECT_RESULTS status result codes. + */ +enum c2_connect_status { + C2_CONN_STATUS_SUCCESS = C2_OK, + C2_CONN_STATUS_NO_MEM = CCERR_INSUFFICIENT_RESOURCES, + C2_CONN_STATUS_TIMEDOUT = CCERR_CONN_TIMEDOUT, + C2_CONN_STATUS_REFUSED = CCERR_CONN_REFUSED, + C2_CONN_STATUS_NETUNREACH = CCERR_NET_UNREACHABLE, + C2_CONN_STATUS_HOSTUNREACH = CCERR_HOST_UNREACHABLE, + C2_CONN_STATUS_INVALID_RNIC = CCERR_INVALID_RNIC, + C2_CONN_STATUS_INVALID_QP = CCERR_INVALID_QP, + C2_CONN_STATUS_INVALID_QP_STATE = CCERR_INVALID_QP_STATE, + C2_CONN_STATUS_REJECTED = CCERR_CONN_RESET, + C2_CONN_STATUS_ADDR_NOT_AVAIL = CCERR_ADDR_NOT_AVAIL, +}; + +/* + * Flash programming status codes. 
+ */ +enum c2_flash_status { + C2_FLASH_STATUS_SUCCESS = 0x0000, + C2_FLASH_STATUS_VERIFY_ERR = 0x0002, + C2_FLASH_STATUS_IMAGE_ERR = 0x0004, + C2_FLASH_STATUS_ECLBS = 0x0400, + C2_FLASH_STATUS_PSLBS = 0x0800, + C2_FLASH_STATUS_VPENS = 0x1000, +}; + +#endif /* _C2_STATUS_H_ */ diff --git a/drivers/infiniband/hw/amso1100/c2_user.h b/drivers/infiniband/hw/amso1100/c2_user.h new file mode 100644 index 000000000000..7e9e7ad65467 --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_user.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef C2_USER_H +#define C2_USER_H + +#include <linux/types.h> + +/* + * Make sure that all structs defined in this file remain laid out so + * that they pack the same way on 32-bit and 64-bit architectures (to + * avoid incompatibility between 32-bit userspace and 64-bit kernels). + * In particular do not use pointer types -- pass pointers in __u64 + * instead. + */ + +struct c2_alloc_ucontext_resp { + __u32 qp_tab_size; + __u32 uarc_size; +}; + +struct c2_alloc_pd_resp { + __u32 pdn; + __u32 reserved; +}; + +struct c2_create_cq { + __u32 lkey; + __u32 pdn; + __u64 arm_db_page; + __u64 set_db_page; + __u32 arm_db_index; + __u32 set_db_index; +}; + +struct c2_create_cq_resp { + __u32 cqn; + __u32 reserved; +}; + +struct c2_create_qp { + __u32 lkey; + __u32 reserved; + __u64 sq_db_page; + __u64 rq_db_page; + __u32 sq_db_index; + __u32 rq_db_index; +}; + +#endif /* C2_USER_H */ diff --git a/drivers/infiniband/hw/amso1100/c2_vq.c b/drivers/infiniband/hw/amso1100/c2_vq.c new file mode 100644 index 000000000000..40caeb5f41b4 --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_vq.c @@ -0,0 +1,260 @@ +/* + * Copyright (c) 2005 Ammasso, Inc. All rights reserved. + * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "c2_vq.h"
+#include "c2_provider.h"
+
+/*
+ * Verbs Request Objects:
+ *
+ * VQ Request Objects are allocated by the kernel verbs handlers.
+ * They contain a wait object, a refcnt, an atomic bool indicating that the
+ * adapter has replied, and a copy of the verb reply work request.
+ * A pointer to the VQ Request Object is passed down in the context
+ * field of the work request message, and reflected back by the adapter
+ * in the verbs reply message.  The function handle_vq() in the interrupt
+ * path will use this pointer to:
+ * 1) append a copy of the verbs reply message
+ * 2) mark that the reply is ready
+ * 3) wake up the kernel verbs handler blocked awaiting the reply.
+ *
+ *
+ * The kernel verbs handlers do a "get" to put a 2nd reference on the
+ * VQ Request object.  If the kernel verbs handler exits before the adapter
+ * can respond, this extra reference will keep the VQ Request object around
+ * until the adapter's reply can be processed.  The reason we need this is
+ * because a pointer to this object is stuffed into the context field of
+ * the verbs work request message, and reflected back in the reply message.
+ * It is used in the interrupt handler (handle_vq()) to wake up the
+ * appropriate kernel verb handler that is blocked awaiting the verb reply.
+ * So handle_vq() will do a "put" on the object when it's done accessing it.
+ * NOTE: If we guarantee that the kernel verb handler will never bail before
+ * getting the reply, then we don't need these refcnts.
+ *
+ *
+ * VQ Request objects are freed by the kernel verbs handlers only
+ * after the verb has been processed, or when the adapter fails and
+ * does not reply.
+ *
+ *
+ * Verbs Reply Buffers:
+ *
+ * VQ Reply bufs are local host memory copies of an outstanding Verb Request
+ * reply message.  They are always allocated by the kernel verbs handlers,
+ * and _may_ be freed by either the kernel verbs handler -or- the interrupt
+ * handler.  The kernel verbs handler _must_ free the repbuf, then free the
+ * vq request object in that order.
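+ *
+ * Putting it together, the canonical kernel-verbs calling sequence
+ * looks roughly like this (sketch only; error handling elided):
+ *
+ *	vq_req = vq_req_alloc(c2dev);
+ *	wr.hdr.context = (unsigned long) vq_req;
+ *	vq_req_get(c2dev, vq_req);
+ *	if (vq_send_wr(c2dev, (union c2wr *) &wr)) {
+ *		vq_req_put(c2dev, vq_req);
+ *	} else if (!vq_wait_for_reply(c2dev, vq_req)) {
+ *		reply = (void *) (unsigned long) vq_req->reply_msg;
+ *		err = c2_errno(reply);
+ *		vq_repbuf_free(c2dev, reply);
+ *	}
+ *	vq_req_free(c2dev, vq_req);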
+ */ + +int vq_init(struct c2_dev *c2dev) +{ + sprintf(c2dev->vq_cache_name, "c2-vq:dev%c", + (char) ('0' + c2dev->devnum)); + c2dev->host_msg_cache = + kmem_cache_create(c2dev->vq_cache_name, c2dev->rep_vq.msg_size, 0, + SLAB_HWCACHE_ALIGN, NULL, NULL); + if (c2dev->host_msg_cache == NULL) { + return -ENOMEM; + } + return 0; +} + +void vq_term(struct c2_dev *c2dev) +{ + kmem_cache_destroy(c2dev->host_msg_cache); +} + +/* vq_req_alloc - allocate a VQ Request Object and initialize it. + * The refcnt is set to 1. + */ +struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev) +{ + struct c2_vq_req *r; + + r = kmalloc(sizeof(struct c2_vq_req), GFP_KERNEL); + if (r) { + init_waitqueue_head(&r->wait_object); + r->reply_msg = (u64) NULL; + r->event = 0; + r->cm_id = NULL; + r->qp = NULL; + atomic_set(&r->refcnt, 1); + atomic_set(&r->reply_ready, 0); + } + return r; +} + + +/* vq_req_free - free the VQ Request Object. It is assumed the verbs handler + * has already free the VQ Reply Buffer if it existed. + */ +void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *r) +{ + r->reply_msg = (u64) NULL; + if (atomic_dec_and_test(&r->refcnt)) { + kfree(r); + } +} + +/* vq_req_get - reference a VQ Request Object. Done + * only in the kernel verbs handlers. + */ +void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *r) +{ + atomic_inc(&r->refcnt); +} + + +/* vq_req_put - dereference and potentially free a VQ Request Object. + * + * This is only called by handle_vq() on the + * interrupt when it is done processing + * a verb reply message. If the associated + * kernel verbs handler has already bailed, + * then this put will actually free the VQ + * Request object _and_ the VQ Reply Buffer + * if it exists. + */ +void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *r) +{ + if (atomic_dec_and_test(&r->refcnt)) { + if (r->reply_msg != (u64) NULL) + vq_repbuf_free(c2dev, + (void *) (unsigned long) r->reply_msg); + kfree(r); + } +} + + +/* + * vq_repbuf_alloc - allocate a VQ Reply Buffer. + */ +void *vq_repbuf_alloc(struct c2_dev *c2dev) +{ + return kmem_cache_alloc(c2dev->host_msg_cache, SLAB_ATOMIC); +} + +/* + * vq_send_wr - post a verbs request message to the Verbs Request Queue. + * If a message is not available in the MQ, then block until one is available. + * NOTE: handle_mq() on the interrupt context will wake up threads blocked here. + * When the adapter drains the Verbs Request Queue, + * it inserts MQ index 0 in to the + * adapter->host activity fifo and interrupts the host. + */ +int vq_send_wr(struct c2_dev *c2dev, union c2wr *wr) +{ + void *msg; + wait_queue_t __wait; + + /* + * grab adapter vq lock + */ + spin_lock(&c2dev->vqlock); + + /* + * allocate msg + */ + msg = c2_mq_alloc(&c2dev->req_vq); + + /* + * If we cannot get a msg, then we'll wait + * When a messages are available, the int handler will wake_up() + * any waiters. + */ + while (msg == NULL) { + pr_debug("%s:%d no available msg in VQ, waiting...\n", + __FUNCTION__, __LINE__); + init_waitqueue_entry(&__wait, current); + add_wait_queue(&c2dev->req_vq_wo, &__wait); + spin_unlock(&c2dev->vqlock); + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); + if (!c2_mq_full(&c2dev->req_vq)) { + break; + } + if (!signal_pending(current)) { + schedule_timeout(1 * HZ); /* 1 second... 
*/ + continue; + } + set_current_state(TASK_RUNNING); + remove_wait_queue(&c2dev->req_vq_wo, &__wait); + return -EINTR; + } + set_current_state(TASK_RUNNING); + remove_wait_queue(&c2dev->req_vq_wo, &__wait); + spin_lock(&c2dev->vqlock); + msg = c2_mq_alloc(&c2dev->req_vq); + } + + /* + * copy wr into adapter msg + */ + memcpy(msg, wr, c2dev->req_vq.msg_size); + + /* + * post msg + */ + c2_mq_produce(&c2dev->req_vq); + + /* + * release adapter vq lock + */ + spin_unlock(&c2dev->vqlock); + return 0; +} + + +/* + * vq_wait_for_reply - block until the adapter posts a Verb Reply Message. + */ +int vq_wait_for_reply(struct c2_dev *c2dev, struct c2_vq_req *req) +{ + if (!wait_event_timeout(req->wait_object, + atomic_read(&req->reply_ready), + 60*HZ)) + return -ETIMEDOUT; + + return 0; +} + +/* + * vq_repbuf_free - Free a Verbs Reply Buffer. + */ +void vq_repbuf_free(struct c2_dev *c2dev, void *reply) +{ + kmem_cache_free(c2dev->host_msg_cache, reply); +} diff --git a/drivers/infiniband/hw/amso1100/c2_vq.h b/drivers/infiniband/hw/amso1100/c2_vq.h new file mode 100644 index 000000000000..33805627a607 --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_vq.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2005 Ammasso, Inc. All rights reserved. + * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _C2_VQ_H_ +#define _C2_VQ_H_ +#include <linux/sched.h> +#include "c2.h" +#include "c2_wr.h" +#include "c2_provider.h" + +struct c2_vq_req { + u64 reply_msg; /* ptr to reply msg */ + wait_queue_head_t wait_object; /* wait object for vq reqs */ + atomic_t reply_ready; /* set when reply is ready */ + atomic_t refcnt; /* used to cancel WRs... 
*/ + int event; + struct iw_cm_id *cm_id; + struct c2_qp *qp; +}; + +extern int vq_init(struct c2_dev *c2dev); +extern void vq_term(struct c2_dev *c2dev); + +extern struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev); +extern void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *req); +extern void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *req); +extern void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *req); +extern int vq_send_wr(struct c2_dev *c2dev, union c2wr * wr); + +extern void *vq_repbuf_alloc(struct c2_dev *c2dev); +extern void vq_repbuf_free(struct c2_dev *c2dev, void *reply); + +extern int vq_wait_for_reply(struct c2_dev *c2dev, struct c2_vq_req *req); +#endif /* _C2_VQ_H_ */ diff --git a/drivers/infiniband/hw/amso1100/c2_wr.h b/drivers/infiniband/hw/amso1100/c2_wr.h new file mode 100644 index 000000000000..3ec6c43bb0ef --- /dev/null +++ b/drivers/infiniband/hw/amso1100/c2_wr.h @@ -0,0 +1,1520 @@ +/* + * Copyright (c) 2005 Ammasso, Inc. All rights reserved. + * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _C2_WR_H_ +#define _C2_WR_H_ + +#ifdef CCDEBUG +#define CCWR_MAGIC 0xb07700b0 +#endif + +#define C2_QP_NO_ATTR_CHANGE 0xFFFFFFFF + +/* Maximum allowed size in bytes of private_data exchange + * on connect. + */ +#define C2_MAX_PRIVATE_DATA_SIZE 200 + +/* + * These types are shared among the adapter, host, and CCIL consumer. 
+ */ +enum c2_cq_notification_type { + C2_CQ_NOTIFICATION_TYPE_NONE = 1, + C2_CQ_NOTIFICATION_TYPE_NEXT, + C2_CQ_NOTIFICATION_TYPE_NEXT_SE +}; + +enum c2_setconfig_cmd { + C2_CFG_ADD_ADDR = 1, + C2_CFG_DEL_ADDR = 2, + C2_CFG_ADD_ROUTE = 3, + C2_CFG_DEL_ROUTE = 4 +}; + +enum c2_getconfig_cmd { + C2_GETCONFIG_ROUTES = 1, + C2_GETCONFIG_ADDRS +}; + +/* + * CCIL Work Request Identifiers + */ +enum c2wr_ids { + CCWR_RNIC_OPEN = 1, + CCWR_RNIC_QUERY, + CCWR_RNIC_SETCONFIG, + CCWR_RNIC_GETCONFIG, + CCWR_RNIC_CLOSE, + CCWR_CQ_CREATE, + CCWR_CQ_QUERY, + CCWR_CQ_MODIFY, + CCWR_CQ_DESTROY, + CCWR_QP_CONNECT, + CCWR_PD_ALLOC, + CCWR_PD_DEALLOC, + CCWR_SRQ_CREATE, + CCWR_SRQ_QUERY, + CCWR_SRQ_MODIFY, + CCWR_SRQ_DESTROY, + CCWR_QP_CREATE, + CCWR_QP_QUERY, + CCWR_QP_MODIFY, + CCWR_QP_DESTROY, + CCWR_NSMR_STAG_ALLOC, + CCWR_NSMR_REGISTER, + CCWR_NSMR_PBL, + CCWR_STAG_DEALLOC, + CCWR_NSMR_REREGISTER, + CCWR_SMR_REGISTER, + CCWR_MR_QUERY, + CCWR_MW_ALLOC, + CCWR_MW_QUERY, + CCWR_EP_CREATE, + CCWR_EP_GETOPT, + CCWR_EP_SETOPT, + CCWR_EP_DESTROY, + CCWR_EP_BIND, + CCWR_EP_CONNECT, + CCWR_EP_LISTEN, + CCWR_EP_SHUTDOWN, + CCWR_EP_LISTEN_CREATE, + CCWR_EP_LISTEN_DESTROY, + CCWR_EP_QUERY, + CCWR_CR_ACCEPT, + CCWR_CR_REJECT, + CCWR_CONSOLE, + CCWR_TERM, + CCWR_FLASH_INIT, + CCWR_FLASH, + CCWR_BUF_ALLOC, + CCWR_BUF_FREE, + CCWR_FLASH_WRITE, + CCWR_INIT, /* WARNING: Don't move this ever again! */ + + + + /* Add new IDs here */ + + + + /* + * WARNING: CCWR_LAST must always be the last verbs id defined! + * All the preceding IDs are fixed, and must not change. + * You can add new IDs, but must not remove or reorder + * any IDs. If you do, YOU will ruin any hope of + * compatibility between versions. + */ + CCWR_LAST, + + /* + * Start over at 1 so that arrays indexed by user wr id's + * begin at 1. This is OK since the verbs and user wr id's + * are always used on disjoint sets of queues. + */ + /* + * The order of the CCWR_SEND_XX verbs must + * match the order of the RDMA_OPs + */ + CCWR_SEND = 1, + CCWR_SEND_INV, + CCWR_SEND_SE, + CCWR_SEND_SE_INV, + CCWR_RDMA_WRITE, + CCWR_RDMA_READ, + CCWR_RDMA_READ_INV, + CCWR_MW_BIND, + CCWR_NSMR_FASTREG, + CCWR_STAG_INVALIDATE, + CCWR_RECV, + CCWR_NOP, + CCWR_UNIMPL, +/* WARNING: This must always be the last user wr id defined! */ +}; +#define RDMA_SEND_OPCODE_FROM_WR_ID(x) (x+2) + +/* + * SQ/RQ Work Request Types + */ +enum c2_wr_type { + C2_WR_TYPE_SEND = CCWR_SEND, + C2_WR_TYPE_SEND_SE = CCWR_SEND_SE, + C2_WR_TYPE_SEND_INV = CCWR_SEND_INV, + C2_WR_TYPE_SEND_SE_INV = CCWR_SEND_SE_INV, + C2_WR_TYPE_RDMA_WRITE = CCWR_RDMA_WRITE, + C2_WR_TYPE_RDMA_READ = CCWR_RDMA_READ, + C2_WR_TYPE_RDMA_READ_INV_STAG = CCWR_RDMA_READ_INV, + C2_WR_TYPE_BIND_MW = CCWR_MW_BIND, + C2_WR_TYPE_FASTREG_NSMR = CCWR_NSMR_FASTREG, + C2_WR_TYPE_INV_STAG = CCWR_STAG_INVALIDATE, + C2_WR_TYPE_RECV = CCWR_RECV, + C2_WR_TYPE_NOP = CCWR_NOP, +}; + +struct c2_netaddr { + u32 ip_addr; + u32 netmask; + u32 mtu; +}; + +struct c2_route { + u32 ip_addr; /* 0 indicates the default route */ + u32 netmask; /* netmask associated with dst */ + u32 flags; + union { + u32 ipaddr; /* address of the nexthop interface */ + u8 enaddr[6]; + } nexthop; +}; + +/* + * A Scatter Gather Entry. + */ +struct c2_data_addr { + u32 stag; + u32 length; + u64 to; +}; + +/* + * MR and MW flags used by the consumer, RI, and RNIC. + */ +enum c2_mm_flags { + MEM_REMOTE = 0x0001, /* allow mw binds with remote access. 
*/ + MEM_VA_BASED = 0x0002, /* Not Zero-based */ + MEM_PBL_COMPLETE = 0x0004, /* PBL array is complete in this msg */ + MEM_LOCAL_READ = 0x0008, /* allow local reads */ + MEM_LOCAL_WRITE = 0x0010, /* allow local writes */ + MEM_REMOTE_READ = 0x0020, /* allow remote reads */ + MEM_REMOTE_WRITE = 0x0040, /* allow remote writes */ + MEM_WINDOW_BIND = 0x0080, /* binds allowed */ + MEM_SHARED = 0x0100, /* set if MR is shared */ + MEM_STAG_VALID = 0x0200 /* set if STAG is in valid state */ +}; + +/* + * CCIL API ACF flags defined in terms of the low level mem flags. + * This minimizes the translation needed in the user API. + */ +enum c2_acf { + C2_ACF_LOCAL_READ = MEM_LOCAL_READ, + C2_ACF_LOCAL_WRITE = MEM_LOCAL_WRITE, + C2_ACF_REMOTE_READ = MEM_REMOTE_READ, + C2_ACF_REMOTE_WRITE = MEM_REMOTE_WRITE, + C2_ACF_WINDOW_BIND = MEM_WINDOW_BIND +}; + +/* + * Image types of objects written to flash + */ +#define C2_FLASH_IMG_BITFILE 1 +#define C2_FLASH_IMG_OPTION_ROM 2 +#define C2_FLASH_IMG_VPD 3 + +/* + * To fix bug 1815 we define the max allowable size of the + * terminate message (per the IETF spec). Refer to the IETF + * protocol specification, section 12.1.6, page 64. + * The message is prefixed by 20 bytes of DDP info. + * + * Then the message has 6 bytes for the terminate control + * and DDP segment length info plus a DDP header (either + * 14 or 18 bytes) plus 28 bytes for the RDMA header. + * Thus the max size is: + * 20 + (6 + 18 + 28) = 72 + */ +#define C2_MAX_TERMINATE_MESSAGE_SIZE (72) + +/* + * Build String Length. It must be the same as C2_BUILD_STR_LEN in ccil_api.h + */ +#define WR_BUILD_STR_LEN 64 + +/* + * WARNING: All of these structs need to align any 64bit types on + * 64 bit boundaries! 64bit types include u64. + */ + +/* + * Clustercore Work Request Header. Be sensitive to field layout + * and alignment. + */ +struct c2wr_hdr { + /* wqe_count is part of the cqe. It is put here so the + * adapter can write to it while the wr is pending without + * clobbering part of the wr. This word need not be dma'd + * from the host to adapter by libccil, but we copy it anyway + * to make the memcpy to the adapter better aligned. + */ + u32 wqe_count; + + /* Put these fields next so that later 32- and 64-bit + * quantities are naturally aligned. 
+ */ + u8 id; + u8 result; /* adapter -> host */ + u8 sge_count; /* host -> adapter */ + u8 flags; /* host -> adapter */ + + u64 context; +#ifdef CCMSGMAGIC + u32 magic; + u32 pad; +#endif +} __attribute__((packed)); + +/* + *------------------------ RNIC ------------------------ + */ + +/* + * WR_RNIC_OPEN + */ + +/* + * Flags for the RNIC WRs + */ +enum c2_rnic_flags { + RNIC_IRD_STATIC = 0x0001, + RNIC_ORD_STATIC = 0x0002, + RNIC_QP_STATIC = 0x0004, + RNIC_SRQ_SUPPORTED = 0x0008, + RNIC_PBL_BLOCK_MODE = 0x0010, + RNIC_SRQ_MODEL_ARRIVAL = 0x0020, + RNIC_CQ_OVF_DETECTED = 0x0040, + RNIC_PRIV_MODE = 0x0080 +}; + +struct c2wr_rnic_open_req { + struct c2wr_hdr hdr; + u64 user_context; + u16 flags; /* See enum c2_rnic_flags */ + u16 port_num; +} __attribute__((packed)); + +struct c2wr_rnic_open_rep { + struct c2wr_hdr hdr; + u32 rnic_handle; +} __attribute__((packed)); + +union c2wr_rnic_open { + struct c2wr_rnic_open_req req; + struct c2wr_rnic_open_rep rep; +} __attribute__((packed)); + +struct c2wr_rnic_query_req { + struct c2wr_hdr hdr; + u32 rnic_handle; +} __attribute__((packed)); + +/* + * WR_RNIC_QUERY + */ +struct c2wr_rnic_query_rep { + struct c2wr_hdr hdr; + u64 user_context; + u32 vendor_id; + u32 part_number; + u32 hw_version; + u32 fw_ver_major; + u32 fw_ver_minor; + u32 fw_ver_patch; + char fw_ver_build_str[WR_BUILD_STR_LEN]; + u32 max_qps; + u32 max_qp_depth; + u32 max_srq_depth; + u32 max_send_sgl_depth; + u32 max_rdma_sgl_depth; + u32 max_cqs; + u32 max_cq_depth; + u32 max_cq_event_handlers; + u32 max_mrs; + u32 max_pbl_depth; + u32 max_pds; + u32 max_global_ird; + u32 max_global_ord; + u32 max_qp_ird; + u32 max_qp_ord; + u32 flags; + u32 max_mws; + u32 pbe_range_low; + u32 pbe_range_high; + u32 max_srqs; + u32 page_size; +} __attribute__((packed)); + +union c2wr_rnic_query { + struct c2wr_rnic_query_req req; + struct c2wr_rnic_query_rep rep; +} __attribute__((packed)); + +/* + * WR_RNIC_GETCONFIG + */ + +struct c2wr_rnic_getconfig_req { + struct c2wr_hdr hdr; + u32 rnic_handle; + u32 option; /* see c2_getconfig_cmd_t */ + u64 reply_buf; + u32 reply_buf_len; +} __attribute__((packed)) ; + +struct c2wr_rnic_getconfig_rep { + struct c2wr_hdr hdr; + u32 option; /* see c2_getconfig_cmd_t */ + u32 count_len; /* length of the number of addresses configured */ +} __attribute__((packed)) ; + +union c2wr_rnic_getconfig { + struct c2wr_rnic_getconfig_req req; + struct c2wr_rnic_getconfig_rep rep; +} __attribute__((packed)) ; + +/* + * WR_RNIC_SETCONFIG + */ +struct c2wr_rnic_setconfig_req { + struct c2wr_hdr hdr; + u32 rnic_handle; + u32 option; /* See c2_setconfig_cmd_t */ + /* variable data and pad. 
See c2_netaddr and c2_route */ + u8 data[0]; +} __attribute__((packed)) ; + +struct c2wr_rnic_setconfig_rep { + struct c2wr_hdr hdr; +} __attribute__((packed)) ; + +union c2wr_rnic_setconfig { + struct c2wr_rnic_setconfig_req req; + struct c2wr_rnic_setconfig_rep rep; +} __attribute__((packed)) ; + +/* + * WR_RNIC_CLOSE + */ +struct c2wr_rnic_close_req { + struct c2wr_hdr hdr; + u32 rnic_handle; +} __attribute__((packed)) ; + +struct c2wr_rnic_close_rep { + struct c2wr_hdr hdr; +} __attribute__((packed)) ; + +union c2wr_rnic_close { + struct c2wr_rnic_close_req req; + struct c2wr_rnic_close_rep rep; +} __attribute__((packed)) ; + +/* + *------------------------ CQ ------------------------ + */ +struct c2wr_cq_create_req { + struct c2wr_hdr hdr; + u64 shared_ht; + u64 user_context; + u64 msg_pool; + u32 rnic_handle; + u32 msg_size; + u32 depth; +} __attribute__((packed)) ; + +struct c2wr_cq_create_rep { + struct c2wr_hdr hdr; + u32 mq_index; + u32 adapter_shared; + u32 cq_handle; +} __attribute__((packed)) ; + +union c2wr_cq_create { + struct c2wr_cq_create_req req; + struct c2wr_cq_create_rep rep; +} __attribute__((packed)) ; + +struct c2wr_cq_modify_req { + struct c2wr_hdr hdr; + u32 rnic_handle; + u32 cq_handle; + u32 new_depth; + u64 new_msg_pool; +} __attribute__((packed)) ; + +struct c2wr_cq_modify_rep { + struct c2wr_hdr hdr; +} __attribute__((packed)) ; + +union c2wr_cq_modify { + struct c2wr_cq_modify_req req; + struct c2wr_cq_modify_rep rep; +} __attribute__((packed)) ; + +struct c2wr_cq_destroy_req { + struct c2wr_hdr hdr; + u32 rnic_handle; + u32 cq_handle; +} __attribute__((packed)) ; + +struct c2wr_cq_destroy_rep { + struct c2wr_hdr hdr; +} __attribute__((packed)) ; + +union c2wr_cq_destroy { + struct c2wr_cq_destroy_req req; + struct c2wr_cq_destroy_rep rep; +} __attribute__((packed)) ; + +/* + *------------------------ PD ------------------------ + */ +struct c2wr_pd_alloc_req { + struct c2wr_hdr hdr; + u32 rnic_handle; + u32 pd_id; +} __attribute__((packed)) ; + +struct c2wr_pd_alloc_rep { + struct c2wr_hdr hdr; +} __attribute__((packed)) ; + +union c2wr_pd_alloc { + struct c2wr_pd_alloc_req req; + struct c2wr_pd_alloc_rep rep; +} __attribute__((packed)) ; + +struct c2wr_pd_dealloc_req { + struct c2wr_hdr hdr; + u32 rnic_handle; + u32 pd_id; +} __attribute__((packed)) ; + +struct c2wr_pd_dealloc_rep { + struct c2wr_hdr hdr; +} __attribute__((packed)) ; + +union c2wr_pd_dealloc { + struct c2wr_pd_dealloc_req req; + struct c2wr_pd_dealloc_rep rep; +} __attribute__((packed)) ; + +/* + *------------------------ SRQ ------------------------ + */ +struct c2wr_srq_create_req { + struct c2wr_hdr hdr; + u64 shared_ht; + u64 user_context; + u32 rnic_handle; + u32 srq_depth; + u32 srq_limit; + u32 sgl_depth; + u32 pd_id; +} __attribute__((packed)) ; + +struct c2wr_srq_create_rep { + struct c2wr_hdr hdr; + u32 srq_depth; + u32 sgl_depth; + u32 msg_size; + u32 mq_index; + u32 mq_start; + u32 srq_handle; +} __attribute__((packed)) ; + +union c2wr_srq_create { + struct c2wr_srq_create_req req; + struct c2wr_srq_create_rep rep; +} __attribute__((packed)) ; + +struct c2wr_srq_destroy_req { + struct c2wr_hdr hdr; + u32 rnic_handle; + u32 srq_handle; +} __attribute__((packed)) ; + +struct c2wr_srq_destroy_rep { + struct c2wr_hdr hdr; +} __attribute__((packed)) ; + +union c2wr_srq_destroy { + struct c2wr_srq_destroy_req req; + struct c2wr_srq_destroy_rep rep; +} __attribute__((packed)) ; + +/* + *------------------------ QP ------------------------ + */ +enum c2wr_qp_flags { + QP_RDMA_READ = 
0x00000001, /* RDMA read enabled? */ + QP_RDMA_WRITE = 0x00000002, /* RDMA write enabled? */ + QP_MW_BIND = 0x00000004, /* MWs enabled */ + QP_ZERO_STAG = 0x00000008, /* enabled? */ + QP_REMOTE_TERMINATION = 0x00000010, /* remote end terminated */ + QP_RDMA_READ_RESPONSE = 0x00000020 /* Remote RDMA read */ + /* enabled? */ +}; + +struct c2wr_qp_create_req { + struct c2wr_hdr hdr; + u64 shared_sq_ht; + u64 shared_rq_ht; + u64 user_context; + u32 rnic_handle; + u32 sq_cq_handle; + u32 rq_cq_handle; + u32 sq_depth; + u32 rq_depth; + u32 srq_handle; + u32 srq_limit; + u32 flags; /* see enum c2wr_qp_flags */ + u32 send_sgl_depth; + u32 recv_sgl_depth; + u32 rdma_write_sgl_depth; + u32 ord; + u32 ird; + u32 pd_id; +} __attribute__((packed)) ; + +struct c2wr_qp_create_rep { + struct c2wr_hdr hdr; + u32 sq_depth; + u32 rq_depth; + u32 send_sgl_depth; + u32 recv_sgl_depth; + u32 rdma_write_sgl_depth; + u32 ord; + u32 ird; + u32 sq_msg_size; + u32 sq_mq_index; + u32 sq_mq_start; + u32 rq_msg_size; + u32 rq_mq_index; + u32 rq_mq_start; + u32 qp_handle; +} __attribute__((packed)) ; + +union c2wr_qp_create { + struct c2wr_qp_create_req req; + struct c2wr_qp_create_rep rep; +} __attribute__((packed)) ; + +struct c2wr_qp_query_req { + struct c2wr_hdr hdr; + u32 rnic_handle; + u32 qp_handle; +} __attribute__((packed)) ; + +struct c2wr_qp_query_rep { + struct c2wr_hdr hdr; + u64 user_context; + u32 rnic_handle; + u32 sq_depth; + u32 rq_depth; + u32 send_sgl_depth; + u32 rdma_write_sgl_depth; + u32 recv_sgl_depth; + u32 ord; + u32 ird; + u16 qp_state; + u16 flags; /* see c2wr_qp_flags_t */ + u32 qp_id; + u32 local_addr; + u32 remote_addr; + u16 local_port; + u16 remote_port; + u32 terminate_msg_length; /* 0 if not present */ + u8 data[0]; + /* Terminate Message in-line here. */ +} __attribute__((packed)) ; + +union c2wr_qp_query { + struct c2wr_qp_query_req req; + struct c2wr_qp_query_rep rep; +} __attribute__((packed)) ; + +struct c2wr_qp_modify_req { + struct c2wr_hdr hdr; + u64 stream_msg; + u32 stream_msg_length; + u32 rnic_handle; + u32 qp_handle; + u32 next_qp_state; + u32 ord; + u32 ird; + u32 sq_depth; + u32 rq_depth; + u32 llp_ep_handle; +} __attribute__((packed)) ; + +struct c2wr_qp_modify_rep { + struct c2wr_hdr hdr; + u32 ord; + u32 ird; + u32 sq_depth; + u32 rq_depth; + u32 sq_msg_size; + u32 sq_mq_index; + u32 sq_mq_start; + u32 rq_msg_size; + u32 rq_mq_index; + u32 rq_mq_start; +} __attribute__((packed)) ; + +union c2wr_qp_modify { + struct c2wr_qp_modify_req req; + struct c2wr_qp_modify_rep rep; +} __attribute__((packed)) ; + +struct c2wr_qp_destroy_req { + struct c2wr_hdr hdr; + u32 rnic_handle; + u32 qp_handle; +} __attribute__((packed)) ; + +struct c2wr_qp_destroy_rep { + struct c2wr_hdr hdr; +} __attribute__((packed)) ; + +union c2wr_qp_destroy { + struct c2wr_qp_destroy_req req; + struct c2wr_qp_destroy_rep rep; +} __attribute__((packed)) ; + +/* + * The CCWR_QP_CONNECT msg is posted on the verbs request queue. It can + * only be posted when a QP is in IDLE state. After the connect request is + * submitted to the LLP, the adapter moves the QP to CONNECT_PENDING state. + * No synchronous reply from adapter to this WR. The results of + * connection are passed back in an async event CCAE_ACTIVE_CONNECT_RESULTS + * See c2wr_ae_active_connect_results_t + */ +struct c2wr_qp_connect_req { + struct c2wr_hdr hdr; + u32 rnic_handle; + u32 qp_handle; + u32 remote_addr; + u16 remote_port; + u16 pad; + u32 private_data_length; + u8 private_data[0]; /* Private data in-line. 
*/ +} __attribute__((packed)) ; + +struct c2wr_qp_connect { + struct c2wr_qp_connect_req req; + /* no synchronous reply. */ +} __attribute__((packed)) ; + + +/* + *------------------------ MM ------------------------ + */ + +struct c2wr_nsmr_stag_alloc_req { + struct c2wr_hdr hdr; + u32 rnic_handle; + u32 pbl_depth; + u32 pd_id; + u32 flags; +} __attribute__((packed)) ; + +struct c2wr_nsmr_stag_alloc_rep { + struct c2wr_hdr hdr; + u32 pbl_depth; + u32 stag_index; +} __attribute__((packed)) ; + +union c2wr_nsmr_stag_alloc { + struct c2wr_nsmr_stag_alloc_req req; + struct c2wr_nsmr_stag_alloc_rep rep; +} __attribute__((packed)) ; + +struct c2wr_nsmr_register_req { + struct c2wr_hdr hdr; + u64 va; + u32 rnic_handle; + u16 flags; + u8 stag_key; + u8 pad; + u32 pd_id; + u32 pbl_depth; + u32 pbe_size; + u32 fbo; + u32 length; + u32 addrs_length; + /* array of paddrs (must be aligned on a 64bit boundary) */ + u64 paddrs[0]; +} __attribute__((packed)) ; + +struct c2wr_nsmr_register_rep { + struct c2wr_hdr hdr; + u32 pbl_depth; + u32 stag_index; +} __attribute__((packed)) ; + +union c2wr_nsmr_register { + struct c2wr_nsmr_register_req req; + struct c2wr_nsmr_register_rep rep; +} __attribute__((packed)) ; + +struct c2wr_nsmr_pbl_req { + struct c2wr_hdr hdr; + u32 rnic_handle; + u32 flags; + u32 stag_index; + u32 addrs_length; + /* array of paddrs (must be aligned on a 64bit boundary) */ + u64 paddrs[0]; +} __attribute__((packed)) ; + +struct c2wr_nsmr_pbl_rep { + struct c2wr_hdr hdr; +} __attribute__((packed)) ; + +union c2wr_nsmr_pbl { + struct c2wr_nsmr_pbl_req req; + struct c2wr_nsmr_pbl_rep rep; +} __attribute__((packed)) ; + +struct c2wr_mr_query_req { + struct c2wr_hdr hdr; + u32 rnic_handle; + u32 stag_index; +} __attribute__((packed)) ; + +struct c2wr_mr_query_rep { + struct c2wr_hdr hdr; + u8 stag_key; + u8 pad[3]; + u32 pd_id; + u32 flags; + u32 pbl_depth; +} __attribute__((packed)) ; + +union c2wr_mr_query { + struct c2wr_mr_query_req req; + struct c2wr_mr_query_rep rep; +} __attribute__((packed)) ; + +struct c2wr_mw_query_req { + struct c2wr_hdr hdr; + u32 rnic_handle; + u32 stag_index; +} __attribute__((packed)) ; + +struct c2wr_mw_query_rep { + struct c2wr_hdr hdr; + u8 stag_key; + u8 pad[3]; + u32 pd_id; + u32 flags; +} __attribute__((packed)) ; + +union c2wr_mw_query { + struct c2wr_mw_query_req req; + struct c2wr_mw_query_rep rep; +} __attribute__((packed)) ; + + +struct c2wr_stag_dealloc_req { + struct c2wr_hdr hdr; + u32 rnic_handle; + u32 stag_index; +} __attribute__((packed)) ; + +struct c2wr_stag_dealloc_rep { + struct c2wr_hdr hdr; +} __attribute__((packed)) ; + +union c2wr_stag_dealloc { + struct c2wr_stag_dealloc_req req; + struct c2wr_stag_dealloc_rep rep; +} __attribute__((packed)) ; + +struct c2wr_nsmr_reregister_req { + struct c2wr_hdr hdr; + u64 va; + u32 rnic_handle; + u16 flags; + u8 stag_key; + u8 pad; + u32 stag_index; + u32 pd_id; + u32 pbl_depth; + u32 pbe_size; + u32 fbo; + u32 length; + u32 addrs_length; + u32 pad1; + /* array of paddrs (must be aligned on a 64bit boundary) */ + u64 paddrs[0]; +} __attribute__((packed)) ; + +struct c2wr_nsmr_reregister_rep { + struct c2wr_hdr hdr; + u32 pbl_depth; + u32 stag_index; +} __attribute__((packed)) ; + +union c2wr_nsmr_reregister { + struct c2wr_nsmr_reregister_req req; + struct c2wr_nsmr_reregister_rep rep; +} __attribute__((packed)) ; + +struct c2wr_smr_register_req { + struct c2wr_hdr hdr; + u64 va; + u32 rnic_handle; + u16 flags; + u8 stag_key; + u8 pad; + u32 stag_index; + u32 pd_id; +} __attribute__((packed)) ; + 
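For reference, every verb message in this file follows the request/reply pattern set up by the c2_vq.c routines earlier in this patch: a kernel verbs handler allocates a c2_vq_req, stamps the header with one of the CCWR ids, posts the message with vq_send_wr(), and sleeps in vq_wait_for_reply() until handle_vq() reflects the reply back through the header's context field. A minimal sketch of that round trip is shown below, using the stag-dealloc verb; the c2_dev field name holding the RNIC handle and the final error mapping are assumptions, not taken from this patch.

    /*
     * Hedged sketch of the verb round trip described in c2_vq.c:
     * alloc a vq_req, stamp the header, post the WR, wait for
     * handle_vq() to reflect the reply, then free the reply buffer
     * before the request object (in that order).
     */
    static int example_stag_dealloc(struct c2_dev *c2dev, u32 stag_index)
    {
            struct c2wr_stag_dealloc_req wr;
            struct c2wr_stag_dealloc_rep *reply;
            struct c2_vq_req *vq_req;
            int err;

            vq_req = vq_req_alloc(c2dev);
            if (!vq_req)
                    return -ENOMEM;

            memset(&wr, 0, sizeof(wr));
            c2_wr_set_id(&wr, CCWR_STAG_DEALLOC);
            wr.hdr.context = (unsigned long) vq_req; /* reflected back */
            wr.rnic_handle = c2dev->rnic_handle;     /* field name assumed */
            wr.stag_index = stag_index;

            vq_req_get(c2dev, vq_req); /* 2nd ref, in case we bail early */

            err = vq_send_wr(c2dev, (union c2wr *) &wr);
            if (err) {
                    vq_req_put(c2dev, vq_req); /* drop the extra ref */
                    goto bail;
            }

            err = vq_wait_for_reply(c2dev, vq_req);
            if (err)
                    goto bail; /* adapter still holds the extra ref */

            reply = (struct c2wr_stag_dealloc_rep *)
                    (unsigned long) vq_req->reply_msg;
            if (!reply) {
                    err = -ENOMEM;
                    goto bail;
            }
            if (c2_wr_get_result(reply))
                    err = -EIO; /* a real handler would map the result */

            vq_repbuf_free(c2dev, reply); /* repbuf first... */
    bail:
            vq_req_free(c2dev, vq_req);   /* ...then the request object */
            return err;
    }
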
+struct c2wr_smr_register_rep { + struct c2wr_hdr hdr; + u32 stag_index; +} __attribute__((packed)) ; + +union c2wr_smr_register { + struct c2wr_smr_register_req req; + struct c2wr_smr_register_rep rep; +} __attribute__((packed)) ; + +struct c2wr_mw_alloc_req { + struct c2wr_hdr hdr; + u32 rnic_handle; + u32 pd_id; +} __attribute__((packed)) ; + +struct c2wr_mw_alloc_rep { + struct c2wr_hdr hdr; + u32 stag_index; +} __attribute__((packed)) ; + +union c2wr_mw_alloc { + struct c2wr_mw_alloc_req req; + struct c2wr_mw_alloc_rep rep; +} __attribute__((packed)) ; + +/* + *------------------------ WRs ----------------------- + */ + +struct c2wr_user_hdr { + struct c2wr_hdr hdr; /* Has status and WR Type */ +} __attribute__((packed)) ; + +enum c2_qp_state { + C2_QP_STATE_IDLE = 0x01, + C2_QP_STATE_CONNECTING = 0x02, + C2_QP_STATE_RTS = 0x04, + C2_QP_STATE_CLOSING = 0x08, + C2_QP_STATE_TERMINATE = 0x10, + C2_QP_STATE_ERROR = 0x20, +}; + +/* Completion queue entry. */ +struct c2wr_ce { + struct c2wr_hdr hdr; /* Has status and WR Type */ + u64 qp_user_context; /* c2_user_qp_t * */ + u32 qp_state; /* Current QP State */ + u32 handle; /* QPID or EP Handle */ + u32 bytes_rcvd; /* valid for RECV WCs */ + u32 stag; +} __attribute__((packed)) ; + + +/* + * Flags used for all post-sq WRs. These must fit in the flags + * field of the struct c2wr_hdr (eight bits). + */ +enum { + SQ_SIGNALED = 0x01, + SQ_READ_FENCE = 0x02, + SQ_FENCE = 0x04, +}; + +/* + * Common fields for all post-sq WRs. Namely the standard header and a + * secondary header with fields common to all post-sq WRs. + */ +struct c2_sq_hdr { + struct c2wr_user_hdr user_hdr; +} __attribute__((packed)); + +/* + * Same as above but for post-rq WRs. + */ +struct c2_rq_hdr { + struct c2wr_user_hdr user_hdr; +} __attribute__((packed)); + +/* + * use the same struct for all sends. 
+ */ +struct c2wr_send_req { + struct c2_sq_hdr sq_hdr; + u32 sge_len; + u32 remote_stag; + u8 data[0]; /* SGE array */ +} __attribute__((packed)); + +union c2wr_send { + struct c2wr_send_req req; + struct c2wr_ce rep; +} __attribute__((packed)); + +struct c2wr_rdma_write_req { + struct c2_sq_hdr sq_hdr; + u64 remote_to; + u32 remote_stag; + u32 sge_len; + u8 data[0]; /* SGE array */ +} __attribute__((packed)); + +union c2wr_rdma_write { + struct c2wr_rdma_write_req req; + struct c2wr_ce rep; +} __attribute__((packed)); + +struct c2wr_rdma_read_req { + struct c2_sq_hdr sq_hdr; + u64 local_to; + u64 remote_to; + u32 local_stag; + u32 remote_stag; + u32 length; +} __attribute__((packed)); + +union c2wr_rdma_read { + struct c2wr_rdma_read_req req; + struct c2wr_ce rep; +} __attribute__((packed)); + +struct c2wr_mw_bind_req { + struct c2_sq_hdr sq_hdr; + u64 va; + u8 stag_key; + u8 pad[3]; + u32 mw_stag_index; + u32 mr_stag_index; + u32 length; + u32 flags; +} __attribute__((packed)); + +union c2wr_mw_bind { + struct c2wr_mw_bind_req req; + struct c2wr_ce rep; +} __attribute__((packed)); + +struct c2wr_nsmr_fastreg_req { + struct c2_sq_hdr sq_hdr; + u64 va; + u8 stag_key; + u8 pad[3]; + u32 stag_index; + u32 pbe_size; + u32 fbo; + u32 length; + u32 addrs_length; + /* array of paddrs (must be aligned on a 64bit boundary) */ + u64 paddrs[0]; +} __attribute__((packed)); + +union c2wr_nsmr_fastreg { + struct c2wr_nsmr_fastreg_req req; + struct c2wr_ce rep; +} __attribute__((packed)); + +struct c2wr_stag_invalidate_req { + struct c2_sq_hdr sq_hdr; + u8 stag_key; + u8 pad[3]; + u32 stag_index; +} __attribute__((packed)); + +union c2wr_stag_invalidate { + struct c2wr_stag_invalidate_req req; + struct c2wr_ce rep; +} __attribute__((packed)); + +union c2wr_sqwr { + struct c2_sq_hdr sq_hdr; + struct c2wr_send_req send; + struct c2wr_send_req send_se; + struct c2wr_send_req send_inv; + struct c2wr_send_req send_se_inv; + struct c2wr_rdma_write_req rdma_write; + struct c2wr_rdma_read_req rdma_read; + struct c2wr_mw_bind_req mw_bind; + struct c2wr_nsmr_fastreg_req nsmr_fastreg; + struct c2wr_stag_invalidate_req stag_inv; +} __attribute__((packed)); + + +/* + * RQ WRs + */ +struct c2wr_rqwr { + struct c2_rq_hdr rq_hdr; + u8 data[0]; /* array of SGEs */ +} __attribute__((packed)); + +union c2wr_recv { + struct c2wr_rqwr req; + struct c2wr_ce rep; +} __attribute__((packed)); + +/* + * All AEs start with this header. Most AEs only need to convey the + * information in the header. Some, like LLP connection events, need + * more info. The union typedef c2wr_ae_t has all the possible AEs. + * + * hdr.context is the user_context from the rnic_open WR. NULL if this + * is not affiliated with an rnic. + * + * hdr.id is the AE identifier (e.g. CCAE_REMOTE_SHUTDOWN, + * CCAE_LLP_CLOSE_COMPLETE) + * + * resource_type is one of: C2_RES_IND_QP, C2_RES_IND_CQ, C2_RES_IND_SRQ + * + * user_context is the context passed down when the host created the resource. + */ +struct c2wr_ae_hdr { + struct c2wr_hdr hdr; + u64 user_context; /* user context for this res. 
*/ + u32 resource_type; /* see enum c2_resource_indicator */ + u32 resource; /* handle for resource */ + u32 qp_state; /* current QP State */ +} __attribute__((packed)); + +/* + * After submitting the CCAE_ACTIVE_CONNECT_RESULTS message on the AEQ, + * the adapter moves the QP into RTS state + */ +struct c2wr_ae_active_connect_results { + struct c2wr_ae_hdr ae_hdr; + u32 laddr; + u32 raddr; + u16 lport; + u16 rport; + u32 private_data_length; + u8 private_data[0]; /* data is in-line in the msg. */ +} __attribute__((packed)); + +/* + * When connections are established by the stack (and the private data + * MPA frame is received), the adapter will generate an event to the host. + * The details of the connection, any private data, and the new connection + * request handle is passed up via the CCAE_CONNECTION_REQUEST msg on the + * AE queue: + */ +struct c2wr_ae_connection_request { + struct c2wr_ae_hdr ae_hdr; + u32 cr_handle; /* connreq handle (sock ptr) */ + u32 laddr; + u32 raddr; + u16 lport; + u16 rport; + u32 private_data_length; + u8 private_data[0]; /* data is in-line in the msg. */ +} __attribute__((packed)); + +union c2wr_ae { + struct c2wr_ae_hdr ae_generic; + struct c2wr_ae_active_connect_results ae_active_connect_results; + struct c2wr_ae_connection_request ae_connection_request; +} __attribute__((packed)); + +struct c2wr_init_req { + struct c2wr_hdr hdr; + u64 hint_count; + u64 q0_host_shared; + u64 q1_host_shared; + u64 q1_host_msg_pool; + u64 q2_host_shared; + u64 q2_host_msg_pool; +} __attribute__((packed)); + +struct c2wr_init_rep { + struct c2wr_hdr hdr; +} __attribute__((packed)); + +union c2wr_init { + struct c2wr_init_req req; + struct c2wr_init_rep rep; +} __attribute__((packed)); + +/* + * For upgrading flash. + */ + +struct c2wr_flash_init_req { + struct c2wr_hdr hdr; + u32 rnic_handle; +} __attribute__((packed)); + +struct c2wr_flash_init_rep { + struct c2wr_hdr hdr; + u32 adapter_flash_buf_offset; + u32 adapter_flash_len; +} __attribute__((packed)); + +union c2wr_flash_init { + struct c2wr_flash_init_req req; + struct c2wr_flash_init_rep rep; +} __attribute__((packed)); + +struct c2wr_flash_req { + struct c2wr_hdr hdr; + u32 rnic_handle; + u32 len; +} __attribute__((packed)); + +struct c2wr_flash_rep { + struct c2wr_hdr hdr; + u32 status; +} __attribute__((packed)); + +union c2wr_flash { + struct c2wr_flash_req req; + struct c2wr_flash_rep rep; +} __attribute__((packed)); + +struct c2wr_buf_alloc_req { + struct c2wr_hdr hdr; + u32 rnic_handle; + u32 size; +} __attribute__((packed)); + +struct c2wr_buf_alloc_rep { + struct c2wr_hdr hdr; + u32 offset; /* 0 if mem not available */ + u32 size; /* 0 if mem not available */ +} __attribute__((packed)); + +union c2wr_buf_alloc { + struct c2wr_buf_alloc_req req; + struct c2wr_buf_alloc_rep rep; +} __attribute__((packed)); + +struct c2wr_buf_free_req { + struct c2wr_hdr hdr; + u32 rnic_handle; + u32 offset; /* Must match value from alloc */ + u32 size; /* Must match value from alloc */ +} __attribute__((packed)); + +struct c2wr_buf_free_rep { + struct c2wr_hdr hdr; +} __attribute__((packed)); + +union c2wr_buf_free { + struct c2wr_buf_free_req req; + struct c2wr_ce rep; +} __attribute__((packed)); + +struct c2wr_flash_write_req { + struct c2wr_hdr hdr; + u32 rnic_handle; + u32 offset; + u32 size; + u32 type; + u32 flags; +} __attribute__((packed)); + +struct c2wr_flash_write_rep { + struct c2wr_hdr hdr; + u32 status; +} __attribute__((packed)); + +union c2wr_flash_write { + struct c2wr_flash_write_req req; + struct 
c2wr_flash_write_rep rep; +} __attribute__((packed)); + +/* + * Messages for LLP connection setup. + */ + +/* + * Listen Request. This allocates a listening endpoint to allow passive + * connection setup. Newly established LLP connections are passed up + * via an AE. See c2wr_ae_connection_request_t + */ +struct c2wr_ep_listen_create_req { + struct c2wr_hdr hdr; + u64 user_context; /* returned in AEs. */ + u32 rnic_handle; + u32 local_addr; /* local addr, or 0 */ + u16 local_port; /* 0 means "pick one" */ + u16 pad; + u32 backlog; /* traditional TCP listen backlog */ +} __attribute__((packed)); + +struct c2wr_ep_listen_create_rep { + struct c2wr_hdr hdr; + u32 ep_handle; /* handle to new listening ep */ + u16 local_port; /* resulting port... */ + u16 pad; +} __attribute__((packed)); + +union c2wr_ep_listen_create { + struct c2wr_ep_listen_create_req req; + struct c2wr_ep_listen_create_rep rep; +} __attribute__((packed)); + +struct c2wr_ep_listen_destroy_req { + struct c2wr_hdr hdr; + u32 rnic_handle; + u32 ep_handle; +} __attribute__((packed)); + +struct c2wr_ep_listen_destroy_rep { + struct c2wr_hdr hdr; +} __attribute__((packed)); + +union c2wr_ep_listen_destroy { + struct c2wr_ep_listen_destroy_req req; + struct c2wr_ep_listen_destroy_rep rep; +} __attribute__((packed)); + +struct c2wr_ep_query_req { + struct c2wr_hdr hdr; + u32 rnic_handle; + u32 ep_handle; +} __attribute__((packed)); + +struct c2wr_ep_query_rep { + struct c2wr_hdr hdr; + u32 rnic_handle; + u32 local_addr; + u32 remote_addr; + u16 local_port; + u16 remote_port; +} __attribute__((packed)); + +union c2wr_ep_query { + struct c2wr_ep_query_req req; + struct c2wr_ep_query_rep rep; +} __attribute__((packed)); + + +/* + * The host passes this down to indicate acceptance of a pending iWARP + * connection. The cr_handle was obtained from the CONNECTION_REQUEST + * AE passed up by the adapter. See c2wr_ae_connection_request_t. + */ +struct c2wr_cr_accept_req { + struct c2wr_hdr hdr; + u32 rnic_handle; + u32 qp_handle; /* QP to bind to this LLP conn */ + u32 ep_handle; /* LLP handle to accept */ + u32 private_data_length; + u8 private_data[0]; /* data in-line in msg. */ +} __attribute__((packed)); + +/* + * adapter sends reply when private data is successfully submitted to + * the LLP. + */ +struct c2wr_cr_accept_rep { + struct c2wr_hdr hdr; +} __attribute__((packed)); + +union c2wr_cr_accept { + struct c2wr_cr_accept_req req; + struct c2wr_cr_accept_rep rep; +} __attribute__((packed)); + +/* + * The host sends this down if a given iWARP connection request was + * rejected by the consumer. The cr_handle was obtained from a + * previous c2wr_ae_connection_request_t AE sent by the adapter. + */ +struct c2wr_cr_reject_req { + struct c2wr_hdr hdr; + u32 rnic_handle; + u32 ep_handle; /* LLP handle to reject */ +} __attribute__((packed)); + +/* + * Dunno if this is needed, but we'll add it for now. The adapter will + * send the reject_reply after the LLP endpoint has been destroyed. + */ +struct c2wr_cr_reject_rep { + struct c2wr_hdr hdr; +} __attribute__((packed)); + +union c2wr_cr_reject { + struct c2wr_cr_reject_req req; + struct c2wr_cr_reject_rep rep; +} __attribute__((packed)); + +/* + * console command. Used to implement a debug console over the verbs + * request and reply queues. + */ + +/* + * Console request message. It contains: + * - message hdr with id = CCWR_CONSOLE + * - the physaddr/len of host memory to be used for the reply. + * - the command string. 
eg: "netstat -s" or "zoneinfo" + */ +struct c2wr_console_req { + struct c2wr_hdr hdr; /* id = CCWR_CONSOLE */ + u64 reply_buf; /* pinned host buf for reply */ + u32 reply_buf_len; /* length of reply buffer */ + u8 command[0]; /* NUL terminated ascii string */ + /* containing the command req */ +} __attribute__((packed)); + +/* + * flags used in the console reply. + */ +enum c2_console_flags { + CONS_REPLY_TRUNCATED = 0x00000001 /* reply was truncated */ +} __attribute__((packed)); + +/* + * Console reply message. + * hdr.result contains the c2_status_t error if the reply was _not_ generated, + * or C2_OK if the reply was generated. + */ +struct c2wr_console_rep { + struct c2wr_hdr hdr; /* id = CCWR_CONSOLE */ + u32 flags; +} __attribute__((packed)); + +union c2wr_console { + struct c2wr_console_req req; + struct c2wr_console_rep rep; +} __attribute__((packed)); + + +/* + * Giant union with all WRs. Makes life easier... + */ +union c2wr { + struct c2wr_hdr hdr; + struct c2wr_user_hdr user_hdr; + union c2wr_rnic_open rnic_open; + union c2wr_rnic_query rnic_query; + union c2wr_rnic_getconfig rnic_getconfig; + union c2wr_rnic_setconfig rnic_setconfig; + union c2wr_rnic_close rnic_close; + union c2wr_cq_create cq_create; + union c2wr_cq_modify cq_modify; + union c2wr_cq_destroy cq_destroy; + union c2wr_pd_alloc pd_alloc; + union c2wr_pd_dealloc pd_dealloc; + union c2wr_srq_create srq_create; + union c2wr_srq_destroy srq_destroy; + union c2wr_qp_create qp_create; + union c2wr_qp_query qp_query; + union c2wr_qp_modify qp_modify; + union c2wr_qp_destroy qp_destroy; + struct c2wr_qp_connect qp_connect; + union c2wr_nsmr_stag_alloc nsmr_stag_alloc; + union c2wr_nsmr_register nsmr_register; + union c2wr_nsmr_pbl nsmr_pbl; + union c2wr_mr_query mr_query; + union c2wr_mw_query mw_query; + union c2wr_stag_dealloc stag_dealloc; + union c2wr_sqwr sqwr; + struct c2wr_rqwr rqwr; + struct c2wr_ce ce; + union c2wr_ae ae; + union c2wr_init init; + union c2wr_ep_listen_create ep_listen_create; + union c2wr_ep_listen_destroy ep_listen_destroy; + union c2wr_cr_accept cr_accept; + union c2wr_cr_reject cr_reject; + union c2wr_console console; + union c2wr_flash_init flash_init; + union c2wr_flash flash; + union c2wr_buf_alloc buf_alloc; + union c2wr_buf_free buf_free; + union c2wr_flash_write flash_write; +} __attribute__((packed)); + + +/* + * Accessors for the wr fields that are packed together tightly to + * reduce the wr message size. The wr arguments are void* so that + * either a struct c2wr*, a struct c2wr_hdr*, or a pointer to any of the types + * in the struct c2wr union can be passed in. 
+ */ +static __inline__ u8 c2_wr_get_id(void *wr) +{ + return ((struct c2wr_hdr *) wr)->id; +} +static __inline__ void c2_wr_set_id(void *wr, u8 id) +{ + ((struct c2wr_hdr *) wr)->id = id; +} +static __inline__ u8 c2_wr_get_result(void *wr) +{ + return ((struct c2wr_hdr *) wr)->result; +} +static __inline__ void c2_wr_set_result(void *wr, u8 result) +{ + ((struct c2wr_hdr *) wr)->result = result; +} +static __inline__ u8 c2_wr_get_flags(void *wr) +{ + return ((struct c2wr_hdr *) wr)->flags; +} +static __inline__ void c2_wr_set_flags(void *wr, u8 flags) +{ + ((struct c2wr_hdr *) wr)->flags = flags; +} +static __inline__ u8 c2_wr_get_sge_count(void *wr) +{ + return ((struct c2wr_hdr *) wr)->sge_count; +} +static __inline__ void c2_wr_set_sge_count(void *wr, u8 sge_count) +{ + ((struct c2wr_hdr *) wr)->sge_count = sge_count; +} +static __inline__ u32 c2_wr_get_wqe_count(void *wr) +{ + return ((struct c2wr_hdr *) wr)->wqe_count; +} +static __inline__ void c2_wr_set_wqe_count(void *wr, u32 wqe_count) +{ + ((struct c2wr_hdr *) wr)->wqe_count = wqe_count; +} + +#endif /* _C2_WR_H_ */ diff --git a/drivers/infiniband/hw/ehca/Kconfig b/drivers/infiniband/hw/ehca/Kconfig new file mode 100644 index 000000000000..922389b64394 --- /dev/null +++ b/drivers/infiniband/hw/ehca/Kconfig @@ -0,0 +1,16 @@ +config INFINIBAND_EHCA + tristate "eHCA support" + depends on IBMEBUS && INFINIBAND + ---help--- + This driver supports the IBM pSeries eHCA InfiniBand adapter. + + To compile the driver as a module, choose M here. The module + will be called ib_ehca. + +config INFINIBAND_EHCA_SCALING + bool "Scaling support (EXPERIMENTAL)" + depends on IBMEBUS && INFINIBAND_EHCA && HOTPLUG_CPU && EXPERIMENTAL + ---help--- + eHCA scaling support schedules the CQ callbacks to different CPUs. + + To enable this feature choose Y here. diff --git a/drivers/infiniband/hw/ehca/Makefile b/drivers/infiniband/hw/ehca/Makefile new file mode 100644 index 000000000000..74d284e46a40 --- /dev/null +++ b/drivers/infiniband/hw/ehca/Makefile @@ -0,0 +1,16 @@ +# Authors: Heiko J Schick <schickhj@de.ibm.com> +# Christoph Raisch <raisch@de.ibm.com> +# Joachim Fenkes <fenkes@de.ibm.com> +# +# Copyright (c) 2005 IBM Corporation +# +# All rights reserved. +# +# This source code is distributed under a dual license of GPL v2.0 and OpenIB BSD. + +obj-$(CONFIG_INFINIBAND_EHCA) += ib_ehca.o + +ib_ehca-objs = ehca_main.o ehca_hca.o ehca_mcast.o ehca_pd.o ehca_av.o ehca_eq.o \ + ehca_cq.o ehca_qp.o ehca_sqp.o ehca_mrmw.o ehca_reqs.o ehca_irq.o \ + ehca_uverbs.o ipz_pt_fn.o hcp_if.o hcp_phyp.o + diff --git a/drivers/infiniband/hw/ehca/ehca_av.c b/drivers/infiniband/hw/ehca/ehca_av.c new file mode 100644 index 000000000000..3bac197f9014 --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_av.c @@ -0,0 +1,271 @@ +/* + * IBM eServer eHCA Infiniband device driver for Linux on POWER + * + * address vector functions + * + * Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com> + * Khadija Souissi <souissik@de.ibm.com> + * Reinhard Ernst <rernst@de.ibm.com> + * Christoph Raisch <raisch@de.ibm.com> + * + * Copyright (c) 2005 IBM Corporation + * + * All rights reserved. + * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. + * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. 
+ * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +#include <asm/current.h> + +#include "ehca_tools.h" +#include "ehca_iverbs.h" +#include "hcp_if.h" + +static struct kmem_cache *av_cache; + +struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) +{ + int ret; + struct ehca_av *av; + struct ehca_shca *shca = container_of(pd->device, struct ehca_shca, + ib_device); + + av = kmem_cache_alloc(av_cache, SLAB_KERNEL); + if (!av) { + ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p", + pd, ah_attr); + return ERR_PTR(-ENOMEM); + } + + av->av.sl = ah_attr->sl; + av->av.dlid = ah_attr->dlid; + av->av.slid_path_bits = ah_attr->src_path_bits; + + if (ehca_static_rate < 0) { + int ah_mult = ib_rate_to_mult(ah_attr->static_rate); + int ehca_mult = + ib_rate_to_mult(shca->sport[ah_attr->port_num].rate ); + + if (ah_mult >= ehca_mult) + av->av.ipd = 0; + else + av->av.ipd = (ah_mult > 0) ? 
+ ((ehca_mult - 1) / ah_mult) : 0; + } else + av->av.ipd = ehca_static_rate; + + av->av.lnh = ah_attr->ah_flags; + av->av.grh.word_0 = EHCA_BMASK_SET(GRH_IPVERSION_MASK, 6); + av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_TCLASS_MASK, + ah_attr->grh.traffic_class); + av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK, + ah_attr->grh.flow_label); + av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK, + ah_attr->grh.hop_limit); + av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1B); + /* set sgid in grh.word_1 */ + if (ah_attr->ah_flags & IB_AH_GRH) { + int rc; + struct ib_port_attr port_attr; + union ib_gid gid; + memset(&port_attr, 0, sizeof(port_attr)); + rc = ehca_query_port(pd->device, ah_attr->port_num, + &port_attr); + if (rc) { /* invalid port number */ + ret = -EINVAL; + ehca_err(pd->device, "Invalid port number " + "ehca_query_port() returned %x " + "pd=%p ah_attr=%p", rc, pd, ah_attr); + goto create_ah_exit1; + } + memset(&gid, 0, sizeof(gid)); + rc = ehca_query_gid(pd->device, + ah_attr->port_num, + ah_attr->grh.sgid_index, &gid); + if (rc) { + ret = -EINVAL; + ehca_err(pd->device, "Failed to retrieve sgid " + "ehca_query_gid() returned %x " + "pd=%p ah_attr=%p", rc, pd, ah_attr); + goto create_ah_exit1; + } + memcpy(&av->av.grh.word_1, &gid, sizeof(gid)); + } + /* for the time being we use a hard coded PMTU of 2048 Bytes */ + av->av.pmtu = 4; + + /* dgid comes in grh.word_3 */ + memcpy(&av->av.grh.word_3, &ah_attr->grh.dgid, + sizeof(ah_attr->grh.dgid)); + + return &av->ib_ah; + +create_ah_exit1: + kmem_cache_free(av_cache, av); + + return ERR_PTR(ret); +} + +int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr) +{ + struct ehca_av *av; + struct ehca_ud_av new_ehca_av; + struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd); + u32 cur_pid = current->tgid; + + if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context && + my_pd->ownpid != cur_pid) { + ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x", + cur_pid, my_pd->ownpid); + return -EINVAL; + } + + memset(&new_ehca_av, 0, sizeof(new_ehca_av)); + new_ehca_av.sl = ah_attr->sl; + new_ehca_av.dlid = ah_attr->dlid; + new_ehca_av.slid_path_bits = ah_attr->src_path_bits; + new_ehca_av.ipd = ah_attr->static_rate; + new_ehca_av.lnh = EHCA_BMASK_SET(GRH_FLAG_MASK, + (ah_attr->ah_flags & IB_AH_GRH) > 0); + new_ehca_av.grh.word_0 = EHCA_BMASK_SET(GRH_TCLASS_MASK, + ah_attr->grh.traffic_class); + new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK, + ah_attr->grh.flow_label); + new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK, + ah_attr->grh.hop_limit); + new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1b); + + /* set sgid in grh.word_1 */ + if (ah_attr->ah_flags & IB_AH_GRH) { + int rc; + struct ib_port_attr port_attr; + union ib_gid gid; + memset(&port_attr, 0, sizeof(port_attr)); + rc = ehca_query_port(ah->device, ah_attr->port_num, + &port_attr); + if (rc) { /* invalid port number */ + ehca_err(ah->device, "Invalid port number " + "ehca_query_port() returned %x " + "ah=%p ah_attr=%p port_num=%x", + rc, ah, ah_attr, ah_attr->port_num); + return -EINVAL; + } + memset(&gid, 0, sizeof(gid)); + rc = ehca_query_gid(ah->device, + ah_attr->port_num, + ah_attr->grh.sgid_index, &gid); + if (rc) { + ehca_err(ah->device, "Failed to retrieve sgid " + "ehca_query_gid() returned %x " + "ah=%p ah_attr=%p port_num=%x " + "sgid_index=%x", + rc, ah, ah_attr, ah_attr->port_num, + ah_attr->grh.sgid_index); + return -EINVAL; + } + memcpy(&new_ehca_av.grh.word_1, &gid, sizeof(gid)); 
+ } + + new_ehca_av.pmtu = 4; /* see also comment in create_ah() */ + + memcpy(&new_ehca_av.grh.word_3, &ah_attr->grh.dgid, + sizeof(ah_attr->grh.dgid)); + + av = container_of(ah, struct ehca_av, ib_ah); + av->av = new_ehca_av; + + return 0; +} + +int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr) +{ + struct ehca_av *av = container_of(ah, struct ehca_av, ib_ah); + struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd); + u32 cur_pid = current->tgid; + + if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context && + my_pd->ownpid != cur_pid) { + ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x", + cur_pid, my_pd->ownpid); + return -EINVAL; + } + + memcpy(&ah_attr->grh.dgid, &av->av.grh.word_3, + sizeof(ah_attr->grh.dgid)); + ah_attr->sl = av->av.sl; + + ah_attr->dlid = av->av.dlid; + + ah_attr->src_path_bits = av->av.slid_path_bits; + ah_attr->static_rate = av->av.ipd; + ah_attr->ah_flags = EHCA_BMASK_GET(GRH_FLAG_MASK, av->av.lnh); + ah_attr->grh.traffic_class = EHCA_BMASK_GET(GRH_TCLASS_MASK, + av->av.grh.word_0); + ah_attr->grh.hop_limit = EHCA_BMASK_GET(GRH_HOPLIMIT_MASK, + av->av.grh.word_0); + ah_attr->grh.flow_label = EHCA_BMASK_GET(GRH_FLOWLABEL_MASK, + av->av.grh.word_0); + + return 0; +} + +int ehca_destroy_ah(struct ib_ah *ah) +{ + struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd); + u32 cur_pid = current->tgid; + + if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context && + my_pd->ownpid != cur_pid) { + ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x", + cur_pid, my_pd->ownpid); + return -EINVAL; + } + + kmem_cache_free(av_cache, container_of(ah, struct ehca_av, ib_ah)); + + return 0; +} + +int ehca_init_av_cache(void) +{ + av_cache = kmem_cache_create("ehca_cache_av", + sizeof(struct ehca_av), 0, + SLAB_HWCACHE_ALIGN, + NULL, NULL); + if (!av_cache) + return -ENOMEM; + return 0; +} + +void ehca_cleanup_av_cache(void) +{ + if (av_cache) + kmem_cache_destroy(av_cache); +} diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h new file mode 100644 index 000000000000..1c722032319c --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_classes.h @@ -0,0 +1,346 @@ +/* + * IBM eServer eHCA Infiniband device driver for Linux on POWER + * + * Struct definition for eHCA internal structures + * + * Authors: Heiko J Schick <schickhj@de.ibm.com> + * Christoph Raisch <raisch@de.ibm.com> + * + * Copyright (c) 2005 IBM Corporation + * + * All rights reserved. + * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. + * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __EHCA_CLASSES_H__ +#define __EHCA_CLASSES_H__ + +#include "ehca_classes.h" +#include "ipz_pt_fn.h" + +struct ehca_module; +struct ehca_qp; +struct ehca_cq; +struct ehca_eq; +struct ehca_mr; +struct ehca_mw; +struct ehca_pd; +struct ehca_av; + +#ifdef CONFIG_PPC64 +#include "ehca_classes_pSeries.h" +#endif + +#include <rdma/ib_verbs.h> +#include <rdma/ib_user_verbs.h> + +#include "ehca_irq.h" + +struct ehca_eq { + u32 length; + struct ipz_queue ipz_queue; + struct ipz_eq_handle ipz_eq_handle; + struct work_struct work; + struct h_galpas galpas; + int is_initialized; + struct ehca_pfeq pf; + spinlock_t spinlock; + struct tasklet_struct interrupt_task; + u32 ist; +}; + +struct ehca_sport { + struct ib_cq *ibcq_aqp1; + struct ib_qp *ibqp_aqp1; + enum ib_rate rate; + enum ib_port_state port_state; +}; + +struct ehca_shca { + struct ib_device ib_device; + struct ibmebus_dev *ibmebus_dev; + u8 num_ports; + int hw_level; + struct list_head shca_list; + struct ipz_adapter_handle ipz_hca_handle; + struct ehca_sport sport[2]; + struct ehca_eq eq; + struct ehca_eq neq; + struct ehca_mr *maxmr; + struct ehca_pd *pd; + struct h_galpas galpas; +}; + +struct ehca_pd { + struct ib_pd ib_pd; + struct ipz_pd fw_pd; + u32 ownpid; +}; + +struct ehca_qp { + struct ib_qp ib_qp; + u32 qp_type; + struct ipz_queue ipz_squeue; + struct ipz_queue ipz_rqueue; + struct h_galpas galpas; + u32 qkey; + u32 real_qp_num; + u32 token; + spinlock_t spinlock_s; + spinlock_t spinlock_r; + u32 sq_max_inline_data_size; + struct ipz_qp_handle ipz_qp_handle; + struct ehca_pfqp pf; + struct ib_qp_init_attr init_attr; + u64 uspace_squeue; + u64 uspace_rqueue; + u64 uspace_fwh; + struct ehca_cq *send_cq; + struct ehca_cq *recv_cq; + unsigned int sqerr_purgeflag; + struct hlist_node list_entries; +}; + +/* must be power of 2 */ +#define QP_HASHTAB_LEN 8 + +struct ehca_cq { + struct ib_cq ib_cq; + struct ipz_queue ipz_queue; + struct h_galpas galpas; + spinlock_t spinlock; + u32 cq_number; + u32 token; + u32 nr_of_entries; + struct ipz_cq_handle ipz_cq_handle; + struct ehca_pfcq pf; + spinlock_t cb_lock; + u64 uspace_queue; + u64 uspace_fwh; + struct hlist_head qp_hashtab[QP_HASHTAB_LEN]; + struct list_head entry; + u32 nr_callbacks; + spinlock_t task_lock; + u32 ownpid; +}; + +enum ehca_mr_flag { + EHCA_MR_FLAG_FMR = 0x80000000, /* FMR, created with ehca_alloc_fmr */ + EHCA_MR_FLAG_MAXMR = 0x40000000, /* max-MR */ +}; + +struct ehca_mr { + union { + struct ib_mr ib_mr; /* must always be first in ehca_mr */ + struct ib_fmr ib_fmr; /* must always be first in ehca_mr */ + } ib; + spinlock_t mrlock; + + enum ehca_mr_flag flags; + u32 num_pages; /* number of MR pages */ + u32 num_4k; /* number of 4k "page" portions to form MR */ + int acl; /* ACL (stored here for usage in reregister) */ + u64 *start; /* virtual start address (stored here for */ + /* usage in reregister) */ + u64 size; /* size (stored here for usage in reregister) */ + u32 fmr_page_size; /* page size for FMR */ + u32 
fmr_max_pages; /* max pages for FMR */ + u32 fmr_max_maps; /* max outstanding maps for FMR */ + u32 fmr_map_cnt; /* map counter for FMR */ + /* fw specific data */ + struct ipz_mrmw_handle ipz_mr_handle; /* MR handle for h-calls */ + struct h_galpas galpas; + /* data for userspace bridge */ + u32 nr_of_pages; + void *pagearray; +}; + +struct ehca_mw { + struct ib_mw ib_mw; /* gen2 mw, must always be first in ehca_mw */ + spinlock_t mwlock; + + u8 never_bound; /* indication MW was never bound */ + struct ipz_mrmw_handle ipz_mw_handle; /* MW handle for h-calls */ + struct h_galpas galpas; +}; + +enum ehca_mr_pgi_type { + EHCA_MR_PGI_PHYS = 1, /* type of ehca_reg_phys_mr, + * ehca_rereg_phys_mr, + * ehca_reg_internal_maxmr */ + EHCA_MR_PGI_USER = 2, /* type of ehca_reg_user_mr */ + EHCA_MR_PGI_FMR = 3 /* type of ehca_map_phys_fmr */ +}; + +struct ehca_mr_pginfo { + enum ehca_mr_pgi_type type; + u64 num_pages; + u64 page_cnt; + u64 num_4k; /* number of 4k "page" portions */ + u64 page_4k_cnt; /* counter for 4k "page" portions */ + u64 next_4k; /* next 4k "page" portion in buffer/chunk/listelem */ + + /* type EHCA_MR_PGI_PHYS section */ + int num_phys_buf; + struct ib_phys_buf *phys_buf_array; + u64 next_buf; + + /* type EHCA_MR_PGI_USER section */ + struct ib_umem *region; + struct ib_umem_chunk *next_chunk; + u64 next_nmap; + + /* type EHCA_MR_PGI_FMR section */ + u64 *page_list; + u64 next_listelem; + /* next_4k also used within EHCA_MR_PGI_FMR */ +}; + +/* output parameters for MR/FMR hipz calls */ +struct ehca_mr_hipzout_parms { + struct ipz_mrmw_handle handle; + u32 lkey; + u32 rkey; + u64 len; + u64 vaddr; + u32 acl; +}; + +/* output parameters for MW hipz calls */ +struct ehca_mw_hipzout_parms { + struct ipz_mrmw_handle handle; + u32 rkey; +}; + +struct ehca_av { + struct ib_ah ib_ah; + struct ehca_ud_av av; +}; + +struct ehca_ucontext { + struct ib_ucontext ib_ucontext; +}; + +struct ehca_module *ehca_module_new(void); + +int ehca_module_delete(struct ehca_module *me); + +int ehca_eq_ctor(struct ehca_eq *eq); + +int ehca_eq_dtor(struct ehca_eq *eq); + +struct ehca_shca *ehca_shca_new(void); + +int ehca_shca_delete(struct ehca_shca *me); + +struct ehca_sport *ehca_sport_new(struct ehca_shca *anchor); + +int ehca_init_pd_cache(void); +void ehca_cleanup_pd_cache(void); +int ehca_init_cq_cache(void); +void ehca_cleanup_cq_cache(void); +int ehca_init_qp_cache(void); +void ehca_cleanup_qp_cache(void); +int ehca_init_av_cache(void); +void ehca_cleanup_av_cache(void); +int ehca_init_mrmw_cache(void); +void ehca_cleanup_mrmw_cache(void); + +extern spinlock_t ehca_qp_idr_lock; +extern spinlock_t ehca_cq_idr_lock; +extern struct idr ehca_qp_idr; +extern struct idr ehca_cq_idr; + +extern int ehca_static_rate; +extern int ehca_port_act_time; +extern int ehca_use_hp_mr; + +struct ipzu_queue_resp { + u64 queue; /* points to first queue entry */ + u32 qe_size; /* queue entry size */ + u32 act_nr_of_sg; + u32 queue_length; /* queue length allocated in bytes */ + u32 pagesize; + u32 toggle_state; + u32 dummy; /* padding for 8 byte alignment */ +}; + +struct ehca_create_cq_resp { + u32 cq_number; + u32 token; + struct ipzu_queue_resp ipz_queue; + struct h_galpas galpas; +}; + +struct ehca_create_qp_resp { + u32 qp_num; + u32 token; + u32 qp_type; + u32 qkey; + /* qp_num assigned by ehca: sqp0/1 may have got different numbers */ + u32 real_qp_num; + u32 dummy; /* padding for 8 byte alignment */ + struct ipzu_queue_resp ipz_squeue; + struct ipzu_queue_resp ipz_rqueue; + struct h_galpas galpas; +}; + 
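+/*
+ * These *_resp structures form the private ABI that the create verbs
+ * copy back to the userspace library.  Abridged from ehca_create_cq()
+ * further below (illustrative excerpt only, error path simplified):
+ *
+ *	struct ehca_create_cq_resp resp;
+ *
+ *	memset(&resp, 0, sizeof(resp));
+ *	resp.cq_number = my_cq->cq_number;
+ *	resp.token = my_cq->token;
+ *	if (ib_copy_to_udata(udata, &resp, sizeof(resp)))
+ *		goto error;
+ *
+ * The 'dummy' members pad each structure to a multiple of 8 bytes so
+ * that the u64 members keep their natural alignment on both sides of
+ * the kernel/user boundary.
+ */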
+struct ehca_alloc_cq_parms { + u32 nr_cqe; + u32 act_nr_of_entries; + u32 act_pages; + struct ipz_eq_handle eq_handle; +}; + +struct ehca_alloc_qp_parms { + int servicetype; + int sigtype; + int daqp_ctrl; + int max_send_sge; + int max_recv_sge; + int ud_av_l_key_ctl; + + u16 act_nr_send_wqes; + u16 act_nr_recv_wqes; + u8 act_nr_recv_sges; + u8 act_nr_send_sges; + + u32 nr_rq_pages; + u32 nr_sq_pages; + + struct ipz_eq_handle ipz_eq_handle; + struct ipz_pd pd; +}; + +int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp); +int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int qp_num); +struct ehca_qp* ehca_cq_get_qp(struct ehca_cq *cq, int qp_num); + +#endif diff --git a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h new file mode 100644 index 000000000000..5665f213b81a --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h @@ -0,0 +1,236 @@ +/* + * IBM eServer eHCA Infiniband device driver for Linux on POWER + * + * pSeries interface definitions + * + * Authors: Waleri Fomin <fomin@de.ibm.com> + * Christoph Raisch <raisch@de.ibm.com> + * + * Copyright (c) 2005 IBM Corporation + * + * All rights reserved. + * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. + * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __EHCA_CLASSES_PSERIES_H__ +#define __EHCA_CLASSES_PSERIES_H__ + +#include "hcp_phyp.h" +#include "ipz_pt_fn.h" + + +struct ehca_pfqp { + struct ipz_qpt sqpt; + struct ipz_qpt rqpt; +}; + +struct ehca_pfcq { + struct ipz_qpt qpt; + u32 cqnr; +}; + +struct ehca_pfeq { + struct ipz_qpt qpt; + struct h_galpa galpa; + u32 eqnr; +}; + +struct ipz_adapter_handle { + u64 handle; +}; + +struct ipz_cq_handle { + u64 handle; +}; + +struct ipz_eq_handle { + u64 handle; +}; + +struct ipz_qp_handle { + u64 handle; +}; +struct ipz_mrmw_handle { + u64 handle; +}; + +struct ipz_pd { + u32 value; +}; + +struct hcp_modify_qp_control_block { + u32 qkey; /* 00 */ + u32 rdd; /* reliable datagram domain */ + u32 send_psn; /* 02 */ + u32 receive_psn; /* 03 */ + u32 prim_phys_port; /* 04 */ + u32 alt_phys_port; /* 05 */ + u32 prim_p_key_idx; /* 06 */ + u32 alt_p_key_idx; /* 07 */ + u32 rdma_atomic_ctrl; /* 08 */ + u32 qp_state; /* 09 */ + u32 reserved_10; /* 10 */ + u32 rdma_nr_atomic_resp_res; /* 11 */ + u32 path_migration_state; /* 12 */ + u32 rdma_atomic_outst_dest_qp; /* 13 */ + u32 dest_qp_nr; /* 14 */ + u32 min_rnr_nak_timer_field; /* 15 */ + u32 service_level; /* 16 */ + u32 send_grh_flag; /* 17 */ + u32 retry_count; /* 18 */ + u32 timeout; /* 19 */ + u32 path_mtu; /* 20 */ + u32 max_static_rate; /* 21 */ + u32 dlid; /* 22 */ + u32 rnr_retry_count; /* 23 */ + u32 source_path_bits; /* 24 */ + u32 traffic_class; /* 25 */ + u32 hop_limit; /* 26 */ + u32 source_gid_idx; /* 27 */ + u32 flow_label; /* 28 */ + u32 reserved_29; /* 29 */ + union { /* 30 */ + u64 dw[2]; + u8 byte[16]; + } dest_gid; + u32 service_level_al; /* 34 */ + u32 send_grh_flag_al; /* 35 */ + u32 retry_count_al; /* 36 */ + u32 timeout_al; /* 37 */ + u32 max_static_rate_al; /* 38 */ + u32 dlid_al; /* 39 */ + u32 rnr_retry_count_al; /* 40 */ + u32 source_path_bits_al; /* 41 */ + u32 traffic_class_al; /* 42 */ + u32 hop_limit_al; /* 43 */ + u32 source_gid_idx_al; /* 44 */ + u32 flow_label_al; /* 45 */ + u32 reserved_46; /* 46 */ + u32 reserved_47; /* 47 */ + union { /* 48 */ + u64 dw[2]; + u8 byte[16]; + } dest_gid_al; + u32 max_nr_outst_send_wr; /* 52 */ + u32 max_nr_outst_recv_wr; /* 53 */ + u32 disable_ete_credit_check; /* 54 */ + u32 qp_number; /* 55 */ + u64 send_queue_handle; /* 56 */ + u64 recv_queue_handle; /* 58 */ + u32 actual_nr_sges_in_sq_wqe; /* 60 */ + u32 actual_nr_sges_in_rq_wqe; /* 61 */ + u32 qp_enable; /* 62 */ + u32 curr_srq_limit; /* 63 */ + u64 qp_aff_asyn_ev_log_reg; /* 64 */ + u64 shared_rq_hndl; /* 66 */ + u64 trigg_doorbell_qp_hndl; /* 68 */ + u32 reserved_70_127[58]; /* 70 */ +}; + +#define MQPCB_MASK_QKEY EHCA_BMASK_IBM(0,0) +#define MQPCB_MASK_SEND_PSN EHCA_BMASK_IBM(2,2) +#define MQPCB_MASK_RECEIVE_PSN EHCA_BMASK_IBM(3,3) +#define MQPCB_MASK_PRIM_PHYS_PORT EHCA_BMASK_IBM(4,4) +#define MQPCB_PRIM_PHYS_PORT EHCA_BMASK_IBM(24,31) +#define MQPCB_MASK_ALT_PHYS_PORT EHCA_BMASK_IBM(5,5) +#define MQPCB_MASK_PRIM_P_KEY_IDX EHCA_BMASK_IBM(6,6) +#define MQPCB_PRIM_P_KEY_IDX EHCA_BMASK_IBM(24,31) +#define MQPCB_MASK_ALT_P_KEY_IDX EHCA_BMASK_IBM(7,7) +#define MQPCB_MASK_RDMA_ATOMIC_CTRL EHCA_BMASK_IBM(8,8) +#define MQPCB_MASK_QP_STATE EHCA_BMASK_IBM(9,9) +#define MQPCB_QP_STATE EHCA_BMASK_IBM(24,31) +#define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES EHCA_BMASK_IBM(11,11) +#define MQPCB_MASK_PATH_MIGRATION_STATE EHCA_BMASK_IBM(12,12) +#define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP EHCA_BMASK_IBM(13,13) +#define MQPCB_MASK_DEST_QP_NR EHCA_BMASK_IBM(14,14) +#define MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD EHCA_BMASK_IBM(15,15) 
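+/*
+ * The bit ranges above and below use IBM (MSB-0) numbering, i.e.
+ * EHCA_BMASK_IBM(from, to) describes a field by its first and last
+ * bit counted from the most significant bit; fields are extracted and
+ * inserted with the EHCA_BMASK_GET()/EHCA_BMASK_SET() helpers from
+ * ehca_tools.h.  The MQPCB_MASK_* constants mark which control block
+ * fields are valid when building the update mask for the modify-QP
+ * h-call, while the unprefixed MQPCB_* constants locate the field
+ * data within its word.
+ */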
+#define MQPCB_MASK_SERVICE_LEVEL EHCA_BMASK_IBM(16,16) +#define MQPCB_MASK_SEND_GRH_FLAG EHCA_BMASK_IBM(17,17) +#define MQPCB_MASK_RETRY_COUNT EHCA_BMASK_IBM(18,18) +#define MQPCB_MASK_TIMEOUT EHCA_BMASK_IBM(19,19) +#define MQPCB_MASK_PATH_MTU EHCA_BMASK_IBM(20,20) +#define MQPCB_PATH_MTU EHCA_BMASK_IBM(24,31) +#define MQPCB_MASK_MAX_STATIC_RATE EHCA_BMASK_IBM(21,21) +#define MQPCB_MAX_STATIC_RATE EHCA_BMASK_IBM(24,31) +#define MQPCB_MASK_DLID EHCA_BMASK_IBM(22,22) +#define MQPCB_DLID EHCA_BMASK_IBM(16,31) +#define MQPCB_MASK_RNR_RETRY_COUNT EHCA_BMASK_IBM(23,23) +#define MQPCB_RNR_RETRY_COUNT EHCA_BMASK_IBM(29,31) +#define MQPCB_MASK_SOURCE_PATH_BITS EHCA_BMASK_IBM(24,24) +#define MQPCB_SOURCE_PATH_BITS EHCA_BMASK_IBM(25,31) +#define MQPCB_MASK_TRAFFIC_CLASS EHCA_BMASK_IBM(25,25) +#define MQPCB_TRAFFIC_CLASS EHCA_BMASK_IBM(24,31) +#define MQPCB_MASK_HOP_LIMIT EHCA_BMASK_IBM(26,26) +#define MQPCB_HOP_LIMIT EHCA_BMASK_IBM(24,31) +#define MQPCB_MASK_SOURCE_GID_IDX EHCA_BMASK_IBM(27,27) +#define MQPCB_SOURCE_GID_IDX EHCA_BMASK_IBM(24,31) +#define MQPCB_MASK_FLOW_LABEL EHCA_BMASK_IBM(28,28) +#define MQPCB_FLOW_LABEL EHCA_BMASK_IBM(12,31) +#define MQPCB_MASK_DEST_GID EHCA_BMASK_IBM(30,30) +#define MQPCB_MASK_SERVICE_LEVEL_AL EHCA_BMASK_IBM(31,31) +#define MQPCB_SERVICE_LEVEL_AL EHCA_BMASK_IBM(28,31) +#define MQPCB_MASK_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(32,32) +#define MQPCB_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(31,31) +#define MQPCB_MASK_RETRY_COUNT_AL EHCA_BMASK_IBM(33,33) +#define MQPCB_RETRY_COUNT_AL EHCA_BMASK_IBM(29,31) +#define MQPCB_MASK_TIMEOUT_AL EHCA_BMASK_IBM(34,34) +#define MQPCB_TIMEOUT_AL EHCA_BMASK_IBM(27,31) +#define MQPCB_MASK_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(35,35) +#define MQPCB_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(24,31) +#define MQPCB_MASK_DLID_AL EHCA_BMASK_IBM(36,36) +#define MQPCB_DLID_AL EHCA_BMASK_IBM(16,31) +#define MQPCB_MASK_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(37,37) +#define MQPCB_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(29,31) +#define MQPCB_MASK_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(38,38) +#define MQPCB_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(25,31) +#define MQPCB_MASK_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(39,39) +#define MQPCB_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(24,31) +#define MQPCB_MASK_HOP_LIMIT_AL EHCA_BMASK_IBM(40,40) +#define MQPCB_HOP_LIMIT_AL EHCA_BMASK_IBM(24,31) +#define MQPCB_MASK_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(41,41) +#define MQPCB_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(24,31) +#define MQPCB_MASK_FLOW_LABEL_AL EHCA_BMASK_IBM(42,42) +#define MQPCB_FLOW_LABEL_AL EHCA_BMASK_IBM(12,31) +#define MQPCB_MASK_DEST_GID_AL EHCA_BMASK_IBM(44,44) +#define MQPCB_MASK_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(45,45) +#define MQPCB_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(16,31) +#define MQPCB_MASK_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(46,46) +#define MQPCB_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(16,31) +#define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(47,47) +#define MQPCB_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(31,31) +#define MQPCB_QP_NUMBER EHCA_BMASK_IBM(8,31) +#define MQPCB_MASK_QP_ENABLE EHCA_BMASK_IBM(48,48) +#define MQPCB_QP_ENABLE EHCA_BMASK_IBM(31,31) +#define MQPCB_MASK_CURR_SQR_LIMIT EHCA_BMASK_IBM(49,49) +#define MQPCB_CURR_SQR_LIMIT EHCA_BMASK_IBM(15,31) +#define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG EHCA_BMASK_IBM(50,50) +#define MQPCB_MASK_SHARED_RQ_HNDL EHCA_BMASK_IBM(51,51) + +#endif /* __EHCA_CLASSES_PSERIES_H__ */ diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c new file mode 100644 index 000000000000..458fe19648a1 --- /dev/null +++ 
b/drivers/infiniband/hw/ehca/ehca_cq.c @@ -0,0 +1,427 @@ +/* + * IBM eServer eHCA Infiniband device driver for Linux on POWER + * + * Completion queue handling + * + * Authors: Waleri Fomin <fomin@de.ibm.com> + * Khadija Souissi <souissi@de.ibm.com> + * Reinhard Ernst <rernst@de.ibm.com> + * Heiko J Schick <schickhj@de.ibm.com> + * Hoang-Nam Nguyen <hnguyen@de.ibm.com> + * + * + * Copyright (c) 2005 IBM Corporation + * + * All rights reserved. + * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. + * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <asm/current.h> + +#include "ehca_iverbs.h" +#include "ehca_classes.h" +#include "ehca_irq.h" +#include "hcp_if.h" + +static struct kmem_cache *cq_cache; + +int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp) +{ + unsigned int qp_num = qp->real_qp_num; + unsigned int key = qp_num & (QP_HASHTAB_LEN-1); + unsigned long spl_flags; + + spin_lock_irqsave(&cq->spinlock, spl_flags); + hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]); + spin_unlock_irqrestore(&cq->spinlock, spl_flags); + + ehca_dbg(cq->ib_cq.device, "cq_num=%x real_qp_num=%x", + cq->cq_number, qp_num); + + return 0; +} + +int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num) +{ + int ret = -EINVAL; + unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1); + struct hlist_node *iter; + struct ehca_qp *qp; + unsigned long spl_flags; + + spin_lock_irqsave(&cq->spinlock, spl_flags); + hlist_for_each(iter, &cq->qp_hashtab[key]) { + qp = hlist_entry(iter, struct ehca_qp, list_entries); + if (qp->real_qp_num == real_qp_num) { + hlist_del(iter); + ehca_dbg(cq->ib_cq.device, + "removed qp from cq .cq_num=%x real_qp_num=%x", + cq->cq_number, real_qp_num); + ret = 0; + break; + } + } + spin_unlock_irqrestore(&cq->spinlock, spl_flags); + if (ret) + ehca_err(cq->ib_cq.device, + "qp not found cq_num=%x real_qp_num=%x", + cq->cq_number, real_qp_num); + + return ret; +} + +struct ehca_qp* ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num) +{ + struct ehca_qp *ret = NULL; + unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1); + struct hlist_node *iter; + struct ehca_qp *qp; + hlist_for_each(iter, &cq->qp_hashtab[key]) { + qp = hlist_entry(iter, struct ehca_qp, list_entries); + if (qp->real_qp_num == real_qp_num) { + ret = qp; + break; + } + } + return ret; +} + +struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, + struct ib_ucontext *context, + struct ib_udata *udata) +{ + static const u32 additional_cqe = 20; + struct ib_cq *cq; + struct ehca_cq *my_cq; + struct ehca_shca *shca = + container_of(device, struct ehca_shca, ib_device); + struct ipz_adapter_handle adapter_handle; + struct ehca_alloc_cq_parms param; /* h_call's out parameters */ + struct h_galpa gal; + void *vpage; + u32 counter; + u64 rpage, cqx_fec, h_ret; + int ipz_rc, ret, i; + unsigned long flags; + + if (cqe >= 0xFFFFFFFF - 64 - additional_cqe) + return ERR_PTR(-EINVAL); + + my_cq = kmem_cache_alloc(cq_cache, SLAB_KERNEL); + if (!my_cq) { + ehca_err(device, "Out of memory for ehca_cq struct device=%p", + device); + return ERR_PTR(-ENOMEM); + } + + memset(my_cq, 0, sizeof(struct ehca_cq)); + memset(¶m, 0, sizeof(struct ehca_alloc_cq_parms)); + + spin_lock_init(&my_cq->spinlock); + spin_lock_init(&my_cq->cb_lock); + spin_lock_init(&my_cq->task_lock); + my_cq->ownpid = current->tgid; + + cq = &my_cq->ib_cq; + + adapter_handle = shca->ipz_hca_handle; + param.eq_handle = shca->eq.ipz_eq_handle; + + do { + if (!idr_pre_get(&ehca_cq_idr, GFP_KERNEL)) { + cq = ERR_PTR(-ENOMEM); + ehca_err(device, "Can't reserve idr nr. device=%p", + device); + goto create_cq_exit1; + } + + spin_lock_irqsave(&ehca_cq_idr_lock, flags); + ret = idr_get_new(&ehca_cq_idr, my_cq, &my_cq->token); + spin_unlock_irqrestore(&ehca_cq_idr_lock, flags); + + } while (ret == -EAGAIN); + + if (ret) { + cq = ERR_PTR(-ENOMEM); + ehca_err(device, "Can't allocate new idr entry. device=%p", + device); + goto create_cq_exit1; + } + + /* + * CQs maximum depth is 4GB-64, but we need additional 20 as buffer + * for receiving errors CQEs. 
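+ * Hence nr_cqe = cqe + additional_cqe is requested below, the guard
+ * above rejects cqe values that would overflow this, and the depth
+ * reported back in ib_cq.cqe has additional_cqe subtracted again.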
+ */ + param.nr_cqe = cqe + additional_cqe; + h_ret = hipz_h_alloc_resource_cq(adapter_handle, my_cq, ¶m); + + if (h_ret != H_SUCCESS) { + ehca_err(device, "hipz_h_alloc_resource_cq() failed " + "h_ret=%lx device=%p", h_ret, device); + cq = ERR_PTR(ehca2ib_return_code(h_ret)); + goto create_cq_exit2; + } + + ipz_rc = ipz_queue_ctor(&my_cq->ipz_queue, param.act_pages, + EHCA_PAGESIZE, sizeof(struct ehca_cqe), 0); + if (!ipz_rc) { + ehca_err(device, "ipz_queue_ctor() failed ipz_rc=%x device=%p", + ipz_rc, device); + cq = ERR_PTR(-EINVAL); + goto create_cq_exit3; + } + + for (counter = 0; counter < param.act_pages; counter++) { + vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue); + if (!vpage) { + ehca_err(device, "ipz_qpageit_get_inc() " + "returns NULL device=%p", device); + cq = ERR_PTR(-EAGAIN); + goto create_cq_exit4; + } + rpage = virt_to_abs(vpage); + + h_ret = hipz_h_register_rpage_cq(adapter_handle, + my_cq->ipz_cq_handle, + &my_cq->pf, + 0, + 0, + rpage, + 1, + my_cq->galpas. + kernel); + + if (h_ret < H_SUCCESS) { + ehca_err(device, "hipz_h_register_rpage_cq() failed " + "ehca_cq=%p cq_num=%x h_ret=%lx counter=%i " + "act_pages=%i", my_cq, my_cq->cq_number, + h_ret, counter, param.act_pages); + cq = ERR_PTR(-EINVAL); + goto create_cq_exit4; + } + + if (counter == (param.act_pages - 1)) { + vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue); + if ((h_ret != H_SUCCESS) || vpage) { + ehca_err(device, "Registration of pages not " + "complete ehca_cq=%p cq_num=%x " + "h_ret=%lx", my_cq, my_cq->cq_number, + h_ret); + cq = ERR_PTR(-EAGAIN); + goto create_cq_exit4; + } + } else { + if (h_ret != H_PAGE_REGISTERED) { + ehca_err(device, "Registration of page failed " + "ehca_cq=%p cq_num=%x h_ret=%lx" + "counter=%i act_pages=%i", + my_cq, my_cq->cq_number, + h_ret, counter, param.act_pages); + cq = ERR_PTR(-ENOMEM); + goto create_cq_exit4; + } + } + } + + ipz_qeit_reset(&my_cq->ipz_queue); + + gal = my_cq->galpas.kernel; + cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec)); + ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%lx", + my_cq, my_cq->cq_number, cqx_fec); + + my_cq->ib_cq.cqe = my_cq->nr_of_entries = + param.act_nr_of_entries - additional_cqe; + my_cq->cq_number = (my_cq->ipz_cq_handle.handle) & 0xffff; + + for (i = 0; i < QP_HASHTAB_LEN; i++) + INIT_HLIST_HEAD(&my_cq->qp_hashtab[i]); + + if (context) { + struct ipz_queue *ipz_queue = &my_cq->ipz_queue; + struct ehca_create_cq_resp resp; + struct vm_area_struct *vma; + memset(&resp, 0, sizeof(resp)); + resp.cq_number = my_cq->cq_number; + resp.token = my_cq->token; + resp.ipz_queue.qe_size = ipz_queue->qe_size; + resp.ipz_queue.act_nr_of_sg = ipz_queue->act_nr_of_sg; + resp.ipz_queue.queue_length = ipz_queue->queue_length; + resp.ipz_queue.pagesize = ipz_queue->pagesize; + resp.ipz_queue.toggle_state = ipz_queue->toggle_state; + ret = ehca_mmap_nopage(((u64)(my_cq->token) << 32) | 0x12000000, + ipz_queue->queue_length, + (void**)&resp.ipz_queue.queue, + &vma); + if (ret) { + ehca_err(device, "Could not mmap queue pages"); + cq = ERR_PTR(ret); + goto create_cq_exit4; + } + my_cq->uspace_queue = resp.ipz_queue.queue; + resp.galpas = my_cq->galpas; + ret = ehca_mmap_register(my_cq->galpas.user.fw_handle, + (void**)&resp.galpas.kernel.fw_handle, + &vma); + if (ret) { + ehca_err(device, "Could not mmap fw_handle"); + cq = ERR_PTR(ret); + goto create_cq_exit5; + } + my_cq->uspace_fwh = (u64)resp.galpas.kernel.fw_handle; + if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { + ehca_err(device, "Copy to udata failed."); + goto create_cq_exit6; + 
} + } + + return cq; + +create_cq_exit6: + ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE); + +create_cq_exit5: + ehca_munmap(my_cq->uspace_queue, my_cq->ipz_queue.queue_length); + +create_cq_exit4: + ipz_queue_dtor(&my_cq->ipz_queue); + +create_cq_exit3: + h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1); + if (h_ret != H_SUCCESS) + ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p " + "cq_num=%x h_ret=%lx", my_cq, my_cq->cq_number, h_ret); + +create_cq_exit2: + spin_lock_irqsave(&ehca_cq_idr_lock, flags); + idr_remove(&ehca_cq_idr, my_cq->token); + spin_unlock_irqrestore(&ehca_cq_idr_lock, flags); + +create_cq_exit1: + kmem_cache_free(cq_cache, my_cq); + + return cq; +} + +int ehca_destroy_cq(struct ib_cq *cq) +{ + u64 h_ret; + int ret; + struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); + int cq_num = my_cq->cq_number; + struct ib_device *device = cq->device; + struct ehca_shca *shca = container_of(device, struct ehca_shca, + ib_device); + struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle; + u32 cur_pid = current->tgid; + unsigned long flags; + + spin_lock_irqsave(&ehca_cq_idr_lock, flags); + while (my_cq->nr_callbacks) + yield(); + + idr_remove(&ehca_cq_idr, my_cq->token); + spin_unlock_irqrestore(&ehca_cq_idr_lock, flags); + + if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) { + ehca_err(device, "Invalid caller pid=%x ownpid=%x", + cur_pid, my_cq->ownpid); + return -EINVAL; + } + + /* un-mmap if vma alloc */ + if (my_cq->uspace_queue ) { + ret = ehca_munmap(my_cq->uspace_queue, + my_cq->ipz_queue.queue_length); + if (ret) + ehca_err(device, "Could not munmap queue ehca_cq=%p " + "cq_num=%x", my_cq, cq_num); + ret = ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE); + if (ret) + ehca_err(device, "Could not munmap fwh ehca_cq=%p " + "cq_num=%x", my_cq, cq_num); + } + + h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0); + if (h_ret == H_R_STATE) { + /* cq in err: read err data and destroy it forcibly */ + ehca_dbg(device, "ehca_cq=%p cq_num=%x ressource=%lx in err " + "state. 
Try to delete it forcibly.", + my_cq, cq_num, my_cq->ipz_cq_handle.handle); + ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle); + h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1); + if (h_ret == H_SUCCESS) + ehca_dbg(device, "cq_num=%x deleted successfully.", + cq_num); + } + if (h_ret != H_SUCCESS) { + ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lx " + "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num); + return ehca2ib_return_code(h_ret); + } + ipz_queue_dtor(&my_cq->ipz_queue); + kmem_cache_free(cq_cache, my_cq); + + return 0; +} + +int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata) +{ + struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); + u32 cur_pid = current->tgid; + + if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) { + ehca_err(cq->device, "Invalid caller pid=%x ownpid=%x", + cur_pid, my_cq->ownpid); + return -EINVAL; + } + + /* TODO: proper resize needs to be done */ + ehca_err(cq->device, "not implemented yet"); + + return -EFAULT; +} + +int ehca_init_cq_cache(void) +{ + cq_cache = kmem_cache_create("ehca_cache_cq", + sizeof(struct ehca_cq), 0, + SLAB_HWCACHE_ALIGN, + NULL, NULL); + if (!cq_cache) + return -ENOMEM; + return 0; +} + +void ehca_cleanup_cq_cache(void) +{ + if (cq_cache) + kmem_cache_destroy(cq_cache); +} diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/infiniband/hw/ehca/ehca_eq.c new file mode 100644 index 000000000000..5281dec66f12 --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_eq.c @@ -0,0 +1,185 @@ +/* + * IBM eServer eHCA Infiniband device driver for Linux on POWER + * + * Event queue handling + * + * Authors: Waleri Fomin <fomin@de.ibm.com> + * Khadija Souissi <souissi@de.ibm.com> + * Reinhard Ernst <rernst@de.ibm.com> + * Heiko J Schick <schickhj@de.ibm.com> + * Hoang-Nam Nguyen <hnguyen@de.ibm.com> + * + * + * Copyright (c) 2005 IBM Corporation + * + * All rights reserved. + * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. + * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "ehca_classes.h" +#include "ehca_irq.h" +#include "ehca_iverbs.h" +#include "ehca_qes.h" +#include "hcp_if.h" +#include "ipz_pt_fn.h" + +int ehca_create_eq(struct ehca_shca *shca, + struct ehca_eq *eq, + const enum ehca_eq_type type, const u32 length) +{ + u64 ret; + u32 nr_pages; + u32 i; + void *vpage; + struct ib_device *ib_dev = &shca->ib_device; + + spin_lock_init(&eq->spinlock); + eq->is_initialized = 0; + + if (type != EHCA_EQ && type != EHCA_NEQ) { + ehca_err(ib_dev, "Invalid EQ type %x. eq=%p", type, eq); + return -EINVAL; + } + if (!length) { + ehca_err(ib_dev, "EQ length must not be zero. eq=%p", eq); + return -EINVAL; + } + + ret = hipz_h_alloc_resource_eq(shca->ipz_hca_handle, + &eq->pf, + type, + length, + &eq->ipz_eq_handle, + &eq->length, + &nr_pages, &eq->ist); + + if (ret != H_SUCCESS) { + ehca_err(ib_dev, "Can't allocate EQ/NEQ. eq=%p", eq); + return -EINVAL; + } + + ret = ipz_queue_ctor(&eq->ipz_queue, nr_pages, + EHCA_PAGESIZE, sizeof(struct ehca_eqe), 0); + if (!ret) { + ehca_err(ib_dev, "Can't allocate EQ pages eq=%p", eq); + goto create_eq_exit1; + } + + for (i = 0; i < nr_pages; i++) { + u64 rpage; + + if (!(vpage = ipz_qpageit_get_inc(&eq->ipz_queue))) { + ret = H_RESOURCE; + goto create_eq_exit2; + } + + rpage = virt_to_abs(vpage); + ret = hipz_h_register_rpage_eq(shca->ipz_hca_handle, + eq->ipz_eq_handle, + &eq->pf, + 0, 0, rpage, 1); + + if (i == (nr_pages - 1)) { + /* last page */ + vpage = ipz_qpageit_get_inc(&eq->ipz_queue); + if (ret != H_SUCCESS || vpage) + goto create_eq_exit2; + } else { + if (ret != H_PAGE_REGISTERED || !vpage) + goto create_eq_exit2; + } + } + + ipz_qeit_reset(&eq->ipz_queue); + + /* register interrupt handlers and initialize work queues */ + if (type == EHCA_EQ) { + ret = ibmebus_request_irq(NULL, eq->ist, ehca_interrupt_eq, + SA_INTERRUPT, "ehca_eq", + (void *)shca); + if (ret < 0) + ehca_err(ib_dev, "Can't map interrupt handler."); + + tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca); + } else if (type == EHCA_NEQ) { + ret = ibmebus_request_irq(NULL, eq->ist, ehca_interrupt_neq, + SA_INTERRUPT, "ehca_neq", + (void *)shca); + if (ret < 0) + ehca_err(ib_dev, "Can't map interrupt handler."); + + tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca); + } + + eq->is_initialized = 1; + + return 0; + +create_eq_exit2: + ipz_queue_dtor(&eq->ipz_queue); + +create_eq_exit1: + hipz_h_destroy_eq(shca->ipz_hca_handle, eq); + + return -EINVAL; +} + +void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq) +{ + unsigned long flags; + void *eqe; + + spin_lock_irqsave(&eq->spinlock, flags); + eqe = ipz_eqit_eq_get_inc_valid(&eq->ipz_queue); + spin_unlock_irqrestore(&eq->spinlock, flags); + + return eqe; +} + +int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq) +{ + unsigned long flags; + u64 h_ret; + + spin_lock_irqsave(&eq->spinlock, flags); + ibmebus_free_irq(NULL, eq->ist, (void *)shca); + + h_ret = hipz_h_destroy_eq(shca->ipz_hca_handle, eq); + + spin_unlock_irqrestore(&eq->spinlock, flags); + + if (h_ret != H_SUCCESS) { + ehca_err(&shca->ib_device, "Can't free EQ resources."); + return -EINVAL; + } + ipz_queue_dtor(&eq->ipz_queue); + + return 0; +} diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c new file mode 100644 index 000000000000..5eae6ac48425 --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_hca.c @@ -0,0 +1,241 @@ +/* + * IBM eServer eHCA Infiniband device driver for Linux on POWER + * + * HCA query functions + * + * Authors: Heiko J 
Schick <schickhj@de.ibm.com> + * Christoph Raisch <raisch@de.ibm.com> + * + * Copyright (c) 2005 IBM Corporation + * + * All rights reserved. + * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. + * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "ehca_tools.h" +#include "hcp_if.h" + +int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props) +{ + int ret = 0; + struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, + ib_device); + struct hipz_query_hca *rblock; + + rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); + if (!rblock) { + ehca_err(&shca->ib_device, "Can't allocate rblock memory."); + return -ENOMEM; + } + + if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { + ehca_err(&shca->ib_device, "Can't query device properties"); + ret = -EINVAL; + goto query_device1; + } + + memset(props, 0, sizeof(struct ib_device_attr)); + props->fw_ver = rblock->hw_ver; + props->max_mr_size = rblock->max_mr_size; + props->vendor_id = rblock->vendor_id >> 8; + props->vendor_part_id = rblock->vendor_part_id >> 16; + props->hw_ver = rblock->hw_ver; + props->max_qp = min_t(int, rblock->max_qp, INT_MAX); + props->max_qp_wr = min_t(int, rblock->max_wqes_wq, INT_MAX); + props->max_sge = min_t(int, rblock->max_sge, INT_MAX); + props->max_sge_rd = min_t(int, rblock->max_sge_rd, INT_MAX); + props->max_cq = min_t(int, rblock->max_cq, INT_MAX); + props->max_cqe = min_t(int, rblock->max_cqe, INT_MAX); + props->max_mr = min_t(int, rblock->max_mr, INT_MAX); + props->max_mw = min_t(int, rblock->max_mw, INT_MAX); + props->max_pd = min_t(int, rblock->max_pd, INT_MAX); + props->max_ah = min_t(int, rblock->max_ah, INT_MAX); + props->max_fmr = min_t(int, rblock->max_mr, INT_MAX); + props->max_srq = 0; + props->max_srq_wr = 0; + props->max_srq_sge = 0; + props->max_pkeys = 16; + props->local_ca_ack_delay + = rblock->local_ca_ack_delay; + props->max_raw_ipv6_qp + = min_t(int, rblock->max_raw_ipv6_qp, INT_MAX); + props->max_raw_ethy_qp + = min_t(int, rblock->max_raw_ethy_qp, INT_MAX); + props->max_mcast_grp + = min_t(int, rblock->max_mcast_grp, INT_MAX); + props->max_mcast_qp_attach + = min_t(int, rblock->max_mcast_qp_attach, INT_MAX); + 
props->max_total_mcast_qp_attach + = min_t(int, rblock->max_total_mcast_qp_attach, INT_MAX); + +query_device1: + kfree(rblock); + + return ret; +} + +int ehca_query_port(struct ib_device *ibdev, + u8 port, struct ib_port_attr *props) +{ + int ret = 0; + struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, + ib_device); + struct hipz_query_port *rblock; + + rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); + if (!rblock) { + ehca_err(&shca->ib_device, "Can't allocate rblock memory."); + return -ENOMEM; + } + + if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) { + ehca_err(&shca->ib_device, "Can't query port properties"); + ret = -EINVAL; + goto query_port1; + } + + memset(props, 0, sizeof(struct ib_port_attr)); + props->state = rblock->state; + + switch (rblock->max_mtu) { + case 0x1: + props->active_mtu = props->max_mtu = IB_MTU_256; + break; + case 0x2: + props->active_mtu = props->max_mtu = IB_MTU_512; + break; + case 0x3: + props->active_mtu = props->max_mtu = IB_MTU_1024; + break; + case 0x4: + props->active_mtu = props->max_mtu = IB_MTU_2048; + break; + case 0x5: + props->active_mtu = props->max_mtu = IB_MTU_4096; + break; + default: + ehca_err(&shca->ib_device, "Unknown MTU size: %x.", + rblock->max_mtu); + break; + } + + props->gid_tbl_len = rblock->gid_tbl_len; + props->max_msg_sz = rblock->max_msg_sz; + props->bad_pkey_cntr = rblock->bad_pkey_cntr; + props->qkey_viol_cntr = rblock->qkey_viol_cntr; + props->pkey_tbl_len = rblock->pkey_tbl_len; + props->lid = rblock->lid; + props->sm_lid = rblock->sm_lid; + props->lmc = rblock->lmc; + props->sm_sl = rblock->sm_sl; + props->subnet_timeout = rblock->subnet_timeout; + props->init_type_reply = rblock->init_type_reply; + + props->active_width = IB_WIDTH_12X; + props->active_speed = 0x1; + +query_port1: + kfree(rblock); + + return ret; +} + +int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) +{ + int ret = 0; + struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, ib_device); + struct hipz_query_port *rblock; + + if (index > 16) { + ehca_err(&shca->ib_device, "Invalid index: %x.", index); + return -EINVAL; + } + + rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); + if (!rblock) { + ehca_err(&shca->ib_device, "Can't allocate rblock memory."); + return -ENOMEM; + } + + if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) { + ehca_err(&shca->ib_device, "Can't query port properties"); + ret = -EINVAL; + goto query_pkey1; + } + + memcpy(pkey, &rblock->pkey_entries + index, sizeof(u16)); + +query_pkey1: + kfree(rblock); + + return ret; +} + +int ehca_query_gid(struct ib_device *ibdev, u8 port, + int index, union ib_gid *gid) +{ + int ret = 0; + struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, + ib_device); + struct hipz_query_port *rblock; + + if (index > 255) { + ehca_err(&shca->ib_device, "Invalid index: %x.", index); + return -EINVAL; + } + + rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); + if (!rblock) { + ehca_err(&shca->ib_device, "Can't allocate rblock memory."); + return -ENOMEM; + } + + if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) { + ehca_err(&shca->ib_device, "Can't query port properties"); + ret = -EINVAL; + goto query_gid1; + } + + memcpy(&gid->raw[0], &rblock->gid_prefix, sizeof(u64)); + memcpy(&gid->raw[8], &rblock->guid_entries[index], sizeof(u64)); + +query_gid1: + kfree(rblock); + + return ret; +} + +int ehca_modify_port(struct ib_device *ibdev, + u8 port, int port_modify_mask, + struct ib_port_modify *props) +{ 
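+	/*
+	 * A full implementation would translate port_modify_mask and the
+	 * set/clr capability masks in *props into the corresponding
+	 * hypervisor request.
+	 */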
+ /* Not implemented yet */ + return -EFAULT; +} diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c new file mode 100644 index 000000000000..2a65b5be1979 --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_irq.c @@ -0,0 +1,762 @@ +/* + * IBM eServer eHCA Infiniband device driver for Linux on POWER + * + * Functions for EQs, NEQs and interrupts + * + * Authors: Heiko J Schick <schickhj@de.ibm.com> + * Khadija Souissi <souissi@de.ibm.com> + * + * Copyright (c) 2005 IBM Corporation + * + * All rights reserved. + * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. + * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "ehca_classes.h" +#include "ehca_irq.h" +#include "ehca_iverbs.h" +#include "ehca_tools.h" +#include "hcp_if.h" +#include "hipz_fns.h" + +#define EQE_COMPLETION_EVENT EHCA_BMASK_IBM(1,1) +#define EQE_CQ_QP_NUMBER EHCA_BMASK_IBM(8,31) +#define EQE_EE_IDENTIFIER EHCA_BMASK_IBM(2,7) +#define EQE_CQ_NUMBER EHCA_BMASK_IBM(8,31) +#define EQE_QP_NUMBER EHCA_BMASK_IBM(8,31) +#define EQE_QP_TOKEN EHCA_BMASK_IBM(32,63) +#define EQE_CQ_TOKEN EHCA_BMASK_IBM(32,63) + +#define NEQE_COMPLETION_EVENT EHCA_BMASK_IBM(1,1) +#define NEQE_EVENT_CODE EHCA_BMASK_IBM(2,7) +#define NEQE_PORT_NUMBER EHCA_BMASK_IBM(8,15) +#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16,16) + +#define ERROR_DATA_LENGTH EHCA_BMASK_IBM(52,63) +#define ERROR_DATA_TYPE EHCA_BMASK_IBM(0,7) + +#ifdef CONFIG_INFINIBAND_EHCA_SCALING + +static void queue_comp_task(struct ehca_cq *__cq); + +static struct ehca_comp_pool* pool; +static struct notifier_block comp_pool_callback_nb; + +#endif + +static inline void comp_event_callback(struct ehca_cq *cq) +{ + if (!cq->ib_cq.comp_handler) + return; + + spin_lock(&cq->cb_lock); + cq->ib_cq.comp_handler(&cq->ib_cq, cq->ib_cq.cq_context); + spin_unlock(&cq->cb_lock); + + return; +} + +static void print_error_data(struct ehca_shca * shca, void* data, + u64* rblock, int length) +{ + u64 type = EHCA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]); + u64 resource = rblock[1]; + + switch (type) { + case 0x1: /* Queue Pair */ + { + struct ehca_qp *qp = (struct ehca_qp*)data; + + /* only print error data if AER is set */ + if (rblock[6] == 0) + return; + + ehca_err(&shca->ib_device, + "QP 0x%x (resource=%lx) has errors.", + qp->ib_qp.qp_num, resource); + break; + } + case 0x4: /* Completion Queue */ + { + struct ehca_cq *cq = (struct ehca_cq*)data; + + ehca_err(&shca->ib_device, + "CQ 0x%x (resource=%lx) has errors.", + cq->cq_number, resource); + break; + } + default: + ehca_err(&shca->ib_device, + "Unknown errror type: %lx on %s.", + type, shca->ib_device.name); + break; + } + + ehca_err(&shca->ib_device, "Error data is available: %lx.", resource); + ehca_err(&shca->ib_device, "EHCA ----- error data begin " + "---------------------------------------------------"); + ehca_dmp(rblock, length, "resource=%lx", resource); + ehca_err(&shca->ib_device, "EHCA ----- error data end " + "----------------------------------------------------"); + + return; +} + +int ehca_error_data(struct ehca_shca *shca, void *data, + u64 resource) +{ + + unsigned long ret; + u64 *rblock; + unsigned long block_count; + + rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); + if (!rblock) { + ehca_err(&shca->ib_device, "Cannot allocate rblock memory."); + ret = -ENOMEM; + goto error_data1; + } + + ret = hipz_h_error_data(shca->ipz_hca_handle, + resource, + rblock, + &block_count); + + if (ret == H_R_STATE) { + ehca_err(&shca->ib_device, + "No error data is available: %lx.", resource); + } + else if (ret == H_SUCCESS) { + int length; + + length = EHCA_BMASK_GET(ERROR_DATA_LENGTH, rblock[0]); + + if (length > PAGE_SIZE) + length = PAGE_SIZE; + + print_error_data(shca, data, rblock, length); + } + else { + ehca_err(&shca->ib_device, + "Error data could not be fetched: %lx", resource); + } + + kfree(rblock); + +error_data1: + return ret; + +} + +static void qp_event_callback(struct ehca_shca *shca, + u64 eqe, + enum ib_event_type event_type) +{ + struct ib_event event; + struct ehca_qp *qp; + unsigned long flags; + u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe); + + spin_lock_irqsave(&ehca_qp_idr_lock, flags); + qp = idr_find(&ehca_qp_idr, 
token); + spin_unlock_irqrestore(&ehca_qp_idr_lock, flags); + + + if (!qp) + return; + + ehca_error_data(shca, qp, qp->ipz_qp_handle.handle); + + if (!qp->ib_qp.event_handler) + return; + + event.device = &shca->ib_device; + event.event = event_type; + event.element.qp = &qp->ib_qp; + + qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context); + + return; +} + +static void cq_event_callback(struct ehca_shca *shca, + u64 eqe) +{ + struct ehca_cq *cq; + unsigned long flags; + u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe); + + spin_lock_irqsave(&ehca_cq_idr_lock, flags); + cq = idr_find(&ehca_cq_idr, token); + spin_unlock_irqrestore(&ehca_cq_idr_lock, flags); + + if (!cq) + return; + + ehca_error_data(shca, cq, cq->ipz_cq_handle.handle); + + return; +} + +static void parse_identifier(struct ehca_shca *shca, u64 eqe) +{ + u8 identifier = EHCA_BMASK_GET(EQE_EE_IDENTIFIER, eqe); + + switch (identifier) { + case 0x02: /* path migrated */ + qp_event_callback(shca, eqe, IB_EVENT_PATH_MIG); + break; + case 0x03: /* communication established */ + qp_event_callback(shca, eqe, IB_EVENT_COMM_EST); + break; + case 0x04: /* send queue drained */ + qp_event_callback(shca, eqe, IB_EVENT_SQ_DRAINED); + break; + case 0x05: /* QP error */ + case 0x06: /* QP error */ + qp_event_callback(shca, eqe, IB_EVENT_QP_FATAL); + break; + case 0x07: /* CQ error */ + case 0x08: /* CQ error */ + cq_event_callback(shca, eqe); + break; + case 0x09: /* MRMWPTE error */ + ehca_err(&shca->ib_device, "MRMWPTE error."); + break; + case 0x0A: /* port event */ + ehca_err(&shca->ib_device, "Port event."); + break; + case 0x0B: /* MR access error */ + ehca_err(&shca->ib_device, "MR access error."); + break; + case 0x0C: /* EQ error */ + ehca_err(&shca->ib_device, "EQ error."); + break; + case 0x0D: /* P/Q_Key mismatch */ + ehca_err(&shca->ib_device, "P/Q_Key mismatch."); + break; + case 0x10: /* sampling complete */ + ehca_err(&shca->ib_device, "Sampling complete."); + break; + case 0x11: /* unaffiliated access error */ + ehca_err(&shca->ib_device, "Unaffiliated access error."); + break; + case 0x12: /* path migrating error */ + ehca_err(&shca->ib_device, "Path migration error."); + break; + case 0x13: /* interface trace stopped */ + ehca_err(&shca->ib_device, "Interface trace stopped."); + break; + case 0x14: /* first error capture info available */ + default: + ehca_err(&shca->ib_device, "Unknown identifier: %x on %s.", + identifier, shca->ib_device.name); + break; + } + + return; +} + +static void parse_ec(struct ehca_shca *shca, u64 eqe) +{ + struct ib_event event; + u8 ec = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe); + u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe); + + switch (ec) { + case 0x30: /* port availability change */ + if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) { + ehca_info(&shca->ib_device, + "port %x is active.", port); + event.device = &shca->ib_device; + event.event = IB_EVENT_PORT_ACTIVE; + event.element.port_num = port; + shca->sport[port - 1].port_state = IB_PORT_ACTIVE; + ib_dispatch_event(&event); + } else { + ehca_info(&shca->ib_device, + "port %x is inactive.", port); + event.device = &shca->ib_device; + event.event = IB_EVENT_PORT_ERR; + event.element.port_num = port; + shca->sport[port - 1].port_state = IB_PORT_DOWN; + ib_dispatch_event(&event); + } + break; + case 0x31: + /* port configuration change + * disruptive change is caused by + * LID, PKEY or SM change + */ + ehca_warn(&shca->ib_device, + "disruptive port %x configuration change", port); + + ehca_info(&shca->ib_device, + "port %x is inactive.", 
port); + event.device = &shca->ib_device; + event.event = IB_EVENT_PORT_ERR; + event.element.port_num = port; + shca->sport[port - 1].port_state = IB_PORT_DOWN; + ib_dispatch_event(&event); + + ehca_info(&shca->ib_device, + "port %x is active.", port); + event.device = &shca->ib_device; + event.event = IB_EVENT_PORT_ACTIVE; + event.element.port_num = port; + shca->sport[port - 1].port_state = IB_PORT_ACTIVE; + ib_dispatch_event(&event); + break; + case 0x32: /* adapter malfunction */ + ehca_err(&shca->ib_device, "Adapter malfunction."); + break; + case 0x33: /* trace stopped */ + ehca_err(&shca->ib_device, "Traced stopped."); + break; + default: + ehca_err(&shca->ib_device, "Unknown event code: %x on %s.", + ec, shca->ib_device.name); + break; + } + + return; +} + +static inline void reset_eq_pending(struct ehca_cq *cq) +{ + u64 CQx_EP; + struct h_galpa gal = cq->galpas.kernel; + + hipz_galpa_store_cq(gal, cqx_ep, 0x0); + CQx_EP = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_ep)); + + return; +} + +irqreturn_t ehca_interrupt_neq(int irq, void *dev_id, struct pt_regs *regs) +{ + struct ehca_shca *shca = (struct ehca_shca*)dev_id; + + tasklet_hi_schedule(&shca->neq.interrupt_task); + + return IRQ_HANDLED; +} + +void ehca_tasklet_neq(unsigned long data) +{ + struct ehca_shca *shca = (struct ehca_shca*)data; + struct ehca_eqe *eqe; + u64 ret; + + eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq); + + while (eqe) { + if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry)) + parse_ec(shca, eqe->entry); + + eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq); + } + + ret = hipz_h_reset_event(shca->ipz_hca_handle, + shca->neq.ipz_eq_handle, 0xFFFFFFFFFFFFFFFFL); + + if (ret != H_SUCCESS) + ehca_err(&shca->ib_device, "Can't clear notification events."); + + return; +} + +irqreturn_t ehca_interrupt_eq(int irq, void *dev_id, struct pt_regs *regs) +{ + struct ehca_shca *shca = (struct ehca_shca*)dev_id; + + tasklet_hi_schedule(&shca->eq.interrupt_task); + + return IRQ_HANDLED; +} + +void ehca_tasklet_eq(unsigned long data) +{ + struct ehca_shca *shca = (struct ehca_shca*)data; + struct ehca_eqe *eqe; + int int_state; + int query_cnt = 0; + + do { + eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq); + + if ((shca->hw_level >= 2) && eqe) + int_state = 1; + else + int_state = 0; + + while ((int_state == 1) || eqe) { + while (eqe) { + u64 eqe_value = eqe->entry; + + ehca_dbg(&shca->ib_device, + "eqe_value=%lx", eqe_value); + + /* TODO: better structure */ + if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, + eqe_value)) { + unsigned long flags; + u32 token; + struct ehca_cq *cq; + + ehca_dbg(&shca->ib_device, + "... completion event"); + token = + EHCA_BMASK_GET(EQE_CQ_TOKEN, + eqe_value); + spin_lock_irqsave(&ehca_cq_idr_lock, + flags); + cq = idr_find(&ehca_cq_idr, token); + + if (cq == NULL) { + spin_unlock(&ehca_cq_idr_lock); + break; + } + + reset_eq_pending(cq); +#ifdef CONFIG_INFINIBAND_EHCA_SCALING + queue_comp_task(cq); + spin_unlock_irqrestore(&ehca_cq_idr_lock, + flags); +#else + spin_unlock_irqrestore(&ehca_cq_idr_lock, + flags); + comp_event_callback(cq); +#endif + } else { + ehca_dbg(&shca->ib_device, + "... 
non completion event"); + parse_identifier(shca, eqe_value); + } + eqe = + (struct ehca_eqe *)ehca_poll_eq(shca, + &shca->eq); + } + + if (shca->hw_level >= 2) { + int_state = + hipz_h_query_int_state(shca->ipz_hca_handle, + shca->eq.ist); + query_cnt++; + iosync(); + if (query_cnt >= 100) { + query_cnt = 0; + int_state = 0; + } + } + eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq); + + } + } while (int_state != 0); + + return; +} + +#ifdef CONFIG_INFINIBAND_EHCA_SCALING + +static inline int find_next_online_cpu(struct ehca_comp_pool* pool) +{ + unsigned long flags_last_cpu; + + if (ehca_debug_level) + ehca_dmp(&cpu_online_map, sizeof(cpumask_t), ""); + + spin_lock_irqsave(&pool->last_cpu_lock, flags_last_cpu); + pool->last_cpu = next_cpu(pool->last_cpu, cpu_online_map); + if (pool->last_cpu == NR_CPUS) + pool->last_cpu = first_cpu(cpu_online_map); + spin_unlock_irqrestore(&pool->last_cpu_lock, flags_last_cpu); + + return pool->last_cpu; +} + +static void __queue_comp_task(struct ehca_cq *__cq, + struct ehca_cpu_comp_task *cct) +{ + unsigned long flags_cct; + unsigned long flags_cq; + + spin_lock_irqsave(&cct->task_lock, flags_cct); + spin_lock_irqsave(&__cq->task_lock, flags_cq); + + if (__cq->nr_callbacks == 0) { + __cq->nr_callbacks++; + list_add_tail(&__cq->entry, &cct->cq_list); + cct->cq_jobs++; + wake_up(&cct->wait_queue); + } + else + __cq->nr_callbacks++; + + spin_unlock_irqrestore(&__cq->task_lock, flags_cq); + spin_unlock_irqrestore(&cct->task_lock, flags_cct); +} + +static void queue_comp_task(struct ehca_cq *__cq) +{ + int cpu; + int cpu_id; + struct ehca_cpu_comp_task *cct; + + cpu = get_cpu(); + cpu_id = find_next_online_cpu(pool); + + BUG_ON(!cpu_online(cpu_id)); + + cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id); + + if (cct->cq_jobs > 0) { + cpu_id = find_next_online_cpu(pool); + cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id); + } + + __queue_comp_task(__cq, cct); + + put_cpu(); + + return; +} + +static void run_comp_task(struct ehca_cpu_comp_task* cct) +{ + struct ehca_cq *cq; + unsigned long flags_cct; + unsigned long flags_cq; + + spin_lock_irqsave(&cct->task_lock, flags_cct); + + while (!list_empty(&cct->cq_list)) { + cq = list_entry(cct->cq_list.next, struct ehca_cq, entry); + spin_unlock_irqrestore(&cct->task_lock, flags_cct); + comp_event_callback(cq); + spin_lock_irqsave(&cct->task_lock, flags_cct); + + spin_lock_irqsave(&cq->task_lock, flags_cq); + cq->nr_callbacks--; + if (cq->nr_callbacks == 0) { + list_del_init(cct->cq_list.next); + cct->cq_jobs--; + } + spin_unlock_irqrestore(&cq->task_lock, flags_cq); + + } + + spin_unlock_irqrestore(&cct->task_lock, flags_cct); + + return; +} + +static int comp_task(void *__cct) +{ + struct ehca_cpu_comp_task* cct = __cct; + DECLARE_WAITQUEUE(wait, current); + + set_current_state(TASK_INTERRUPTIBLE); + while(!kthread_should_stop()) { + add_wait_queue(&cct->wait_queue, &wait); + + if (list_empty(&cct->cq_list)) + schedule(); + else + __set_current_state(TASK_RUNNING); + + remove_wait_queue(&cct->wait_queue, &wait); + + if (!list_empty(&cct->cq_list)) + run_comp_task(__cct); + + set_current_state(TASK_INTERRUPTIBLE); + } + __set_current_state(TASK_RUNNING); + + return 0; +} + +static struct task_struct *create_comp_task(struct ehca_comp_pool *pool, + int cpu) +{ + struct ehca_cpu_comp_task *cct; + + cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); + spin_lock_init(&cct->task_lock); + INIT_LIST_HEAD(&cct->cq_list); + init_waitqueue_head(&cct->wait_queue); + cct->task = kthread_create(comp_task, cct, "ehca_comp/%d", cpu); 
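+	/* the new kthread starts stopped; it is bound to its CPU and woken
+	 * via kthread_bind()/wake_up_process() by the callers of
+	 * create_comp_task() */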
+ + return cct->task; +} + +static void destroy_comp_task(struct ehca_comp_pool *pool, + int cpu) +{ + struct ehca_cpu_comp_task *cct; + struct task_struct *task; + unsigned long flags_cct; + + cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); + + spin_lock_irqsave(&cct->task_lock, flags_cct); + + task = cct->task; + cct->task = NULL; + cct->cq_jobs = 0; + + spin_unlock_irqrestore(&cct->task_lock, flags_cct); + + if (task) + kthread_stop(task); + + return; +} + +static void take_over_work(struct ehca_comp_pool *pool, + int cpu) +{ + struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); + LIST_HEAD(list); + struct ehca_cq *cq; + unsigned long flags_cct; + + spin_lock_irqsave(&cct->task_lock, flags_cct); + + list_splice_init(&cct->cq_list, &list); + + while(!list_empty(&list)) { + cq = list_entry(cct->cq_list.next, struct ehca_cq, entry); + + list_del(&cq->entry); + __queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks, + smp_processor_id())); + } + + spin_unlock_irqrestore(&cct->task_lock, flags_cct); + +} + +static int comp_pool_callback(struct notifier_block *nfb, + unsigned long action, + void *hcpu) +{ + unsigned int cpu = (unsigned long)hcpu; + struct ehca_cpu_comp_task *cct; + + switch (action) { + case CPU_UP_PREPARE: + ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu); + if(!create_comp_task(pool, cpu)) { + ehca_gen_err("Can't create comp_task for cpu: %x", cpu); + return NOTIFY_BAD; + } + break; + case CPU_UP_CANCELED: + ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu); + cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); + kthread_bind(cct->task, any_online_cpu(cpu_online_map)); + destroy_comp_task(pool, cpu); + break; + case CPU_ONLINE: + ehca_gen_dbg("CPU: %x (CPU_ONLINE)", cpu); + cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); + kthread_bind(cct->task, cpu); + wake_up_process(cct->task); + break; + case CPU_DOWN_PREPARE: + ehca_gen_dbg("CPU: %x (CPU_DOWN_PREPARE)", cpu); + break; + case CPU_DOWN_FAILED: + ehca_gen_dbg("CPU: %x (CPU_DOWN_FAILED)", cpu); + break; + case CPU_DEAD: + ehca_gen_dbg("CPU: %x (CPU_DEAD)", cpu); + destroy_comp_task(pool, cpu); + take_over_work(pool, cpu); + break; + } + + return NOTIFY_OK; +} + +#endif + +int ehca_create_comp_pool(void) +{ +#ifdef CONFIG_INFINIBAND_EHCA_SCALING + int cpu; + struct task_struct *task; + + pool = kzalloc(sizeof(struct ehca_comp_pool), GFP_KERNEL); + if (pool == NULL) + return -ENOMEM; + + spin_lock_init(&pool->last_cpu_lock); + pool->last_cpu = any_online_cpu(cpu_online_map); + + pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task); + if (pool->cpu_comp_tasks == NULL) { + kfree(pool); + return -EINVAL; + } + + for_each_online_cpu(cpu) { + task = create_comp_task(pool, cpu); + if (task) { + kthread_bind(task, cpu); + wake_up_process(task); + } + } + + comp_pool_callback_nb.notifier_call = comp_pool_callback; + comp_pool_callback_nb.priority =0; + register_cpu_notifier(&comp_pool_callback_nb); +#endif + + return 0; +} + +void ehca_destroy_comp_pool(void) +{ +#ifdef CONFIG_INFINIBAND_EHCA_SCALING + int i; + + unregister_cpu_notifier(&comp_pool_callback_nb); + + for (i = 0; i < NR_CPUS; i++) { + if (cpu_online(i)) + destroy_comp_task(pool, i); + } +#endif + + return; +} diff --git a/drivers/infiniband/hw/ehca/ehca_irq.h b/drivers/infiniband/hw/ehca/ehca_irq.h new file mode 100644 index 000000000000..85bf1fe16fe4 --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_irq.h @@ -0,0 +1,77 @@ +/* + * IBM eServer eHCA Infiniband device driver for Linux on POWER + * + * Function definitions and structs for EQs, NEQs and 
interrupts + * + * Authors: Heiko J Schick <schickhj@de.ibm.com> + * Khadija Souissi <souissi@de.ibm.com> + * + * Copyright (c) 2005 IBM Corporation + * + * All rights reserved. + * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. + * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __EHCA_IRQ_H +#define __EHCA_IRQ_H + + +struct ehca_shca; + +#include <linux/interrupt.h> +#include <linux/types.h> +#include <asm/atomic.h> + +int ehca_error_data(struct ehca_shca *shca, void *data, u64 resource); + +irqreturn_t ehca_interrupt_neq(int irq, void *dev_id, struct pt_regs *regs); +void ehca_tasklet_neq(unsigned long data); + +irqreturn_t ehca_interrupt_eq(int irq, void *dev_id, struct pt_regs *regs); +void ehca_tasklet_eq(unsigned long data); + +struct ehca_cpu_comp_task { + wait_queue_head_t wait_queue; + struct list_head cq_list; + struct task_struct *task; + spinlock_t task_lock; + int cq_jobs; +}; + +struct ehca_comp_pool { + struct ehca_cpu_comp_task *cpu_comp_tasks; + int last_cpu; + spinlock_t last_cpu_lock; +}; + +int ehca_create_comp_pool(void); +void ehca_destroy_comp_pool(void); + +#endif diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h new file mode 100644 index 000000000000..319c39d47f3a --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h @@ -0,0 +1,182 @@ +/* + * IBM eServer eHCA Infiniband device driver for Linux on POWER + * + * Function definitions for internal functions + * + * Authors: Heiko J Schick <schickhj@de.ibm.com> + * Dietmar Decker <ddecker@de.ibm.com> + * + * Copyright (c) 2005 IBM Corporation + * + * All rights reserved. + * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. + * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. 
+ * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __EHCA_IVERBS_H__ +#define __EHCA_IVERBS_H__ + +#include "ehca_classes.h" + +int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props); + +int ehca_query_port(struct ib_device *ibdev, u8 port, + struct ib_port_attr *props); + +int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 * pkey); + +int ehca_query_gid(struct ib_device *ibdev, u8 port, int index, + union ib_gid *gid); + +int ehca_modify_port(struct ib_device *ibdev, u8 port, int port_modify_mask, + struct ib_port_modify *props); + +struct ib_pd *ehca_alloc_pd(struct ib_device *device, + struct ib_ucontext *context, + struct ib_udata *udata); + +int ehca_dealloc_pd(struct ib_pd *pd); + +struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr); + +int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); + +int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); + +int ehca_destroy_ah(struct ib_ah *ah); + +struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags); + +struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd, + struct ib_phys_buf *phys_buf_array, + int num_phys_buf, + int mr_access_flags, u64 *iova_start); + +struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, + struct ib_umem *region, + int mr_access_flags, struct ib_udata *udata); + +int ehca_rereg_phys_mr(struct ib_mr *mr, + int mr_rereg_mask, + struct ib_pd *pd, + struct ib_phys_buf *phys_buf_array, + int num_phys_buf, int mr_access_flags, u64 *iova_start); + +int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr); + +int ehca_dereg_mr(struct ib_mr *mr); + +struct ib_mw *ehca_alloc_mw(struct ib_pd *pd); + +int ehca_bind_mw(struct ib_qp *qp, struct ib_mw *mw, + struct ib_mw_bind *mw_bind); + +int ehca_dealloc_mw(struct ib_mw *mw); + +struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd, + int mr_access_flags, + struct ib_fmr_attr *fmr_attr); + +int ehca_map_phys_fmr(struct ib_fmr *fmr, + u64 *page_list, int list_len, u64 iova); + +int ehca_unmap_fmr(struct list_head *fmr_list); + +int ehca_dealloc_fmr(struct ib_fmr *fmr); + +enum ehca_eq_type { + EHCA_EQ = 0, /* Event Queue */ + EHCA_NEQ /* Notification Event Queue */ +}; + +int ehca_create_eq(struct ehca_shca *shca, struct ehca_eq *eq, + enum ehca_eq_type type, const u32 length); + +int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq); + +void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq); + + +struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, + struct ib_ucontext *context, + struct 
ib_udata *udata); + +int ehca_destroy_cq(struct ib_cq *cq); + +int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata); + +int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc); + +int ehca_peek_cq(struct ib_cq *cq, int wc_cnt); + +int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify); + +struct ib_qp *ehca_create_qp(struct ib_pd *pd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata); + +int ehca_destroy_qp(struct ib_qp *qp); + +int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, + struct ib_udata *udata); + +int ehca_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, + int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr); + +int ehca_post_send(struct ib_qp *qp, struct ib_send_wr *send_wr, + struct ib_send_wr **bad_send_wr); + +int ehca_post_recv(struct ib_qp *qp, struct ib_recv_wr *recv_wr, + struct ib_recv_wr **bad_recv_wr); + +u64 ehca_define_sqp(struct ehca_shca *shca, struct ehca_qp *ibqp, + struct ib_qp_init_attr *qp_init_attr); + +int ehca_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); + +int ehca_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid); + +struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device, + struct ib_udata *udata); + +int ehca_dealloc_ucontext(struct ib_ucontext *context); + +int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); + +void ehca_poll_eqs(unsigned long data); + +int ehca_mmap_nopage(u64 foffset,u64 length,void **mapped, + struct vm_area_struct **vma); + +int ehca_mmap_register(u64 physical,void **mapped, + struct vm_area_struct **vma); + +int ehca_munmap(unsigned long addr, size_t len); + +#endif diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c new file mode 100644 index 000000000000..2380994418a5 --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_main.c @@ -0,0 +1,818 @@ +/* + * IBM eServer eHCA Infiniband device driver for Linux on POWER + * + * module start stop, hca detection + * + * Authors: Heiko J Schick <schickhj@de.ibm.com> + * Hoang-Nam Nguyen <hnguyen@de.ibm.com> + * Joachim Fenkes <fenkes@de.ibm.com> + * + * Copyright (c) 2005 IBM Corporation + * + * All rights reserved. + * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. + * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "ehca_classes.h" +#include "ehca_iverbs.h" +#include "ehca_mrmw.h" +#include "ehca_tools.h" +#include "hcp_if.h" + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); +MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver"); +MODULE_VERSION("SVNEHCA_0016"); + +int ehca_open_aqp1 = 0; +int ehca_debug_level = 0; +int ehca_hw_level = 0; +int ehca_nr_ports = 2; +int ehca_use_hp_mr = 0; +int ehca_port_act_time = 30; +int ehca_poll_all_eqs = 1; +int ehca_static_rate = -1; + +module_param_named(open_aqp1, ehca_open_aqp1, int, 0); +module_param_named(debug_level, ehca_debug_level, int, 0); +module_param_named(hw_level, ehca_hw_level, int, 0); +module_param_named(nr_ports, ehca_nr_ports, int, 0); +module_param_named(use_hp_mr, ehca_use_hp_mr, int, 0); +module_param_named(port_act_time, ehca_port_act_time, int, 0); +module_param_named(poll_all_eqs, ehca_poll_all_eqs, int, 0); +module_param_named(static_rate, ehca_static_rate, int, 0); + +MODULE_PARM_DESC(open_aqp1, + "AQP1 on startup (0: no (default), 1: yes)"); +MODULE_PARM_DESC(debug_level, + "debug level" + " (0: no debug traces (default), 1: with debug traces)"); +MODULE_PARM_DESC(hw_level, + "hardware level" + " (0: autosensing (default), 1: v. 0.20, 2: v. 
0.21)"); +MODULE_PARM_DESC(nr_ports, + "number of connected ports (default: 2)"); +MODULE_PARM_DESC(use_hp_mr, + "high performance MRs (0: no (default), 1: yes)"); +MODULE_PARM_DESC(port_act_time, + "time to wait for port activation (default: 30 sec)"); +MODULE_PARM_DESC(poll_all_eqs, + "polls all event queues periodically" + " (0: no, 1: yes (default))"); +MODULE_PARM_DESC(static_rate, + "set permanent static rate (default: disabled)"); + +spinlock_t ehca_qp_idr_lock; +spinlock_t ehca_cq_idr_lock; +DEFINE_IDR(ehca_qp_idr); +DEFINE_IDR(ehca_cq_idr); + +static struct list_head shca_list; /* list of all registered ehcas */ +static spinlock_t shca_list_lock; + +static struct timer_list poll_eqs_timer; + +static int ehca_create_slab_caches(void) +{ + int ret; + + ret = ehca_init_pd_cache(); + if (ret) { + ehca_gen_err("Cannot create PD SLAB cache."); + return ret; + } + + ret = ehca_init_cq_cache(); + if (ret) { + ehca_gen_err("Cannot create CQ SLAB cache."); + goto create_slab_caches2; + } + + ret = ehca_init_qp_cache(); + if (ret) { + ehca_gen_err("Cannot create QP SLAB cache."); + goto create_slab_caches3; + } + + ret = ehca_init_av_cache(); + if (ret) { + ehca_gen_err("Cannot create AV SLAB cache."); + goto create_slab_caches4; + } + + ret = ehca_init_mrmw_cache(); + if (ret) { + ehca_gen_err("Cannot create MR&MW SLAB cache."); + goto create_slab_caches5; + } + + return 0; + +create_slab_caches5: + ehca_cleanup_av_cache(); + +create_slab_caches4: + ehca_cleanup_qp_cache(); + +create_slab_caches3: + ehca_cleanup_cq_cache(); + +create_slab_caches2: + ehca_cleanup_pd_cache(); + + return ret; +} + +static void ehca_destroy_slab_caches(void) +{ + ehca_cleanup_mrmw_cache(); + ehca_cleanup_av_cache(); + ehca_cleanup_qp_cache(); + ehca_cleanup_cq_cache(); + ehca_cleanup_pd_cache(); +} + +#define EHCA_HCAAVER EHCA_BMASK_IBM(32,39) +#define EHCA_REVID EHCA_BMASK_IBM(40,63) + +int ehca_sense_attributes(struct ehca_shca *shca) +{ + int ret = 0; + u64 h_ret; + struct hipz_query_hca *rblock; + + rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); + if (!rblock) { + ehca_gen_err("Cannot allocate rblock memory."); + return -ENOMEM; + } + + h_ret = hipz_h_query_hca(shca->ipz_hca_handle, rblock); + if (h_ret != H_SUCCESS) { + ehca_gen_err("Cannot query device properties. h_ret=%lx", + h_ret); + ret = -EPERM; + goto num_ports1; + } + + if (ehca_nr_ports == 1) + shca->num_ports = 1; + else + shca->num_ports = (u8)rblock->num_ports; + + ehca_gen_dbg(" ... found %x ports", rblock->num_ports); + + if (ehca_hw_level == 0) { + u32 hcaaver; + u32 revid; + + hcaaver = EHCA_BMASK_GET(EHCA_HCAAVER, rblock->hw_ver); + revid = EHCA_BMASK_GET(EHCA_REVID, rblock->hw_ver); + + ehca_gen_dbg(" ... hardware version=%x:%x", hcaaver, revid); + + if ((hcaaver == 1) && (revid == 0)) + shca->hw_level = 0; + else if ((hcaaver == 1) && (revid == 1)) + shca->hw_level = 1; + else if ((hcaaver == 1) && (revid == 2)) + shca->hw_level = 2; + } + ehca_gen_dbg(" ... 
hardware level=%x", shca->hw_level); + + shca->sport[0].rate = IB_RATE_30_GBPS; + shca->sport[1].rate = IB_RATE_30_GBPS; + +num_ports1: + kfree(rblock); + return ret; +} + +static int init_node_guid(struct ehca_shca *shca) +{ + int ret = 0; + struct hipz_query_hca *rblock; + + rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); + if (!rblock) { + ehca_err(&shca->ib_device, "Can't allocate rblock memory."); + return -ENOMEM; + } + + if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { + ehca_err(&shca->ib_device, "Can't query device properties"); + ret = -EINVAL; + goto init_node_guid1; + } + + memcpy(&shca->ib_device.node_guid, &rblock->node_guid, sizeof(u64)); + +init_node_guid1: + kfree(rblock); + return ret; +} + +int ehca_register_device(struct ehca_shca *shca) +{ + int ret; + + ret = init_node_guid(shca); + if (ret) + return ret; + + strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX); + shca->ib_device.owner = THIS_MODULE; + + shca->ib_device.uverbs_abi_ver = 5; + shca->ib_device.uverbs_cmd_mask = + (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | + (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | + (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | + (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | + (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | + (1ull << IB_USER_VERBS_CMD_REG_MR) | + (1ull << IB_USER_VERBS_CMD_DEREG_MR) | + (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | + (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | + (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | + (1ull << IB_USER_VERBS_CMD_CREATE_QP) | + (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | + (1ull << IB_USER_VERBS_CMD_QUERY_QP) | + (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | + (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | + (1ull << IB_USER_VERBS_CMD_DETACH_MCAST); + + shca->ib_device.node_type = RDMA_NODE_IB_CA; + shca->ib_device.phys_port_cnt = shca->num_ports; + shca->ib_device.dma_device = &shca->ibmebus_dev->ofdev.dev; + shca->ib_device.query_device = ehca_query_device; + shca->ib_device.query_port = ehca_query_port; + shca->ib_device.query_gid = ehca_query_gid; + shca->ib_device.query_pkey = ehca_query_pkey; + /* shca->ib_device.modify_device = ehca_modify_device */ + shca->ib_device.modify_port = ehca_modify_port; + shca->ib_device.alloc_ucontext = ehca_alloc_ucontext; + shca->ib_device.dealloc_ucontext = ehca_dealloc_ucontext; + shca->ib_device.alloc_pd = ehca_alloc_pd; + shca->ib_device.dealloc_pd = ehca_dealloc_pd; + shca->ib_device.create_ah = ehca_create_ah; + /* shca->ib_device.modify_ah = ehca_modify_ah; */ + shca->ib_device.query_ah = ehca_query_ah; + shca->ib_device.destroy_ah = ehca_destroy_ah; + shca->ib_device.create_qp = ehca_create_qp; + shca->ib_device.modify_qp = ehca_modify_qp; + shca->ib_device.query_qp = ehca_query_qp; + shca->ib_device.destroy_qp = ehca_destroy_qp; + shca->ib_device.post_send = ehca_post_send; + shca->ib_device.post_recv = ehca_post_recv; + shca->ib_device.create_cq = ehca_create_cq; + shca->ib_device.destroy_cq = ehca_destroy_cq; + shca->ib_device.resize_cq = ehca_resize_cq; + shca->ib_device.poll_cq = ehca_poll_cq; + /* shca->ib_device.peek_cq = ehca_peek_cq; */ + shca->ib_device.req_notify_cq = ehca_req_notify_cq; + /* shca->ib_device.req_ncomp_notif = ehca_req_ncomp_notif; */ + shca->ib_device.get_dma_mr = ehca_get_dma_mr; + shca->ib_device.reg_phys_mr = ehca_reg_phys_mr; + shca->ib_device.reg_user_mr = ehca_reg_user_mr; + shca->ib_device.query_mr = ehca_query_mr; + shca->ib_device.dereg_mr = ehca_dereg_mr; + shca->ib_device.rereg_phys_mr = ehca_rereg_phys_mr; + shca->ib_device.alloc_mw = ehca_alloc_mw; + 
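+	/*
+	 * These assignments fill in the ib_device verb dispatch table
+	 * that the InfiniBand midlayer calls into. The entries left
+	 * commented out (modify_device, modify_ah, peek_cq,
+	 * req_ncomp_notif, process_mad) are optional verbs that ehca
+	 * does not implement; they stay NULL and the stack reports
+	 * them as unsupported.
+	 */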
shca->ib_device.bind_mw = ehca_bind_mw; + shca->ib_device.dealloc_mw = ehca_dealloc_mw; + shca->ib_device.alloc_fmr = ehca_alloc_fmr; + shca->ib_device.map_phys_fmr = ehca_map_phys_fmr; + shca->ib_device.unmap_fmr = ehca_unmap_fmr; + shca->ib_device.dealloc_fmr = ehca_dealloc_fmr; + shca->ib_device.attach_mcast = ehca_attach_mcast; + shca->ib_device.detach_mcast = ehca_detach_mcast; + /* shca->ib_device.process_mad = ehca_process_mad; */ + shca->ib_device.mmap = ehca_mmap; + + ret = ib_register_device(&shca->ib_device); + if (ret) + ehca_err(&shca->ib_device, + "ib_register_device() failed ret=%x", ret); + + return ret; +} + +static int ehca_create_aqp1(struct ehca_shca *shca, u32 port) +{ + struct ehca_sport *sport = &shca->sport[port - 1]; + struct ib_cq *ibcq; + struct ib_qp *ibqp; + struct ib_qp_init_attr qp_init_attr; + int ret; + + if (sport->ibcq_aqp1) { + ehca_err(&shca->ib_device, "AQP1 CQ is already created."); + return -EPERM; + } + + ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void*)(-1), 10); + if (IS_ERR(ibcq)) { + ehca_err(&shca->ib_device, "Cannot create AQP1 CQ."); + return PTR_ERR(ibcq); + } + sport->ibcq_aqp1 = ibcq; + + if (sport->ibqp_aqp1) { + ehca_err(&shca->ib_device, "AQP1 QP is already created."); + ret = -EPERM; + goto create_aqp1; + } + + memset(&qp_init_attr, 0, sizeof(struct ib_qp_init_attr)); + qp_init_attr.send_cq = ibcq; + qp_init_attr.recv_cq = ibcq; + qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; + qp_init_attr.cap.max_send_wr = 100; + qp_init_attr.cap.max_recv_wr = 100; + qp_init_attr.cap.max_send_sge = 2; + qp_init_attr.cap.max_recv_sge = 1; + qp_init_attr.qp_type = IB_QPT_GSI; + qp_init_attr.port_num = port; + qp_init_attr.qp_context = NULL; + qp_init_attr.event_handler = NULL; + qp_init_attr.srq = NULL; + + ibqp = ib_create_qp(&shca->pd->ib_pd, &qp_init_attr); + if (IS_ERR(ibqp)) { + ehca_err(&shca->ib_device, "Cannot create AQP1 QP."); + ret = PTR_ERR(ibqp); + goto create_aqp1; + } + sport->ibqp_aqp1 = ibqp; + + return 0; + +create_aqp1: + ib_destroy_cq(sport->ibcq_aqp1); + return ret; +} + +static int ehca_destroy_aqp1(struct ehca_sport *sport) +{ + int ret; + + ret = ib_destroy_qp(sport->ibqp_aqp1); + if (ret) { + ehca_gen_err("Cannot destroy AQP1 QP. ret=%x", ret); + return ret; + } + + ret = ib_destroy_cq(sport->ibcq_aqp1); + if (ret) + ehca_gen_err("Cannot destroy AQP1 CQ. 
ret=%x", ret); + + return ret; +} + +static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", + ehca_debug_level); +} + +static ssize_t ehca_store_debug_level(struct device_driver *ddp, + const char *buf, size_t count) +{ + int value = (*buf) - '0'; + if (value >= 0 && value <= 9) + ehca_debug_level = value; + return 1; +} + +DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR, + ehca_show_debug_level, ehca_store_debug_level); + +void ehca_create_driver_sysfs(struct ibmebus_driver *drv) +{ + driver_create_file(&drv->driver, &driver_attr_debug_level); +} + +void ehca_remove_driver_sysfs(struct ibmebus_driver *drv) +{ + driver_remove_file(&drv->driver, &driver_attr_debug_level); +} + +#define EHCA_RESOURCE_ATTR(name) \ +static ssize_t ehca_show_##name(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + struct ehca_shca *shca; \ + struct hipz_query_hca *rblock; \ + int data; \ + \ + shca = dev->driver_data; \ + \ + rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); \ + if (!rblock) { \ + dev_err(dev, "Can't allocate rblock memory."); \ + return 0; \ + } \ + \ + if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { \ + dev_err(dev, "Can't query device properties"); \ + kfree(rblock); \ + return 0; \ + } \ + \ + data = rblock->name; \ + kfree(rblock); \ + \ + if ((strcmp(#name, "num_ports") == 0) && (ehca_nr_ports == 1)) \ + return snprintf(buf, 256, "1\n"); \ + else \ + return snprintf(buf, 256, "%d\n", data); \ + \ +} \ +static DEVICE_ATTR(name, S_IRUGO, ehca_show_##name, NULL); + +EHCA_RESOURCE_ATTR(num_ports); +EHCA_RESOURCE_ATTR(hw_ver); +EHCA_RESOURCE_ATTR(max_eq); +EHCA_RESOURCE_ATTR(cur_eq); +EHCA_RESOURCE_ATTR(max_cq); +EHCA_RESOURCE_ATTR(cur_cq); +EHCA_RESOURCE_ATTR(max_qp); +EHCA_RESOURCE_ATTR(cur_qp); +EHCA_RESOURCE_ATTR(max_mr); +EHCA_RESOURCE_ATTR(cur_mr); +EHCA_RESOURCE_ATTR(max_mw); +EHCA_RESOURCE_ATTR(cur_mw); +EHCA_RESOURCE_ATTR(max_pd); +EHCA_RESOURCE_ATTR(max_ah); + +static ssize_t ehca_show_adapter_handle(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ehca_shca *shca = dev->driver_data; + + return sprintf(buf, "%lx\n", shca->ipz_hca_handle.handle); + +} +static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL); + + +void ehca_create_device_sysfs(struct ibmebus_dev *dev) +{ + device_create_file(&dev->ofdev.dev, &dev_attr_adapter_handle); + device_create_file(&dev->ofdev.dev, &dev_attr_num_ports); + device_create_file(&dev->ofdev.dev, &dev_attr_hw_ver); + device_create_file(&dev->ofdev.dev, &dev_attr_max_eq); + device_create_file(&dev->ofdev.dev, &dev_attr_cur_eq); + device_create_file(&dev->ofdev.dev, &dev_attr_max_cq); + device_create_file(&dev->ofdev.dev, &dev_attr_cur_cq); + device_create_file(&dev->ofdev.dev, &dev_attr_max_qp); + device_create_file(&dev->ofdev.dev, &dev_attr_cur_qp); + device_create_file(&dev->ofdev.dev, &dev_attr_max_mr); + device_create_file(&dev->ofdev.dev, &dev_attr_cur_mr); + device_create_file(&dev->ofdev.dev, &dev_attr_max_mw); + device_create_file(&dev->ofdev.dev, &dev_attr_cur_mw); + device_create_file(&dev->ofdev.dev, &dev_attr_max_pd); + device_create_file(&dev->ofdev.dev, &dev_attr_max_ah); +} + +void ehca_remove_device_sysfs(struct ibmebus_dev *dev) +{ + device_remove_file(&dev->ofdev.dev, &dev_attr_adapter_handle); + device_remove_file(&dev->ofdev.dev, &dev_attr_num_ports); + device_remove_file(&dev->ofdev.dev, &dev_attr_hw_ver); + device_remove_file(&dev->ofdev.dev, &dev_attr_max_eq); + 
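+	/*
+	 * Most of these attributes were generated by the
+	 * EHCA_RESOURCE_ATTR() macro above (adapter_handle is defined
+	 * separately); e.g. EHCA_RESOURCE_ATTR(num_ports) expands to an
+	 * ehca_show_num_ports() helper plus
+	 * DEVICE_ATTR(num_ports, S_IRUGO, ehca_show_num_ports, NULL).
+	 * Removal therefore has to mirror ehca_create_device_sysfs()
+	 * entry for entry.
+	 */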
device_remove_file(&dev->ofdev.dev, &dev_attr_cur_eq); + device_remove_file(&dev->ofdev.dev, &dev_attr_max_cq); + device_remove_file(&dev->ofdev.dev, &dev_attr_cur_cq); + device_remove_file(&dev->ofdev.dev, &dev_attr_max_qp); + device_remove_file(&dev->ofdev.dev, &dev_attr_cur_qp); + device_remove_file(&dev->ofdev.dev, &dev_attr_max_mr); + device_remove_file(&dev->ofdev.dev, &dev_attr_cur_mr); + device_remove_file(&dev->ofdev.dev, &dev_attr_max_mw); + device_remove_file(&dev->ofdev.dev, &dev_attr_cur_mw); + device_remove_file(&dev->ofdev.dev, &dev_attr_max_pd); + device_remove_file(&dev->ofdev.dev, &dev_attr_max_ah); +} + +static int __devinit ehca_probe(struct ibmebus_dev *dev, + const struct of_device_id *id) +{ + struct ehca_shca *shca; + u64 *handle; + struct ib_pd *ibpd; + int ret; + + handle = (u64 *)get_property(dev->ofdev.node, "ibm,hca-handle", NULL); + if (!handle) { + ehca_gen_err("Cannot get eHCA handle for adapter: %s.", + dev->ofdev.node->full_name); + return -ENODEV; + } + + if (!(*handle)) { + ehca_gen_err("Wrong eHCA handle for adapter: %s.", + dev->ofdev.node->full_name); + return -ENODEV; + } + + shca = (struct ehca_shca *)ib_alloc_device(sizeof(*shca)); + if (!shca) { + ehca_gen_err("Cannot allocate shca memory."); + return -ENOMEM; + } + + shca->ibmebus_dev = dev; + shca->ipz_hca_handle.handle = *handle; + dev->ofdev.dev.driver_data = shca; + + ret = ehca_sense_attributes(shca); + if (ret < 0) { + ehca_gen_err("Cannot sense eHCA attributes."); + goto probe1; + } + + ret = ehca_register_device(shca); + if (ret) { + ehca_gen_err("Cannot register Infiniband device"); + goto probe1; + } + + /* create event queues */ + ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, 2048); + if (ret) { + ehca_err(&shca->ib_device, "Cannot create EQ."); + goto probe2; + } + + ret = ehca_create_eq(shca, &shca->neq, EHCA_NEQ, 513); + if (ret) { + ehca_err(&shca->ib_device, "Cannot create NEQ."); + goto probe3; + } + + /* create internal protection domain */ + ibpd = ehca_alloc_pd(&shca->ib_device, (void*)(-1), NULL); + if (IS_ERR(ibpd)) { + ehca_err(&shca->ib_device, "Cannot create internal PD."); + ret = PTR_ERR(ibpd); + goto probe4; + } + + shca->pd = container_of(ibpd, struct ehca_pd, ib_pd); + shca->pd->ib_pd.device = &shca->ib_device; + + /* create internal max MR */ + ret = ehca_reg_internal_maxmr(shca, shca->pd, &shca->maxmr); + + if (ret) { + ehca_err(&shca->ib_device, "Cannot create internal MR ret=%x", + ret); + goto probe5; + } + + /* create AQP1 for port 1 */ + if (ehca_open_aqp1 == 1) { + shca->sport[0].port_state = IB_PORT_DOWN; + ret = ehca_create_aqp1(shca, 1); + if (ret) { + ehca_err(&shca->ib_device, + "Cannot create AQP1 for port 1."); + goto probe6; + } + } + + /* create AQP1 for port 2 */ + if ((ehca_open_aqp1 == 1) && (shca->num_ports == 2)) { + shca->sport[1].port_state = IB_PORT_DOWN; + ret = ehca_create_aqp1(shca, 2); + if (ret) { + ehca_err(&shca->ib_device, + "Cannot create AQP1 for port 2."); + goto probe7; + } + } + + ehca_create_device_sysfs(dev); + + spin_lock(&shca_list_lock); + list_add(&shca->shca_list, &shca_list); + spin_unlock(&shca_list_lock); + + return 0; + +probe7: + ret = ehca_destroy_aqp1(&shca->sport[0]); + if (ret) + ehca_err(&shca->ib_device, + "Cannot destroy AQP1 for port 1. ret=%x", ret); + +probe6: + ret = ehca_dereg_internal_maxmr(shca); + if (ret) + ehca_err(&shca->ib_device, + "Cannot destroy internal MR. ret=%x", ret); + +probe5: + ret = ehca_dealloc_pd(&shca->pd->ib_pd); + if (ret) + ehca_err(&shca->ib_device, + "Cannot destroy internal PD. 
ret=%x", ret); + +probe4: + ret = ehca_destroy_eq(shca, &shca->neq); + if (ret) + ehca_err(&shca->ib_device, + "Cannot destroy NEQ. ret=%x", ret); + +probe3: + ret = ehca_destroy_eq(shca, &shca->eq); + if (ret) + ehca_err(&shca->ib_device, + "Cannot destroy EQ. ret=%x", ret); + +probe2: + ib_unregister_device(&shca->ib_device); + +probe1: + ib_dealloc_device(&shca->ib_device); + + return -EINVAL; +} + +static int __devexit ehca_remove(struct ibmebus_dev *dev) +{ + struct ehca_shca *shca = dev->ofdev.dev.driver_data; + int ret; + + ehca_remove_device_sysfs(dev); + + if (ehca_open_aqp1 == 1) { + int i; + for (i = 0; i < shca->num_ports; i++) { + ret = ehca_destroy_aqp1(&shca->sport[i]); + if (ret) + ehca_err(&shca->ib_device, + "Cannot destroy AQP1 for port %x " + "ret=%x", i, ret); + } + } + + ib_unregister_device(&shca->ib_device); + + ret = ehca_dereg_internal_maxmr(shca); + if (ret) + ehca_err(&shca->ib_device, + "Cannot destroy internal MR. ret=%x", ret); + + ret = ehca_dealloc_pd(&shca->pd->ib_pd); + if (ret) + ehca_err(&shca->ib_device, + "Cannot destroy internal PD. ret=%x", ret); + + ret = ehca_destroy_eq(shca, &shca->eq); + if (ret) + ehca_err(&shca->ib_device, "Cannot destroy EQ. ret=%x", ret); + + ret = ehca_destroy_eq(shca, &shca->neq); + if (ret) + ehca_err(&shca->ib_device, "Cannot destroy NEQ. ret=%x", ret); + + ib_dealloc_device(&shca->ib_device); + + spin_lock(&shca_list_lock); + list_del(&shca->shca_list); + spin_unlock(&shca_list_lock); + + return ret; +} + +static struct of_device_id ehca_device_table[] = +{ + { + .name = "lhca", + .compatible = "IBM,lhca", + }, + {}, +}; + +static struct ibmebus_driver ehca_driver = { + .name = "ehca", + .id_table = ehca_device_table, + .probe = ehca_probe, + .remove = ehca_remove, +}; + +void ehca_poll_eqs(unsigned long data) +{ + struct ehca_shca *shca; + + spin_lock(&shca_list_lock); + list_for_each_entry(shca, &shca_list, shca_list) { + if (shca->eq.is_initialized) + ehca_tasklet_eq((unsigned long)(void*)shca); + } + mod_timer(&poll_eqs_timer, jiffies + HZ); + spin_unlock(&shca_list_lock); +} + +int __init ehca_module_init(void) +{ + int ret; + + printk(KERN_INFO "eHCA Infiniband Device Driver " + "(Rel.: SVNEHCA_0016)\n"); + idr_init(&ehca_qp_idr); + idr_init(&ehca_cq_idr); + spin_lock_init(&ehca_qp_idr_lock); + spin_lock_init(&ehca_cq_idr_lock); + + INIT_LIST_HEAD(&shca_list); + spin_lock_init(&shca_list_lock); + + if ((ret = ehca_create_comp_pool())) { + ehca_gen_err("Cannot create comp pool."); + return ret; + } + + if ((ret = ehca_create_slab_caches())) { + ehca_gen_err("Cannot create SLAB caches"); + ret = -ENOMEM; + goto module_init1; + } + + if ((ret = ibmebus_register_driver(&ehca_driver))) { + ehca_gen_err("Cannot register eHCA device driver"); + ret = -EINVAL; + goto module_init2; + } + + ehca_create_driver_sysfs(&ehca_driver); + + if (ehca_poll_all_eqs != 1) { + ehca_gen_err("WARNING!!!"); + ehca_gen_err("It is possible to lose interrupts."); + } else { + init_timer(&poll_eqs_timer); + poll_eqs_timer.function = ehca_poll_eqs; + poll_eqs_timer.expires = jiffies + HZ; + add_timer(&poll_eqs_timer); + } + + return 0; + +module_init2: + ehca_destroy_slab_caches(); + +module_init1: + ehca_destroy_comp_pool(); + return ret; +}; + +void __exit ehca_module_exit(void) +{ + if (ehca_poll_all_eqs == 1) + del_timer_sync(&poll_eqs_timer); + + ehca_remove_driver_sysfs(&ehca_driver); + ibmebus_unregister_driver(&ehca_driver); + + ehca_destroy_slab_caches(); + + ehca_destroy_comp_pool(); + + idr_destroy(&ehca_cq_idr); + 
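+	/*
+	 * Tear-down runs in the reverse order of ehca_module_init():
+	 * EQ poll timer, sysfs, bus driver, slab caches, completion
+	 * pool, and finally the idr trees that were set up first.
+	 */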
idr_destroy(&ehca_qp_idr); +}; + +module_init(ehca_module_init); +module_exit(ehca_module_exit); diff --git a/drivers/infiniband/hw/ehca/ehca_mcast.c b/drivers/infiniband/hw/ehca/ehca_mcast.c new file mode 100644 index 000000000000..32a870660bfe --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_mcast.c @@ -0,0 +1,131 @@ +/* + * IBM eServer eHCA Infiniband device driver for Linux on POWER + * + * mcast functions + * + * Authors: Khadija Souissi <souissik@de.ibm.com> + * Waleri Fomin <fomin@de.ibm.com> + * Reinhard Ernst <rernst@de.ibm.com> + * Hoang-Nam Nguyen <hnguyen@de.ibm.com> + * Heiko J Schick <schickhj@de.ibm.com> + * + * Copyright (c) 2005 IBM Corporation + * + * All rights reserved. + * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. + * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <linux/module.h> +#include <linux/err.h> +#include "ehca_classes.h" +#include "ehca_tools.h" +#include "ehca_qes.h" +#include "ehca_iverbs.h" +#include "hcp_if.h" + +#define MAX_MC_LID 0xFFFE +#define MIN_MC_LID 0xC000 /* Multicast limits */ +#define EHCA_VALID_MULTICAST_GID(gid) ((gid)[0] == 0xFF) +#define EHCA_VALID_MULTICAST_LID(lid) \ + (((lid) >= MIN_MC_LID) && ((lid) <= MAX_MC_LID)) + +int ehca_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) +{ + struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp); + struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca, + ib_device); + union ib_gid my_gid; + u64 subnet_prefix, interface_id, h_ret; + + if (ibqp->qp_type != IB_QPT_UD) { + ehca_err(ibqp->device, "invalid qp_type=%x", ibqp->qp_type); + return -EINVAL; + } + + if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) { + ehca_err(ibqp->device, "invalid multicast gid"); + return -EINVAL; + } else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) { + ehca_err(ibqp->device, "invalid multicast lid=%x", lid); + return -EINVAL; + } + + memcpy(&my_gid.raw, gid->raw, sizeof(union ib_gid)); + + subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix); + interface_id = be64_to_cpu(my_gid.global.interface_id); + h_ret = hipz_h_attach_mcqp(shca->ipz_hca_handle, + my_qp->ipz_qp_handle, + my_qp->galpas.kernel, + lid, subnet_prefix, interface_id); + if (h_ret != H_SUCCESS) + ehca_err(ibqp->device, + "ehca_qp=%p qp_num=%x hipz_h_attach_mcqp() failed " + "h_ret=%lx", my_qp, ibqp->qp_num, h_ret); + + return ehca2ib_return_code(h_ret); +} + +int ehca_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) +{ + struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp); + struct ehca_shca *shca = container_of(ibqp->pd->device, + struct ehca_shca, ib_device); + union ib_gid my_gid; + u64 subnet_prefix, interface_id, h_ret; + + if (ibqp->qp_type != IB_QPT_UD) { + ehca_err(ibqp->device, "invalid qp_type %x", ibqp->qp_type); + return -EINVAL; + } + + if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) { + ehca_err(ibqp->device, "invalid multicast gid"); + return -EINVAL; + } else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) { + ehca_err(ibqp->device, "invalid multicast lid=%x", lid); + return -EINVAL; + } + + memcpy(&my_gid.raw, gid->raw, sizeof(union ib_gid)); + + subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix); + interface_id = be64_to_cpu(my_gid.global.interface_id); + h_ret = hipz_h_detach_mcqp(shca->ipz_hca_handle, + my_qp->ipz_qp_handle, + my_qp->galpas.kernel, + lid, subnet_prefix, interface_id); + if (h_ret != H_SUCCESS) + ehca_err(ibqp->device, + "ehca_qp=%p qp_num=%x hipz_h_detach_mcqp() failed " + "h_ret=%lx", my_qp, ibqp->qp_num, h_ret); + + return ehca2ib_return_code(h_ret); +} diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c new file mode 100644 index 000000000000..5ca65441e1da --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c @@ -0,0 +1,2261 @@ +/* + * IBM eServer eHCA Infiniband device driver for Linux on POWER + * + * MR/MW functions + * + * Authors: Dietmar Decker <ddecker@de.ibm.com> + * Christoph Raisch <raisch@de.ibm.com> + * + * Copyright (c) 2005 IBM Corporation + * + * All rights reserved. + * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. 
+ * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <asm/current.h> + +#include "ehca_iverbs.h" +#include "ehca_mrmw.h" +#include "hcp_if.h" +#include "hipz_hw.h" + +static struct kmem_cache *mr_cache; +static struct kmem_cache *mw_cache; + +static struct ehca_mr *ehca_mr_new(void) +{ + struct ehca_mr *me; + + me = kmem_cache_alloc(mr_cache, SLAB_KERNEL); + if (me) { + memset(me, 0, sizeof(struct ehca_mr)); + spin_lock_init(&me->mrlock); + } else + ehca_gen_err("alloc failed"); + + return me; +} + +static void ehca_mr_delete(struct ehca_mr *me) +{ + kmem_cache_free(mr_cache, me); +} + +static struct ehca_mw *ehca_mw_new(void) +{ + struct ehca_mw *me; + + me = kmem_cache_alloc(mw_cache, SLAB_KERNEL); + if (me) { + memset(me, 0, sizeof(struct ehca_mw)); + spin_lock_init(&me->mwlock); + } else + ehca_gen_err("alloc failed"); + + return me; +} + +static void ehca_mw_delete(struct ehca_mw *me) +{ + kmem_cache_free(mw_cache, me); +} + +/*----------------------------------------------------------------------*/ + +struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags) +{ + struct ib_mr *ib_mr; + int ret; + struct ehca_mr *e_maxmr; + struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); + struct ehca_shca *shca = + container_of(pd->device, struct ehca_shca, ib_device); + + if (shca->maxmr) { + e_maxmr = ehca_mr_new(); + if (!e_maxmr) { + ehca_err(&shca->ib_device, "out of memory"); + ib_mr = ERR_PTR(-ENOMEM); + goto get_dma_mr_exit0; + } + + ret = ehca_reg_maxmr(shca, e_maxmr, (u64*)KERNELBASE, + mr_access_flags, e_pd, + &e_maxmr->ib.ib_mr.lkey, + &e_maxmr->ib.ib_mr.rkey); + if (ret) { + ib_mr = ERR_PTR(ret); + goto get_dma_mr_exit0; + } + ib_mr = &e_maxmr->ib.ib_mr; + } else { + ehca_err(&shca->ib_device, "no internal max-MR exist!"); + ib_mr = ERR_PTR(-EINVAL); + goto get_dma_mr_exit0; + } + +get_dma_mr_exit0: + if (IS_ERR(ib_mr)) + ehca_err(&shca->ib_device, "rc=%lx pd=%p mr_access_flags=%x ", + PTR_ERR(ib_mr), pd, mr_access_flags); + return ib_mr; +} /* end ehca_get_dma_mr() */ + +/*----------------------------------------------------------------------*/ + +struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd, + struct ib_phys_buf *phys_buf_array, + int num_phys_buf, + int mr_access_flags, + 
u64 *iova_start) +{ + struct ib_mr *ib_mr; + int ret; + struct ehca_mr *e_mr; + struct ehca_shca *shca = + container_of(pd->device, struct ehca_shca, ib_device); + struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); + + u64 size; + struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; + u32 num_pages_mr; + u32 num_pages_4k; /* 4k portion "pages" */ + + if ((num_phys_buf <= 0) || !phys_buf_array) { + ehca_err(pd->device, "bad input values: num_phys_buf=%x " + "phys_buf_array=%p", num_phys_buf, phys_buf_array); + ib_mr = ERR_PTR(-EINVAL); + goto reg_phys_mr_exit0; + } + if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) && + !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) || + ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) && + !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) { + /* + * Remote Write Access requires Local Write Access + * Remote Atomic Access requires Local Write Access + */ + ehca_err(pd->device, "bad input values: mr_access_flags=%x", + mr_access_flags); + ib_mr = ERR_PTR(-EINVAL); + goto reg_phys_mr_exit0; + } + + /* check physical buffer list and calculate size */ + ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf, + iova_start, &size); + if (ret) { + ib_mr = ERR_PTR(ret); + goto reg_phys_mr_exit0; + } + if ((size == 0) || + (((u64)iova_start + size) < (u64)iova_start)) { + ehca_err(pd->device, "bad input values: size=%lx iova_start=%p", + size, iova_start); + ib_mr = ERR_PTR(-EINVAL); + goto reg_phys_mr_exit0; + } + + e_mr = ehca_mr_new(); + if (!e_mr) { + ehca_err(pd->device, "out of memory"); + ib_mr = ERR_PTR(-ENOMEM); + goto reg_phys_mr_exit0; + } + + /* determine number of MR pages */ + num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size + + PAGE_SIZE - 1) / PAGE_SIZE); + num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size + + EHCA_PAGESIZE - 1) / EHCA_PAGESIZE); + + /* register MR on HCA */ + if (ehca_mr_is_maxmr(size, iova_start)) { + e_mr->flags |= EHCA_MR_FLAG_MAXMR; + ret = ehca_reg_maxmr(shca, e_mr, iova_start, mr_access_flags, + e_pd, &e_mr->ib.ib_mr.lkey, + &e_mr->ib.ib_mr.rkey); + if (ret) { + ib_mr = ERR_PTR(ret); + goto reg_phys_mr_exit1; + } + } else { + pginfo.type = EHCA_MR_PGI_PHYS; + pginfo.num_pages = num_pages_mr; + pginfo.num_4k = num_pages_4k; + pginfo.num_phys_buf = num_phys_buf; + pginfo.phys_buf_array = phys_buf_array; + pginfo.next_4k = (((u64)iova_start & ~PAGE_MASK) / + EHCA_PAGESIZE); + + ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags, + e_pd, &pginfo, &e_mr->ib.ib_mr.lkey, + &e_mr->ib.ib_mr.rkey); + if (ret) { + ib_mr = ERR_PTR(ret); + goto reg_phys_mr_exit1; + } + } + + /* successful registration of all pages */ + return &e_mr->ib.ib_mr; + +reg_phys_mr_exit1: + ehca_mr_delete(e_mr); +reg_phys_mr_exit0: + if (IS_ERR(ib_mr)) + ehca_err(pd->device, "rc=%lx pd=%p phys_buf_array=%p " + "num_phys_buf=%x mr_access_flags=%x iova_start=%p", + PTR_ERR(ib_mr), pd, phys_buf_array, + num_phys_buf, mr_access_flags, iova_start); + return ib_mr; +} /* end ehca_reg_phys_mr() */ + +/*----------------------------------------------------------------------*/ + +struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, + struct ib_umem *region, + int mr_access_flags, + struct ib_udata *udata) +{ + struct ib_mr *ib_mr; + struct ehca_mr *e_mr; + struct ehca_shca *shca = + container_of(pd->device, struct ehca_shca, ib_device); + struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); + struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; + int ret; + u32 num_pages_mr; + u32 num_pages_4k; 
/* 4k portion "pages" */ + + if (!pd) { + ehca_gen_err("bad pd=%p", pd); + return ERR_PTR(-EFAULT); + } + if (!region) { + ehca_err(pd->device, "bad input values: region=%p", region); + ib_mr = ERR_PTR(-EINVAL); + goto reg_user_mr_exit0; + } + if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) && + !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) || + ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) && + !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) { + /* + * Remote Write Access requires Local Write Access + * Remote Atomic Access requires Local Write Access + */ + ehca_err(pd->device, "bad input values: mr_access_flags=%x", + mr_access_flags); + ib_mr = ERR_PTR(-EINVAL); + goto reg_user_mr_exit0; + } + if (region->page_size != PAGE_SIZE) { + ehca_err(pd->device, "page size not supported, " + "region->page_size=%x", region->page_size); + ib_mr = ERR_PTR(-EINVAL); + goto reg_user_mr_exit0; + } + + if ((region->length == 0) || + ((region->virt_base + region->length) < region->virt_base)) { + ehca_err(pd->device, "bad input values: length=%lx " + "virt_base=%lx", region->length, region->virt_base); + ib_mr = ERR_PTR(-EINVAL); + goto reg_user_mr_exit0; + } + + e_mr = ehca_mr_new(); + if (!e_mr) { + ehca_err(pd->device, "out of memory"); + ib_mr = ERR_PTR(-ENOMEM); + goto reg_user_mr_exit0; + } + + /* determine number of MR pages */ + num_pages_mr = (((region->virt_base % PAGE_SIZE) + region->length + + PAGE_SIZE - 1) / PAGE_SIZE); + num_pages_4k = (((region->virt_base % EHCA_PAGESIZE) + region->length + + EHCA_PAGESIZE - 1) / EHCA_PAGESIZE); + + /* register MR on HCA */ + pginfo.type = EHCA_MR_PGI_USER; + pginfo.num_pages = num_pages_mr; + pginfo.num_4k = num_pages_4k; + pginfo.region = region; + pginfo.next_4k = region->offset / EHCA_PAGESIZE; + pginfo.next_chunk = list_prepare_entry(pginfo.next_chunk, + (&region->chunk_list), + list); + + ret = ehca_reg_mr(shca, e_mr, (u64*)region->virt_base, + region->length, mr_access_flags, e_pd, &pginfo, + &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey); + if (ret) { + ib_mr = ERR_PTR(ret); + goto reg_user_mr_exit1; + } + + /* successful registration of all pages */ + return &e_mr->ib.ib_mr; + +reg_user_mr_exit1: + ehca_mr_delete(e_mr); +reg_user_mr_exit0: + if (IS_ERR(ib_mr)) + ehca_err(pd->device, "rc=%lx pd=%p region=%p mr_access_flags=%x" + " udata=%p", + PTR_ERR(ib_mr), pd, region, mr_access_flags, udata); + return ib_mr; +} /* end ehca_reg_user_mr() */ + +/*----------------------------------------------------------------------*/ + +int ehca_rereg_phys_mr(struct ib_mr *mr, + int mr_rereg_mask, + struct ib_pd *pd, + struct ib_phys_buf *phys_buf_array, + int num_phys_buf, + int mr_access_flags, + u64 *iova_start) +{ + int ret; + + struct ehca_shca *shca = + container_of(mr->device, struct ehca_shca, ib_device); + struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr); + struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd); + u64 new_size; + u64 *new_start; + u32 new_acl; + struct ehca_pd *new_pd; + u32 tmp_lkey, tmp_rkey; + unsigned long sl_flags; + u32 num_pages_mr = 0; + u32 num_pages_4k = 0; /* 4k portion "pages" */ + struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; + u32 cur_pid = current->tgid; + + if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context && + (my_pd->ownpid != cur_pid)) { + ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x", + cur_pid, my_pd->ownpid); + ret = -EINVAL; + goto rereg_phys_mr_exit0; + } + + if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) { + /* TODO not supported, because PHYP rereg hCall 
needs pages */ + ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not " + "supported yet, mr_rereg_mask=%x", mr_rereg_mask); + ret = -EINVAL; + goto rereg_phys_mr_exit0; + } + + if (mr_rereg_mask & IB_MR_REREG_PD) { + if (!pd) { + ehca_err(mr->device, "rereg with bad pd, pd=%p " + "mr_rereg_mask=%x", pd, mr_rereg_mask); + ret = -EINVAL; + goto rereg_phys_mr_exit0; + } + } + + if ((mr_rereg_mask & + ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) || + (mr_rereg_mask == 0)) { + ret = -EINVAL; + goto rereg_phys_mr_exit0; + } + + /* check other parameters */ + if (e_mr == shca->maxmr) { + /* should be impossible, however reject to be sure */ + ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p " + "shca->maxmr=%p mr->lkey=%x", + mr, shca->maxmr, mr->lkey); + ret = -EINVAL; + goto rereg_phys_mr_exit0; + } + if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. addr/size */ + if (e_mr->flags & EHCA_MR_FLAG_FMR) { + ehca_err(mr->device, "not supported for FMR, mr=%p " + "flags=%x", mr, e_mr->flags); + ret = -EINVAL; + goto rereg_phys_mr_exit0; + } + if (!phys_buf_array || num_phys_buf <= 0) { + ehca_err(mr->device, "bad input values: mr_rereg_mask=%x" + " phys_buf_array=%p num_phys_buf=%x", + mr_rereg_mask, phys_buf_array, num_phys_buf); + ret = -EINVAL; + goto rereg_phys_mr_exit0; + } + } + if ((mr_rereg_mask & IB_MR_REREG_ACCESS) && /* change ACL */ + (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) && + !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) || + ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) && + !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) { + /* + * Remote Write Access requires Local Write Access + * Remote Atomic Access requires Local Write Access + */ + ehca_err(mr->device, "bad input values: mr_rereg_mask=%x " + "mr_access_flags=%x", mr_rereg_mask, mr_access_flags); + ret = -EINVAL; + goto rereg_phys_mr_exit0; + } + + /* set requested values dependent on rereg request */ + spin_lock_irqsave(&e_mr->mrlock, sl_flags); + new_start = e_mr->start; /* new == old address */ + new_size = e_mr->size; /* new == old length */ + new_acl = e_mr->acl; /* new == old access control */ + new_pd = container_of(mr->pd,struct ehca_pd,ib_pd); /*new == old PD*/ + + if (mr_rereg_mask & IB_MR_REREG_TRANS) { + new_start = iova_start; /* change address */ + /* check physical buffer list and calculate size */ + ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, + num_phys_buf, iova_start, + &new_size); + if (ret) + goto rereg_phys_mr_exit1; + if ((new_size == 0) || + (((u64)iova_start + new_size) < (u64)iova_start)) { + ehca_err(mr->device, "bad input values: new_size=%lx " + "iova_start=%p", new_size, iova_start); + ret = -EINVAL; + goto rereg_phys_mr_exit1; + } + num_pages_mr = ((((u64)new_start % PAGE_SIZE) + new_size + + PAGE_SIZE - 1) / PAGE_SIZE); + num_pages_4k = ((((u64)new_start % EHCA_PAGESIZE) + new_size + + EHCA_PAGESIZE - 1) / EHCA_PAGESIZE); + pginfo.type = EHCA_MR_PGI_PHYS; + pginfo.num_pages = num_pages_mr; + pginfo.num_4k = num_pages_4k; + pginfo.num_phys_buf = num_phys_buf; + pginfo.phys_buf_array = phys_buf_array; + pginfo.next_4k = (((u64)iova_start & ~PAGE_MASK) / + EHCA_PAGESIZE); + } + if (mr_rereg_mask & IB_MR_REREG_ACCESS) + new_acl = mr_access_flags; + if (mr_rereg_mask & IB_MR_REREG_PD) + new_pd = container_of(pd, struct ehca_pd, ib_pd); + + ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl, + new_pd, &pginfo, &tmp_lkey, &tmp_rkey); + if (ret) + goto rereg_phys_mr_exit1; + + /* successful reregistration */ + if (mr_rereg_mask & IB_MR_REREG_PD) 
+ mr->pd = pd; + mr->lkey = tmp_lkey; + mr->rkey = tmp_rkey; + +rereg_phys_mr_exit1: + spin_unlock_irqrestore(&e_mr->mrlock, sl_flags); +rereg_phys_mr_exit0: + if (ret) + ehca_err(mr->device, "ret=%x mr=%p mr_rereg_mask=%x pd=%p " + "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x " + "iova_start=%p", + ret, mr, mr_rereg_mask, pd, phys_buf_array, + num_phys_buf, mr_access_flags, iova_start); + return ret; +} /* end ehca_rereg_phys_mr() */ + +/*----------------------------------------------------------------------*/ + +int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr) +{ + int ret = 0; + u64 h_ret; + struct ehca_shca *shca = + container_of(mr->device, struct ehca_shca, ib_device); + struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr); + struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd); + u32 cur_pid = current->tgid; + unsigned long sl_flags; + struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0}; + + if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context && + (my_pd->ownpid != cur_pid)) { + ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x", + cur_pid, my_pd->ownpid); + ret = -EINVAL; + goto query_mr_exit0; + } + + if ((e_mr->flags & EHCA_MR_FLAG_FMR)) { + ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p " + "e_mr->flags=%x", mr, e_mr, e_mr->flags); + ret = -EINVAL; + goto query_mr_exit0; + } + + memset(mr_attr, 0, sizeof(struct ib_mr_attr)); + spin_lock_irqsave(&e_mr->mrlock, sl_flags); + + h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout); + if (h_ret != H_SUCCESS) { + ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lx mr=%p " + "hca_hndl=%lx mr_hndl=%lx lkey=%x", + h_ret, mr, shca->ipz_hca_handle.handle, + e_mr->ipz_mr_handle.handle, mr->lkey); + ret = ehca_mrmw_map_hrc_query_mr(h_ret); + goto query_mr_exit1; + } + mr_attr->pd = mr->pd; + mr_attr->device_virt_addr = hipzout.vaddr; + mr_attr->size = hipzout.len; + mr_attr->lkey = hipzout.lkey; + mr_attr->rkey = hipzout.rkey; + ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags); + +query_mr_exit1: + spin_unlock_irqrestore(&e_mr->mrlock, sl_flags); +query_mr_exit0: + if (ret) + ehca_err(mr->device, "ret=%x mr=%p mr_attr=%p", + ret, mr, mr_attr); + return ret; +} /* end ehca_query_mr() */ + +/*----------------------------------------------------------------------*/ + +int ehca_dereg_mr(struct ib_mr *mr) +{ + int ret = 0; + u64 h_ret; + struct ehca_shca *shca = + container_of(mr->device, struct ehca_shca, ib_device); + struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr); + struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd); + u32 cur_pid = current->tgid; + + if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context && + (my_pd->ownpid != cur_pid)) { + ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x", + cur_pid, my_pd->ownpid); + ret = -EINVAL; + goto dereg_mr_exit0; + } + + if ((e_mr->flags & EHCA_MR_FLAG_FMR)) { + ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p " + "e_mr->flags=%x", mr, e_mr, e_mr->flags); + ret = -EINVAL; + goto dereg_mr_exit0; + } else if (e_mr == shca->maxmr) { + /* should be impossible, however reject to be sure */ + ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p " + "shca->maxmr=%p mr->lkey=%x", + mr, shca->maxmr, mr->lkey); + ret = -EINVAL; + goto dereg_mr_exit0; + } + + /* TODO: BUSY: MR still has bound window(s) */ + h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr); + if (h_ret != H_SUCCESS) { + ehca_err(mr->device, "hipz_free_mr failed, 
h_ret=%lx shca=%p " + "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x", + h_ret, shca, e_mr, shca->ipz_hca_handle.handle, + e_mr->ipz_mr_handle.handle, mr->lkey); + ret = ehca_mrmw_map_hrc_free_mr(h_ret); + goto dereg_mr_exit0; + } + + /* successful deregistration */ + ehca_mr_delete(e_mr); + +dereg_mr_exit0: + if (ret) + ehca_err(mr->device, "ret=%x mr=%p", ret, mr); + return ret; +} /* end ehca_dereg_mr() */ + +/*----------------------------------------------------------------------*/ + +struct ib_mw *ehca_alloc_mw(struct ib_pd *pd) +{ + struct ib_mw *ib_mw; + u64 h_ret; + struct ehca_mw *e_mw; + struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); + struct ehca_shca *shca = + container_of(pd->device, struct ehca_shca, ib_device); + struct ehca_mw_hipzout_parms hipzout = {{0},0}; + + e_mw = ehca_mw_new(); + if (!e_mw) { + ib_mw = ERR_PTR(-ENOMEM); + goto alloc_mw_exit0; + } + + h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw, + e_pd->fw_pd, &hipzout); + if (h_ret != H_SUCCESS) { + ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lx " + "shca=%p hca_hndl=%lx mw=%p", + h_ret, shca, shca->ipz_hca_handle.handle, e_mw); + ib_mw = ERR_PTR(ehca_mrmw_map_hrc_alloc(h_ret)); + goto alloc_mw_exit1; + } + /* successful MW allocation */ + e_mw->ipz_mw_handle = hipzout.handle; + e_mw->ib_mw.rkey = hipzout.rkey; + return &e_mw->ib_mw; + +alloc_mw_exit1: + ehca_mw_delete(e_mw); +alloc_mw_exit0: + if (IS_ERR(ib_mw)) + ehca_err(pd->device, "rc=%lx pd=%p", PTR_ERR(ib_mw), pd); + return ib_mw; +} /* end ehca_alloc_mw() */ + +/*----------------------------------------------------------------------*/ + +int ehca_bind_mw(struct ib_qp *qp, + struct ib_mw *mw, + struct ib_mw_bind *mw_bind) +{ + /* TODO: not supported up to now */ + ehca_gen_err("bind MW currently not supported by HCAD"); + + return -EPERM; +} /* end ehca_bind_mw() */ + +/*----------------------------------------------------------------------*/ + +int ehca_dealloc_mw(struct ib_mw *mw) +{ + u64 h_ret; + struct ehca_shca *shca = + container_of(mw->device, struct ehca_shca, ib_device); + struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw); + + h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw); + if (h_ret != H_SUCCESS) { + ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lx shca=%p " + "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx", + h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle, + e_mw->ipz_mw_handle.handle); + return ehca_mrmw_map_hrc_free_mw(h_ret); + } + /* successful deallocation */ + ehca_mw_delete(e_mw); + return 0; +} /* end ehca_dealloc_mw() */ + +/*----------------------------------------------------------------------*/ + +struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd, + int mr_access_flags, + struct ib_fmr_attr *fmr_attr) +{ + struct ib_fmr *ib_fmr; + struct ehca_shca *shca = + container_of(pd->device, struct ehca_shca, ib_device); + struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); + struct ehca_mr *e_fmr; + int ret; + u32 tmp_lkey, tmp_rkey; + struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; + + /* check other parameters */ + if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) && + !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) || + ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) && + !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) { + /* + * Remote Write Access requires Local Write Access + * Remote Atomic Access requires Local Write Access + */ + ehca_err(pd->device, "bad input values: mr_access_flags=%x", + mr_access_flags); + ib_fmr = ERR_PTR(-EINVAL); + 
goto alloc_fmr_exit0; + } + if (mr_access_flags & IB_ACCESS_MW_BIND) { + ehca_err(pd->device, "bad input values: mr_access_flags=%x", + mr_access_flags); + ib_fmr = ERR_PTR(-EINVAL); + goto alloc_fmr_exit0; + } + if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) { + ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x " + "fmr_attr->max_maps=%x fmr_attr->page_shift=%x", + fmr_attr->max_pages, fmr_attr->max_maps, + fmr_attr->page_shift); + ib_fmr = ERR_PTR(-EINVAL); + goto alloc_fmr_exit0; + } + if (((1 << fmr_attr->page_shift) != EHCA_PAGESIZE) && + ((1 << fmr_attr->page_shift) != PAGE_SIZE)) { + ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x", + fmr_attr->page_shift); + ib_fmr = ERR_PTR(-EINVAL); + goto alloc_fmr_exit0; + } + + e_fmr = ehca_mr_new(); + if (!e_fmr) { + ib_fmr = ERR_PTR(-ENOMEM); + goto alloc_fmr_exit0; + } + e_fmr->flags |= EHCA_MR_FLAG_FMR; + + /* register MR on HCA */ + ret = ehca_reg_mr(shca, e_fmr, NULL, + fmr_attr->max_pages * (1 << fmr_attr->page_shift), + mr_access_flags, e_pd, &pginfo, + &tmp_lkey, &tmp_rkey); + if (ret) { + ib_fmr = ERR_PTR(ret); + goto alloc_fmr_exit1; + } + + /* successful */ + e_fmr->fmr_page_size = 1 << fmr_attr->page_shift; + e_fmr->fmr_max_pages = fmr_attr->max_pages; + e_fmr->fmr_max_maps = fmr_attr->max_maps; + e_fmr->fmr_map_cnt = 0; + return &e_fmr->ib.ib_fmr; + +alloc_fmr_exit1: + ehca_mr_delete(e_fmr); +alloc_fmr_exit0: + if (IS_ERR(ib_fmr)) + ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x " + "fmr_attr=%p", PTR_ERR(ib_fmr), pd, + mr_access_flags, fmr_attr); + return ib_fmr; +} /* end ehca_alloc_fmr() */ + +/*----------------------------------------------------------------------*/ + +int ehca_map_phys_fmr(struct ib_fmr *fmr, + u64 *page_list, + int list_len, + u64 iova) +{ + int ret; + struct ehca_shca *shca = + container_of(fmr->device, struct ehca_shca, ib_device); + struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr); + struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd); + struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; + u32 tmp_lkey, tmp_rkey; + + if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) { + ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x", + e_fmr, e_fmr->flags); + ret = -EINVAL; + goto map_phys_fmr_exit0; + } + ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len); + if (ret) + goto map_phys_fmr_exit0; + if (iova % e_fmr->fmr_page_size) { + /* only whole-numbered pages */ + ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x", + iova, e_fmr->fmr_page_size); + ret = -EINVAL; + goto map_phys_fmr_exit0; + } + if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) { + /* HCAD does not limit the maps, however trace this anyway */ + ehca_info(fmr->device, "map limit exceeded, fmr=%p " + "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x", + fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps); + } + + pginfo.type = EHCA_MR_PGI_FMR; + pginfo.num_pages = list_len; + pginfo.num_4k = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE); + pginfo.page_list = page_list; + pginfo.next_4k = ((iova & (e_fmr->fmr_page_size-1)) / + EHCA_PAGESIZE); + + ret = ehca_rereg_mr(shca, e_fmr, (u64*)iova, + list_len * e_fmr->fmr_page_size, + e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey); + if (ret) + goto map_phys_fmr_exit0; + + /* successful reregistration */ + e_fmr->fmr_map_cnt++; + e_fmr->ib.ib_fmr.lkey = tmp_lkey; + e_fmr->ib.ib_fmr.rkey = tmp_rkey; + return 0; + +map_phys_fmr_exit0: + if (ret) + ehca_err(fmr->device, "ret=%x fmr=%p page_list=%p 
list_len=%x " + "iova=%lx", + ret, fmr, page_list, list_len, iova); + return ret; +} /* end ehca_map_phys_fmr() */ + +/*----------------------------------------------------------------------*/ + +int ehca_unmap_fmr(struct list_head *fmr_list) +{ + int ret = 0; + struct ib_fmr *ib_fmr; + struct ehca_shca *shca = NULL; + struct ehca_shca *prev_shca; + struct ehca_mr *e_fmr; + u32 num_fmr = 0; + u32 unmap_fmr_cnt = 0; + + /* check all FMR belong to same SHCA, and check internal flag */ + list_for_each_entry(ib_fmr, fmr_list, list) { + prev_shca = shca; + if (!ib_fmr) { + ehca_gen_err("bad fmr=%p in list", ib_fmr); + ret = -EINVAL; + goto unmap_fmr_exit0; + } + shca = container_of(ib_fmr->device, struct ehca_shca, + ib_device); + e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr); + if ((shca != prev_shca) && prev_shca) { + ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p " + "prev_shca=%p e_fmr=%p", + shca, prev_shca, e_fmr); + ret = -EINVAL; + goto unmap_fmr_exit0; + } + if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) { + ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p " + "e_fmr->flags=%x", e_fmr, e_fmr->flags); + ret = -EINVAL; + goto unmap_fmr_exit0; + } + num_fmr++; + } + + /* loop over all FMRs to unmap */ + list_for_each_entry(ib_fmr, fmr_list, list) { + unmap_fmr_cnt++; + e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr); + shca = container_of(ib_fmr->device, struct ehca_shca, + ib_device); + ret = ehca_unmap_one_fmr(shca, e_fmr); + if (ret) { + /* unmap failed, stop unmapping of rest of FMRs */ + ehca_err(&shca->ib_device, "unmap of one FMR failed, " + "stop rest, e_fmr=%p num_fmr=%x " + "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr, + unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey); + goto unmap_fmr_exit0; + } + } + +unmap_fmr_exit0: + if (ret) + ehca_gen_err("ret=%x fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x", + ret, fmr_list, num_fmr, unmap_fmr_cnt); + return ret; +} /* end ehca_unmap_fmr() */ + +/*----------------------------------------------------------------------*/ + +int ehca_dealloc_fmr(struct ib_fmr *fmr) +{ + int ret; + u64 h_ret; + struct ehca_shca *shca = + container_of(fmr->device, struct ehca_shca, ib_device); + struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr); + + if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) { + ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x", + e_fmr, e_fmr->flags); + ret = -EINVAL; + goto free_fmr_exit0; + } + + h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr); + if (h_ret != H_SUCCESS) { + ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lx e_fmr=%p " + "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x", + h_ret, e_fmr, shca->ipz_hca_handle.handle, + e_fmr->ipz_mr_handle.handle, fmr->lkey); + ret = ehca_mrmw_map_hrc_free_mr(h_ret); + goto free_fmr_exit0; + } + /* successful deregistration */ + ehca_mr_delete(e_fmr); + return 0; + +free_fmr_exit0: + if (ret) + ehca_err(&shca->ib_device, "ret=%x fmr=%p", ret, fmr); + return ret; +} /* end ehca_dealloc_fmr() */ + +/*----------------------------------------------------------------------*/ + +int ehca_reg_mr(struct ehca_shca *shca, + struct ehca_mr *e_mr, + u64 *iova_start, + u64 size, + int acl, + struct ehca_pd *e_pd, + struct ehca_mr_pginfo *pginfo, + u32 *lkey, /*OUT*/ + u32 *rkey) /*OUT*/ +{ + int ret; + u64 h_ret; + u32 hipz_acl; + struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0}; + + ehca_mrmw_map_acl(acl, &hipz_acl); + ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl); + if (ehca_use_hp_mr == 1) + hipz_acl |= 0x00000001; + + h_ret = 
hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr, + (u64)iova_start, size, hipz_acl, + e_pd->fw_pd, &hipzout); + if (h_ret != H_SUCCESS) { + ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lx " + "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle); + ret = ehca_mrmw_map_hrc_alloc(h_ret); + goto ehca_reg_mr_exit0; + } + + e_mr->ipz_mr_handle = hipzout.handle; + + ret = ehca_reg_mr_rpages(shca, e_mr, pginfo); + if (ret) + goto ehca_reg_mr_exit1; + + /* successful registration */ + e_mr->num_pages = pginfo->num_pages; + e_mr->num_4k = pginfo->num_4k; + e_mr->start = iova_start; + e_mr->size = size; + e_mr->acl = acl; + *lkey = hipzout.lkey; + *rkey = hipzout.rkey; + return 0; + +ehca_reg_mr_exit1: + h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr); + if (h_ret != H_SUCCESS) { + ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p " + "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x " + "pginfo=%p num_pages=%lx num_4k=%lx ret=%x", + h_ret, shca, e_mr, iova_start, size, acl, e_pd, + hipzout.lkey, pginfo, pginfo->num_pages, + pginfo->num_4k, ret); + ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, " + "not recoverable"); + } +ehca_reg_mr_exit0: + if (ret) + ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p " + "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p " + "num_pages=%lx num_4k=%lx", + ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo, + pginfo->num_pages, pginfo->num_4k); + return ret; +} /* end ehca_reg_mr() */ + +/*----------------------------------------------------------------------*/ + +int ehca_reg_mr_rpages(struct ehca_shca *shca, + struct ehca_mr *e_mr, + struct ehca_mr_pginfo *pginfo) +{ + int ret = 0; + u64 h_ret; + u32 rnum; + u64 rpage; + u32 i; + u64 *kpage; + + kpage = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); + if (!kpage) { + ehca_err(&shca->ib_device, "kpage alloc failed"); + ret = -ENOMEM; + goto ehca_reg_mr_rpages_exit0; + } + + /* max 512 pages per shot */ + for (i = 0; i < ((pginfo->num_4k + 512 - 1) / 512); i++) { + + if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) { + rnum = pginfo->num_4k % 512; /* last shot */ + if (rnum == 0) + rnum = 512; /* last shot is full */ + } else + rnum = 512; + + if (rnum > 1) { + ret = ehca_set_pagebuf(e_mr, pginfo, rnum, kpage); + if (ret) { + ehca_err(&shca->ib_device, "ehca_set_pagebuf " + "bad rc, ret=%x rnum=%x kpage=%p", + ret, rnum, kpage); + ret = -EFAULT; + goto ehca_reg_mr_rpages_exit1; + } + rpage = virt_to_abs(kpage); + if (!rpage) { + ehca_err(&shca->ib_device, "kpage=%p i=%x", + kpage, i); + ret = -EFAULT; + goto ehca_reg_mr_rpages_exit1; + } + } else { /* rnum==1 */ + ret = ehca_set_pagebuf_1(e_mr, pginfo, &rpage); + if (ret) { + ehca_err(&shca->ib_device, "ehca_set_pagebuf_1 " + "bad rc, ret=%x i=%x", ret, i); + ret = -EFAULT; + goto ehca_reg_mr_rpages_exit1; + } + } + + h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, e_mr, + 0, /* pagesize 4k */ + 0, rpage, rnum); + + if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) { + /* + * check for 'registration complete'==H_SUCCESS + * and for 'page registered'==H_PAGE_REGISTERED + */ + if (h_ret != H_SUCCESS) { + ehca_err(&shca->ib_device, "last " + "hipz_reg_rpage_mr failed, h_ret=%lx " + "e_mr=%p i=%x hca_hndl=%lx mr_hndl=%lx" + " lkey=%x", h_ret, e_mr, i, + shca->ipz_hca_handle.handle, + e_mr->ipz_mr_handle.handle, + e_mr->ib.ib_mr.lkey); + ret = ehca_mrmw_map_hrc_rrpg_last(h_ret); + break; + } else + ret = 0; + } else if (h_ret != H_PAGE_REGISTERED) { + ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, " + "h_ret=%lx e_mr=%p i=%x 
lkey=%x hca_hndl=%lx " + "mr_hndl=%lx", h_ret, e_mr, i, + e_mr->ib.ib_mr.lkey, + shca->ipz_hca_handle.handle, + e_mr->ipz_mr_handle.handle); + ret = ehca_mrmw_map_hrc_rrpg_notlast(h_ret); + break; + } else + ret = 0; + } /* end for(i) */ + + +ehca_reg_mr_rpages_exit1: + kfree(kpage); +ehca_reg_mr_rpages_exit0: + if (ret) + ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p " + "num_pages=%lx num_4k=%lx", ret, shca, e_mr, pginfo, + pginfo->num_pages, pginfo->num_4k); + return ret; +} /* end ehca_reg_mr_rpages() */ + +/*----------------------------------------------------------------------*/ + +inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca, + struct ehca_mr *e_mr, + u64 *iova_start, + u64 size, + u32 acl, + struct ehca_pd *e_pd, + struct ehca_mr_pginfo *pginfo, + u32 *lkey, /*OUT*/ + u32 *rkey) /*OUT*/ +{ + int ret; + u64 h_ret; + u32 hipz_acl; + u64 *kpage; + u64 rpage; + struct ehca_mr_pginfo pginfo_save; + struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0}; + + ehca_mrmw_map_acl(acl, &hipz_acl); + ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl); + + kpage = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); + if (!kpage) { + ehca_err(&shca->ib_device, "kpage alloc failed"); + ret = -ENOMEM; + goto ehca_rereg_mr_rereg1_exit0; + } + + pginfo_save = *pginfo; + ret = ehca_set_pagebuf(e_mr, pginfo, pginfo->num_4k, kpage); + if (ret) { + ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p " + "pginfo=%p type=%x num_pages=%lx num_4k=%lx kpage=%p", + e_mr, pginfo, pginfo->type, pginfo->num_pages, + pginfo->num_4k,kpage); + goto ehca_rereg_mr_rereg1_exit1; + } + rpage = virt_to_abs(kpage); + if (!rpage) { + ehca_err(&shca->ib_device, "kpage=%p", kpage); + ret = -EFAULT; + goto ehca_rereg_mr_rereg1_exit1; + } + h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr, + (u64)iova_start, size, hipz_acl, + e_pd->fw_pd, rpage, &hipzout); + if (h_ret != H_SUCCESS) { + /* + * reregistration unsuccessful, try it again with the 3 hCalls, + * e.g. 
this is required in case H_MR_CONDITION + * (MW bound or MR is shared) + */ + ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed " + "(Rereg1), h_ret=%lx e_mr=%p", h_ret, e_mr); + *pginfo = pginfo_save; + ret = -EAGAIN; + } else if ((u64*)hipzout.vaddr != iova_start) { + ehca_err(&shca->ib_device, "PHYP changed iova_start in " + "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p " + "mr_handle=%lx lkey=%x lkey_out=%x", iova_start, + hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle, + e_mr->ib.ib_mr.lkey, hipzout.lkey); + ret = -EFAULT; + } else { + /* + * successful reregistration + * note: start and start_out are identical for eServer HCAs + */ + e_mr->num_pages = pginfo->num_pages; + e_mr->num_4k = pginfo->num_4k; + e_mr->start = iova_start; + e_mr->size = size; + e_mr->acl = acl; + *lkey = hipzout.lkey; + *rkey = hipzout.rkey; + } + +ehca_rereg_mr_rereg1_exit1: + kfree(kpage); +ehca_rereg_mr_rereg1_exit0: + if ( ret && (ret != -EAGAIN) ) + ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x " + "pginfo=%p num_pages=%lx num_4k=%lx", + ret, *lkey, *rkey, pginfo, pginfo->num_pages, + pginfo->num_4k); + return ret; +} /* end ehca_rereg_mr_rereg1() */ + +/*----------------------------------------------------------------------*/ + +int ehca_rereg_mr(struct ehca_shca *shca, + struct ehca_mr *e_mr, + u64 *iova_start, + u64 size, + int acl, + struct ehca_pd *e_pd, + struct ehca_mr_pginfo *pginfo, + u32 *lkey, + u32 *rkey) +{ + int ret = 0; + u64 h_ret; + int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */ + int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */ + + /* first determine reregistration hCall(s) */ + if ((pginfo->num_4k > 512) || (e_mr->num_4k > 512) || + (pginfo->num_4k > e_mr->num_4k)) { + ehca_dbg(&shca->ib_device, "Rereg3 case, pginfo->num_4k=%lx " + "e_mr->num_4k=%x", pginfo->num_4k, e_mr->num_4k); + rereg_1_hcall = 0; + rereg_3_hcall = 1; + } + + if (e_mr->flags & EHCA_MR_FLAG_MAXMR) { /* check for max-MR */ + rereg_1_hcall = 0; + rereg_3_hcall = 1; + e_mr->flags &= ~EHCA_MR_FLAG_MAXMR; + ehca_err(&shca->ib_device, "Rereg MR for max-MR! 
e_mr=%p", + e_mr); + } + + if (rereg_1_hcall) { + ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size, + acl, e_pd, pginfo, lkey, rkey); + if (ret) { + if (ret == -EAGAIN) + rereg_3_hcall = 1; + else + goto ehca_rereg_mr_exit0; + } + } + + if (rereg_3_hcall) { + struct ehca_mr save_mr; + + /* first deregister old MR */ + h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr); + if (h_ret != H_SUCCESS) { + ehca_err(&shca->ib_device, "hipz_free_mr failed, " + "h_ret=%lx e_mr=%p hca_hndl=%lx mr_hndl=%lx " + "mr->lkey=%x", + h_ret, e_mr, shca->ipz_hca_handle.handle, + e_mr->ipz_mr_handle.handle, + e_mr->ib.ib_mr.lkey); + ret = ehca_mrmw_map_hrc_free_mr(h_ret); + goto ehca_rereg_mr_exit0; + } + /* clean ehca_mr_t, without changing struct ib_mr and lock */ + save_mr = *e_mr; + ehca_mr_deletenew(e_mr); + + /* set some MR values */ + e_mr->flags = save_mr.flags; + e_mr->fmr_page_size = save_mr.fmr_page_size; + e_mr->fmr_max_pages = save_mr.fmr_max_pages; + e_mr->fmr_max_maps = save_mr.fmr_max_maps; + e_mr->fmr_map_cnt = save_mr.fmr_map_cnt; + + ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl, + e_pd, pginfo, lkey, rkey); + if (ret) { + u32 offset = (u64)(&e_mr->flags) - (u64)e_mr; + memcpy(&e_mr->flags, &(save_mr.flags), + sizeof(struct ehca_mr) - offset); + goto ehca_rereg_mr_exit0; + } + } + +ehca_rereg_mr_exit0: + if (ret) + ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p " + "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p " + "num_pages=%lx lkey=%x rkey=%x rereg_1_hcall=%x " + "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size, + acl, e_pd, pginfo, pginfo->num_pages, *lkey, *rkey, + rereg_1_hcall, rereg_3_hcall); + return ret; +} /* end ehca_rereg_mr() */ + +/*----------------------------------------------------------------------*/ + +int ehca_unmap_one_fmr(struct ehca_shca *shca, + struct ehca_mr *e_fmr) +{ + int ret = 0; + u64 h_ret; + int rereg_1_hcall = 1; /* 1: use hipz_mr_reregister directly */ + int rereg_3_hcall = 0; /* 1: use 3 hipz calls for unmapping */ + struct ehca_pd *e_pd = + container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd); + struct ehca_mr save_fmr; + u32 tmp_lkey, tmp_rkey; + struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; + struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0}; + + /* first check if reregistration hCall can be used for unmap */ + if (e_fmr->fmr_max_pages > 512) { + rereg_1_hcall = 0; + rereg_3_hcall = 1; + } + + if (rereg_1_hcall) { + /* + * note: after using rereg hcall with len=0, + * rereg hcall must be used again for registering pages + */ + h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0, + 0, 0, e_pd->fw_pd, 0, &hipzout); + if (h_ret != H_SUCCESS) { + /* + * should not happen, because length checked above, + * FMRs are not shared and no MW bound to FMRs + */ + ehca_err(&shca->ib_device, "hipz_reregister_pmr failed " + "(Rereg1), h_ret=%lx e_fmr=%p hca_hndl=%lx " + "mr_hndl=%lx lkey=%x lkey_out=%x", + h_ret, e_fmr, shca->ipz_hca_handle.handle, + e_fmr->ipz_mr_handle.handle, + e_fmr->ib.ib_fmr.lkey, hipzout.lkey); + rereg_3_hcall = 1; + } else { + /* successful reregistration */ + e_fmr->start = NULL; + e_fmr->size = 0; + tmp_lkey = hipzout.lkey; + tmp_rkey = hipzout.rkey; + } + } + + if (rereg_3_hcall) { + struct ehca_mr save_mr; + + /* first free old FMR */ + h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr); + if (h_ret != H_SUCCESS) { + ehca_err(&shca->ib_device, "hipz_free_mr failed, " + "h_ret=%lx e_fmr=%p hca_hndl=%lx mr_hndl=%lx " + "lkey=%x", + h_ret, e_fmr, 
shca->ipz_hca_handle.handle, + e_fmr->ipz_mr_handle.handle, + e_fmr->ib.ib_fmr.lkey); + ret = ehca_mrmw_map_hrc_free_mr(h_ret); + goto ehca_unmap_one_fmr_exit0; + } + /* clean ehca_mr_t, without changing lock */ + save_fmr = *e_fmr; + ehca_mr_deletenew(e_fmr); + + /* set some MR values */ + e_fmr->flags = save_fmr.flags; + e_fmr->fmr_page_size = save_fmr.fmr_page_size; + e_fmr->fmr_max_pages = save_fmr.fmr_max_pages; + e_fmr->fmr_max_maps = save_fmr.fmr_max_maps; + e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt; + e_fmr->acl = save_fmr.acl; + + pginfo.type = EHCA_MR_PGI_FMR; + pginfo.num_pages = 0; + pginfo.num_4k = 0; + ret = ehca_reg_mr(shca, e_fmr, NULL, + (e_fmr->fmr_max_pages * e_fmr->fmr_page_size), + e_fmr->acl, e_pd, &pginfo, &tmp_lkey, + &tmp_rkey); + if (ret) { + u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr; + memcpy(&e_fmr->flags, &(save_mr.flags), + sizeof(struct ehca_mr) - offset); + goto ehca_unmap_one_fmr_exit0; + } + } + +ehca_unmap_one_fmr_exit0: + if (ret) + ehca_err(&shca->ib_device, "ret=%x tmp_lkey=%x tmp_rkey=%x " + "fmr_max_pages=%x rereg_1_hcall=%x rereg_3_hcall=%x", + ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages, + rereg_1_hcall, rereg_3_hcall); + return ret; +} /* end ehca_unmap_one_fmr() */ + +/*----------------------------------------------------------------------*/ + +int ehca_reg_smr(struct ehca_shca *shca, + struct ehca_mr *e_origmr, + struct ehca_mr *e_newmr, + u64 *iova_start, + int acl, + struct ehca_pd *e_pd, + u32 *lkey, /*OUT*/ + u32 *rkey) /*OUT*/ +{ + int ret = 0; + u64 h_ret; + u32 hipz_acl; + struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0}; + + ehca_mrmw_map_acl(acl, &hipz_acl); + ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl); + + h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr, + (u64)iova_start, hipz_acl, e_pd->fw_pd, + &hipzout); + if (h_ret != H_SUCCESS) { + ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx " + "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x " + "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x", + h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd, + shca->ipz_hca_handle.handle, + e_origmr->ipz_mr_handle.handle, + e_origmr->ib.ib_mr.lkey); + ret = ehca_mrmw_map_hrc_reg_smr(h_ret); + goto ehca_reg_smr_exit0; + } + /* successful registration */ + e_newmr->num_pages = e_origmr->num_pages; + e_newmr->num_4k = e_origmr->num_4k; + e_newmr->start = iova_start; + e_newmr->size = e_origmr->size; + e_newmr->acl = acl; + e_newmr->ipz_mr_handle = hipzout.handle; + *lkey = hipzout.lkey; + *rkey = hipzout.rkey; + return 0; + +ehca_reg_smr_exit0: + if (ret) + ehca_err(&shca->ib_device, "ret=%x shca=%p e_origmr=%p " + "e_newmr=%p iova_start=%p acl=%x e_pd=%p", + ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd); + return ret; +} /* end ehca_reg_smr() */ + +/*----------------------------------------------------------------------*/ + +/* register internal max-MR to internal SHCA */ +int ehca_reg_internal_maxmr( + struct ehca_shca *shca, + struct ehca_pd *e_pd, + struct ehca_mr **e_maxmr) /*OUT*/ +{ + int ret; + struct ehca_mr *e_mr; + u64 *iova_start; + u64 size_maxmr; + struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; + struct ib_phys_buf ib_pbuf; + u32 num_pages_mr; + u32 num_pages_4k; /* 4k portion "pages" */ + + e_mr = ehca_mr_new(); + if (!e_mr) { + ehca_err(&shca->ib_device, "out of memory"); + ret = -ENOMEM; + goto ehca_reg_internal_maxmr_exit0; + } + e_mr->flags |= EHCA_MR_FLAG_MAXMR; + + /* register internal max-MR on HCA */ + size_maxmr = (u64)high_memory - PAGE_OFFSET; + 
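/* the internal max-MR covers all of kernel lowmem: high_memory - PAGE_OFFSET bytes, mapped at KERNELBASE */ +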
iova_start = (u64*)KERNELBASE; + ib_pbuf.addr = 0; + ib_pbuf.size = size_maxmr; + num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size_maxmr + + PAGE_SIZE - 1) / PAGE_SIZE); + num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size_maxmr + + EHCA_PAGESIZE - 1) / EHCA_PAGESIZE); + + pginfo.type = EHCA_MR_PGI_PHYS; + pginfo.num_pages = num_pages_mr; + pginfo.num_4k = num_pages_4k; + pginfo.num_phys_buf = 1; + pginfo.phys_buf_array = &ib_pbuf; + + ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd, + &pginfo, &e_mr->ib.ib_mr.lkey, + &e_mr->ib.ib_mr.rkey); + if (ret) { + ehca_err(&shca->ib_device, "reg of internal max MR failed, " + "e_mr=%p iova_start=%p size_maxmr=%lx num_pages_mr=%x " + "num_pages_4k=%x", e_mr, iova_start, size_maxmr, + num_pages_mr, num_pages_4k); + goto ehca_reg_internal_maxmr_exit1; + } + + /* successful registration of all pages */ + e_mr->ib.ib_mr.device = e_pd->ib_pd.device; + e_mr->ib.ib_mr.pd = &e_pd->ib_pd; + e_mr->ib.ib_mr.uobject = NULL; + atomic_inc(&(e_pd->ib_pd.usecnt)); + atomic_set(&(e_mr->ib.ib_mr.usecnt), 0); + *e_maxmr = e_mr; + return 0; + +ehca_reg_internal_maxmr_exit1: + ehca_mr_delete(e_mr); +ehca_reg_internal_maxmr_exit0: + if (ret) + ehca_err(&shca->ib_device, "ret=%x shca=%p e_pd=%p e_maxmr=%p", + ret, shca, e_pd, e_maxmr); + return ret; +} /* end ehca_reg_internal_maxmr() */ + +/*----------------------------------------------------------------------*/ + +int ehca_reg_maxmr(struct ehca_shca *shca, + struct ehca_mr *e_newmr, + u64 *iova_start, + int acl, + struct ehca_pd *e_pd, + u32 *lkey, + u32 *rkey) +{ + u64 h_ret; + struct ehca_mr *e_origmr = shca->maxmr; + u32 hipz_acl; + struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0}; + + ehca_mrmw_map_acl(acl, &hipz_acl); + ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl); + + h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr, + (u64)iova_start, hipz_acl, e_pd->fw_pd, + &hipzout); + if (h_ret != H_SUCCESS) { + ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx " + "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x", + h_ret, e_origmr, shca->ipz_hca_handle.handle, + e_origmr->ipz_mr_handle.handle, + e_origmr->ib.ib_mr.lkey); + return ehca_mrmw_map_hrc_reg_smr(h_ret); + } + /* successful registration */ + e_newmr->num_pages = e_origmr->num_pages; + e_newmr->num_4k = e_origmr->num_4k; + e_newmr->start = iova_start; + e_newmr->size = e_origmr->size; + e_newmr->acl = acl; + e_newmr->ipz_mr_handle = hipzout.handle; + *lkey = hipzout.lkey; + *rkey = hipzout.rkey; + return 0; +} /* end ehca_reg_maxmr() */ + +/*----------------------------------------------------------------------*/ + +int ehca_dereg_internal_maxmr(struct ehca_shca *shca) +{ + int ret; + struct ehca_mr *e_maxmr; + struct ib_pd *ib_pd; + + if (!shca->maxmr) { + ehca_err(&shca->ib_device, "bad call, shca=%p", shca); + ret = -EINVAL; + goto ehca_dereg_internal_maxmr_exit0; + } + + e_maxmr = shca->maxmr; + ib_pd = e_maxmr->ib.ib_mr.pd; + shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */ + + ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr); + if (ret) { + ehca_err(&shca->ib_device, "dereg internal max-MR failed, " + "ret=%x e_maxmr=%p shca=%p lkey=%x", + ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey); + shca->maxmr = e_maxmr; + goto ehca_dereg_internal_maxmr_exit0; + } + + atomic_dec(&ib_pd->usecnt); + +ehca_dereg_internal_maxmr_exit0: + if (ret) + ehca_err(&shca->ib_device, "ret=%x shca=%p shca->maxmr=%p", + ret, shca, shca->maxmr); + return ret; +} /* end ehca_dereg_internal_maxmr() */ + 
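The page-count arithmetic in ehca_reg_internal_maxmr() above rounds the registered byte range up to whole pages: the offset of the start address into its first page is added to the size before the round-up division. A minimal standalone sketch of that calculation, assuming the 4 KiB EHCA_PAGESIZE this driver uses elsewhere (the helper name is illustrative, not part of the driver):

    #include <stdio.h>

    #define EHCA_PAGESIZE 4096UL  /* eHCA firmware page size, assumed 4 KiB */

    /* number of pages needed to cover `size` bytes starting at `addr` */
    static unsigned long pages_needed(unsigned long addr, unsigned long size,
                                      unsigned long pgsize)
    {
            return ((addr % pgsize) + size + pgsize - 1) / pgsize;
    }

    int main(void)
    {
            /* one byte within a single page -> 1 page */
            printf("%lu\n", pages_needed(0x1000, 1, EHCA_PAGESIZE));
            /* two bytes straddling a page boundary -> 2 pages */
            printf("%lu\n", pages_needed(0x1fff, 2, EHCA_PAGESIZE));
            return 0;
    }

The same expression produces num_pages_mr when evaluated with PAGE_SIZE and num_pages_4k when evaluated with EHCA_PAGESIZE.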
+/*----------------------------------------------------------------------*/ + +/* + * check physical buffer array of MR verbs for validness and + * calculates MR size + */ +int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array, + int num_phys_buf, + u64 *iova_start, + u64 *size) +{ + struct ib_phys_buf *pbuf = phys_buf_array; + u64 size_count = 0; + u32 i; + + if (num_phys_buf == 0) { + ehca_gen_err("bad phys buf array len, num_phys_buf=0"); + return -EINVAL; + } + /* check first buffer */ + if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) { + ehca_gen_err("iova_start/addr mismatch, iova_start=%p " + "pbuf->addr=%lx pbuf->size=%lx", + iova_start, pbuf->addr, pbuf->size); + return -EINVAL; + } + if (((pbuf->addr + pbuf->size) % PAGE_SIZE) && + (num_phys_buf > 1)) { + ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%lx " + "pbuf->size=%lx", pbuf->addr, pbuf->size); + return -EINVAL; + } + + for (i = 0; i < num_phys_buf; i++) { + if ((i > 0) && (pbuf->addr % PAGE_SIZE)) { + ehca_gen_err("bad address, i=%x pbuf->addr=%lx " + "pbuf->size=%lx", + i, pbuf->addr, pbuf->size); + return -EINVAL; + } + if (((i > 0) && /* not 1st */ + (i < (num_phys_buf - 1)) && /* not last */ + (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) { + ehca_gen_err("bad size, i=%x pbuf->size=%lx", + i, pbuf->size); + return -EINVAL; + } + size_count += pbuf->size; + pbuf++; + } + + *size = size_count; + return 0; +} /* end ehca_mr_chk_buf_and_calc_size() */ + +/*----------------------------------------------------------------------*/ + +/* check page list of map FMR verb for validness */ +int ehca_fmr_check_page_list(struct ehca_mr *e_fmr, + u64 *page_list, + int list_len) +{ + u32 i; + u64 *page; + + if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) { + ehca_gen_err("bad list_len, list_len=%x " + "e_fmr->fmr_max_pages=%x fmr=%p", + list_len, e_fmr->fmr_max_pages, e_fmr); + return -EINVAL; + } + + /* each page must be aligned */ + page = page_list; + for (i = 0; i < list_len; i++) { + if (*page % e_fmr->fmr_page_size) { + ehca_gen_err("bad page, i=%x *page=%lx page=%p fmr=%p " + "fmr_page_size=%x", i, *page, page, e_fmr, + e_fmr->fmr_page_size); + return -EINVAL; + } + page++; + } + + return 0; +} /* end ehca_fmr_check_page_list() */ + +/*----------------------------------------------------------------------*/ + +/* setup page buffer from page info */ +int ehca_set_pagebuf(struct ehca_mr *e_mr, + struct ehca_mr_pginfo *pginfo, + u32 number, + u64 *kpage) +{ + int ret = 0; + struct ib_umem_chunk *prev_chunk; + struct ib_umem_chunk *chunk; + struct ib_phys_buf *pbuf; + u64 *fmrlist; + u64 num4k, pgaddr, offs4k; + u32 i = 0; + u32 j = 0; + + if (pginfo->type == EHCA_MR_PGI_PHYS) { + /* loop over desired phys_buf_array entries */ + while (i < number) { + pbuf = pginfo->phys_buf_array + pginfo->next_buf; + num4k = ((pbuf->addr % EHCA_PAGESIZE) + pbuf->size + + EHCA_PAGESIZE - 1) / EHCA_PAGESIZE; + offs4k = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE; + while (pginfo->next_4k < offs4k + num4k) { + /* sanity check */ + if ((pginfo->page_cnt >= pginfo->num_pages) || + (pginfo->page_4k_cnt >= pginfo->num_4k)) { + ehca_gen_err("page_cnt >= num_pages, " + "page_cnt=%lx " + "num_pages=%lx " + "page_4k_cnt=%lx " + "num_4k=%lx i=%x", + pginfo->page_cnt, + pginfo->num_pages, + pginfo->page_4k_cnt, + pginfo->num_4k, i); + ret = -EFAULT; + goto ehca_set_pagebuf_exit0; + } + *kpage = phys_to_abs( + (pbuf->addr & EHCA_PAGEMASK) + + (pginfo->next_4k * EHCA_PAGESIZE)); + if ( !(*kpage) && pbuf->addr 
) { + ehca_gen_err("pbuf->addr=%lx " + "pbuf->size=%lx " + "next_4k=%lx", pbuf->addr, + pbuf->size, + pginfo->next_4k); + ret = -EFAULT; + goto ehca_set_pagebuf_exit0; + } + (pginfo->page_4k_cnt)++; + (pginfo->next_4k)++; + if (pginfo->next_4k % + (PAGE_SIZE / EHCA_PAGESIZE) == 0) + (pginfo->page_cnt)++; + kpage++; + i++; + if (i >= number) break; + } + if (pginfo->next_4k >= offs4k + num4k) { + (pginfo->next_buf)++; + pginfo->next_4k = 0; + } + } + } else if (pginfo->type == EHCA_MR_PGI_USER) { + /* loop over desired chunk entries */ + chunk = pginfo->next_chunk; + prev_chunk = pginfo->next_chunk; + list_for_each_entry_continue(chunk, + (&(pginfo->region->chunk_list)), + list) { + for (i = pginfo->next_nmap; i < chunk->nmap; ) { + pgaddr = ( page_to_pfn(chunk->page_list[i].page) + << PAGE_SHIFT ); + *kpage = phys_to_abs(pgaddr + + (pginfo->next_4k * + EHCA_PAGESIZE)); + if ( !(*kpage) ) { + ehca_gen_err("pgaddr=%lx " + "chunk->page_list[i]=%lx " + "i=%x next_4k=%lx mr=%p", + pgaddr, + (u64)sg_dma_address( + &chunk-> + page_list[i]), + i, pginfo->next_4k, e_mr); + ret = -EFAULT; + goto ehca_set_pagebuf_exit0; + } + (pginfo->page_4k_cnt)++; + (pginfo->next_4k)++; + kpage++; + if (pginfo->next_4k % + (PAGE_SIZE / EHCA_PAGESIZE) == 0) { + (pginfo->page_cnt)++; + (pginfo->next_nmap)++; + pginfo->next_4k = 0; + i++; + } + j++; + if (j >= number) break; + } + if ((pginfo->next_nmap >= chunk->nmap) && + (j >= number)) { + pginfo->next_nmap = 0; + prev_chunk = chunk; + break; + } else if (pginfo->next_nmap >= chunk->nmap) { + pginfo->next_nmap = 0; + prev_chunk = chunk; + } else if (j >= number) + break; + else + prev_chunk = chunk; + } + pginfo->next_chunk = + list_prepare_entry(prev_chunk, + (&(pginfo->region->chunk_list)), + list); + } else if (pginfo->type == EHCA_MR_PGI_FMR) { + /* loop over desired page_list entries */ + fmrlist = pginfo->page_list + pginfo->next_listelem; + for (i = 0; i < number; i++) { + *kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) + + pginfo->next_4k * EHCA_PAGESIZE); + if ( !(*kpage) ) { + ehca_gen_err("*fmrlist=%lx fmrlist=%p " + "next_listelem=%lx next_4k=%lx", + *fmrlist, fmrlist, + pginfo->next_listelem, + pginfo->next_4k); + ret = -EFAULT; + goto ehca_set_pagebuf_exit0; + } + (pginfo->page_4k_cnt)++; + (pginfo->next_4k)++; + kpage++; + if (pginfo->next_4k % + (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) { + (pginfo->page_cnt)++; + (pginfo->next_listelem)++; + fmrlist++; + pginfo->next_4k = 0; + } + } + } else { + ehca_gen_err("bad pginfo->type=%x", pginfo->type); + ret = -EFAULT; + goto ehca_set_pagebuf_exit0; + } + +ehca_set_pagebuf_exit0: + if (ret) + ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx " + "num_4k=%lx next_buf=%lx next_4k=%lx number=%x " + "kpage=%p page_cnt=%lx page_4k_cnt=%lx i=%x " + "next_listelem=%lx region=%p next_chunk=%p " + "next_nmap=%lx", ret, e_mr, pginfo, pginfo->type, + pginfo->num_pages, pginfo->num_4k, + pginfo->next_buf, pginfo->next_4k, number, kpage, + pginfo->page_cnt, pginfo->page_4k_cnt, i, + pginfo->next_listelem, pginfo->region, + pginfo->next_chunk, pginfo->next_nmap); + return ret; +} /* end ehca_set_pagebuf() */ + +/*----------------------------------------------------------------------*/ + +/* setup 1 page from page info page buffer */ +int ehca_set_pagebuf_1(struct ehca_mr *e_mr, + struct ehca_mr_pginfo *pginfo, + u64 *rpage) +{ + int ret = 0; + struct ib_phys_buf *tmp_pbuf; + u64 *fmrlist; + struct ib_umem_chunk *chunk; + struct ib_umem_chunk *prev_chunk; + u64 pgaddr, num4k, offs4k; + + if (pginfo->type 
== EHCA_MR_PGI_PHYS) { + /* sanity check */ + if ((pginfo->page_cnt >= pginfo->num_pages) || + (pginfo->page_4k_cnt >= pginfo->num_4k)) { + ehca_gen_err("page_cnt >= num_pages, page_cnt=%lx " + "num_pages=%lx page_4k_cnt=%lx num_4k=%lx", + pginfo->page_cnt, pginfo->num_pages, + pginfo->page_4k_cnt, pginfo->num_4k); + ret = -EFAULT; + goto ehca_set_pagebuf_1_exit0; + } + tmp_pbuf = pginfo->phys_buf_array + pginfo->next_buf; + num4k = ((tmp_pbuf->addr % EHCA_PAGESIZE) + tmp_pbuf->size + + EHCA_PAGESIZE - 1) / EHCA_PAGESIZE; + offs4k = (tmp_pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE; + *rpage = phys_to_abs((tmp_pbuf->addr & EHCA_PAGEMASK) + + (pginfo->next_4k * EHCA_PAGESIZE)); + if ( !(*rpage) && tmp_pbuf->addr ) { + ehca_gen_err("tmp_pbuf->addr=%lx" + " tmp_pbuf->size=%lx next_4k=%lx", + tmp_pbuf->addr, tmp_pbuf->size, + pginfo->next_4k); + ret = -EFAULT; + goto ehca_set_pagebuf_1_exit0; + } + (pginfo->page_4k_cnt)++; + (pginfo->next_4k)++; + if (pginfo->next_4k % (PAGE_SIZE / EHCA_PAGESIZE) == 0) + (pginfo->page_cnt)++; + if (pginfo->next_4k >= offs4k + num4k) { + (pginfo->next_buf)++; + pginfo->next_4k = 0; + } + } else if (pginfo->type == EHCA_MR_PGI_USER) { + chunk = pginfo->next_chunk; + prev_chunk = pginfo->next_chunk; + list_for_each_entry_continue(chunk, + (&(pginfo->region->chunk_list)), + list) { + pgaddr = ( page_to_pfn(chunk->page_list[ + pginfo->next_nmap].page) + << PAGE_SHIFT); + *rpage = phys_to_abs(pgaddr + + (pginfo->next_4k * EHCA_PAGESIZE)); + if ( !(*rpage) ) { + ehca_gen_err("pgaddr=%lx chunk->page_list[]=%lx" + " next_nmap=%lx next_4k=%lx mr=%p", + pgaddr, (u64)sg_dma_address( + &chunk->page_list[ + pginfo-> + next_nmap]), + pginfo->next_nmap, pginfo->next_4k, + e_mr); + ret = -EFAULT; + goto ehca_set_pagebuf_1_exit0; + } + (pginfo->page_4k_cnt)++; + (pginfo->next_4k)++; + if (pginfo->next_4k % + (PAGE_SIZE / EHCA_PAGESIZE) == 0) { + (pginfo->page_cnt)++; + (pginfo->next_nmap)++; + pginfo->next_4k = 0; + } + if (pginfo->next_nmap >= chunk->nmap) { + pginfo->next_nmap = 0; + prev_chunk = chunk; + } + break; + } + pginfo->next_chunk = + list_prepare_entry(prev_chunk, + (&(pginfo->region->chunk_list)), + list); + } else if (pginfo->type == EHCA_MR_PGI_FMR) { + fmrlist = pginfo->page_list + pginfo->next_listelem; + *rpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) + + pginfo->next_4k * EHCA_PAGESIZE); + if ( !(*rpage) ) { + ehca_gen_err("*fmrlist=%lx fmrlist=%p " + "next_listelem=%lx next_4k=%lx", + *fmrlist, fmrlist, pginfo->next_listelem, + pginfo->next_4k); + ret = -EFAULT; + goto ehca_set_pagebuf_1_exit0; + } + (pginfo->page_4k_cnt)++; + (pginfo->next_4k)++; + if (pginfo->next_4k % + (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) { + (pginfo->page_cnt)++; + (pginfo->next_listelem)++; + pginfo->next_4k = 0; + } + } else { + ehca_gen_err("bad pginfo->type=%x", pginfo->type); + ret = -EFAULT; + goto ehca_set_pagebuf_1_exit0; + } + +ehca_set_pagebuf_1_exit0: + if (ret) + ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx " + "num_4k=%lx next_buf=%lx next_4k=%lx rpage=%p " + "page_cnt=%lx page_4k_cnt=%lx next_listelem=%lx " + "region=%p next_chunk=%p next_nmap=%lx", ret, e_mr, + pginfo, pginfo->type, pginfo->num_pages, + pginfo->num_4k, pginfo->next_buf, pginfo->next_4k, + rpage, pginfo->page_cnt, pginfo->page_4k_cnt, + pginfo->next_listelem, pginfo->region, + pginfo->next_chunk, pginfo->next_nmap); + return ret; +} /* end ehca_set_pagebuf_1() */ + +/*----------------------------------------------------------------------*/ + +/* + * check MR if it is a max-MR, i.e. 
covers the whole of memory + * returns 1 if it is a max-MR, else 0 + */ +int ehca_mr_is_maxmr(u64 size, + u64 *iova_start) +{ + /* an MR is treated as a max-MR only if it matches the following: */ + if ((size == ((u64)high_memory - PAGE_OFFSET)) && + (iova_start == (void*)KERNELBASE)) { + ehca_gen_dbg("this is a max-MR"); + return 1; + } else + return 0; +} /* end ehca_mr_is_maxmr() */ + +/*----------------------------------------------------------------------*/ + +/* map access control for MR/MW. This routine is used for both MRs and MWs. */ +void ehca_mrmw_map_acl(int ib_acl, + u32 *hipz_acl) +{ + *hipz_acl = 0; + if (ib_acl & IB_ACCESS_REMOTE_READ) + *hipz_acl |= HIPZ_ACCESSCTRL_R_READ; + if (ib_acl & IB_ACCESS_REMOTE_WRITE) + *hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE; + if (ib_acl & IB_ACCESS_REMOTE_ATOMIC) + *hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC; + if (ib_acl & IB_ACCESS_LOCAL_WRITE) + *hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE; + if (ib_acl & IB_ACCESS_MW_BIND) + *hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND; +} /* end ehca_mrmw_map_acl() */ + +/*----------------------------------------------------------------------*/ + +/* sets the page size in the hipz access control for MR/MW. */ +void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl) /*INOUT*/ +{ + return; /* HCA supports only 4k */ +} /* end ehca_mrmw_set_pgsize_hipz_acl() */ + +/*----------------------------------------------------------------------*/ + +/* + * reverse map access control for MR/MW. + * This routine is used for both MRs and MWs. + */ +void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl, + int *ib_acl) /*OUT*/ +{ + *ib_acl = 0; + if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ) + *ib_acl |= IB_ACCESS_REMOTE_READ; + if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE) + *ib_acl |= IB_ACCESS_REMOTE_WRITE; + if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC) + *ib_acl |= IB_ACCESS_REMOTE_ATOMIC; + if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE) + *ib_acl |= IB_ACCESS_LOCAL_WRITE; + if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND) + *ib_acl |= IB_ACCESS_MW_BIND; +} /* end ehca_mrmw_reverse_map_acl() */ + +/*----------------------------------------------------------------------*/ + +/* + * map HIPZ rc to IB retcodes for MR/MW allocations + * Used for hipz_mr_reg_alloc and hipz_mw_alloc.
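+ * (H_SUCCESS maps to 0, H_BUSY to -EBUSY, all other codes to -EINVAL)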
+ */ +int ehca_mrmw_map_hrc_alloc(const u64 hipz_rc) +{ + switch (hipz_rc) { + case H_SUCCESS: /* successful completion */ + return 0; + case H_ADAPTER_PARM: /* invalid adapter handle */ + case H_RT_PARM: /* invalid resource type */ + case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */ + case H_MLENGTH_PARM: /* invalid memory length */ + case H_MEM_ACCESS_PARM: /* invalid access controls */ + case H_CONSTRAINED: /* resource constraint */ + return -EINVAL; + case H_BUSY: /* long busy */ + return -EBUSY; + default: + return -EINVAL; + } +} /* end ehca_mrmw_map_hrc_alloc() */ + +/*----------------------------------------------------------------------*/ + +/* + * map HIPZ rc to IB retcodes for MR register rpage + * Used for hipz_h_register_rpage_mr when registering the last page + */ +int ehca_mrmw_map_hrc_rrpg_last(const u64 hipz_rc) +{ + switch (hipz_rc) { + case H_SUCCESS: /* registration complete */ + return 0; + case H_PAGE_REGISTERED: /* page registered */ + case H_ADAPTER_PARM: /* invalid adapter handle */ + case H_RH_PARM: /* invalid resource handle */ +/* case H_QT_PARM: invalid queue type */ + case H_PARAMETER: /* + * invalid logical address, + * or a count of zero or greater than 512 + */ + case H_TABLE_FULL: /* page table full */ + case H_HARDWARE: /* HCA not operational */ + return -EINVAL; + case H_BUSY: /* long busy */ + return -EBUSY; + default: + return -EINVAL; + } +} /* end ehca_mrmw_map_hrc_rrpg_last() */ + +/*----------------------------------------------------------------------*/ + +/* + * map HIPZ rc to IB retcodes for MR register rpage + * Used for hipz_h_register_rpage_mr when registering a page other than the last + */ +int ehca_mrmw_map_hrc_rrpg_notlast(const u64 hipz_rc) +{ + switch (hipz_rc) { + case H_PAGE_REGISTERED: /* page registered */ + return 0; + case H_SUCCESS: /* registration complete */ + case H_ADAPTER_PARM: /* invalid adapter handle */ + case H_RH_PARM: /* invalid resource handle */ +/* case H_QT_PARM: invalid queue type */ + case H_PARAMETER: /* + * invalid logical address, + * or a count of zero or greater than 512 + */ + case H_TABLE_FULL: /* page table full */ + case H_HARDWARE: /* HCA not operational */ + return -EINVAL; + case H_BUSY: /* long busy */ + return -EBUSY; + default: + return -EINVAL; + } +} /* end ehca_mrmw_map_hrc_rrpg_notlast() */ + +/*----------------------------------------------------------------------*/ + +/* map HIPZ rc to IB retcodes for MR query. Used for hipz_mr_query.
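(H_BUSY maps to -EBUSY; all other non-success codes map to -EINVAL)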
*/ +int ehca_mrmw_map_hrc_query_mr(const u64 hipz_rc) +{ + switch (hipz_rc) { + case H_SUCCESS: /* successful completion */ + return 0; + case H_ADAPTER_PARM: /* invalid adapter handle */ + case H_RH_PARM: /* invalid resource handle */ + return -EINVAL; + case H_BUSY: /* long busy */ + return -EBUSY; + default: + return -EINVAL; + } +} /* end ehca_mrmw_map_hrc_query_mr() */ + +/*----------------------------------------------------------------------*/ +/*----------------------------------------------------------------------*/ + +/* + * map HIPZ rc to IB retcodes for freeing MR resource + * Used for hipz_h_free_resource_mr + */ +int ehca_mrmw_map_hrc_free_mr(const u64 hipz_rc) +{ + switch (hipz_rc) { + case H_SUCCESS: /* resource freed */ + return 0; + case H_ADAPTER_PARM: /* invalid adapter handle */ + case H_RH_PARM: /* invalid resource handle */ + case H_R_STATE: /* invalid resource state */ + case H_HARDWARE: /* HCA not operational */ + return -EINVAL; + case H_RESOURCE: /* Resource in use */ + case H_BUSY: /* long busy */ + return -EBUSY; + default: + return -EINVAL; + } +} /* end ehca_mrmw_map_hrc_free_mr() */ + +/*----------------------------------------------------------------------*/ + +/* + * map HIPZ rc to IB retcodes for freeing MW resource + * Used for hipz_h_free_resource_mw + */ +int ehca_mrmw_map_hrc_free_mw(const u64 hipz_rc) +{ + switch (hipz_rc) { + case H_SUCCESS: /* resource freed */ + return 0; + case H_ADAPTER_PARM: /* invalid adapter handle */ + case H_RH_PARM: /* invalid resource handle */ + case H_R_STATE: /* invalid resource state */ + case H_HARDWARE: /* HCA not operational */ + return -EINVAL; + case H_RESOURCE: /* Resource in use */ + case H_BUSY: /* long busy */ + return -EBUSY; + default: + return -EINVAL; + } +} /* end ehca_mrmw_map_hrc_free_mw() */ + +/*----------------------------------------------------------------------*/ + +/* + * map HIPZ rc to IB retcodes for SMR registrations + * Used for hipz_h_register_smr. 
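+ * (parameter and resource errors map to -EINVAL, H_BUSY to -EBUSY)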
+ */ +int ehca_mrmw_map_hrc_reg_smr(const u64 hipz_rc) +{ + switch (hipz_rc) { + case H_SUCCESS: /* successful completion */ + return 0; + case H_ADAPTER_PARM: /* invalid adapter handle */ + case H_RH_PARM: /* invalid resource handle */ + case H_MEM_PARM: /* invalid MR virtual address */ + case H_MEM_ACCESS_PARM: /* invalid access controls */ + case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */ + return -EINVAL; + case H_BUSY: /* long busy */ + return -EBUSY; + default: + return -EINVAL; + } +} /* end ehca_mrmw_map_hrc_reg_smr() */ + +/*----------------------------------------------------------------------*/ + +/* + * MR destructor and constructor + * used in Reregister MR verb, sets all fields in ehca_mr_t to 0, + * except struct ib_mr and spinlock + */ +void ehca_mr_deletenew(struct ehca_mr *mr) +{ + mr->flags = 0; + mr->num_pages = 0; + mr->num_4k = 0; + mr->acl = 0; + mr->start = NULL; + mr->fmr_page_size = 0; + mr->fmr_max_pages = 0; + mr->fmr_max_maps = 0; + mr->fmr_map_cnt = 0; + memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle)); + memset(&mr->galpas, 0, sizeof(mr->galpas)); + mr->nr_of_pages = 0; + mr->pagearray = NULL; +} /* end ehca_mr_deletenew() */ + +int ehca_init_mrmw_cache(void) +{ + mr_cache = kmem_cache_create("ehca_cache_mr", + sizeof(struct ehca_mr), 0, + SLAB_HWCACHE_ALIGN, + NULL, NULL); + if (!mr_cache) + return -ENOMEM; + mw_cache = kmem_cache_create("ehca_cache_mw", + sizeof(struct ehca_mw), 0, + SLAB_HWCACHE_ALIGN, + NULL, NULL); + if (!mw_cache) { + kmem_cache_destroy(mr_cache); + mr_cache = NULL; + return -ENOMEM; + } + return 0; +} + +void ehca_cleanup_mrmw_cache(void) +{ + if (mr_cache) + kmem_cache_destroy(mr_cache); + if (mw_cache) + kmem_cache_destroy(mw_cache); +} diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.h b/drivers/infiniband/hw/ehca/ehca_mrmw.h new file mode 100644 index 000000000000..d936e40a5748 --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_mrmw.h @@ -0,0 +1,140 @@ +/* + * IBM eServer eHCA Infiniband device driver for Linux on POWER + * + * MR/MW declarations and inline functions + * + * Authors: Dietmar Decker <ddecker@de.ibm.com> + * Christoph Raisch <raisch@de.ibm.com> + * + * Copyright (c) 2005 IBM Corporation + * + * All rights reserved. + * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. + * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _EHCA_MRMW_H_ +#define _EHCA_MRMW_H_ + +int ehca_reg_mr(struct ehca_shca *shca, + struct ehca_mr *e_mr, + u64 *iova_start, + u64 size, + int acl, + struct ehca_pd *e_pd, + struct ehca_mr_pginfo *pginfo, + u32 *lkey, + u32 *rkey); + +int ehca_reg_mr_rpages(struct ehca_shca *shca, + struct ehca_mr *e_mr, + struct ehca_mr_pginfo *pginfo); + +int ehca_rereg_mr(struct ehca_shca *shca, + struct ehca_mr *e_mr, + u64 *iova_start, + u64 size, + int mr_access_flags, + struct ehca_pd *e_pd, + struct ehca_mr_pginfo *pginfo, + u32 *lkey, + u32 *rkey); + +int ehca_unmap_one_fmr(struct ehca_shca *shca, + struct ehca_mr *e_fmr); + +int ehca_reg_smr(struct ehca_shca *shca, + struct ehca_mr *e_origmr, + struct ehca_mr *e_newmr, + u64 *iova_start, + int acl, + struct ehca_pd *e_pd, + u32 *lkey, + u32 *rkey); + +int ehca_reg_internal_maxmr(struct ehca_shca *shca, + struct ehca_pd *e_pd, + struct ehca_mr **maxmr); + +int ehca_reg_maxmr(struct ehca_shca *shca, + struct ehca_mr *e_newmr, + u64 *iova_start, + int acl, + struct ehca_pd *e_pd, + u32 *lkey, + u32 *rkey); + +int ehca_dereg_internal_maxmr(struct ehca_shca *shca); + +int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array, + int num_phys_buf, + u64 *iova_start, + u64 *size); + +int ehca_fmr_check_page_list(struct ehca_mr *e_fmr, + u64 *page_list, + int list_len); + +int ehca_set_pagebuf(struct ehca_mr *e_mr, + struct ehca_mr_pginfo *pginfo, + u32 number, + u64 *kpage); + +int ehca_set_pagebuf_1(struct ehca_mr *e_mr, + struct ehca_mr_pginfo *pginfo, + u64 *rpage); + +int ehca_mr_is_maxmr(u64 size, + u64 *iova_start); + +void ehca_mrmw_map_acl(int ib_acl, + u32 *hipz_acl); + +void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl); + +void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl, + int *ib_acl); + +int ehca_mrmw_map_hrc_alloc(const u64 hipz_rc); + +int ehca_mrmw_map_hrc_rrpg_last(const u64 hipz_rc); + +int ehca_mrmw_map_hrc_rrpg_notlast(const u64 hipz_rc); + +int ehca_mrmw_map_hrc_query_mr(const u64 hipz_rc); + +int ehca_mrmw_map_hrc_free_mr(const u64 hipz_rc); + +int ehca_mrmw_map_hrc_free_mw(const u64 hipz_rc); + +int ehca_mrmw_map_hrc_reg_smr(const u64 hipz_rc); + +void ehca_mr_deletenew(struct ehca_mr *mr); + +#endif /*_EHCA_MRMW_H_*/ diff --git a/drivers/infiniband/hw/ehca/ehca_pd.c b/drivers/infiniband/hw/ehca/ehca_pd.c new file mode 100644 index 000000000000..2c3cdc6f7b39 --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_pd.c @@ -0,0 +1,114 @@ +/* + * IBM eServer eHCA Infiniband device driver for Linux on POWER + * + * PD functions + * + * Authors: Christoph Raisch <raisch@de.ibm.com> + * + * Copyright (c) 2005 IBM Corporation + * + * All rights reserved. + * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. 
+ * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <asm/current.h> + +#include "ehca_tools.h" +#include "ehca_iverbs.h" + +static struct kmem_cache *pd_cache; + +struct ib_pd *ehca_alloc_pd(struct ib_device *device, + struct ib_ucontext *context, struct ib_udata *udata) +{ + struct ehca_pd *pd; + + pd = kmem_cache_alloc(pd_cache, SLAB_KERNEL); + if (!pd) { + ehca_err(device, "device=%p context=%p out of memory", + device, context); + return ERR_PTR(-ENOMEM); + } + + memset(pd, 0, sizeof(struct ehca_pd)); + pd->ownpid = current->tgid; + + /* + * Kernel PD: when device = -1, 0 + * User PD: when context != -1 + */ + if (!context) { + /* + * Kernel PDs after init reuses always + * the one created in ehca_shca_reopen() + */ + struct ehca_shca *shca = container_of(device, struct ehca_shca, + ib_device); + pd->fw_pd.value = shca->pd->fw_pd.value; + } else + pd->fw_pd.value = (u64)pd; + + return &pd->ib_pd; +} + +int ehca_dealloc_pd(struct ib_pd *pd) +{ + u32 cur_pid = current->tgid; + struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd); + + if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context && + my_pd->ownpid != cur_pid) { + ehca_err(pd->device, "Invalid caller pid=%x ownpid=%x", + cur_pid, my_pd->ownpid); + return -EINVAL; + } + + kmem_cache_free(pd_cache, + container_of(pd, struct ehca_pd, ib_pd)); + + return 0; +} + +int ehca_init_pd_cache(void) +{ + pd_cache = kmem_cache_create("ehca_cache_pd", + sizeof(struct ehca_pd), 0, + SLAB_HWCACHE_ALIGN, + NULL, NULL); + if (!pd_cache) + return -ENOMEM; + return 0; +} + +void ehca_cleanup_pd_cache(void) +{ + if (pd_cache) + kmem_cache_destroy(pd_cache); +} diff --git a/drivers/infiniband/hw/ehca/ehca_qes.h b/drivers/infiniband/hw/ehca/ehca_qes.h new file mode 100644 index 000000000000..8707d297ce4c --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_qes.h @@ -0,0 +1,259 @@ +/* + * IBM eServer eHCA Infiniband device driver for Linux on POWER + * + * Hardware request structures + * + * Authors: Waleri Fomin <fomin@de.ibm.com> + * Reinhard Ernst <rernst@de.ibm.com> + * Christoph Raisch <raisch@de.ibm.com> + * + * Copyright (c) 2005 IBM Corporation + * + * All rights reserved. 
+ * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. + * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +#ifndef _EHCA_QES_H_ +#define _EHCA_QES_H_ + +#include "ehca_tools.h" + +/* virtual scatter gather entry to specify remote adresses with length */ +struct ehca_vsgentry { + u64 vaddr; + u32 lkey; + u32 length; +}; + +#define GRH_FLAG_MASK EHCA_BMASK_IBM(7,7) +#define GRH_IPVERSION_MASK EHCA_BMASK_IBM(0,3) +#define GRH_TCLASS_MASK EHCA_BMASK_IBM(4,12) +#define GRH_FLOWLABEL_MASK EHCA_BMASK_IBM(13,31) +#define GRH_PAYLEN_MASK EHCA_BMASK_IBM(32,47) +#define GRH_NEXTHEADER_MASK EHCA_BMASK_IBM(48,55) +#define GRH_HOPLIMIT_MASK EHCA_BMASK_IBM(56,63) + +/* + * Unreliable Datagram Address Vector Format + * see IBTA Vol1 chapter 8.3 Global Routing Header + */ +struct ehca_ud_av { + u8 sl; + u8 lnh; + u16 dlid; + u8 reserved1; + u8 reserved2; + u8 reserved3; + u8 slid_path_bits; + u8 reserved4; + u8 ipd; + u8 reserved5; + u8 pmtu; + u32 reserved6; + u64 reserved7; + union { + struct { + u64 word_0; /* always set to 6 */ + /*should be 0x1B for IB transport */ + u64 word_1; + u64 word_2; + u64 word_3; + u64 word_4; + } grh; + struct { + u32 wd_0; + u32 wd_1; + /* DWord_1 --> SGID */ + + u32 sgid_wd3; + u32 sgid_wd2; + + u32 sgid_wd1; + u32 sgid_wd0; + /* DWord_3 --> DGID */ + + u32 dgid_wd3; + u32 dgid_wd2; + + u32 dgid_wd1; + u32 dgid_wd0; + } grh_l; + }; +}; + +/* maximum number of sg entries allowed in a WQE */ +#define MAX_WQE_SG_ENTRIES 252 + +#define WQE_OPTYPE_SEND 0x80 +#define WQE_OPTYPE_RDMAREAD 0x40 +#define WQE_OPTYPE_RDMAWRITE 0x20 +#define WQE_OPTYPE_CMPSWAP 0x10 +#define WQE_OPTYPE_FETCHADD 0x08 +#define WQE_OPTYPE_BIND 0x04 + +#define WQE_WRFLAG_REQ_SIGNAL_COM 0x80 +#define WQE_WRFLAG_FENCE 0x40 +#define WQE_WRFLAG_IMM_DATA_PRESENT 0x20 +#define WQE_WRFLAG_SOLIC_EVENT 0x10 + +#define WQEF_CACHE_HINT 0x80 +#define WQEF_CACHE_HINT_RD_WR 0x40 +#define WQEF_TIMED_WQE 0x20 +#define WQEF_PURGE 0x08 +#define WQEF_HIGH_NIBBLE 0xF0 + +#define MW_BIND_ACCESSCTRL_R_WRITE 0x40 +#define MW_BIND_ACCESSCTRL_R_READ 0x20 +#define MW_BIND_ACCESSCTRL_R_ATOMIC 0x10 + +struct ehca_wqe { + u64 work_request_id; + u8 optype; + u8 wr_flag; + u16 pkeyi; + u8 wqef; + u8 nr_of_data_seg; + u16 
wqe_provided_slid; + u32 destination_qp_number; + u32 resync_psn_sqp; + u32 local_ee_context_qkey; + u32 immediate_data; + union { + struct { + u64 remote_virtual_adress; + u32 rkey; + u32 reserved; + u64 atomic_1st_op_dma_len; + u64 atomic_2nd_op; + struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES]; + + } nud; + struct { + u64 ehca_ud_av_ptr; + u64 reserved1; + u64 reserved2; + u64 reserved3; + struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES]; + } ud_avp; + struct { + struct ehca_ud_av ud_av; + struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES - + 2]; + } ud_av; + struct { + u64 reserved0; + u64 reserved1; + u64 reserved2; + u64 reserved3; + struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES]; + } all_rcv; + + struct { + u64 reserved; + u32 rkey; + u32 old_rkey; + u64 reserved1; + u64 reserved2; + u64 virtual_address; + u32 reserved3; + u32 length; + u32 reserved4; + u16 reserved5; + u8 reserved6; + u8 lr_ctl; + u32 lkey; + u32 reserved7; + u64 reserved8; + u64 reserved9; + u64 reserved10; + u64 reserved11; + } bind; + struct { + u64 reserved12; + u64 reserved13; + u32 size; + u32 start; + } inline_data; + } u; + +}; + +#define WC_SEND_RECEIVE EHCA_BMASK_IBM(0,0) +#define WC_IMM_DATA EHCA_BMASK_IBM(1,1) +#define WC_GRH_PRESENT EHCA_BMASK_IBM(2,2) +#define WC_SE_BIT EHCA_BMASK_IBM(3,3) +#define WC_STATUS_ERROR_BIT 0x80000000 +#define WC_STATUS_REMOTE_ERROR_FLAGS 0x0000F800 +#define WC_STATUS_PURGE_BIT 0x10 + +struct ehca_cqe { + u64 work_request_id; + u8 optype; + u8 w_completion_flags; + u16 reserved1; + u32 nr_bytes_transferred; + u32 immediate_data; + u32 local_qp_number; + u8 freed_resource_count; + u8 service_level; + u16 wqe_count; + u32 qp_token; + u32 qkey_ee_token; + u32 remote_qp_number; + u16 dlid; + u16 rlid; + u16 reserved2; + u16 pkey_index; + u32 cqe_timestamp; + u32 wqe_timestamp; + u8 wqe_timestamp_valid; + u8 reserved3; + u8 reserved4; + u8 cqe_flags; + u32 status; +}; + +struct ehca_eqe { + u64 entry; +}; + +struct ehca_mrte { + u64 starting_va; + u64 length; /* length of memory region in bytes*/ + u32 pd; + u8 key_instance; + u8 pagesize; + u8 mr_control; + u8 local_remote_access_ctrl; + u8 reserved[0x20 - 0x18]; + u64 at_pointer[4]; +}; +#endif /*_EHCA_QES_H_*/ diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c new file mode 100644 index 000000000000..4394123cdbd7 --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_qp.c @@ -0,0 +1,1507 @@ +/* + * IBM eServer eHCA Infiniband device driver for Linux on POWER + * + * QP functions + * + * Authors: Waleri Fomin <fomin@de.ibm.com> + * Hoang-Nam Nguyen <hnguyen@de.ibm.com> + * Reinhard Ernst <rernst@de.ibm.com> + * Heiko J Schick <schickhj@de.ibm.com> + * + * Copyright (c) 2005 IBM Corporation + * + * All rights reserved. + * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. + * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +#include <asm/current.h> + +#include "ehca_classes.h" +#include "ehca_tools.h" +#include "ehca_qes.h" +#include "ehca_iverbs.h" +#include "hcp_if.h" +#include "hipz_fns.h" + +static struct kmem_cache *qp_cache; + +/* + * attributes not supported by query qp + */ +#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_MAX_DEST_RD_ATOMIC | \ + IB_QP_MAX_QP_RD_ATOMIC | \ + IB_QP_ACCESS_FLAGS | \ + IB_QP_EN_SQD_ASYNC_NOTIFY) + +/* + * ehca (internal) qp state values + */ +enum ehca_qp_state { + EHCA_QPS_RESET = 1, + EHCA_QPS_INIT = 2, + EHCA_QPS_RTR = 3, + EHCA_QPS_RTS = 5, + EHCA_QPS_SQD = 6, + EHCA_QPS_SQE = 8, + EHCA_QPS_ERR = 128 +}; + +/* + * qp state transitions as defined by IB Arch Rel 1.1 page 431 + */ +enum ib_qp_statetrans { + IB_QPST_ANY2RESET, + IB_QPST_ANY2ERR, + IB_QPST_RESET2INIT, + IB_QPST_INIT2RTR, + IB_QPST_INIT2INIT, + IB_QPST_RTR2RTS, + IB_QPST_RTS2SQD, + IB_QPST_RTS2RTS, + IB_QPST_SQD2RTS, + IB_QPST_SQE2RTS, + IB_QPST_SQD2SQD, + IB_QPST_MAX /* nr of transitions, this must be last!!! 
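(being last, IB_QPST_MAX doubles as the number of transitions and can size any table indexed by these transition values)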
*/ +}; + +/* + * ib2ehca_qp_state maps IB to ehca qp_state + * returns ehca qp state corresponding to given ib qp state + */ +static inline enum ehca_qp_state ib2ehca_qp_state(enum ib_qp_state ib_qp_state) +{ + switch (ib_qp_state) { + case IB_QPS_RESET: + return EHCA_QPS_RESET; + case IB_QPS_INIT: + return EHCA_QPS_INIT; + case IB_QPS_RTR: + return EHCA_QPS_RTR; + case IB_QPS_RTS: + return EHCA_QPS_RTS; + case IB_QPS_SQD: + return EHCA_QPS_SQD; + case IB_QPS_SQE: + return EHCA_QPS_SQE; + case IB_QPS_ERR: + return EHCA_QPS_ERR; + default: + ehca_gen_err("invalid ib_qp_state=%x", ib_qp_state); + return -EINVAL; + } +} + +/* + * ehca2ib_qp_state maps ehca to IB qp_state + * returns ib qp state corresponding to given ehca qp state + */ +static inline enum ib_qp_state ehca2ib_qp_state(enum ehca_qp_state + ehca_qp_state) +{ + switch (ehca_qp_state) { + case EHCA_QPS_RESET: + return IB_QPS_RESET; + case EHCA_QPS_INIT: + return IB_QPS_INIT; + case EHCA_QPS_RTR: + return IB_QPS_RTR; + case EHCA_QPS_RTS: + return IB_QPS_RTS; + case EHCA_QPS_SQD: + return IB_QPS_SQD; + case EHCA_QPS_SQE: + return IB_QPS_SQE; + case EHCA_QPS_ERR: + return IB_QPS_ERR; + default: + ehca_gen_err("invalid ehca_qp_state=%x", ehca_qp_state); + return -EINVAL; + } +} + +/* + * ehca_qp_type used as index for req_attr and opt_attr of + * struct ehca_modqp_statetrans + */ +enum ehca_qp_type { + QPT_RC = 0, + QPT_UC = 1, + QPT_UD = 2, + QPT_SQP = 3, + QPT_MAX +}; + +/* + * ib2ehcaqptype maps Ib to ehca qp_type + * returns ehca qp type corresponding to ib qp type + */ +static inline enum ehca_qp_type ib2ehcaqptype(enum ib_qp_type ibqptype) +{ + switch (ibqptype) { + case IB_QPT_SMI: + case IB_QPT_GSI: + return QPT_SQP; + case IB_QPT_RC: + return QPT_RC; + case IB_QPT_UC: + return QPT_UC; + case IB_QPT_UD: + return QPT_UD; + default: + ehca_gen_err("Invalid ibqptype=%x", ibqptype); + return -EINVAL; + } +} + +static inline enum ib_qp_statetrans get_modqp_statetrans(int ib_fromstate, + int ib_tostate) +{ + int index = -EINVAL; + switch (ib_tostate) { + case IB_QPS_RESET: + index = IB_QPST_ANY2RESET; + break; + case IB_QPS_INIT: + switch (ib_fromstate) { + case IB_QPS_RESET: + index = IB_QPST_RESET2INIT; + break; + case IB_QPS_INIT: + index = IB_QPST_INIT2INIT; + break; + } + break; + case IB_QPS_RTR: + if (ib_fromstate == IB_QPS_INIT) + index = IB_QPST_INIT2RTR; + break; + case IB_QPS_RTS: + switch (ib_fromstate) { + case IB_QPS_RTR: + index = IB_QPST_RTR2RTS; + break; + case IB_QPS_RTS: + index = IB_QPST_RTS2RTS; + break; + case IB_QPS_SQD: + index = IB_QPST_SQD2RTS; + break; + case IB_QPS_SQE: + index = IB_QPST_SQE2RTS; + break; + } + break; + case IB_QPS_SQD: + if (ib_fromstate == IB_QPS_RTS) + index = IB_QPST_RTS2SQD; + break; + case IB_QPS_SQE: + break; + case IB_QPS_ERR: + index = IB_QPST_ANY2ERR; + break; + default: + break; + } + return index; +} + +enum ehca_service_type { + ST_RC = 0, + ST_UC = 1, + ST_RD = 2, + ST_UD = 3 +}; + +/* + * ibqptype2servicetype returns hcp service type corresponding to given + * ib qp type used by create_qp() + */ +static inline int ibqptype2servicetype(enum ib_qp_type ibqptype) +{ + switch (ibqptype) { + case IB_QPT_SMI: + case IB_QPT_GSI: + return ST_UD; + case IB_QPT_RC: + return ST_RC; + case IB_QPT_UC: + return ST_UC; + case IB_QPT_UD: + return ST_UD; + case IB_QPT_RAW_IPV6: + return -EINVAL; + case IB_QPT_RAW_ETY: + return -EINVAL; + default: + ehca_gen_err("Invalid ibqptype=%x", ibqptype); + return -EINVAL; + } +} + +/* + * init_qp_queues initializes/constructs r/squeue and registers 
queue pages. + */ +static inline int init_qp_queues(struct ehca_shca *shca, + struct ehca_qp *my_qp, + int nr_sq_pages, + int nr_rq_pages, + int swqe_size, + int rwqe_size, + int nr_send_sges, int nr_receive_sges) +{ + int ret, cnt, ipz_rc; + void *vpage; + u64 rpage, h_ret; + struct ib_device *ib_dev = &shca->ib_device; + struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle; + + ipz_rc = ipz_queue_ctor(&my_qp->ipz_squeue, + nr_sq_pages, + EHCA_PAGESIZE, swqe_size, nr_send_sges); + if (!ipz_rc) { + ehca_err(ib_dev,"Cannot allocate page for squeue. ipz_rc=%x", + ipz_rc); + return -EBUSY; + } + + ipz_rc = ipz_queue_ctor(&my_qp->ipz_rqueue, + nr_rq_pages, + EHCA_PAGESIZE, rwqe_size, nr_receive_sges); + if (!ipz_rc) { + ehca_err(ib_dev, "Cannot allocate page for rqueue. ipz_rc=%x", + ipz_rc); + ret = -EBUSY; + goto init_qp_queues0; + } + /* register SQ pages */ + for (cnt = 0; cnt < nr_sq_pages; cnt++) { + vpage = ipz_qpageit_get_inc(&my_qp->ipz_squeue); + if (!vpage) { + ehca_err(ib_dev, "SQ ipz_qpageit_get_inc() " + "failed p_vpage= %p", vpage); + ret = -EINVAL; + goto init_qp_queues1; + } + rpage = virt_to_abs(vpage); + + h_ret = hipz_h_register_rpage_qp(ipz_hca_handle, + my_qp->ipz_qp_handle, + &my_qp->pf, 0, 0, + rpage, 1, + my_qp->galpas.kernel); + if (h_ret < H_SUCCESS) { + ehca_err(ib_dev, "SQ hipz_qp_register_rpage()" + " failed rc=%lx", h_ret); + ret = ehca2ib_return_code(h_ret); + goto init_qp_queues1; + } + } + + ipz_qeit_reset(&my_qp->ipz_squeue); + + /* register RQ pages */ + for (cnt = 0; cnt < nr_rq_pages; cnt++) { + vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue); + if (!vpage) { + ehca_err(ib_dev, "RQ ipz_qpageit_get_inc() " + "failed p_vpage = %p", vpage); + ret = -EINVAL; + goto init_qp_queues1; + } + + rpage = virt_to_abs(vpage); + + h_ret = hipz_h_register_rpage_qp(ipz_hca_handle, + my_qp->ipz_qp_handle, + &my_qp->pf, 0, 1, + rpage, 1,my_qp->galpas.kernel); + if (h_ret < H_SUCCESS) { + ehca_err(ib_dev, "RQ hipz_qp_register_rpage() failed " + "rc=%lx", h_ret); + ret = ehca2ib_return_code(h_ret); + goto init_qp_queues1; + } + if (cnt == (nr_rq_pages - 1)) { /* last page! 
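registration must have returned H_SUCCESS, and the page iterator must now be exhausted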
*/ + if (h_ret != H_SUCCESS) { + ehca_err(ib_dev, "RQ hipz_qp_register_rpage() " + "h_ret= %lx ", h_ret); + ret = ehca2ib_return_code(h_ret); + goto init_qp_queues1; + } + vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue); + if (vpage) { + ehca_err(ib_dev, "ipz_qpageit_get_inc() " + "should not succeed vpage=%p", vpage); + ret = -EINVAL; + goto init_qp_queues1; + } + } else { + if (h_ret != H_PAGE_REGISTERED) { + ehca_err(ib_dev, "RQ hipz_qp_register_rpage() " + "h_ret= %lx ", h_ret); + ret = ehca2ib_return_code(h_ret); + goto init_qp_queues1; + } + } + } + + ipz_qeit_reset(&my_qp->ipz_rqueue); + + return 0; + +init_qp_queues1: + ipz_queue_dtor(&my_qp->ipz_rqueue); +init_qp_queues0: + ipz_queue_dtor(&my_qp->ipz_squeue); + return ret; +} + +struct ib_qp *ehca_create_qp(struct ib_pd *pd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata) +{ + static int da_rc_msg_size[]={ 128, 256, 512, 1024, 2048, 4096 }; + static int da_ud_sq_msg_size[]={ 128, 384, 896, 1920, 3968 }; + struct ehca_qp *my_qp; + struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd); + struct ehca_shca *shca = container_of(pd->device, struct ehca_shca, + ib_device); + struct ib_ucontext *context = NULL; + u64 h_ret; + int max_send_sge, max_recv_sge, ret; + + /* h_call's out parameters */ + struct ehca_alloc_qp_parms parms; + u32 swqe_size = 0, rwqe_size = 0; + u8 daqp_completion, isdaqp; + unsigned long flags; + + if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR && + init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) { + ehca_err(pd->device, "init_attr->sg_sig_type=%x not allowed", + init_attr->sq_sig_type); + return ERR_PTR(-EINVAL); + } + + /* save daqp completion bits */ + daqp_completion = init_attr->qp_type & 0x60; + /* save daqp bit */ + isdaqp = (init_attr->qp_type & 0x80) ? 
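/* bit 7 of the overloaded qp_type selects the low-latency "LL" variants checked below */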
1 : 0; + init_attr->qp_type = init_attr->qp_type & 0x1F; + + if (init_attr->qp_type != IB_QPT_UD && + init_attr->qp_type != IB_QPT_SMI && + init_attr->qp_type != IB_QPT_GSI && + init_attr->qp_type != IB_QPT_UC && + init_attr->qp_type != IB_QPT_RC) { + ehca_err(pd->device, "wrong QP Type=%x", init_attr->qp_type); + return ERR_PTR(-EINVAL); + } + if ((init_attr->qp_type != IB_QPT_RC && init_attr->qp_type != IB_QPT_UD) + && isdaqp) { + ehca_err(pd->device, "unsupported LL QP Type=%x", + init_attr->qp_type); + return ERR_PTR(-EINVAL); + } else if (init_attr->qp_type == IB_QPT_RC && isdaqp && + (init_attr->cap.max_send_wr > 255 || + init_attr->cap.max_recv_wr > 255 )) { + ehca_err(pd->device, "Invalid Number of max_sq_wr =%x " + "or max_rq_wr=%x for QP Type=%x", + init_attr->cap.max_send_wr, + init_attr->cap.max_recv_wr,init_attr->qp_type); + return ERR_PTR(-EINVAL); + } else if (init_attr->qp_type == IB_QPT_UD && isdaqp && + init_attr->cap.max_send_wr > 255) { + ehca_err(pd->device, + "Invalid Number of max_send_wr=%x for UD QP_TYPE=%x", + init_attr->cap.max_send_wr, init_attr->qp_type); + return ERR_PTR(-EINVAL); + } + + if (pd->uobject && udata) + context = pd->uobject->context; + + my_qp = kmem_cache_alloc(qp_cache, SLAB_KERNEL); + if (!my_qp) { + ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd); + return ERR_PTR(-ENOMEM); + } + + memset(my_qp, 0, sizeof(struct ehca_qp)); + memset (&parms, 0, sizeof(struct ehca_alloc_qp_parms)); + spin_lock_init(&my_qp->spinlock_s); + spin_lock_init(&my_qp->spinlock_r); + + my_qp->recv_cq = + container_of(init_attr->recv_cq, struct ehca_cq, ib_cq); + my_qp->send_cq = + container_of(init_attr->send_cq, struct ehca_cq, ib_cq); + + my_qp->init_attr = *init_attr; + + do { + if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) { + ret = -ENOMEM; + ehca_err(pd->device, "Can't reserve idr resources."); + goto create_qp_exit0; + } + + spin_lock_irqsave(&ehca_qp_idr_lock, flags); + ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token); + spin_unlock_irqrestore(&ehca_qp_idr_lock, flags); + + } while (ret == -EAGAIN); + + if (ret) { + ret = -ENOMEM; + ehca_err(pd->device, "Can't allocate new idr entry."); + goto create_qp_exit0; + } + + parms.servicetype = ibqptype2servicetype(init_attr->qp_type); + if (parms.servicetype < 0) { + ret = -EINVAL; + ehca_err(pd->device, "Invalid qp_type=%x", init_attr->qp_type); + goto create_qp_exit0; + } + + if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) + parms.sigtype = HCALL_SIGT_EVERY; + else + parms.sigtype = HCALL_SIGT_BY_WQE; + + /* UD_AV CIRCUMVENTION */ + max_send_sge = init_attr->cap.max_send_sge; + max_recv_sge = init_attr->cap.max_recv_sge; + if (IB_QPT_UD == init_attr->qp_type || + IB_QPT_GSI == init_attr->qp_type || + IB_QPT_SMI == init_attr->qp_type) { + max_send_sge += 2; + max_recv_sge += 2; + } + + parms.ipz_eq_handle = shca->eq.ipz_eq_handle; + parms.daqp_ctrl = isdaqp | daqp_completion; + parms.pd = my_pd->fw_pd; + parms.max_recv_sge = max_recv_sge; + parms.max_send_sge = max_send_sge; + + h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, my_qp, &parms); + + if (h_ret != H_SUCCESS) { + ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lx", + h_ret); + ret = ehca2ib_return_code(h_ret); + goto create_qp_exit1; + } + + switch (init_attr->qp_type) { + case IB_QPT_RC: + if (isdaqp == 0) { + swqe_size = offsetof(struct ehca_wqe, u.nud.sg_list[ + (parms.act_nr_send_sges)]); + rwqe_size = offsetof(struct ehca_wqe, u.nud.sg_list[ + (parms.act_nr_recv_sges)]); + } else { /* for daqp we need to use msg 
size, not wqe size */ + swqe_size = da_rc_msg_size[max_send_sge]; + rwqe_size = da_rc_msg_size[max_recv_sge]; + parms.act_nr_send_sges = 1; + parms.act_nr_recv_sges = 1; + } + break; + case IB_QPT_UC: + swqe_size = offsetof(struct ehca_wqe, + u.nud.sg_list[parms.act_nr_send_sges]); + rwqe_size = offsetof(struct ehca_wqe, + u.nud.sg_list[parms.act_nr_recv_sges]); + break; + + case IB_QPT_UD: + case IB_QPT_GSI: + case IB_QPT_SMI: + /* UD circumvention */ + parms.act_nr_recv_sges -= 2; + parms.act_nr_send_sges -= 2; + if (isdaqp) { + swqe_size = da_ud_sq_msg_size[max_send_sge]; + rwqe_size = da_rc_msg_size[max_recv_sge]; + parms.act_nr_send_sges = 1; + parms.act_nr_recv_sges = 1; + } else { + swqe_size = offsetof(struct ehca_wqe, + u.ud_av.sg_list[parms.act_nr_send_sges]); + rwqe_size = offsetof(struct ehca_wqe, + u.ud_av.sg_list[parms.act_nr_recv_sges]); + } + + if (IB_QPT_GSI == init_attr->qp_type || + IB_QPT_SMI == init_attr->qp_type) { + parms.act_nr_send_wqes = init_attr->cap.max_send_wr; + parms.act_nr_recv_wqes = init_attr->cap.max_recv_wr; + parms.act_nr_send_sges = init_attr->cap.max_send_sge; + parms.act_nr_recv_sges = init_attr->cap.max_recv_sge; + my_qp->real_qp_num = + (init_attr->qp_type == IB_QPT_SMI) ? 0 : 1; + } + + break; + + default: + break; + } + + /* initializes r/squeue and registers queue pages */ + ret = init_qp_queues(shca, my_qp, + parms.nr_sq_pages, parms.nr_rq_pages, + swqe_size, rwqe_size, + parms.act_nr_send_sges, parms.act_nr_recv_sges); + if (ret) { + ehca_err(pd->device, + "Couldn't initialize r/squeue and pages ret=%x", ret); + goto create_qp_exit2; + } + + my_qp->ib_qp.pd = &my_pd->ib_pd; + my_qp->ib_qp.device = my_pd->ib_pd.device; + + my_qp->ib_qp.recv_cq = init_attr->recv_cq; + my_qp->ib_qp.send_cq = init_attr->send_cq; + + my_qp->ib_qp.qp_num = my_qp->real_qp_num; + my_qp->ib_qp.qp_type = init_attr->qp_type; + + my_qp->qp_type = init_attr->qp_type; + my_qp->ib_qp.srq = init_attr->srq; + + my_qp->ib_qp.qp_context = init_attr->qp_context; + my_qp->ib_qp.event_handler = init_attr->event_handler; + + init_attr->cap.max_inline_data = 0; /* not supported yet */ + init_attr->cap.max_recv_sge = parms.act_nr_recv_sges; + init_attr->cap.max_recv_wr = parms.act_nr_recv_wqes; + init_attr->cap.max_send_sge = parms.act_nr_send_sges; + init_attr->cap.max_send_wr = parms.act_nr_send_wqes; + + /* NOTE: define_apq0() not supported yet */ + if (init_attr->qp_type == IB_QPT_GSI) { + h_ret = ehca_define_sqp(shca, my_qp, init_attr); + if (h_ret != H_SUCCESS) { + ehca_err(pd->device, "ehca_define_sqp() failed rc=%lx", + h_ret); + ret = ehca2ib_return_code(h_ret); + goto create_qp_exit3; + } + } + if (init_attr->send_cq) { + struct ehca_cq *cq = container_of(init_attr->send_cq, + struct ehca_cq, ib_cq); + ret = ehca_cq_assign_qp(cq, my_qp); + if (ret) { + ehca_err(pd->device, "Couldn't assign qp to send_cq ret=%x", + ret); + goto create_qp_exit3; + } + my_qp->send_cq = cq; + } + /* copy queues, galpa data to user space */ + if (context && udata) { + struct ipz_queue *ipz_rqueue = &my_qp->ipz_rqueue; + struct ipz_queue *ipz_squeue = &my_qp->ipz_squeue; + struct ehca_create_qp_resp resp; + struct vm_area_struct * vma; + memset(&resp, 0, sizeof(resp)); + + resp.qp_num = my_qp->real_qp_num; + resp.token = my_qp->token; + resp.qp_type = my_qp->qp_type; + resp.qkey = my_qp->qkey; + resp.real_qp_num = my_qp->real_qp_num; + /* rqueue properties */ + resp.ipz_rqueue.qe_size = ipz_rqueue->qe_size; + resp.ipz_rqueue.act_nr_of_sg = ipz_rqueue->act_nr_of_sg; + resp.ipz_rqueue.queue_length 
= ipz_rqueue->queue_length; + resp.ipz_rqueue.pagesize = ipz_rqueue->pagesize; + resp.ipz_rqueue.toggle_state = ipz_rqueue->toggle_state; + ret = ehca_mmap_nopage(((u64)(my_qp->token) << 32) | 0x22000000, + ipz_rqueue->queue_length, + (void**)&resp.ipz_rqueue.queue, + &vma); + if (ret) { + ehca_err(pd->device, "Could not mmap rqueue pages"); + goto create_qp_exit3; + } + my_qp->uspace_rqueue = resp.ipz_rqueue.queue; + /* squeue properties */ + resp.ipz_squeue.qe_size = ipz_squeue->qe_size; + resp.ipz_squeue.act_nr_of_sg = ipz_squeue->act_nr_of_sg; + resp.ipz_squeue.queue_length = ipz_squeue->queue_length; + resp.ipz_squeue.pagesize = ipz_squeue->pagesize; + resp.ipz_squeue.toggle_state = ipz_squeue->toggle_state; + ret = ehca_mmap_nopage(((u64)(my_qp->token) << 32) | 0x23000000, + ipz_squeue->queue_length, + (void**)&resp.ipz_squeue.queue, + &vma); + if (ret) { + ehca_err(pd->device, "Could not mmap squeue pages"); + goto create_qp_exit4; + } + my_qp->uspace_squeue = resp.ipz_squeue.queue; + /* fw_handle */ + resp.galpas = my_qp->galpas; + ret = ehca_mmap_register(my_qp->galpas.user.fw_handle, + (void**)&resp.galpas.kernel.fw_handle, + &vma); + if (ret) { + ehca_err(pd->device, "Could not mmap fw_handle"); + goto create_qp_exit5; + } + my_qp->uspace_fwh = (u64)resp.galpas.kernel.fw_handle; + + if (ib_copy_to_udata(udata, &resp, sizeof resp)) { + ehca_err(pd->device, "Copy to udata failed"); + ret = -EINVAL; + goto create_qp_exit6; + } + } + + return &my_qp->ib_qp; + +create_qp_exit6: + ehca_munmap(my_qp->uspace_fwh, EHCA_PAGESIZE); + +create_qp_exit5: + ehca_munmap(my_qp->uspace_squeue, my_qp->ipz_squeue.queue_length); + +create_qp_exit4: + ehca_munmap(my_qp->uspace_rqueue, my_qp->ipz_rqueue.queue_length); + +create_qp_exit3: + ipz_queue_dtor(&my_qp->ipz_rqueue); + ipz_queue_dtor(&my_qp->ipz_squeue); + +create_qp_exit2: + hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp); + +create_qp_exit1: + spin_lock_irqsave(&ehca_qp_idr_lock, flags); + idr_remove(&ehca_qp_idr, my_qp->token); + spin_unlock_irqrestore(&ehca_qp_idr_lock, flags); + +create_qp_exit0: + kmem_cache_free(qp_cache, my_qp); + return ERR_PTR(ret); +} + +/* + * prepare_sqe_rts called by internal_modify_qp() at trans sqe -> rts + * set purge bit of bad wqe and subsequent wqes to avoid reentering sqe + * returns total number of bad wqes in bad_wqe_cnt + */ +static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca, + int *bad_wqe_cnt) +{ + u64 h_ret; + struct ipz_queue *squeue; + void *bad_send_wqe_p, *bad_send_wqe_v; + void *squeue_start_p, *squeue_end_p; + void *squeue_start_v, *squeue_end_v; + struct ehca_wqe *wqe; + int qp_num = my_qp->ib_qp.qp_num; + + /* get send wqe pointer */ + h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle, + my_qp->ipz_qp_handle, &my_qp->pf, + &bad_send_wqe_p, NULL, 2); + if (h_ret != H_SUCCESS) { + ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed" + " ehca_qp=%p qp_num=%x h_ret=%lx", + my_qp, qp_num, h_ret); + return ehca2ib_return_code(h_ret); + } + bad_send_wqe_p = (void*)((u64)bad_send_wqe_p & (~(1L<<63))); + ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p", + qp_num, bad_send_wqe_p); + /* convert wqe pointer to vadr */ + bad_send_wqe_v = abs_to_virt((u64)bad_send_wqe_p); + if (ehca_debug_level) + ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num); + squeue = &my_qp->ipz_squeue; + squeue_start_p = (void*)virt_to_abs(ipz_qeit_calc(squeue, 0L)); + squeue_end_p = squeue_start_p+squeue->queue_length; + squeue_start_v = abs_to_virt((u64)squeue_start_p); + 
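/* map both queue bounds to kernel-virtual addresses so the purge loop below can wrap from the end of the send queue back to its start */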
+	squeue_end_v = abs_to_virt((u64)squeue_end_p);
+	ehca_dbg(&shca->ib_device, "qp_num=%x squeue_start_v=%p squeue_end_v=%p",
+		 qp_num, squeue_start_v, squeue_end_v);
+
+	/* loop sets wqe's purge bit */
+	wqe = (struct ehca_wqe*)bad_send_wqe_v;
+	*bad_wqe_cnt = 0;
+	while (wqe->optype != 0xff && wqe->wqef != 0xff) {
+		if (ehca_debug_level)
+			ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
+		wqe->nr_of_data_seg = 0; /* suppress data access */
+		wqe->wqef = WQEF_PURGE; /* WQE to be purged */
+		wqe = (struct ehca_wqe*)((u8*)wqe+squeue->qe_size);
+		*bad_wqe_cnt = (*bad_wqe_cnt)+1;
+		if ((void*)wqe >= squeue_end_v) {
+			wqe = squeue_start_v;
+		}
+	}
+	/*
+	 * bad wqe will be reprocessed and ignored when poll_cq() is called,
+	 * i.e. nr of wqes with flush error status is one less
+	 */
+	ehca_dbg(&shca->ib_device, "qp_num=%x flusherr_wqe_cnt=%x",
+		 qp_num, (*bad_wqe_cnt)-1);
+	wqe->wqef = 0;
+
+	return 0;
+}
+
+/*
+ * internal_modify_qp with circumvention to handle aqp0 properly
+ * smi_reset2init indicates if this is an internal reset-to-init-call for
+ * smi. This flag must always be zero if called from ehca_modify_qp()!
+ * This internal function was introduced to avoid recursion of
+ * ehca_modify_qp()!
+ */
+static int internal_modify_qp(struct ib_qp *ibqp,
+			      struct ib_qp_attr *attr,
+			      int attr_mask, int smi_reset2init)
+{
+	enum ib_qp_state qp_cur_state, qp_new_state;
+	int cnt, qp_attr_idx, ret = 0;
+	enum ib_qp_statetrans statetrans;
+	struct hcp_modify_qp_control_block *mqpcb;
+	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
+	struct ehca_shca *shca =
+		container_of(ibqp->pd->device, struct ehca_shca, ib_device);
+	u64 update_mask;
+	u64 h_ret;
+	int bad_wqe_cnt = 0;
+	int squeue_locked = 0;
+	unsigned long spl_flags = 0;
+
+	/* do query_qp to obtain current attr values */
+	mqpcb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL);
+	if (mqpcb == NULL) {
+		ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
+			 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
+		return -ENOMEM;
+	}
+
+	h_ret = hipz_h_query_qp(shca->ipz_hca_handle,
+				my_qp->ipz_qp_handle,
+				&my_qp->pf,
+				mqpcb, my_qp->galpas.kernel);
+	if (h_ret != H_SUCCESS) {
+		ehca_err(ibqp->device, "hipz_h_query_qp() failed "
+			 "ehca_qp=%p qp_num=%x h_ret=%lx",
+			 my_qp, ibqp->qp_num, h_ret);
+		ret = ehca2ib_return_code(h_ret);
+		goto modify_qp_exit1;
+	}
+
+	qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);
+
+	if (qp_cur_state == -EINVAL) {	/* invalid qp state */
+		ret = -EINVAL;
+		ehca_err(ibqp->device, "Invalid current ehca_qp_state=%x "
+			 "ehca_qp=%p qp_num=%x",
+			 mqpcb->qp_state, my_qp, ibqp->qp_num);
+		goto modify_qp_exit1;
+	}
+	/*
+	 * circumvention to set aqp0 initial state to init
+	 * as expected by IB spec
+	 */
+	if (smi_reset2init == 0 &&
+	    ibqp->qp_type == IB_QPT_SMI &&
+	    qp_cur_state == IB_QPS_RESET &&
+	    (attr_mask & IB_QP_STATE) &&
+	    attr->qp_state == IB_QPS_INIT) { /* RESET -> INIT */
+		struct ib_qp_attr smiqp_attr = {
+			.qp_state = IB_QPS_INIT,
+			.port_num = my_qp->init_attr.port_num,
+			.pkey_index = 0,
+			.qkey = 0
+		};
+		int smiqp_attr_mask = IB_QP_STATE | IB_QP_PORT |
+			IB_QP_PKEY_INDEX | IB_QP_QKEY;
+		int smirc = internal_modify_qp(
+			ibqp, &smiqp_attr, smiqp_attr_mask, 1);
+		if (smirc) {
+			ehca_err(ibqp->device, "SMI RESET -> INIT failed.
" + "ehca_modify_qp() rc=%x", smirc); + ret = H_PARAMETER; + goto modify_qp_exit1; + } + qp_cur_state = IB_QPS_INIT; + ehca_dbg(ibqp->device, "SMI RESET -> INIT succeeded"); + } + /* is transmitted current state equal to "real" current state */ + if ((attr_mask & IB_QP_CUR_STATE) && + qp_cur_state != attr->cur_qp_state) { + ret = -EINVAL; + ehca_err(ibqp->device, + "Invalid IB_QP_CUR_STATE attr->curr_qp_state=%x <>" + " actual cur_qp_state=%x. ehca_qp=%p qp_num=%x", + attr->cur_qp_state, qp_cur_state, my_qp, ibqp->qp_num); + goto modify_qp_exit1; + } + + ehca_dbg(ibqp->device,"ehca_qp=%p qp_num=%x current qp_state=%x " + "new qp_state=%x attribute_mask=%x", + my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask); + + qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state : qp_cur_state; + if (!smi_reset2init && + !ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type, + attr_mask)) { + ret = -EINVAL; + ehca_err(ibqp->device, + "Invalid qp transition new_state=%x cur_state=%x " + "ehca_qp=%p qp_num=%x attr_mask=%x", qp_new_state, + qp_cur_state, my_qp, ibqp->qp_num, attr_mask); + goto modify_qp_exit1; + } + + if ((mqpcb->qp_state = ib2ehca_qp_state(qp_new_state))) + update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1); + else { + ret = -EINVAL; + ehca_err(ibqp->device, "Invalid new qp state=%x " + "ehca_qp=%p qp_num=%x", + qp_new_state, my_qp, ibqp->qp_num); + goto modify_qp_exit1; + } + + /* retrieve state transition struct to get req and opt attrs */ + statetrans = get_modqp_statetrans(qp_cur_state, qp_new_state); + if (statetrans < 0) { + ret = -EINVAL; + ehca_err(ibqp->device, "<INVALID STATE CHANGE> qp_cur_state=%x " + "new_qp_state=%x State_xsition=%x ehca_qp=%p " + "qp_num=%x", qp_cur_state, qp_new_state, + statetrans, my_qp, ibqp->qp_num); + goto modify_qp_exit1; + } + + qp_attr_idx = ib2ehcaqptype(ibqp->qp_type); + + if (qp_attr_idx < 0) { + ret = qp_attr_idx; + ehca_err(ibqp->device, + "Invalid QP type=%x ehca_qp=%p qp_num=%x", + ibqp->qp_type, my_qp, ibqp->qp_num); + goto modify_qp_exit1; + } + + ehca_dbg(ibqp->device, + "ehca_qp=%p qp_num=%x <VALID STATE CHANGE> qp_state_xsit=%x", + my_qp, ibqp->qp_num, statetrans); + + /* sqe -> rts: set purge bit of bad wqe before actual trans */ + if ((my_qp->qp_type == IB_QPT_UD || + my_qp->qp_type == IB_QPT_GSI || + my_qp->qp_type == IB_QPT_SMI) && + statetrans == IB_QPST_SQE2RTS) { + /* mark next free wqe if kernel */ + if (my_qp->uspace_squeue == 0) { + struct ehca_wqe *wqe; + /* lock send queue */ + spin_lock_irqsave(&my_qp->spinlock_s, spl_flags); + squeue_locked = 1; + /* mark next free wqe */ + wqe = (struct ehca_wqe*) + ipz_qeit_get(&my_qp->ipz_squeue); + wqe->optype = wqe->wqef = 0xff; + ehca_dbg(ibqp->device, "qp_num=%x next_free_wqe=%p", + ibqp->qp_num, wqe); + } + ret = prepare_sqe_rts(my_qp, shca, &bad_wqe_cnt); + if (ret) { + ehca_err(ibqp->device, "prepare_sqe_rts() failed " + "ehca_qp=%p qp_num=%x ret=%x", + my_qp, ibqp->qp_num, ret); + goto modify_qp_exit2; + } + } + + /* + * enable RDMA_Atomic_Control if reset->init und reliable con + * this is necessary since gen2 does not provide that flag, + * but pHyp requires it + */ + if (statetrans == IB_QPST_RESET2INIT && + (ibqp->qp_type == IB_QPT_RC || ibqp->qp_type == IB_QPT_UC)) { + mqpcb->rdma_atomic_ctrl = 3; + update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RDMA_ATOMIC_CTRL, 1); + } + /* circ. 
pHyp requires #RDMA/Atomic Resp Res for UC INIT -> RTR */ + if (statetrans == IB_QPST_INIT2RTR && + (ibqp->qp_type == IB_QPT_UC) && + !(attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)) { + mqpcb->rdma_nr_atomic_resp_res = 1; /* default to 1 */ + update_mask |= + EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1); + } + + if (attr_mask & IB_QP_PKEY_INDEX) { + mqpcb->prim_p_key_idx = attr->pkey_index; + update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_P_KEY_IDX, 1); + } + if (attr_mask & IB_QP_PORT) { + if (attr->port_num < 1 || attr->port_num > shca->num_ports) { + ret = -EINVAL; + ehca_err(ibqp->device, "Invalid port=%x. " + "ehca_qp=%p qp_num=%x num_ports=%x", + attr->port_num, my_qp, ibqp->qp_num, + shca->num_ports); + goto modify_qp_exit2; + } + mqpcb->prim_phys_port = attr->port_num; + update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_PHYS_PORT, 1); + } + if (attr_mask & IB_QP_QKEY) { + mqpcb->qkey = attr->qkey; + update_mask |= EHCA_BMASK_SET(MQPCB_MASK_QKEY, 1); + } + if (attr_mask & IB_QP_AV) { + int ah_mult = ib_rate_to_mult(attr->ah_attr.static_rate); + int ehca_mult = ib_rate_to_mult(shca->sport[my_qp-> + init_attr.port_num].rate); + + mqpcb->dlid = attr->ah_attr.dlid; + update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DLID, 1); + mqpcb->source_path_bits = attr->ah_attr.src_path_bits; + update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS, 1); + mqpcb->service_level = attr->ah_attr.sl; + update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL, 1); + + if (ah_mult < ehca_mult) + mqpcb->max_static_rate = (ah_mult > 0) ? + ((ehca_mult - 1) / ah_mult) : 0; + else + mqpcb->max_static_rate = 0; + + update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1); + + /* + * only if GRH is TRUE we might consider SOURCE_GID_IDX + * and DEST_GID otherwise phype will return H_ATTR_PARM!!! + */ + if (attr->ah_attr.ah_flags == IB_AH_GRH) { + mqpcb->send_grh_flag = 1 << 31; + update_mask |= + EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1); + mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index; + update_mask |= + EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX, 1); + + for (cnt = 0; cnt < 16; cnt++) + mqpcb->dest_gid.byte[cnt] = + attr->ah_attr.grh.dgid.raw[cnt]; + + update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_GID, 1); + mqpcb->flow_label = attr->ah_attr.grh.flow_label; + update_mask |= EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL, 1); + mqpcb->hop_limit = attr->ah_attr.grh.hop_limit; + update_mask |= EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT, 1); + mqpcb->traffic_class = attr->ah_attr.grh.traffic_class; + update_mask |= + EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS, 1); + } + } + + if (attr_mask & IB_QP_PATH_MTU) { + mqpcb->path_mtu = attr->path_mtu; + update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PATH_MTU, 1); + } + if (attr_mask & IB_QP_TIMEOUT) { + mqpcb->timeout = attr->timeout; + update_mask |= EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT, 1); + } + if (attr_mask & IB_QP_RETRY_CNT) { + mqpcb->retry_count = attr->retry_cnt; + update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT, 1); + } + if (attr_mask & IB_QP_RNR_RETRY) { + mqpcb->rnr_retry_count = attr->rnr_retry; + update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT, 1); + } + if (attr_mask & IB_QP_RQ_PSN) { + mqpcb->receive_psn = attr->rq_psn; + update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RECEIVE_PSN, 1); + } + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { + mqpcb->rdma_nr_atomic_resp_res = attr->max_dest_rd_atomic < 3 ? 
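/* requests above 2 are silently clamped to 2 */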
+ attr->max_dest_rd_atomic : 2; + update_mask |= + EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1); + } + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { + mqpcb->rdma_atomic_outst_dest_qp = attr->max_rd_atomic < 3 ? + attr->max_rd_atomic : 2; + update_mask |= + EHCA_BMASK_SET + (MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP, 1); + } + if (attr_mask & IB_QP_ALT_PATH) { + int ah_mult = ib_rate_to_mult(attr->alt_ah_attr.static_rate); + int ehca_mult = ib_rate_to_mult( + shca->sport[my_qp->init_attr.port_num].rate); + + mqpcb->dlid_al = attr->alt_ah_attr.dlid; + update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DLID_AL, 1); + mqpcb->source_path_bits_al = attr->alt_ah_attr.src_path_bits; + update_mask |= + EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS_AL, 1); + mqpcb->service_level_al = attr->alt_ah_attr.sl; + update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL_AL, 1); + + if (ah_mult < ehca_mult) + mqpcb->max_static_rate = (ah_mult > 0) ? + ((ehca_mult - 1) / ah_mult) : 0; + else + mqpcb->max_static_rate_al = 0; + + update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE_AL, 1); + + /* + * only if GRH is TRUE we might consider SOURCE_GID_IDX + * and DEST_GID otherwise phype will return H_ATTR_PARM!!! + */ + if (attr->alt_ah_attr.ah_flags == IB_AH_GRH) { + mqpcb->send_grh_flag_al = 1 << 31; + update_mask |= + EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG_AL, 1); + mqpcb->source_gid_idx_al = + attr->alt_ah_attr.grh.sgid_index; + update_mask |= + EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX_AL, 1); + + for (cnt = 0; cnt < 16; cnt++) + mqpcb->dest_gid_al.byte[cnt] = + attr->alt_ah_attr.grh.dgid.raw[cnt]; + + update_mask |= + EHCA_BMASK_SET(MQPCB_MASK_DEST_GID_AL, 1); + mqpcb->flow_label_al = attr->alt_ah_attr.grh.flow_label; + update_mask |= + EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL_AL, 1); + mqpcb->hop_limit_al = attr->alt_ah_attr.grh.hop_limit; + update_mask |= + EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT_AL, 1); + mqpcb->traffic_class_al = + attr->alt_ah_attr.grh.traffic_class; + update_mask |= + EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS_AL, 1); + } + } + + if (attr_mask & IB_QP_MIN_RNR_TIMER) { + mqpcb->min_rnr_nak_timer_field = attr->min_rnr_timer; + update_mask |= + EHCA_BMASK_SET(MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD, 1); + } + + if (attr_mask & IB_QP_SQ_PSN) { + mqpcb->send_psn = attr->sq_psn; + update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_PSN, 1); + } + + if (attr_mask & IB_QP_DEST_QPN) { + mqpcb->dest_qp_nr = attr->dest_qp_num; + update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_QP_NR, 1); + } + + if (attr_mask & IB_QP_PATH_MIG_STATE) { + mqpcb->path_migration_state = attr->path_mig_state; + update_mask |= + EHCA_BMASK_SET(MQPCB_MASK_PATH_MIGRATION_STATE, 1); + } + + if (attr_mask & IB_QP_CAP) { + mqpcb->max_nr_outst_send_wr = attr->cap.max_send_wr+1; + update_mask |= + EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_SEND_WR, 1); + mqpcb->max_nr_outst_recv_wr = attr->cap.max_recv_wr+1; + update_mask |= + EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_RECV_WR, 1); + /* no support for max_send/recv_sge yet */ + } + + if (ehca_debug_level) + ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num); + + h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, + my_qp->ipz_qp_handle, + &my_qp->pf, + update_mask, + mqpcb, my_qp->galpas.kernel); + + if (h_ret != H_SUCCESS) { + ret = ehca2ib_return_code(h_ret); + ehca_err(ibqp->device, "hipz_h_modify_qp() failed rc=%lx " + "ehca_qp=%p qp_num=%x",h_ret, my_qp, ibqp->qp_num); + goto modify_qp_exit2; + } + + if ((my_qp->qp_type == IB_QPT_UD || + my_qp->qp_type == IB_QPT_GSI || + my_qp->qp_type == IB_QPT_SMI) && + statetrans 
== IB_QPST_SQE2RTS) { + /* doorbell to reprocessing wqes */ + iosync(); /* serialize GAL register access */ + hipz_update_sqa(my_qp, bad_wqe_cnt-1); + ehca_gen_dbg("doorbell for %x wqes", bad_wqe_cnt); + } + + if (statetrans == IB_QPST_RESET2INIT || + statetrans == IB_QPST_INIT2INIT) { + mqpcb->qp_enable = 1; + mqpcb->qp_state = EHCA_QPS_INIT; + update_mask = 0; + update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1); + + h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, + my_qp->ipz_qp_handle, + &my_qp->pf, + update_mask, + mqpcb, + my_qp->galpas.kernel); + + if (h_ret != H_SUCCESS) { + ret = ehca2ib_return_code(h_ret); + ehca_err(ibqp->device, "ENABLE in context of " + "RESET_2_INIT failed! Maybe you didn't get " + "a LID h_ret=%lx ehca_qp=%p qp_num=%x", + h_ret, my_qp, ibqp->qp_num); + goto modify_qp_exit2; + } + } + + if (statetrans == IB_QPST_ANY2RESET) { + ipz_qeit_reset(&my_qp->ipz_rqueue); + ipz_qeit_reset(&my_qp->ipz_squeue); + } + + if (attr_mask & IB_QP_QKEY) + my_qp->qkey = attr->qkey; + +modify_qp_exit2: + if (squeue_locked) { /* this means: sqe -> rts */ + spin_unlock_irqrestore(&my_qp->spinlock_s, spl_flags); + my_qp->sqerr_purgeflag = 1; + } + +modify_qp_exit1: + kfree(mqpcb); + + return ret; +} + +int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, + struct ib_udata *udata) +{ + struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp); + struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd, + ib_pd); + u32 cur_pid = current->tgid; + + if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context && + my_pd->ownpid != cur_pid) { + ehca_err(ibqp->pd->device, "Invalid caller pid=%x ownpid=%x", + cur_pid, my_pd->ownpid); + return -EINVAL; + } + + return internal_modify_qp(ibqp, attr, attr_mask, 0); +} + +int ehca_query_qp(struct ib_qp *qp, + struct ib_qp_attr *qp_attr, + int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) +{ + struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp); + struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd, + ib_pd); + struct ehca_shca *shca = container_of(qp->device, struct ehca_shca, + ib_device); + struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle; + struct hcp_modify_qp_control_block *qpcb; + u32 cur_pid = current->tgid; + int cnt, ret = 0; + u64 h_ret; + + if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context && + my_pd->ownpid != cur_pid) { + ehca_err(qp->device, "Invalid caller pid=%x ownpid=%x", + cur_pid, my_pd->ownpid); + return -EINVAL; + } + + if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) { + ehca_err(qp->device,"Invalid attribute mask " + "ehca_qp=%p qp_num=%x qp_attr_mask=%x ", + my_qp, qp->qp_num, qp_attr_mask); + return -EINVAL; + } + + qpcb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL ); + if (!qpcb) { + ehca_err(qp->device,"Out of memory for qpcb " + "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num); + return -ENOMEM; + } + + h_ret = hipz_h_query_qp(adapter_handle, + my_qp->ipz_qp_handle, + &my_qp->pf, + qpcb, my_qp->galpas.kernel); + + if (h_ret != H_SUCCESS) { + ret = ehca2ib_return_code(h_ret); + ehca_err(qp->device,"hipz_h_query_qp() failed " + "ehca_qp=%p qp_num=%x h_ret=%lx", + my_qp, qp->qp_num, h_ret); + goto query_qp_exit1; + } + + qp_attr->cur_qp_state = ehca2ib_qp_state(qpcb->qp_state); + qp_attr->qp_state = qp_attr->cur_qp_state; + + if (qp_attr->cur_qp_state == -EINVAL) { + ret = -EINVAL; + ehca_err(qp->device,"Got invalid ehca_qp_state=%x " + "ehca_qp=%p qp_num=%x", + qpcb->qp_state, my_qp, qp->qp_num); + goto query_qp_exit1; + } + + if 
(qp_attr->qp_state == IB_QPS_SQD) + qp_attr->sq_draining = 1; + + qp_attr->qkey = qpcb->qkey; + qp_attr->path_mtu = qpcb->path_mtu; + qp_attr->path_mig_state = qpcb->path_migration_state; + qp_attr->rq_psn = qpcb->receive_psn; + qp_attr->sq_psn = qpcb->send_psn; + qp_attr->min_rnr_timer = qpcb->min_rnr_nak_timer_field; + qp_attr->cap.max_send_wr = qpcb->max_nr_outst_send_wr-1; + qp_attr->cap.max_recv_wr = qpcb->max_nr_outst_recv_wr-1; + /* UD_AV CIRCUMVENTION */ + if (my_qp->qp_type == IB_QPT_UD) { + qp_attr->cap.max_send_sge = + qpcb->actual_nr_sges_in_sq_wqe - 2; + qp_attr->cap.max_recv_sge = + qpcb->actual_nr_sges_in_rq_wqe - 2; + } else { + qp_attr->cap.max_send_sge = + qpcb->actual_nr_sges_in_sq_wqe; + qp_attr->cap.max_recv_sge = + qpcb->actual_nr_sges_in_rq_wqe; + } + + qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size; + qp_attr->dest_qp_num = qpcb->dest_qp_nr; + + qp_attr->pkey_index = + EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->prim_p_key_idx); + + qp_attr->port_num = + EHCA_BMASK_GET(MQPCB_PRIM_PHYS_PORT, qpcb->prim_phys_port); + + qp_attr->timeout = qpcb->timeout; + qp_attr->retry_cnt = qpcb->retry_count; + qp_attr->rnr_retry = qpcb->rnr_retry_count; + + qp_attr->alt_pkey_index = + EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->alt_p_key_idx); + + qp_attr->alt_port_num = qpcb->alt_phys_port; + qp_attr->alt_timeout = qpcb->timeout_al; + + /* primary av */ + qp_attr->ah_attr.sl = qpcb->service_level; + + if (qpcb->send_grh_flag) { + qp_attr->ah_attr.ah_flags = IB_AH_GRH; + } + + qp_attr->ah_attr.static_rate = qpcb->max_static_rate; + qp_attr->ah_attr.dlid = qpcb->dlid; + qp_attr->ah_attr.src_path_bits = qpcb->source_path_bits; + qp_attr->ah_attr.port_num = qp_attr->port_num; + + /* primary GRH */ + qp_attr->ah_attr.grh.traffic_class = qpcb->traffic_class; + qp_attr->ah_attr.grh.hop_limit = qpcb->hop_limit; + qp_attr->ah_attr.grh.sgid_index = qpcb->source_gid_idx; + qp_attr->ah_attr.grh.flow_label = qpcb->flow_label; + + for (cnt = 0; cnt < 16; cnt++) + qp_attr->ah_attr.grh.dgid.raw[cnt] = + qpcb->dest_gid.byte[cnt]; + + /* alternate AV */ + qp_attr->alt_ah_attr.sl = qpcb->service_level_al; + if (qpcb->send_grh_flag_al) { + qp_attr->alt_ah_attr.ah_flags = IB_AH_GRH; + } + + qp_attr->alt_ah_attr.static_rate = qpcb->max_static_rate_al; + qp_attr->alt_ah_attr.dlid = qpcb->dlid_al; + qp_attr->alt_ah_attr.src_path_bits = qpcb->source_path_bits_al; + + /* alternate GRH */ + qp_attr->alt_ah_attr.grh.traffic_class = qpcb->traffic_class_al; + qp_attr->alt_ah_attr.grh.hop_limit = qpcb->hop_limit_al; + qp_attr->alt_ah_attr.grh.sgid_index = qpcb->source_gid_idx_al; + qp_attr->alt_ah_attr.grh.flow_label = qpcb->flow_label_al; + + for (cnt = 0; cnt < 16; cnt++) + qp_attr->alt_ah_attr.grh.dgid.raw[cnt] = + qpcb->dest_gid_al.byte[cnt]; + + /* return init attributes given in ehca_create_qp */ + if (qp_init_attr) + *qp_init_attr = my_qp->init_attr; + + if (ehca_debug_level) + ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num); + +query_qp_exit1: + kfree(qpcb); + + return ret; +} + +int ehca_destroy_qp(struct ib_qp *ibqp) +{ + struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp); + struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca, + ib_device); + struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd, + ib_pd); + u32 cur_pid = current->tgid; + u32 qp_num = ibqp->qp_num; + int ret; + u64 h_ret; + u8 port_num; + enum ib_qp_type qp_type; + unsigned long flags; + + if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context && + my_pd->ownpid != cur_pid) { 
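+	/* same ownership check as in ehca_modify_qp()/ehca_query_qp(): only the process owning the pd may destroy a userspace qp */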
+ ehca_err(ibqp->device, "Invalid caller pid=%x ownpid=%x", + cur_pid, my_pd->ownpid); + return -EINVAL; + } + + if (my_qp->send_cq) { + ret = ehca_cq_unassign_qp(my_qp->send_cq, + my_qp->real_qp_num); + if (ret) { + ehca_err(ibqp->device, "Couldn't unassign qp from " + "send_cq ret=%x qp_num=%x cq_num=%x", ret, + my_qp->ib_qp.qp_num, my_qp->send_cq->cq_number); + return ret; + } + } + + spin_lock_irqsave(&ehca_qp_idr_lock, flags); + idr_remove(&ehca_qp_idr, my_qp->token); + spin_unlock_irqrestore(&ehca_qp_idr_lock, flags); + + /* un-mmap if vma alloc */ + if (my_qp->uspace_rqueue) { + ret = ehca_munmap(my_qp->uspace_rqueue, + my_qp->ipz_rqueue.queue_length); + if (ret) + ehca_err(ibqp->device, "Could not munmap rqueue " + "qp_num=%x", qp_num); + ret = ehca_munmap(my_qp->uspace_squeue, + my_qp->ipz_squeue.queue_length); + if (ret) + ehca_err(ibqp->device, "Could not munmap squeue " + "qp_num=%x", qp_num); + ret = ehca_munmap(my_qp->uspace_fwh, EHCA_PAGESIZE); + if (ret) + ehca_err(ibqp->device, "Could not munmap fwh qp_num=%x", + qp_num); + } + + h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp); + if (h_ret != H_SUCCESS) { + ehca_err(ibqp->device, "hipz_h_destroy_qp() failed rc=%lx " + "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num); + return ehca2ib_return_code(h_ret); + } + + port_num = my_qp->init_attr.port_num; + qp_type = my_qp->init_attr.qp_type; + + /* no support for IB_QPT_SMI yet */ + if (qp_type == IB_QPT_GSI) { + struct ib_event event; + ehca_info(ibqp->device, "device %s: port %x is inactive.", + shca->ib_device.name, port_num); + event.device = &shca->ib_device; + event.event = IB_EVENT_PORT_ERR; + event.element.port_num = port_num; + shca->sport[port_num - 1].port_state = IB_PORT_DOWN; + ib_dispatch_event(&event); + } + + ipz_queue_dtor(&my_qp->ipz_rqueue); + ipz_queue_dtor(&my_qp->ipz_squeue); + kmem_cache_free(qp_cache, my_qp); + return 0; +} + +int ehca_init_qp_cache(void) +{ + qp_cache = kmem_cache_create("ehca_cache_qp", + sizeof(struct ehca_qp), 0, + SLAB_HWCACHE_ALIGN, + NULL, NULL); + if (!qp_cache) + return -ENOMEM; + return 0; +} + +void ehca_cleanup_qp_cache(void) +{ + if (qp_cache) + kmem_cache_destroy(qp_cache); +} diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c new file mode 100644 index 000000000000..b46bda1bf85d --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_reqs.c @@ -0,0 +1,653 @@ +/* + * IBM eServer eHCA Infiniband device driver for Linux on POWER + * + * post_send/recv, poll_cq, req_notify + * + * Authors: Waleri Fomin <fomin@de.ibm.com> + * Hoang-Nam Nguyen <hnguyen@de.ibm.com> + * Reinhard Ernst <rernst@de.ibm.com> + * + * Copyright (c) 2005 IBM Corporation + * + * All rights reserved. + * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. + * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +#include <asm-powerpc/system.h> +#include "ehca_classes.h" +#include "ehca_tools.h" +#include "ehca_qes.h" +#include "ehca_iverbs.h" +#include "hcp_if.h" +#include "hipz_fns.h" + +static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue, + struct ehca_wqe *wqe_p, + struct ib_recv_wr *recv_wr) +{ + u8 cnt_ds; + if (unlikely((recv_wr->num_sge < 0) || + (recv_wr->num_sge > ipz_rqueue->act_nr_of_sg))) { + ehca_gen_err("Invalid number of WQE SGE. " + "num_sqe=%x max_nr_of_sg=%x", + recv_wr->num_sge, ipz_rqueue->act_nr_of_sg); + return -EINVAL; /* invalid SG list length */ + } + + /* clear wqe header until sglist */ + memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list)); + + wqe_p->work_request_id = recv_wr->wr_id; + wqe_p->nr_of_data_seg = recv_wr->num_sge; + + for (cnt_ds = 0; cnt_ds < recv_wr->num_sge; cnt_ds++) { + wqe_p->u.all_rcv.sg_list[cnt_ds].vaddr = + recv_wr->sg_list[cnt_ds].addr; + wqe_p->u.all_rcv.sg_list[cnt_ds].lkey = + recv_wr->sg_list[cnt_ds].lkey; + wqe_p->u.all_rcv.sg_list[cnt_ds].length = + recv_wr->sg_list[cnt_ds].length; + } + + if (ehca_debug_level) { + ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p", ipz_rqueue); + ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe"); + } + + return 0; +} + +#if defined(DEBUG_GSI_SEND_WR) + +/* need ib_mad struct */ +#include <rdma/ib_mad.h> + +static void trace_send_wr_ud(const struct ib_send_wr *send_wr) +{ + int idx; + int j; + while (send_wr) { + struct ib_mad_hdr *mad_hdr = send_wr->wr.ud.mad_hdr; + struct ib_sge *sge = send_wr->sg_list; + ehca_gen_dbg("send_wr#%x wr_id=%lx num_sge=%x " + "send_flags=%x opcode=%x",idx, send_wr->wr_id, + send_wr->num_sge, send_wr->send_flags, + send_wr->opcode); + if (mad_hdr) { + ehca_gen_dbg("send_wr#%x mad_hdr base_version=%x " + "mgmt_class=%x class_version=%x method=%x " + "status=%x class_specific=%x tid=%lx " + "attr_id=%x resv=%x attr_mod=%x", + idx, mad_hdr->base_version, + mad_hdr->mgmt_class, + mad_hdr->class_version, mad_hdr->method, + mad_hdr->status, mad_hdr->class_specific, + mad_hdr->tid, mad_hdr->attr_id, + mad_hdr->resv, + mad_hdr->attr_mod); + } + for (j = 0; j < send_wr->num_sge; j++) { + u8 *data = (u8 *) abs_to_virt(sge->addr); + ehca_gen_dbg("send_wr#%x sge#%x addr=%p length=%x " + "lkey=%x", + idx, j, data, sge->length, sge->lkey); + /* assume length is n*16 */ + ehca_dmp(data, sge->length, "send_wr#%x sge#%x", + idx, j); + sge++; + } /* eof for j */ + idx++; + send_wr = send_wr->next; + } /* eof while send_wr */ +} + +#endif /* DEBUG_GSI_SEND_WR */ + +static inline int ehca_write_swqe(struct ehca_qp *qp, + struct ehca_wqe *wqe_p, + const struct ib_send_wr *send_wr) +{ + u32 idx; + u64 dma_length; + struct ehca_av *my_av; + u32 remote_qkey = 
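/* may be overridden with qp->qkey below (IB 1.2 spec C10-15) */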
send_wr->wr.ud.remote_qkey; + + if (unlikely((send_wr->num_sge < 0) || + (send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) { + ehca_gen_err("Invalid number of WQE SGE. " + "num_sqe=%x max_nr_of_sg=%x", + send_wr->num_sge, qp->ipz_squeue.act_nr_of_sg); + return -EINVAL; /* invalid SG list length */ + } + + /* clear wqe header until sglist */ + memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list)); + + wqe_p->work_request_id = send_wr->wr_id; + + switch (send_wr->opcode) { + case IB_WR_SEND: + case IB_WR_SEND_WITH_IMM: + wqe_p->optype = WQE_OPTYPE_SEND; + break; + case IB_WR_RDMA_WRITE: + case IB_WR_RDMA_WRITE_WITH_IMM: + wqe_p->optype = WQE_OPTYPE_RDMAWRITE; + break; + case IB_WR_RDMA_READ: + wqe_p->optype = WQE_OPTYPE_RDMAREAD; + break; + default: + ehca_gen_err("Invalid opcode=%x", send_wr->opcode); + return -EINVAL; /* invalid opcode */ + } + + wqe_p->wqef = (send_wr->opcode) & WQEF_HIGH_NIBBLE; + + wqe_p->wr_flag = 0; + + if (send_wr->send_flags & IB_SEND_SIGNALED) + wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM; + + if (send_wr->opcode == IB_WR_SEND_WITH_IMM || + send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) { + /* this might not work as long as HW does not support it */ + wqe_p->immediate_data = be32_to_cpu(send_wr->imm_data); + wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT; + } + + wqe_p->nr_of_data_seg = send_wr->num_sge; + + switch (qp->qp_type) { + case IB_QPT_SMI: + case IB_QPT_GSI: + /* no break is intential here */ + case IB_QPT_UD: + /* IB 1.2 spec C10-15 compliance */ + if (send_wr->wr.ud.remote_qkey & 0x80000000) + remote_qkey = qp->qkey; + + wqe_p->destination_qp_number = send_wr->wr.ud.remote_qpn << 8; + wqe_p->local_ee_context_qkey = remote_qkey; + if (!send_wr->wr.ud.ah) { + ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp); + return -EINVAL; + } + my_av = container_of(send_wr->wr.ud.ah, struct ehca_av, ib_ah); + wqe_p->u.ud_av.ud_av = my_av->av; + + /* + * omitted check of IB_SEND_INLINE + * since HW does not support it + */ + for (idx = 0; idx < send_wr->num_sge; idx++) { + wqe_p->u.ud_av.sg_list[idx].vaddr = + send_wr->sg_list[idx].addr; + wqe_p->u.ud_av.sg_list[idx].lkey = + send_wr->sg_list[idx].lkey; + wqe_p->u.ud_av.sg_list[idx].length = + send_wr->sg_list[idx].length; + } /* eof for idx */ + if (qp->qp_type == IB_QPT_SMI || + qp->qp_type == IB_QPT_GSI) + wqe_p->u.ud_av.ud_av.pmtu = 1; + if (qp->qp_type == IB_QPT_GSI) { + wqe_p->pkeyi = send_wr->wr.ud.pkey_index; +#ifdef DEBUG_GSI_SEND_WR + trace_send_wr_ud(send_wr); +#endif /* DEBUG_GSI_SEND_WR */ + } + break; + + case IB_QPT_UC: + if (send_wr->send_flags & IB_SEND_FENCE) + wqe_p->wr_flag |= WQE_WRFLAG_FENCE; + /* no break is intentional here */ + case IB_QPT_RC: + /* TODO: atomic not implemented */ + wqe_p->u.nud.remote_virtual_adress = + send_wr->wr.rdma.remote_addr; + wqe_p->u.nud.rkey = send_wr->wr.rdma.rkey; + + /* + * omitted checking of IB_SEND_INLINE + * since HW does not support it + */ + dma_length = 0; + for (idx = 0; idx < send_wr->num_sge; idx++) { + wqe_p->u.nud.sg_list[idx].vaddr = + send_wr->sg_list[idx].addr; + wqe_p->u.nud.sg_list[idx].lkey = + send_wr->sg_list[idx].lkey; + wqe_p->u.nud.sg_list[idx].length = + send_wr->sg_list[idx].length; + dma_length += send_wr->sg_list[idx].length; + } /* eof idx */ + wqe_p->u.nud.atomic_1st_op_dma_len = dma_length; + + break; + + default: + ehca_gen_err("Invalid qptype=%x", qp->qp_type); + return -EINVAL; + } + + if (ehca_debug_level) { + ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp); + ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe"); + 
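/* dump length 16*(6+n): the 96-byte wqe header plus one 16-byte ehca_vsgentry per data segment */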
} + return 0; +} + +/* map_ib_wc_status converts raw cqe_status to ib_wc_status */ +static inline void map_ib_wc_status(u32 cqe_status, + enum ib_wc_status *wc_status) +{ + if (unlikely(cqe_status & WC_STATUS_ERROR_BIT)) { + switch (cqe_status & 0x3F) { + case 0x01: + case 0x21: + *wc_status = IB_WC_LOC_LEN_ERR; + break; + case 0x02: + case 0x22: + *wc_status = IB_WC_LOC_QP_OP_ERR; + break; + case 0x03: + case 0x23: + *wc_status = IB_WC_LOC_EEC_OP_ERR; + break; + case 0x04: + case 0x24: + *wc_status = IB_WC_LOC_PROT_ERR; + break; + case 0x05: + case 0x25: + *wc_status = IB_WC_WR_FLUSH_ERR; + break; + case 0x06: + *wc_status = IB_WC_MW_BIND_ERR; + break; + case 0x07: /* remote error - look into bits 20:24 */ + switch ((cqe_status + & WC_STATUS_REMOTE_ERROR_FLAGS) >> 11) { + case 0x0: + /* + * PSN Sequence Error! + * couldn't find a matching status! + */ + *wc_status = IB_WC_GENERAL_ERR; + break; + case 0x1: + *wc_status = IB_WC_REM_INV_REQ_ERR; + break; + case 0x2: + *wc_status = IB_WC_REM_ACCESS_ERR; + break; + case 0x3: + *wc_status = IB_WC_REM_OP_ERR; + break; + case 0x4: + *wc_status = IB_WC_REM_INV_RD_REQ_ERR; + break; + } + break; + case 0x08: + *wc_status = IB_WC_RETRY_EXC_ERR; + break; + case 0x09: + *wc_status = IB_WC_RNR_RETRY_EXC_ERR; + break; + case 0x0A: + case 0x2D: + *wc_status = IB_WC_REM_ABORT_ERR; + break; + case 0x0B: + case 0x2E: + *wc_status = IB_WC_INV_EECN_ERR; + break; + case 0x0C: + case 0x2F: + *wc_status = IB_WC_INV_EEC_STATE_ERR; + break; + case 0x0D: + *wc_status = IB_WC_BAD_RESP_ERR; + break; + case 0x10: + /* WQE purged */ + *wc_status = IB_WC_WR_FLUSH_ERR; + break; + default: + *wc_status = IB_WC_FATAL_ERR; + + } + } else + *wc_status = IB_WC_SUCCESS; +} + +int ehca_post_send(struct ib_qp *qp, + struct ib_send_wr *send_wr, + struct ib_send_wr **bad_send_wr) +{ + struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp); + struct ib_send_wr *cur_send_wr; + struct ehca_wqe *wqe_p; + int wqe_cnt = 0; + int ret = 0; + unsigned long spl_flags; + + /* LOCK the QUEUE */ + spin_lock_irqsave(&my_qp->spinlock_s, spl_flags); + + /* loop processes list of send reqs */ + for (cur_send_wr = send_wr; cur_send_wr != NULL; + cur_send_wr = cur_send_wr->next) { + u64 start_offset = my_qp->ipz_squeue.current_q_offset; + /* get pointer next to free WQE */ + wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue); + if (unlikely(!wqe_p)) { + /* too many posted work requests: queue overflow */ + if (bad_send_wr) + *bad_send_wr = cur_send_wr; + if (wqe_cnt == 0) { + ret = -ENOMEM; + ehca_err(qp->device, "Too many posted WQEs " + "qp_num=%x", qp->qp_num); + } + goto post_send_exit0; + } + /* write a SEND WQE into the QUEUE */ + ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr); + /* + * if something failed, + * reset the free entry pointer to the start value + */ + if (unlikely(ret)) { + my_qp->ipz_squeue.current_q_offset = start_offset; + *bad_send_wr = cur_send_wr; + if (wqe_cnt == 0) { + ret = -EINVAL; + ehca_err(qp->device, "Could not write WQE " + "qp_num=%x", qp->qp_num); + } + goto post_send_exit0; + } + wqe_cnt++; + ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d", + my_qp, qp->qp_num, wqe_cnt); + } /* eof for cur_send_wr */ + +post_send_exit0: + /* UNLOCK the QUEUE */ + spin_unlock_irqrestore(&my_qp->spinlock_s, spl_flags); + iosync(); /* serialize GAL register access */ + hipz_update_sqa(my_qp, wqe_cnt); + return ret; +} + +int ehca_post_recv(struct ib_qp *qp, + struct ib_recv_wr *recv_wr, + struct ib_recv_wr **bad_recv_wr) +{ + struct ehca_qp *my_qp = container_of(qp, 
struct ehca_qp, ib_qp); + struct ib_recv_wr *cur_recv_wr; + struct ehca_wqe *wqe_p; + int wqe_cnt = 0; + int ret = 0; + unsigned long spl_flags; + + /* LOCK the QUEUE */ + spin_lock_irqsave(&my_qp->spinlock_r, spl_flags); + + /* loop processes list of send reqs */ + for (cur_recv_wr = recv_wr; cur_recv_wr != NULL; + cur_recv_wr = cur_recv_wr->next) { + u64 start_offset = my_qp->ipz_rqueue.current_q_offset; + /* get pointer next to free WQE */ + wqe_p = ipz_qeit_get_inc(&my_qp->ipz_rqueue); + if (unlikely(!wqe_p)) { + /* too many posted work requests: queue overflow */ + if (bad_recv_wr) + *bad_recv_wr = cur_recv_wr; + if (wqe_cnt == 0) { + ret = -ENOMEM; + ehca_err(qp->device, "Too many posted WQEs " + "qp_num=%x", qp->qp_num); + } + goto post_recv_exit0; + } + /* write a RECV WQE into the QUEUE */ + ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, cur_recv_wr); + /* + * if something failed, + * reset the free entry pointer to the start value + */ + if (unlikely(ret)) { + my_qp->ipz_rqueue.current_q_offset = start_offset; + *bad_recv_wr = cur_recv_wr; + if (wqe_cnt == 0) { + ret = -EINVAL; + ehca_err(qp->device, "Could not write WQE " + "qp_num=%x", qp->qp_num); + } + goto post_recv_exit0; + } + wqe_cnt++; + ehca_gen_dbg("ehca_qp=%p qp_num=%x wqe_cnt=%d", + my_qp, qp->qp_num, wqe_cnt); + } /* eof for cur_recv_wr */ + +post_recv_exit0: + spin_unlock_irqrestore(&my_qp->spinlock_r, spl_flags); + iosync(); /* serialize GAL register access */ + hipz_update_rqa(my_qp, wqe_cnt); + return ret; +} + +/* + * ib_wc_opcode table converts ehca wc opcode to ib + * Since we use zero to indicate invalid opcode, the actual ib opcode must + * be decremented!!! + */ +static const u8 ib_wc_opcode[255] = { + [0x01] = IB_WC_RECV+1, + [0x02] = IB_WC_RECV_RDMA_WITH_IMM+1, + [0x04] = IB_WC_BIND_MW+1, + [0x08] = IB_WC_FETCH_ADD+1, + [0x10] = IB_WC_COMP_SWAP+1, + [0x20] = IB_WC_RDMA_WRITE+1, + [0x40] = IB_WC_RDMA_READ+1, + [0x80] = IB_WC_SEND+1 +}; + +/* internal function to poll one entry of cq */ +static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc) +{ + int ret = 0; + struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); + struct ehca_cqe *cqe; + int cqe_count = 0; + +poll_cq_one_read_cqe: + cqe = (struct ehca_cqe *) + ipz_qeit_get_inc_valid(&my_cq->ipz_queue); + if (!cqe) { + ret = -EAGAIN; + ehca_dbg(cq->device, "Completion queue is empty ehca_cq=%p " + "cq_num=%x ret=%x", my_cq, my_cq->cq_number, ret); + goto poll_cq_one_exit0; + } + + /* prevents loads being reordered across this point */ + rmb(); + + cqe_count++; + if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) { + struct ehca_qp *qp=ehca_cq_get_qp(my_cq, cqe->local_qp_number); + int purgeflag; + unsigned long spl_flags; + if (!qp) { + ehca_err(cq->device, "cq_num=%x qp_num=%x " + "could not find qp -> ignore cqe", + my_cq->cq_number, cqe->local_qp_number); + ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x", + my_cq->cq_number, cqe->local_qp_number); + /* ignore this purged cqe */ + goto poll_cq_one_read_cqe; + } + spin_lock_irqsave(&qp->spinlock_s, spl_flags); + purgeflag = qp->sqerr_purgeflag; + spin_unlock_irqrestore(&qp->spinlock_s, spl_flags); + + if (purgeflag) { + ehca_dbg(cq->device, "Got CQE with purged bit qp_num=%x " + "src_qp=%x", + cqe->local_qp_number, cqe->remote_qp_number); + if (ehca_debug_level) + ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x", + cqe->local_qp_number, + cqe->remote_qp_number); + /* + * ignore this to avoid double cqes of bad wqe + * that caused sqe and turn off purge flag + */ + qp->sqerr_purgeflag = 0; + 
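/* drop the purged cqe and fetch the next one instead */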
goto poll_cq_one_read_cqe; + } + } + + /* tracing cqe */ + if (ehca_debug_level) { + ehca_dbg(cq->device, + "Received COMPLETION ehca_cq=%p cq_num=%x -----", + my_cq, my_cq->cq_number); + ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x", + my_cq, my_cq->cq_number); + ehca_dbg(cq->device, + "ehca_cq=%p cq_num=%x -------------------------", + my_cq, my_cq->cq_number); + } + + /* we got a completion! */ + wc->wr_id = cqe->work_request_id; + + /* eval ib_wc_opcode */ + wc->opcode = ib_wc_opcode[cqe->optype]-1; + if (unlikely(wc->opcode == -1)) { + ehca_err(cq->device, "Invalid cqe->OPType=%x cqe->status=%x " + "ehca_cq=%p cq_num=%x", + cqe->optype, cqe->status, my_cq, my_cq->cq_number); + /* dump cqe for additional info */ + ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x", + my_cq, my_cq->cq_number); + /* the exit path still updates the queue adder, so this + entry is thrown away */ + goto poll_cq_one_exit0; + } + /* eval ib_wc_status */ + if (unlikely(cqe->status & WC_STATUS_ERROR_BIT)) { + /* complete with errors */ + map_ib_wc_status(cqe->status, &wc->status); + wc->vendor_err = wc->status; + } else + wc->status = IB_WC_SUCCESS; + + wc->qp_num = cqe->local_qp_number; + wc->byte_len = cqe->nr_bytes_transferred; + wc->pkey_index = cqe->pkey_index; + wc->slid = cqe->rlid; + wc->dlid_path_bits = cqe->dlid; + wc->src_qp = cqe->remote_qp_number; + wc->wc_flags = cqe->w_completion_flags; + wc->imm_data = cpu_to_be32(cqe->immediate_data); + wc->sl = cqe->service_level; + + if (wc->status != IB_WC_SUCCESS) + ehca_dbg(cq->device, + "ehca_cq=%p cq_num=%x WARNING unsuccessful cqe " + "OPType=%x status=%x qp_num=%x src_qp=%x wr_id=%lx " + "cqe=%p", my_cq, my_cq->cq_number, cqe->optype, + cqe->status, cqe->local_qp_number, + cqe->remote_qp_number, cqe->work_request_id, cqe); + +poll_cq_one_exit0: + if (cqe_count > 0) + hipz_update_feca(my_cq, cqe_count); + + return ret; +} + +int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc) +{ + struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); + int nr; + struct ib_wc *current_wc = wc; + int ret = 0; + unsigned long spl_flags; + + if (num_entries < 1) { + ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p " + "cq_num=%x", num_entries, my_cq, my_cq->cq_number); + ret = -EINVAL; + goto poll_cq_exit0; + } + + spin_lock_irqsave(&my_cq->spinlock, spl_flags); + for (nr = 0; nr < num_entries; nr++) { + ret = ehca_poll_cq_one(cq, current_wc); + if (ret) + break; + current_wc++; + } /* eof for nr */ + spin_unlock_irqrestore(&my_cq->spinlock, spl_flags); + if (ret == -EAGAIN || !ret) + ret = nr; + +poll_cq_exit0: + return ret; +} + +int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify) +{ + struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); + + switch (cq_notify) { + case IB_CQ_SOLICITED: + hipz_set_cqx_n0(my_cq, 1); + break; + case IB_CQ_NEXT_COMP: + hipz_set_cqx_n1(my_cq, 1); + break; + default: + return -EINVAL; + } + + return 0; +} diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c new file mode 100644 index 000000000000..9f16e9c79394 --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_sqp.c @@ -0,0 +1,111 @@ +/* + * IBM eServer eHCA Infiniband device driver for Linux on POWER + * + * SQP functions + * + * Authors: Khadija Souissi <souissi@de.ibm.com> + * Heiko J Schick <schickhj@de.ibm.com> + * + * Copyright (c) 2005 IBM Corporation + * + * All rights reserved. + * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. 
+ * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +#include <linux/module.h> +#include <linux/err.h> +#include "ehca_classes.h" +#include "ehca_tools.h" +#include "ehca_qes.h" +#include "ehca_iverbs.h" +#include "hcp_if.h" + + +/** + * ehca_define_sqp - defines special queue pair 1 (GSI QP). When the special + * queue pair has been created successfully, the corresponding port becomes + * active. + * + * Defining special queue pair 0 (SMI QP) is not supported yet. + * + * @qp_init_attr: Queue pair init attributes with port and queue pair type + */ + +u64 ehca_define_sqp(struct ehca_shca *shca, + struct ehca_qp *ehca_qp, + struct ib_qp_init_attr *qp_init_attr) +{ + u32 pma_qp_nr, bma_qp_nr; + u64 ret; + u8 port = qp_init_attr->port_num; + int counter; + + shca->sport[port - 1].port_state = IB_PORT_DOWN; + + switch (qp_init_attr->qp_type) { + case IB_QPT_SMI: + /* function not supported yet */ + break; + case IB_QPT_GSI: + ret = hipz_h_define_aqp1(shca->ipz_hca_handle, + ehca_qp->ipz_qp_handle, + ehca_qp->galpas.kernel, + (u32) qp_init_attr->port_num, + &pma_qp_nr, &bma_qp_nr); + + if (ret != H_SUCCESS) { + ehca_err(&shca->ib_device, + "Can't define AQP1 for port %x. rc=%lx", + port, ret); + return ret; + } + break; + default: + ehca_err(&shca->ib_device, "invalid qp_type=%x", + qp_init_attr->qp_type); + return H_PARAMETER; + } + + for (counter = 0; + shca->sport[port - 1].port_state != IB_PORT_ACTIVE && + counter < ehca_port_act_time; + counter++) { + ehca_dbg(&shca->ib_device, "... 
wait until port %x is active", + port); + msleep_interruptible(1000); + } + + if (counter == ehca_port_act_time) { + ehca_err(&shca->ib_device, "Port %x is not active.", port); + return H_HARDWARE; + } + + return H_SUCCESS; +} diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/infiniband/hw/ehca/ehca_tools.h new file mode 100644 index 000000000000..9f56bb846d93 --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_tools.h @@ -0,0 +1,172 @@ +/* + * IBM eServer eHCA Infiniband device driver for Linux on POWER + * + * auxiliary functions + * + * Authors: Christoph Raisch <raisch@de.ibm.com> + * Hoang-Nam Nguyen <hnguyen@de.ibm.com> + * Khadija Souissi <souissik@de.ibm.com> + * Waleri Fomin <fomin@de.ibm.com> + * Heiko J Schick <schickhj@de.ibm.com> + * + * Copyright (c) 2005 IBM Corporation + * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. + * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +#ifndef EHCA_TOOLS_H +#define EHCA_TOOLS_H + +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <linux/delay.h> +#include <linux/idr.h> +#include <linux/kthread.h> +#include <linux/mm.h> +#include <linux/mman.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/vmalloc.h> +#include <linux/version.h> +#include <linux/notifier.h> +#include <linux/cpu.h> +#include <linux/device.h> + +#include <asm/abs_addr.h> +#include <asm/ibmebus.h> +#include <asm/io.h> +#include <asm/pgtable.h> + +extern int ehca_debug_level; + +#define ehca_dbg(ib_dev, format, arg...) \ + do { \ + if (unlikely(ehca_debug_level)) \ + dev_printk(KERN_DEBUG, (ib_dev)->dma_device, \ + "PU%04x EHCA_DBG:%s " format "\n", \ + get_paca()->paca_index, __FUNCTION__, \ + ## arg); \ + } while (0) + +#define ehca_info(ib_dev, format, arg...) \ + dev_info((ib_dev)->dma_device, "PU%04x EHCA_INFO:%s " format "\n", \ + get_paca()->paca_index, __FUNCTION__, ## arg) + +#define ehca_warn(ib_dev, format, arg...) \ + dev_warn((ib_dev)->dma_device, "PU%04x EHCA_WARN:%s " format "\n", \ + get_paca()->paca_index, __FUNCTION__, ## arg) + +#define ehca_err(ib_dev, format, arg...) 
\ + dev_err((ib_dev)->dma_device, "PU%04x EHCA_ERR:%s " format "\n", \ + get_paca()->paca_index, __FUNCTION__, ## arg) + +/* use this one only if no ib_dev available */ +#define ehca_gen_dbg(format, arg...) \ + do { \ + if (unlikely(ehca_debug_level)) \ + printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n",\ + get_paca()->paca_index, __FUNCTION__, ## arg); \ + } while (0) + +#define ehca_gen_warn(format, arg...) \ + do { \ + if (unlikely(ehca_debug_level)) \ + printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n",\ + get_paca()->paca_index, __FUNCTION__, ## arg); \ + } while (0) + +#define ehca_gen_err(format, arg...) \ + printk(KERN_ERR "PU%04x EHCA_ERR:%s " format "\n", \ + get_paca()->paca_index, __FUNCTION__, ## arg) + +/** + * ehca_dmp - printk a memory block, whose length is n*8 bytes. + * Each line has the following layout: + * <format string> adr=X ofs=Y <8 bytes hex> <8 bytes hex> + */ +#define ehca_dmp(adr, len, format, args...) \ + do { \ + unsigned int x; \ + unsigned int l = (unsigned int)(len); \ + unsigned char *deb = (unsigned char*)(adr); \ + for (x = 0; x < l; x += 16) { \ + printk("EHCA_DMP:%s" format \ + " adr=%p ofs=%04x %016lx %016lx\n", \ + __FUNCTION__, ##args, deb, x, \ + *((u64 *)&deb[0]), *((u64 *)&deb[8])); \ + deb += 16; \ + } \ + } while (0) + +/* define a bitmask, little endian version */ +#define EHCA_BMASK(pos,length) (((pos)<<16)+(length)) + +/* define a bitmask, the ibm way... */ +#define EHCA_BMASK_IBM(from,to) (((63-to)<<16)+((to)-(from)+1)) + +/* internal function, don't use */ +#define EHCA_BMASK_SHIFTPOS(mask) (((mask)>>16)&0xffff) + +/* internal function, don't use */ +#define EHCA_BMASK_MASK(mask) (0xffffffffffffffffULL >> ((64-(mask))&0xffff)) + +/** + * EHCA_BMASK_SET - return value shifted and masked by mask + * variable|=EHCA_BMASK_SET(MY_MASK,0x4711) ORs the bits in variable + * variable&=~EHCA_BMASK_SET(MY_MASK,-1) clears the bits from the mask + * in variable + */ +#define EHCA_BMASK_SET(mask,value) \ + ((EHCA_BMASK_MASK(mask) & ((u64)(value)))<<EHCA_BMASK_SHIFTPOS(mask)) + +/** + * EHCA_BMASK_GET - extract a parameter from value by mask + */ +#define EHCA_BMASK_GET(mask,value) \ + (EHCA_BMASK_MASK(mask)& (((u64)(value))>>EHCA_BMASK_SHIFTPOS(mask))) + + +/* Converts ehca to ib return code */ +static inline int ehca2ib_return_code(u64 ehca_rc) +{ + switch (ehca_rc) { + case H_SUCCESS: + return 0; + case H_BUSY: + return -EBUSY; + case H_NO_MEM: + return -ENOMEM; + default: + return -EINVAL; + } +} + + +#endif /* EHCA_TOOLS_H */ diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c new file mode 100644 index 000000000000..e08764e4aef2 --- /dev/null +++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c @@ -0,0 +1,392 @@ +/* + * IBM eServer eHCA Infiniband device driver for Linux on POWER + * + * userspace support verbs + * + * Authors: Christoph Raisch <raisch@de.ibm.com> + * Hoang-Nam Nguyen <hnguyen@de.ibm.com> + * Heiko J Schick <schickhj@de.ibm.com> + * + * Copyright (c) 2005 IBM Corporation + * + * All rights reserved. + * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. + * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. 
+ * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <asm/current.h> + +#include "ehca_classes.h" +#include "ehca_iverbs.h" +#include "ehca_mrmw.h" +#include "ehca_tools.h" +#include "hcp_if.h" + +struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device, + struct ib_udata *udata) +{ + struct ehca_ucontext *my_context; + + my_context = kzalloc(sizeof *my_context, GFP_KERNEL); + if (!my_context) { + ehca_err(device, "Out of memory device=%p", device); + return ERR_PTR(-ENOMEM); + } + + return &my_context->ib_ucontext; +} + +int ehca_dealloc_ucontext(struct ib_ucontext *context) +{ + kfree(container_of(context, struct ehca_ucontext, ib_ucontext)); + return 0; +} + +struct page *ehca_nopage(struct vm_area_struct *vma, + unsigned long address, int *type) +{ + struct page *mypage = NULL; + u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT; + u32 idr_handle = fileoffset >> 32; + u32 q_type = (fileoffset >> 28) & 0xF; /* CQ, QP,... 
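(decoded from bits 28..31 of the mmap offset) 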
*/ + u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */ + u32 cur_pid = current->tgid; + unsigned long flags; + struct ehca_cq *cq; + struct ehca_qp *qp; + struct ehca_pd *pd; + u64 offset; + void *vaddr; + + switch (q_type) { + case 1: /* CQ */ + spin_lock_irqsave(&ehca_cq_idr_lock, flags); + cq = idr_find(&ehca_cq_idr, idr_handle); + spin_unlock_irqrestore(&ehca_cq_idr_lock, flags); + + /* make sure this mmap really belongs to the authorized user */ + if (!cq) { + ehca_gen_err("cq is NULL ret=NOPAGE_SIGBUS"); + return NOPAGE_SIGBUS; + } + + if (cq->ownpid != cur_pid) { + ehca_err(cq->ib_cq.device, + "Invalid caller pid=%x ownpid=%x", + cur_pid, cq->ownpid); + return NOPAGE_SIGBUS; + } + + if (rsrc_type == 2) { + ehca_dbg(cq->ib_cq.device, "cq=%p cq queuearea", cq); + offset = address - vma->vm_start; + vaddr = ipz_qeit_calc(&cq->ipz_queue, offset); + ehca_dbg(cq->ib_cq.device, "offset=%lx vaddr=%p", + offset, vaddr); + mypage = virt_to_page(vaddr); + } + break; + + case 2: /* QP */ + spin_lock_irqsave(&ehca_qp_idr_lock, flags); + qp = idr_find(&ehca_qp_idr, idr_handle); + spin_unlock_irqrestore(&ehca_qp_idr_lock, flags); + + /* make sure this mmap really belongs to the authorized user */ + if (!qp) { + ehca_gen_err("qp is NULL ret=NOPAGE_SIGBUS"); + return NOPAGE_SIGBUS; + } + + pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd); + if (pd->ownpid != cur_pid) { + ehca_err(qp->ib_qp.device, + "Invalid caller pid=%x ownpid=%x", + cur_pid, pd->ownpid); + return NOPAGE_SIGBUS; + } + + if (rsrc_type == 2) { /* rqueue */ + ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueuearea", qp); + offset = address - vma->vm_start; + vaddr = ipz_qeit_calc(&qp->ipz_rqueue, offset); + ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p", + offset, vaddr); + mypage = virt_to_page(vaddr); + } else if (rsrc_type == 3) { /* squeue */ + ehca_dbg(qp->ib_qp.device, "qp=%p qp squeuearea", qp); + offset = address - vma->vm_start; + vaddr = ipz_qeit_calc(&qp->ipz_squeue, offset); + ehca_dbg(qp->ib_qp.device, "offset=%lx vaddr=%p", + offset, vaddr); + mypage = virt_to_page(vaddr); + } + break; + + default: + ehca_gen_err("bad queue type %x", q_type); + return NOPAGE_SIGBUS; + } + + if (!mypage) { + ehca_gen_err("Invalid page adr==NULL ret=NOPAGE_SIGBUS"); + return NOPAGE_SIGBUS; + } + get_page(mypage); + + return mypage; +} + +static struct vm_operations_struct ehcau_vm_ops = { + .nopage = ehca_nopage, +}; + +int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) +{ + u64 fileoffset = vma->vm_pgoff << PAGE_SHIFT; + u32 idr_handle = fileoffset >> 32; + u32 q_type = (fileoffset >> 28) & 0xF; /* CQ, QP,... 
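(bits 28..31 of the mmap offset, as in ehca_nopage) 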
*/ + u32 rsrc_type = (fileoffset >> 24) & 0xF; /* sq,rq,cmnd_window */ + u32 cur_pid = current->tgid; + u32 ret; + u64 vsize, physical; + unsigned long flags; + struct ehca_cq *cq; + struct ehca_qp *qp; + struct ehca_pd *pd; + + switch (q_type) { + case 1: /* CQ */ + spin_lock_irqsave(&ehca_cq_idr_lock, flags); + cq = idr_find(&ehca_cq_idr, idr_handle); + spin_unlock_irqrestore(&ehca_cq_idr_lock, flags); + + /* make sure this mmap really belongs to the authorized user */ + if (!cq) + return -EINVAL; + + if (cq->ownpid != cur_pid) { + ehca_err(cq->ib_cq.device, + "Invalid caller pid=%x ownpid=%x", + cur_pid, cq->ownpid); + return -ENOMEM; + } + + if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context) + return -EINVAL; + + switch (rsrc_type) { + case 1: /* galpa fw handle */ + ehca_dbg(cq->ib_cq.device, "cq=%p cq triggerarea", cq); + vma->vm_flags |= VM_RESERVED; + vsize = vma->vm_end - vma->vm_start; + if (vsize != EHCA_PAGESIZE) { + ehca_err(cq->ib_cq.device, "invalid vsize=%lx", + vma->vm_end - vma->vm_start); + return -EINVAL; + } + + physical = cq->galpas.user.fw_handle; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_flags |= VM_IO | VM_RESERVED; + + ehca_dbg(cq->ib_cq.device, + "vsize=%lx physical=%lx", vsize, physical); + ret = remap_pfn_range(vma, vma->vm_start, + physical >> PAGE_SHIFT, vsize, + vma->vm_page_prot); + if (ret) { + ehca_err(cq->ib_cq.device, + "remap_pfn_range() failed ret=%x", + ret); + return -ENOMEM; + } + break; + + case 2: /* cq queue_addr */ + ehca_dbg(cq->ib_cq.device, "cq=%p cq q_addr", cq); + vma->vm_flags |= VM_RESERVED; + vma->vm_ops = &ehcau_vm_ops; + break; + + default: + ehca_err(cq->ib_cq.device, "bad resource type %x", + rsrc_type); + return -EINVAL; + } + break; + + case 2: /* QP */ + spin_lock_irqsave(&ehca_qp_idr_lock, flags); + qp = idr_find(&ehca_qp_idr, idr_handle); + spin_unlock_irqrestore(&ehca_qp_idr_lock, flags); + + /* make sure this mmap really belongs to the authorized user */ + if (!qp) + return -EINVAL; + + pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd); + if (pd->ownpid != cur_pid) { + ehca_err(qp->ib_qp.device, + "Invalid caller pid=%x ownpid=%x", + cur_pid, pd->ownpid); + return -ENOMEM; + } + + if (!qp->ib_qp.uobject || qp->ib_qp.uobject->context != context) + return -EINVAL; + + switch (rsrc_type) { + case 1: /* galpa fw handle */ + ehca_dbg(qp->ib_qp.device, "qp=%p qp triggerarea", qp); + vma->vm_flags |= VM_RESERVED; + vsize = vma->vm_end - vma->vm_start; + if (vsize != EHCA_PAGESIZE) { + ehca_err(qp->ib_qp.device, "invalid vsize=%lx", + vma->vm_end - vma->vm_start); + return -EINVAL; + } + + physical = qp->galpas.user.fw_handle; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_flags |= VM_IO | VM_RESERVED; + + ehca_dbg(qp->ib_qp.device, "vsize=%lx physical=%lx", + vsize, physical); + ret = remap_pfn_range(vma, vma->vm_start, + physical >> PAGE_SHIFT, vsize, + vma->vm_page_prot); + if (ret) { + ehca_err(qp->ib_qp.device, + "remap_pfn_range() failed ret=%x", + ret); + return -ENOMEM; + } + break; + + case 2: /* qp rqueue_addr */ + ehca_dbg(qp->ib_qp.device, "qp=%p qp rqueue_addr", qp); + vma->vm_flags |= VM_RESERVED; + vma->vm_ops = &ehcau_vm_ops; + break; + + case 3: /* qp squeue_addr */ + ehca_dbg(qp->ib_qp.device, "qp=%p qp squeue_addr", qp); + vma->vm_flags |= VM_RESERVED; + vma->vm_ops = &ehcau_vm_ops; + break; + + default: + ehca_err(qp->ib_qp.device, "bad resource type %x", + rsrc_type); + return -EINVAL; + } + break; + + default: + ehca_gen_err("bad queue type %x", 
q_type); + return -EINVAL; + } + + return 0; +} + +int ehca_mmap_nopage(u64 foffset, u64 length, void **mapped, + struct vm_area_struct **vma) +{ + down_write(&current->mm->mmap_sem); + *mapped = (void *)do_mmap(NULL, 0, length, PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, + foffset); + up_write(&current->mm->mmap_sem); + if (!(*mapped)) { + ehca_gen_err("couldn't mmap foffset=%lx length=%lx", + foffset, length); + return -EINVAL; + } + + *vma = find_vma(current->mm, (u64)*mapped); + if (!(*vma)) { + down_write(&current->mm->mmap_sem); + do_munmap(current->mm, (unsigned long)*mapped, length); + up_write(&current->mm->mmap_sem); + ehca_gen_err("couldn't find vma queue=%p", *mapped); + return -EINVAL; + } + (*vma)->vm_flags |= VM_RESERVED; + (*vma)->vm_ops = &ehcau_vm_ops; + + return 0; +} + +int ehca_mmap_register(u64 physical, void **mapped, + struct vm_area_struct **vma) +{ + int ret; + unsigned long vsize; + /* ehca hw supports only 4k pages */ + ret = ehca_mmap_nopage(0, EHCA_PAGESIZE, mapped, vma); + if (ret) { + ehca_gen_err("couldn't mmap physical=%lx", physical); + return ret; + } + + (*vma)->vm_flags |= VM_RESERVED; + vsize = (*vma)->vm_end - (*vma)->vm_start; + if (vsize != EHCA_PAGESIZE) { + ehca_gen_err("invalid vsize=%lx", + (*vma)->vm_end - (*vma)->vm_start); + return -EINVAL; + } + + (*vma)->vm_page_prot = pgprot_noncached((*vma)->vm_page_prot); + (*vma)->vm_flags |= VM_IO | VM_RESERVED; + + ret = remap_pfn_range((*vma), (*vma)->vm_start, + physical >> PAGE_SHIFT, vsize, + (*vma)->vm_page_prot); + if (ret) { + ehca_gen_err("remap_pfn_range() failed ret=%x", ret); + return -ENOMEM; + } + + return 0; +} + +int ehca_munmap(unsigned long addr, size_t len) +{ + int ret = 0; + struct mm_struct *mm = current->mm; + if (mm) { + down_write(&mm->mmap_sem); + ret = do_munmap(mm, addr, len); + up_write(&mm->mmap_sem); + } + return ret; +} diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c new file mode 100644 index 000000000000..3fb46e67df87 --- /dev/null +++ b/drivers/infiniband/hw/ehca/hcp_if.c @@ -0,0 +1,874 @@ +/* + * IBM eServer eHCA Infiniband device driver for Linux on POWER + * + * Firmware Infiniband Interface code for POWER + * + * Authors: Christoph Raisch <raisch@de.ibm.com> + * Hoang-Nam Nguyen <hnguyen@de.ibm.com> + * Gerd Bayer <gerd.bayer@de.ibm.com> + * Waleri Fomin <fomin@de.ibm.com> + * + * Copyright (c) 2005 IBM Corporation + * + * All rights reserved. + * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. + * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <asm/hvcall.h> +#include "ehca_tools.h" +#include "hcp_if.h" +#include "hcp_phyp.h" +#include "hipz_fns.h" +#include "ipz_pt_fn.h" + +#define H_ALL_RES_QP_ENHANCED_OPS EHCA_BMASK_IBM(9, 11) +#define H_ALL_RES_QP_PTE_PIN EHCA_BMASK_IBM(12, 12) +#define H_ALL_RES_QP_SERVICE_TYPE EHCA_BMASK_IBM(13, 15) +#define H_ALL_RES_QP_LL_RQ_CQE_POSTING EHCA_BMASK_IBM(18, 18) +#define H_ALL_RES_QP_LL_SQ_CQE_POSTING EHCA_BMASK_IBM(19, 21) +#define H_ALL_RES_QP_SIGNALING_TYPE EHCA_BMASK_IBM(22, 23) +#define H_ALL_RES_QP_UD_AV_LKEY_CTRL EHCA_BMASK_IBM(31, 31) +#define H_ALL_RES_QP_RESOURCE_TYPE EHCA_BMASK_IBM(56, 63) + +#define H_ALL_RES_QP_MAX_OUTST_SEND_WR EHCA_BMASK_IBM(0, 15) +#define H_ALL_RES_QP_MAX_OUTST_RECV_WR EHCA_BMASK_IBM(16, 31) +#define H_ALL_RES_QP_MAX_SEND_SGE EHCA_BMASK_IBM(32, 39) +#define H_ALL_RES_QP_MAX_RECV_SGE EHCA_BMASK_IBM(40, 47) + +#define H_ALL_RES_QP_ACT_OUTST_SEND_WR EHCA_BMASK_IBM(16, 31) +#define H_ALL_RES_QP_ACT_OUTST_RECV_WR EHCA_BMASK_IBM(48, 63) +#define H_ALL_RES_QP_ACT_SEND_SGE EHCA_BMASK_IBM(8, 15) +#define H_ALL_RES_QP_ACT_RECV_SGE EHCA_BMASK_IBM(24, 31) + +#define H_ALL_RES_QP_SQUEUE_SIZE_PAGES EHCA_BMASK_IBM(0, 31) +#define H_ALL_RES_QP_RQUEUE_SIZE_PAGES EHCA_BMASK_IBM(32, 63) + +/* direct access qp controls */ +#define DAQP_CTRL_ENABLE 0x01 +#define DAQP_CTRL_SEND_COMP 0x20 +#define DAQP_CTRL_RECV_COMP 0x40 + +static u32 get_longbusy_msecs(int longbusy_rc) +{ + switch (longbusy_rc) { + case H_LONG_BUSY_ORDER_1_MSEC: + return 1; + case H_LONG_BUSY_ORDER_10_MSEC: + return 10; + case H_LONG_BUSY_ORDER_100_MSEC: + return 100; + case H_LONG_BUSY_ORDER_1_SEC: + return 1000; + case H_LONG_BUSY_ORDER_10_SEC: + return 10000; + case H_LONG_BUSY_ORDER_100_SEC: + return 100000; + default: + return 1; + } +} + +static long ehca_plpar_hcall_norets(unsigned long opcode, + unsigned long arg1, + unsigned long arg2, + unsigned long arg3, + unsigned long arg4, + unsigned long arg5, + unsigned long arg6, + unsigned long arg7) +{ + long ret; + int i, sleep_msecs; + + ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx " + "arg5=%lx arg6=%lx arg7=%lx", + opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7); + + for (i = 0; i < 5; i++) { + ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4, + arg5, arg6, arg7); + + if (H_IS_LONG_BUSY(ret)) { + sleep_msecs = get_longbusy_msecs(ret); + msleep_interruptible(sleep_msecs); + continue; + } + + if (ret < H_SUCCESS) + ehca_gen_err("opcode=%lx ret=%lx" + " arg1=%lx arg2=%lx arg3=%lx arg4=%lx" + " arg5=%lx arg6=%lx arg7=%lx ", + opcode, ret, + arg1, arg2, arg3, arg4, arg5, + arg6, arg7); + + ehca_gen_dbg("opcode=%lx ret=%lx", opcode, ret); + return ret; + + } + + return H_BUSY; +} + +static long ehca_plpar_hcall9(unsigned long opcode, + unsigned long *outs, /* array of 9 outputs */ + unsigned long arg1, + unsigned long arg2, + unsigned long arg3, + unsigned long arg4, + unsigned long arg5, + unsigned long arg6, + unsigned long arg7, + unsigned long arg8, + unsigned long arg9) +{ + long 
ret; + int i, sleep_msecs; + + ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx " + "arg5=%lx arg6=%lx arg7=%lx arg8=%lx arg9=%lx", + opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7, + arg8, arg9); + + for (i = 0; i < 5; i++) { + ret = plpar_hcall9(opcode, outs, + arg1, arg2, arg3, arg4, arg5, + arg6, arg7, arg8, arg9); + + if (H_IS_LONG_BUSY(ret)) { + sleep_msecs = get_longbusy_msecs(ret); + msleep_interruptible(sleep_msecs); + continue; + } + + if (ret < H_SUCCESS) + ehca_gen_err("opcode=%lx ret=%lx" + " arg1=%lx arg2=%lx arg3=%lx arg4=%lx" + " arg5=%lx arg6=%lx arg7=%lx arg8=%lx" + " arg9=%lx" + " out1=%lx out2=%lx out3=%lx out4=%lx" + " out5=%lx out6=%lx out7=%lx out8=%lx" + " out9=%lx", + opcode, ret, + arg1, arg2, arg3, arg4, arg5, + arg6, arg7, arg8, arg9, + outs[0], outs[1], outs[2], outs[3], + outs[4], outs[5], outs[6], outs[7], + outs[8]); + + ehca_gen_dbg("opcode=%lx ret=%lx out1=%lx out2=%lx out3=%lx " + "out4=%lx out5=%lx out6=%lx out7=%lx out8=%lx " + "out9=%lx", + opcode, ret, outs[0], outs[1], outs[2], outs[3], + outs[4], outs[5], outs[6], outs[7], outs[8]); + return ret; + + } + + return H_BUSY; +} +u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle, + struct ehca_pfeq *pfeq, + const u32 neq_control, + const u32 number_of_entries, + struct ipz_eq_handle *eq_handle, + u32 *act_nr_of_entries, + u32 *act_pages, + u32 *eq_ist) +{ + u64 ret; + u64 outs[PLPAR_HCALL9_BUFSIZE]; + u64 allocate_controls; + + /* resource type */ + allocate_controls = 3ULL; + + /* ISN is associated */ + if (neq_control != 1) + allocate_controls = (1ULL << (63 - 7)) | allocate_controls; + else /* notification event queue */ + allocate_controls = (1ULL << 63) | allocate_controls; + + ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, + adapter_handle.handle, /* r4 */ + allocate_controls, /* r5 */ + number_of_entries, /* r6 */ + 0, 0, 0, 0, 0, 0); + eq_handle->handle = outs[0]; + *act_nr_of_entries = (u32)outs[3]; + *act_pages = (u32)outs[4]; + *eq_ist = (u32)outs[5]; + + if (ret == H_NOT_ENOUGH_RESOURCES) + ehca_gen_err("Not enough resource - ret=%lx ", ret); + + return ret; +} + +u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle, + struct ipz_eq_handle eq_handle, + const u64 event_mask) +{ + return ehca_plpar_hcall_norets(H_RESET_EVENTS, + adapter_handle.handle, /* r4 */ + eq_handle.handle, /* r5 */ + event_mask, /* r6 */ + 0, 0, 0, 0); +} + +u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle, + struct ehca_cq *cq, + struct ehca_alloc_cq_parms *param) +{ + u64 ret; + u64 outs[PLPAR_HCALL9_BUFSIZE]; + + ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, + adapter_handle.handle, /* r4 */ + 2, /* r5 */ + param->eq_handle.handle, /* r6 */ + cq->token, /* r7 */ + param->nr_cqe, /* r8 */ + 0, 0, 0, 0); + cq->ipz_cq_handle.handle = outs[0]; + param->act_nr_of_entries = (u32)outs[3]; + param->act_pages = (u32)outs[4]; + + if (ret == H_SUCCESS) + hcp_galpas_ctor(&cq->galpas, outs[5], outs[6]); + + if (ret == H_NOT_ENOUGH_RESOURCES) + ehca_gen_err("Not enough resources. 
ret=%lx", ret); + + return ret; +} + +u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle, + struct ehca_qp *qp, + struct ehca_alloc_qp_parms *parms) +{ + u64 ret; + u64 allocate_controls; + u64 max_r10_reg; + u64 outs[PLPAR_HCALL9_BUFSIZE]; + u16 max_nr_receive_wqes = qp->init_attr.cap.max_recv_wr + 1; + u16 max_nr_send_wqes = qp->init_attr.cap.max_send_wr + 1; + int daqp_ctrl = parms->daqp_ctrl; + + allocate_controls = + EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, + (daqp_ctrl & DAQP_CTRL_ENABLE) ? 1 : 0) + | EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0) + | EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype) + | EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype) + | EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING, + (daqp_ctrl & DAQP_CTRL_RECV_COMP) ? 1 : 0) + | EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING, + (daqp_ctrl & DAQP_CTRL_SEND_COMP) ? 1 : 0) + | EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL, + parms->ud_av_l_key_ctl) + | EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1); + + max_r10_reg = + EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR, + max_nr_send_wqes) + | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR, + max_nr_receive_wqes) + | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE, + parms->max_send_sge) + | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE, + parms->max_recv_sge); + + ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, + adapter_handle.handle, /* r4 */ + allocate_controls, /* r5 */ + qp->send_cq->ipz_cq_handle.handle, + qp->recv_cq->ipz_cq_handle.handle, + parms->ipz_eq_handle.handle, + ((u64)qp->token << 32) | parms->pd.value, + max_r10_reg, /* r10 */ + parms->ud_av_l_key_ctl, /* r11 */ + 0); + qp->ipz_qp_handle.handle = outs[0]; + qp->real_qp_num = (u32)outs[1]; + parms->act_nr_send_sges = + (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]); + parms->act_nr_recv_wqes = + (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]); + parms->act_nr_send_sges = + (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]); + parms->act_nr_recv_sges = + (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]); + parms->nr_sq_pages = + (u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]); + parms->nr_rq_pages = + (u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]); + + if (ret == H_SUCCESS) + hcp_galpas_ctor(&qp->galpas, outs[6], outs[6]); + + if (ret == H_NOT_ENOUGH_RESOURCES) + ehca_gen_err("Not enough resources. 
ret=%lx", ret); + + return ret; +} + +u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle, + const u8 port_id, + struct hipz_query_port *query_port_response_block) +{ + u64 ret; + u64 r_cb = virt_to_abs(query_port_response_block); + + if (r_cb & (EHCA_PAGESIZE-1)) { + ehca_gen_err("response block not page aligned"); + return H_PARAMETER; + } + + ret = ehca_plpar_hcall_norets(H_QUERY_PORT, + adapter_handle.handle, /* r4 */ + port_id, /* r5 */ + r_cb, /* r6 */ + 0, 0, 0, 0); + + if (ehca_debug_level) + ehca_dmp(query_port_response_block, 64, "response_block"); + + return ret; +} + +u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle, + struct hipz_query_hca *query_hca_rblock) +{ + u64 r_cb = virt_to_abs(query_hca_rblock); + + if (r_cb & (EHCA_PAGESIZE-1)) { + ehca_gen_err("response_block=%p not page aligned", + query_hca_rblock); + return H_PARAMETER; + } + + return ehca_plpar_hcall_norets(H_QUERY_HCA, + adapter_handle.handle, /* r4 */ + r_cb, /* r5 */ + 0, 0, 0, 0, 0); +} + +u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle, + const u8 pagesize, + const u8 queue_type, + const u64 resource_handle, + const u64 logical_address_of_page, + u64 count) +{ + return ehca_plpar_hcall_norets(H_REGISTER_RPAGES, + adapter_handle.handle, /* r4 */ + queue_type | pagesize << 8, /* r5 */ + resource_handle, /* r6 */ + logical_address_of_page, /* r7 */ + count, /* r8 */ + 0, 0); +} + +u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle, + const struct ipz_eq_handle eq_handle, + struct ehca_pfeq *pfeq, + const u8 pagesize, + const u8 queue_type, + const u64 logical_address_of_page, + const u64 count) +{ + if (count != 1) { + ehca_gen_err("Ppage counter=%lx", count); + return H_PARAMETER; + } + return hipz_h_register_rpage(adapter_handle, + pagesize, + queue_type, + eq_handle.handle, + logical_address_of_page, count); +} + +u64 hipz_h_query_int_state(const struct ipz_adapter_handle adapter_handle, + u32 ist) +{ + u64 ret; + ret = ehca_plpar_hcall_norets(H_QUERY_INT_STATE, + adapter_handle.handle, /* r4 */ + ist, /* r5 */ + 0, 0, 0, 0, 0); + + if (ret != H_SUCCESS && ret != H_BUSY) + ehca_gen_err("Could not query interrupt state."); + + return ret; +} + +u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle, + const struct ipz_cq_handle cq_handle, + struct ehca_pfcq *pfcq, + const u8 pagesize, + const u8 queue_type, + const u64 logical_address_of_page, + const u64 count, + const struct h_galpa gal) +{ + if (count != 1) { + ehca_gen_err("Page counter=%lx", count); + return H_PARAMETER; + } + + return hipz_h_register_rpage(adapter_handle, pagesize, queue_type, + cq_handle.handle, logical_address_of_page, + count); +} + +u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle, + const struct ipz_qp_handle qp_handle, + struct ehca_pfqp *pfqp, + const u8 pagesize, + const u8 queue_type, + const u64 logical_address_of_page, + const u64 count, + const struct h_galpa galpa) +{ + if (count != 1) { + ehca_gen_err("Page counter=%lx", count); + return H_PARAMETER; + } + + return hipz_h_register_rpage(adapter_handle,pagesize,queue_type, + qp_handle.handle,logical_address_of_page, + count); +} + +u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle, + const struct ipz_qp_handle qp_handle, + struct ehca_pfqp *pfqp, + void **log_addr_next_sq_wqe2processed, + void **log_addr_next_rq_wqe2processed, + int dis_and_get_function_code) +{ + u64 ret; + u64 outs[PLPAR_HCALL9_BUFSIZE]; + + ret 
= ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs, + adapter_handle.handle, /* r4 */ + dis_and_get_function_code, /* r5 */ + qp_handle.handle, /* r6 */ + 0, 0, 0, 0, 0, 0); + if (log_addr_next_sq_wqe2processed) + *log_addr_next_sq_wqe2processed = (void*)outs[0]; + if (log_addr_next_rq_wqe2processed) + *log_addr_next_rq_wqe2processed = (void*)outs[1]; + + return ret; +} + +u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle, + const struct ipz_qp_handle qp_handle, + struct ehca_pfqp *pfqp, + const u64 update_mask, + struct hcp_modify_qp_control_block *mqpcb, + struct h_galpa gal) +{ + u64 ret; + u64 outs[PLPAR_HCALL9_BUFSIZE]; + ret = ehca_plpar_hcall9(H_MODIFY_QP, outs, + adapter_handle.handle, /* r4 */ + qp_handle.handle, /* r5 */ + update_mask, /* r6 */ + virt_to_abs(mqpcb), /* r7 */ + 0, 0, 0, 0, 0); + + if (ret == H_NOT_ENOUGH_RESOURCES) + ehca_gen_err("Insufficient resources ret=%lx", ret); + + return ret; +} + +u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle, + const struct ipz_qp_handle qp_handle, + struct ehca_pfqp *pfqp, + struct hcp_modify_qp_control_block *qqpcb, + struct h_galpa gal) +{ + return ehca_plpar_hcall_norets(H_QUERY_QP, + adapter_handle.handle, /* r4 */ + qp_handle.handle, /* r5 */ + virt_to_abs(qqpcb), /* r6 */ + 0, 0, 0, 0); +} + +u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle, + struct ehca_qp *qp) +{ + u64 ret; + u64 outs[PLPAR_HCALL9_BUFSIZE]; + + ret = hcp_galpas_dtor(&qp->galpas); + if (ret) { + ehca_gen_err("Could not destruct qp->galpas"); + return H_RESOURCE; + } + ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs, + adapter_handle.handle, /* r4 */ + /* function code */ + 1, /* r5 */ + qp->ipz_qp_handle.handle, /* r6 */ + 0, 0, 0, 0, 0, 0); + if (ret == H_HARDWARE) + ehca_gen_err("HCA not operational. ret=%lx", ret); + + ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE, + adapter_handle.handle, /* r4 */ + qp->ipz_qp_handle.handle, /* r5 */ + 0, 0, 0, 0, 0); + + if (ret == H_RESOURCE) + ehca_gen_err("Resource still in use. ret=%lx", ret); + + return ret; +} + +u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle, + const struct ipz_qp_handle qp_handle, + struct h_galpa gal, + u32 port) +{ + return ehca_plpar_hcall_norets(H_DEFINE_AQP0, + adapter_handle.handle, /* r4 */ + qp_handle.handle, /* r5 */ + port, /* r6 */ + 0, 0, 0, 0); +} + +u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle, + const struct ipz_qp_handle qp_handle, + struct h_galpa gal, + u32 port, u32 * pma_qp_nr, + u32 * bma_qp_nr) +{ + u64 ret; + u64 outs[PLPAR_HCALL9_BUFSIZE]; + + ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs, + adapter_handle.handle, /* r4 */ + qp_handle.handle, /* r5 */ + port, /* r6 */ + 0, 0, 0, 0, 0, 0); + *pma_qp_nr = (u32)outs[0]; + *bma_qp_nr = (u32)outs[1]; + + if (ret == H_ALIAS_EXIST) + ehca_gen_err("AQP1 already exists. ret=%lx", ret); + + return ret; +} + +u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle, + const struct ipz_qp_handle qp_handle, + struct h_galpa gal, + u16 mcg_dlid, + u64 subnet_prefix, u64 interface_id) +{ + u64 ret; + + ret = ehca_plpar_hcall_norets(H_ATTACH_MCQP, + adapter_handle.handle, /* r4 */ + qp_handle.handle, /* r5 */ + mcg_dlid, /* r6 */ + interface_id, /* r7 */ + subnet_prefix, /* r8 */ + 0, 0); + + if (ret == H_NOT_ENOUGH_RESOURCES) + ehca_gen_err("Not enough resources. 
ret=%lx", ret); + + return ret; +} + +u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle, + const struct ipz_qp_handle qp_handle, + struct h_galpa gal, + u16 mcg_dlid, + u64 subnet_prefix, u64 interface_id) +{ + return ehca_plpar_hcall_norets(H_DETACH_MCQP, + adapter_handle.handle, /* r4 */ + qp_handle.handle, /* r5 */ + mcg_dlid, /* r6 */ + interface_id, /* r7 */ + subnet_prefix, /* r8 */ + 0, 0); +} + +u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle, + struct ehca_cq *cq, + u8 force_flag) +{ + u64 ret; + + ret = hcp_galpas_dtor(&cq->galpas); + if (ret) { + ehca_gen_err("Could not destruct cp->galpas"); + return H_RESOURCE; + } + + ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE, + adapter_handle.handle, /* r4 */ + cq->ipz_cq_handle.handle, /* r5 */ + force_flag != 0 ? 1L : 0L, /* r6 */ + 0, 0, 0, 0); + + if (ret == H_RESOURCE) + ehca_gen_err("H_FREE_RESOURCE failed ret=%lx ", ret); + + return ret; +} + +u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle, + struct ehca_eq *eq) +{ + u64 ret; + + ret = hcp_galpas_dtor(&eq->galpas); + if (ret) { + ehca_gen_err("Could not destruct eq->galpas"); + return H_RESOURCE; + } + + ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE, + adapter_handle.handle, /* r4 */ + eq->ipz_eq_handle.handle, /* r5 */ + 0, 0, 0, 0, 0); + + if (ret == H_RESOURCE) + ehca_gen_err("Resource in use. ret=%lx ", ret); + + return ret; +} + +u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle, + const struct ehca_mr *mr, + const u64 vaddr, + const u64 length, + const u32 access_ctrl, + const struct ipz_pd pd, + struct ehca_mr_hipzout_parms *outparms) +{ + u64 ret; + u64 outs[PLPAR_HCALL9_BUFSIZE]; + + ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, + adapter_handle.handle, /* r4 */ + 5, /* r5 */ + vaddr, /* r6 */ + length, /* r7 */ + (((u64)access_ctrl) << 32ULL), /* r8 */ + pd.value, /* r9 */ + 0, 0, 0); + outparms->handle.handle = outs[0]; + outparms->lkey = (u32)outs[2]; + outparms->rkey = (u32)outs[3]; + + return ret; +} + +u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle, + const struct ehca_mr *mr, + const u8 pagesize, + const u8 queue_type, + const u64 logical_address_of_page, + const u64 count) +{ + u64 ret; + + if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) { + ehca_gen_err("logical_address_of_page not on a 4k boundary " + "adapter_handle=%lx mr=%p mr_handle=%lx " + "pagesize=%x queue_type=%x " + "logical_address_of_page=%lx count=%lx", + adapter_handle.handle, mr, + mr->ipz_mr_handle.handle, pagesize, queue_type, + logical_address_of_page, count); + ret = H_PARAMETER; + } else + ret = hipz_h_register_rpage(adapter_handle, pagesize, + queue_type, + mr->ipz_mr_handle.handle, + logical_address_of_page, count); + return ret; +} + +u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle, + const struct ehca_mr *mr, + struct ehca_mr_hipzout_parms *outparms) +{ + u64 ret; + u64 outs[PLPAR_HCALL9_BUFSIZE]; + + ret = ehca_plpar_hcall9(H_QUERY_MR, outs, + adapter_handle.handle, /* r4 */ + mr->ipz_mr_handle.handle, /* r5 */ + 0, 0, 0, 0, 0, 0, 0); + outparms->len = outs[0]; + outparms->vaddr = outs[1]; + outparms->acl = outs[4] >> 32; + outparms->lkey = (u32)(outs[5] >> 32); + outparms->rkey = (u32)(outs[5] & (0xffffffff)); + + return ret; +} + +u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle, + const struct ehca_mr *mr) +{ + return ehca_plpar_hcall_norets(H_FREE_RESOURCE, + adapter_handle.handle, /* r4 */ + 
mr->ipz_mr_handle.handle, /* r5 */ + 0, 0, 0, 0, 0); +} + +u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle, + const struct ehca_mr *mr, + const u64 vaddr_in, + const u64 length, + const u32 access_ctrl, + const struct ipz_pd pd, + const u64 mr_addr_cb, + struct ehca_mr_hipzout_parms *outparms) +{ + u64 ret; + u64 outs[PLPAR_HCALL9_BUFSIZE]; + + ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs, + adapter_handle.handle, /* r4 */ + mr->ipz_mr_handle.handle, /* r5 */ + vaddr_in, /* r6 */ + length, /* r7 */ + /* r8 */ + ((((u64)access_ctrl) << 32ULL) | pd.value), + mr_addr_cb, /* r9 */ + 0, 0, 0); + outparms->vaddr = outs[1]; + outparms->lkey = (u32)outs[2]; + outparms->rkey = (u32)outs[3]; + + return ret; +} + +u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle, + const struct ehca_mr *mr, + const struct ehca_mr *orig_mr, + const u64 vaddr_in, + const u32 access_ctrl, + const struct ipz_pd pd, + struct ehca_mr_hipzout_parms *outparms) +{ + u64 ret; + u64 outs[PLPAR_HCALL9_BUFSIZE]; + + ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs, + adapter_handle.handle, /* r4 */ + orig_mr->ipz_mr_handle.handle, /* r5 */ + vaddr_in, /* r6 */ + (((u64)access_ctrl) << 32ULL), /* r7 */ + pd.value, /* r8 */ + 0, 0, 0, 0); + outparms->handle.handle = outs[0]; + outparms->lkey = (u32)outs[2]; + outparms->rkey = (u32)outs[3]; + + return ret; +} + +u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle, + const struct ehca_mw *mw, + const struct ipz_pd pd, + struct ehca_mw_hipzout_parms *outparms) +{ + u64 ret; + u64 outs[PLPAR_HCALL9_BUFSIZE]; + + ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, + adapter_handle.handle, /* r4 */ + 6, /* r5 */ + pd.value, /* r6 */ + 0, 0, 0, 0, 0, 0); + outparms->handle.handle = outs[0]; + outparms->rkey = (u32)outs[3]; + + return ret; +} + +u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle, + const struct ehca_mw *mw, + struct ehca_mw_hipzout_parms *outparms) +{ + u64 ret; + u64 outs[PLPAR_HCALL9_BUFSIZE]; + + ret = ehca_plpar_hcall9(H_QUERY_MW, outs, + adapter_handle.handle, /* r4 */ + mw->ipz_mw_handle.handle, /* r5 */ + 0, 0, 0, 0, 0, 0, 0); + outparms->rkey = (u32)outs[3]; + + return ret; +} + +u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle, + const struct ehca_mw *mw) +{ + return ehca_plpar_hcall_norets(H_FREE_RESOURCE, + adapter_handle.handle, /* r4 */ + mw->ipz_mw_handle.handle, /* r5 */ + 0, 0, 0, 0, 0); +} + +u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle, + const u64 ressource_handle, + void *rblock, + unsigned long *byte_count) +{ + u64 r_cb = virt_to_abs(rblock); + + if (r_cb & (EHCA_PAGESIZE-1)) { + ehca_gen_err("rblock not page aligned."); + return H_PARAMETER; + } + + return ehca_plpar_hcall_norets(H_ERROR_DATA, + adapter_handle.handle, + ressource_handle, + r_cb, + 0, 0, 0, 0); +} diff --git a/drivers/infiniband/hw/ehca/hcp_if.h b/drivers/infiniband/hw/ehca/hcp_if.h new file mode 100644 index 000000000000..587ebd470959 --- /dev/null +++ b/drivers/infiniband/hw/ehca/hcp_if.h @@ -0,0 +1,261 @@ +/* + * IBM eServer eHCA Infiniband device driver for Linux on POWER + * + * Firmware Infiniband Interface code for POWER + * + * Authors: Christoph Raisch <raisch@de.ibm.com> + * Hoang-Nam Nguyen <hnguyen@de.ibm.com> + * Gerd Bayer <gerd.bayer@de.ibm.com> + * Waleri Fomin <fomin@de.ibm.com> + * + * Copyright (c) 2005 IBM Corporation + * + * All rights reserved. 
+ * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. + * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __HCP_IF_H__ +#define __HCP_IF_H__ + +#include "ehca_classes.h" +#include "ehca_tools.h" +#include "hipz_hw.h" + +/* + * hipz_h_alloc_resource_eq allocates EQ resources in HW and FW, initializes + * resources and creates the empty EQPT (ring). + */ +u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle, + struct ehca_pfeq *pfeq, + const u32 neq_control, + const u32 number_of_entries, + struct ipz_eq_handle *eq_handle, + u32 * act_nr_of_entries, + u32 * act_pages, + u32 * eq_ist); + +u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle, + struct ipz_eq_handle eq_handle, + const u64 event_mask); +/* + * hipz_h_alloc_resource_cq allocates CQ resources in HW and FW, initializes + * resources and creates the empty CQPT (ring). + */ +u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle, + struct ehca_cq *cq, + struct ehca_alloc_cq_parms *param); + + +/* + * hipz_h_alloc_resource_qp allocates QP resources in HW and FW, + * initializes resources and creates the empty QPPTs (2 rings). + */ +u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle, + struct ehca_qp *qp, + struct ehca_alloc_qp_parms *parms); + +u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle, + const u8 port_id, + struct hipz_query_port *query_port_response_block); + +u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle, + struct hipz_query_hca *query_hca_rblock); + +/* + * hipz_h_register_rpage is the internal function common to all + * H_REGISTER_RPAGE calls. 
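+ * The EQ, CQ, QP and MR rpage wrappers below all funnel into it.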
+ */ +u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle, + const u8 pagesize, + const u8 queue_type, + const u64 resource_handle, + const u64 logical_address_of_page, + u64 count); + +u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle, + const struct ipz_eq_handle eq_handle, + struct ehca_pfeq *pfeq, + const u8 pagesize, + const u8 queue_type, + const u64 logical_address_of_page, + const u64 count); + +u64 hipz_h_query_int_state(const struct ipz_adapter_handle + hcp_adapter_handle, + u32 ist); + +u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle, + const struct ipz_cq_handle cq_handle, + struct ehca_pfcq *pfcq, + const u8 pagesize, + const u8 queue_type, + const u64 logical_address_of_page, + const u64 count, + const struct h_galpa gal); + +u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle, + const struct ipz_qp_handle qp_handle, + struct ehca_pfqp *pfqp, + const u8 pagesize, + const u8 queue_type, + const u64 logical_address_of_page, + const u64 count, + const struct h_galpa galpa); + +u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle, + const struct ipz_qp_handle qp_handle, + struct ehca_pfqp *pfqp, + void **log_addr_next_sq_wqe_tb_processed, + void **log_addr_next_rq_wqe_tb_processed, + int dis_and_get_function_code); +enum hcall_sigt { + HCALL_SIGT_NO_CQE = 0, + HCALL_SIGT_BY_WQE = 1, + HCALL_SIGT_EVERY = 2 +}; + +u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle, + const struct ipz_qp_handle qp_handle, + struct ehca_pfqp *pfqp, + const u64 update_mask, + struct hcp_modify_qp_control_block *mqpcb, + struct h_galpa gal); + +u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle, + const struct ipz_qp_handle qp_handle, + struct ehca_pfqp *pfqp, + struct hcp_modify_qp_control_block *qqpcb, + struct h_galpa gal); + +u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle, + struct ehca_qp *qp); + +u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle, + const struct ipz_qp_handle qp_handle, + struct h_galpa gal, + u32 port); + +u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle, + const struct ipz_qp_handle qp_handle, + struct h_galpa gal, + u32 port, u32 * pma_qp_nr, + u32 * bma_qp_nr); + +u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle, + const struct ipz_qp_handle qp_handle, + struct h_galpa gal, + u16 mcg_dlid, + u64 subnet_prefix, u64 interface_id); + +u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle, + const struct ipz_qp_handle qp_handle, + struct h_galpa gal, + u16 mcg_dlid, + u64 subnet_prefix, u64 interface_id); + +u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle, + struct ehca_cq *cq, + u8 force_flag); + +u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle, + struct ehca_eq *eq); + +/* + * hipz_h_alloc_resource_mr allocates MR resources in HW and FW, initialize + * resources. 
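+ * On success the new MR handle and the lkey/rkey pair are returned in + * outparms.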
+ */ +u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle, + const struct ehca_mr *mr, + const u64 vaddr, + const u64 length, + const u32 access_ctrl, + const struct ipz_pd pd, + struct ehca_mr_hipzout_parms *outparms); + +/* hipz_h_register_rpage_mr registers MR resource pages in HW and FW */ +u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle, + const struct ehca_mr *mr, + const u8 pagesize, + const u8 queue_type, + const u64 logical_address_of_page, + const u64 count); + +/* hipz_h_query_mr queries MR in HW and FW */ +u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle, + const struct ehca_mr *mr, + struct ehca_mr_hipzout_parms *outparms); + +/* hipz_h_free_resource_mr frees MR resources in HW and FW */ +u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle, + const struct ehca_mr *mr); + +/* hipz_h_reregister_pmr reregisters MR in HW and FW */ +u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle, + const struct ehca_mr *mr, + const u64 vaddr_in, + const u64 length, + const u32 access_ctrl, + const struct ipz_pd pd, + const u64 mr_addr_cb, + struct ehca_mr_hipzout_parms *outparms); + +/* hipz_h_register_smr register shared MR in HW and FW */ +u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle, + const struct ehca_mr *mr, + const struct ehca_mr *orig_mr, + const u64 vaddr_in, + const u32 access_ctrl, + const struct ipz_pd pd, + struct ehca_mr_hipzout_parms *outparms); + +/* + * hipz_h_alloc_resource_mw allocates MW resources in HW and FW, initialize + * resources. + */ +u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle, + const struct ehca_mw *mw, + const struct ipz_pd pd, + struct ehca_mw_hipzout_parms *outparms); + +/* hipz_h_query_mw queries MW in HW and FW */ +u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle, + const struct ehca_mw *mw, + struct ehca_mw_hipzout_parms *outparms); + +/* hipz_h_free_resource_mw frees MW resources in HW and FW */ +u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle, + const struct ehca_mw *mw); + +u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle, + const u64 ressource_handle, + void *rblock, + unsigned long *byte_count); + +#endif /* __HCP_IF_H__ */ diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.c b/drivers/infiniband/hw/ehca/hcp_phyp.c new file mode 100644 index 000000000000..0b1a4772c78a --- /dev/null +++ b/drivers/infiniband/hw/ehca/hcp_phyp.c @@ -0,0 +1,80 @@ +/* + * IBM eServer eHCA Infiniband device driver for Linux on POWER + * + * load store abstraction for ehca register access with tracing + * + * Authors: Christoph Raisch <raisch@de.ibm.com> + * Hoang-Nam Nguyen <hnguyen@de.ibm.com> + * + * Copyright (c) 2005 IBM Corporation + * + * All rights reserved. + * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. + * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "ehca_classes.h" +#include "hipz_hw.h" + +int hcall_map_page(u64 physaddr, u64 *mapaddr) +{ + *mapaddr = (u64)(ioremap(physaddr, EHCA_PAGESIZE)); + return 0; +} + +int hcall_unmap_page(u64 mapaddr) +{ + iounmap((volatile void __iomem*)mapaddr); + return 0; +} + +int hcp_galpas_ctor(struct h_galpas *galpas, + u64 paddr_kernel, u64 paddr_user) +{ + int ret = hcall_map_page(paddr_kernel, &galpas->kernel.fw_handle); + if (ret) + return ret; + + galpas->user.fw_handle = paddr_user; + + return 0; +} + +int hcp_galpas_dtor(struct h_galpas *galpas) +{ + if (galpas->kernel.fw_handle) { + int ret = hcall_unmap_page(galpas->kernel.fw_handle); + if (ret) + return ret; + } + + galpas->user.fw_handle = galpas->kernel.fw_handle = 0; + + return 0; +} diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.h b/drivers/infiniband/hw/ehca/hcp_phyp.h new file mode 100644 index 000000000000..5305c2a3ed94 --- /dev/null +++ b/drivers/infiniband/hw/ehca/hcp_phyp.h @@ -0,0 +1,90 @@ +/* + * IBM eServer eHCA Infiniband device driver for Linux on POWER + * + * Firmware calls + * + * Authors: Christoph Raisch <raisch@de.ibm.com> + * Hoang-Nam Nguyen <hnguyen@de.ibm.com> + * Waleri Fomin <fomin@de.ibm.com> + * Gerd Bayer <gerd.bayer@de.ibm.com> + * + * Copyright (c) 2005 IBM Corporation + * + * All rights reserved. + * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. + * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __HCP_PHYP_H__ +#define __HCP_PHYP_H__ + + +/* + * eHCA page (mapped into memory) + * resource to access eHCA register pages in CPU address space +*/ +struct h_galpa { + u64 fw_handle; + /* for pSeries this is a 64bit memory address where + I/O memory is mapped into CPU address space (kv) */ +}; + +/* + * resource to access eHCA address space registers, all types + */ +struct h_galpas { + u32 pid; /*PID of userspace galpa checking */ + struct h_galpa user; /* user space accessible resource, + set to 0 if unused */ + struct h_galpa kernel; /* kernel space accessible resource, + set to 0 if unused */ +}; + +static inline u64 hipz_galpa_load(struct h_galpa galpa, u32 offset) +{ + u64 addr = galpa.fw_handle + offset; + return *(volatile u64 __force *)addr; +} + +static inline void hipz_galpa_store(struct h_galpa galpa, u32 offset, u64 value) +{ + u64 addr = galpa.fw_handle + offset; + *(volatile u64 __force *)addr = value; +} + +int hcp_galpas_ctor(struct h_galpas *galpas, + u64 paddr_kernel, u64 paddr_user); + +int hcp_galpas_dtor(struct h_galpas *galpas); + +int hcall_map_page(u64 physaddr, u64 * mapaddr); + +int hcall_unmap_page(u64 mapaddr); + +#endif diff --git a/drivers/infiniband/hw/ehca/hipz_fns.h b/drivers/infiniband/hw/ehca/hipz_fns.h new file mode 100644 index 000000000000..9dac93d02140 --- /dev/null +++ b/drivers/infiniband/hw/ehca/hipz_fns.h @@ -0,0 +1,68 @@ +/* + * IBM eServer eHCA Infiniband device driver for Linux on POWER + * + * HW abstraction register functions + * + * Authors: Christoph Raisch <raisch@de.ibm.com> + * Reinhard Ernst <rernst@de.ibm.com> + * + * Copyright (c) 2005 IBM Corporation + * + * All rights reserved. + * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. + * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __HIPZ_FNS_H__ +#define __HIPZ_FNS_H__ + +#include "ehca_classes.h" +#include "hipz_hw.h" + +#include "hipz_fns_core.h" + +#define hipz_galpa_store_eq(gal, offset, value) \ + hipz_galpa_store(gal, EQTEMM_OFFSET(offset), value) + +#define hipz_galpa_load_eq(gal, offset) \ + hipz_galpa_load(gal, EQTEMM_OFFSET(offset)) + +#define hipz_galpa_store_qped(gal, offset, value) \ + hipz_galpa_store(gal, QPEDMM_OFFSET(offset), value) + +#define hipz_galpa_load_qped(gal, offset) \ + hipz_galpa_load(gal, QPEDMM_OFFSET(offset)) + +#define hipz_galpa_store_mrmw(gal, offset, value) \ + hipz_galpa_store(gal, MRMWMM_OFFSET(offset), value) + +#define hipz_galpa_load_mrmw(gal, offset) \ + hipz_galpa_load(gal, MRMWMM_OFFSET(offset)) + +#endif diff --git a/drivers/infiniband/hw/ehca/hipz_fns_core.h b/drivers/infiniband/hw/ehca/hipz_fns_core.h new file mode 100644 index 000000000000..20898a153446 --- /dev/null +++ b/drivers/infiniband/hw/ehca/hipz_fns_core.h @@ -0,0 +1,100 @@ +/* + * IBM eServer eHCA Infiniband device driver for Linux on POWER + * + * HW abstraction register functions + * + * Authors: Christoph Raisch <raisch@de.ibm.com> + * Heiko J Schick <schickhj@de.ibm.com> + * Hoang-Nam Nguyen <hnguyen@de.ibm.com> + * Reinhard Ernst <rernst@de.ibm.com> + * + * Copyright (c) 2005 IBM Corporation + * + * All rights reserved. + * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. + * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __HIPZ_FNS_CORE_H__ +#define __HIPZ_FNS_CORE_H__ + +#include "hcp_phyp.h" +#include "hipz_hw.h" + +#define hipz_galpa_store_cq(gal, offset, value) \ + hipz_galpa_store(gal, CQTEMM_OFFSET(offset), value) + +#define hipz_galpa_load_cq(gal, offset) \ + hipz_galpa_load(gal, CQTEMM_OFFSET(offset)) + +#define hipz_galpa_store_qp(gal,offset, value) \ + hipz_galpa_store(gal, QPTEMM_OFFSET(offset), value) +#define hipz_galpa_load_qp(gal, offset) \ + hipz_galpa_load(gal,QPTEMM_OFFSET(offset)) + +static inline void hipz_update_sqa(struct ehca_qp *qp, u16 nr_wqes) +{ + /* ringing doorbell :-) */ + hipz_galpa_store_qp(qp->galpas.kernel, qpx_sqa, + EHCA_BMASK_SET(QPX_SQADDER, nr_wqes)); +} + +static inline void hipz_update_rqa(struct ehca_qp *qp, u16 nr_wqes) +{ + /* ringing doorbell :-) */ + hipz_galpa_store_qp(qp->galpas.kernel, qpx_rqa, + EHCA_BMASK_SET(QPX_RQADDER, nr_wqes)); +} + +static inline void hipz_update_feca(struct ehca_cq *cq, u32 nr_cqes) +{ + hipz_galpa_store_cq(cq->galpas.kernel, cqx_feca, + EHCA_BMASK_SET(CQX_FECADDER, nr_cqes)); +} + +static inline void hipz_set_cqx_n0(struct ehca_cq *cq, u32 value) +{ + u64 cqx_n0_reg; + + hipz_galpa_store_cq(cq->galpas.kernel, cqx_n0, + EHCA_BMASK_SET(CQX_N0_GENERATE_SOLICITED_COMP_EVENT, + value)); + cqx_n0_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n0); +} + +static inline void hipz_set_cqx_n1(struct ehca_cq *cq, u32 value) +{ + u64 cqx_n1_reg; + + hipz_galpa_store_cq(cq->galpas.kernel, cqx_n1, + EHCA_BMASK_SET(CQX_N1_GENERATE_COMP_EVENT, value)); + cqx_n1_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n1); +} + +#endif /* __HIPZ_FNC_CORE_H__ */ diff --git a/drivers/infiniband/hw/ehca/hipz_hw.h b/drivers/infiniband/hw/ehca/hipz_hw.h new file mode 100644 index 000000000000..3fc92b031c50 --- /dev/null +++ b/drivers/infiniband/hw/ehca/hipz_hw.h @@ -0,0 +1,388 @@ +/* + * IBM eServer eHCA Infiniband device driver for Linux on POWER + * + * eHCA register definitions + * + * Authors: Waleri Fomin <fomin@de.ibm.com> + * Christoph Raisch <raisch@de.ibm.com> + * Reinhard Ernst <rernst@de.ibm.com> + * + * Copyright (c) 2005 IBM Corporation + * + * All rights reserved. + * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. + * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __HIPZ_HW_H__ +#define __HIPZ_HW_H__ + +#include "ehca_tools.h" + +/* QP Table Entry Memory Map */ +struct hipz_qptemm { + u64 qpx_hcr; + u64 qpx_c; + u64 qpx_herr; + u64 qpx_aer; +/* 0x20*/ + u64 qpx_sqa; + u64 qpx_sqc; + u64 qpx_rqa; + u64 qpx_rqc; +/* 0x40*/ + u64 qpx_st; + u64 qpx_pmstate; + u64 qpx_pmfa; + u64 qpx_pkey; +/* 0x60*/ + u64 qpx_pkeya; + u64 qpx_pkeyb; + u64 qpx_pkeyc; + u64 qpx_pkeyd; +/* 0x80*/ + u64 qpx_qkey; + u64 qpx_dqp; + u64 qpx_dlidp; + u64 qpx_portp; +/* 0xa0*/ + u64 qpx_slidp; + u64 qpx_slidpp; + u64 qpx_dlida; + u64 qpx_porta; +/* 0xc0*/ + u64 qpx_slida; + u64 qpx_slidpa; + u64 qpx_slvl; + u64 qpx_ipd; +/* 0xe0*/ + u64 qpx_mtu; + u64 qpx_lato; + u64 qpx_rlimit; + u64 qpx_rnrlimit; +/* 0x100*/ + u64 qpx_t; + u64 qpx_sqhp; + u64 qpx_sqptp; + u64 qpx_nspsn; +/* 0x120*/ + u64 qpx_nspsnhwm; + u64 reserved1; + u64 qpx_sdsi; + u64 qpx_sdsbc; +/* 0x140*/ + u64 qpx_sqwsize; + u64 qpx_sqwts; + u64 qpx_lsn; + u64 qpx_nssn; +/* 0x160 */ + u64 qpx_mor; + u64 qpx_cor; + u64 qpx_sqsize; + u64 qpx_erc; +/* 0x180*/ + u64 qpx_rnrrc; + u64 qpx_ernrwt; + u64 qpx_rnrresp; + u64 qpx_lmsna; +/* 0x1a0 */ + u64 qpx_sqhpc; + u64 qpx_sqcptp; + u64 qpx_sigt; + u64 qpx_wqecnt; +/* 0x1c0*/ + u64 qpx_rqhp; + u64 qpx_rqptp; + u64 qpx_rqsize; + u64 qpx_nrr; +/* 0x1e0*/ + u64 qpx_rdmac; + u64 qpx_nrpsn; + u64 qpx_lapsn; + u64 qpx_lcr; +/* 0x200*/ + u64 qpx_rwc; + u64 qpx_rwva; + u64 qpx_rdsi; + u64 qpx_rdsbc; +/* 0x220*/ + u64 qpx_rqwsize; + u64 qpx_crmsn; + u64 qpx_rdd; + u64 qpx_larpsn; +/* 0x240*/ + u64 qpx_pd; + u64 qpx_scqn; + u64 qpx_rcqn; + u64 qpx_aeqn; +/* 0x260*/ + u64 qpx_aaelog; + u64 qpx_ram; + u64 qpx_rdmaqe0; + u64 qpx_rdmaqe1; +/* 0x280*/ + u64 qpx_rdmaqe2; + u64 qpx_rdmaqe3; + u64 qpx_nrpsnhwm; +/* 0x298*/ + u64 reserved[(0x400 - 0x298) / 8]; +/* 0x400 extended data */ + u64 reserved_ext[(0x500 - 0x400) / 8]; +/* 0x500 */ + u64 reserved2[(0x1000 - 0x500) / 8]; +/* 0x1000 */ +}; + +#define QPX_SQADDER EHCA_BMASK_IBM(48,63) +#define QPX_RQADDER EHCA_BMASK_IBM(48,63) + +#define QPTEMM_OFFSET(x) offsetof(struct hipz_qptemm,x) + +/* MRMWPT Entry Memory Map */ +struct hipz_mrmwmm { + /* 0x00 */ + u64 mrx_hcr; + + u64 mrx_c; + u64 mrx_herr; + u64 mrx_aer; + /* 0x20 */ + u64 mrx_pp; + u64 reserved1; + u64 reserved2; + u64 reserved3; + /* 0x40 */ + u64 reserved4[(0x200 - 0x40) / 8]; + /* 0x200 */ + u64 mrx_ctl[64]; + +}; + +#define MRMWMM_OFFSET(x) offsetof(struct hipz_mrmwmm,x) + +struct hipz_qpedmm { + /* 0x00 */ + u64 reserved0[(0x400) / 8]; + /* 0x400 */ + u64 qpedx_phh; + u64 qpedx_ppsgp; + /* 0x410 */ + u64 qpedx_ppsgu; + u64 qpedx_ppdgp; + /* 0x420 */ + u64 qpedx_ppdgu; + u64 qpedx_aph; + /* 0x430 */ + u64 qpedx_apsgp; + u64 qpedx_apsgu; + /* 0x440 */ + u64 qpedx_apdgp; + u64 qpedx_apdgu; + /* 0x450 */ + u64 qpedx_apav; + u64 qpedx_apsav; + /* 0x460 */ + u64 qpedx_hcr; + u64 reserved1[4]; + /* 0x488 */ + u64 qpedx_rrl0; + /* 0x490 */ + u64 qpedx_rrrkey0; + u64 qpedx_rrva0; + /* 0x4a0 */ + u64 reserved2; + u64 qpedx_rrl1; + /* 0x4b0 
*/ + u64 qpedx_rrrkey1; + u64 qpedx_rrva1; + /* 0x4c0 */ + u64 reserved3; + u64 qpedx_rrl2; + /* 0x4d0 */ + u64 qpedx_rrrkey2; + u64 qpedx_rrva2; + /* 0x4e0 */ + u64 reserved4; + u64 qpedx_rrl3; + /* 0x4f0 */ + u64 qpedx_rrrkey3; + u64 qpedx_rrva3; +}; + +#define QPEDMM_OFFSET(x) offsetof(struct hipz_qpedmm,x) + +/* CQ Table Entry Memory Map */ +struct hipz_cqtemm { + u64 cqx_hcr; + u64 cqx_c; + u64 cqx_herr; + u64 cqx_aer; +/* 0x20 */ + u64 cqx_ptp; + u64 cqx_tp; + u64 cqx_fec; + u64 cqx_feca; +/* 0x40 */ + u64 cqx_ep; + u64 cqx_eq; +/* 0x50 */ + u64 reserved1; + u64 cqx_n0; +/* 0x60 */ + u64 cqx_n1; + u64 reserved2[(0x1000 - 0x60) / 8]; +/* 0x1000 */ +}; + +#define CQX_FEC_CQE_CNT EHCA_BMASK_IBM(32,63) +#define CQX_FECADDER EHCA_BMASK_IBM(32,63) +#define CQX_N0_GENERATE_SOLICITED_COMP_EVENT EHCA_BMASK_IBM(0,0) +#define CQX_N1_GENERATE_COMP_EVENT EHCA_BMASK_IBM(0,0) + +#define CQTEMM_OFFSET(x) offsetof(struct hipz_cqtemm,x) + +/* EQ Table Entry Memory Map */ +struct hipz_eqtemm { + u64 eqx_hcr; + u64 eqx_c; + + u64 eqx_herr; + u64 eqx_aer; +/* 0x20 */ + u64 eqx_ptp; + u64 eqx_tp; + u64 eqx_ssba; + u64 eqx_psba; + +/* 0x40 */ + u64 eqx_cec; + u64 eqx_meql; + u64 eqx_xisbi; + u64 eqx_xisc; +/* 0x60 */ + u64 eqx_it; + +}; + +#define EQTEMM_OFFSET(x) offsetof(struct hipz_eqtemm,x) + +/* access control defines for MR/MW */ +#define HIPZ_ACCESSCTRL_L_WRITE 0x00800000 +#define HIPZ_ACCESSCTRL_R_WRITE 0x00400000 +#define HIPZ_ACCESSCTRL_R_READ 0x00200000 +#define HIPZ_ACCESSCTRL_R_ATOMIC 0x00100000 +#define HIPZ_ACCESSCTRL_MW_BIND 0x00080000 + +/* query hca response block */ +struct hipz_query_hca { + u32 cur_reliable_dg; + u32 cur_qp; + u32 cur_cq; + u32 cur_eq; + u32 cur_mr; + u32 cur_mw; + u32 cur_ee_context; + u32 cur_mcast_grp; + u32 cur_qp_attached_mcast_grp; + u32 reserved1; + u32 cur_ipv6_qp; + u32 cur_eth_qp; + u32 cur_hp_mr; + u32 reserved2[3]; + u32 max_rd_domain; + u32 max_qp; + u32 max_cq; + u32 max_eq; + u32 max_mr; + u32 max_hp_mr; + u32 max_mw; + u32 max_mrwpte; + u32 max_special_mrwpte; + u32 max_rd_ee_context; + u32 max_mcast_grp; + u32 max_total_mcast_qp_attach; + u32 max_mcast_qp_attach; + u32 max_raw_ipv6_qp; + u32 max_raw_ethy_qp; + u32 internal_clock_frequency; + u32 max_pd; + u32 max_ah; + u32 max_cqe; + u32 max_wqes_wq; + u32 max_partitions; + u32 max_rr_ee_context; + u32 max_rr_qp; + u32 max_rr_hca; + u32 max_act_wqs_ee_context; + u32 max_act_wqs_qp; + u32 max_sge; + u32 max_sge_rd; + u32 memory_page_size_supported; + u64 max_mr_size; + u32 local_ca_ack_delay; + u32 num_ports; + u32 vendor_id; + u32 vendor_part_id; + u32 hw_ver; + u64 node_guid; + u64 hca_cap_indicators; + u32 data_counter_register_size; + u32 max_shared_rq; + u32 max_isns_eq; + u32 max_neq; +} __attribute__ ((packed)); + +/* query port response block */ +struct hipz_query_port { + u32 state; + u32 bad_pkey_cntr; + u32 lmc; + u32 lid; + u32 subnet_timeout; + u32 qkey_viol_cntr; + u32 sm_sl; + u32 sm_lid; + u32 capability_mask; + u32 init_type_reply; + u32 pkey_tbl_len; + u32 gid_tbl_len; + u64 gid_prefix; + u32 port_nr; + u16 pkey_entries[16]; + u8 reserved1[32]; + u32 trent_size; + u32 trbuf_size; + u64 max_msg_sz; + u32 max_mtu; + u32 vl_cap; + u8 reserved2[1900]; + u64 guid_entries[255]; +} __attribute__ ((packed)); + +#endif diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c new file mode 100644 index 000000000000..e028ff1588cc --- /dev/null +++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c @@ -0,0 +1,149 @@ +/* + * IBM eServer eHCA Infiniband device driver for 
Linux on POWER
+ *
+ * internal queue handling
+ *
+ * Authors: Waleri Fomin <fomin@de.ibm.com>
+ *          Reinhard Ernst <rernst@de.ibm.com>
+ *          Christoph Raisch <raisch@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ehca_tools.h"
+#include "ipz_pt_fn.h"
+
+void *ipz_qpageit_get_inc(struct ipz_queue *queue)
+{
+	void *ret = ipz_qeit_get(queue);
+	queue->current_q_offset += queue->pagesize;
+	if (queue->current_q_offset > queue->queue_length) {
+		queue->current_q_offset -= queue->pagesize;
+		ret = NULL;
+	}
+	if (((u64)ret) % EHCA_PAGESIZE) {
+		ehca_gen_err("ERROR!! not at page boundary");
+		return NULL;
+	}
+	return ret;
+}
+
+void *ipz_qeit_eq_get_inc(struct ipz_queue *queue)
+{
+	void *ret = ipz_qeit_get(queue);
+	u64 last_entry_in_q = queue->queue_length - queue->qe_size;
+
+	queue->current_q_offset += queue->qe_size;
+	if (queue->current_q_offset > last_entry_in_q) {
+		queue->current_q_offset = 0;
+		/* wrap: flip the generation (toggle) bit */
+		queue->toggle_state = (~queue->toggle_state) & 1;
+	}
+
+	return ret;
+}
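
ipz_qpageit_get_inc() is the page-granular iterator the setup paths use to
hand a queue's backing pages to the firmware one at a time: it yields each
EHCA_PAGESIZE-aligned page in turn and NULL once the queue is exhausted (or
if alignment is broken). A condensed sketch of that registration idiom;
register_one_page() is purely hypothetical and stands in for the
appropriate hipz_h_register_rpage_*() call, with virt_to_abs() being the
powerpc kv-to-absolute-address helper:

	void *vpage;
	u64 rpage;

	queue->current_q_offset = 0;		/* start at the first page */
	while ((vpage = ipz_qpageit_get_inc(queue))) {
		rpage = virt_to_abs(vpage);	/* kv -> absolute address */
		register_one_page(rpage);	/* hand the page to HW/FW */
	}
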
didn't get the memory"); + return 0; + } + memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *)); + /* + * allocate pages for queue: + * outer loop allocates whole kernel pages (page aligned) and + * inner loop divides a kernel page into smaller hca queue pages + */ + f = 0; + while (f < nr_of_pages) { + u8 *kpage = (u8*)get_zeroed_page(GFP_KERNEL); + int k; + if (!kpage) + goto ipz_queue_ctor_exit0; /*NOMEM*/ + for (k = 0; k < pages_per_kpage && f < nr_of_pages; k++) { + (queue->queue_pages)[f] = (struct ipz_page *)kpage; + kpage += EHCA_PAGESIZE; + f++; + } + } + + queue->current_q_offset = 0; + queue->qe_size = qe_size; + queue->act_nr_of_sg = nr_of_sg; + queue->pagesize = pagesize; + queue->toggle_state = 1; + return 1; + + ipz_queue_ctor_exit0: + ehca_gen_err("Couldn't get alloc pages queue=%p f=%x nr_of_pages=%x", + queue, f, nr_of_pages); + for (f = 0; f < nr_of_pages; f += pages_per_kpage) { + if (!(queue->queue_pages)[f]) + break; + free_page((unsigned long)(queue->queue_pages)[f]); + } + return 0; +} + +int ipz_queue_dtor(struct ipz_queue *queue) +{ + int pages_per_kpage = PAGE_SIZE >> EHCA_PAGESHIFT; + int g; + int nr_pages; + + if (!queue || !queue->queue_pages) { + ehca_gen_dbg("queue or queue_pages is NULL"); + return 0; + } + nr_pages = queue->queue_length / queue->pagesize; + for (g = 0; g < nr_pages; g += pages_per_kpage) + free_page((unsigned long)(queue->queue_pages)[g]); + vfree(queue->queue_pages); + + return 1; +} diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.h b/drivers/infiniband/hw/ehca/ipz_pt_fn.h new file mode 100644 index 000000000000..2f13509d5257 --- /dev/null +++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.h @@ -0,0 +1,247 @@ +/* + * IBM eServer eHCA Infiniband device driver for Linux on POWER + * + * internal queue handling + * + * Authors: Waleri Fomin <fomin@de.ibm.com> + * Reinhard Ernst <rernst@de.ibm.com> + * Christoph Raisch <raisch@de.ibm.com> + * + * Copyright (c) 2005 IBM Corporation + * + * All rights reserved. + * + * This source code is distributed under a dual license of GPL v2.0 and OpenIB + * BSD. + * + * OpenIB BSD License + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.h b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
new file mode 100644
index 000000000000..2f13509d5257
--- /dev/null
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
@@ -0,0 +1,247 @@
+/*
+ * IBM eServer eHCA Infiniband device driver for Linux on POWER
+ *
+ * internal queue handling
+ *
+ * Authors: Waleri Fomin <fomin@de.ibm.com>
+ *          Reinhard Ernst <rernst@de.ibm.com>
+ *          Christoph Raisch <raisch@de.ibm.com>
+ *
+ * Copyright (c) 2005 IBM Corporation
+ *
+ * All rights reserved.
+ *
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
+ *
+ * OpenIB BSD License
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __IPZ_PT_FN_H__
+#define __IPZ_PT_FN_H__
+
+#define EHCA_PAGESHIFT  12
+#define EHCA_PAGESIZE   4096UL
+#define EHCA_PAGEMASK   (~(EHCA_PAGESIZE-1))
+#define EHCA_PT_ENTRIES 512UL
+
+#include "ehca_tools.h"
+#include "ehca_qes.h"
+
+/* struct generic ehca page */
+struct ipz_page {
+	u8 entries[EHCA_PAGESIZE];
+};
+
+/* struct generic queue in linux kernel virtual memory (kv) */
+struct ipz_queue {
+	u64 current_q_offset;	/* current queue entry */
+
+	struct ipz_page **queue_pages;	/* array of pages belonging to queue */
+	u32 qe_size;		/* queue entry size */
+	u32 act_nr_of_sg;
+	u32 queue_length;	/* queue length allocated in bytes */
+	u32 pagesize;
+	u32 toggle_state;	/* toggle flag - per page */
+	u32 dummy3;		/* 64 bit alignment */
+};
+
+/*
+ * return current Queue Entry for a certain q_offset
+ * returns address (kv) of Queue Entry
+ */
+static inline void *ipz_qeit_calc(struct ipz_queue *queue, u64 q_offset)
+{
+	struct ipz_page *current_page;
+	if (q_offset >= queue->queue_length)
+		return NULL;
+	current_page = (queue->queue_pages)[q_offset >> EHCA_PAGESHIFT];
+	return &current_page->entries[q_offset & (EHCA_PAGESIZE - 1)];
+}
+
+/*
+ * return current Queue Entry
+ * returns address (kv) of Queue Entry
+ */
+static inline void *ipz_qeit_get(struct ipz_queue *queue)
+{
+	return ipz_qeit_calc(queue, queue->current_q_offset);
+}
+
+/*
+ * return current Queue Page, increment Queue Page iterator from
+ * page to page in struct ipz_queue, last increment will return 0! and
+ * NOT wrap
+ * returns address (kv) of Queue Page
+ * warning don't use in parallel with ipz_qeit_get_inc()
+ */
+void *ipz_qpageit_get_inc(struct ipz_queue *queue);
+
+/*
+ * return current Queue Entry, increment Queue Entry iterator by one
+ * step in struct ipz_queue, will wrap in ringbuffer
+ * returns address (kv) of Queue Entry BEFORE increment
+ * warning don't use in parallel with ipz_qpageit_get_inc()
+ * warning unpredictable results may occur if steps>act_nr_of_queue_entries
+ */
+static inline void *ipz_qeit_get_inc(struct ipz_queue *queue)
+{
+	void *ret = ipz_qeit_get(queue);
+	queue->current_q_offset += queue->qe_size;
+	if (queue->current_q_offset >= queue->queue_length) {
+		queue->current_q_offset = 0;
+		/* toggle the valid flag */
+		queue->toggle_state = (~queue->toggle_state) & 1;
+	}
+
+	return ret;
+}
+
+/*
+ * return current Queue Entry, increment Queue Entry iterator by one
+ * step in struct ipz_queue, will wrap in ringbuffer
+ * returns address (kv) of Queue Entry BEFORE increment
+ * returns 0 and does not increment, if wrong valid state
+ * warning don't use in parallel with ipz_qpageit_get_inc()
+ * warning unpredictable results may occur if steps>act_nr_of_queue_entries
+ */
+static inline void *ipz_qeit_get_inc_valid(struct ipz_queue *queue)
+{
+	struct ehca_cqe *cqe = ipz_qeit_get(queue);
+	u32 cqe_flags = cqe->cqe_flags;
+
+	if ((cqe_flags >> 7) != (queue->toggle_state & 1))
+		return NULL;
+
+	ipz_qeit_get_inc(queue);
+	return cqe;
+}
+
+/*
+ * returns and resets Queue Entry iterator
+ * returns address (kv) of first Queue Entry
+ */
+static inline void *ipz_qeit_reset(struct ipz_queue *queue)
+{
+	queue->current_q_offset = 0;
+	return ipz_qeit_get(queue);
+}
+
+/* struct generic page table */
+struct ipz_pt {
+	u64 entries[EHCA_PT_ENTRIES];
+};
+
+/* struct page table for a queue, only to be used in pf */
+struct ipz_qpt {
+	/* queue page tables (kv), use u64 because we know the element length */
+	u64 *qpts;
+	u32 n_qpts;
+	u32 n_ptes;		/* number of page table entries */
+	u64 *current_pte_addr;
+};
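
The generation ("toggle") bit is what lets ipz_qeit_get_inc_valid() detect
new entries without a head pointer shared with hardware: the HCA writes
every CQE with the current generation in bit 7 of cqe_flags, the consumer
flips toggle_state each time the ring wraps, and a mismatch therefore means
"no new entry yet". The resulting poll loop, sketched here with a purely
hypothetical process_cqe() consumer:

	struct ehca_cqe *cqe;

	while ((cqe = ipz_qeit_get_inc_valid(queue))) {
		/* generation bit matched toggle_state: entry is new */
		process_cqe(cqe);
	}
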
+
+/*
+ * constructor for an ipz_queue_t, placement new for ipz_queue_t,
+ * new for all dependent data structures
+ * all QP Tables are the same
+ * flow:
+ *    allocate+pin queue
+ * see ipz_qpt_ctor()
+ * returns true if ok, false if out of memory
+ */
+int ipz_queue_ctor(struct ipz_queue *queue, const u32 nr_of_pages,
+		   const u32 pagesize, const u32 qe_size,
+		   const u32 nr_of_sg);
+
+/*
+ * destructor for an ipz_queue_t
+ *  -# free queue
+ *  see ipz_queue_ctor()
+ *  returns true if ok, false if queue was NULL-ptr or free failed
+ */
+int ipz_queue_dtor(struct ipz_queue *queue);
+
+/*
+ * constructor for an ipz_qpt_t,
+ * placement new for struct ipz_queue, new for all dependent data structures
+ * all QP Tables are the same,
+ * flow:
+ * -# allocate+pin queue
+ * -# initialise ptcb
+ * -# allocate+pin PTs
+ * -# link PTs to a ring, according to HCA Arch, set bit62 if needed
+ * -# the ring must have room for exactly nr_of_PTEs
+ * see ipz_qpt_ctor()
+ */
+void ipz_qpt_ctor(struct ipz_qpt *qpt,
+		  const u32 nr_of_qes,
+		  const u32 pagesize,
+		  const u32 qe_size,
+		  const u8 lowbyte, const u8 toggle,
+		  u32 *act_nr_of_QEs, u32 *act_nr_of_pages);
+
+/*
+ * return current Queue Entry, increment Queue Entry iterator by one
+ * step in struct ipz_queue, will wrap in ringbuffer
+ * returns address (kv) of Queue Entry BEFORE increment
+ * warning don't use in parallel with ipz_qpageit_get_inc()
+ * warning unpredictable results may occur if steps>act_nr_of_queue_entries
+ * fix EQ page problems
+ */
+void *ipz_qeit_eq_get_inc(struct ipz_queue *queue);
+
+/*
+ * return current Event Queue Entry, increment Queue Entry iterator
+ * by one step in struct ipz_queue if valid, will wrap in ringbuffer
+ * returns address (kv) of Queue Entry BEFORE increment
+ * returns 0 and does not increment, if wrong valid state
+ * warning don't use in parallel with ipz_qpageit_get_inc()
+ * warning unpredictable results may occur if steps>act_nr_of_queue_entries
+ */
+static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue *queue)
+{
+	void *ret = ipz_qeit_get(queue);
+	u32 qe = *(u8 *) ret;
+	if ((qe >> 7) != (queue->toggle_state & 1))
+		return NULL;
+	ipz_qeit_eq_get_inc(queue); /* this is a good one */
+	return ret;
+}
+
+/* returns address (GX) of first queue entry */
+static inline u64 ipz_qpt_get_firstpage(struct ipz_qpt *qpt)
+{
+	return be64_to_cpu(qpt->qpts[0]);
+}
+
+/* returns address (kv) of first page of queue page table */
+static inline void *ipz_qpt_get_qpt(struct ipz_qpt *qpt)
+{
+	return qpt->qpts;
+}
+
+#endif /* __IPZ_PT_FN_H__ */
diff --git a/drivers/infiniband/hw/ipath/Kconfig b/drivers/infiniband/hw/ipath/Kconfig
index 1db9489f1e82..574a678e7fdd 100644
--- a/drivers/infiniband/hw/ipath/Kconfig
+++ b/drivers/infiniband/hw/ipath/Kconfig
@@ -1,16 +1,9 @@
-config IPATH_CORE
-	tristate "QLogic InfiniPath Driver"
-	depends on 64BIT && PCI_MSI && NET
-	---help---
-	This is a low-level driver for QLogic InfiniPath host channel
-	adapters (HCAs) based on the HT-400 and PE-800 chips.
-
 config INFINIBAND_IPATH
-	tristate "QLogic InfiniPath Verbs Driver"
-	depends on IPATH_CORE && INFINIBAND
+	tristate "QLogic InfiniPath Driver"
+	depends on PCI_MSI && 64BIT && INFINIBAND
 	---help---
-	This is a driver that provides InfiniBand verbs support for
-	QLogic InfiniPath host channel adapters (HCAs).
This - allows these devices to be used with both kernel upper level - protocols such as IP-over-InfiniBand as well as with userspace - applications (in conjunction with InfiniBand userspace access). + This is a driver for QLogic InfiniPath host channel adapters, + including InfiniBand verbs support. This driver allows these + devices to be used with both kernel upper level protocols such + as IP-over-InfiniBand as well as with userspace applications + (in conjunction with InfiniBand userspace access). diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile index b0bf72864130..5e29cb0095e5 100644 --- a/drivers/infiniband/hw/ipath/Makefile +++ b/drivers/infiniband/hw/ipath/Makefile @@ -1,36 +1,35 @@ EXTRA_CFLAGS += -DIPATH_IDSTR='"QLogic kernel.org driver"' \ -DIPATH_KERN_TYPE=0 -obj-$(CONFIG_IPATH_CORE) += ipath_core.o obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o -ipath_core-y := \ +ib_ipath-y := \ + ipath_cq.o \ ipath_diag.o \ ipath_driver.o \ ipath_eeprom.o \ ipath_file_ops.o \ ipath_fs.o \ - ipath_ht400.o \ + ipath_iba6110.o \ + ipath_iba6120.o \ ipath_init_chip.o \ ipath_intr.o \ - ipath_layer.o \ - ipath_pe800.o \ - ipath_stats.o \ - ipath_sysfs.o \ - ipath_user_pages.o - -ipath_core-$(CONFIG_X86_64) += ipath_wc_x86_64.o - -ib_ipath-y := \ - ipath_cq.o \ ipath_keys.o \ + ipath_layer.o \ ipath_mad.o \ + ipath_mmap.o \ ipath_mr.o \ ipath_qp.o \ ipath_rc.o \ ipath_ruc.o \ ipath_srq.o \ + ipath_stats.o \ + ipath_sysfs.o \ ipath_uc.o \ ipath_ud.o \ - ipath_verbs.o \ - ipath_verbs_mcast.o + ipath_user_pages.o \ + ipath_verbs_mcast.o \ + ipath_verbs.o + +ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o +ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o diff --git a/drivers/infiniband/hw/ipath/ipath_common.h b/drivers/infiniband/hw/ipath/ipath_common.h index 062bd392e7e5..f577905e3aca 100644 --- a/drivers/infiniband/hw/ipath/ipath_common.h +++ b/drivers/infiniband/hw/ipath/ipath_common.h @@ -106,9 +106,9 @@ struct infinipath_stats { __u64 sps_ether_spkts; /* number of "ethernet" packets received by driver */ __u64 sps_ether_rpkts; - /* number of SMA packets sent by driver */ + /* number of SMA packets sent by driver. Obsolete. */ __u64 sps_sma_spkts; - /* number of SMA packets received by driver */ + /* number of SMA packets received by driver. Obsolete. */ __u64 sps_sma_rpkts; /* number of times all ports rcvhdrq was full and packet dropped */ __u64 sps_hdrqfull; @@ -138,7 +138,7 @@ struct infinipath_stats { __u64 sps_pageunlocks; /* * Number of packets dropped in kernel other than errors (ether - * packets if ipath not configured, sma/mad, etc.) + * packets if ipath not configured, etc.) */ __u64 sps_krdrops; /* pad for future growth */ @@ -153,8 +153,6 @@ struct infinipath_stats { #define IPATH_STATUS_DISABLED 0x2 /* hardware disabled */ /* Device has been disabled via admin request */ #define IPATH_STATUS_ADMIN_DISABLED 0x4 -#define IPATH_STATUS_OIB_SMA 0x8 /* ipath_mad kernel SMA running */ -#define IPATH_STATUS_SMA 0x10 /* user SMA running */ /* Chip has been found and initted */ #define IPATH_STATUS_CHIP_PRESENT 0x20 /* IB link is at ACTIVE, usable for data traffic */ @@ -465,12 +463,11 @@ struct __ipath_sendpkt { struct ipath_iovec sps_iov[4]; }; -/* Passed into SMA special file's ->read and ->write methods. */ -struct ipath_sma_pkt -{ - __u32 unit; /* unit on which to send packet */ - __u64 data; /* address of payload in userspace */ - __u32 len; /* length of payload */ +/* Passed into diag data special file's ->write method. 
*/ +struct ipath_diag_pkt { + __u32 unit; + __u64 data; + __u32 len; }; /* diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c index 3efee341c9bc..049221bc590e 100644 --- a/drivers/infiniband/hw/ipath/ipath_cq.c +++ b/drivers/infiniband/hw/ipath/ipath_cq.c @@ -42,20 +42,28 @@ * @entry: work completion entry to add * @sig: true if @entry is a solicitated entry * - * This may be called with one of the qp->s_lock or qp->r_rq.lock held. + * This may be called with qp->s_lock held. */ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited) { + struct ipath_cq_wc *wc = cq->queue; unsigned long flags; + u32 head; u32 next; spin_lock_irqsave(&cq->lock, flags); - if (cq->head == cq->ibcq.cqe) + /* + * Note that the head pointer might be writable by user processes. + * Take care to verify it is a sane value. + */ + head = wc->head; + if (head >= (unsigned) cq->ibcq.cqe) { + head = cq->ibcq.cqe; next = 0; - else - next = cq->head + 1; - if (unlikely(next == cq->tail)) { + } else + next = head + 1; + if (unlikely(next == wc->tail)) { spin_unlock_irqrestore(&cq->lock, flags); if (cq->ibcq.event_handler) { struct ib_event ev; @@ -67,8 +75,8 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited) } return; } - cq->queue[cq->head] = *entry; - cq->head = next; + wc->queue[head] = *entry; + wc->head = next; if (cq->notify == IB_CQ_NEXT_COMP || (cq->notify == IB_CQ_SOLICITED && solicited)) { @@ -101,19 +109,20 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited) int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) { struct ipath_cq *cq = to_icq(ibcq); + struct ipath_cq_wc *wc = cq->queue; unsigned long flags; int npolled; spin_lock_irqsave(&cq->lock, flags); for (npolled = 0; npolled < num_entries; ++npolled, ++entry) { - if (cq->tail == cq->head) + if (wc->tail == wc->head) break; - *entry = cq->queue[cq->tail]; - if (cq->tail == cq->ibcq.cqe) - cq->tail = 0; + *entry = wc->queue[wc->tail]; + if (wc->tail >= cq->ibcq.cqe) + wc->tail = 0; else - cq->tail++; + wc->tail++; } spin_unlock_irqrestore(&cq->lock, flags); @@ -160,38 +169,74 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, { struct ipath_ibdev *dev = to_idev(ibdev); struct ipath_cq *cq; - struct ib_wc *wc; + struct ipath_cq_wc *wc; struct ib_cq *ret; - if (entries > ib_ipath_max_cqes) { + if (entries < 1 || entries > ib_ipath_max_cqes) { ret = ERR_PTR(-EINVAL); - goto bail; + goto done; } if (dev->n_cqs_allocated == ib_ipath_max_cqs) { ret = ERR_PTR(-ENOMEM); - goto bail; + goto done; } - /* - * Need to use vmalloc() if we want to support large #s of - * entries. - */ + /* Allocate the completion queue structure. */ cq = kmalloc(sizeof(*cq), GFP_KERNEL); if (!cq) { ret = ERR_PTR(-ENOMEM); - goto bail; + goto done; } /* - * Need to use vmalloc() if we want to support large #s of entries. + * Allocate the completion queue entries and head/tail pointers. + * This is allocated separately so that it can be resized and + * also mapped into user space. + * We need to use vmalloc() in order to support mmap and large + * numbers of entries. */ - wc = vmalloc(sizeof(*wc) * (entries + 1)); + wc = vmalloc_user(sizeof(*wc) + sizeof(struct ib_wc) * entries); if (!wc) { - kfree(cq); ret = ERR_PTR(-ENOMEM); - goto bail; + goto bail_cq; } + + /* + * Return the address of the WC as the offset to mmap. + * See ipath_mmap() for details. 
+ */ + if (udata && udata->outlen >= sizeof(__u64)) { + struct ipath_mmap_info *ip; + __u64 offset = (__u64) wc; + int err; + + err = ib_copy_to_udata(udata, &offset, sizeof(offset)); + if (err) { + ret = ERR_PTR(err); + goto bail_wc; + } + + /* Allocate info for ipath_mmap(). */ + ip = kmalloc(sizeof(*ip), GFP_KERNEL); + if (!ip) { + ret = ERR_PTR(-ENOMEM); + goto bail_wc; + } + cq->ip = ip; + ip->context = context; + ip->obj = wc; + kref_init(&ip->ref); + ip->mmap_cnt = 0; + ip->size = PAGE_ALIGN(sizeof(*wc) + + sizeof(struct ib_wc) * entries); + spin_lock_irq(&dev->pending_lock); + ip->next = dev->pending_mmaps; + dev->pending_mmaps = ip; + spin_unlock_irq(&dev->pending_lock); + } else + cq->ip = NULL; + /* * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe. * The number of entries should be >= the number requested or return @@ -202,15 +247,22 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, cq->triggered = 0; spin_lock_init(&cq->lock); tasklet_init(&cq->comptask, send_complete, (unsigned long)cq); - cq->head = 0; - cq->tail = 0; + wc->head = 0; + wc->tail = 0; cq->queue = wc; ret = &cq->ibcq; dev->n_cqs_allocated++; + goto done; -bail: +bail_wc: + vfree(wc); + +bail_cq: + kfree(cq); + +done: return ret; } @@ -229,7 +281,10 @@ int ipath_destroy_cq(struct ib_cq *ibcq) tasklet_kill(&cq->comptask); dev->n_cqs_allocated--; - vfree(cq->queue); + if (cq->ip) + kref_put(&cq->ip->ref, ipath_release_mmap_info); + else + vfree(cq->queue); kfree(cq); return 0; @@ -253,7 +308,7 @@ int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) spin_lock_irqsave(&cq->lock, flags); /* * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow - * any other transitions. + * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2). */ if (cq->notify != IB_CQ_NEXT_COMP) cq->notify = notify; @@ -264,46 +319,86 @@ int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) { struct ipath_cq *cq = to_icq(ibcq); - struct ib_wc *wc, *old_wc; - u32 n; + struct ipath_cq_wc *old_wc = cq->queue; + struct ipath_cq_wc *wc; + u32 head, tail, n; int ret; + if (cqe < 1 || cqe > ib_ipath_max_cqes) { + ret = -EINVAL; + goto bail; + } + /* * Need to use vmalloc() if we want to support large #s of entries. */ - wc = vmalloc(sizeof(*wc) * (cqe + 1)); + wc = vmalloc_user(sizeof(*wc) + sizeof(struct ib_wc) * cqe); if (!wc) { ret = -ENOMEM; goto bail; } + /* + * Return the address of the WC as the offset to mmap. + * See ipath_mmap() for details. + */ + if (udata && udata->outlen >= sizeof(__u64)) { + __u64 offset = (__u64) wc; + + ret = ib_copy_to_udata(udata, &offset, sizeof(offset)); + if (ret) + goto bail; + } + spin_lock_irq(&cq->lock); - if (cq->head < cq->tail) - n = cq->ibcq.cqe + 1 + cq->head - cq->tail; + /* + * Make sure head and tail are sane since they + * might be user writable. 
+ */ + head = old_wc->head; + if (head > (u32) cq->ibcq.cqe) + head = (u32) cq->ibcq.cqe; + tail = old_wc->tail; + if (tail > (u32) cq->ibcq.cqe) + tail = (u32) cq->ibcq.cqe; + if (head < tail) + n = cq->ibcq.cqe + 1 + head - tail; else - n = cq->head - cq->tail; + n = head - tail; if (unlikely((u32)cqe < n)) { spin_unlock_irq(&cq->lock); vfree(wc); ret = -EOVERFLOW; goto bail; } - for (n = 0; cq->tail != cq->head; n++) { - wc[n] = cq->queue[cq->tail]; - if (cq->tail == cq->ibcq.cqe) - cq->tail = 0; + for (n = 0; tail != head; n++) { + wc->queue[n] = old_wc->queue[tail]; + if (tail == (u32) cq->ibcq.cqe) + tail = 0; else - cq->tail++; + tail++; } cq->ibcq.cqe = cqe; - cq->head = n; - cq->tail = 0; - old_wc = cq->queue; + wc->head = n; + wc->tail = 0; cq->queue = wc; spin_unlock_irq(&cq->lock); vfree(old_wc); + if (cq->ip) { + struct ipath_ibdev *dev = to_idev(ibcq->device); + struct ipath_mmap_info *ip = cq->ip; + + ip->obj = wc; + ip->size = PAGE_ALIGN(sizeof(*wc) + + sizeof(struct ib_wc) * cqe); + spin_lock_irq(&dev->pending_lock); + ip->next = dev->pending_mmaps; + dev->pending_mmaps = ip; + spin_unlock_irq(&dev->pending_lock); + } + ret = 0; bail: diff --git a/drivers/infiniband/hw/ipath/ipath_debug.h b/drivers/infiniband/hw/ipath/ipath_debug.h index f415beda0d32..df69f0d80b8b 100644 --- a/drivers/infiniband/hw/ipath/ipath_debug.h +++ b/drivers/infiniband/hw/ipath/ipath_debug.h @@ -60,7 +60,6 @@ #define __IPATH_USER_SEND 0x1000 /* use user mode send */ #define __IPATH_KERNEL_SEND 0x2000 /* use kernel mode send */ #define __IPATH_EPKTDBG 0x4000 /* print ethernet packet data */ -#define __IPATH_SMADBG 0x8000 /* sma packet debug */ #define __IPATH_IPATHDBG 0x10000 /* Ethernet (IPATH) gen debug */ #define __IPATH_IPATHWARN 0x20000 /* Ethernet (IPATH) warnings */ #define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors */ @@ -84,7 +83,6 @@ /* print mmap/nopage stuff, not using VDBG any more */ #define __IPATH_MMDBG 0x0 #define __IPATH_EPKTDBG 0x0 /* print ethernet packet data */ -#define __IPATH_SMADBG 0x0 /* process startup (init)/exit messages */ #define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) table dump on */ #define __IPATH_IPATHWARN 0x0 /* Ethernet (IPATH) warnings on */ #define __IPATH_IPATHERR 0x0 /* Ethernet (IPATH) errors on */ diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c index 147dd89e21c9..28b6b46c106a 100644 --- a/drivers/infiniband/hw/ipath/ipath_diag.c +++ b/drivers/infiniband/hw/ipath/ipath_diag.c @@ -41,11 +41,11 @@ * through the /sys/bus/pci resource mmap interface. 
*/ +#include <linux/io.h> #include <linux/pci.h> #include <asm/uaccess.h> #include "ipath_kernel.h" -#include "ipath_layer.h" #include "ipath_common.h" int ipath_diag_inuse; @@ -274,6 +274,158 @@ bail: return ret; } +static ssize_t ipath_diagpkt_write(struct file *fp, + const char __user *data, + size_t count, loff_t *off); + +static struct file_operations diagpkt_file_ops = { + .owner = THIS_MODULE, + .write = ipath_diagpkt_write, +}; + +static struct cdev *diagpkt_cdev; +static struct class_device *diagpkt_class_dev; + +int __init ipath_diagpkt_add(void) +{ + return ipath_cdev_init(IPATH_DIAGPKT_MINOR, + "ipath_diagpkt", &diagpkt_file_ops, + &diagpkt_cdev, &diagpkt_class_dev); +} + +void __exit ipath_diagpkt_remove(void) +{ + ipath_cdev_cleanup(&diagpkt_cdev, &diagpkt_class_dev); +} + +/** + * ipath_diagpkt_write - write an IB packet + * @fp: the diag data device file pointer + * @data: ipath_diag_pkt structure saying where to get the packet + * @count: size of data to write + * @off: unused by this code + */ +static ssize_t ipath_diagpkt_write(struct file *fp, + const char __user *data, + size_t count, loff_t *off) +{ + u32 __iomem *piobuf; + u32 plen, clen, pbufn; + struct ipath_diag_pkt dp; + u32 *tmpbuf = NULL; + struct ipath_devdata *dd; + ssize_t ret = 0; + u64 val; + + if (count < sizeof(dp)) { + ret = -EINVAL; + goto bail; + } + + if (copy_from_user(&dp, data, sizeof(dp))) { + ret = -EFAULT; + goto bail; + } + + /* send count must be an exact number of dwords */ + if (dp.len & 3) { + ret = -EINVAL; + goto bail; + } + + clen = dp.len >> 2; + + dd = ipath_lookup(dp.unit); + if (!dd || !(dd->ipath_flags & IPATH_PRESENT) || + !dd->ipath_kregbase) { + ipath_cdbg(VERBOSE, "illegal unit %u for diag data send\n", + dp.unit); + ret = -ENODEV; + goto bail; + } + + if (ipath_diag_inuse && !diag_set_link && + !(dd->ipath_flags & IPATH_LINKACTIVE)) { + diag_set_link = 1; + ipath_cdbg(VERBOSE, "Trying to set to set link active for " + "diag pkt\n"); + ipath_set_linkstate(dd, IPATH_IB_LINKARM); + ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE); + } + + if (!(dd->ipath_flags & IPATH_INITTED)) { + /* no hardware, freeze, etc. 
*/ + ipath_cdbg(VERBOSE, "unit %u not usable\n", dd->ipath_unit); + ret = -ENODEV; + goto bail; + } + val = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK; + if (val != IPATH_IBSTATE_INIT && val != IPATH_IBSTATE_ARM && + val != IPATH_IBSTATE_ACTIVE) { + ipath_cdbg(VERBOSE, "unit %u not ready (state %llx)\n", + dd->ipath_unit, (unsigned long long) val); + ret = -EINVAL; + goto bail; + } + + /* need total length before first word written */ + /* +1 word is for the qword padding */ + plen = sizeof(u32) + dp.len; + + if ((plen + 4) > dd->ipath_ibmaxlen) { + ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n", + plen - 4, dd->ipath_ibmaxlen); + ret = -EINVAL; + goto bail; /* before writing pbc */ + } + tmpbuf = vmalloc(plen); + if (!tmpbuf) { + dev_info(&dd->pcidev->dev, "Unable to allocate tmp buffer, " + "failing\n"); + ret = -ENOMEM; + goto bail; + } + + if (copy_from_user(tmpbuf, + (const void __user *) (unsigned long) dp.data, + dp.len)) { + ret = -EFAULT; + goto bail; + } + + piobuf = ipath_getpiobuf(dd, &pbufn); + if (!piobuf) { + ipath_cdbg(VERBOSE, "No PIO buffers avail unit for %u\n", + dd->ipath_unit); + ret = -EBUSY; + goto bail; + } + + plen >>= 2; /* in dwords */ + + if (ipath_debug & __IPATH_PKTDBG) + ipath_cdbg(VERBOSE, "unit %u 0x%x+1w pio%d\n", + dd->ipath_unit, plen - 1, pbufn); + + /* we have to flush after the PBC for correctness on some cpus + * or WC buffer can be written out of order */ + writeq(plen, piobuf); + ipath_flush_wc(); + /* copy all by the trigger word, then flush, so it's written + * to chip before trigger word, then write trigger word, then + * flush again, so packet is sent. */ + __iowrite32_copy(piobuf + 2, tmpbuf, clen - 1); + ipath_flush_wc(); + __raw_writel(tmpbuf[clen - 1], piobuf + clen + 1); + ipath_flush_wc(); + + ret = sizeof(dp); + +bail: + vfree(tmpbuf); + return ret; +} + static int ipath_diag_release(struct inode *in, struct file *fp) { mutex_lock(&ipath_mutex); diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c index f98518d912b5..2108466c7e33 100644 --- a/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/drivers/infiniband/hw/ipath/ipath_driver.c @@ -39,7 +39,7 @@ #include <linux/vmalloc.h> #include "ipath_kernel.h" -#include "ipath_layer.h" +#include "ipath_verbs.h" #include "ipath_common.h" static void ipath_update_pio_bufs(struct ipath_devdata *); @@ -51,8 +51,6 @@ const char *ipath_get_unit_name(int unit) return iname; } -EXPORT_SYMBOL_GPL(ipath_get_unit_name); - #define DRIVER_LOAD_MSG "QLogic " IPATH_DRV_NAME " loaded: " #define PFX IPATH_DRV_NAME ": " @@ -60,13 +58,13 @@ EXPORT_SYMBOL_GPL(ipath_get_unit_name); * The size has to be longer than this string, so we can append * board/chip information to it in the init code. */ -const char ipath_core_version[] = IPATH_IDSTR "\n"; +const char ib_ipath_version[] = IPATH_IDSTR "\n"; static struct idr unit_table; DEFINE_SPINLOCK(ipath_devs_lock); LIST_HEAD(ipath_dev_list); -wait_queue_head_t ipath_sma_state_wait; +wait_queue_head_t ipath_state_wait; unsigned ipath_debug = __IPATH_INFO; @@ -403,10 +401,10 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, /* setup the chip-specific functions, as early as possible. 
*/ switch (ent->device) { case PCI_DEVICE_ID_INFINIPATH_HT: - ipath_init_ht400_funcs(dd); + ipath_init_iba6110_funcs(dd); break; case PCI_DEVICE_ID_INFINIPATH_PE800: - ipath_init_pe800_funcs(dd); + ipath_init_iba6120_funcs(dd); break; default: ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, " @@ -440,7 +438,13 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, } dd->ipath_pcirev = rev; +#if defined(__powerpc__) + /* There isn't a generic way to specify writethrough mappings */ + dd->ipath_kregbase = __ioremap(addr, len, + (_PAGE_NO_CACHE|_PAGE_WRITETHRU)); +#else dd->ipath_kregbase = ioremap_nocache(addr, len); +#endif if (!dd->ipath_kregbase) { ipath_dbg("Unable to map io addr %llx to kvirt, failing\n", @@ -503,7 +507,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, ipathfs_add_device(dd); ipath_user_add(dd); ipath_diag_add(dd); - ipath_layer_add(dd); + ipath_register_ib_device(dd); goto bail; @@ -532,7 +536,7 @@ static void __devexit ipath_remove_one(struct pci_dev *pdev) return; dd = pci_get_drvdata(pdev); - ipath_layer_remove(dd); + ipath_unregister_ib_device(dd->verbs_dev); ipath_diag_remove(dd); ipath_user_remove(dd); ipathfs_remove_device(dd); @@ -607,21 +611,23 @@ void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first, * * wait up to msecs milliseconds for IB link state change to occur for * now, take the easy polling route. Currently used only by - * ipath_layer_set_linkstate. Returns 0 if state reached, otherwise + * ipath_set_linkstate. Returns 0 if state reached, otherwise * -ETIMEDOUT state can have multiple states set, for any of several * transitions. */ -int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs) +static int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, + int msecs) { - dd->ipath_sma_state_wanted = state; - wait_event_interruptible_timeout(ipath_sma_state_wait, + dd->ipath_state_wanted = state; + wait_event_interruptible_timeout(ipath_state_wait, (dd->ipath_flags & state), msecs_to_jiffies(msecs)); - dd->ipath_sma_state_wanted = 0; + dd->ipath_state_wanted = 0; if (!(dd->ipath_flags & state)) { u64 val; - ipath_cdbg(SMA, "Didn't reach linkstate %s within %u ms\n", + ipath_cdbg(VERBOSE, "Didn't reach linkstate %s within %u" + " ms\n", /* test INIT ahead of DOWN, both can be set */ (state & IPATH_LINKINIT) ? "INIT" : ((state & IPATH_LINKDOWN) ? "DOWN" : @@ -807,58 +813,6 @@ bail: return skb; } -/** - * ipath_rcv_layer - receive a packet for the layered (ethernet) driver - * @dd: the infinipath device - * @etail: the sk_buff number - * @tlen: the total packet length - * @hdr: the ethernet header - * - * Separate routine for better overall optimization - */ -static void ipath_rcv_layer(struct ipath_devdata *dd, u32 etail, - u32 tlen, struct ether_header *hdr) -{ - u32 elen; - u8 pad, *bthbytes; - struct sk_buff *skb, *nskb; - - if (dd->ipath_port0_skbs && - hdr->sub_opcode == IPATH_ITH4X_OPCODE_ENCAP) { - /* - * Allocate a new sk_buff to replace the one we give - * to the network stack. 
- */ - nskb = ipath_alloc_skb(dd, GFP_ATOMIC); - if (!nskb) { - /* count OK packets that we drop */ - ipath_stats.sps_krdrops++; - return; - } - - bthbytes = (u8 *) hdr->bth; - pad = (bthbytes[1] >> 4) & 3; - /* +CRC32 */ - elen = tlen - (sizeof(*hdr) + pad + sizeof(u32)); - - skb = dd->ipath_port0_skbs[etail]; - dd->ipath_port0_skbs[etail] = nskb; - skb_put(skb, elen); - - dd->ipath_f_put_tid(dd, etail + (u64 __iomem *) - ((char __iomem *) dd->ipath_kregbase - + dd->ipath_rcvegrbase), 0, - virt_to_phys(nskb->data)); - - __ipath_layer_rcv(dd, hdr, skb); - - /* another ether packet received */ - ipath_stats.sps_ether_rpkts++; - } - else if (hdr->sub_opcode == IPATH_ITH4X_OPCODE_LID_ARP) - __ipath_layer_rcv_lid(dd, hdr); -} - static void ipath_rcv_hdrerr(struct ipath_devdata *dd, u32 eflags, u32 l, @@ -972,26 +926,17 @@ reloop: if (unlikely(eflags)) ipath_rcv_hdrerr(dd, eflags, l, etail, rc); else if (etype == RCVHQ_RCV_TYPE_NON_KD) { - int ret = __ipath_verbs_rcv(dd, rc + 1, - ebuf, tlen); - if (ret == -ENODEV) - ipath_cdbg(VERBOSE, - "received IB packet, " - "not SMA (QP=%x)\n", qp); - if (dd->ipath_lli_counter) - dd->ipath_lli_counter--; - - } else if (etype == RCVHQ_RCV_TYPE_EAGER) { - if (qp == IPATH_KD_QP && - bthbytes[0] == ipath_layer_rcv_opcode && - ebuf) - ipath_rcv_layer(dd, etail, tlen, - (struct ether_header *)hdr); - else - ipath_cdbg(PKT, "typ %x, opcode %x (eager, " - "qp=%x), len %x; ignored\n", - etype, bthbytes[0], qp, tlen); + ipath_ib_rcv(dd->verbs_dev, rc + 1, ebuf, tlen); + if (dd->ipath_lli_counter) + dd->ipath_lli_counter--; + ipath_cdbg(PKT, "typ %x, opcode %x (eager, " + "qp=%x), len %x; ignored\n", + etype, bthbytes[0], qp, tlen); } + else if (etype == RCVHQ_RCV_TYPE_EAGER) + ipath_cdbg(PKT, "typ %x, opcode %x (eager, " + "qp=%x), len %x; ignored\n", + etype, bthbytes[0], qp, tlen); else if (etype == RCVHQ_RCV_TYPE_EXPECTED) ipath_dbg("Bug: Expected TID, opcode %x; ignored\n", be32_to_cpu(hdr->bth[0]) & 0xff); @@ -1024,7 +969,8 @@ reloop: */ if (l == hdrqtail || (i && !(i&0xf))) { u64 lval; - if (l == hdrqtail) /* PE-800 interrupt only on last */ + if (l == hdrqtail) + /* request IBA6120 interrupt only on last */ lval = dd->ipath_rhdrhead_intr_off | l; else lval = l; @@ -1038,7 +984,7 @@ reloop: } if (!dd->ipath_rhdrhead_intr_off && !reloop) { - /* HT-400 workaround; we can have a race clearing chip + /* IBA6110 workaround; we can have a race clearing chip * interrupt with another interrupt about to be delivered, * and can clear it before it is delivered on the GPIO * workaround. By doing the extra check here for the @@ -1211,7 +1157,7 @@ int ipath_setrcvhdrsize(struct ipath_devdata *dd, unsigned rhdrsize) * * do appropriate marking as busy, etc. * returns buffer number if one found (>=0), negative number is error. - * Used by ipath_sma_send_pkt and ipath_layer_send + * Used by ipath_layer_send */ u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 * pbufnum) { @@ -1317,13 +1263,6 @@ rescan: goto bail; } - if (updated) - /* - * ran out of bufs, now some (at least this one we just - * got) are now available, so tell the layered driver. - */ - __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE); - /* * set next starting place. 
Since it's just an optimization, * it doesn't matter who wins on this, so no locking @@ -1500,7 +1439,7 @@ int ipath_waitfor_mdio_cmdready(struct ipath_devdata *dd) return ret; } -void ipath_set_ib_lstate(struct ipath_devdata *dd, int which) +static void ipath_set_ib_lstate(struct ipath_devdata *dd, int which) { static const char *what[4] = { [0] = "DOWN", @@ -1511,7 +1450,7 @@ void ipath_set_ib_lstate(struct ipath_devdata *dd, int which) int linkcmd = (which >> INFINIPATH_IBCC_LINKCMD_SHIFT) & INFINIPATH_IBCC_LINKCMD_MASK; - ipath_cdbg(SMA, "Trying to move unit %u to %s, current ltstate " + ipath_cdbg(VERBOSE, "Trying to move unit %u to %s, current ltstate " "is %s\n", dd->ipath_unit, what[linkcmd], ipath_ibcstatus_str[ @@ -1520,7 +1459,7 @@ void ipath_set_ib_lstate(struct ipath_devdata *dd, int which) INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) & INFINIPATH_IBCS_LINKTRAININGSTATE_MASK]); /* flush all queued sends when going to DOWN or INIT, to be sure that - * they don't block SMA and other MAD packets */ + * they don't block MAD packets */ if (!linkcmd || linkcmd == INFINIPATH_IBCC_LINKCMD_INIT) { ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, INFINIPATH_S_ABORT); @@ -1534,6 +1473,180 @@ void ipath_set_ib_lstate(struct ipath_devdata *dd, int which) dd->ipath_ibcctrl | which); } +int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate) +{ + u32 lstate; + int ret; + + switch (newstate) { + case IPATH_IB_LINKDOWN: + ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL << + INFINIPATH_IBCC_LINKINITCMD_SHIFT); + /* don't wait */ + ret = 0; + goto bail; + + case IPATH_IB_LINKDOWN_SLEEP: + ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP << + INFINIPATH_IBCC_LINKINITCMD_SHIFT); + /* don't wait */ + ret = 0; + goto bail; + + case IPATH_IB_LINKDOWN_DISABLE: + ipath_set_ib_lstate(dd, + INFINIPATH_IBCC_LINKINITCMD_DISABLE << + INFINIPATH_IBCC_LINKINITCMD_SHIFT); + /* don't wait */ + ret = 0; + goto bail; + + case IPATH_IB_LINKINIT: + if (dd->ipath_flags & IPATH_LINKINIT) { + ret = 0; + goto bail; + } + ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT << + INFINIPATH_IBCC_LINKCMD_SHIFT); + lstate = IPATH_LINKINIT; + break; + + case IPATH_IB_LINKARM: + if (dd->ipath_flags & IPATH_LINKARMED) { + ret = 0; + goto bail; + } + if (!(dd->ipath_flags & + (IPATH_LINKINIT | IPATH_LINKACTIVE))) { + ret = -EINVAL; + goto bail; + } + ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED << + INFINIPATH_IBCC_LINKCMD_SHIFT); + /* + * Since the port can transition to ACTIVE by receiving + * a non VL 15 packet, wait for either state. + */ + lstate = IPATH_LINKARMED | IPATH_LINKACTIVE; + break; + + case IPATH_IB_LINKACTIVE: + if (dd->ipath_flags & IPATH_LINKACTIVE) { + ret = 0; + goto bail; + } + if (!(dd->ipath_flags & IPATH_LINKARMED)) { + ret = -EINVAL; + goto bail; + } + ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE << + INFINIPATH_IBCC_LINKCMD_SHIFT); + lstate = IPATH_LINKACTIVE; + break; + + default: + ipath_dbg("Invalid linkstate 0x%x requested\n", newstate); + ret = -EINVAL; + goto bail; + } + ret = ipath_wait_linkstate(dd, lstate, 2000); + +bail: + return ret; +} + +/** + * ipath_set_mtu - set the MTU + * @dd: the infinipath device + * @arg: the new MTU + * + * we can handle "any" incoming size, the issue here is whether we + * need to restrict our outgoing size. For now, we don't do any + * sanity checking on this, and we don't deal with what happens to + * programs that are already running when the size changes. 
+ * NOTE: changing the MTU will usually cause the IBC to go back to + * link initialize (IPATH_IBSTATE_INIT) state... + */ +int ipath_set_mtu(struct ipath_devdata *dd, u16 arg) +{ + u32 piosize; + int changed = 0; + int ret; + + /* + * mtu is IB data payload max. It's the largest power of 2 less + * than piosize (or even larger, since it only really controls the + * largest we can receive; we can send the max of the mtu and + * piosize). We check that it's one of the valid IB sizes. + */ + if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 && + arg != 4096) { + ipath_dbg("Trying to set invalid mtu %u, failing\n", arg); + ret = -EINVAL; + goto bail; + } + if (dd->ipath_ibmtu == arg) { + ret = 0; /* same as current */ + goto bail; + } + + piosize = dd->ipath_ibmaxlen; + dd->ipath_ibmtu = arg; + + if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) { + /* Only if it's not the initial value (or reset to it) */ + if (piosize != dd->ipath_init_ibmaxlen) { + dd->ipath_ibmaxlen = piosize; + changed = 1; + } + } else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) { + piosize = arg + IPATH_PIO_MAXIBHDR; + ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x " + "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize, + arg); + dd->ipath_ibmaxlen = piosize; + changed = 1; + } + + if (changed) { + /* + * set the IBC maxpktlength to the size of our pio + * buffers in words + */ + u64 ibc = dd->ipath_ibcctrl; + ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK << + INFINIPATH_IBCC_MAXPKTLEN_SHIFT); + + piosize = piosize - 2 * sizeof(u32); /* ignore pbc */ + dd->ipath_ibmaxlen = piosize; + piosize /= sizeof(u32); /* in words */ + /* + * for ICRC, which we only send in diag test pkt mode, and + * we don't need to worry about that for mtu + */ + piosize += 1; + + ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT; + dd->ipath_ibcctrl = ibc; + ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, + dd->ipath_ibcctrl); + dd->ipath_f_tidtemplate(dd); + } + + ret = 0; + +bail: + return ret; +} + +int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc) +{ + dd->ipath_lid = arg; + dd->ipath_lmc = lmc; + + return 0; +} + /** * ipath_read_kreg64_port - read a device's per-port 64-bit kernel register * @dd: the infinipath device @@ -1637,13 +1750,6 @@ void ipath_shutdown_device(struct ipath_devdata *dd) ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_DISABLE << INFINIPATH_IBCC_LINKINITCMD_SHIFT); - /* - * we are shutting down, so tell the layered driver. We don't do - * this on just a link state change, much like ethernet, a cable - * unplug, etc. 
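The validation at the top of ipath_set_mtu() accepts exactly the five legal IB MTUs. Since those are the powers of two from 256 through 4096, the comparison chain is equivalent to a power-of-two test; a standalone sketch (sketch_mtu_valid is a hypothetical helper, not the driver's API):

    #include <linux/types.h>

    /* the five legal IB MTUs are the powers of two from 256 to 4096 */
    static int sketch_mtu_valid(u32 mtu)
    {
            return mtu >= 256 && mtu <= 4096 && !(mtu & (mtu - 1));
    }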
doesn't change driver state - */ - ipath_layer_intr(dd, IPATH_LAYER_INT_IF_DOWN); - /* disable IBC */ dd->ipath_control &= ~INFINIPATH_C_LINKENABLE; ipath_write_kreg(dd, dd->ipath_kregs->kr_control, @@ -1743,7 +1849,7 @@ static int __init infinipath_init(void) { int ret; - ipath_dbg(KERN_INFO DRIVER_LOAD_MSG "%s", ipath_core_version); + ipath_dbg(KERN_INFO DRIVER_LOAD_MSG "%s", ib_ipath_version); /* * These must be called before the driver is registered with @@ -1776,8 +1882,18 @@ static int __init infinipath_init(void) goto bail_group; } + ret = ipath_diagpkt_add(); + if (ret < 0) { + printk(KERN_ERR IPATH_DRV_NAME ": Unable to create " + "diag data device: error %d\n", -ret); + goto bail_ipathfs; + } + goto bail; +bail_ipathfs: + ipath_exit_ipathfs(); + bail_group: ipath_driver_remove_group(&ipath_driver.driver); @@ -1888,6 +2004,8 @@ static void __exit infinipath_cleanup(void) struct ipath_devdata *dd, *tmp; unsigned long flags; + ipath_diagpkt_remove(); + ipath_exit_ipathfs(); ipath_driver_remove_group(&ipath_driver.driver); @@ -1998,5 +2116,22 @@ bail: return ret; } +int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv) +{ + u64 val; + if ( new_pol_inv > INFINIPATH_XGXS_RX_POL_MASK ) { + return -1; + } + if ( dd->ipath_rx_pol_inv != new_pol_inv ) { + dd->ipath_rx_pol_inv = new_pol_inv; + val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig); + val &= ~(INFINIPATH_XGXS_RX_POL_MASK << + INFINIPATH_XGXS_RX_POL_SHIFT); + val |= ((u64)dd->ipath_rx_pol_inv) << + INFINIPATH_XGXS_RX_POL_SHIFT; + ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val); + } + return 0; +} module_init(infinipath_init); module_exit(infinipath_cleanup); diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c index bbaa70e57db1..29930e22318e 100644 --- a/drivers/infiniband/hw/ipath/ipath_file_ops.c +++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c @@ -39,7 +39,6 @@ #include <asm/pgtable.h> #include "ipath_kernel.h" -#include "ipath_layer.h" #include "ipath_common.h" static int ipath_open(struct inode *, struct file *); @@ -985,15 +984,17 @@ static int mmap_piobufs(struct vm_area_struct *vma, * write combining behavior we want on the PIO buffers! */ - if (vma->vm_flags & VM_READ) { - dev_info(&dd->pcidev->dev, - "Can't map piobufs as readable (flags=%lx)\n", - vma->vm_flags); - ret = -EPERM; - goto bail; - } +#if defined(__powerpc__) + /* There isn't a generic way to specify writethrough mappings */ + pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; + pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU; + pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED; +#endif - /* don't allow them to later change to readable with mprotect */ + /* + * don't allow them to later change to readable with mprotect (for when + * not initially mapped readable, as is normally the case) + */ vma->vm_flags &= ~VM_MAYREAD; vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; @@ -1109,7 +1110,7 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma) ret = mmap_rcvegrbufs(vma, pd); else if (pgaddr == (u64) pd->port_rcvhdrq_phys) { /* - * The rcvhdrq itself; readonly except on HT-400 (so have + * The rcvhdrq itself; readonly except on HT (so have * to allow writable mapping), multiple pages, contiguous * from an i/o perspective. 
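The new ipath_set_rx_pol_inv() above is a read-modify-write of a multi-bit register field: clear the field at its shift, OR in the new value, write the register back. The operation in isolation (generic names, not the driver's API):

    #include <linux/types.h>

    /* replace the field (mask << shift) in reg with val */
    static u64 sketch_set_field(u64 reg, u64 mask, unsigned shift, u64 val)
    {
            reg &= ~(mask << shift);
            reg |= (val & mask) << shift;
            return reg;
    }

In the patch, the mask and shift are INFINIPATH_XGXS_RX_POL_MASK and INFINIPATH_XGXS_RX_POL_SHIFT, and the result goes back out through ipath_write_kreg().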
*/ @@ -1149,6 +1150,7 @@ static unsigned int ipath_poll(struct file *fp, struct ipath_portdata *pd; u32 head, tail; int bit; + unsigned pollflag = 0; struct ipath_devdata *dd; pd = port_fp(fp); @@ -1185,9 +1187,12 @@ static unsigned int ipath_poll(struct file *fp, clear_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag); pd->port_rcvwait_to++; } + else + pollflag = POLLIN | POLLRDNORM; } else { /* it's already happened; don't do wait_event overhead */ + pollflag = POLLIN | POLLRDNORM; pd->port_rcvnowait++; } @@ -1195,7 +1200,7 @@ static unsigned int ipath_poll(struct file *fp, ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl); - return 0; + return pollflag; } static int try_alloc_port(struct ipath_devdata *dd, int port, @@ -1297,14 +1302,14 @@ static int find_best_unit(struct file *fp) * This code is present to allow a knowledgeable person to * specify the layout of processes to processors before opening * this driver, and then we'll assign the process to the "closest" - * HT-400 to that processor (we assume reasonable connectivity, + * InfiniPath chip to that processor (we assume reasonable connectivity, * for now). This code assumes that if affinity has been set * before this point, that at most one cpu is set; for now this * is reasonable. I check for both cpus_empty() and cpus_full(), * in case some kernel variant sets none of the bits when no * affinity is set. 2.6.11 and 12 kernels have all present * cpus set. Some day we'll have to fix it up further to handle - * a cpu subset. This algorithm fails for two HT-400's connected + * a cpu subset. This algorithm fails for two HT chips connected * in tunnel fashion. Eventually this needs real topology * information. There may be some issues with dual core numbering * as well. This needs more work prior to release. @@ -1815,7 +1820,7 @@ int ipath_user_add(struct ipath_devdata *dd) if (ret < 0) { ipath_dev_err(dd, "Could not create wildcard " "minor: error %d\n", -ret); - goto bail_sma; + goto bail_user; } atomic_set(&user_setup, 1); @@ -1831,7 +1836,7 @@ int ipath_user_add(struct ipath_devdata *dd) goto bail; -bail_sma: +bail_user: user_cleanup(); bail: return ret; diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c index 0936d8e8d704..a5eb30a06a5c 100644 --- a/drivers/infiniband/hw/ipath/ipath_fs.c +++ b/drivers/infiniband/hw/ipath/ipath_fs.c @@ -191,8 +191,8 @@ static ssize_t atomic_port_info_read(struct file *file, char __user *buf, portinfo[4] = (dd->ipath_lid << 16); /* - * Notimpl yet SMLID (should we store this in the driver, in case - * SMA dies?) CapabilityMask is 0, we don't support any of these + * Notimpl yet SMLID. + * CapabilityMask is 0, we don't support any of these * DiagCode is 0; we don't store any diag info for now Notimpl yet * M_KeyLeasePeriod (we don't support M_Key) */ diff --git a/drivers/infiniband/hw/ipath/ipath_ht400.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c index 3db015da6e77..bf2455a6d562 100644 --- a/drivers/infiniband/hw/ipath/ipath_ht400.c +++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c @@ -33,7 +33,7 @@ /* * This file contains all of the code that is specific to the InfiniPath - * HT-400 chip. + * HT chip. */ #include <linux/pci.h> @@ -43,7 +43,7 @@ #include "ipath_registers.h" /* - * This lists the InfiniPath HT400 registers, in the actual chip layout. + * This lists the InfiniPath registers, in the actual chip layout. * This structure should never be directly accessed. 
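The ipath_poll() change above fixes a real bug: the old code always returned 0, so poll()/select() callers were never told the port was readable even when a packet had already arrived. A poll method must return a mask of ready events; in reduced form (the sketch_* names and the readiness flag are hypothetical):

    #include <linux/fs.h>
    #include <linux/poll.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(sketch_wait);
    static int packet_available;            /* set by the receive path */

    static unsigned int sketch_poll(struct file *fp, struct poll_table_struct *pt)
    {
            unsigned int pollflag = 0;

            poll_wait(fp, &sketch_wait, pt);  /* register; does not sleep */
            if (packet_available)
                    pollflag = POLLIN | POLLRDNORM;
            return pollflag;
    }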
* * The names are in InterCap form because they're taken straight from @@ -461,8 +461,9 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg, * times. */ if (dd->ipath_flags & IPATH_INITTED) { - ipath_dev_err(dd, "Fatal Error (freeze " - "mode), no longer usable\n"); + ipath_dev_err(dd, "Fatal Hardware Error (freeze " + "mode), no longer usable, SN %.16s\n", + dd->ipath_serial); isfatal = 1; } *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY; @@ -537,7 +538,7 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg, if (hwerrs & INFINIPATH_HWE_HTCMISCERR7) strlcat(msg, "[HT core Misc7]", msgl); if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) { - strlcat(msg, "[Memory BIST test failed, HT-400 unusable]", + strlcat(msg, "[Memory BIST test failed, InfiniPath hardware unusable]", msgl); /* ignore from now on, so disable until driver reloaded */ dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED; @@ -553,7 +554,7 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg, if (hwerrs & _IPATH_PLL_FAIL) { snprintf(bitsmsg, sizeof bitsmsg, - "[PLL failed (%llx), HT-400 unusable]", + "[PLL failed (%llx), InfiniPath hardware unusable]", (unsigned long long) (hwerrs & _IPATH_PLL_FAIL)); strlcat(msg, bitsmsg, msgl); /* ignore from now on, so disable until driver reloaded */ @@ -610,18 +611,18 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name, break; case 5: /* - * HT-460 original production board; two production levels, with + * original production board; two production levels, with * different serial number ranges. See ipath_ht_early_init() for * case where we enable IPATH_GPIO_INTR for later serial # range. */ - n = "InfiniPath_HT-460"; + n = "InfiniPath_QHT7040"; break; case 6: n = "OEM_Board_3"; break; case 7: - /* HT-460 small form factor production board */ - n = "InfiniPath_HT-465"; + /* small form factor production board */ + n = "InfiniPath_QHT7140"; break; case 8: n = "LS/X-1"; @@ -633,7 +634,7 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name, n = "OEM_Board_2"; break; case 11: - n = "InfiniPath_HT-470"; + n = "InfiniPath_HT-470"; /* obsoleted */ break; case 12: n = "OEM_Board_4"; @@ -641,7 +642,7 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name, default: /* don't know, just print the number */ ipath_dev_err(dd, "Don't yet know about board " "with ID %u\n", boardrev); - snprintf(name, namelen, "Unknown_InfiniPath_HT-4xx_%u", + snprintf(name, namelen, "Unknown_InfiniPath_QHT7xxx_%u", boardrev); break; } @@ -650,11 +651,10 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name, if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 || dd->ipath_minrev > 3)) { /* - * This version of the driver only supports the HT-400 - * Rev 3.2 + * This version of the driver only supports Rev 3.2 and 3.3 */ ipath_dev_err(dd, - "Unsupported HT-400 revision %u.%u!\n", + "Unsupported InfiniPath hardware revision %u.%u!\n", dd->ipath_majrev, dd->ipath_minrev); ret = 1; goto bail; @@ -738,7 +738,7 @@ static void ipath_check_htlink(struct ipath_devdata *dd) static int ipath_setup_ht_reset(struct ipath_devdata *dd) { - ipath_dbg("No reset possible for HT-400\n"); + ipath_dbg("No reset possible for this InfiniPath hardware\n"); return 0; } @@ -925,7 +925,7 @@ static int set_int_handler(struct ipath_devdata *dd, struct pci_dev *pdev, /* * kernels with CONFIG_PCI_MSI set the vector in the irq field of - * struct pci_device, so we use that to program the HT-400 internal + * struct pci_device, so 
we use that to program the internal * interrupt register (not config space) with that value. The BIOS * must still have done the basic MSI setup. */ @@ -1013,7 +1013,7 @@ bail: * @dd: the infinipath device * * Called during driver unload. - * This is currently a nop for the HT-400, not for all chips + * This is currently a nop for the HT chip, not for all chips */ static void ipath_setup_ht_cleanup(struct ipath_devdata *dd) { @@ -1290,6 +1290,15 @@ static int ipath_ht_bringup_serdes(struct ipath_devdata *dd) val &= ~INFINIPATH_XGXS_RESET; change = 1; } + if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) & + INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) { + /* need to compensate for Tx inversion in partner */ + val &= ~(INFINIPATH_XGXS_RX_POL_MASK << + INFINIPATH_XGXS_RX_POL_SHIFT); + val |= dd->ipath_rx_pol_inv << + INFINIPATH_XGXS_RX_POL_SHIFT; + change = 1; + } if (change) ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val); @@ -1470,7 +1479,7 @@ static int ipath_ht_early_init(struct ipath_devdata *dd) dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE; /* - * For HT-400, we allocate a somewhat overly large eager buffer, + * For HT, we allocate a somewhat overly large eager buffer, * such that we can guarantee that we can receive the largest * packet that we can send out. To truly support a 4KB MTU, * we need to bump this to a large value. To date, other than @@ -1531,7 +1540,7 @@ static int ipath_ht_early_init(struct ipath_devdata *dd) if(dd->ipath_boardrev == 5 && dd->ipath_serial[0] == '1' && dd->ipath_serial[1] == '2' && dd->ipath_serial[2] == '8') { /* - * Later production HT-460 has same changes as HT-465, so + * Later production QHT7040 has same changes as QHT7140, so * can use GPIO interrupts. They have serial #'s starting * with 128, rather than 112. */ @@ -1560,13 +1569,13 @@ static int ipath_ht_get_base_info(struct ipath_portdata *pd, void *kbase) } /** - * ipath_init_ht400_funcs - set up the chip-specific function pointers + * ipath_init_iba6110_funcs - set up the chip-specific function pointers * @dd: the infinipath device * * This is global, and is called directly at init to set up the * chip-specific function pointers for later use. */ -void ipath_init_ht400_funcs(struct ipath_devdata *dd) +void ipath_init_iba6110_funcs(struct ipath_devdata *dd) { dd->ipath_f_intrsetup = ipath_ht_intconfig; dd->ipath_f_bus = ipath_setup_ht_config; diff --git a/drivers/infiniband/hw/ipath/ipath_pe800.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c index b83f66d8262c..d86516d23df6 100644 --- a/drivers/infiniband/hw/ipath/ipath_pe800.c +++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c @@ -32,7 +32,7 @@ */ /* * This file contains all of the code that is specific to the - * InfiniPath PE-800 chip. + * InfiniPath PCIe chip. */ #include <linux/interrupt.h> @@ -45,9 +45,9 @@ /* * This file contains all the chip-specific register information and - * access functions for the QLogic InfiniPath PE800, the PCI-Express chip. + * access functions for the QLogic InfiniPath PCI-Express chip. * - * This lists the InfiniPath PE800 registers, in the actual chip layout. + * This lists the InfiniPath registers, in the actual chip layout. * This structure should never be directly accessed. 
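The rename to ipath_init_iba6110_funcs() underlines how the driver isolates chip differences: each chip's init routine fills in function pointers on the shared device structure, and the core code only ever calls through them. A skeleton of the dispatch (abbreviated, hypothetical names):

    struct sketch_devdata {
            int  (*f_intrsetup)(struct sketch_devdata *dd);
            void (*f_cleanup)(struct sketch_devdata *dd);
    };

    static int  sketch_ht_intrsetup(struct sketch_devdata *dd) { return 0; }
    static void sketch_ht_cleanup(struct sketch_devdata *dd) { }

    /* chosen at probe time from the PCI device id */
    void sketch_init_iba6110_funcs(struct sketch_devdata *dd)
    {
            dd->f_intrsetup = sketch_ht_intrsetup;
            dd->f_cleanup   = sketch_ht_cleanup;
    }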
*/ struct _infinipath_do_not_use_kernel_regs { @@ -213,7 +213,6 @@ static const struct ipath_kregs ipath_pe_kregs = { .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0), .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0), - /* This group is pe-800-specific; and used only in this file */ /* The rcvpktled register controls one of the debug port signals, so * a packet activity LED can be connected to it. */ .kr_rcvpktledcnt = IPATH_KREG_OFFSET(RcvPktLEDCnt), @@ -364,8 +363,9 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg, * and we get here multiple times */ if (dd->ipath_flags & IPATH_INITTED) { - ipath_dev_err(dd, "Fatal Error (freeze " - "mode), no longer usable\n"); + ipath_dev_err(dd, "Fatal Hardware Error (freeze " + "mode), no longer usable, SN %.16s\n", + dd->ipath_serial); isfatal = 1; } /* @@ -388,7 +388,7 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg, *msg = '\0'; if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) { - strlcat(msg, "[Memory BIST test failed, PE-800 unusable]", + strlcat(msg, "[Memory BIST test failed, InfiniPath hardware unusable]", msgl); /* ignore from now on, so disable until driver reloaded */ *dd->ipath_statusp |= IPATH_STATUS_HWERROR; @@ -433,7 +433,7 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg, if (hwerrs & _IPATH_PLL_FAIL) { snprintf(bitsmsg, sizeof bitsmsg, - "[PLL failed (%llx), PE-800 unusable]", + "[PLL failed (%llx), InfiniPath hardware unusable]", (unsigned long long) hwerrs & _IPATH_PLL_FAIL); strlcat(msg, bitsmsg, msgl); /* ignore from now on, so disable until driver reloaded */ @@ -511,22 +511,25 @@ static int ipath_pe_boardname(struct ipath_devdata *dd, char *name, n = "InfiniPath_Emulation"; break; case 1: - n = "InfiniPath_PE-800-Bringup"; + n = "InfiniPath_QLE7140-Bringup"; break; case 2: - n = "InfiniPath_PE-880"; + n = "InfiniPath_QLE7140"; break; case 3: - n = "InfiniPath_PE-850"; + n = "InfiniPath_QMI7140"; break; case 4: - n = "InfiniPath_PE-860"; + n = "InfiniPath_QEM7140"; + break; + case 5: + n = "InfiniPath_QMH7140"; break; default: ipath_dev_err(dd, "Don't yet know about board with ID %u\n", boardrev); - snprintf(name, namelen, "Unknown_InfiniPath_PE-8xx_%u", + snprintf(name, namelen, "Unknown_InfiniPath_PCIe_%u", boardrev); break; } @@ -534,7 +537,7 @@ static int ipath_pe_boardname(struct ipath_devdata *dd, char *name, snprintf(name, namelen, "%s", n); if (dd->ipath_majrev != 4 || !dd->ipath_minrev || dd->ipath_minrev>2) { - ipath_dev_err(dd, "Unsupported PE-800 revision %u.%u!\n", + ipath_dev_err(dd, "Unsupported InfiniPath hardware revision %u.%u!\n", dd->ipath_majrev, dd->ipath_minrev); ret = 1; } else @@ -651,6 +654,15 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd) val &= ~INFINIPATH_XGXS_RESET; change = 1; } + if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) & + INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) { + /* need to compensate for Tx inversion in partner */ + val &= ~(INFINIPATH_XGXS_RX_POL_MASK << + INFINIPATH_XGXS_RX_POL_SHIFT); + val |= dd->ipath_rx_pol_inv << + INFINIPATH_XGXS_RX_POL_SHIFT; + change = 1; + } if (change) ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val); @@ -705,7 +717,7 @@ static void ipath_pe_quiet_serdes(struct ipath_devdata *dd) ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); } -/* this is not yet needed on the PE800, so just return 0. */ +/* this is not yet needed on this chip, so just return 0. 
*/ static int ipath_pe_intconfig(struct ipath_devdata *dd) { return 0; } @@ -759,8 +771,8 @@ static void ipath_setup_pe_setextled(struct ipath_devdata *dd, u64 lst, * * This is called during driver unload. * We do the pci_disable_msi here, not in generic code, because it - * isn't used for the HT-400. If we do end up needing pci_enable_msi - * at some point in the future for HT-400, we'll move the call back + * isn't used for the HT chips. If we do end up needing pci_enable_msi + * at some point in the future for HT, we'll move the call back * into the main init_one code. */ static void ipath_setup_pe_cleanup(struct ipath_devdata *dd) @@ -780,10 +792,10 @@ * late in 2.6.16). * All that can be done is to edit the kernel source to remove the quirk * check until that is fixed. - * We do not need to call enable_msi() for our HyperTransport chip (HT-400), - * even those it uses MSI, and we want to avoid the quirk warning, so - * So we call enable_msi only for the PE-800. If we do end up needing - * pci_enable_msi at some point in the future for HT-400, we'll move the + * We do not need to call enable_msi() for our HyperTransport chip, + * even though it uses MSI, and we want to avoid the quirk warning, + * so we call enable_msi only for PCIe. If we do end up needing + * pci_enable_msi at some point in the future for HT, we'll move the * call back into the main init_one code. * We save the msi lo and hi values, so we can restore them after * chip reset (the kernel PCI infrastructure doesn't yet handle that @@ -971,8 +983,7 @@ static int ipath_setup_pe_reset(struct ipath_devdata *dd) int ret; /* Use ERROR so it shows up in logs, etc. */ - ipath_dev_err(dd, "Resetting PE-800 unit %u\n", - dd->ipath_unit); + ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit); /* keep chip from being accessed in a few places */ dd->ipath_flags &= ~(IPATH_INITTED|IPATH_PRESENT); val = dd->ipath_control | INFINIPATH_C_RESET; @@ -1078,7 +1089,7 @@ static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr, * @port: the port * * clear all TID entries for a port, expected and eager. - * Used from ipath_close(). On PE800, TIDs are only 32 bits, + * Used from ipath_close(). On this chip, TIDs are only 32 bits, * not 64, but they are still on 64 bit boundaries, so tidbase * is declared as u64 * for the pointer math, even though we write 32 bits */ @@ -1148,9 +1159,9 @@ static int ipath_pe_early_init(struct ipath_devdata *dd) dd->ipath_flags |= IPATH_4BYTE_TID; /* - * For openib, we need to be able to handle an IB header of 96 bytes - * or 24 dwords. HT-400 has arbitrary sized receive buffers, so we - * made them the same size as the PIO buffers. The PE-800 does not + * For openfabrics, we need to be able to handle an IB header of + * 24 dwords. The HT chip has arbitrary sized receive buffers, so we + * made them the same size as the PIO buffers. This chip does not * handle arbitrary size buffers, so we need the header large enough * to handle largest IB header, but still have room for a 2KB MTU * standard IB packet. @@ -1158,11 +1169,10 @@ dd->ipath_rcvhdrentsize = 24; dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE; - /* For HT-400, we allocate a somewhat overly large eager buffer, - * such that we can guarantee that we can receive the largest packet - * that we can send out. To truly support a 4KB MTU, we need to - * bump this to a larger value.
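The ipath_pe_clear_tids() comment above describes a layout worth spelling out: each TID entry occupies a 64-bit slot but only its low 32 bits are ever stored, so the base pointer is u64 * purely for the address arithmetic. In outline (a sketch only; the names are hypothetical):

    #include <linux/types.h>
    #include <asm/io.h>

    /* entries sit on 64-bit boundaries, but only the low 32 bits are written */
    static void sketch_clear_tids(u64 __iomem *tidbase, u32 ntids, u32 tidinvalid)
    {
            u32 i;

            for (i = 0; i < ntids; i++)
                    writel(tidinvalid, (u32 __iomem *)&tidbase[i]);
    }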
We'll do this when I get around to - * testing 4KB sends on the PE-800, which I have not yet done. + /* + * To truly support a 4KB MTU (for usermode), we need to + * bump this to a larger value. For now, we use them for + * the kernel only. */ dd->ipath_rcvegrbufsize = 2048; /* @@ -1175,9 +1185,9 @@ static int ipath_pe_early_init(struct ipath_devdata *dd) dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen; /* - * For PE-800, we can request a receive interrupt for 1 or + * We can request a receive interrupt for 1 or * more packets from current offset. For now, we set this - * up for a single packet, to match the HT-400 behavior. + * up for a single packet. */ dd->ipath_rhdrhead_intr_off = 1ULL<<32; @@ -1216,13 +1226,13 @@ static int ipath_pe_get_base_info(struct ipath_portdata *pd, void *kbase) } /** - * ipath_init_pe800_funcs - set up the chip-specific function pointers + * ipath_init_iba6120_funcs - set up the chip-specific function pointers * @dd: the infinipath device * * This is global, and is called directly at init to set up the * chip-specific function pointers for later use. */ -void ipath_init_pe800_funcs(struct ipath_devdata *dd) +void ipath_init_iba6120_funcs(struct ipath_devdata *dd) { dd->ipath_f_intrsetup = ipath_pe_intconfig; dd->ipath_f_bus = ipath_setup_pe_config; diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c index 414cdd1d80a6..44669dc2e22d 100644 --- a/drivers/infiniband/hw/ipath/ipath_init_chip.c +++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c @@ -53,8 +53,8 @@ module_param_named(cfgports, ipath_cfgports, ushort, S_IRUGO); MODULE_PARM_DESC(cfgports, "Set max number of ports to use"); /* - * Number of buffers reserved for driver (layered drivers and SMA - * send). Reserved at end of buffer list. Initialized based on + * Number of buffers reserved for driver (verbs and layered drivers.) + * Reserved at end of buffer list. Initialized based on * number of PIO buffers if not set via module interface. * The problem with this is that it's global, but we'll use different * numbers for different chip types. So the default value is not @@ -80,7 +80,7 @@ MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver"); * * Allocate the eager TID buffers and program them into infinipath. * We use the network layer alloc_skb() allocator to allocate the - * memory, and either use the buffers as is for things like SMA + * memory, and either use the buffers as is for things like verbs * packets, or pass the buffers up to the ipath layered driver and * thence the network layer, replacing them as we do so (see * ipath_rcv_layer()). @@ -240,7 +240,11 @@ static int init_chip_first(struct ipath_devdata *dd, "only supports %u\n", ipath_cfgports, dd->ipath_portcnt); } - dd->ipath_pd = kzalloc(sizeof(*dd->ipath_pd) * dd->ipath_cfgports, + /* + * Allocate full portcnt array, rather than just cfgports, because + * cleanup iterates across all possible ports. 
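The ipath_rhdrhead_intr_off value of 1ULL<<32 set above ties back to the earlier ipath_kreceive() hunk: per the patch, a write to the receive-header head register with bit 32 set asks the PCIe chip for an interrupt on that update, so the driver ORs the bit in only for the last packet of a batch. Sketched as a pure helper (hypothetical names):

    #include <linux/types.h>

    #define SKETCH_RCVHDRHEAD_INTR_REQ (1ULL << 32)  /* bit 32 = interrupt request */

    /* compose the value written to the rcvhdrhead register */
    static u64 sketch_rcvhdrhead(u32 head, int last_in_batch)
    {
            u64 lval = head;

            if (last_in_batch)
                    lval |= SKETCH_RCVHDRHEAD_INTR_REQ;
            return lval;
    }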
+ */ + dd->ipath_pd = kzalloc(sizeof(*dd->ipath_pd) * dd->ipath_portcnt, GFP_KERNEL); if (!dd->ipath_pd) { @@ -446,9 +450,9 @@ static void enable_chip(struct ipath_devdata *dd, u32 val; int i; - if (!reinit) { - init_waitqueue_head(&ipath_sma_state_wait); - } + if (!reinit) + init_waitqueue_head(&ipath_state_wait); + ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl); @@ -687,7 +691,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit) dd->ipath_pioavregs = ALIGN(val, sizeof(u64) * BITS_PER_BYTE / 2) / (sizeof(u64) * BITS_PER_BYTE / 2); if (ipath_kpiobufs == 0) { - /* not set by user, or set explictly to default */ + /* not set by user (this is default) */ if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) > 128) kpiobufs = 32; else @@ -946,6 +950,7 @@ static int ipath_set_kpiobufs(const char *str, struct kernel_param *kp) dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - val; } + ipath_kpiobufs = val; ret = 0; bail: spin_unlock_irqrestore(&ipath_devs_lock, flags); diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c index 280e732660a1..49bf7bb15b04 100644 --- a/drivers/infiniband/hw/ipath/ipath_intr.c +++ b/drivers/infiniband/hw/ipath/ipath_intr.c @@ -34,7 +34,7 @@ #include <linux/pci.h> #include "ipath_kernel.h" -#include "ipath_layer.h" +#include "ipath_verbs.h" #include "ipath_common.h" /* These are all rcv-related errors which we want to count for stats */ @@ -201,7 +201,7 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd, ib_linkstate(lstate)); } else - ipath_cdbg(SMA, "Unit %u link state %s, last " + ipath_cdbg(VERBOSE, "Unit %u link state %s, last " "was %s\n", dd->ipath_unit, ib_linkstate(lstate), ib_linkstate((unsigned) @@ -213,7 +213,7 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd, if (lstate == IPATH_IBSTATE_INIT || lstate == IPATH_IBSTATE_ARM || lstate == IPATH_IBSTATE_ACTIVE) - ipath_cdbg(SMA, "Unit %u link state down" + ipath_cdbg(VERBOSE, "Unit %u link state down" " (state 0x%x), from %s\n", dd->ipath_unit, (u32)val & IPATH_IBSTATE_MASK, @@ -269,7 +269,7 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd, INFINIPATH_IBCS_LINKSTATE_MASK) == INFINIPATH_IBCS_L_STATE_ACTIVE) /* if from up to down be more vocal */ - ipath_cdbg(SMA, + ipath_cdbg(VERBOSE, "Unit %u link now down (%s)\n", dd->ipath_unit, ipath_ibcstatus_str[ltstate]); @@ -289,8 +289,6 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd, *dd->ipath_statusp |= IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF; dd->ipath_f_setextled(dd, lstate, ltstate); - - __ipath_layer_intr(dd, IPATH_LAYER_INT_IF_UP); } else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_INIT) { /* * set INIT and DOWN. 
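The one-line ipath_kpiobufs = val added to ipath_set_kpiobufs() above fixes a subtle module-parameter bug: a custom set handler bypasses the normal store into the backing variable, so unless it assigns the value itself, later reads of the parameter see the stale number. Reduced form (hypothetical names; the real handler also range-checks against the available PIO buffers):

    #include <linux/kernel.h>
    #include <linux/moduleparam.h>
    #include <linux/errno.h>

    static unsigned sketch_kpiobufs;

    static int sketch_set_kpiobufs(const char *str, struct kernel_param *kp)
    {
            char *end;
            unsigned long val = simple_strtoul(str, &end, 0);

            if (!*str || *end)
                    return -EINVAL;
            /* ... validate against the available PIO buffers, apply ... */
            sketch_kpiobufs = val;  /* the fix: record the accepted value */
            return 0;
    }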
Down is checked by most of the other @@ -598,11 +596,11 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs) if (!noprint && *msg) ipath_dev_err(dd, "%s error\n", msg); - if (dd->ipath_sma_state_wanted & dd->ipath_flags) { - ipath_cdbg(VERBOSE, "sma wanted state %x, iflags now %x, " - "waking\n", dd->ipath_sma_state_wanted, + if (dd->ipath_state_wanted & dd->ipath_flags) { + ipath_cdbg(VERBOSE, "driver wanted state %x, iflags now %x, " + "waking\n", dd->ipath_state_wanted, dd->ipath_flags); - wake_up_interruptible(&ipath_sma_state_wait); + wake_up_interruptible(&ipath_state_wait); } return chkerrpkts; @@ -708,11 +706,7 @@ static void handle_layer_pioavail(struct ipath_devdata *dd) { int ret; - ret = __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE); - if (ret > 0) - goto set; - - ret = __ipath_verbs_piobufavail(dd); + ret = ipath_ib_piobufavail(dd->verbs_dev); if (ret > 0) goto set; diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h index e9f374fb641e..a8a56276ff1d 100644 --- a/drivers/infiniband/hw/ipath/ipath_kernel.h +++ b/drivers/infiniband/hw/ipath/ipath_kernel.h @@ -132,12 +132,6 @@ struct _ipath_layer { void *l_arg; }; -/* Verbs layer interface */ -struct _verbs_layer { - void *l_arg; - struct timer_list l_timer; -}; - struct ipath_devdata { struct list_head ipath_list; @@ -198,7 +192,8 @@ struct ipath_devdata { void (*ipath_f_setextled)(struct ipath_devdata *, u64, u64); /* fill out chip-specific fields */ int (*ipath_f_get_base_info)(struct ipath_portdata *, void *); - struct _verbs_layer verbs_layer; + struct ipath_ibdev *verbs_dev; + struct timer_list verbs_timer; /* total dwords sent (summed from counter) */ u64 ipath_sword; /* total dwords rcvd (summed from counter) */ @@ -241,7 +236,7 @@ struct ipath_devdata { u64 ipath_tidtemplate; /* value to write to free TIDs */ u64 ipath_tidinvalid; - /* PE-800 rcv interrupt setup */ + /* IBA6120 rcv interrupt setup */ u64 ipath_rhdrhead_intr_off; /* size of memory at ipath_kregbase */ @@ -250,8 +245,8 @@ struct ipath_devdata { u32 ipath_pioavregs; /* IPATH_POLL, etc. */ u32 ipath_flags; - /* ipath_flags sma is waiting for */ - u32 ipath_sma_state_wanted; + /* ipath_flags driver is waiting for */ + u32 ipath_state_wanted; /* last buffer for user use, first buf for kernel use is this * index. 
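The handle_errors() hunk above is the waker half of the exchange sketched earlier for ipath_wait_linkstate(): the interrupt path publishes new bits in the flags word, then wakes any sleeper whose wanted bits are now satisfied. Continuing the earlier sketch's hypothetical names (dev_flags, state_wait), plus a state_wanted word:

    static u32 state_wanted;                /* bits a sleeper is waiting for */

    /* waker half: publish new state bits, then wake matching sleepers */
    static void sketch_post_state(u32 new_bits)
    {
            dev_flags |= new_bits;
            if (state_wanted & dev_flags)
                    wake_up_interruptible(&state_wait);
    }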
*/ u32 ipath_lastport_piobuf; @@ -311,10 +306,6 @@ struct ipath_devdata { u32 ipath_pcibar0; /* so we can rewrite it after a chip reset */ u32 ipath_pcibar1; - /* sequential tries for SMA send and no bufs */ - u32 ipath_nosma_bufs; - /* duration (seconds) ipath_nosma_bufs set */ - u32 ipath_nosma_secs; /* HT/PCI Vendor ID (here for NodeInfo) */ u16 ipath_vendorid; @@ -512,6 +503,8 @@ struct ipath_devdata { u8 ipath_pci_cacheline; /* LID mask control */ u8 ipath_lmc; + /* Rx Polarity inversion (compensate for ~tx on partner) */ + u8 ipath_rx_pol_inv; /* local link integrity counter */ u32 ipath_lli_counter; @@ -523,18 +516,6 @@ extern struct list_head ipath_dev_list; extern spinlock_t ipath_devs_lock; extern struct ipath_devdata *ipath_lookup(int unit); -extern u16 ipath_layer_rcv_opcode; -extern int __ipath_layer_intr(struct ipath_devdata *, u32); -extern int ipath_layer_intr(struct ipath_devdata *, u32); -extern int __ipath_layer_rcv(struct ipath_devdata *, void *, - struct sk_buff *); -extern int __ipath_layer_rcv_lid(struct ipath_devdata *, void *); -extern int __ipath_verbs_piobufavail(struct ipath_devdata *); -extern int __ipath_verbs_rcv(struct ipath_devdata *, void *, void *, u32); - -void ipath_layer_add(struct ipath_devdata *); -void ipath_layer_remove(struct ipath_devdata *); - int ipath_init_chip(struct ipath_devdata *, int); int ipath_enable_wc(struct ipath_devdata *dd); void ipath_disable_wc(struct ipath_devdata *dd); @@ -549,9 +530,8 @@ void ipath_cdev_cleanup(struct cdev **cdevp, int ipath_diag_add(struct ipath_devdata *); void ipath_diag_remove(struct ipath_devdata *); -void ipath_diag_bringup_link(struct ipath_devdata *); -extern wait_queue_head_t ipath_sma_state_wait; +extern wait_queue_head_t ipath_state_wait; int ipath_user_add(struct ipath_devdata *dd); void ipath_user_remove(struct ipath_devdata *dd); @@ -582,12 +562,14 @@ void ipath_free_pddata(struct ipath_devdata *, struct ipath_portdata *); int ipath_parse_ushort(const char *str, unsigned short *valp); -int ipath_wait_linkstate(struct ipath_devdata *, u32, int); -void ipath_set_ib_lstate(struct ipath_devdata *, int); void ipath_kreceive(struct ipath_devdata *); int ipath_setrcvhdrsize(struct ipath_devdata *, unsigned); int ipath_reset_device(int); void ipath_get_faststats(unsigned long); +int ipath_set_linkstate(struct ipath_devdata *, u8); +int ipath_set_mtu(struct ipath_devdata *, u16); +int ipath_set_lid(struct ipath_devdata *, u32, u8); +int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv); /* for use in system calls, where we want to know device type, etc. 
*/ #define port_fp(fp) ((struct ipath_portdata *) (fp)->private_data) @@ -642,10 +624,8 @@ void ipath_free_data(struct ipath_portdata *dd); int ipath_waitfor_mdio_cmdready(struct ipath_devdata *); int ipath_waitfor_complete(struct ipath_devdata *, ipath_kreg, u64, u64 *); u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32 *); -/* init PE-800-specific func */ -void ipath_init_pe800_funcs(struct ipath_devdata *); -/* init HT-400-specific func */ -void ipath_init_ht400_funcs(struct ipath_devdata *); +void ipath_init_iba6120_funcs(struct ipath_devdata *); +void ipath_init_iba6110_funcs(struct ipath_devdata *); void ipath_get_eeprom_info(struct ipath_devdata *); u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg); @@ -801,7 +781,7 @@ static inline u32 ipath_read_creg32(const struct ipath_devdata *dd, struct device_driver; -extern const char ipath_core_version[]; +extern const char ib_ipath_version[]; int ipath_driver_create_group(struct device_driver *); void ipath_driver_remove_group(struct device_driver *); @@ -810,6 +790,9 @@ int ipath_device_create_group(struct device *, struct ipath_devdata *); void ipath_device_remove_group(struct device *, struct ipath_devdata *); int ipath_expose_reset(struct device *); +int ipath_diagpkt_add(void); +void ipath_diagpkt_remove(void); + int ipath_init_ipathfs(void); void ipath_exit_ipathfs(void); int ipathfs_add_device(struct ipath_devdata *); @@ -831,10 +814,10 @@ const char *ipath_get_unit_name(int unit); extern struct mutex ipath_mutex; -#define IPATH_DRV_NAME "ipath_core" +#define IPATH_DRV_NAME "ib_ipath" #define IPATH_MAJOR 233 #define IPATH_USER_MINOR_BASE 0 -#define IPATH_SMA_MINOR 128 +#define IPATH_DIAGPKT_MINOR 127 #define IPATH_DIAG_MINOR_BASE 129 #define IPATH_NMINORS 255 diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c index a5ca279370aa..ba1b93226caa 100644 --- a/drivers/infiniband/hw/ipath/ipath_keys.c +++ b/drivers/infiniband/hw/ipath/ipath_keys.c @@ -34,6 +34,7 @@ #include <asm/io.h> #include "ipath_verbs.h" +#include "ipath_kernel.h" /** * ipath_alloc_lkey - allocate an lkey @@ -60,7 +61,7 @@ int ipath_alloc_lkey(struct ipath_lkey_table *rkt, struct ipath_mregion *mr) r = (r + 1) & (rkt->max - 1); if (r == n) { spin_unlock_irqrestore(&rkt->lock, flags); - _VERBS_INFO("LKEY table full\n"); + ipath_dbg(KERN_INFO "LKEY table full\n"); ret = 0; goto bail; } diff --git a/drivers/infiniband/hw/ipath/ipath_layer.c b/drivers/infiniband/hw/ipath/ipath_layer.c index b28c6f81c731..e46aa4ed2a7e 100644 --- a/drivers/infiniband/hw/ipath/ipath_layer.c +++ b/drivers/infiniband/hw/ipath/ipath_layer.c @@ -42,26 +42,20 @@ #include "ipath_kernel.h" #include "ipath_layer.h" +#include "ipath_verbs.h" #include "ipath_common.h" /* Acquire before ipath_devs_lock. 
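The ipath_alloc_lkey() hunk above (its failure message now routed through ipath_dbg()) relies on the lkey table size being a power of two: the scan wraps around with a mask and gives up when it returns to its starting slot. The search skeleton, with generic names:

    #include <linux/types.h>

    /* find a free slot in a power-of-two sized table; -1 when full */
    static int sketch_find_free(void **table, u32 size, u32 start)
    {
            u32 r = start, n = start;

            while (table[r]) {
                    r = (r + 1) & (size - 1);   /* wrap via mask */
                    if (r == n)
                            return -1;          /* scanned the whole table */
            }
            return r;
    }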
*/ static DEFINE_MUTEX(ipath_layer_mutex); -static int ipath_verbs_registered; - u16 ipath_layer_rcv_opcode; static int (*layer_intr)(void *, u32); static int (*layer_rcv)(void *, void *, struct sk_buff *); static int (*layer_rcv_lid)(void *, void *); -static int (*verbs_piobufavail)(void *); -static void (*verbs_rcv)(void *, void *, void *, u32); static void *(*layer_add_one)(int, struct ipath_devdata *); static void (*layer_remove_one)(void *); -static void *(*verbs_add_one)(int, struct ipath_devdata *); -static void (*verbs_remove_one)(void *); -static void (*verbs_timer_cb)(void *); int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg) { @@ -107,302 +101,16 @@ int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr) return ret; } -int __ipath_verbs_piobufavail(struct ipath_devdata *dd) -{ - int ret = -ENODEV; - - if (dd->verbs_layer.l_arg && verbs_piobufavail) - ret = verbs_piobufavail(dd->verbs_layer.l_arg); - - return ret; -} - -int __ipath_verbs_rcv(struct ipath_devdata *dd, void *rc, void *ebuf, - u32 tlen) -{ - int ret = -ENODEV; - - if (dd->verbs_layer.l_arg && verbs_rcv) { - verbs_rcv(dd->verbs_layer.l_arg, rc, ebuf, tlen); - ret = 0; - } - - return ret; -} - -int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 newstate) +void ipath_layer_lid_changed(struct ipath_devdata *dd) { - u32 lstate; - int ret; - - switch (newstate) { - case IPATH_IB_LINKDOWN: - ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL << - INFINIPATH_IBCC_LINKINITCMD_SHIFT); - /* don't wait */ - ret = 0; - goto bail; - - case IPATH_IB_LINKDOWN_SLEEP: - ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP << - INFINIPATH_IBCC_LINKINITCMD_SHIFT); - /* don't wait */ - ret = 0; - goto bail; - - case IPATH_IB_LINKDOWN_DISABLE: - ipath_set_ib_lstate(dd, - INFINIPATH_IBCC_LINKINITCMD_DISABLE << - INFINIPATH_IBCC_LINKINITCMD_SHIFT); - /* don't wait */ - ret = 0; - goto bail; - - case IPATH_IB_LINKINIT: - if (dd->ipath_flags & IPATH_LINKINIT) { - ret = 0; - goto bail; - } - ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT << - INFINIPATH_IBCC_LINKCMD_SHIFT); - lstate = IPATH_LINKINIT; - break; - - case IPATH_IB_LINKARM: - if (dd->ipath_flags & IPATH_LINKARMED) { - ret = 0; - goto bail; - } - if (!(dd->ipath_flags & - (IPATH_LINKINIT | IPATH_LINKACTIVE))) { - ret = -EINVAL; - goto bail; - } - ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED << - INFINIPATH_IBCC_LINKCMD_SHIFT); - /* - * Since the port can transition to ACTIVE by receiving - * a non VL 15 packet, wait for either state. - */ - lstate = IPATH_LINKARMED | IPATH_LINKACTIVE; - break; - - case IPATH_IB_LINKACTIVE: - if (dd->ipath_flags & IPATH_LINKACTIVE) { - ret = 0; - goto bail; - } - if (!(dd->ipath_flags & IPATH_LINKARMED)) { - ret = -EINVAL; - goto bail; - } - ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE << - INFINIPATH_IBCC_LINKCMD_SHIFT); - lstate = IPATH_LINKACTIVE; - break; - - default: - ipath_dbg("Invalid linkstate 0x%x requested\n", newstate); - ret = -EINVAL; - goto bail; - } - ret = ipath_wait_linkstate(dd, lstate, 2000); - -bail: - return ret; -} - -EXPORT_SYMBOL_GPL(ipath_layer_set_linkstate); - -/** - * ipath_layer_set_mtu - set the MTU - * @dd: the infinipath device - * @arg: the new MTU - * - * we can handle "any" incoming size, the issue here is whether we - * need to restrict our outgoing size. For now, we don't do any - * sanity checking on this, and we don't deal with what happens to - * programs that are already running when the size changes. 
- * NOTE: changing the MTU will usually cause the IBC to go back to - * link initialize (IPATH_IBSTATE_INIT) state... - */ -int ipath_layer_set_mtu(struct ipath_devdata *dd, u16 arg) -{ - u32 piosize; - int changed = 0; - int ret; - - /* - * mtu is IB data payload max. It's the largest power of 2 less - * than piosize (or even larger, since it only really controls the - * largest we can receive; we can send the max of the mtu and - * piosize). We check that it's one of the valid IB sizes. - */ - if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 && - arg != 4096) { - ipath_dbg("Trying to set invalid mtu %u, failing\n", arg); - ret = -EINVAL; - goto bail; - } - if (dd->ipath_ibmtu == arg) { - ret = 0; /* same as current */ - goto bail; - } - - piosize = dd->ipath_ibmaxlen; - dd->ipath_ibmtu = arg; - - if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) { - /* Only if it's not the initial value (or reset to it) */ - if (piosize != dd->ipath_init_ibmaxlen) { - dd->ipath_ibmaxlen = piosize; - changed = 1; - } - } else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) { - piosize = arg + IPATH_PIO_MAXIBHDR; - ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x " - "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize, - arg); - dd->ipath_ibmaxlen = piosize; - changed = 1; - } - - if (changed) { - /* - * set the IBC maxpktlength to the size of our pio - * buffers in words - */ - u64 ibc = dd->ipath_ibcctrl; - ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK << - INFINIPATH_IBCC_MAXPKTLEN_SHIFT); - - piosize = piosize - 2 * sizeof(u32); /* ignore pbc */ - dd->ipath_ibmaxlen = piosize; - piosize /= sizeof(u32); /* in words */ - /* - * for ICRC, which we only send in diag test pkt mode, and - * we don't need to worry about that for mtu - */ - piosize += 1; - - ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT; - dd->ipath_ibcctrl = ibc; - ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, - dd->ipath_ibcctrl); - dd->ipath_f_tidtemplate(dd); - } - - ret = 0; - -bail: - return ret; -} - -EXPORT_SYMBOL_GPL(ipath_layer_set_mtu); - -int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc) -{ - dd->ipath_lid = arg; - dd->ipath_lmc = lmc; - mutex_lock(&ipath_layer_mutex); if (dd->ipath_layer.l_arg && layer_intr) layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID); mutex_unlock(&ipath_layer_mutex); - - return 0; -} - -EXPORT_SYMBOL_GPL(ipath_set_lid); - -int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid) -{ - /* XXX - need to inform anyone who cares this just happened. 
*/ - dd->ipath_guid = guid; - return 0; -} - -EXPORT_SYMBOL_GPL(ipath_layer_set_guid); - -__be64 ipath_layer_get_guid(struct ipath_devdata *dd) -{ - return dd->ipath_guid; -} - -EXPORT_SYMBOL_GPL(ipath_layer_get_guid); - -u32 ipath_layer_get_nguid(struct ipath_devdata *dd) -{ - return dd->ipath_nguid; -} - -EXPORT_SYMBOL_GPL(ipath_layer_get_nguid); - -u32 ipath_layer_get_majrev(struct ipath_devdata *dd) -{ - return dd->ipath_majrev; } -EXPORT_SYMBOL_GPL(ipath_layer_get_majrev); - -u32 ipath_layer_get_minrev(struct ipath_devdata *dd) -{ - return dd->ipath_minrev; -} - -EXPORT_SYMBOL_GPL(ipath_layer_get_minrev); - -u32 ipath_layer_get_pcirev(struct ipath_devdata *dd) -{ - return dd->ipath_pcirev; -} - -EXPORT_SYMBOL_GPL(ipath_layer_get_pcirev); - -u32 ipath_layer_get_flags(struct ipath_devdata *dd) -{ - return dd->ipath_flags; -} - -EXPORT_SYMBOL_GPL(ipath_layer_get_flags); - -struct device *ipath_layer_get_device(struct ipath_devdata *dd) -{ - return &dd->pcidev->dev; -} - -EXPORT_SYMBOL_GPL(ipath_layer_get_device); - -u16 ipath_layer_get_deviceid(struct ipath_devdata *dd) -{ - return dd->ipath_deviceid; -} - -EXPORT_SYMBOL_GPL(ipath_layer_get_deviceid); - -u32 ipath_layer_get_vendorid(struct ipath_devdata *dd) -{ - return dd->ipath_vendorid; -} - -EXPORT_SYMBOL_GPL(ipath_layer_get_vendorid); - -u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd) -{ - return dd->ipath_lastibcstat; -} - -EXPORT_SYMBOL_GPL(ipath_layer_get_lastibcstat); - -u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd) -{ - return dd->ipath_ibmtu; -} - -EXPORT_SYMBOL_GPL(ipath_layer_get_ibmtu); - void ipath_layer_add(struct ipath_devdata *dd) { mutex_lock(&ipath_layer_mutex); @@ -411,10 +119,6 @@ void ipath_layer_add(struct ipath_devdata *dd) dd->ipath_layer.l_arg = layer_add_one(dd->ipath_unit, dd); - if (verbs_add_one) - dd->verbs_layer.l_arg = - verbs_add_one(dd->ipath_unit, dd); - mutex_unlock(&ipath_layer_mutex); } @@ -427,11 +131,6 @@ void ipath_layer_remove(struct ipath_devdata *dd) dd->ipath_layer.l_arg = NULL; } - if (dd->verbs_layer.l_arg && verbs_remove_one) { - verbs_remove_one(dd->verbs_layer.l_arg); - dd->verbs_layer.l_arg = NULL; - } - mutex_unlock(&ipath_layer_mutex); } @@ -463,9 +162,6 @@ int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *), if (dd->ipath_layer.l_arg) continue; - if (!(*dd->ipath_statusp & IPATH_STATUS_SMA)) - *dd->ipath_statusp |= IPATH_STATUS_OIB_SMA; - spin_unlock_irqrestore(&ipath_devs_lock, flags); dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd); spin_lock_irqsave(&ipath_devs_lock, flags); @@ -509,107 +205,6 @@ void ipath_layer_unregister(void) EXPORT_SYMBOL_GPL(ipath_layer_unregister); -static void __ipath_verbs_timer(unsigned long arg) -{ - struct ipath_devdata *dd = (struct ipath_devdata *) arg; - - /* - * If port 0 receive packet interrupts are not available, or - * can be missed, poll the receive queue - */ - if (dd->ipath_flags & IPATH_POLL_RX_INTR) - ipath_kreceive(dd); - - /* Handle verbs layer timeouts. 
*/ - if (dd->verbs_layer.l_arg && verbs_timer_cb) - verbs_timer_cb(dd->verbs_layer.l_arg); - - mod_timer(&dd->verbs_layer.l_timer, jiffies + 1); -} - -/** - * ipath_verbs_register - verbs layer registration - * @l_piobufavail: callback for when PIO buffers become available - * @l_rcv: callback for receiving a packet - * @l_timer_cb: timer callback - * @ipath_devdata: device data structure is put here - */ -int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *), - void (*l_remove)(void *arg), - int (*l_piobufavail) (void *arg), - void (*l_rcv) (void *arg, void *rhdr, - void *data, u32 tlen), - void (*l_timer_cb) (void *arg)) -{ - struct ipath_devdata *dd, *tmp; - unsigned long flags; - - mutex_lock(&ipath_layer_mutex); - - verbs_add_one = l_add; - verbs_remove_one = l_remove; - verbs_piobufavail = l_piobufavail; - verbs_rcv = l_rcv; - verbs_timer_cb = l_timer_cb; - - spin_lock_irqsave(&ipath_devs_lock, flags); - - list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) { - if (!(dd->ipath_flags & IPATH_INITTED)) - continue; - - if (dd->verbs_layer.l_arg) - continue; - - spin_unlock_irqrestore(&ipath_devs_lock, flags); - dd->verbs_layer.l_arg = l_add(dd->ipath_unit, dd); - spin_lock_irqsave(&ipath_devs_lock, flags); - } - - spin_unlock_irqrestore(&ipath_devs_lock, flags); - mutex_unlock(&ipath_layer_mutex); - - ipath_verbs_registered = 1; - - return 0; -} - -EXPORT_SYMBOL_GPL(ipath_verbs_register); - -void ipath_verbs_unregister(void) -{ - struct ipath_devdata *dd, *tmp; - unsigned long flags; - - mutex_lock(&ipath_layer_mutex); - spin_lock_irqsave(&ipath_devs_lock, flags); - - list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) { - *dd->ipath_statusp &= ~IPATH_STATUS_OIB_SMA; - - if (dd->verbs_layer.l_arg && verbs_remove_one) { - spin_unlock_irqrestore(&ipath_devs_lock, flags); - verbs_remove_one(dd->verbs_layer.l_arg); - spin_lock_irqsave(&ipath_devs_lock, flags); - dd->verbs_layer.l_arg = NULL; - } - } - - spin_unlock_irqrestore(&ipath_devs_lock, flags); - - verbs_add_one = NULL; - verbs_remove_one = NULL; - verbs_piobufavail = NULL; - verbs_rcv = NULL; - verbs_timer_cb = NULL; - - ipath_verbs_registered = 0; - - mutex_unlock(&ipath_layer_mutex); -} - -EXPORT_SYMBOL_GPL(ipath_verbs_unregister); - int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax) { int ret; @@ -698,390 +293,6 @@ u16 ipath_layer_get_bcast(struct ipath_devdata *dd) EXPORT_SYMBOL_GPL(ipath_layer_get_bcast); -u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd) -{ - return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey); -} - -EXPORT_SYMBOL_GPL(ipath_layer_get_cr_errpkey); - -static void update_sge(struct ipath_sge_state *ss, u32 length) -{ - struct ipath_sge *sge = &ss->sge; - - sge->vaddr += length; - sge->length -= length; - sge->sge_length -= length; - if (sge->sge_length == 0) { - if (--ss->num_sge) - *sge = *ss->sg_list++; - } else if (sge->length == 0 && sge->mr != NULL) { - if (++sge->n >= IPATH_SEGSZ) { - if (++sge->m >= sge->mr->mapsz) - return; - sge->n = 0; - } - sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; - sge->length = sge->mr->map[sge->m]->segs[sge->n].length; - } -} - -#ifdef __LITTLE_ENDIAN -static inline u32 get_upper_bits(u32 data, u32 shift) -{ - return data >> shift; -} - -static inline u32 set_upper_bits(u32 data, u32 shift) -{ - return data << shift; -} - -static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off) -{ - data <<= ((sizeof(u32) - n) * BITS_PER_BYTE); - data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE); - return data; -} 
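The __ipath_verbs_timer() removed above used the classic pre-hrtimer idiom for a periodic tick: the handler does its work, then re-arms itself one jiffy ahead with mod_timer(), after being started once with init_timer()/add_timer() as the removed ipath_layer_enable_timer() further down does. The same 2.6-era timer API, reduced to a skeleton (hypothetical names):

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static struct timer_list sketch_timer;

    static void sketch_tick(unsigned long arg)
    {
            /* ... poll the receive queue, run verbs timeouts ... */
            mod_timer(&sketch_timer, jiffies + 1);  /* re-arm, one jiffy out */
    }

    static void sketch_tick_start(unsigned long arg)
    {
            init_timer(&sketch_timer);
            sketch_timer.function = sketch_tick;
            sketch_timer.data = arg;
            sketch_timer.expires = jiffies + 1;
            add_timer(&sketch_timer);
    }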
-#else -static inline u32 get_upper_bits(u32 data, u32 shift) -{ - return data << shift; -} - -static inline u32 set_upper_bits(u32 data, u32 shift) -{ - return data >> shift; -} - -static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off) -{ - data >>= ((sizeof(u32) - n) * BITS_PER_BYTE); - data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE); - return data; -} -#endif - -static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss, - u32 length) -{ - u32 extra = 0; - u32 data = 0; - u32 last; - - while (1) { - u32 len = ss->sge.length; - u32 off; - - BUG_ON(len == 0); - if (len > length) - len = length; - if (len > ss->sge.sge_length) - len = ss->sge.sge_length; - /* If the source address is not aligned, try to align it. */ - off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1); - if (off) { - u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr & - ~(sizeof(u32) - 1)); - u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE); - u32 y; - - y = sizeof(u32) - off; - if (len > y) - len = y; - if (len + extra >= sizeof(u32)) { - data |= set_upper_bits(v, extra * - BITS_PER_BYTE); - len = sizeof(u32) - extra; - if (len == length) { - last = data; - break; - } - __raw_writel(data, piobuf); - piobuf++; - extra = 0; - data = 0; - } else { - /* Clear unused upper bytes */ - data |= clear_upper_bytes(v, len, extra); - if (len == length) { - last = data; - break; - } - extra += len; - } - } else if (extra) { - /* Source address is aligned. */ - u32 *addr = (u32 *) ss->sge.vaddr; - int shift = extra * BITS_PER_BYTE; - int ushift = 32 - shift; - u32 l = len; - - while (l >= sizeof(u32)) { - u32 v = *addr; - - data |= set_upper_bits(v, shift); - __raw_writel(data, piobuf); - data = get_upper_bits(v, ushift); - piobuf++; - addr++; - l -= sizeof(u32); - } - /* - * We still have 'extra' number of bytes leftover. - */ - if (l) { - u32 v = *addr; - - if (l + extra >= sizeof(u32)) { - data |= set_upper_bits(v, shift); - len -= l + extra - sizeof(u32); - if (len == length) { - last = data; - break; - } - __raw_writel(data, piobuf); - piobuf++; - extra = 0; - data = 0; - } else { - /* Clear unused upper bytes */ - data |= clear_upper_bytes(v, l, - extra); - if (len == length) { - last = data; - break; - } - extra += l; - } - } else if (len == length) { - last = data; - break; - } - } else if (len == length) { - u32 w; - - /* - * Need to round up for the last dword in the - * packet. - */ - w = (len + 3) >> 2; - __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1); - piobuf += w - 1; - last = ((u32 *) ss->sge.vaddr)[w - 1]; - break; - } else { - u32 w = len >> 2; - - __iowrite32_copy(piobuf, ss->sge.vaddr, w); - piobuf += w; - - extra = len & (sizeof(u32) - 1); - if (extra) { - u32 v = ((u32 *) ss->sge.vaddr)[w]; - - /* Clear unused upper bytes */ - data = clear_upper_bytes(v, extra, 0); - } - } - update_sge(ss, len); - length -= len; - } - /* Update address before sending packet. */ - update_sge(ss, length); - /* must flush early everything before trigger word */ - ipath_flush_wc(); - __raw_writel(last, piobuf); - /* be sure trigger word is written */ - ipath_flush_wc(); -} - -/** - * ipath_verbs_send - send a packet from the verbs layer - * @dd: the infinipath device - * @hdrwords: the number of words in the header - * @hdr: the packet header - * @len: the length of the packet in bytes - * @ss: the SGE to send - * - * This is like ipath_sma_send_pkt() in that we need to be able to send - * packets after the chip is initialized (MADs) but also like - * ipath_layer_send_hdr() since its used by the verbs layer. 
- */ -int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords, - u32 *hdr, u32 len, struct ipath_sge_state *ss) -{ - u32 __iomem *piobuf; - u32 plen; - int ret; - - /* +1 is for the qword padding of pbc */ - plen = hdrwords + ((len + 3) >> 2) + 1; - if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) { - ipath_dbg("packet len 0x%x too long, failing\n", plen); - ret = -EINVAL; - goto bail; - } - - /* Get a PIO buffer to use. */ - piobuf = ipath_getpiobuf(dd, NULL); - if (unlikely(piobuf == NULL)) { - ret = -EBUSY; - goto bail; - } - - /* - * Write len to control qword, no flags. - * We have to flush after the PBC for correctness on some cpus - * or WC buffer can be written out of order. - */ - writeq(plen, piobuf); - ipath_flush_wc(); - piobuf += 2; - if (len == 0) { - /* - * If there is just the header portion, must flush before - * writing last word of header for correctness, and after - * the last header word (trigger word). - */ - __iowrite32_copy(piobuf, hdr, hdrwords - 1); - ipath_flush_wc(); - __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1); - ipath_flush_wc(); - ret = 0; - goto bail; - } - - __iowrite32_copy(piobuf, hdr, hdrwords); - piobuf += hdrwords; - - /* The common case is aligned and contained in one segment. */ - if (likely(ss->num_sge == 1 && len <= ss->sge.length && - !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) { - u32 w; - u32 *addr = (u32 *) ss->sge.vaddr; - - /* Update address before sending packet. */ - update_sge(ss, len); - /* Need to round up for the last dword in the packet. */ - w = (len + 3) >> 2; - __iowrite32_copy(piobuf, addr, w - 1); - /* must flush early everything before trigger word */ - ipath_flush_wc(); - __raw_writel(addr[w - 1], piobuf + w - 1); - /* be sure trigger word is written */ - ipath_flush_wc(); - ret = 0; - goto bail; - } - copy_io(piobuf, ss, len); - ret = 0; - -bail: - return ret; -} - -EXPORT_SYMBOL_GPL(ipath_verbs_send); - -int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords, - u64 *rwords, u64 *spkts, u64 *rpkts, - u64 *xmit_wait) -{ - int ret; - - if (!(dd->ipath_flags & IPATH_INITTED)) { - /* no hardware, freeze, etc. */ - ipath_dbg("unit %u not usable\n", dd->ipath_unit); - ret = -EINVAL; - goto bail; - } - *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt); - *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt); - *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt); - *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt); - *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt); - - ret = 0; - -bail: - return ret; -} - -EXPORT_SYMBOL_GPL(ipath_layer_snapshot_counters); - -/** - * ipath_layer_get_counters - get various chip counters - * @dd: the infinipath device - * @cntrs: counters are placed here - * - * Return the counters needed by recv_pma_get_portcounters(). - */ -int ipath_layer_get_counters(struct ipath_devdata *dd, - struct ipath_layer_counters *cntrs) -{ - int ret; - - if (!(dd->ipath_flags & IPATH_INITTED)) { - /* no hardware, freeze, etc. */ - ipath_dbg("unit %u not usable\n", dd->ipath_unit); - ret = -EINVAL; - goto bail; - } - cntrs->symbol_error_counter = - ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt); - cntrs->link_error_recovery_counter = - ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt); - /* - * The link downed counter counts when the other side downs the - * connection. We add in the number of times we downed the link - * due to local link integrity errors to compensate. 
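The removed ipath_verbs_send() above encodes a strict ordering for PIO sends over write-combining mappings: flush after the PBC length word, flush again after everything that precedes the final word, then write that last "trigger" word, which the chip takes as the cue to launch the packet. Distilled for the header-only case (a sketch built from the removed code; ipath_flush_wc() is the driver's write-combining flush):

    #include <linux/types.h>
    #include <linux/io.h>

    /* header-only PIO send, distilled from the removed ipath_verbs_send() */
    static void sketch_pio_send_hdr(u32 __iomem *piobuf, u64 plen,
                                    const u32 *hdr, u32 hdrwords)
    {
            writeq(plen, piobuf);       /* PBC: length word, no flags */
            ipath_flush_wc();           /* PBC must land before any data */
            piobuf += 2;
            __iowrite32_copy(piobuf, hdr, hdrwords - 1);
            ipath_flush_wc();           /* all data before the trigger word */
            __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
            ipath_flush_wc();           /* be sure the trigger word is posted */
    }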
- */ - cntrs->link_downed_counter = - ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt); - cntrs->port_rcv_errors = - ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) + - ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) + - ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) + - ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) + - ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) + - ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) + - ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) + - ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) + - ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt); - cntrs->port_rcv_remphys_errors = - ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt); - cntrs->port_xmit_discards = - ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt); - cntrs->port_xmit_data = - ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt); - cntrs->port_rcv_data = - ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt); - cntrs->port_xmit_packets = - ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt); - cntrs->port_rcv_packets = - ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt); - cntrs->local_link_integrity_errors = dd->ipath_lli_errors; - cntrs->excessive_buffer_overrun_errors = 0; /* XXX */ - - ret = 0; - -bail: - return ret; -} - -EXPORT_SYMBOL_GPL(ipath_layer_get_counters); - -int ipath_layer_want_buffer(struct ipath_devdata *dd) -{ - set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl); - ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, - dd->ipath_sendctrl); - - return 0; -} - -EXPORT_SYMBOL_GPL(ipath_layer_want_buffer); - int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr) { int ret = 0; @@ -1153,389 +364,3 @@ int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd) } EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int); - -int ipath_layer_enable_timer(struct ipath_devdata *dd) -{ - /* - * HT-400 has a design flaw where the chip and kernel idea - * of the tail register don't always agree, and therefore we won't - * get an interrupt on the next packet received. - * If the board supports per packet receive interrupts, use it. - * Otherwise, the timer function periodically checks for packets - * to cover this case. - * Either way, the timer is needed for verbs layer related - * processing. 
- */ - if (dd->ipath_flags & IPATH_GPIO_INTR) { - ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect, - 0x2074076542310ULL); - /* Enable GPIO bit 2 interrupt */ - ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, - (u64) (1 << 2)); - } - - init_timer(&dd->verbs_layer.l_timer); - dd->verbs_layer.l_timer.function = __ipath_verbs_timer; - dd->verbs_layer.l_timer.data = (unsigned long)dd; - dd->verbs_layer.l_timer.expires = jiffies + 1; - add_timer(&dd->verbs_layer.l_timer); - - return 0; -} - -EXPORT_SYMBOL_GPL(ipath_layer_enable_timer); - -int ipath_layer_disable_timer(struct ipath_devdata *dd) -{ - /* Disable GPIO bit 2 interrupt */ - if (dd->ipath_flags & IPATH_GPIO_INTR) - ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0); - - del_timer_sync(&dd->verbs_layer.l_timer); - - return 0; -} - -EXPORT_SYMBOL_GPL(ipath_layer_disable_timer); - -/** - * ipath_layer_set_verbs_flags - set the verbs layer flags - * @dd: the infinipath device - * @flags: the flags to set - */ -int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags) -{ - struct ipath_devdata *ss; - unsigned long lflags; - - spin_lock_irqsave(&ipath_devs_lock, lflags); - - list_for_each_entry(ss, &ipath_dev_list, ipath_list) { - if (!(ss->ipath_flags & IPATH_INITTED)) - continue; - if ((flags & IPATH_VERBS_KERNEL_SMA) && - !(*ss->ipath_statusp & IPATH_STATUS_SMA)) - *ss->ipath_statusp |= IPATH_STATUS_OIB_SMA; - else - *ss->ipath_statusp &= ~IPATH_STATUS_OIB_SMA; - } - - spin_unlock_irqrestore(&ipath_devs_lock, lflags); - - return 0; -} - -EXPORT_SYMBOL_GPL(ipath_layer_set_verbs_flags); - -/** - * ipath_layer_get_npkeys - return the size of the PKEY table for port 0 - * @dd: the infinipath device - */ -unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd) -{ - return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys); -} - -EXPORT_SYMBOL_GPL(ipath_layer_get_npkeys); - -/** - * ipath_layer_get_pkey - return the indexed PKEY from the port 0 PKEY table - * @dd: the infinipath device - * @index: the PKEY index - */ -unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index) -{ - unsigned ret; - - if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys)) - ret = 0; - else - ret = dd->ipath_pd[0]->port_pkeys[index]; - - return ret; -} - -EXPORT_SYMBOL_GPL(ipath_layer_get_pkey); - -/** - * ipath_layer_get_pkeys - return the PKEY table for port 0 - * @dd: the infinipath device - * @pkeys: the pkey table is placed here - */ -int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 * pkeys) -{ - struct ipath_portdata *pd = dd->ipath_pd[0]; - - memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys)); - - return 0; -} - -EXPORT_SYMBOL_GPL(ipath_layer_get_pkeys); - -/** - * rm_pkey - decrecment the reference count for the given PKEY - * @dd: the infinipath device - * @key: the PKEY index - * - * Return true if this was the last reference and the hardware table entry - * needs to be changed. - */ -static int rm_pkey(struct ipath_devdata *dd, u16 key) -{ - int i; - int ret; - - for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) { - if (dd->ipath_pkeys[i] != key) - continue; - if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) { - dd->ipath_pkeys[i] = 0; - ret = 1; - goto bail; - } - break; - } - - ret = 0; - -bail: - return ret; -} - -/** - * add_pkey - add the given PKEY to the hardware table - * @dd: the infinipath device - * @key: the PKEY - * - * Return an error code if unable to add the entry, zero if no change, - * or 1 if the hardware PKEY register needs to be updated. 
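
The removed enable path uses the timer API of this kernel generation: init_timer(), then the .function/.data pair, then add_timer(). The handler itself is defined elsewhere in the driver; a plausible shape for a self-re-arming poll of this kind (a guess for illustration, not the driver's actual body):

    static void __ipath_verbs_timer(unsigned long arg)
    {
            struct ipath_devdata *dd = (struct ipath_devdata *) arg;

            /* ... check for received packets, run verbs processing ... */

            mod_timer(&dd->verbs_layer.l_timer, jiffies + 1);  /* re-arm */
    }
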
- */ -static int add_pkey(struct ipath_devdata *dd, u16 key) -{ - int i; - u16 lkey = key & 0x7FFF; - int any = 0; - int ret; - - if (lkey == 0x7FFF) { - ret = 0; - goto bail; - } - - /* Look for an empty slot or a matching PKEY. */ - for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) { - if (!dd->ipath_pkeys[i]) { - any++; - continue; - } - /* If it matches exactly, try to increment the ref count */ - if (dd->ipath_pkeys[i] == key) { - if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) { - ret = 0; - goto bail; - } - /* Lost the race. Look for an empty slot below. */ - atomic_dec(&dd->ipath_pkeyrefs[i]); - any++; - } - /* - * It makes no sense to have both the limited and unlimited - * PKEY set at the same time since the unlimited one will - * disable the limited one. - */ - if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) { - ret = -EEXIST; - goto bail; - } - } - if (!any) { - ret = -EBUSY; - goto bail; - } - for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) { - if (!dd->ipath_pkeys[i] && - atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) { - /* for ipathstats, etc. */ - ipath_stats.sps_pkeys[i] = lkey; - dd->ipath_pkeys[i] = key; - ret = 1; - goto bail; - } - } - ret = -EBUSY; - -bail: - return ret; -} - -/** - * ipath_layer_set_pkeys - set the PKEY table for port 0 - * @dd: the infinipath device - * @pkeys: the PKEY table - */ -int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 * pkeys) -{ - struct ipath_portdata *pd; - int i; - int changed = 0; - - pd = dd->ipath_pd[0]; - - for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) { - u16 key = pkeys[i]; - u16 okey = pd->port_pkeys[i]; - - if (key == okey) - continue; - /* - * The value of this PKEY table entry is changing. - * Remove the old entry in the hardware's array of PKEYs. - */ - if (okey & 0x7FFF) - changed |= rm_pkey(dd, okey); - if (key & 0x7FFF) { - int ret = add_pkey(dd, key); - - if (ret < 0) - key = 0; - else - changed |= ret; - } - pd->port_pkeys[i] = key; - } - if (changed) { - u64 pkey; - - pkey = (u64) dd->ipath_pkeys[0] | - ((u64) dd->ipath_pkeys[1] << 16) | - ((u64) dd->ipath_pkeys[2] << 32) | - ((u64) dd->ipath_pkeys[3] << 48); - ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n", - (unsigned long long) pkey); - ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey, - pkey); - } - return 0; -} - -EXPORT_SYMBOL_GPL(ipath_layer_set_pkeys); - -/** - * ipath_layer_get_linkdowndefaultstate - get the default linkdown state - * @dd: the infinipath device - * - * Returns zero if the default is POLL, 1 if the default is SLEEP. - */ -int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd) -{ - return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE); -} - -EXPORT_SYMBOL_GPL(ipath_layer_get_linkdowndefaultstate); - -/** - * ipath_layer_set_linkdowndefaultstate - set the default linkdown state - * @dd: the infinipath device - * @sleep: the new state - * - * Note that this will only take effect when the link state changes. 
- */ -int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd, - int sleep) -{ - if (sleep) - dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE; - else - dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE; - ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, - dd->ipath_ibcctrl); - return 0; -} - -EXPORT_SYMBOL_GPL(ipath_layer_set_linkdowndefaultstate); - -int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd) -{ - return (dd->ipath_ibcctrl >> - INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) & - INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK; -} - -EXPORT_SYMBOL_GPL(ipath_layer_get_phyerrthreshold); - -/** - * ipath_layer_set_phyerrthreshold - set the physical error threshold - * @dd: the infinipath device - * @n: the new threshold - * - * Note that this will only take effect when the link state changes. - */ -int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n) -{ - unsigned v; - - v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) & - INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK; - if (v != n) { - dd->ipath_ibcctrl &= - ~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK << - INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT); - dd->ipath_ibcctrl |= - (u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT; - ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, - dd->ipath_ibcctrl); - } - return 0; -} - -EXPORT_SYMBOL_GPL(ipath_layer_set_phyerrthreshold); - -int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd) -{ - return (dd->ipath_ibcctrl >> - INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) & - INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK; -} - -EXPORT_SYMBOL_GPL(ipath_layer_get_overrunthreshold); - -/** - * ipath_layer_set_overrunthreshold - set the overrun threshold - * @dd: the infinipath device - * @n: the new threshold - * - * Note that this will only take effect when the link state changes. - */ -int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n) -{ - unsigned v; - - v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) & - INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK; - if (v != n) { - dd->ipath_ibcctrl &= - ~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK << - INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT); - dd->ipath_ibcctrl |= - (u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT; - ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, - dd->ipath_ibcctrl); - } - return 0; -} - -EXPORT_SYMBOL_GPL(ipath_layer_set_overrunthreshold); - -int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name, - size_t namelen) -{ - return dd->ipath_f_get_boardname(dd, name, namelen); -} -EXPORT_SYMBOL_GPL(ipath_layer_get_boardname); - -u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd) -{ - return dd->ipath_rcvhdrentsize; -} -EXPORT_SYMBOL_GPL(ipath_layer_get_rcvhdrentsize); diff --git a/drivers/infiniband/hw/ipath/ipath_layer.h b/drivers/infiniband/hw/ipath/ipath_layer.h index 71485096fcac..3854a4eae684 100644 --- a/drivers/infiniband/hw/ipath/ipath_layer.h +++ b/drivers/infiniband/hw/ipath/ipath_layer.h @@ -40,73 +40,9 @@ */ struct sk_buff; -struct ipath_sge_state; struct ipath_devdata; struct ether_header; -struct ipath_layer_counters { - u64 symbol_error_counter; - u64 link_error_recovery_counter; - u64 link_downed_counter; - u64 port_rcv_errors; - u64 port_rcv_remphys_errors; - u64 port_xmit_discards; - u64 port_xmit_data; - u64 port_rcv_data; - u64 port_xmit_packets; - u64 port_rcv_packets; - u32 local_link_integrity_errors; - u32 excessive_buffer_overrun_errors; -}; - -/* - * A segment is a linear region of low physical memory. 
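
Both threshold setters above follow one read-modify-write pattern against the shadowed ibcctrl value: extract the current field, compare, clear it, or-in the new value, then write the register back. Factored out for illustration (the driver keeps it inline per field):

    /* Replace the 'mask'-wide field at 'shift' in a shadowed register
     * image; the caller writes the result back to the chip. */
    static inline u64 set_reg_field(u64 reg, u64 mask, unsigned shift,
                                    u64 val)
    {
            reg &= ~(mask << shift);
            reg |= (val & mask) << shift;
            return reg;
    }
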
- * XXX Maybe we should use phys addr here and kmap()/kunmap(). - * Used by the verbs layer. - */ -struct ipath_seg { - void *vaddr; - size_t length; -}; - -/* The number of ipath_segs that fit in a page. */ -#define IPATH_SEGSZ (PAGE_SIZE / sizeof (struct ipath_seg)) - -struct ipath_segarray { - struct ipath_seg segs[IPATH_SEGSZ]; -}; - -struct ipath_mregion { - u64 user_base; /* User's address for this region */ - u64 iova; /* IB start address of this region */ - size_t length; - u32 lkey; - u32 offset; /* offset (bytes) to start of region */ - int access_flags; - u32 max_segs; /* number of ipath_segs in all the arrays */ - u32 mapsz; /* size of the map array */ - struct ipath_segarray *map[0]; /* the segments */ -}; - -/* - * These keep track of the copy progress within a memory region. - * Used by the verbs layer. - */ -struct ipath_sge { - struct ipath_mregion *mr; - void *vaddr; /* current pointer into the segment */ - u32 sge_length; /* length of the SGE */ - u32 length; /* remaining length of the segment */ - u16 m; /* current index: mr->map[m] */ - u16 n; /* current index: mr->map[m]->segs[n] */ -}; - -struct ipath_sge_state { - struct ipath_sge *sg_list; /* next SGE to be used if any */ - struct ipath_sge sge; /* progress state for the current SGE */ - u8 num_sge; -}; - int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *), void (*l_remove)(void *), int (*l_intr)(void *, u32), @@ -114,62 +50,14 @@ int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *), struct sk_buff *), u16 rcv_opcode, int (*l_rcv_lid)(void *, void *)); -int ipath_verbs_register(void *(*l_add)(int, struct ipath_devdata *), - void (*l_remove)(void *arg), - int (*l_piobufavail)(void *arg), - void (*l_rcv)(void *arg, void *rhdr, - void *data, u32 tlen), - void (*l_timer_cb)(void *arg)); void ipath_layer_unregister(void); -void ipath_verbs_unregister(void); int ipath_layer_open(struct ipath_devdata *, u32 * pktmax); u16 ipath_layer_get_lid(struct ipath_devdata *dd); int ipath_layer_get_mac(struct ipath_devdata *dd, u8 *); u16 ipath_layer_get_bcast(struct ipath_devdata *dd); -u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd); -int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 state); -int ipath_layer_set_mtu(struct ipath_devdata *, u16); -int ipath_set_lid(struct ipath_devdata *, u32, u8); int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr); -int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords, - u32 * hdr, u32 len, struct ipath_sge_state *ss); int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd); -int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name, - size_t namelen); -int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords, - u64 *rwords, u64 *spkts, u64 *rpkts, - u64 *xmit_wait); -int ipath_layer_get_counters(struct ipath_devdata *dd, - struct ipath_layer_counters *cntrs); -int ipath_layer_want_buffer(struct ipath_devdata *dd); -int ipath_layer_set_guid(struct ipath_devdata *, __be64 guid); -__be64 ipath_layer_get_guid(struct ipath_devdata *); -u32 ipath_layer_get_nguid(struct ipath_devdata *); -u32 ipath_layer_get_majrev(struct ipath_devdata *); -u32 ipath_layer_get_minrev(struct ipath_devdata *); -u32 ipath_layer_get_pcirev(struct ipath_devdata *); -u32 ipath_layer_get_flags(struct ipath_devdata *dd); -struct device *ipath_layer_get_device(struct ipath_devdata *dd); -u16 ipath_layer_get_deviceid(struct ipath_devdata *dd); -u32 ipath_layer_get_vendorid(struct ipath_devdata *); -u64 
ipath_layer_get_lastibcstat(struct ipath_devdata *dd); -u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd); -int ipath_layer_enable_timer(struct ipath_devdata *dd); -int ipath_layer_disable_timer(struct ipath_devdata *dd); -int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags); -unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd); -unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index); -int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 *pkeys); -int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 *pkeys); -int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd); -int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd, - int sleep); -int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd); -int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n); -int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd); -int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n); -u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd); /* ipath_ether interrupt values */ #define IPATH_LAYER_INT_IF_UP 0x2 @@ -178,9 +66,6 @@ u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd); #define IPATH_LAYER_INT_SEND_CONTINUE 0x10 #define IPATH_LAYER_INT_BCAST 0x40 -/* _verbs_layer.l_flags */ -#define IPATH_VERBS_KERNEL_SMA 0x1 - extern unsigned ipath_debug; /* debugging bit mask */ #endif /* _IPATH_LAYER_H */ diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c index d3402341b7d0..72d1db89db8f 100644 --- a/drivers/infiniband/hw/ipath/ipath_mad.c +++ b/drivers/infiniband/hw/ipath/ipath_mad.c @@ -101,15 +101,15 @@ static int recv_subn_get_nodeinfo(struct ib_smp *smp, nip->num_ports = ibdev->phys_port_cnt; /* This is already in network order */ nip->sys_guid = to_idev(ibdev)->sys_image_guid; - nip->node_guid = ipath_layer_get_guid(dd); + nip->node_guid = dd->ipath_guid; nip->port_guid = nip->sys_guid; - nip->partition_cap = cpu_to_be16(ipath_layer_get_npkeys(dd)); - nip->device_id = cpu_to_be16(ipath_layer_get_deviceid(dd)); - majrev = ipath_layer_get_majrev(dd); - minrev = ipath_layer_get_minrev(dd); + nip->partition_cap = cpu_to_be16(ipath_get_npkeys(dd)); + nip->device_id = cpu_to_be16(dd->ipath_deviceid); + majrev = dd->ipath_majrev; + minrev = dd->ipath_minrev; nip->revision = cpu_to_be32((majrev << 16) | minrev); nip->local_port_num = port; - vendor = ipath_layer_get_vendorid(dd); + vendor = dd->ipath_vendorid; nip->vendor_id[0] = 0; nip->vendor_id[1] = vendor >> 8; nip->vendor_id[2] = vendor; @@ -133,13 +133,89 @@ static int recv_subn_get_guidinfo(struct ib_smp *smp, */ if (startgx == 0) /* The first is a copy of the read-only HW GUID. */ - *p = ipath_layer_get_guid(to_idev(ibdev)->dd); + *p = to_idev(ibdev)->dd->ipath_guid; else smp->status |= IB_SMP_INVALID_FIELD; return reply(smp); } + +static int get_overrunthreshold(struct ipath_devdata *dd) +{ + return (dd->ipath_ibcctrl >> + INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) & + INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK; +} + +/** + * set_overrunthreshold - set the overrun threshold + * @dd: the infinipath device + * @n: the new threshold + * + * Note that this will only take effect when the link state changes. 
+ */ +static int set_overrunthreshold(struct ipath_devdata *dd, unsigned n) +{ + unsigned v; + + v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) & + INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK; + if (v != n) { + dd->ipath_ibcctrl &= + ~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK << + INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT); + dd->ipath_ibcctrl |= + (u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT; + ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, + dd->ipath_ibcctrl); + } + return 0; +} + +static int get_phyerrthreshold(struct ipath_devdata *dd) +{ + return (dd->ipath_ibcctrl >> + INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) & + INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK; +} + +/** + * set_phyerrthreshold - set the physical error threshold + * @dd: the infinipath device + * @n: the new threshold + * + * Note that this will only take effect when the link state changes. + */ +static int set_phyerrthreshold(struct ipath_devdata *dd, unsigned n) +{ + unsigned v; + + v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) & + INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK; + if (v != n) { + dd->ipath_ibcctrl &= + ~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK << + INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT); + dd->ipath_ibcctrl |= + (u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT; + ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, + dd->ipath_ibcctrl); + } + return 0; +} + +/** + * get_linkdowndefaultstate - get the default linkdown state + * @dd: the infinipath device + * + * Returns zero if the default is POLL, 1 if the default is SLEEP. + */ +static int get_linkdowndefaultstate(struct ipath_devdata *dd) +{ + return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE); +} + static int recv_subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev, u8 port) { @@ -166,7 +242,7 @@ static int recv_subn_get_portinfo(struct ib_smp *smp, (dev->mkeyprot_resv_lmc >> 6) == 0) pip->mkey = dev->mkey; pip->gid_prefix = dev->gid_prefix; - lid = ipath_layer_get_lid(dev->dd); + lid = dev->dd->ipath_lid; pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE; pip->sm_lid = cpu_to_be16(dev->sm_lid); pip->cap_mask = cpu_to_be32(dev->port_cap_flags); @@ -177,14 +253,14 @@ static int recv_subn_get_portinfo(struct ib_smp *smp, pip->link_width_supported = 3; /* 1x or 4x */ pip->link_width_active = 2; /* 4x */ pip->linkspeed_portstate = 0x10; /* 2.5Gbps */ - ibcstat = ipath_layer_get_lastibcstat(dev->dd); + ibcstat = dev->dd->ipath_lastibcstat; pip->linkspeed_portstate |= ((ibcstat >> 4) & 0x3) + 1; pip->portphysstate_linkdown = (ipath_cvt_physportstate[ibcstat & 0xf] << 4) | - (ipath_layer_get_linkdowndefaultstate(dev->dd) ? 1 : 2); + (get_linkdowndefaultstate(dev->dd) ? 1 : 2); pip->mkeyprot_resv_lmc = dev->mkeyprot_resv_lmc; pip->linkspeedactive_enabled = 0x11; /* 2.5Gbps, 2.5Gbps */ - switch (ipath_layer_get_ibmtu(dev->dd)) { + switch (dev->dd->ipath_ibmtu) { case 4096: mtu = IB_MTU_4096; break; @@ -217,7 +293,7 @@ static int recv_subn_get_portinfo(struct ib_smp *smp, pip->mkey_violations = cpu_to_be16(dev->mkey_violations); /* P_KeyViolations are counted by hardware. */ pip->pkey_violations = - cpu_to_be16((ipath_layer_get_cr_errpkey(dev->dd) - + cpu_to_be16((ipath_get_cr_errpkey(dev->dd) - dev->z_pkey_violations) & 0xFFFF); pip->qkey_violations = cpu_to_be16(dev->qkey_violations); /* Only the hardware GUID is supported for now */ @@ -226,8 +302,8 @@ static int recv_subn_get_portinfo(struct ib_smp *smp, /* 32.768 usec. 
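
recv_subn_get_portinfo() translates the chip's byte MTU into the IB_MTU_* enumeration that the PortInfo MAD carries. The mapping as a standalone helper (hypothetical name; the diff keeps the switch inline, and only the 4096 case is visible in this hunk):

    static enum ib_mtu mtu_to_enum(unsigned ibmtu)
    {
            switch (ibmtu) {
            case 4096: return IB_MTU_4096;
            case 2048: return IB_MTU_2048;
            case 1024: return IB_MTU_1024;
            case 512:  return IB_MTU_512;
            default:   return IB_MTU_256;
            }
    }
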
response time (guessing) */ pip->resv_resptimevalue = 3; pip->localphyerrors_overrunerrors = - (ipath_layer_get_phyerrthreshold(dev->dd) << 4) | - ipath_layer_get_overrunthreshold(dev->dd); + (get_phyerrthreshold(dev->dd) << 4) | + get_overrunthreshold(dev->dd); /* pip->max_credit_hint; */ /* pip->link_roundtrip_latency[3]; */ @@ -237,6 +313,20 @@ bail: return ret; } +/** + * get_pkeys - return the PKEY table for port 0 + * @dd: the infinipath device + * @pkeys: the pkey table is placed here + */ +static int get_pkeys(struct ipath_devdata *dd, u16 * pkeys) +{ + struct ipath_portdata *pd = dd->ipath_pd[0]; + + memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys)); + + return 0; +} + static int recv_subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev) { @@ -249,9 +339,9 @@ static int recv_subn_get_pkeytable(struct ib_smp *smp, memset(smp->data, 0, sizeof(smp->data)); if (startpx == 0) { struct ipath_ibdev *dev = to_idev(ibdev); - unsigned i, n = ipath_layer_get_npkeys(dev->dd); + unsigned i, n = ipath_get_npkeys(dev->dd); - ipath_layer_get_pkeys(dev->dd, p); + get_pkeys(dev->dd, p); for (i = 0; i < n; i++) q[i] = cpu_to_be16(p[i]); @@ -269,6 +359,24 @@ static int recv_subn_set_guidinfo(struct ib_smp *smp, } /** + * set_linkdowndefaultstate - set the default linkdown state + * @dd: the infinipath device + * @sleep: the new state + * + * Note that this will only take effect when the link state changes. + */ +static int set_linkdowndefaultstate(struct ipath_devdata *dd, int sleep) +{ + if (sleep) + dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE; + else + dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE; + ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, + dd->ipath_ibcctrl); + return 0; +} + +/** * recv_subn_set_portinfo - set port information * @smp: the incoming SM packet * @ibdev: the infiniband device @@ -290,7 +398,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp, u8 state; u16 lstate; u32 mtu; - int ret; + int ret, ore; if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt) goto err; @@ -304,7 +412,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp, dev->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period); lid = be16_to_cpu(pip->lid); - if (lid != ipath_layer_get_lid(dev->dd)) { + if (lid != dev->dd->ipath_lid) { /* Must be a valid unicast LID address. */ if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE) goto err; @@ -342,11 +450,11 @@ static int recv_subn_set_portinfo(struct ib_smp *smp, case 0: /* NOP */ break; case 1: /* SLEEP */ - if (ipath_layer_set_linkdowndefaultstate(dev->dd, 1)) + if (set_linkdowndefaultstate(dev->dd, 1)) goto err; break; case 2: /* POLL */ - if (ipath_layer_set_linkdowndefaultstate(dev->dd, 0)) + if (set_linkdowndefaultstate(dev->dd, 0)) goto err; break; default: @@ -376,7 +484,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp, /* XXX We have already partially updated our state! */ goto err; } - ipath_layer_set_mtu(dev->dd, mtu); + ipath_set_mtu(dev->dd, mtu); dev->sm_sl = pip->neighbormtu_mastersmsl & 0xF; @@ -392,20 +500,16 @@ static int recv_subn_set_portinfo(struct ib_smp *smp, * later. 
*/ if (pip->pkey_violations == 0) - dev->z_pkey_violations = - ipath_layer_get_cr_errpkey(dev->dd); + dev->z_pkey_violations = ipath_get_cr_errpkey(dev->dd); if (pip->qkey_violations == 0) dev->qkey_violations = 0; - if (ipath_layer_set_phyerrthreshold( - dev->dd, - (pip->localphyerrors_overrunerrors >> 4) & 0xF)) + ore = pip->localphyerrors_overrunerrors; + if (set_phyerrthreshold(dev->dd, (ore >> 4) & 0xF)) goto err; - if (ipath_layer_set_overrunthreshold( - dev->dd, - (pip->localphyerrors_overrunerrors & 0xF))) + if (set_overrunthreshold(dev->dd, (ore & 0xF))) goto err; dev->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F; @@ -423,7 +527,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp, * is down or is being set to down. */ state = pip->linkspeed_portstate & 0xF; - flags = ipath_layer_get_flags(dev->dd); + flags = dev->dd->ipath_flags; lstate = (pip->portphysstate_linkdown >> 4) & 0xF; if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP)) goto err; @@ -439,7 +543,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp, /* FALLTHROUGH */ case IB_PORT_DOWN: if (lstate == 0) - if (ipath_layer_get_linkdowndefaultstate(dev->dd)) + if (get_linkdowndefaultstate(dev->dd)) lstate = IPATH_IB_LINKDOWN_SLEEP; else lstate = IPATH_IB_LINKDOWN; @@ -451,7 +555,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp, lstate = IPATH_IB_LINKDOWN_DISABLE; else goto err; - ipath_layer_set_linkstate(dev->dd, lstate); + ipath_set_linkstate(dev->dd, lstate); if (flags & IPATH_LINKACTIVE) { event.event = IB_EVENT_PORT_ERR; ib_dispatch_event(&event); @@ -460,7 +564,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp, case IB_PORT_ARMED: if (!(flags & (IPATH_LINKINIT | IPATH_LINKACTIVE))) break; - ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKARM); + ipath_set_linkstate(dev->dd, IPATH_IB_LINKARM); if (flags & IPATH_LINKACTIVE) { event.event = IB_EVENT_PORT_ERR; ib_dispatch_event(&event); @@ -469,7 +573,7 @@ static int recv_subn_set_portinfo(struct ib_smp *smp, case IB_PORT_ACTIVE: if (!(flags & IPATH_LINKARMED)) break; - ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKACTIVE); + ipath_set_linkstate(dev->dd, IPATH_IB_LINKACTIVE); event.event = IB_EVENT_PORT_ACTIVE; ib_dispatch_event(&event); break; @@ -493,6 +597,152 @@ done: return ret; } +/** + * rm_pkey - decrecment the reference count for the given PKEY + * @dd: the infinipath device + * @key: the PKEY index + * + * Return true if this was the last reference and the hardware table entry + * needs to be changed. + */ +static int rm_pkey(struct ipath_devdata *dd, u16 key) +{ + int i; + int ret; + + for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) { + if (dd->ipath_pkeys[i] != key) + continue; + if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) { + dd->ipath_pkeys[i] = 0; + ret = 1; + goto bail; + } + break; + } + + ret = 0; + +bail: + return ret; +} + +/** + * add_pkey - add the given PKEY to the hardware table + * @dd: the infinipath device + * @key: the PKEY + * + * Return an error code if unable to add the entry, zero if no change, + * or 1 if the hardware PKEY register needs to be updated. + */ +static int add_pkey(struct ipath_devdata *dd, u16 key) +{ + int i; + u16 lkey = key & 0x7FFF; + int any = 0; + int ret; + + if (lkey == 0x7FFF) { + ret = 0; + goto bail; + } + + /* Look for an empty slot or a matching PKEY. 
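
rm_pkey() and add_pkey() coordinate on the shared PKEY table with per-slot reference counts instead of a lock: the CPU that moves a count from 0 to 1 owns the slot, and a CPU that bumps an already-claimed slot backs off with atomic_dec(). The claim step in isolation (a sketch, kernel atomics assumed):

    /* Claim the first free slot in a refcounted table; returns the
     * slot index, or -EBUSY when every slot is taken. */
    static int claim_slot(atomic_t *refs, int nslots)
    {
            int i;

            for (i = 0; i < nslots; i++) {
                    if (atomic_inc_return(&refs[i]) == 1)
                            return i;       /* 0 -> 1: we own it */
                    atomic_dec(&refs[i]);   /* occupied or lost the race */
            }
            return -EBUSY;
    }
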
*/ + for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) { + if (!dd->ipath_pkeys[i]) { + any++; + continue; + } + /* If it matches exactly, try to increment the ref count */ + if (dd->ipath_pkeys[i] == key) { + if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) { + ret = 0; + goto bail; + } + /* Lost the race. Look for an empty slot below. */ + atomic_dec(&dd->ipath_pkeyrefs[i]); + any++; + } + /* + * It makes no sense to have both the limited and unlimited + * PKEY set at the same time since the unlimited one will + * disable the limited one. + */ + if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) { + ret = -EEXIST; + goto bail; + } + } + if (!any) { + ret = -EBUSY; + goto bail; + } + for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) { + if (!dd->ipath_pkeys[i] && + atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) { + /* for ipathstats, etc. */ + ipath_stats.sps_pkeys[i] = lkey; + dd->ipath_pkeys[i] = key; + ret = 1; + goto bail; + } + } + ret = -EBUSY; + +bail: + return ret; +} + +/** + * set_pkeys - set the PKEY table for port 0 + * @dd: the infinipath device + * @pkeys: the PKEY table + */ +static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys) +{ + struct ipath_portdata *pd; + int i; + int changed = 0; + + pd = dd->ipath_pd[0]; + + for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) { + u16 key = pkeys[i]; + u16 okey = pd->port_pkeys[i]; + + if (key == okey) + continue; + /* + * The value of this PKEY table entry is changing. + * Remove the old entry in the hardware's array of PKEYs. + */ + if (okey & 0x7FFF) + changed |= rm_pkey(dd, okey); + if (key & 0x7FFF) { + int ret = add_pkey(dd, key); + + if (ret < 0) + key = 0; + else + changed |= ret; + } + pd->port_pkeys[i] = key; + } + if (changed) { + u64 pkey; + + pkey = (u64) dd->ipath_pkeys[0] | + ((u64) dd->ipath_pkeys[1] << 16) | + ((u64) dd->ipath_pkeys[2] << 32) | + ((u64) dd->ipath_pkeys[3] << 48); + ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n", + (unsigned long long) pkey); + ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey, + pkey); + } + return 0; +} + static int recv_subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev) { @@ -500,13 +750,12 @@ static int recv_subn_set_pkeytable(struct ib_smp *smp, __be16 *p = (__be16 *) smp->data; u16 *q = (u16 *) smp->data; struct ipath_ibdev *dev = to_idev(ibdev); - unsigned i, n = ipath_layer_get_npkeys(dev->dd); + unsigned i, n = ipath_get_npkeys(dev->dd); for (i = 0; i < n; i++) q[i] = be16_to_cpu(p[i]); - if (startpx != 0 || - ipath_layer_set_pkeys(dev->dd, q) != 0) + if (startpx != 0 || set_pkeys(dev->dd, q) != 0) smp->status |= IB_SMP_INVALID_FIELD; return recv_subn_get_pkeytable(smp, ibdev); @@ -844,10 +1093,10 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp, struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) pmp->data; struct ipath_ibdev *dev = to_idev(ibdev); - struct ipath_layer_counters cntrs; + struct ipath_verbs_counters cntrs; u8 port_select = p->port_select; - ipath_layer_get_counters(dev->dd, &cntrs); + ipath_get_counters(dev->dd, &cntrs); /* Adjust counters for any resets done. */ cntrs.symbol_error_counter -= dev->z_symbol_error_counter; @@ -944,8 +1193,8 @@ static int recv_pma_get_portcounters_ext(struct ib_perf *pmp, u64 swords, rwords, spkts, rpkts, xwait; u8 port_select = p->port_select; - ipath_layer_snapshot_counters(dev->dd, &swords, &rwords, &spkts, - &rpkts, &xwait); + ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts, + &rpkts, &xwait); /* Adjust counters for any resets done. 
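
The z_* subtractions implement clearable counters on hardware that cannot clear them: a PMA "set" records the current hardware value as a baseline, and every "get" subtracts it, so readers see a counter that restarts at zero. In miniature (hypothetical helper names; unsigned arithmetic also rides out counter wrap):

    struct zcounter {
            u64 zero_base;          /* hardware value at the last "clear" */
    };

    static u64 zcounter_read(const struct zcounter *c, u64 hw)
    {
            return hw - c->zero_base;
    }

    static void zcounter_clear(struct zcounter *c, u64 hw)
    {
            c->zero_base = hw;      /* later reads start from zero */
    }
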
*/ swords -= dev->z_port_xmit_data; @@ -978,13 +1227,13 @@ static int recv_pma_set_portcounters(struct ib_perf *pmp, struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) pmp->data; struct ipath_ibdev *dev = to_idev(ibdev); - struct ipath_layer_counters cntrs; + struct ipath_verbs_counters cntrs; /* * Since the HW doesn't support clearing counters, we save the * current count and subtract it from future responses. */ - ipath_layer_get_counters(dev->dd, &cntrs); + ipath_get_counters(dev->dd, &cntrs); if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR) dev->z_symbol_error_counter = cntrs.symbol_error_counter; @@ -1041,8 +1290,8 @@ static int recv_pma_set_portcounters_ext(struct ib_perf *pmp, struct ipath_ibdev *dev = to_idev(ibdev); u64 swords, rwords, spkts, rpkts, xwait; - ipath_layer_snapshot_counters(dev->dd, &swords, &rwords, &spkts, - &rpkts, &xwait); + ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts, + &rpkts, &xwait); if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA) dev->z_port_xmit_data = swords; diff --git a/drivers/infiniband/hw/ipath/ipath_mmap.c b/drivers/infiniband/hw/ipath/ipath_mmap.c new file mode 100644 index 000000000000..11b7378ff214 --- /dev/null +++ b/drivers/infiniband/hw/ipath/ipath_mmap.c @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2006 QLogic, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/config.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/mm.h> +#include <linux/errno.h> +#include <asm/pgtable.h> + +#include "ipath_verbs.h" + +/** + * ipath_release_mmap_info - free mmap info structure + * @ref: a pointer to the kref within struct ipath_mmap_info + */ +void ipath_release_mmap_info(struct kref *ref) +{ + struct ipath_mmap_info *ip = + container_of(ref, struct ipath_mmap_info, ref); + + vfree(ip->obj); + kfree(ip); +} + +/* + * open and close keep track of how many times the CQ is mapped, + * to avoid releasing it. 
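
ipath_release_mmap_info() is the kref release callback for the pattern this new file is built around: the creator holds one reference, every vma open takes another, and the vmalloc'd object is freed only when the last reference drops. Reduced to its skeleton (a sketch):

    struct mapped_obj {
            struct kref ref;
            void *buf;              /* vmalloc'd, user-mappable ring */
    };

    static void mapped_obj_release(struct kref *ref)
    {
            struct mapped_obj *o =
                    container_of(ref, struct mapped_obj, ref);

            vfree(o->buf);
            kfree(o);
    }

    /* creator:            kref_init(&o->ref);
     * vma open:           kref_get(&o->ref);
     * vma close, destroy: kref_put(&o->ref, mapped_obj_release); */
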
+ */ +static void ipath_vma_open(struct vm_area_struct *vma) +{ + struct ipath_mmap_info *ip = vma->vm_private_data; + + kref_get(&ip->ref); + ip->mmap_cnt++; +} + +static void ipath_vma_close(struct vm_area_struct *vma) +{ + struct ipath_mmap_info *ip = vma->vm_private_data; + + ip->mmap_cnt--; + kref_put(&ip->ref, ipath_release_mmap_info); +} + +static struct vm_operations_struct ipath_vm_ops = { + .open = ipath_vma_open, + .close = ipath_vma_close, +}; + +/** + * ipath_mmap - create a new mmap region + * @context: the IB user context of the process making the mmap() call + * @vma: the VMA to be initialized + * Return zero if the mmap is OK. Otherwise, return an errno. + */ +int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) +{ + struct ipath_ibdev *dev = to_idev(context->device); + unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; + unsigned long size = vma->vm_end - vma->vm_start; + struct ipath_mmap_info *ip, **pp; + int ret = -EINVAL; + + /* + * Search the device's list of objects waiting for a mmap call. + * Normally, this list is very short since a call to create a + * CQ, QP, or SRQ is soon followed by a call to mmap(). + */ + spin_lock_irq(&dev->pending_lock); + for (pp = &dev->pending_mmaps; (ip = *pp); pp = &ip->next) { + /* Only the creator is allowed to mmap the object */ + if (context != ip->context || (void *) offset != ip->obj) + continue; + /* Don't allow a mmap larger than the object. */ + if (size > ip->size) + break; + + *pp = ip->next; + spin_unlock_irq(&dev->pending_lock); + + ret = remap_vmalloc_range(vma, ip->obj, 0); + if (ret) + goto done; + vma->vm_ops = &ipath_vm_ops; + vma->vm_private_data = ip; + ipath_vma_open(vma); + goto done; + } + spin_unlock_irq(&dev->pending_lock); +done: + return ret; +} diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c index 4ac31a5da330..b36f6fb3e37a 100644 --- a/drivers/infiniband/hw/ipath/ipath_mr.c +++ b/drivers/infiniband/hw/ipath/ipath_mr.c @@ -36,6 +36,18 @@ #include "ipath_verbs.h" +/* Fast memory region */ +struct ipath_fmr { + struct ib_fmr ibfmr; + u8 page_shift; + struct ipath_mregion mr; /* must be last */ +}; + +static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr) +{ + return container_of(ibfmr, struct ipath_fmr, ibfmr); +} + /** * ipath_get_dma_mr - get a DMA memory region * @pd: protection domain for this memory region diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c index 83e557be591e..224b0f40767f 100644 --- a/drivers/infiniband/hw/ipath/ipath_qp.c +++ b/drivers/infiniband/hw/ipath/ipath_qp.c @@ -35,7 +35,7 @@ #include <linux/vmalloc.h> #include "ipath_verbs.h" -#include "ipath_common.h" +#include "ipath_kernel.h" #define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE) #define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1) @@ -44,19 +44,6 @@ #define find_next_offset(map, off) find_next_zero_bit((map)->page, \ BITS_PER_PAGE, off) -#define TRANS_INVALID 0 -#define TRANS_ANY2RST 1 -#define TRANS_RST2INIT 2 -#define TRANS_INIT2INIT 3 -#define TRANS_INIT2RTR 4 -#define TRANS_RTR2RTS 5 -#define TRANS_RTS2RTS 6 -#define TRANS_SQERR2RTS 7 -#define TRANS_ANY2ERR 8 -#define TRANS_RTS2SQD 9 /* XXX Wait for expected ACKs & signal event */ -#define TRANS_SQD2SQD 10 /* error if not drained & parameter change */ -#define TRANS_SQD2RTS 11 /* error if not drained */ - /* * Convert the AETH credit code into the number of credits. 
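
The pending-mmap search in ipath_mmap() walks the list with a pointer to the link itself (pp), so removing the matched node is a single store and needs no "previous node" bookkeeping. The idiom in generic form (sketch):

    struct node {
            struct node *next;
            int key;
    };

    static struct node *unlink_key(struct node **head, int key)
    {
            struct node **pp, *n;

            for (pp = head; (n = *pp) != NULL; pp = &n->next) {
                    if (n->key == key) {
                            *pp = n->next;  /* splice the node out */
                            return n;
                    }
            }
            return NULL;
    }
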
*/ @@ -287,7 +274,7 @@ void ipath_free_all_qps(struct ipath_qp_table *qpt) free_qpn(qpt, qp->ibqp.qp_num); if (!atomic_dec_and_test(&qp->refcount) || !ipath_destroy_qp(&qp->ibqp)) - _VERBS_INFO("QP memory leak!\n"); + ipath_dbg(KERN_INFO "QP memory leak!\n"); qp = nqp; } } @@ -355,8 +342,10 @@ static void ipath_reset_qp(struct ipath_qp *qp) qp->s_last = 0; qp->s_ssn = 1; qp->s_lsn = 0; - qp->r_rq.head = 0; - qp->r_rq.tail = 0; + if (qp->r_rq.wq) { + qp->r_rq.wq->head = 0; + qp->r_rq.wq->tail = 0; + } qp->r_reuse_sge = 0; } @@ -373,8 +362,8 @@ void ipath_error_qp(struct ipath_qp *qp) struct ipath_ibdev *dev = to_idev(qp->ibqp.device); struct ib_wc wc; - _VERBS_INFO("QP%d/%d in error state\n", - qp->ibqp.qp_num, qp->remote_qpn); + ipath_dbg(KERN_INFO "QP%d/%d in error state\n", + qp->ibqp.qp_num, qp->remote_qpn); spin_lock(&dev->pending_lock); /* XXX What if its already removed by the timeout code? */ @@ -410,15 +399,32 @@ void ipath_error_qp(struct ipath_qp *qp) qp->s_hdrwords = 0; qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; - wc.opcode = IB_WC_RECV; - spin_lock(&qp->r_rq.lock); - while (qp->r_rq.tail != qp->r_rq.head) { - wc.wr_id = get_rwqe_ptr(&qp->r_rq, qp->r_rq.tail)->wr_id; - if (++qp->r_rq.tail >= qp->r_rq.size) - qp->r_rq.tail = 0; - ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); + if (qp->r_rq.wq) { + struct ipath_rwq *wq; + u32 head; + u32 tail; + + spin_lock(&qp->r_rq.lock); + + /* sanity check pointers before trusting them */ + wq = qp->r_rq.wq; + head = wq->head; + if (head >= qp->r_rq.size) + head = 0; + tail = wq->tail; + if (tail >= qp->r_rq.size) + tail = 0; + wc.opcode = IB_WC_RECV; + while (tail != head) { + wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id; + if (++tail >= qp->r_rq.size) + tail = 0; + ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); + } + wq->tail = tail; + + spin_unlock(&qp->r_rq.lock); } - spin_unlock(&qp->r_rq.lock); } /** @@ -426,11 +432,12 @@ void ipath_error_qp(struct ipath_qp *qp) * @ibqp: the queue pair who's attributes we're modifying * @attr: the new attributes * @attr_mask: the mask of attributes to modify + * @udata: user data for ipathverbs.so * * Returns 0 on success, otherwise returns an errno. 
*/ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, - int attr_mask) + int attr_mask, struct ib_udata *udata) { struct ipath_ibdev *dev = to_idev(ibqp->device); struct ipath_qp *qp = to_iqp(ibqp); @@ -448,19 +455,46 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, attr_mask)) goto inval; - if (attr_mask & IB_QP_AV) + if (attr_mask & IB_QP_AV) { if (attr->ah_attr.dlid == 0 || attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE) goto inval; + if ((attr->ah_attr.ah_flags & IB_AH_GRH) && + (attr->ah_attr.grh.sgid_index > 1)) + goto inval; + } + if (attr_mask & IB_QP_PKEY_INDEX) - if (attr->pkey_index >= ipath_layer_get_npkeys(dev->dd)) + if (attr->pkey_index >= ipath_get_npkeys(dev->dd)) goto inval; if (attr_mask & IB_QP_MIN_RNR_TIMER) if (attr->min_rnr_timer > 31) goto inval; + if (attr_mask & IB_QP_PORT) + if (attr->port_num == 0 || + attr->port_num > ibqp->device->phys_port_cnt) + goto inval; + + if (attr_mask & IB_QP_PATH_MTU) + if (attr->path_mtu > IB_MTU_4096) + goto inval; + + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) + if (attr->max_dest_rd_atomic > 1) + goto inval; + + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) + if (attr->max_rd_atomic > 1) + goto inval; + + if (attr_mask & IB_QP_PATH_MIG_STATE) + if (attr->path_mig_state != IB_MIG_MIGRATED && + attr->path_mig_state != IB_MIG_REARM) + goto inval; + switch (new_state) { case IB_QPS_RESET: ipath_reset_qp(qp); @@ -511,6 +545,9 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, if (attr_mask & IB_QP_MIN_RNR_TIMER) qp->r_min_rnr_timer = attr->min_rnr_timer; + if (attr_mask & IB_QP_TIMEOUT) + qp->timeout = attr->timeout; + if (attr_mask & IB_QP_QKEY) qp->qkey = attr->qkey; @@ -543,7 +580,7 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, attr->dest_qp_num = qp->remote_qpn; attr->qp_access_flags = qp->qp_access_flags; attr->cap.max_send_wr = qp->s_size - 1; - attr->cap.max_recv_wr = qp->r_rq.size - 1; + attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1; attr->cap.max_send_sge = qp->s_max_sge; attr->cap.max_recv_sge = qp->r_rq.max_sge; attr->cap.max_inline_data = 0; @@ -557,7 +594,7 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, attr->max_dest_rd_atomic = 1; attr->min_rnr_timer = qp->r_min_rnr_timer; attr->port_num = 1; - attr->timeout = 0; + attr->timeout = qp->timeout; attr->retry_cnt = qp->s_retry_cnt; attr->rnr_retry = qp->s_rnr_retry; attr->alt_port_num = 0; @@ -569,9 +606,10 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, init_attr->recv_cq = qp->ibqp.recv_cq; init_attr->srq = qp->ibqp.srq; init_attr->cap = attr->cap; - init_attr->sq_sig_type = - (qp->s_flags & (1 << IPATH_S_SIGNAL_REQ_WR)) - ? IB_SIGNAL_REQ_WR : 0; + if (qp->s_flags & (1 << IPATH_S_SIGNAL_REQ_WR)) + init_attr->sq_sig_type = IB_SIGNAL_REQ_WR; + else + init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; init_attr->qp_type = qp->ibqp.qp_type; init_attr->port_num = 1; return 0; @@ -596,13 +634,23 @@ __be32 ipath_compute_aeth(struct ipath_qp *qp) } else { u32 min, max, x; u32 credits; - + struct ipath_rwq *wq = qp->r_rq.wq; + u32 head; + u32 tail; + + /* sanity check pointers before trusting them */ + head = wq->head; + if (head >= qp->r_rq.size) + head = 0; + tail = wq->tail; + if (tail >= qp->r_rq.size) + tail = 0; /* * Compute the number of credits available (RWQEs). * XXX Not holding the r_rq.lock here so there is a small * chance that the pair of reads are not atomic. 
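
ipath_compute_aeth() now reads head and tail from a page userspace can mmap and scribble on, hence the clamping before the circular difference: a corrupt index can at worst skew the advertised credit count, never index outside the ring. The computation in isolation (sketch; size is kernel-owned and trusted):

    static u32 ring_credits(u32 head, u32 tail, u32 size)
    {
            u32 credits;

            if (head >= size)       /* don't trust user-writable values */
                    head = 0;
            if (tail >= size)
                    tail = 0;
            credits = head - tail;
            if ((int) credits < 0)
                    credits += size;
            return credits;
    }
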
*/ - credits = qp->r_rq.head - qp->r_rq.tail; + credits = head - tail; if ((int)credits < 0) credits += qp->r_rq.size; /* @@ -679,27 +727,37 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, case IB_QPT_UD: case IB_QPT_SMI: case IB_QPT_GSI: - qp = kmalloc(sizeof(*qp), GFP_KERNEL); + sz = sizeof(*qp); + if (init_attr->srq) { + struct ipath_srq *srq = to_isrq(init_attr->srq); + + sz += sizeof(*qp->r_sg_list) * + srq->rq.max_sge; + } else + sz += sizeof(*qp->r_sg_list) * + init_attr->cap.max_recv_sge; + qp = kmalloc(sz, GFP_KERNEL); if (!qp) { - vfree(swq); ret = ERR_PTR(-ENOMEM); - goto bail; + goto bail_swq; } if (init_attr->srq) { + sz = 0; qp->r_rq.size = 0; qp->r_rq.max_sge = 0; qp->r_rq.wq = NULL; + init_attr->cap.max_recv_wr = 0; + init_attr->cap.max_recv_sge = 0; } else { qp->r_rq.size = init_attr->cap.max_recv_wr + 1; qp->r_rq.max_sge = init_attr->cap.max_recv_sge; - sz = (sizeof(struct ipath_sge) * qp->r_rq.max_sge) + + sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) + sizeof(struct ipath_rwqe); - qp->r_rq.wq = vmalloc(qp->r_rq.size * sz); + qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + + qp->r_rq.size * sz); if (!qp->r_rq.wq) { - kfree(qp); - vfree(swq); ret = ERR_PTR(-ENOMEM); - goto bail; + goto bail_qp; } } @@ -719,24 +777,19 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, qp->s_wq = swq; qp->s_size = init_attr->cap.max_send_wr + 1; qp->s_max_sge = init_attr->cap.max_send_sge; - qp->s_flags = init_attr->sq_sig_type == IB_SIGNAL_REQ_WR ? - 1 << IPATH_S_SIGNAL_REQ_WR : 0; + if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR) + qp->s_flags = 1 << IPATH_S_SIGNAL_REQ_WR; + else + qp->s_flags = 0; dev = to_idev(ibpd->device); err = ipath_alloc_qpn(&dev->qp_table, qp, init_attr->qp_type); if (err) { - vfree(swq); - vfree(qp->r_rq.wq); - kfree(qp); ret = ERR_PTR(err); - goto bail; + goto bail_rwq; } + qp->ip = NULL; ipath_reset_qp(qp); - - /* Tell the core driver that the kernel SMA is present. */ - if (init_attr->qp_type == IB_QPT_SMI) - ipath_layer_set_verbs_flags(dev->dd, - IPATH_VERBS_KERNEL_SMA); break; default: @@ -747,8 +800,63 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, init_attr->cap.max_inline_data = 0; + /* + * Return the address of the RWQ as the offset to mmap. + * See ipath_mmap() for details. + */ + if (udata && udata->outlen >= sizeof(__u64)) { + struct ipath_mmap_info *ip; + __u64 offset = (__u64) qp->r_rq.wq; + int err; + + err = ib_copy_to_udata(udata, &offset, sizeof(offset)); + if (err) { + ret = ERR_PTR(err); + goto bail_rwq; + } + + if (qp->r_rq.wq) { + /* Allocate info for ipath_mmap(). 
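
The reworked error handling in ipath_create_qp() replaces repeated inline frees with a label ladder: each allocation gets a bail_* label that releases it and falls through to the labels for everything allocated earlier, so each failure site jumps to exactly the right unwind depth. The skeleton (hypothetical allocations; storing the pointers somewhere useful is elided):

    static int setup_example(void)
    {
            void *a, *b;
            int ret;

            a = kmalloc(128, GFP_KERNEL);
            if (!a) {
                    ret = -ENOMEM;
                    goto bail;
            }
            b = vmalloc(PAGE_SIZE);
            if (!b) {
                    ret = -ENOMEM;
                    goto bail_a;    /* frees a, then returns */
            }
            return 0;

    bail_a:
            kfree(a);
    bail:
            return ret;
    }
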
*/ + ip = kmalloc(sizeof(*ip), GFP_KERNEL); + if (!ip) { + ret = ERR_PTR(-ENOMEM); + goto bail_rwq; + } + qp->ip = ip; + ip->context = ibpd->uobject->context; + ip->obj = qp->r_rq.wq; + kref_init(&ip->ref); + ip->mmap_cnt = 0; + ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) + + qp->r_rq.size * sz); + spin_lock_irq(&dev->pending_lock); + ip->next = dev->pending_mmaps; + dev->pending_mmaps = ip; + spin_unlock_irq(&dev->pending_lock); + } + } + + spin_lock(&dev->n_qps_lock); + if (dev->n_qps_allocated == ib_ipath_max_qps) { + spin_unlock(&dev->n_qps_lock); + ret = ERR_PTR(-ENOMEM); + goto bail_ip; + } + + dev->n_qps_allocated++; + spin_unlock(&dev->n_qps_lock); + ret = &qp->ibqp; + goto bail; +bail_ip: + kfree(qp->ip); +bail_rwq: + vfree(qp->r_rq.wq); +bail_qp: + kfree(qp); +bail_swq: + vfree(swq); bail: return ret; } @@ -768,15 +876,12 @@ int ipath_destroy_qp(struct ib_qp *ibqp) struct ipath_ibdev *dev = to_idev(ibqp->device); unsigned long flags; - /* Tell the core driver that the kernel SMA is gone. */ - if (qp->ibqp.qp_type == IB_QPT_SMI) - ipath_layer_set_verbs_flags(dev->dd, 0); - - spin_lock_irqsave(&qp->r_rq.lock, flags); - spin_lock(&qp->s_lock); + spin_lock_irqsave(&qp->s_lock, flags); qp->state = IB_QPS_ERR; - spin_unlock(&qp->s_lock); - spin_unlock_irqrestore(&qp->r_rq.lock, flags); + spin_unlock_irqrestore(&qp->s_lock, flags); + spin_lock(&dev->n_qps_lock); + dev->n_qps_allocated--; + spin_unlock(&dev->n_qps_lock); /* Stop the sending tasklet. */ tasklet_kill(&qp->s_task); @@ -797,8 +902,11 @@ int ipath_destroy_qp(struct ib_qp *ibqp) if (atomic_read(&qp->refcount) != 0) ipath_free_qp(&dev->qp_table, qp); + if (qp->ip) + kref_put(&qp->ip->ref, ipath_release_mmap_info); + else + vfree(qp->r_rq.wq); vfree(qp->s_wq); - vfree(qp->r_rq.wq); kfree(qp); return 0; } @@ -850,8 +958,8 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc) struct ipath_ibdev *dev = to_idev(qp->ibqp.device); struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last); - _VERBS_INFO("Send queue error on QP%d/%d: err: %d\n", - qp->ibqp.qp_num, qp->remote_qpn, wc->status); + ipath_dbg(KERN_INFO "Send queue error on QP%d/%d: err: %d\n", + qp->ibqp.qp_num, qp->remote_qpn, wc->status); spin_lock(&dev->pending_lock); /* XXX What if its already removed by the timeout code? 
*/ diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c index 774d1615ce2f..a08654042c03 100644 --- a/drivers/infiniband/hw/ipath/ipath_rc.c +++ b/drivers/infiniband/hw/ipath/ipath_rc.c @@ -32,7 +32,7 @@ */ #include "ipath_verbs.h" -#include "ipath_common.h" +#include "ipath_kernel.h" /* cut down ridiculously long IB macro names */ #define OP(x) IB_OPCODE_RC_##x @@ -540,7 +540,7 @@ static void send_rc_ack(struct ipath_qp *qp) lrh0 = IPATH_LRH_GRH; } /* read pkey_index w/o lock (its atomic) */ - bth0 = ipath_layer_get_pkey(dev->dd, qp->s_pkey_index); + bth0 = ipath_get_pkey(dev->dd, qp->s_pkey_index); if (qp->r_nak_state) ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) | (qp->r_nak_state << @@ -557,7 +557,7 @@ static void send_rc_ack(struct ipath_qp *qp) hdr.lrh[0] = cpu_to_be16(lrh0); hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC); - hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd)); + hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid); ohdr->bth[0] = cpu_to_be32(bth0); ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK); @@ -1323,8 +1323,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, * the eager header buffer size to 56 bytes so the last 4 * bytes of the BTH header (PSN) is in the data buffer. */ - header_in_data = - ipath_layer_get_rcvhdrentsize(dev->dd) == 16; + header_in_data = dev->dd->ipath_rcvhdrentsize == 16; if (header_in_data) { psn = be32_to_cpu(((__be32 *) data)[0]); data += sizeof(__be32); diff --git a/drivers/infiniband/hw/ipath/ipath_registers.h b/drivers/infiniband/hw/ipath/ipath_registers.h index 89df8f5ea998..6e23b3d632b8 100644 --- a/drivers/infiniband/hw/ipath/ipath_registers.h +++ b/drivers/infiniband/hw/ipath/ipath_registers.h @@ -36,8 +36,7 @@ /* * This file should only be included by kernel source, and by the diags. It - * defines the registers, and their contents, for the InfiniPath HT-400 - * chip. + * defines the registers, and their contents, for InfiniPath chips. */ /* @@ -283,10 +282,12 @@ #define INFINIPATH_XGXS_RESET 0x7ULL #define INFINIPATH_XGXS_MDIOADDR_MASK 0xfULL #define INFINIPATH_XGXS_MDIOADDR_SHIFT 4 +#define INFINIPATH_XGXS_RX_POL_SHIFT 19 +#define INFINIPATH_XGXS_RX_POL_MASK 0xfULL #define INFINIPATH_RT_ADDR_MASK 0xFFFFFFFFFFULL /* 40 bits valid */ -/* TID entries (memory), HT400-only */ +/* TID entries (memory), HT-only */ #define INFINIPATH_RT_VALID 0x8000000000000000ULL #define INFINIPATH_RT_ADDR_SHIFT 0 #define INFINIPATH_RT_BUFSIZE_MASK 0x3FFF diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c index 772bc59fb85c..5c1da2d25e03 100644 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c @@ -32,7 +32,7 @@ */ #include "ipath_verbs.h" -#include "ipath_common.h" +#include "ipath_kernel.h" /* * Convert the AETH RNR timeout code into the number of milliseconds. 
@@ -106,6 +106,54 @@ void ipath_insert_rnr_queue(struct ipath_qp *qp) spin_unlock_irqrestore(&dev->pending_lock, flags); } +static int init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe) +{ + struct ipath_ibdev *dev = to_idev(qp->ibqp.device); + int user = to_ipd(qp->ibqp.pd)->user; + int i, j, ret; + struct ib_wc wc; + + qp->r_len = 0; + for (i = j = 0; i < wqe->num_sge; i++) { + if (wqe->sg_list[i].length == 0) + continue; + /* Check LKEY */ + if ((user && wqe->sg_list[i].lkey == 0) || + !ipath_lkey_ok(&dev->lk_table, + &qp->r_sg_list[j], &wqe->sg_list[i], + IB_ACCESS_LOCAL_WRITE)) + goto bad_lkey; + qp->r_len += wqe->sg_list[i].length; + j++; + } + qp->r_sge.sge = qp->r_sg_list[0]; + qp->r_sge.sg_list = qp->r_sg_list + 1; + qp->r_sge.num_sge = j; + ret = 1; + goto bail; + +bad_lkey: + wc.wr_id = wqe->wr_id; + wc.status = IB_WC_LOC_PROT_ERR; + wc.opcode = IB_WC_RECV; + wc.vendor_err = 0; + wc.byte_len = 0; + wc.imm_data = 0; + wc.qp_num = qp->ibqp.qp_num; + wc.src_qp = 0; + wc.wc_flags = 0; + wc.pkey_index = 0; + wc.slid = 0; + wc.sl = 0; + wc.dlid_path_bits = 0; + wc.port_num = 0; + /* Signal solicited completion event. */ + ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); + ret = 0; +bail: + return ret; +} + /** * ipath_get_rwqe - copy the next RWQE into the QP's RWQE * @qp: the QP @@ -119,71 +167,71 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only) { unsigned long flags; struct ipath_rq *rq; + struct ipath_rwq *wq; struct ipath_srq *srq; struct ipath_rwqe *wqe; - int ret = 1; + void (*handler)(struct ib_event *, void *); + u32 tail; + int ret; - if (!qp->ibqp.srq) { + if (qp->ibqp.srq) { + srq = to_isrq(qp->ibqp.srq); + handler = srq->ibsrq.event_handler; + rq = &srq->rq; + } else { + srq = NULL; + handler = NULL; rq = &qp->r_rq; - spin_lock_irqsave(&rq->lock, flags); - - if (unlikely(rq->tail == rq->head)) { - ret = 0; - goto done; - } - wqe = get_rwqe_ptr(rq, rq->tail); - qp->r_wr_id = wqe->wr_id; - if (!wr_id_only) { - qp->r_sge.sge = wqe->sg_list[0]; - qp->r_sge.sg_list = wqe->sg_list + 1; - qp->r_sge.num_sge = wqe->num_sge; - qp->r_len = wqe->length; - } - if (++rq->tail >= rq->size) - rq->tail = 0; - goto done; } - srq = to_isrq(qp->ibqp.srq); - rq = &srq->rq; spin_lock_irqsave(&rq->lock, flags); - - if (unlikely(rq->tail == rq->head)) { - ret = 0; - goto done; - } - wqe = get_rwqe_ptr(rq, rq->tail); + wq = rq->wq; + tail = wq->tail; + /* Validate tail before using it since it is user writable. */ + if (tail >= rq->size) + tail = 0; + do { + if (unlikely(tail == wq->head)) { + spin_unlock_irqrestore(&rq->lock, flags); + ret = 0; + goto bail; + } + wqe = get_rwqe_ptr(rq, tail); + if (++tail >= rq->size) + tail = 0; + } while (!wr_id_only && !init_sge(qp, wqe)); qp->r_wr_id = wqe->wr_id; - if (!wr_id_only) { - qp->r_sge.sge = wqe->sg_list[0]; - qp->r_sge.sg_list = wqe->sg_list + 1; - qp->r_sge.num_sge = wqe->num_sge; - qp->r_len = wqe->length; - } - if (++rq->tail >= rq->size) - rq->tail = 0; - if (srq->ibsrq.event_handler) { - struct ib_event ev; + wq->tail = tail; + + ret = 1; + if (handler) { u32 n; - if (rq->head < rq->tail) - n = rq->size + rq->head - rq->tail; + /* + * validate head pointer value and compute + * the number of remaining WQEs. 
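
The consume loop in the reworked ipath_get_rwqe() clamps the user-writable tail, then pops WQEs until init_sge() accepts one; a WQE whose LKEYs fail validation is completed in error and skipped, so one bad posting cannot wedge the receive queue. The same shape against a toy ring (sketch; validate() stands in for init_sge()):

    struct ring {
            void **entries;
            u32 head, tail, size;
    };

    static int pop_valid(struct ring *r, int (*validate)(void *),
                         void **out)
    {
            u32 tail = (r->tail >= r->size) ? 0 : r->tail;  /* clamp */
            void *e;

            do {
                    if (tail == r->head)
                            return 0;               /* ring empty */
                    e = r->entries[tail];
                    if (++tail >= r->size)
                            tail = 0;
            } while (!validate(e));   /* bad entry flushed; try the next */
            r->tail = tail;
            *out = e;
            return 1;
    }
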
+ */ + n = wq->head; + if (n >= rq->size) + n = 0; + if (n < tail) + n += rq->size - tail; else - n = rq->head - rq->tail; + n -= tail; if (n < srq->limit) { + struct ib_event ev; + srq->limit = 0; spin_unlock_irqrestore(&rq->lock, flags); ev.device = qp->ibqp.device; ev.element.srq = qp->ibqp.srq; ev.event = IB_EVENT_SRQ_LIMIT_REACHED; - srq->ibsrq.event_handler(&ev, - srq->ibsrq.srq_context); + handler(&ev, srq->ibsrq.srq_context); goto bail; } } - -done: spin_unlock_irqrestore(&rq->lock, flags); + bail: return ret; } @@ -422,6 +470,15 @@ done: wake_up(&qp->wait); } +static int want_buffer(struct ipath_devdata *dd) +{ + set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl); + ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, + dd->ipath_sendctrl); + + return 0; +} + /** * ipath_no_bufs_available - tell the layer driver we need buffers * @qp: the QP that caused the problem @@ -438,7 +495,7 @@ void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev) list_add_tail(&qp->piowait, &dev->piowait); spin_unlock_irqrestore(&dev->pending_lock, flags); /* - * Note that as soon as ipath_layer_want_buffer() is called and + * Note that as soon as want_buffer() is called and * possibly before it returns, ipath_ib_piobufavail() * could be called. If we are still in the tasklet function, * tasklet_hi_schedule() will not call us until the next time @@ -448,7 +505,7 @@ void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev) */ clear_bit(IPATH_S_BUSY, &qp->s_flags); tasklet_unlock(&qp->s_task); - ipath_layer_want_buffer(dev->dd); + want_buffer(dev->dd); dev->n_piowait++; } @@ -563,7 +620,7 @@ u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr, hdr->hop_limit = grh->hop_limit; /* The SGID is 32-bit aligned. */ hdr->sgid.global.subnet_prefix = dev->gid_prefix; - hdr->sgid.global.interface_id = ipath_layer_get_guid(dev->dd); + hdr->sgid.global.interface_id = dev->dd->ipath_guid; hdr->dgid = grh->dgid; /* GRH header size in 32-bit words. 
*/ @@ -595,8 +652,7 @@ void ipath_do_ruc_send(unsigned long data) if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags)) goto bail; - if (unlikely(qp->remote_ah_attr.dlid == - ipath_layer_get_lid(dev->dd))) { + if (unlikely(qp->remote_ah_attr.dlid == dev->dd->ipath_lid)) { ipath_ruc_loopback(qp); goto clear; } @@ -663,8 +719,8 @@ again: qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC); - qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd)); - bth0 |= ipath_layer_get_pkey(dev->dd, qp->s_pkey_index); + qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid); + bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index); bth0 |= extra_bytes << 20; ohdr->bth[0] = cpu_to_be32(bth0); ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c index f760434660bd..941e866d9517 100644 --- a/drivers/infiniband/hw/ipath/ipath_srq.c +++ b/drivers/infiniband/hw/ipath/ipath_srq.c @@ -48,66 +48,39 @@ int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr) { struct ipath_srq *srq = to_isrq(ibsrq); - struct ipath_ibdev *dev = to_idev(ibsrq->device); + struct ipath_rwq *wq; unsigned long flags; int ret; for (; wr; wr = wr->next) { struct ipath_rwqe *wqe; u32 next; - int i, j; + int i; - if (wr->num_sge > srq->rq.max_sge) { + if ((unsigned) wr->num_sge > srq->rq.max_sge) { *bad_wr = wr; ret = -ENOMEM; goto bail; } spin_lock_irqsave(&srq->rq.lock, flags); - next = srq->rq.head + 1; + wq = srq->rq.wq; + next = wq->head + 1; if (next >= srq->rq.size) next = 0; - if (next == srq->rq.tail) { + if (next == wq->tail) { spin_unlock_irqrestore(&srq->rq.lock, flags); *bad_wr = wr; ret = -ENOMEM; goto bail; } - wqe = get_rwqe_ptr(&srq->rq, srq->rq.head); + wqe = get_rwqe_ptr(&srq->rq, wq->head); wqe->wr_id = wr->wr_id; - wqe->sg_list[0].mr = NULL; - wqe->sg_list[0].vaddr = NULL; - wqe->sg_list[0].length = 0; - wqe->sg_list[0].sge_length = 0; - wqe->length = 0; - for (i = 0, j = 0; i < wr->num_sge; i++) { - /* Check LKEY */ - if (to_ipd(srq->ibsrq.pd)->user && - wr->sg_list[i].lkey == 0) { - spin_unlock_irqrestore(&srq->rq.lock, - flags); - *bad_wr = wr; - ret = -EINVAL; - goto bail; - } - if (wr->sg_list[i].length == 0) - continue; - if (!ipath_lkey_ok(&dev->lk_table, - &wqe->sg_list[j], - &wr->sg_list[i], - IB_ACCESS_LOCAL_WRITE)) { - spin_unlock_irqrestore(&srq->rq.lock, - flags); - *bad_wr = wr; - ret = -EINVAL; - goto bail; - } - wqe->length += wr->sg_list[i].length; - j++; - } - wqe->num_sge = j; - srq->rq.head = next; + wqe->num_sge = wr->num_sge; + for (i = 0; i < wr->num_sge; i++) + wqe->sg_list[i] = wr->sg_list[i]; + wq->head = next; spin_unlock_irqrestore(&srq->rq.lock, flags); } ret = 0; @@ -133,53 +106,95 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd, if (dev->n_srqs_allocated == ib_ipath_max_srqs) { ret = ERR_PTR(-ENOMEM); - goto bail; + goto done; } if (srq_init_attr->attr.max_wr == 0) { ret = ERR_PTR(-EINVAL); - goto bail; + goto done; } if ((srq_init_attr->attr.max_sge > ib_ipath_max_srq_sges) || (srq_init_attr->attr.max_wr > ib_ipath_max_srq_wrs)) { ret = ERR_PTR(-EINVAL); - goto bail; + goto done; } srq = kmalloc(sizeof(*srq), GFP_KERNEL); if (!srq) { ret = ERR_PTR(-ENOMEM); - goto bail; + goto done; } /* * Need to use vmalloc() if we want to support large #s of entries. 
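[Editor's note: vmalloc_user() is what makes the single-allocation design below practical. A kernel-style sketch of the pattern, not the driver's literal code:]

#include <linux/types.h>
#include <linux/vmalloc.h>

struct ring {
        u32 head;
        u32 tail;
        char entries[0];        /* size * stride bytes follow */
};

static struct ring *ring_alloc(u32 size, u32 stride)
{
        /*
         * vmalloc_user() returns page-aligned, zeroed memory that can
         * later be exposed to a process with remap_vmalloc_range();
         * kmalloc() would cap the queue at a physically contiguous
         * allocation. One allocation holds the indices and the entries
         * so a single mmap() gives userspace everything it needs.
         */
        return vmalloc_user(sizeof(struct ring) + (size_t)size * stride);
}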
*/ srq->rq.size = srq_init_attr->attr.max_wr + 1; - sz = sizeof(struct ipath_sge) * srq_init_attr->attr.max_sge + + srq->rq.max_sge = srq_init_attr->attr.max_sge; + sz = sizeof(struct ib_sge) * srq->rq.max_sge + sizeof(struct ipath_rwqe); - srq->rq.wq = vmalloc(srq->rq.size * sz); + srq->rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + srq->rq.size * sz); if (!srq->rq.wq) { - kfree(srq); ret = ERR_PTR(-ENOMEM); - goto bail; + goto bail_srq; } /* + * Return the address of the RWQ as the offset to mmap. + * See ipath_mmap() for details. + */ + if (udata && udata->outlen >= sizeof(__u64)) { + struct ipath_mmap_info *ip; + __u64 offset = (__u64) srq->rq.wq; + int err; + + err = ib_copy_to_udata(udata, &offset, sizeof(offset)); + if (err) { + ret = ERR_PTR(err); + goto bail_wq; + } + + /* Allocate info for ipath_mmap(). */ + ip = kmalloc(sizeof(*ip), GFP_KERNEL); + if (!ip) { + ret = ERR_PTR(-ENOMEM); + goto bail_wq; + } + srq->ip = ip; + ip->context = ibpd->uobject->context; + ip->obj = srq->rq.wq; + kref_init(&ip->ref); + ip->mmap_cnt = 0; + ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) + + srq->rq.size * sz); + spin_lock_irq(&dev->pending_lock); + ip->next = dev->pending_mmaps; + dev->pending_mmaps = ip; + spin_unlock_irq(&dev->pending_lock); + } else + srq->ip = NULL; + + /* * ib_create_srq() will initialize srq->ibsrq. */ spin_lock_init(&srq->rq.lock); - srq->rq.head = 0; - srq->rq.tail = 0; + srq->rq.wq->head = 0; + srq->rq.wq->tail = 0; srq->rq.max_sge = srq_init_attr->attr.max_sge; srq->limit = srq_init_attr->attr.srq_limit; + dev->n_srqs_allocated++; + ret = &srq->ibsrq; + goto done; - dev->n_srqs_allocated++; +bail_wq: + vfree(srq->rq.wq); -bail: +bail_srq: + kfree(srq); + +done: return ret; } @@ -188,83 +203,130 @@ bail: * @ibsrq: the SRQ to modify * @attr: the new attributes of the SRQ * @attr_mask: indicates which attributes to modify + * @udata: user data for ipathverbs.so */ int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, - enum ib_srq_attr_mask attr_mask) + enum ib_srq_attr_mask attr_mask, + struct ib_udata *udata) { struct ipath_srq *srq = to_isrq(ibsrq); - unsigned long flags; - int ret; + int ret = 0; - if (attr_mask & IB_SRQ_MAX_WR) - if ((attr->max_wr > ib_ipath_max_srq_wrs) || - (attr->max_sge > srq->rq.max_sge)) { - ret = -EINVAL; - goto bail; - } + if (attr_mask & IB_SRQ_MAX_WR) { + struct ipath_rwq *owq; + struct ipath_rwq *wq; + struct ipath_rwqe *p; + u32 sz, size, n, head, tail; - if (attr_mask & IB_SRQ_LIMIT) - if (attr->srq_limit >= srq->rq.size) { + /* Check that the requested sizes are below the limits. */ + if ((attr->max_wr > ib_ipath_max_srq_wrs) || + ((attr_mask & IB_SRQ_LIMIT) ? + attr->srq_limit : srq->limit) > attr->max_wr) { ret = -EINVAL; goto bail; } - if (attr_mask & IB_SRQ_MAX_WR) { - struct ipath_rwqe *wq, *p; - u32 sz, size, n; - sz = sizeof(struct ipath_rwqe) + - attr->max_sge * sizeof(struct ipath_sge); + srq->rq.max_sge * sizeof(struct ib_sge); size = attr->max_wr + 1; - wq = vmalloc(size * sz); + wq = vmalloc_user(sizeof(struct ipath_rwq) + size * sz); if (!wq) { ret = -ENOMEM; goto bail; } - spin_lock_irqsave(&srq->rq.lock, flags); - if (srq->rq.head < srq->rq.tail) - n = srq->rq.size + srq->rq.head - srq->rq.tail; + /* + * Return the address of the RWQ as the offset to mmap. + * See ipath_mmap() for details. 
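[Editor's note: the offset handed back through ib_copy_to_udata() above is consumed on the other side of the ABI. A hypothetical userspace-side sketch; the function and parameter names here are illustrative, not taken from the actual ipathverbs library:]

#include <stdint.h>
#include <sys/mman.h>

struct user_rwq {
        uint32_t head;
        uint32_t tail;
        /* variable-size RWQE entries follow */
};

/* cmd_fd is the uverbs command file descriptor; offset is the opaque
 * value the kernel returned (the queue's kernel virtual address). */
static struct user_rwq *map_rwq(int cmd_fd, uint64_t offset, size_t size)
{
        void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                       cmd_fd, (off_t)offset);

        return p == MAP_FAILED ? NULL : p;
}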
+ */ + if (udata && udata->inlen >= sizeof(__u64)) { + __u64 offset_addr; + __u64 offset = (__u64) wq; + + ret = ib_copy_from_udata(&offset_addr, udata, + sizeof(offset_addr)); + if (ret) { + vfree(wq); + goto bail; + } + udata->outbuf = (void __user *) offset_addr; + ret = ib_copy_to_udata(udata, &offset, + sizeof(offset)); + if (ret) { + vfree(wq); + goto bail; + } + } + + spin_lock_irq(&srq->rq.lock); + /* + * validate head pointer value and compute + * the number of remaining WQEs. + */ + owq = srq->rq.wq; + head = owq->head; + if (head >= srq->rq.size) + head = 0; + tail = owq->tail; + if (tail >= srq->rq.size) + tail = 0; + n = head; + if (n < tail) + n += srq->rq.size - tail; else - n = srq->rq.head - srq->rq.tail; - if (size <= n || size <= srq->limit) { - spin_unlock_irqrestore(&srq->rq.lock, flags); + n -= tail; + if (size <= n) { + spin_unlock_irq(&srq->rq.lock); vfree(wq); ret = -EINVAL; goto bail; } n = 0; - p = wq; - while (srq->rq.tail != srq->rq.head) { + p = wq->wq; + while (tail != head) { struct ipath_rwqe *wqe; int i; - wqe = get_rwqe_ptr(&srq->rq, srq->rq.tail); + wqe = get_rwqe_ptr(&srq->rq, tail); p->wr_id = wqe->wr_id; - p->length = wqe->length; p->num_sge = wqe->num_sge; for (i = 0; i < wqe->num_sge; i++) p->sg_list[i] = wqe->sg_list[i]; n++; p = (struct ipath_rwqe *)((char *) p + sz); - if (++srq->rq.tail >= srq->rq.size) - srq->rq.tail = 0; + if (++tail >= srq->rq.size) + tail = 0; } - vfree(srq->rq.wq); srq->rq.wq = wq; srq->rq.size = size; - srq->rq.head = n; - srq->rq.tail = 0; - srq->rq.max_sge = attr->max_sge; - spin_unlock_irqrestore(&srq->rq.lock, flags); - } - - if (attr_mask & IB_SRQ_LIMIT) { - spin_lock_irqsave(&srq->rq.lock, flags); - srq->limit = attr->srq_limit; - spin_unlock_irqrestore(&srq->rq.lock, flags); + wq->head = n; + wq->tail = 0; + if (attr_mask & IB_SRQ_LIMIT) + srq->limit = attr->srq_limit; + spin_unlock_irq(&srq->rq.lock); + + vfree(owq); + + if (srq->ip) { + struct ipath_mmap_info *ip = srq->ip; + struct ipath_ibdev *dev = to_idev(srq->ibsrq.device); + + ip->obj = wq; + ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) + + size * sz); + spin_lock_irq(&dev->pending_lock); + ip->next = dev->pending_mmaps; + dev->pending_mmaps = ip; + spin_unlock_irq(&dev->pending_lock); + } + } else if (attr_mask & IB_SRQ_LIMIT) { + spin_lock_irq(&srq->rq.lock); + if (attr->srq_limit >= srq->rq.size) + ret = -EINVAL; + else + srq->limit = attr->srq_limit; + spin_unlock_irq(&srq->rq.lock); } - ret = 0; bail: return ret; diff --git a/drivers/infiniband/hw/ipath/ipath_stats.c b/drivers/infiniband/hw/ipath/ipath_stats.c index 70351b7e35c0..30a825928fcf 100644 --- a/drivers/infiniband/hw/ipath/ipath_stats.c +++ b/drivers/infiniband/hw/ipath/ipath_stats.c @@ -271,33 +271,6 @@ void ipath_get_faststats(unsigned long opaque) } } - if (dd->ipath_nosma_bufs) { - dd->ipath_nosma_secs += 5; - if (dd->ipath_nosma_secs >= 30) { - ipath_cdbg(SMA, "No SMA bufs avail %u seconds; " - "cancelling pending sends\n", - dd->ipath_nosma_secs); - /* - * issue an abort as well, in case we have a packet - * stuck in launch fifo. This could corrupt an - * outgoing user packet in the worst case, - * but this is a pretty catastrophic, anyway. 
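[Editor's note: the resize path in ipath_modify_srq() above compacts the old ring into the new one in consumption order, so the new tail restarts at 0 and the new head is the count of preserved entries. A runnable sketch of that migration, assuming the same head/tail convention:]

#include <assert.h>
#include <stdint.h>

static uint32_t ring_migrate(const int *old, uint32_t size,
                             uint32_t head, uint32_t tail, int *dst)
{
        uint32_t n = 0;

        while (tail != head) {
                dst[n++] = old[tail];
                if (++tail >= size)
                        tail = 0;
        }
        return n;       /* becomes the new head; new tail is 0 */
}

int main(void)
{
        int old[4] = { 10, 11, 12, 13 }, fresh[8];
        /* two queued entries wrapping the end: slots 3 and 0 */
        uint32_t head = ring_migrate(old, 4, 1, 3, fresh);

        assert(head == 2 && fresh[0] == 13 && fresh[1] == 10);
        return 0;
}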
- */ - ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, - INFINIPATH_S_ABORT); - ipath_disarm_piobufs(dd, dd->ipath_lastport_piobuf, - dd->ipath_piobcnt2k + - dd->ipath_piobcnt4k - - dd->ipath_lastport_piobuf); - /* start again, if necessary */ - dd->ipath_nosma_secs = 0; - } else - ipath_cdbg(SMA, "No SMA bufs avail %u tries, " - "after %u seconds\n", - dd->ipath_nosma_bufs, - dd->ipath_nosma_secs); - } - done: mod_timer(&dd->ipath_stats_timer, jiffies + HZ * 5); } diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c index b98821d7801d..e299148c4b68 100644 --- a/drivers/infiniband/hw/ipath/ipath_sysfs.c +++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c @@ -35,7 +35,6 @@ #include <linux/pci.h> #include "ipath_kernel.h" -#include "ipath_layer.h" #include "ipath_common.h" /** @@ -76,7 +75,7 @@ bail: static ssize_t show_version(struct device_driver *dev, char *buf) { /* The string printed here is already newline-terminated. */ - return scnprintf(buf, PAGE_SIZE, "%s", ipath_core_version); + return scnprintf(buf, PAGE_SIZE, "%s", ib_ipath_version); } static ssize_t show_num_units(struct device_driver *dev, char *buf) @@ -108,8 +107,8 @@ static const char *ipath_status_str[] = { "Initted", "Disabled", "Admin_Disabled", - "OIB_SMA", - "SMA", + "", /* This used to be the old "OIB_SMA" status. */ + "", /* This used to be the old "SMA" status. */ "Present", "IB_link_up", "IB_configured", @@ -227,7 +226,6 @@ static ssize_t store_mlid(struct device *dev, unit = dd->ipath_unit; dd->ipath_mlid = mlid; - ipath_layer_intr(dd, IPATH_LAYER_INT_BCAST); goto bail; invalid: @@ -467,7 +465,7 @@ static ssize_t store_link_state(struct device *dev, if (ret < 0) goto invalid; - r = ipath_layer_set_linkstate(dd, state); + r = ipath_set_linkstate(dd, state); if (r < 0) { ret = r; goto bail; @@ -502,7 +500,7 @@ static ssize_t store_mtu(struct device *dev, if (ret < 0) goto invalid; - r = ipath_layer_set_mtu(dd, mtu); + r = ipath_set_mtu(dd, mtu); if (r < 0) ret = r; @@ -563,6 +561,33 @@ bail: return ret; } +static ssize_t store_rx_pol_inv(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct ipath_devdata *dd = dev_get_drvdata(dev); + int ret, r; + u16 val; + + ret = ipath_parse_ushort(buf, &val); + if (ret < 0) + goto invalid; + + r = ipath_set_rx_pol_inv(dd, val); + if (r < 0) { + ret = r; + goto bail; + } + + goto bail; +invalid: + ipath_dev_err(dd, "attempt to set invalid Rx Polarity invert\n"); +bail: + return ret; +} + + static DRIVER_ATTR(num_units, S_IRUGO, show_num_units, NULL); static DRIVER_ATTR(version, S_IRUGO, show_version, NULL); @@ -589,6 +614,7 @@ static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); static DEVICE_ATTR(status_str, S_IRUGO, show_status_str, NULL); static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL); static DEVICE_ATTR(unit, S_IRUGO, show_unit, NULL); +static DEVICE_ATTR(rx_pol_inv, S_IWUSR, NULL, store_rx_pol_inv); static struct attribute *dev_attributes[] = { &dev_attr_guid.attr, @@ -603,6 +629,7 @@ static struct attribute *dev_attributes[] = { &dev_attr_boardversion.attr, &dev_attr_unit.attr, &dev_attr_enabled.attr, + &dev_attr_rx_pol_inv.attr, NULL }; diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c index c33abea2d5a7..0fd3cded16ba 100644 --- a/drivers/infiniband/hw/ipath/ipath_uc.c +++ b/drivers/infiniband/hw/ipath/ipath_uc.c @@ -32,7 +32,7 @@ */ #include "ipath_verbs.h" -#include "ipath_common.h" +#include "ipath_kernel.h" /* cut 
down ridiculously long IB macro names */ #define OP(x) IB_OPCODE_UC_##x @@ -261,8 +261,7 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, * size to 56 bytes so the last 4 bytes of * the BTH header (PSN) is in the data buffer. */ - header_in_data = - ipath_layer_get_rcvhdrentsize(dev->dd) == 16; + header_in_data = dev->dd->ipath_rcvhdrentsize == 16; if (header_in_data) { psn = be32_to_cpu(((__be32 *) data)[0]); data += sizeof(__be32); diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c index 3466129af804..6991d1d74e3c 100644 --- a/drivers/infiniband/hw/ipath/ipath_ud.c +++ b/drivers/infiniband/hw/ipath/ipath_ud.c @@ -34,7 +34,54 @@ #include <rdma/ib_smi.h> #include "ipath_verbs.h" -#include "ipath_common.h" +#include "ipath_kernel.h" + +static int init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe, + u32 *lengthp, struct ipath_sge_state *ss) +{ + struct ipath_ibdev *dev = to_idev(qp->ibqp.device); + int user = to_ipd(qp->ibqp.pd)->user; + int i, j, ret; + struct ib_wc wc; + + *lengthp = 0; + for (i = j = 0; i < wqe->num_sge; i++) { + if (wqe->sg_list[i].length == 0) + continue; + /* Check LKEY */ + if ((user && wqe->sg_list[i].lkey == 0) || + !ipath_lkey_ok(&dev->lk_table, + j ? &ss->sg_list[j - 1] : &ss->sge, + &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE)) + goto bad_lkey; + *lengthp += wqe->sg_list[i].length; + j++; + } + ss->num_sge = j; + ret = 1; + goto bail; + +bad_lkey: + wc.wr_id = wqe->wr_id; + wc.status = IB_WC_LOC_PROT_ERR; + wc.opcode = IB_WC_RECV; + wc.vendor_err = 0; + wc.byte_len = 0; + wc.imm_data = 0; + wc.qp_num = qp->ibqp.qp_num; + wc.src_qp = 0; + wc.wc_flags = 0; + wc.pkey_index = 0; + wc.slid = 0; + wc.sl = 0; + wc.dlid_path_bits = 0; + wc.port_num = 0; + /* Signal solicited completion event. */ + ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); + ret = 0; +bail: + return ret; +} /** * ipath_ud_loopback - handle send on loopback QPs @@ -46,6 +93,8 @@ * * This is called from ipath_post_ud_send() to forward a WQE addressed * to the same HCA. + * Note that the receive interrupt handler may be calling ipath_ud_rcv() + * while this is being called. */ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_sge_state *ss, @@ -60,7 +109,11 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_srq *srq; struct ipath_sge_state rsge; struct ipath_sge *sge; + struct ipath_rwq *wq; struct ipath_rwqe *wqe; + void (*handler)(struct ib_event *, void *); + u32 tail; + u32 rlen; qp = ipath_lookup_qpn(&dev->qp_table, wr->wr.ud.remote_qpn); if (!qp) @@ -94,6 +147,13 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, wc->imm_data = 0; } + if (wr->num_sge > 1) { + rsge.sg_list = kmalloc((wr->num_sge - 1) * + sizeof(struct ipath_sge), + GFP_ATOMIC); + } else + rsge.sg_list = NULL; + /* * Get the next work request entry to find where to put the data. 
* Note that it is safe to drop the lock after changing rq->tail @@ -101,37 +161,52 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, */ if (qp->ibqp.srq) { srq = to_isrq(qp->ibqp.srq); + handler = srq->ibsrq.event_handler; rq = &srq->rq; } else { srq = NULL; + handler = NULL; rq = &qp->r_rq; } + spin_lock_irqsave(&rq->lock, flags); - if (rq->tail == rq->head) { - spin_unlock_irqrestore(&rq->lock, flags); - dev->n_pkt_drops++; - goto done; + wq = rq->wq; + tail = wq->tail; + while (1) { + if (unlikely(tail == wq->head)) { + spin_unlock_irqrestore(&rq->lock, flags); + dev->n_pkt_drops++; + goto bail_sge; + } + wqe = get_rwqe_ptr(rq, tail); + if (++tail >= rq->size) + tail = 0; + if (init_sge(qp, wqe, &rlen, &rsge)) + break; + wq->tail = tail; } /* Silently drop packets which are too big. */ - wqe = get_rwqe_ptr(rq, rq->tail); - if (wc->byte_len > wqe->length) { + if (wc->byte_len > rlen) { spin_unlock_irqrestore(&rq->lock, flags); dev->n_pkt_drops++; - goto done; + goto bail_sge; } + wq->tail = tail; wc->wr_id = wqe->wr_id; - rsge.sge = wqe->sg_list[0]; - rsge.sg_list = wqe->sg_list + 1; - rsge.num_sge = wqe->num_sge; - if (++rq->tail >= rq->size) - rq->tail = 0; - if (srq && srq->ibsrq.event_handler) { + if (handler) { u32 n; - if (rq->head < rq->tail) - n = rq->size + rq->head - rq->tail; + /* + * validate head pointer value and compute + * the number of remaining WQEs. + */ + n = wq->head; + if (n >= rq->size) + n = 0; + if (n < tail) + n += rq->size - tail; else - n = rq->head - rq->tail; + n -= tail; if (n < srq->limit) { struct ib_event ev; @@ -140,12 +215,12 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, ev.device = qp->ibqp.device; ev.element.srq = qp->ibqp.srq; ev.event = IB_EVENT_SRQ_LIMIT_REACHED; - srq->ibsrq.event_handler(&ev, - srq->ibsrq.srq_context); + handler(&ev, srq->ibsrq.srq_context); } else spin_unlock_irqrestore(&rq->lock, flags); } else spin_unlock_irqrestore(&rq->lock, flags); + ah_attr = &to_iah(wr->wr.ud.ah)->attr; if (ah_attr->ah_flags & IB_AH_GRH) { ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh)); @@ -186,7 +261,7 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, wc->src_qp = sqp->ibqp.qp_num; /* XXX do we know which pkey matched? Only needed for GSI. */ wc->pkey_index = 0; - wc->slid = ipath_layer_get_lid(dev->dd) | + wc->slid = dev->dd->ipath_lid | (ah_attr->src_path_bits & ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1)); wc->sl = ah_attr->sl; @@ -196,6 +271,8 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, ipath_cq_enter(to_icq(qp->ibqp.recv_cq), wc, wr->send_flags & IB_SEND_SOLICITED); +bail_sge: + kfree(rsge.sg_list); done: if (atomic_dec_and_test(&qp->refcount)) wake_up(&qp->wait); @@ -276,7 +353,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr) ss.num_sge++; } /* Check for invalid packet size. */ - if (len > ipath_layer_get_ibmtu(dev->dd)) { + if (len > dev->dd->ipath_ibmtu) { ret = -EINVAL; goto bail; } @@ -298,7 +375,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr) dev->n_unicast_xmit++; lid = ah_attr->dlid & ~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1); - if (unlikely(lid == ipath_layer_get_lid(dev->dd))) { + if (unlikely(lid == dev->dd->ipath_lid)) { /* * Pass in an uninitialized ib_wc to save stack * space. 
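[Editor's note: rsge above is allocated with only num_sge - 1 entries because of a layout convention used throughout this driver: the first/current SGE is held inline in the state struct and only the remainder needs heap space. A minimal sketch of that convention, with illustrative names:]

#include <stdint.h>
#include <stdlib.h>

struct sge { void *vaddr; uint32_t length; };

struct sge_state {
        struct sge sge;         /* first/current SGE, kept inline */
        struct sge *sg_list;    /* the remaining num_sge - 1 entries */
        uint8_t num_sge;
};

static int sge_state_init(struct sge_state *ss, unsigned num_sge)
{
        /* Only num_sge - 1 entries need allocating; one lives inline. */
        ss->sg_list = num_sge > 1 ?
                malloc((num_sge - 1) * sizeof(struct sge)) : NULL;
        ss->num_sge = num_sge;
        return num_sge <= 1 || ss->sg_list != NULL;
}

int main(void)
{
        struct sge_state ss;

        return sge_state_init(&ss, 3) ? 0 : 1;
}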
@@ -327,7 +404,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr) qp->s_hdr.u.l.grh.sgid.global.subnet_prefix = dev->gid_prefix; qp->s_hdr.u.l.grh.sgid.global.interface_id = - ipath_layer_get_guid(dev->dd); + dev->dd->ipath_guid; qp->s_hdr.u.l.grh.dgid = ah_attr->grh.dgid; /* * Don't worry about sending to locally attached multicast @@ -357,7 +434,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr) qp->s_hdr.lrh[0] = cpu_to_be16(lrh0); qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */ qp->s_hdr.lrh[2] = cpu_to_be16(hwords + nwords + SIZE_OF_CRC); - lid = ipath_layer_get_lid(dev->dd); + lid = dev->dd->ipath_lid; if (lid) { lid |= ah_attr->src_path_bits & ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1); @@ -368,7 +445,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr) bth0 |= 1 << 23; bth0 |= extra_bytes << 20; bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY : - ipath_layer_get_pkey(dev->dd, qp->s_pkey_index); + ipath_get_pkey(dev->dd, qp->s_pkey_index); ohdr->bth[0] = cpu_to_be32(bth0); /* * Use the multicast QP if the destination LID is a multicast LID. @@ -433,13 +510,9 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, int opcode; u32 hdrsize; u32 pad; - unsigned long flags; struct ib_wc wc; u32 qkey; u32 src_qp; - struct ipath_rq *rq; - struct ipath_srq *srq; - struct ipath_rwqe *wqe; u16 dlid; int header_in_data; @@ -458,8 +531,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, * the eager header buffer size to 56 bytes so the last 12 * bytes of the IB header is in the data buffer. */ - header_in_data = - ipath_layer_get_rcvhdrentsize(dev->dd) == 16; + header_in_data = dev->dd->ipath_rcvhdrentsize == 16; if (header_in_data) { qkey = be32_to_cpu(((__be32 *) data)[1]); src_qp = be32_to_cpu(((__be32 *) data)[2]); @@ -547,19 +619,10 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, /* * Get the next work request entry to find where to put the data. - * Note that it is safe to drop the lock after changing rq->tail - * since ipath_post_receive() won't fill the empty slot. */ - if (qp->ibqp.srq) { - srq = to_isrq(qp->ibqp.srq); - rq = &srq->rq; - } else { - srq = NULL; - rq = &qp->r_rq; - } - spin_lock_irqsave(&rq->lock, flags); - if (rq->tail == rq->head) { - spin_unlock_irqrestore(&rq->lock, flags); + if (qp->r_reuse_sge) + qp->r_reuse_sge = 0; + else if (!ipath_get_rwqe(qp, 0)) { /* * Count VL15 packets dropped due to no receive buffer. * Otherwise, count them as buffer overruns since usually, @@ -573,39 +636,11 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, goto bail; } /* Silently drop packets which are too big. 
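[Editor's note: further down in this hunk, ipath_ud_rcv() starts consulting qp->r_reuse_sge: when a too-big packet is dropped after a receive WQE was already dequeued, the WQE's SGE state is still intact and can be recycled for the next packet. A runnable sketch of that idea, with dequeue_rwqe() as a hypothetical stand-in for ipath_get_rwqe():]

#include <stdbool.h>

static bool dequeue_rwqe(void) { return true; }  /* hypothetical helper */

struct rx_state { bool reuse_sge; };

static bool get_rx_buffer(struct rx_state *rx)
{
        if (rx->reuse_sge) {
                /* A prior oversized packet was dropped after its WQE
                 * was dequeued; recycle that WQE's untouched SGE state
                 * instead of consuming a fresh queue entry. */
                rx->reuse_sge = false;
                return true;
        }
        return dequeue_rwqe();
}

int main(void)
{
        struct rx_state rx = { .reuse_sge = true };

        return get_rx_buffer(&rx) && !rx.reuse_sge ? 0 : 1;
}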
*/ - wqe = get_rwqe_ptr(rq, rq->tail); - if (wc.byte_len > wqe->length) { - spin_unlock_irqrestore(&rq->lock, flags); + if (wc.byte_len > qp->r_len) { + qp->r_reuse_sge = 1; dev->n_pkt_drops++; goto bail; } - wc.wr_id = wqe->wr_id; - qp->r_sge.sge = wqe->sg_list[0]; - qp->r_sge.sg_list = wqe->sg_list + 1; - qp->r_sge.num_sge = wqe->num_sge; - if (++rq->tail >= rq->size) - rq->tail = 0; - if (srq && srq->ibsrq.event_handler) { - u32 n; - - if (rq->head < rq->tail) - n = rq->size + rq->head - rq->tail; - else - n = rq->head - rq->tail; - if (n < srq->limit) { - struct ib_event ev; - - srq->limit = 0; - spin_unlock_irqrestore(&rq->lock, flags); - ev.device = qp->ibqp.device; - ev.element.srq = qp->ibqp.srq; - ev.event = IB_EVENT_SRQ_LIMIT_REACHED; - srq->ibsrq.event_handler(&ev, - srq->ibsrq.srq_context); - } else - spin_unlock_irqrestore(&rq->lock, flags); - } else - spin_unlock_irqrestore(&rq->lock, flags); if (has_grh) { ipath_copy_sge(&qp->r_sge, &hdr->u.l.grh, sizeof(struct ib_grh)); @@ -614,6 +649,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh)); ipath_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh)); + wc.wr_id = qp->r_wr_id; wc.status = IB_WC_SUCCESS; wc.opcode = IB_WC_RECV; wc.vendor_err = 0; diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c index d70a9b6b5239..b8381c5e72bd 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs.c @@ -33,15 +33,13 @@ #include <rdma/ib_mad.h> #include <rdma/ib_user_verbs.h> +#include <linux/io.h> #include <linux/utsname.h> #include "ipath_kernel.h" #include "ipath_verbs.h" #include "ipath_common.h" -/* Not static, because we don't want the compiler removing it */ -const char ipath_verbs_version[] = "ipath_verbs " IPATH_IDSTR; - static unsigned int ib_ipath_qp_table_size = 251; module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO); MODULE_PARM_DESC(qp_table_size, "QP table size"); @@ -52,10 +50,6 @@ module_param_named(lkey_table_size, ib_ipath_lkey_table_size, uint, MODULE_PARM_DESC(lkey_table_size, "LKEY table size in bits (2^n, 1 <= n <= 23)"); -unsigned int ib_ipath_debug; /* debug mask */ -module_param_named(debug, ib_ipath_debug, uint, S_IWUSR | S_IRUGO); -MODULE_PARM_DESC(debug, "Verbs debug mask"); - static unsigned int ib_ipath_max_pds = 0xFFFF; module_param_named(max_pds, ib_ipath_max_pds, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_pds, @@ -79,6 +73,10 @@ module_param_named(max_qp_wrs, ib_ipath_max_qp_wrs, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support"); +unsigned int ib_ipath_max_qps = 16384; +module_param_named(max_qps, ib_ipath_max_qps, uint, S_IWUSR | S_IRUGO); +MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support"); + unsigned int ib_ipath_max_sges = 0x60; module_param_named(max_sges, ib_ipath_max_sges, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support"); @@ -109,9 +107,9 @@ module_param_named(max_srq_wrs, ib_ipath_max_srq_wrs, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs support"); -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("QLogic <support@pathscale.com>"); -MODULE_DESCRIPTION("QLogic InfiniPath driver"); +static unsigned int ib_ipath_disable_sma; +module_param_named(disable_sma, ib_ipath_disable_sma, uint, S_IWUSR | S_IRUGO); +MODULE_PARM_DESC(ib_ipath_disable_sma, "Disable the SMA"); const int 
ib_ipath_state_ops[IB_QPS_ERR + 1] = { [IB_QPS_RESET] = 0, @@ -125,6 +123,16 @@ const int ib_ipath_state_ops[IB_QPS_ERR + 1] = { [IB_QPS_ERR] = 0, }; +struct ipath_ucontext { + struct ib_ucontext ibucontext; +}; + +static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext + *ibucontext) +{ + return container_of(ibucontext, struct ipath_ucontext, ibucontext); +} + /* * Translate ib_wr_opcode into ib_wc_opcode. */ @@ -277,11 +285,12 @@ static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr) { struct ipath_qp *qp = to_iqp(ibqp); + struct ipath_rwq *wq = qp->r_rq.wq; unsigned long flags; int ret; /* Check that state is OK to post receive. */ - if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK)) { + if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK) || !wq) { *bad_wr = wr; ret = -EINVAL; goto bail; @@ -290,59 +299,31 @@ static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, for (; wr; wr = wr->next) { struct ipath_rwqe *wqe; u32 next; - int i, j; + int i; - if (wr->num_sge > qp->r_rq.max_sge) { + if ((unsigned) wr->num_sge > qp->r_rq.max_sge) { *bad_wr = wr; ret = -ENOMEM; goto bail; } spin_lock_irqsave(&qp->r_rq.lock, flags); - next = qp->r_rq.head + 1; + next = wq->head + 1; if (next >= qp->r_rq.size) next = 0; - if (next == qp->r_rq.tail) { + if (next == wq->tail) { spin_unlock_irqrestore(&qp->r_rq.lock, flags); *bad_wr = wr; ret = -ENOMEM; goto bail; } - wqe = get_rwqe_ptr(&qp->r_rq, qp->r_rq.head); + wqe = get_rwqe_ptr(&qp->r_rq, wq->head); wqe->wr_id = wr->wr_id; - wqe->sg_list[0].mr = NULL; - wqe->sg_list[0].vaddr = NULL; - wqe->sg_list[0].length = 0; - wqe->sg_list[0].sge_length = 0; - wqe->length = 0; - for (i = 0, j = 0; i < wr->num_sge; i++) { - /* Check LKEY */ - if (to_ipd(qp->ibqp.pd)->user && - wr->sg_list[i].lkey == 0) { - spin_unlock_irqrestore(&qp->r_rq.lock, - flags); - *bad_wr = wr; - ret = -EINVAL; - goto bail; - } - if (wr->sg_list[i].length == 0) - continue; - if (!ipath_lkey_ok( - &to_idev(qp->ibqp.device)->lk_table, - &wqe->sg_list[j], &wr->sg_list[i], - IB_ACCESS_LOCAL_WRITE)) { - spin_unlock_irqrestore(&qp->r_rq.lock, - flags); - *bad_wr = wr; - ret = -EINVAL; - goto bail; - } - wqe->length += wr->sg_list[i].length; - j++; - } - wqe->num_sge = j; - qp->r_rq.head = next; + wqe->num_sge = wr->num_sge; + for (i = 0; i < wr->num_sge; i++) + wqe->sg_list[i] = wr->sg_list[i]; + wq->head = next; spin_unlock_irqrestore(&qp->r_rq.lock, flags); } ret = 0; @@ -377,6 +358,9 @@ static void ipath_qp_rcv(struct ipath_ibdev *dev, switch (qp->ibqp.qp_type) { case IB_QPT_SMI: case IB_QPT_GSI: + if (ib_ipath_disable_sma) + break; + /* FALLTHROUGH */ case IB_QPT_UD: ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp); break; @@ -395,7 +379,7 @@ static void ipath_qp_rcv(struct ipath_ibdev *dev, } /** - * ipath_ib_rcv - process and incoming packet + * ipath_ib_rcv - process an incoming packet * @arg: the device pointer * @rhdr: the header of the packet * @data: the packet data @@ -404,9 +388,9 @@ static void ipath_qp_rcv(struct ipath_ibdev *dev, * This is called from ipath_kreceive() to process an incoming packet at * interrupt level. Tlen is the length of the header + data + CRC in bytes. 
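[Editor's note: given that tlen covers header + padded payload + ICRC, receive handlers in this driver recover the payload size by subtracting the header, the BTH pad count, and the 4 ICRC bytes. A runnable sketch of that arithmetic; the PadCnt position (bits 21:20 of the first BTH dword) is taken from the IB spec, not from this hunk:]

#include <assert.h>
#include <stdint.h>

static uint32_t payload_bytes(uint32_t tlen, uint32_t hdrsize, uint32_t bth0)
{
        uint32_t pad = (bth0 >> 20) & 3;        /* BTH PadCnt field */

        return tlen - (hdrsize + pad + 4);      /* 4 bytes of ICRC */
}

int main(void)
{
        /* 72-byte packet, 40-byte header, 2 pad bytes -> 26-byte payload */
        assert(payload_bytes(72, 40, 2u << 20) == 26);
        return 0;
}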
*/ -static void ipath_ib_rcv(void *arg, void *rhdr, void *data, u32 tlen) +void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data, + u32 tlen) { - struct ipath_ibdev *dev = (struct ipath_ibdev *) arg; struct ipath_ib_header *hdr = rhdr; struct ipath_other_headers *ohdr; struct ipath_qp *qp; @@ -427,7 +411,7 @@ static void ipath_ib_rcv(void *arg, void *rhdr, void *data, u32 tlen) lid = be16_to_cpu(hdr->lrh[1]); if (lid < IPATH_MULTICAST_LID_BASE) { lid &= ~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1); - if (unlikely(lid != ipath_layer_get_lid(dev->dd))) { + if (unlikely(lid != dev->dd->ipath_lid)) { dev->rcv_errors++; goto bail; } @@ -495,9 +479,8 @@ bail:; * This is called from ipath_do_rcv_timer() at interrupt level to check for * QPs which need retransmits and to collect performance numbers. */ -static void ipath_ib_timer(void *arg) +void ipath_ib_timer(struct ipath_ibdev *dev) { - struct ipath_ibdev *dev = (struct ipath_ibdev *) arg; struct ipath_qp *resend = NULL; struct list_head *last; struct ipath_qp *qp; @@ -539,19 +522,19 @@ static void ipath_ib_timer(void *arg) if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED && --dev->pma_sample_start == 0) { dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING; - ipath_layer_snapshot_counters(dev->dd, &dev->ipath_sword, - &dev->ipath_rword, - &dev->ipath_spkts, - &dev->ipath_rpkts, - &dev->ipath_xmit_wait); + ipath_snapshot_counters(dev->dd, &dev->ipath_sword, + &dev->ipath_rword, + &dev->ipath_spkts, + &dev->ipath_rpkts, + &dev->ipath_xmit_wait); } if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) { if (dev->pma_sample_interval == 0) { u64 ta, tb, tc, td, te; dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE; - ipath_layer_snapshot_counters(dev->dd, &ta, &tb, - &tc, &td, &te); + ipath_snapshot_counters(dev->dd, &ta, &tb, + &tc, &td, &te); dev->ipath_sword = ta - dev->ipath_sword; dev->ipath_rword = tb - dev->ipath_rword; @@ -581,6 +564,362 @@ static void ipath_ib_timer(void *arg) } } +static void update_sge(struct ipath_sge_state *ss, u32 length) +{ + struct ipath_sge *sge = &ss->sge; + + sge->vaddr += length; + sge->length -= length; + sge->sge_length -= length; + if (sge->sge_length == 0) { + if (--ss->num_sge) + *sge = *ss->sg_list++; + } else if (sge->length == 0 && sge->mr != NULL) { + if (++sge->n >= IPATH_SEGSZ) { + if (++sge->m >= sge->mr->mapsz) + return; + sge->n = 0; + } + sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; + sge->length = sge->mr->map[sge->m]->segs[sge->n].length; + } +} + +#ifdef __LITTLE_ENDIAN +static inline u32 get_upper_bits(u32 data, u32 shift) +{ + return data >> shift; +} + +static inline u32 set_upper_bits(u32 data, u32 shift) +{ + return data << shift; +} + +static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off) +{ + data <<= ((sizeof(u32) - n) * BITS_PER_BYTE); + data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE); + return data; +} +#else +static inline u32 get_upper_bits(u32 data, u32 shift) +{ + return data << shift; +} + +static inline u32 set_upper_bits(u32 data, u32 shift) +{ + return data >> shift; +} + +static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off) +{ + data >>= ((sizeof(u32) - n) * BITS_PER_BYTE); + data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE); + return data; +} +#endif + +static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss, + u32 length) +{ + u32 extra = 0; + u32 data = 0; + u32 last; + + while (1) { + u32 len = ss->sge.length; + u32 off; + + BUG_ON(len == 0); + if (len > length) + len = length; + if (len > 
ss->sge.sge_length) + len = ss->sge.sge_length; + /* If the source address is not aligned, try to align it. */ + off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1); + if (off) { + u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr & + ~(sizeof(u32) - 1)); + u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE); + u32 y; + + y = sizeof(u32) - off; + if (len > y) + len = y; + if (len + extra >= sizeof(u32)) { + data |= set_upper_bits(v, extra * + BITS_PER_BYTE); + len = sizeof(u32) - extra; + if (len == length) { + last = data; + break; + } + __raw_writel(data, piobuf); + piobuf++; + extra = 0; + data = 0; + } else { + /* Clear unused upper bytes */ + data |= clear_upper_bytes(v, len, extra); + if (len == length) { + last = data; + break; + } + extra += len; + } + } else if (extra) { + /* Source address is aligned. */ + u32 *addr = (u32 *) ss->sge.vaddr; + int shift = extra * BITS_PER_BYTE; + int ushift = 32 - shift; + u32 l = len; + + while (l >= sizeof(u32)) { + u32 v = *addr; + + data |= set_upper_bits(v, shift); + __raw_writel(data, piobuf); + data = get_upper_bits(v, ushift); + piobuf++; + addr++; + l -= sizeof(u32); + } + /* + * We still have 'extra' number of bytes leftover. + */ + if (l) { + u32 v = *addr; + + if (l + extra >= sizeof(u32)) { + data |= set_upper_bits(v, shift); + len -= l + extra - sizeof(u32); + if (len == length) { + last = data; + break; + } + __raw_writel(data, piobuf); + piobuf++; + extra = 0; + data = 0; + } else { + /* Clear unused upper bytes */ + data |= clear_upper_bytes(v, l, + extra); + if (len == length) { + last = data; + break; + } + extra += l; + } + } else if (len == length) { + last = data; + break; + } + } else if (len == length) { + u32 w; + + /* + * Need to round up for the last dword in the + * packet. + */ + w = (len + 3) >> 2; + __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1); + piobuf += w - 1; + last = ((u32 *) ss->sge.vaddr)[w - 1]; + break; + } else { + u32 w = len >> 2; + + __iowrite32_copy(piobuf, ss->sge.vaddr, w); + piobuf += w; + + extra = len & (sizeof(u32) - 1); + if (extra) { + u32 v = ((u32 *) ss->sge.vaddr)[w]; + + /* Clear unused upper bytes */ + data = clear_upper_bytes(v, extra, 0); + } + } + update_sge(ss, len); + length -= len; + } + /* Update address before sending packet. */ + update_sge(ss, length); + /* must flush early everything before trigger word */ + ipath_flush_wc(); + __raw_writel(last, piobuf); + /* be sure trigger word is written */ + ipath_flush_wc(); +} + +/** + * ipath_verbs_send - send a packet + * @dd: the infinipath device + * @hdrwords: the number of words in the header + * @hdr: the packet header + * @len: the length of the packet in bytes + * @ss: the SGE to send + */ +int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords, + u32 *hdr, u32 len, struct ipath_sge_state *ss) +{ + u32 __iomem *piobuf; + u32 plen; + int ret; + + /* +1 is for the qword padding of pbc */ + plen = hdrwords + ((len + 3) >> 2) + 1; + if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) { + ipath_dbg("packet len 0x%x too long, failing\n", plen); + ret = -EINVAL; + goto bail; + } + + /* Get a PIO buffer to use. */ + piobuf = ipath_getpiobuf(dd, NULL); + if (unlikely(piobuf == NULL)) { + ret = -EBUSY; + goto bail; + } + + /* + * Write len to control qword, no flags. + * We have to flush after the PBC for correctness on some cpus + * or WC buffer can be written out of order. 
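[Editor's note: the flush comments above and below encode a fixed discipline: the chip starts transmitting when the final ("trigger") word of the PIO buffer arrives, so everything earlier must be drained out of the CPU's write-combining buffers first, and the trigger itself must be flushed last. A generalized sketch of that ordering, with wc_flush() as a stand-in for ipath_flush_wc():]

#include <stdint.h>

static void wc_flush(void)
{
        __sync_synchronize();   /* stand-in for the WC-draining barrier */
}

static void pio_copy(volatile uint32_t *piobuf, const uint32_t *hdr,
                     unsigned hdrwords)
{
        unsigned i;

        for (i = 0; i < hdrwords - 1; i++)
                piobuf[i] = hdr[i];
        wc_flush();             /* drain everything before the trigger */
        piobuf[hdrwords - 1] = hdr[hdrwords - 1];       /* trigger word */
        wc_flush();             /* make sure the trigger itself is posted */
}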
+ */ + writeq(plen, piobuf); + ipath_flush_wc(); + piobuf += 2; + if (len == 0) { + /* + * If there is just the header portion, must flush before + * writing last word of header for correctness, and after + * the last header word (trigger word). + */ + __iowrite32_copy(piobuf, hdr, hdrwords - 1); + ipath_flush_wc(); + __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1); + ipath_flush_wc(); + ret = 0; + goto bail; + } + + __iowrite32_copy(piobuf, hdr, hdrwords); + piobuf += hdrwords; + + /* The common case is aligned and contained in one segment. */ + if (likely(ss->num_sge == 1 && len <= ss->sge.length && + !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) { + u32 w; + u32 *addr = (u32 *) ss->sge.vaddr; + + /* Update address before sending packet. */ + update_sge(ss, len); + /* Need to round up for the last dword in the packet. */ + w = (len + 3) >> 2; + __iowrite32_copy(piobuf, addr, w - 1); + /* must flush early everything before trigger word */ + ipath_flush_wc(); + __raw_writel(addr[w - 1], piobuf + w - 1); + /* be sure trigger word is written */ + ipath_flush_wc(); + ret = 0; + goto bail; + } + copy_io(piobuf, ss, len); + ret = 0; + +bail: + return ret; +} + +int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords, + u64 *rwords, u64 *spkts, u64 *rpkts, + u64 *xmit_wait) +{ + int ret; + + if (!(dd->ipath_flags & IPATH_INITTED)) { + /* no hardware, freeze, etc. */ + ipath_dbg("unit %u not usable\n", dd->ipath_unit); + ret = -EINVAL; + goto bail; + } + *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt); + *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt); + *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt); + *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt); + *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt); + + ret = 0; + +bail: + return ret; +} + +/** + * ipath_get_counters - get various chip counters + * @dd: the infinipath device + * @cntrs: counters are placed here + * + * Return the counters needed by recv_pma_get_portcounters(). + */ +int ipath_get_counters(struct ipath_devdata *dd, + struct ipath_verbs_counters *cntrs) +{ + int ret; + + if (!(dd->ipath_flags & IPATH_INITTED)) { + /* no hardware, freeze, etc. */ + ipath_dbg("unit %u not usable\n", dd->ipath_unit); + ret = -EINVAL; + goto bail; + } + cntrs->symbol_error_counter = + ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt); + cntrs->link_error_recovery_counter = + ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt); + /* + * The link downed counter counts when the other side downs the + * connection. We add in the number of times we downed the link + * due to local link integrity errors to compensate. 
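[Editor's note: the counters gathered below feed the z_-prefixed baselines that appear later in this patch ("Snapshot current HW counters to 'clear' them"). The hardware counters cannot be reset, so clearing means recording a baseline and subtracting it from every later reading. A runnable sketch:]

#include <assert.h>
#include <stdint.h>

struct counter { uint64_t z_base; };

static void counter_clear(struct counter *c, uint64_t hw_now)
{
        c->z_base = hw_now;     /* pretend the counter is now zero */
}

static uint64_t counter_read(const struct counter *c, uint64_t hw_now)
{
        return hw_now - c->z_base;
}

int main(void)
{
        struct counter c;

        counter_clear(&c, 1000);
        assert(counter_read(&c, 1234) == 234);
        return 0;
}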
+ */ + cntrs->link_downed_counter = + ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt); + cntrs->port_rcv_errors = + ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) + + ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) + + ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) + + ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) + + ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) + + ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) + + ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) + + ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) + + ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt); + cntrs->port_rcv_remphys_errors = + ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt); + cntrs->port_xmit_discards = + ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt); + cntrs->port_xmit_data = + ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt); + cntrs->port_rcv_data = + ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt); + cntrs->port_xmit_packets = + ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt); + cntrs->port_rcv_packets = + ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt); + cntrs->local_link_integrity_errors = dd->ipath_lli_errors; + cntrs->excessive_buffer_overrun_errors = 0; /* XXX */ + + ret = 0; + +bail: + return ret; +} + /** * ipath_ib_piobufavail - callback when a PIO buffer is available * @arg: the device pointer @@ -591,9 +930,8 @@ static void ipath_ib_timer(void *arg) * QPs waiting for buffers (for now, just do a tasklet_hi_schedule and * return zero). */ -static int ipath_ib_piobufavail(void *arg) +int ipath_ib_piobufavail(struct ipath_ibdev *dev) { - struct ipath_ibdev *dev = (struct ipath_ibdev *) arg; struct ipath_qp *qp; unsigned long flags; @@ -624,14 +962,14 @@ static int ipath_query_device(struct ib_device *ibdev, IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT | IB_DEVICE_SYS_IMAGE_GUID; props->page_size_cap = PAGE_SIZE; - props->vendor_id = ipath_layer_get_vendorid(dev->dd); - props->vendor_part_id = ipath_layer_get_deviceid(dev->dd); - props->hw_ver = ipath_layer_get_pcirev(dev->dd); + props->vendor_id = dev->dd->ipath_vendorid; + props->vendor_part_id = dev->dd->ipath_deviceid; + props->hw_ver = dev->dd->ipath_pcirev; props->sys_image_guid = dev->sys_image_guid; props->max_mr_size = ~0ull; - props->max_qp = dev->qp_table.max; + props->max_qp = ib_ipath_max_qps; props->max_qp_wr = ib_ipath_max_qp_wrs; props->max_sge = ib_ipath_max_sges; props->max_cq = ib_ipath_max_cqs; @@ -647,7 +985,7 @@ static int ipath_query_device(struct ib_device *ibdev, props->max_srq_sge = ib_ipath_max_srq_sges; /* props->local_ca_ack_delay */ props->atomic_cap = IB_ATOMIC_HCA; - props->max_pkeys = ipath_layer_get_npkeys(dev->dd); + props->max_pkeys = ipath_get_npkeys(dev->dd); props->max_mcast_grp = ib_ipath_max_mcast_grps; props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached; props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * @@ -672,12 +1010,17 @@ const u8 ipath_cvt_physportstate[16] = { [INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] = 6, }; +u32 ipath_get_cr_errpkey(struct ipath_devdata *dd) +{ + return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey); +} + static int ipath_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props) { struct ipath_ibdev *dev = to_idev(ibdev); enum ib_mtu mtu; - u16 lid = ipath_layer_get_lid(dev->dd); + u16 lid = dev->dd->ipath_lid; u64 ibcstat; memset(props, 0, sizeof(*props)); @@ -685,16 +1028,16 @@ static int ipath_query_port(struct ib_device *ibdev, props->lmc = 
dev->mkeyprot_resv_lmc & 7; props->sm_lid = dev->sm_lid; props->sm_sl = dev->sm_sl; - ibcstat = ipath_layer_get_lastibcstat(dev->dd); + ibcstat = dev->dd->ipath_lastibcstat; props->state = ((ibcstat >> 4) & 0x3) + 1; /* See phys_state_show() */ props->phys_state = ipath_cvt_physportstate[ - ipath_layer_get_lastibcstat(dev->dd) & 0xf]; + dev->dd->ipath_lastibcstat & 0xf]; props->port_cap_flags = dev->port_cap_flags; props->gid_tbl_len = 1; props->max_msg_sz = 0x80000000; - props->pkey_tbl_len = ipath_layer_get_npkeys(dev->dd); - props->bad_pkey_cntr = ipath_layer_get_cr_errpkey(dev->dd) - + props->pkey_tbl_len = ipath_get_npkeys(dev->dd); + props->bad_pkey_cntr = ipath_get_cr_errpkey(dev->dd) - dev->z_pkey_violations; props->qkey_viol_cntr = dev->qkey_violations; props->active_width = IB_WIDTH_4X; @@ -704,7 +1047,7 @@ static int ipath_query_port(struct ib_device *ibdev, props->init_type_reply = 0; props->max_mtu = IB_MTU_4096; - switch (ipath_layer_get_ibmtu(dev->dd)) { + switch (dev->dd->ipath_ibmtu) { case 4096: mtu = IB_MTU_4096; break; @@ -763,7 +1106,7 @@ static int ipath_modify_port(struct ib_device *ibdev, dev->port_cap_flags |= props->set_port_cap_mask; dev->port_cap_flags &= ~props->clr_port_cap_mask; if (port_modify_mask & IB_PORT_SHUTDOWN) - ipath_layer_set_linkstate(dev->dd, IPATH_IB_LINKDOWN); + ipath_set_linkstate(dev->dd, IPATH_IB_LINKDOWN); if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR) dev->qkey_violations = 0; return 0; @@ -780,7 +1123,7 @@ static int ipath_query_gid(struct ib_device *ibdev, u8 port, goto bail; } gid->global.subnet_prefix = dev->gid_prefix; - gid->global.interface_id = ipath_layer_get_guid(dev->dd); + gid->global.interface_id = dev->dd->ipath_guid; ret = 0; @@ -803,18 +1146,22 @@ static struct ib_pd *ipath_alloc_pd(struct ib_device *ibdev, * we allow allocations of more than we report for this value. */ - if (dev->n_pds_allocated == ib_ipath_max_pds) { + pd = kmalloc(sizeof *pd, GFP_KERNEL); + if (!pd) { ret = ERR_PTR(-ENOMEM); goto bail; } - pd = kmalloc(sizeof *pd, GFP_KERNEL); - if (!pd) { + spin_lock(&dev->n_pds_lock); + if (dev->n_pds_allocated == ib_ipath_max_pds) { + spin_unlock(&dev->n_pds_lock); + kfree(pd); ret = ERR_PTR(-ENOMEM); goto bail; } dev->n_pds_allocated++; + spin_unlock(&dev->n_pds_lock); /* ib_alloc_pd() will initialize pd->ibpd. */ pd->user = udata != NULL; @@ -830,7 +1177,9 @@ static int ipath_dealloc_pd(struct ib_pd *ibpd) struct ipath_pd *pd = to_ipd(ibpd); struct ipath_ibdev *dev = to_idev(ibpd->device); + spin_lock(&dev->n_pds_lock); dev->n_pds_allocated--; + spin_unlock(&dev->n_pds_lock); kfree(pd); @@ -851,11 +1200,6 @@ static struct ib_ah *ipath_create_ah(struct ib_pd *pd, struct ib_ah *ret; struct ipath_ibdev *dev = to_idev(pd->device); - if (dev->n_ahs_allocated == ib_ipath_max_ahs) { - ret = ERR_PTR(-ENOMEM); - goto bail; - } - /* A multicast address requires a GRH (see ch. 8.4.1). */ if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE && ah_attr->dlid != IPATH_PERMISSIVE_LID && @@ -881,7 +1225,16 @@ static struct ib_ah *ipath_create_ah(struct ib_pd *pd, goto bail; } + spin_lock(&dev->n_ahs_lock); + if (dev->n_ahs_allocated == ib_ipath_max_ahs) { + spin_unlock(&dev->n_ahs_lock); + kfree(ah); + ret = ERR_PTR(-ENOMEM); + goto bail; + } + dev->n_ahs_allocated++; + spin_unlock(&dev->n_ahs_lock); /* ib_create_ah() will initialize ah->ibah. 
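[Editor's note: the PD and AH changes in this hunk share one pattern: the object is allocated with GFP_KERNEL before the new per-resource spinlock is taken, so the (possibly sleeping) allocation never happens under the lock, and the quota check plus increment become a short atomic section. A kernel-style sketch of the pattern, not the driver's literal code:]

#include <linux/slab.h>
#include <linux/spinlock.h>

struct quota {
        spinlock_t lock;
        u32 allocated;
        u32 max;
};

struct obj { int placeholder; };

static struct obj *obj_alloc(struct quota *q)
{
        struct obj *o = kmalloc(sizeof(*o), GFP_KERNEL);  /* may sleep */

        if (!o)
                return NULL;
        spin_lock(&q->lock);
        if (q->allocated == q->max) {
                spin_unlock(&q->lock);
                kfree(o);       /* over quota: back out the allocation */
                return NULL;
        }
        q->allocated++;
        spin_unlock(&q->lock);
        return o;
}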
*/ ah->attr = *ah_attr; @@ -903,7 +1256,9 @@ static int ipath_destroy_ah(struct ib_ah *ibah) struct ipath_ibdev *dev = to_idev(ibah->device); struct ipath_ah *ah = to_iah(ibah); + spin_lock(&dev->n_ahs_lock); dev->n_ahs_allocated--; + spin_unlock(&dev->n_ahs_lock); kfree(ah); @@ -919,25 +1274,50 @@ static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr) return 0; } +/** + * ipath_get_npkeys - return the size of the PKEY table for port 0 + * @dd: the infinipath device + */ +unsigned ipath_get_npkeys(struct ipath_devdata *dd) +{ + return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys); +} + +/** + * ipath_get_pkey - return the indexed PKEY from the port 0 PKEY table + * @dd: the infinipath device + * @index: the PKEY index + */ +unsigned ipath_get_pkey(struct ipath_devdata *dd, unsigned index) +{ + unsigned ret; + + if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys)) + ret = 0; + else + ret = dd->ipath_pd[0]->port_pkeys[index]; + + return ret; +} + static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) { struct ipath_ibdev *dev = to_idev(ibdev); int ret; - if (index >= ipath_layer_get_npkeys(dev->dd)) { + if (index >= ipath_get_npkeys(dev->dd)) { ret = -EINVAL; goto bail; } - *pkey = ipath_layer_get_pkey(dev->dd, index); + *pkey = ipath_get_pkey(dev->dd, index); ret = 0; bail: return ret; } - /** * ipath_alloc_ucontext - allocate a ucontest * @ibdev: the infiniband device @@ -970,26 +1350,91 @@ static int ipath_dealloc_ucontext(struct ib_ucontext *context) static int ipath_verbs_register_sysfs(struct ib_device *dev); +static void __verbs_timer(unsigned long arg) +{ + struct ipath_devdata *dd = (struct ipath_devdata *) arg; + + /* + * If port 0 receive packet interrupts are not available, or + * can be missed, poll the receive queue + */ + if (dd->ipath_flags & IPATH_POLL_RX_INTR) + ipath_kreceive(dd); + + /* Handle verbs layer timeouts. */ + ipath_ib_timer(dd->verbs_dev); + + mod_timer(&dd->verbs_timer, jiffies + 1); +} + +static int enable_timer(struct ipath_devdata *dd) +{ + /* + * Early chips had a design flaw where the chip and kernel idea + * of the tail register don't always agree, and therefore we won't + * get an interrupt on the next packet received. + * If the board supports per packet receive interrupts, use it. + * Otherwise, the timer function periodically checks for packets + * to cover this case. + * Either way, the timer is needed for verbs layer related + * processing. + */ + if (dd->ipath_flags & IPATH_GPIO_INTR) { + ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect, + 0x2074076542310ULL); + /* Enable GPIO bit 2 interrupt */ + ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, + (u64) (1 << 2)); + } + + init_timer(&dd->verbs_timer); + dd->verbs_timer.function = __verbs_timer; + dd->verbs_timer.data = (unsigned long)dd; + dd->verbs_timer.expires = jiffies + 1; + add_timer(&dd->verbs_timer); + + return 0; +} + +static int disable_timer(struct ipath_devdata *dd) +{ + /* Disable GPIO bit 2 interrupt */ + if (dd->ipath_flags & IPATH_GPIO_INTR) + ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0); + + del_timer_sync(&dd->verbs_timer); + + return 0; +} + /** * ipath_register_ib_device - register our device with the infiniband core - * @unit: the device number to register * @dd: the device data structure * Return the allocated ipath_ibdev pointer or NULL on error. 
*/ -static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd) +int ipath_register_ib_device(struct ipath_devdata *dd) { - struct ipath_layer_counters cntrs; + struct ipath_verbs_counters cntrs; struct ipath_ibdev *idev; struct ib_device *dev; int ret; idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev); - if (idev == NULL) + if (idev == NULL) { + ret = -ENOMEM; goto bail; + } dev = &idev->ibdev; /* Only need to initialize non-zero fields. */ + spin_lock_init(&idev->n_pds_lock); + spin_lock_init(&idev->n_ahs_lock); + spin_lock_init(&idev->n_cqs_lock); + spin_lock_init(&idev->n_qps_lock); + spin_lock_init(&idev->n_srqs_lock); + spin_lock_init(&idev->n_mcast_grps_lock); + spin_lock_init(&idev->qp_table.lock); spin_lock_init(&idev->lk_table.lock); idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE); @@ -1030,7 +1475,7 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd) idev->link_width_enabled = 3; /* 1x or 4x */ /* Snapshot current HW counters to "clear" them. */ - ipath_layer_get_counters(dd, &cntrs); + ipath_get_counters(dd, &cntrs); idev->z_symbol_error_counter = cntrs.symbol_error_counter; idev->z_link_error_recovery_counter = cntrs.link_error_recovery_counter; @@ -1054,14 +1499,14 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd) * device types in the system, we can't be sure this is unique. */ if (!sys_image_guid) - sys_image_guid = ipath_layer_get_guid(dd); + sys_image_guid = dd->ipath_guid; idev->sys_image_guid = sys_image_guid; - idev->ib_unit = unit; + idev->ib_unit = dd->ipath_unit; idev->dd = dd; strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX); dev->owner = THIS_MODULE; - dev->node_guid = ipath_layer_get_guid(dd); + dev->node_guid = dd->ipath_guid; dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION; dev->uverbs_cmd_mask = (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | @@ -1093,9 +1538,9 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd) (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV); - dev->node_type = IB_NODE_CA; + dev->node_type = RDMA_NODE_IB_CA; dev->phys_port_cnt = 1; - dev->dma_device = ipath_layer_get_device(dd); + dev->dma_device = &dd->pcidev->dev; dev->class_dev.dev = dev->dma_device; dev->query_device = ipath_query_device; dev->modify_device = ipath_modify_device; @@ -1137,9 +1582,10 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd) dev->attach_mcast = ipath_multicast_attach; dev->detach_mcast = ipath_multicast_detach; dev->process_mad = ipath_process_mad; + dev->mmap = ipath_mmap; snprintf(dev->node_desc, sizeof(dev->node_desc), - IPATH_IDSTR " %s kernel_SMA", system_utsname.nodename); + IPATH_IDSTR " %s", system_utsname.nodename); ret = ib_register_device(dev); if (ret) @@ -1148,7 +1594,7 @@ static void *ipath_register_ib_device(int unit, struct ipath_devdata *dd) if (ipath_verbs_register_sysfs(dev)) goto err_class; - ipath_layer_enable_timer(dd); + enable_timer(dd); goto bail; @@ -1160,37 +1606,32 @@ err_lk: kfree(idev->qp_table.table); err_qp: ib_dealloc_device(dev); - _VERBS_ERROR("ib_ipath%d cannot register verbs (%d)!\n", - unit, -ret); + ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret); idev = NULL; bail: - return idev; + dd->verbs_dev = idev; + return ret; } -static void ipath_unregister_ib_device(void *arg) +void ipath_unregister_ib_device(struct ipath_ibdev *dev) { - struct ipath_ibdev *dev = (struct ipath_ibdev *) arg; struct ib_device *ibdev = 
&dev->ibdev; - ipath_layer_disable_timer(dev->dd); + disable_timer(dev->dd); ib_unregister_device(ibdev); if (!list_empty(&dev->pending[0]) || !list_empty(&dev->pending[1]) || !list_empty(&dev->pending[2])) - _VERBS_ERROR("ipath%d pending list not empty!\n", - dev->ib_unit); + ipath_dev_err(dev->dd, "pending list not empty!\n"); if (!list_empty(&dev->piowait)) - _VERBS_ERROR("ipath%d piowait list not empty!\n", - dev->ib_unit); + ipath_dev_err(dev->dd, "piowait list not empty!\n"); if (!list_empty(&dev->rnrwait)) - _VERBS_ERROR("ipath%d rnrwait list not empty!\n", - dev->ib_unit); + ipath_dev_err(dev->dd, "rnrwait list not empty!\n"); if (!ipath_mcast_tree_empty()) - _VERBS_ERROR("ipath%d multicast table memory leak!\n", - dev->ib_unit); + ipath_dev_err(dev->dd, "multicast table memory leak!\n"); /* * Note that ipath_unregister_ib_device() can be called before all * the QPs are destroyed! @@ -1201,25 +1642,12 @@ static void ipath_unregister_ib_device(void *arg) ib_dealloc_device(ibdev); } -static int __init ipath_verbs_init(void) -{ - return ipath_verbs_register(ipath_register_ib_device, - ipath_unregister_ib_device, - ipath_ib_piobufavail, ipath_ib_rcv, - ipath_ib_timer); -} - -static void __exit ipath_verbs_cleanup(void) -{ - ipath_verbs_unregister(); -} - static ssize_t show_rev(struct class_device *cdev, char *buf) { struct ipath_ibdev *dev = container_of(cdev, struct ipath_ibdev, ibdev.class_dev); - return sprintf(buf, "%x\n", ipath_layer_get_pcirev(dev->dd)); + return sprintf(buf, "%x\n", dev->dd->ipath_pcirev); } static ssize_t show_hca(struct class_device *cdev, char *buf) @@ -1228,7 +1656,7 @@ static ssize_t show_hca(struct class_device *cdev, char *buf) container_of(cdev, struct ipath_ibdev, ibdev.class_dev); int ret; - ret = ipath_layer_get_boardname(dev->dd, buf, 128); + ret = dev->dd->ipath_f_get_boardname(dev->dd, buf, 128); if (ret < 0) goto bail; strcat(buf, "\n"); @@ -1305,6 +1733,3 @@ static int ipath_verbs_register_sysfs(struct ib_device *dev) bail: return ret; } - -module_init(ipath_verbs_init); -module_exit(ipath_verbs_cleanup); diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h index 2df684727dc1..09bbb3f9a217 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.h +++ b/drivers/infiniband/hw/ipath/ipath_verbs.h @@ -38,10 +38,10 @@ #include <linux/spinlock.h> #include <linux/kernel.h> #include <linux/interrupt.h> +#include <linux/kref.h> #include <rdma/ib_pack.h> #include "ipath_layer.h" -#include "verbs_debug.h" #define QPN_MAX (1 << 24) #define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE) @@ -50,7 +50,7 @@ * Increment this value if any changes that break userspace ABI * compatibility are made. */ -#define IPATH_UVERBS_ABI_VERSION 1 +#define IPATH_UVERBS_ABI_VERSION 2 /* * Define an ib_cq_notify value that is not valid so we know when CQ @@ -152,19 +152,6 @@ struct ipath_mcast { int n_attached; }; -/* Memory region */ -struct ipath_mr { - struct ib_mr ibmr; - struct ipath_mregion mr; /* must be last */ -}; - -/* Fast memory region */ -struct ipath_fmr { - struct ib_fmr ibfmr; - u8 page_shift; - struct ipath_mregion mr; /* must be last */ -}; - /* Protection domain */ struct ipath_pd { struct ib_pd ibpd; @@ -178,58 +165,89 @@ struct ipath_ah { }; /* - * Quick description of our CQ/QP locking scheme: - * - * We have one global lock that protects dev->cq/qp_table. Each - * struct ipath_cq/qp also has its own lock. An individual qp lock - * may be taken inside of an individual cq lock. 
Both cqs attached to - * a qp may be locked, with the send cq locked first. No other - * nesting should be done. - * - * Each struct ipath_cq/qp also has an atomic_t ref count. The - * pointer from the cq/qp_table to the struct counts as one reference. - * This reference also is good for access through the consumer API, so - * modifying the CQ/QP etc doesn't need to take another reference. - * Access because of a completion being polled does need a reference. - * - * Finally, each struct ipath_cq/qp has a wait_queue_head_t for the - * destroy function to sleep on. - * - * This means that access from the consumer API requires nothing but - * taking the struct's lock. - * - * Access because of a completion event should go as follows: - * - lock cq/qp_table and look up struct - * - increment ref count in struct - * - drop cq/qp_table lock - * - lock struct, do your thing, and unlock struct - * - decrement ref count; if zero, wake up waiters - * - * To destroy a CQ/QP, we can do the following: - * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock - * - decrement ref count - * - wait_event until ref count is zero - * - * It is the consumer's responsibilty to make sure that no QP - * operations (WQE posting or state modification) are pending when the - * QP is destroyed. Also, the consumer must make sure that calls to - * qp_modify are serialized. - * - * Possible optimizations (wait for profile data to see if/where we - * have locks bouncing between CPUs): - * - split cq/qp table lock into n separate (cache-aligned) locks, - * indexed (say) by the page in the table + * This structure is used by ipath_mmap() to validate an offset + * when an mmap() request is made. The vm_area_struct then uses + * this as its vm_private_data. + */ +struct ipath_mmap_info { + struct ipath_mmap_info *next; + struct ib_ucontext *context; + void *obj; + struct kref ref; + unsigned size; + unsigned mmap_cnt; +}; + +/* + * This structure is used to contain the head pointer, tail pointer, + * and completion queue entries as a single memory allocation so + * it can be mmap'ed into user space. */ +struct ipath_cq_wc { + u32 head; /* index of next entry to fill */ + u32 tail; /* index of next ib_poll_cq() entry */ + struct ib_wc queue[1]; /* this is actually size ibcq.cqe + 1 */ +}; +/* + * The completion queue structure. + */ struct ipath_cq { struct ib_cq ibcq; struct tasklet_struct comptask; spinlock_t lock; u8 notify; u8 triggered; - u32 head; /* new records added to the head */ - u32 tail; /* poll_cq() reads from here. */ - struct ib_wc *queue; /* this is actually ibcq.cqe + 1 */ + struct ipath_cq_wc *queue; + struct ipath_mmap_info *ip; +}; + +/* + * A segment is a linear region of low physical memory. + * XXX Maybe we should use phys addr here and kmap()/kunmap(). + * Used by the verbs layer. + */ +struct ipath_seg { + void *vaddr; + size_t length; +}; + +/* The number of ipath_segs that fit in a page. */ +#define IPATH_SEGSZ (PAGE_SIZE / sizeof (struct ipath_seg)) + +struct ipath_segarray { + struct ipath_seg segs[IPATH_SEGSZ]; +}; + +struct ipath_mregion { + u64 user_base; /* User's address for this region */ + u64 iova; /* IB start address of this region */ + size_t length; + u32 lkey; + u32 offset; /* offset (bytes) to start of region */ + int access_flags; + u32 max_segs; /* number of ipath_segs in all the arrays */ + u32 mapsz; /* size of the map array */ + struct ipath_segarray *map[0]; /* the segments */ +}; + +/* + * These keep track of the copy progress within a memory region. 
+ * Used by the verbs layer. + */ +struct ipath_sge { + struct ipath_mregion *mr; + void *vaddr; /* current pointer into the segment */ + u32 sge_length; /* length of the SGE */ + u32 length; /* remaining length of the segment */ + u16 m; /* current index: mr->map[m] */ + u16 n; /* current index: mr->map[m]->segs[n] */ +}; + +/* Memory region */ +struct ipath_mr { + struct ib_mr ibmr; + struct ipath_mregion mr; /* must be last */ }; /* @@ -248,32 +266,50 @@ struct ipath_swqe { /* * Receive work request queue entry. - * The size of the sg_list is determined when the QP is created and stored - * in qp->r_max_sge. + * The size of the sg_list is determined when the QP (or SRQ) is created + * and stored in qp->r_rq.max_sge (or srq->rq.max_sge). */ struct ipath_rwqe { u64 wr_id; - u32 length; /* total length of data in sg_list */ u8 num_sge; - struct ipath_sge sg_list[0]; + struct ib_sge sg_list[0]; }; -struct ipath_rq { - spinlock_t lock; +/* + * This structure is used to contain the head pointer, tail pointer, + * and receive work queue entries as a single memory allocation so + * it can be mmap'ed into user space. + * Note that the wq array elements are variable size so you can't + * just index into the array to get the N'th element; + * use get_rwqe_ptr() instead. + */ +struct ipath_rwq { u32 head; /* new work requests posted to the head */ u32 tail; /* receives pull requests from here. */ + struct ipath_rwqe wq[0]; +}; + +struct ipath_rq { + struct ipath_rwq *wq; + spinlock_t lock; u32 size; /* size of RWQE array */ u8 max_sge; - struct ipath_rwqe *wq; /* RWQE array */ }; struct ipath_srq { struct ib_srq ibsrq; struct ipath_rq rq; + struct ipath_mmap_info *ip; /* send signal when number of RWQEs < limit */ u32 limit; }; +struct ipath_sge_state { + struct ipath_sge *sg_list; /* next SGE to be used if any */ + struct ipath_sge sge; /* progress state for the current SGE */ + u8 num_sge; +}; + /* * Variables prefixed with s_ are for the requester (sender). * Variables prefixed with r_ are for the responder (receiver). @@ -293,6 +329,7 @@ struct ipath_qp { atomic_t refcount; wait_queue_head_t wait; struct tasklet_struct s_task; + struct ipath_mmap_info *ip; struct ipath_sge_state *s_cur_sge; struct ipath_sge_state s_sge; /* current send request data */ /* current RDMA read send data */ @@ -334,6 +371,7 @@ struct ipath_qp { u8 s_retry; /* requester retry counter */ u8 s_rnr_retry; /* requester RNR retry counter */ u8 s_pkey_index; /* PKEY index to use */ + u8 timeout; /* Timeout for this QP */ enum ib_mtu path_mtu; u32 remote_qpn; u32 qkey; /* QKEY for this QP (for UD or RD) */ @@ -345,7 +383,8 @@ struct ipath_qp { u32 s_ssn; /* SSN of tail entry */ u32 s_lsn; /* limit sequence number (credit) */ struct ipath_swqe *s_wq; /* send work queue */ - struct ipath_rq r_rq; /* receive work queue */ + struct ipath_rq r_rq; /* receive work queue */ + struct ipath_sge r_sg_list[0]; /* verified SGEs */ }; /* @@ -369,15 +408,15 @@ static inline struct ipath_swqe *get_swqe_ptr(struct ipath_qp *qp, /* * Since struct ipath_rwqe is not a fixed size, we can't simply index into - * struct ipath_rq.wq. This function does the array index computation. + * struct ipath_rwq.wq. This function does the array index computation. 
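+ * Each slot occupies sizeof(struct ipath_rwqe) plus max_sge struct
+ * ib_sge entries, so entry n starts n such strides past rq->wq->wq.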
*/ static inline struct ipath_rwqe *get_rwqe_ptr(struct ipath_rq *rq, unsigned n) { return (struct ipath_rwqe *) - ((char *) rq->wq + + ((char *) rq->wq->wq + (sizeof(struct ipath_rwqe) + - rq->max_sge * sizeof(struct ipath_sge)) * n); + rq->max_sge * sizeof(struct ib_sge)) * n); } /* @@ -417,6 +456,7 @@ struct ipath_ibdev { struct ib_device ibdev; struct list_head dev_list; struct ipath_devdata *dd; + struct ipath_mmap_info *pending_mmaps; int ib_unit; /* This is the device number */ u16 sm_lid; /* in host order */ u8 sm_sl; @@ -435,11 +475,20 @@ struct ipath_ibdev { __be64 sys_image_guid; /* in network order */ __be64 gid_prefix; /* in network order */ __be64 mkey; + u32 n_pds_allocated; /* number of PDs allocated for device */ + spinlock_t n_pds_lock; u32 n_ahs_allocated; /* number of AHs allocated for device */ + spinlock_t n_ahs_lock; u32 n_cqs_allocated; /* number of CQs allocated for device */ + spinlock_t n_cqs_lock; + u32 n_qps_allocated; /* number of QPs allocated for device */ + spinlock_t n_qps_lock; u32 n_srqs_allocated; /* number of SRQs allocated for device */ + spinlock_t n_srqs_lock; u32 n_mcast_grps_allocated; /* number of mcast groups allocated */ + spinlock_t n_mcast_grps_lock; + u64 ipath_sword; /* total dwords sent (sample result) */ u64 ipath_rword; /* total dwords received (sample result) */ u64 ipath_spkts; /* total packets sent (sample result) */ @@ -494,8 +543,19 @@ struct ipath_ibdev { struct ipath_opcode_stats opstats[128]; }; -struct ipath_ucontext { - struct ib_ucontext ibucontext; +struct ipath_verbs_counters { + u64 symbol_error_counter; + u64 link_error_recovery_counter; + u64 link_downed_counter; + u64 port_rcv_errors; + u64 port_rcv_remphys_errors; + u64 port_xmit_discards; + u64 port_xmit_data; + u64 port_rcv_data; + u64 port_xmit_packets; + u64 port_rcv_packets; + u32 local_link_integrity_errors; + u32 excessive_buffer_overrun_errors; }; static inline struct ipath_mr *to_imr(struct ib_mr *ibmr) @@ -503,11 +563,6 @@ static inline struct ipath_mr *to_imr(struct ib_mr *ibmr) return container_of(ibmr, struct ipath_mr, ibmr); } -static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr) -{ - return container_of(ibfmr, struct ipath_fmr, ibfmr); -} - static inline struct ipath_pd *to_ipd(struct ib_pd *ibpd) { return container_of(ibpd, struct ipath_pd, ibpd); @@ -545,12 +600,6 @@ int ipath_process_mad(struct ib_device *ibdev, struct ib_grh *in_grh, struct ib_mad *in_mad, struct ib_mad *out_mad); -static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext - *ibucontext) -{ - return container_of(ibucontext, struct ipath_ucontext, ibucontext); -} - /* * Compare the lower 24 bits of the two values. * Returns an integer <, ==, or > than zero. 
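 * One common implementation shifts the 24-bit difference up 8 bits and
 * arithmetic-shifts it back down, so that bit 23 is sign-extended and
 * wraparound compares correctly; a sketch (illustrative, not quoted
 * from the driver):
 *
 *	return (((int) a) - ((int) b)) << 8 >> 8;
 */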
@@ -562,6 +611,13 @@ static inline int ipath_cmp24(u32 a, u32 b) struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid); +int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords, + u64 *rwords, u64 *spkts, u64 *rpkts, + u64 *xmit_wait); + +int ipath_get_counters(struct ipath_devdata *dd, + struct ipath_verbs_counters *cntrs); + int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); @@ -579,7 +635,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, int ipath_destroy_qp(struct ib_qp *ibqp); int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, - int attr_mask); + int attr_mask, struct ib_udata *udata); int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_qp_init_attr *init_attr); @@ -592,6 +648,9 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc); void ipath_get_credit(struct ipath_qp *qp, u32 aeth); +int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords, + u32 *hdr, u32 len, struct ipath_sge_state *ss); + void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig); int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss, @@ -638,7 +697,8 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd, struct ib_udata *udata); int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, - enum ib_srq_attr_mask attr_mask); + enum ib_srq_attr_mask attr_mask, + struct ib_udata *udata); int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr); @@ -680,6 +740,10 @@ int ipath_unmap_fmr(struct list_head *fmr_list); int ipath_dealloc_fmr(struct ib_fmr *ibfmr); +void ipath_release_mmap_info(struct kref *ref); + +int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); + void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev); void ipath_insert_rnr_queue(struct ipath_qp *qp); @@ -700,6 +764,22 @@ int ipath_make_rc_req(struct ipath_qp *qp, struct ipath_other_headers *ohdr, int ipath_make_uc_req(struct ipath_qp *qp, struct ipath_other_headers *ohdr, u32 pmtu, u32 *bth0p, u32 *bth2p); +int ipath_register_ib_device(struct ipath_devdata *); + +void ipath_unregister_ib_device(struct ipath_ibdev *); + +void ipath_ib_rcv(struct ipath_ibdev *, void *, void *, u32); + +int ipath_ib_piobufavail(struct ipath_ibdev *); + +void ipath_ib_timer(struct ipath_ibdev *); + +unsigned ipath_get_npkeys(struct ipath_devdata *); + +u32 ipath_get_cr_errpkey(struct ipath_devdata *); + +unsigned ipath_get_pkey(struct ipath_devdata *, unsigned); + extern const enum ib_wc_opcode ib_ipath_wc_opcode[]; extern const u8 ipath_cvt_physportstate[]; @@ -714,6 +794,8 @@ extern unsigned int ib_ipath_max_cqs; extern unsigned int ib_ipath_max_qp_wrs; +extern unsigned int ib_ipath_max_qps; + extern unsigned int ib_ipath_max_sges; extern unsigned int ib_ipath_max_mcast_grps; diff --git a/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c b/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c index ee0e1d96d723..085e28b939ec 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c @@ -207,12 +207,17 @@ static int ipath_mcast_add(struct ipath_ibdev *dev, goto bail; } + spin_lock(&dev->n_mcast_grps_lock); if (dev->n_mcast_grps_allocated == ib_ipath_max_mcast_grps) { + spin_unlock(&dev->n_mcast_grps_lock); ret = ENOMEM; goto bail; } dev->n_mcast_grps_allocated++; + spin_unlock(&dev->n_mcast_grps_lock); + + mcast->n_attached++; list_add_tail_rcu(&mqp->list, 
&mcast->qp_list); @@ -343,7 +348,9 @@ int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) atomic_dec(&mcast->refcount); wait_event(mcast->wait, !atomic_read(&mcast->refcount)); ipath_mcast_free(mcast); + spin_lock(&dev->n_mcast_grps_lock); dev->n_mcast_grps_allocated--; + spin_unlock(&dev->n_mcast_grps_lock); } ret = 0; diff --git a/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c b/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c new file mode 100644 index 000000000000..036fde662aa9 --- /dev/null +++ b/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2006 QLogic, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/* + * This file is conditionally built on PowerPC only. Otherwise weak symbol + * versions of the functions exported from here are used. + */ + +#include "ipath_kernel.h" + +/** + * ipath_unordered_wc - indicate whether write combining is ordered + * + * PowerPC systems (at least those in the 970 processor family) + * write partially filled store buffers in address order, but will write + * completely filled store buffers in "random" order, and therefore must + * have serialization for correctness with current InfiniPath chips. + * + */ +int ipath_unordered_wc(void) +{ + return 1; +} diff --git a/drivers/infiniband/hw/ipath/verbs_debug.h b/drivers/infiniband/hw/ipath/verbs_debug.h deleted file mode 100644 index 6186676f2a16..000000000000 --- a/drivers/infiniband/hw/ipath/verbs_debug.h +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright (c) 2006 QLogic, Inc. All rights reserved. - * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef _VERBS_DEBUG_H -#define _VERBS_DEBUG_H - -/* - * This file contains tracing code for the ib_ipath kernel module. - */ -#ifndef _VERBS_DEBUGGING /* tracing enabled or not */ -#define _VERBS_DEBUGGING 1 -#endif - -extern unsigned ib_ipath_debug; - -#define _VERBS_ERROR(fmt,...) \ - do { \ - printk(KERN_ERR "%s: " fmt, "ib_ipath", ##__VA_ARGS__); \ - } while(0) - -#define _VERBS_UNIT_ERROR(unit,fmt,...) \ - do { \ - printk(KERN_ERR "%s: " fmt, "ib_ipath", ##__VA_ARGS__); \ - } while(0) - -#if _VERBS_DEBUGGING - -/* - * Mask values for debugging. The scheme allows us to compile out any - * of the debug tracing stuff, and if compiled in, to enable or - * disable dynamically. - * This can be set at modprobe time also: - * modprobe ib_path ib_ipath_debug=3 - */ - -#define __VERBS_INFO 0x1 /* generic low verbosity stuff */ -#define __VERBS_DBG 0x2 /* generic debug */ -#define __VERBS_VDBG 0x4 /* verbose debug */ -#define __VERBS_SMADBG 0x8000 /* sma packet debug */ - -#define _VERBS_INFO(fmt,...) \ - do { \ - if (unlikely(ib_ipath_debug&__VERBS_INFO)) \ - printk(KERN_INFO "%s: " fmt,"ib_ipath", \ - ##__VA_ARGS__); \ - } while(0) - -#define _VERBS_DBG(fmt,...) \ - do { \ - if (unlikely(ib_ipath_debug&__VERBS_DBG)) \ - printk(KERN_DEBUG "%s: " fmt, __func__, \ - ##__VA_ARGS__); \ - } while(0) - -#define _VERBS_VDBG(fmt,...) \ - do { \ - if (unlikely(ib_ipath_debug&__VERBS_VDBG)) \ - printk(KERN_DEBUG "%s: " fmt, __func__, \ - ##__VA_ARGS__); \ - } while(0) - -#define _VERBS_SMADBG(fmt,...) \ - do { \ - if (unlikely(ib_ipath_debug&__VERBS_SMADBG)) \ - printk(KERN_DEBUG "%s: " fmt, __func__, \ - ##__VA_ARGS__); \ - } while(0) - -#else /* ! _VERBS_DEBUGGING */ - -#define _VERBS_INFO(fmt,...) -#define _VERBS_DBG(fmt,...) -#define _VERBS_VDBG(fmt,...) -#define _VERBS_SMADBG(fmt,...) 
- -#endif /* _VERBS_DEBUGGING */ - -#endif /* _VERBS_DEBUG_H */ diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c index e215041b2db9..69599455aca2 100644 --- a/drivers/infiniband/hw/mthca/mthca_av.c +++ b/drivers/infiniband/hw/mthca/mthca_av.c @@ -90,7 +90,7 @@ static enum ib_rate tavor_rate_to_ib(u8 mthca_rate, u8 port_rate) case MTHCA_RATE_TAVOR_1X: return IB_RATE_2_5_GBPS; case MTHCA_RATE_TAVOR_1X_DDR: return IB_RATE_5_GBPS; case MTHCA_RATE_TAVOR_4X: return IB_RATE_10_GBPS; - default: return port_rate; + default: return mult_to_ib_rate(port_rate); } } diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c index c3bec7490f52..cd044ea2dfa4 100644 --- a/drivers/infiniband/hw/mthca/mthca_catas.c +++ b/drivers/infiniband/hw/mthca/mthca_catas.c @@ -34,6 +34,7 @@ #include <linux/jiffies.h> #include <linux/timer.h> +#include <linux/workqueue.h> #include "mthca_dev.h" @@ -48,9 +49,41 @@ enum { static DEFINE_SPINLOCK(catas_lock); +static LIST_HEAD(catas_list); +static struct workqueue_struct *catas_wq; +static struct work_struct catas_work; + +static int catas_reset_disable; +module_param_named(catas_reset_disable, catas_reset_disable, int, 0644); +MODULE_PARM_DESC(catas_reset_disable, "disable reset on catastrophic event if nonzero"); + +static void catas_reset(void *work_ptr) +{ + struct mthca_dev *dev, *tmpdev; + LIST_HEAD(tlist); + int ret; + + mutex_lock(&mthca_device_mutex); + + spin_lock_irq(&catas_lock); + list_splice_init(&catas_list, &tlist); + spin_unlock_irq(&catas_lock); + + list_for_each_entry_safe(dev, tmpdev, &tlist, catas_err.list) { + ret = __mthca_restart_one(dev->pdev); + if (ret) + mthca_err(dev, "Reset failed (%d)\n", ret); + else + mthca_dbg(dev, "Reset succeeded\n"); + } + + mutex_unlock(&mthca_device_mutex); +} + static void handle_catas(struct mthca_dev *dev) { struct ib_event event; + unsigned long flags; const char *type; int i; @@ -82,6 +115,14 @@ static void handle_catas(struct mthca_dev *dev) for (i = 0; i < dev->catas_err.size; ++i) mthca_err(dev, " buf[%02x]: %08x\n", i, swab32(readl(dev->catas_err.map + i))); + + if (catas_reset_disable) + return; + + spin_lock_irqsave(&catas_lock, flags); + list_add(&dev->catas_err.list, &catas_list); + queue_work(catas_wq, &catas_work); + spin_unlock_irqrestore(&catas_lock, flags); } static void poll_catas(unsigned long dev_ptr) @@ -135,6 +176,7 @@ void mthca_start_catas_poll(struct mthca_dev *dev) dev->catas_err.timer.data = (unsigned long) dev; dev->catas_err.timer.function = poll_catas; dev->catas_err.timer.expires = jiffies + MTHCA_CATAS_POLL_INTERVAL; + INIT_LIST_HEAD(&dev->catas_err.list); add_timer(&dev->catas_err.timer); } @@ -153,4 +195,24 @@ void mthca_stop_catas_poll(struct mthca_dev *dev) dev->catas_err.addr), dev->catas_err.size * 4); } + + spin_lock_irq(&catas_lock); + list_del(&dev->catas_err.list); + spin_unlock_irq(&catas_lock); +} + +int __init mthca_catas_init(void) +{ + INIT_WORK(&catas_work, catas_reset, NULL); + + catas_wq = create_singlethread_workqueue("mthca_catas"); + if (!catas_wq) + return -ENOMEM; + + return 0; +} + +void mthca_catas_cleanup(void) +{ + destroy_workqueue(catas_wq); } diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c index deabc14b4ea4..99a94d710935 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c @@ -34,7 +34,7 @@ * $Id: mthca_cmd.c 1349 2004-12-16 21:09:43Z roland $ */ -#include <linux/sched.h> +#include 
<linux/completion.h> #include <linux/pci.h> #include <linux/errno.h> #include <asm/io.h> diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c index 3e27a084257e..e393681ba7d4 100644 --- a/drivers/infiniband/hw/mthca/mthca_cq.c +++ b/drivers/infiniband/hw/mthca/mthca_cq.c @@ -544,11 +544,11 @@ static inline int mthca_poll_one(struct mthca_dev *dev, wq = &(*cur_qp)->rq; wqe = be32_to_cpu(cqe->wqe); wqe_index = wqe >> wq->wqe_shift; - /* - * WQE addr == base - 1 might be reported in receive completion - * with error instead of (rq size - 1) by Sinai FW 1.0.800 and - * Arbel FW 5.1.400. This bug should be fixed in later FW revs. - */ + /* + * WQE addr == base - 1 might be reported in receive completion + * with error instead of (rq size - 1) by Sinai FW 1.0.800 and + * Arbel FW 5.1.400. This bug should be fixed in later FW revs. + */ if (unlikely(wqe_index < 0)) wqe_index = wq->max - 1; entry->wr_id = (*cur_qp)->wrid[wqe_index]; diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h index f8160b8de090..fe5cecf70fed 100644 --- a/drivers/infiniband/hw/mthca/mthca_dev.h +++ b/drivers/infiniband/hw/mthca/mthca_dev.h @@ -45,6 +45,7 @@ #include <linux/dma-mapping.h> #include <linux/timer.h> #include <linux/mutex.h> +#include <linux/list.h> #include <asm/semaphore.h> @@ -283,8 +284,11 @@ struct mthca_catas_err { unsigned long stop; u32 size; struct timer_list timer; + struct list_head list; }; +extern struct mutex mthca_device_mutex; + struct mthca_dev { struct ib_device ib_dev; struct pci_dev *pdev; @@ -450,6 +454,9 @@ void mthca_unregister_device(struct mthca_dev *dev); void mthca_start_catas_poll(struct mthca_dev *dev); void mthca_stop_catas_poll(struct mthca_dev *dev); +int __mthca_restart_one(struct pci_dev *pdev); +int mthca_catas_init(void); +void mthca_catas_cleanup(void); int mthca_uar_alloc(struct mthca_dev *dev, struct mthca_uar *uar); void mthca_uar_free(struct mthca_dev *dev, struct mthca_uar *uar); @@ -506,7 +513,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, struct ib_srq_attr *attr, struct mthca_srq *srq); void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq); int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, - enum ib_srq_attr_mask attr_mask); + enum ib_srq_attr_mask attr_mask, struct ib_udata *udata); int mthca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr); int mthca_max_srq_sge(struct mthca_dev *dev); void mthca_srq_event(struct mthca_dev *dev, u32 srqn, @@ -521,7 +528,8 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn, enum ib_event_type event_type); int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr); -int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask); +int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, + struct ib_udata *udata); int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr); int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c index d9bc030bcccc..45e106f14807 100644 --- a/drivers/infiniband/hw/mthca/mthca_mad.c +++ b/drivers/infiniband/hw/mthca/mthca_mad.c @@ -119,7 +119,7 @@ static void smp_snoop(struct ib_device *ibdev, mthca_update_rate(to_mdev(ibdev), port_num); update_sm_ah(to_mdev(ibdev), port_num, - be16_to_cpu(pinfo->lid), + 
be16_to_cpu(pinfo->sm_lid), pinfo->neighbormtu_mastersmsl & 0xf); event.device = ibdev; diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c index 7b82c1907f04..47ea02148368 100644 --- a/drivers/infiniband/hw/mthca/mthca_main.c +++ b/drivers/infiniband/hw/mthca/mthca_main.c @@ -80,6 +80,8 @@ static int tune_pci = 0; module_param(tune_pci, int, 0444); MODULE_PARM_DESC(tune_pci, "increase PCI burst from the default set by BIOS if nonzero"); +struct mutex mthca_device_mutex; + static const char mthca_version[] __devinitdata = DRV_NAME ": Mellanox InfiniBand HCA driver v" DRV_VERSION " (" DRV_RELDATE ")\n"; @@ -978,28 +980,15 @@ static struct { MTHCA_FLAG_SINAI_OPT } }; -static int __devinit mthca_init_one(struct pci_dev *pdev, - const struct pci_device_id *id) +static int __mthca_init_one(struct pci_dev *pdev, int hca_type) { - static int mthca_version_printed = 0; int ddr_hidden = 0; int err; struct mthca_dev *mdev; - if (!mthca_version_printed) { - printk(KERN_INFO "%s", mthca_version); - ++mthca_version_printed; - } - printk(KERN_INFO PFX "Initializing %s\n", pci_name(pdev)); - if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) { - printk(KERN_ERR PFX "%s has invalid driver data %lx\n", - pci_name(pdev), id->driver_data); - return -ENODEV; - } - err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "Cannot enable PCI device, " @@ -1065,7 +1054,7 @@ static int __devinit mthca_init_one(struct pci_dev *pdev, mdev->pdev = pdev; - mdev->mthca_flags = mthca_hca_table[id->driver_data].flags; + mdev->mthca_flags = mthca_hca_table[hca_type].flags; if (ddr_hidden) mdev->mthca_flags |= MTHCA_FLAG_DDR_HIDDEN; @@ -1099,13 +1088,13 @@ static int __devinit mthca_init_one(struct pci_dev *pdev, if (err) goto err_cmd; - if (mdev->fw_ver < mthca_hca_table[id->driver_data].latest_fw) { + if (mdev->fw_ver < mthca_hca_table[hca_type].latest_fw) { mthca_warn(mdev, "HCA FW version %d.%d.%d is old (%d.%d.%d is current).\n", (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff, (int) (mdev->fw_ver & 0xffff), - (int) (mthca_hca_table[id->driver_data].latest_fw >> 32), - (int) (mthca_hca_table[id->driver_data].latest_fw >> 16) & 0xffff, - (int) (mthca_hca_table[id->driver_data].latest_fw & 0xffff)); + (int) (mthca_hca_table[hca_type].latest_fw >> 32), + (int) (mthca_hca_table[hca_type].latest_fw >> 16) & 0xffff, + (int) (mthca_hca_table[hca_type].latest_fw & 0xffff)); mthca_warn(mdev, "If you have problems, try updating your HCA FW.\n"); } @@ -1122,6 +1111,7 @@ static int __devinit mthca_init_one(struct pci_dev *pdev, goto err_unregister; pci_set_drvdata(pdev, mdev); + mdev->hca_type = hca_type; return 0; @@ -1166,7 +1156,7 @@ err_disable_pdev: return err; } -static void __devexit mthca_remove_one(struct pci_dev *pdev) +static void __mthca_remove_one(struct pci_dev *pdev) { struct mthca_dev *mdev = pci_get_drvdata(pdev); u8 status; @@ -1211,6 +1201,51 @@ static void __devexit mthca_remove_one(struct pci_dev *pdev) } } +int __mthca_restart_one(struct pci_dev *pdev) +{ + struct mthca_dev *mdev; + + mdev = pci_get_drvdata(pdev); + if (!mdev) + return -ENODEV; + __mthca_remove_one(pdev); + return __mthca_init_one(pdev, mdev->hca_type); +} + +static int __devinit mthca_init_one(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + static int mthca_version_printed = 0; + int ret; + + mutex_lock(&mthca_device_mutex); + + if (!mthca_version_printed) { + printk(KERN_INFO "%s", mthca_version); + ++mthca_version_printed; + } + + if (id->driver_data >= 
ARRAY_SIZE(mthca_hca_table)) { + printk(KERN_ERR PFX "%s has invalid driver data %lx\n", + pci_name(pdev), id->driver_data); + mutex_unlock(&mthca_device_mutex); + return -ENODEV; + } + + ret = __mthca_init_one(pdev, id->driver_data); + + mutex_unlock(&mthca_device_mutex); + + return ret; +} + +static void __devexit mthca_remove_one(struct pci_dev *pdev) +{ + mutex_lock(&mthca_device_mutex); + __mthca_remove_one(pdev); + mutex_unlock(&mthca_device_mutex); +} + static struct pci_device_id mthca_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR), .driver_data = TAVOR }, @@ -1248,13 +1283,24 @@ static int __init mthca_init(void) { int ret; + mutex_init(&mthca_device_mutex); + ret = mthca_catas_init(); + if (ret) + return ret; + ret = pci_register_driver(&mthca_driver); - return ret < 0 ? ret : 0; + if (ret < 0) { + mthca_catas_cleanup(); + return ret; + } + + return 0; } static void __exit mthca_cleanup(void) { pci_unregister_driver(&mthca_driver); + mthca_catas_cleanup(); } module_init(mthca_init); diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 265b1d1c4a62..981fe2eebdfa 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -1288,7 +1288,7 @@ int mthca_register_device(struct mthca_dev *dev) (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | (1ull << IB_USER_VERBS_CMD_DETACH_MCAST); - dev->ib_dev.node_type = IB_NODE_CA; + dev->ib_dev.node_type = RDMA_NODE_IB_CA; dev->ib_dev.phys_port_cnt = dev->limits.num_ports; dev->ib_dev.dma_device = &dev->pdev->dev; dev->ib_dev.class_dev.dev = &dev->pdev->dev; diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index 2e8f6f36e0a5..5e5c58b9920b 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c @@ -408,7 +408,7 @@ static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr, ib_ah_attr->sl = be32_to_cpu(path->sl_tclass_flowlabel) >> 28; ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f; ib_ah_attr->static_rate = mthca_rate_to_ib(dev, - path->static_rate & 0x7, + path->static_rate & 0xf, ib_ah_attr->port_num); ib_ah_attr->ah_flags = (path->g_mylmc & (1 << 7)) ? 
IB_AH_GRH : 0; if (ib_ah_attr->ah_flags) { @@ -472,10 +472,14 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m if (qp->transport == RC || qp->transport == UC) { to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path); + qp_attr->alt_pkey_index = + be32_to_cpu(context->alt_path.port_pkey) & 0x7f; + qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num; } - qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f; - qp_attr->alt_pkey_index = be32_to_cpu(context->alt_path.port_pkey) & 0x7f; + qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f; + qp_attr->port_num = + (be32_to_cpu(context->pri_path.port_pkey) >> 24) & 0x3; /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING; @@ -486,11 +490,9 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7); qp_attr->min_rnr_timer = (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f; - qp_attr->port_num = qp_attr->ah_attr.port_num; qp_attr->timeout = context->pri_path.ackto >> 3; qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7; qp_attr->rnr_retry = context->pri_path.rnr_retry >> 5; - qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num; qp_attr->alt_timeout = context->alt_path.ackto >> 3; qp_init_attr->cap = qp_attr->cap; @@ -527,7 +529,8 @@ static int mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah, return 0; } -int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) +int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, + struct ib_udata *udata) { struct mthca_dev *dev = to_mdev(ibqp->device); struct mthca_qp *qp = to_mqp(ibqp); @@ -842,11 +845,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) * entries and reinitialize the QP. */ if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) { - mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, + mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn, qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); if (qp->ibqp.send_cq != qp->ibqp.recv_cq) - mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn, - qp->ibqp.srq ? 
to_msrq(qp->ibqp.srq) : NULL); + mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL); mthca_wq_reset(&qp->sq); qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c index b60a9d79ae54..0f316c87bf64 100644 --- a/drivers/infiniband/hw/mthca/mthca_srq.c +++ b/drivers/infiniband/hw/mthca/mthca_srq.c @@ -358,7 +358,7 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq) } int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, - enum ib_srq_attr_mask attr_mask) + enum ib_srq_attr_mask attr_mask, struct ib_udata *udata) { struct mthca_dev *dev = to_mdev(ibsrq->device); struct mthca_srq *srq = to_msrq(ibsrq); diff --git a/drivers/infiniband/hw/mthca/mthca_uar.c b/drivers/infiniband/hw/mthca/mthca_uar.c index 8e9219842be4..8b728486410d 100644 --- a/drivers/infiniband/hw/mthca/mthca_uar.c +++ b/drivers/infiniband/hw/mthca/mthca_uar.c @@ -60,7 +60,7 @@ int mthca_init_uar_table(struct mthca_dev *dev) ret = mthca_alloc_init(&dev->uar_table.alloc, dev->limits.num_uars, dev->limits.num_uars - 1, - dev->limits.reserved_uars); + dev->limits.reserved_uars + 1); if (ret) return ret; diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 474aa214ab57..0b8a79d53a00 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -336,6 +336,8 @@ static inline void ipoib_unregister_debugfs(void) { } extern int ipoib_sendq_size; extern int ipoib_recvq_size; +extern struct ib_sa_client ipoib_sa_client; + #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG extern int ipoib_debug_level; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 5033666b1481..f426a69d9a43 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -169,117 +169,129 @@ static int ipoib_ib_post_receives(struct net_device *dev) return 0; } -static void ipoib_ib_handle_wc(struct net_device *dev, - struct ib_wc *wc) +static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) { struct ipoib_dev_priv *priv = netdev_priv(dev); - unsigned int wr_id = wc->wr_id; + unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV; + struct sk_buff *skb; + dma_addr_t addr; - ipoib_dbg_data(priv, "called: id %d, op %d, status: %d\n", + ipoib_dbg_data(priv, "recv completion: id %d, op %d, status: %d\n", wr_id, wc->opcode, wc->status); - if (wr_id & IPOIB_OP_RECV) { - wr_id &= ~IPOIB_OP_RECV; - - if (wr_id < ipoib_recvq_size) { - struct sk_buff *skb = priv->rx_ring[wr_id].skb; - dma_addr_t addr = priv->rx_ring[wr_id].mapping; - - if (unlikely(wc->status != IB_WC_SUCCESS)) { - if (wc->status != IB_WC_WR_FLUSH_ERR) - ipoib_warn(priv, "failed recv event " - "(status=%d, wrid=%d vend_err %x)\n", - wc->status, wr_id, wc->vendor_err); - dma_unmap_single(priv->ca->dma_device, addr, - IPOIB_BUF_SIZE, DMA_FROM_DEVICE); - dev_kfree_skb_any(skb); - priv->rx_ring[wr_id].skb = NULL; - return; - } + if (unlikely(wr_id >= ipoib_recvq_size)) { + ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n", + wr_id, ipoib_recvq_size); + return; + } - /* - * If we can't allocate a new RX buffer, dump - * this packet and reuse the old buffer. 
- */ - if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) { - ++priv->stats.rx_dropped; - goto repost; - } + skb = priv->rx_ring[wr_id].skb; + addr = priv->rx_ring[wr_id].mapping; - ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n", - wc->byte_len, wc->slid); + if (unlikely(wc->status != IB_WC_SUCCESS)) { + if (wc->status != IB_WC_WR_FLUSH_ERR) + ipoib_warn(priv, "failed recv event " + "(status=%d, wrid=%d vend_err %x)\n", + wc->status, wr_id, wc->vendor_err); + dma_unmap_single(priv->ca->dma_device, addr, + IPOIB_BUF_SIZE, DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + priv->rx_ring[wr_id].skb = NULL; + return; + } - dma_unmap_single(priv->ca->dma_device, addr, - IPOIB_BUF_SIZE, DMA_FROM_DEVICE); + /* + * If we can't allocate a new RX buffer, dump + * this packet and reuse the old buffer. + */ + if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) { + ++priv->stats.rx_dropped; + goto repost; + } - skb_put(skb, wc->byte_len); - skb_pull(skb, IB_GRH_BYTES); + ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n", + wc->byte_len, wc->slid); - if (wc->slid != priv->local_lid || - wc->src_qp != priv->qp->qp_num) { - skb->protocol = ((struct ipoib_header *) skb->data)->proto; - skb->mac.raw = skb->data; - skb_pull(skb, IPOIB_ENCAP_LEN); + dma_unmap_single(priv->ca->dma_device, addr, + IPOIB_BUF_SIZE, DMA_FROM_DEVICE); - dev->last_rx = jiffies; - ++priv->stats.rx_packets; - priv->stats.rx_bytes += skb->len; + skb_put(skb, wc->byte_len); + skb_pull(skb, IB_GRH_BYTES); - skb->dev = dev; - /* XXX get correct PACKET_ type here */ - skb->pkt_type = PACKET_HOST; - netif_rx_ni(skb); - } else { - ipoib_dbg_data(priv, "dropping loopback packet\n"); - dev_kfree_skb_any(skb); - } + if (wc->slid != priv->local_lid || + wc->src_qp != priv->qp->qp_num) { + skb->protocol = ((struct ipoib_header *) skb->data)->proto; + skb->mac.raw = skb->data; + skb_pull(skb, IPOIB_ENCAP_LEN); - repost: - if (unlikely(ipoib_ib_post_receive(dev, wr_id))) - ipoib_warn(priv, "ipoib_ib_post_receive failed " - "for buf %d\n", wr_id); - } else - ipoib_warn(priv, "completion event with wrid %d\n", - wr_id); + dev->last_rx = jiffies; + ++priv->stats.rx_packets; + priv->stats.rx_bytes += skb->len; + skb->dev = dev; + /* XXX get correct PACKET_ type here */ + skb->pkt_type = PACKET_HOST; + netif_rx_ni(skb); } else { - struct ipoib_tx_buf *tx_req; - unsigned long flags; + ipoib_dbg_data(priv, "dropping loopback packet\n"); + dev_kfree_skb_any(skb); + } - if (wr_id >= ipoib_sendq_size) { - ipoib_warn(priv, "completion event with wrid %d (> %d)\n", - wr_id, ipoib_sendq_size); - return; - } +repost: + if (unlikely(ipoib_ib_post_receive(dev, wr_id))) + ipoib_warn(priv, "ipoib_ib_post_receive failed " + "for buf %d\n", wr_id); +} - ipoib_dbg_data(priv, "send complete, wrid %d\n", wr_id); +static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) +{ + struct ipoib_dev_priv *priv = netdev_priv(dev); + unsigned int wr_id = wc->wr_id; + struct ipoib_tx_buf *tx_req; + unsigned long flags; - tx_req = &priv->tx_ring[wr_id]; + ipoib_dbg_data(priv, "send completion: id %d, op %d, status: %d\n", + wr_id, wc->opcode, wc->status); - dma_unmap_single(priv->ca->dma_device, - pci_unmap_addr(tx_req, mapping), - tx_req->skb->len, - DMA_TO_DEVICE); + if (unlikely(wr_id >= ipoib_sendq_size)) { + ipoib_warn(priv, "send completion event with wrid %d (> %d)\n", + wr_id, ipoib_sendq_size); + return; + } - ++priv->stats.tx_packets; - priv->stats.tx_bytes += tx_req->skb->len; + tx_req = &priv->tx_ring[wr_id]; - dev_kfree_skb_any(tx_req->skb); + 
dma_unmap_single(priv->ca->dma_device, + pci_unmap_addr(tx_req, mapping), + tx_req->skb->len, + DMA_TO_DEVICE); - spin_lock_irqsave(&priv->tx_lock, flags); - ++priv->tx_tail; - if (netif_queue_stopped(dev) && - test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags) && - priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1) - netif_wake_queue(dev); - spin_unlock_irqrestore(&priv->tx_lock, flags); + ++priv->stats.tx_packets; + priv->stats.tx_bytes += tx_req->skb->len; - if (wc->status != IB_WC_SUCCESS && - wc->status != IB_WC_WR_FLUSH_ERR) - ipoib_warn(priv, "failed send event " - "(status=%d, wrid=%d vend_err %x)\n", - wc->status, wr_id, wc->vendor_err); - } + dev_kfree_skb_any(tx_req->skb); + + spin_lock_irqsave(&priv->tx_lock, flags); + ++priv->tx_tail; + if (netif_queue_stopped(dev) && + test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags) && + priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1) + netif_wake_queue(dev); + spin_unlock_irqrestore(&priv->tx_lock, flags); + + if (wc->status != IB_WC_SUCCESS && + wc->status != IB_WC_WR_FLUSH_ERR) + ipoib_warn(priv, "failed send event " + "(status=%d, wrid=%d vend_err %x)\n", + wc->status, wr_id, wc->vendor_err); +} + +static void ipoib_ib_handle_wc(struct net_device *dev, struct ib_wc *wc) +{ + if (wc->wr_id & IPOIB_OP_RECV) + ipoib_ib_handle_rx_wc(dev, wc); + else + ipoib_ib_handle_tx_wc(dev, wc); } void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr) @@ -320,7 +332,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_tx_buf *tx_req; dma_addr_t addr; - if (skb->len > dev->mtu + INFINIBAND_ALEN) { + if (unlikely(skb->len > dev->mtu + INFINIBAND_ALEN)) { ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n", skb->len, dev->mtu + INFINIBAND_ALEN); ++priv->stats.tx_dropped; @@ -619,8 +631,10 @@ void ipoib_ib_dev_flush(void *_dev) * The device could have been brought down between the start and when * we get here, don't bring it back up if it's not configured up */ - if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) + if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { ipoib_ib_dev_up(dev); + ipoib_mcast_restart_task(dev); + } mutex_lock(&priv->vlan_mutex); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index cf71d2a5515c..1eaf00e9862c 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -40,7 +40,6 @@ #include <linux/init.h> #include <linux/slab.h> -#include <linux/vmalloc.h> #include <linux/kernel.h> #include <linux/if_arp.h> /* For ARPHRD_xxx */ @@ -82,6 +81,8 @@ static const u8 ipv4_bcast_addr[] = { struct workqueue_struct *ipoib_workqueue; +struct ib_sa_client ipoib_sa_client; + static void ipoib_add_one(struct ib_device *device); static void ipoib_remove_one(struct ib_device *device); @@ -336,7 +337,8 @@ void ipoib_flush_paths(struct net_device *dev) struct ipoib_path *path, *tp; LIST_HEAD(remove_list); - spin_lock_irq(&priv->lock); + spin_lock_irq(&priv->tx_lock); + spin_lock(&priv->lock); list_splice(&priv->path_list, &remove_list); INIT_LIST_HEAD(&priv->path_list); @@ -347,12 +349,15 @@ void ipoib_flush_paths(struct net_device *dev) list_for_each_entry_safe(path, tp, &remove_list, list) { if (path->query) ib_sa_cancel_query(path->query_id, path->query); - spin_unlock_irq(&priv->lock); + spin_unlock(&priv->lock); + spin_unlock_irq(&priv->tx_lock); wait_for_completion(&path->done); path_free(dev, path); - spin_lock_irq(&priv->lock); + spin_lock_irq(&priv->tx_lock); + spin_lock(&priv->lock); } - 
spin_unlock_irq(&priv->lock); + spin_unlock(&priv->lock); + spin_unlock_irq(&priv->tx_lock); } static void path_rec_completion(int status, @@ -459,7 +464,7 @@ static int path_rec_start(struct net_device *dev, init_completion(&path->done); path->query_id = - ib_sa_path_rec_get(priv->ca, priv->port, + ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port, &path->pathrec, IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | @@ -615,7 +620,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) struct ipoib_neigh *neigh; unsigned long flags; - if (!spin_trylock_irqsave(&priv->tx_lock, flags)) + if (unlikely(!spin_trylock_irqsave(&priv->tx_lock, flags))) return NETDEV_TX_LOCKED; /* @@ -628,7 +633,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } - if (skb->dst && skb->dst->neighbour) { + if (likely(skb->dst && skb->dst->neighbour)) { if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) { ipoib_path_lookup(skb, dev); goto out; @@ -1107,13 +1112,16 @@ static void ipoib_add_one(struct ib_device *device) struct ipoib_dev_priv *priv; int s, e, p; + if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) + return; + dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL); if (!dev_list) return; INIT_LIST_HEAD(dev_list); - if (device->node_type == IB_NODE_SWITCH) { + if (device->node_type == RDMA_NODE_IB_SWITCH) { s = 0; e = 0; } else { @@ -1137,6 +1145,9 @@ static void ipoib_remove_one(struct ib_device *device) struct ipoib_dev_priv *priv, *tmp; struct list_head *dev_list; + if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB) + return; + dev_list = ib_get_client_data(device, &ipoib_client); list_for_each_entry_safe(priv, tmp, dev_list, list) { @@ -1181,13 +1192,16 @@ static int __init ipoib_init_module(void) goto err_fs; } + ib_sa_register_client(&ipoib_sa_client); + ret = ib_register_client(&ipoib_client); if (ret) - goto err_wq; + goto err_sa; return 0; -err_wq: +err_sa: + ib_sa_unregister_client(&ipoib_sa_client); destroy_workqueue(ipoib_workqueue); err_fs: @@ -1199,6 +1213,7 @@ err_fs: static void __exit ipoib_cleanup_module(void) { ib_unregister_client(&ipoib_client); + ib_sa_unregister_client(&ipoib_sa_client); ipoib_unregister_debugfs(); destroy_workqueue(ipoib_workqueue); } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index ec356ce7cdcd..3faa1820f0e9 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -361,7 +361,7 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast) init_completion(&mcast->done); - ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec, + ret = ib_sa_mcmember_rec_set(&ipoib_sa_client, priv->ca, priv->port, &rec, IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID | IB_SA_MCMEMBER_REC_PKEY | @@ -472,22 +472,32 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast, if (create) { comp_mask |= - IB_SA_MCMEMBER_REC_QKEY | - IB_SA_MCMEMBER_REC_SL | - IB_SA_MCMEMBER_REC_FLOW_LABEL | - IB_SA_MCMEMBER_REC_TRAFFIC_CLASS; + IB_SA_MCMEMBER_REC_QKEY | + IB_SA_MCMEMBER_REC_MTU_SELECTOR | + IB_SA_MCMEMBER_REC_MTU | + IB_SA_MCMEMBER_REC_TRAFFIC_CLASS | + IB_SA_MCMEMBER_REC_RATE_SELECTOR | + IB_SA_MCMEMBER_REC_RATE | + IB_SA_MCMEMBER_REC_SL | + IB_SA_MCMEMBER_REC_FLOW_LABEL | + IB_SA_MCMEMBER_REC_HOP_LIMIT; rec.qkey = priv->broadcast->mcmember.qkey; + rec.mtu_selector = IB_SA_EQ; + rec.mtu = priv->broadcast->mcmember.mtu; + 
rec.traffic_class = priv->broadcast->mcmember.traffic_class; + rec.rate_selector = IB_SA_EQ; + rec.rate = priv->broadcast->mcmember.rate; rec.sl = priv->broadcast->mcmember.sl; rec.flow_label = priv->broadcast->mcmember.flow_label; - rec.traffic_class = priv->broadcast->mcmember.traffic_class; + rec.hop_limit = priv->broadcast->mcmember.hop_limit; } init_completion(&mcast->done); - ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec, comp_mask, - mcast->backoff * 1000, GFP_ATOMIC, - ipoib_mcast_join_complete, + ret = ib_sa_mcmember_rec_set(&ipoib_sa_client, priv->ca, priv->port, + &rec, comp_mask, mcast->backoff * 1000, + GFP_ATOMIC, ipoib_mcast_join_complete, mcast, &mcast->query); if (ret < 0) { @@ -528,7 +538,7 @@ void ipoib_mcast_join_task(void *dev_ptr) priv->local_rate = attr.active_speed * ib_width_enum_to_int(attr.active_width); } else - ipoib_warn(priv, "ib_query_port failed\n"); + ipoib_warn(priv, "ib_query_port failed\n"); } if (!priv->broadcast) { @@ -681,7 +691,7 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) * Just make one shot at leaving and don't wait for a reply; * if we fail, too bad. */ - ret = ib_sa_mcmember_rec_delete(priv->ca, priv->port, &rec, + ret = ib_sa_mcmember_rec_delete(&ipoib_sa_client, priv->ca, priv->port, &rec, IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID | IB_SA_MCMEMBER_REC_PKEY | @@ -795,7 +805,7 @@ void ipoib_mcast_dev_flush(struct net_device *dev) } if (priv->broadcast) { - rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree); + rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree); list_add_tail(&priv->broadcast->list, &remove_list); priv->broadcast = NULL; } diff --git a/drivers/infiniband/ulp/iser/Kconfig b/drivers/infiniband/ulp/iser/Kconfig index fead87d1eff9..365a1b5f19e0 100644 --- a/drivers/infiniband/ulp/iser/Kconfig +++ b/drivers/infiniband/ulp/iser/Kconfig @@ -1,6 +1,6 @@ config INFINIBAND_ISER tristate "ISCSI RDMA Protocol" - depends on INFINIBAND && SCSI + depends on INFINIBAND && SCSI && INET select SCSI_ISCSI_ATTRS ---help--- Support for the ISCSI RDMA Protocol over InfiniBand. 
This diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 1437d7ee3b19..e9cf1a9f1e1c 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -555,6 +555,7 @@ static struct scsi_host_template iscsi_iser_sht = { .queuecommand = iscsi_queuecommand, .can_queue = ISCSI_XMIT_CMDS_MAX - 1, .sg_tablesize = ISCSI_ISER_SG_TABLESIZE, + .max_sectors = 1024, .cmd_per_lun = ISCSI_MAX_CMD_PER_LUN, .eh_abort_handler = iscsi_eh_abort, .eh_host_reset_handler = iscsi_eh_host_reset, diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 3350ba690cfe..7e1a411db2a3 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -82,8 +82,12 @@ __func__ , ## arg); \ } while (0) +#define SHIFT_4K 12 +#define SIZE_4K (1UL << SHIFT_4K) +#define MASK_4K (~(SIZE_4K-1)) + /* support upto 512KB in one RDMA */ -#define ISCSI_ISER_SG_TABLESIZE (0x80000 >> PAGE_SHIFT) +#define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K) #define ISCSI_ISER_MAX_LUN 256 #define ISCSI_ISER_MAX_CMD_LEN 16 @@ -171,6 +175,7 @@ struct iser_mem_reg { u64 va; u64 len; void *mem_h; + int is_fmr; }; struct iser_regd_buf { diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 31950a522a1c..d0b03f426581 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -42,6 +42,7 @@ #include "iscsi_iser.h" #define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */ + /** * Decrements the reference count for the * registered buffer & releases it @@ -55,7 +56,7 @@ int iser_regd_buff_release(struct iser_regd_buf *regd_buf) if ((atomic_read(&regd_buf->ref_count) == 0) || atomic_dec_and_test(&regd_buf->ref_count)) { /* if we used the dma mr, unreg is just NOP */ - if (regd_buf->reg.rkey != 0) + if (regd_buf->reg.is_fmr) iser_unreg_mem(&regd_buf->reg); if (regd_buf->dma_addr) { @@ -90,9 +91,9 @@ void iser_reg_single(struct iser_device *device, BUG_ON(dma_mapping_error(dma_addr)); regd_buf->reg.lkey = device->mr->lkey; - regd_buf->reg.rkey = 0; /* indicate there's no need to unreg */ regd_buf->reg.len = regd_buf->data_size; regd_buf->reg.va = dma_addr; + regd_buf->reg.is_fmr = 0; regd_buf->dma_addr = dma_addr; regd_buf->direction = direction; @@ -239,7 +240,7 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data, int i; /* compute the offset of first element */ - page_vec->offset = (u64) sg[0].offset; + page_vec->offset = (u64) sg[0].offset & ~MASK_4K; for (i = 0; i < data->dma_nents; i++) { total_sz += sg_dma_len(&sg[i]); @@ -247,21 +248,30 @@ first_addr = sg_dma_address(&sg[i]); last_addr = first_addr + sg_dma_len(&sg[i]); - start_aligned = !(first_addr & ~PAGE_MASK); - end_aligned = !(last_addr & ~PAGE_MASK); + start_aligned = !(first_addr & ~MASK_4K); + end_aligned = !(last_addr & ~MASK_4K); /* continue to collect page fragments till aligned or SG ends */ while (!end_aligned && (i + 1 < data->dma_nents)) { i++; total_sz += sg_dma_len(&sg[i]); last_addr = sg_dma_address(&sg[i]) + sg_dma_len(&sg[i]); - end_aligned = !(last_addr & ~MASK_4K); } - first_addr = first_addr & PAGE_MASK; - - for (page = first_addr; page < last_addr; page += PAGE_SIZE) - page_vec->pages[cur_page++] = page; + /* handle the 1st page in the 1st DMA element */ + if (cur_page == 0) { + page = first_addr &
MASK_4K; + page_vec->pages[cur_page] = page; + cur_page++; + page += SIZE_4K; + } else + page = first_addr; + + for (; page < last_addr; page += SIZE_4K) { + page_vec->pages[cur_page] = page; + cur_page++; + } } page_vec->data_size = total_sz; @@ -269,8 +279,7 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data, return cur_page; } -#define MASK_4K ((1UL << 12) - 1) /* 0xFFF */ -#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & MASK_4K) == 0) +#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0) /** * iser_data_buf_aligned_len - Tries to determine the maximal correctly aligned @@ -320,9 +329,9 @@ static void iser_data_buf_dump(struct iser_data_buf *data) struct scatterlist *sg = (struct scatterlist *)data->buf; int i; - for (i = 0; i < data->size; i++) + for (i = 0; i < data->dma_nents; i++) iser_err("sg[%d] dma_addr:0x%lX page:0x%p " - "off:%d sz:%d dma_len:%d\n", + "off:0x%x sz:0x%x dma_len:0x%x\n", i, (unsigned long)sg_dma_address(&sg[i]), sg[i].page, sg[i].offset, sg[i].length,sg_dma_len(&sg[i])); @@ -352,7 +361,7 @@ static void iser_page_vec_build(struct iser_data_buf *data, page_vec->length = page_vec_len; - if (page_vec_len * PAGE_SIZE < page_vec->data_size) { + if (page_vec_len * SIZE_4K < page_vec->data_size) { iser_err("page_vec too short to hold this SG\n"); iser_data_buf_dump(data); iser_dump_page_vec(page_vec); @@ -370,15 +379,18 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask, enum iser_data_dir cmd_dir) { struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn; + struct iser_device *device = ib_conn->device; struct iser_data_buf *mem = &iser_ctask->data[cmd_dir]; struct iser_regd_buf *regd_buf; int aligned_len; int err; + int i; + struct scatterlist *sg; regd_buf = &iser_ctask->rdma_regd[cmd_dir]; aligned_len = iser_data_buf_aligned_len(mem); - if (aligned_len != mem->size) { + if (aligned_len != mem->dma_nents) { iser_err("rdma alignment violation %d/%d aligned\n", aligned_len, mem->size); iser_data_buf_dump(mem); @@ -389,10 +401,38 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask, mem = &iser_ctask->data_copy[cmd_dir]; } - iser_page_vec_build(mem, ib_conn->page_vec); - err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg); - if (err) - return err; + /* if there is a single dma entry, FMR is not needed */ + if (mem->dma_nents == 1) { + sg = (struct scatterlist *)mem->buf; + + regd_buf->reg.lkey = device->mr->lkey; + regd_buf->reg.rkey = device->mr->rkey; + regd_buf->reg.len = sg_dma_len(&sg[0]); + regd_buf->reg.va = sg_dma_address(&sg[0]); + regd_buf->reg.is_fmr = 0; + + iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X " + "va: 0x%08lX sz: %ld]\n", + (unsigned int)regd_buf->reg.lkey, + (unsigned int)regd_buf->reg.rkey, + (unsigned long)regd_buf->reg.va, + (unsigned long)regd_buf->reg.len); + } else { /* use FMR for multiple dma entries */ + iser_page_vec_build(mem, ib_conn->page_vec); + err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg); + if (err) { + iser_data_buf_dump(mem); + iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents, + ntoh24(iser_ctask->desc.iscsi_header.dlength)); + iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n", + ib_conn->page_vec->data_size, ib_conn->page_vec->length, + ib_conn->page_vec->offset); + for (i=0 ; i<ib_conn->page_vec->length ; i++) + iser_err("page_vec[%d] = 0x%llx\n", i, + (unsigned long long) ib_conn->page_vec->pages[i]); + return err; + } + } /* take a reference on this regd buf such that it will not be released
* * (eg in send dto completion) before we get the scsi response */ diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 72febf1f8ff8..ecdca7fc1e4c 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -88,8 +88,9 @@ static int iser_create_device_ib_res(struct iser_device *device) iser_cq_tasklet_fn, (unsigned long)device); - device->mr = ib_get_dma_mr(device->pd, - IB_ACCESS_LOCAL_WRITE); + device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_WRITE | + IB_ACCESS_REMOTE_READ); if (IS_ERR(device->mr)) goto dma_mr_err; @@ -150,7 +151,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn) } ib_conn->page_vec->pages = (u64 *) (ib_conn->page_vec + 1); - params.page_shift = PAGE_SHIFT; + params.page_shift = SHIFT_4K; /* when the first/last SG element are not start/end * * page aligned, the map whould be of N+1 pages */ params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1; @@ -604,8 +605,9 @@ int iser_reg_page_vec(struct iser_conn *ib_conn, mem_reg->lkey = mem->fmr->lkey; mem_reg->rkey = mem->fmr->rkey; - mem_reg->len = page_vec->length * PAGE_SIZE; + mem_reg->len = page_vec->length * SIZE_4K; mem_reg->va = io_addr; + mem_reg->is_fmr = 1; mem_reg->mem_h = (void *)mem; mem_reg->va += page_vec->offset; diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index fd8344cdc0db..44b9e5be6687 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -96,6 +96,8 @@ static struct ib_client srp_client = { .remove = srp_remove_one }; +static struct ib_sa_client srp_sa_client; + static inline struct srp_target_port *host_to_target(struct Scsi_Host *host) { return (struct srp_target_port *) host->hostdata; @@ -267,7 +269,8 @@ static int srp_lookup_path(struct srp_target_port *target) init_completion(&target->done); - target->path_query_id = ib_sa_path_rec_get(target->srp_host->dev->dev, + target->path_query_id = ib_sa_path_rec_get(&srp_sa_client, + target->srp_host->dev->dev, target->srp_host->port, &target->path, IB_SA_PATH_REC_DGID | @@ -330,7 +333,7 @@ static int srp_send_req(struct srp_target_port *target) req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT); /* - * In the published SRP specification (draft rev. 16a), the + * In the published SRP specification (draft rev. 16a), the * port identifier format is 8 bytes of ID extension followed * by 8 bytes of GUID. Older drafts put the two halves in the * opposite order, so that the GUID comes first. 
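 * Concretely, building the rev. 16a identifier amounts to (a sketch;
 * the buffer names are illustrative, not from this driver):
 *
 *	memcpy(port_id, id_ext, 8);	bytes 0..7:  ID extension
 *	memcpy(port_id + 8, guid, 8);	bytes 8..15: port GUID
 */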
@@ -1449,12 +1452,28 @@ static ssize_t show_zero_req_lim(struct class_device *cdev, char *buf) return sprintf(buf, "%d\n", target->zero_req_lim); } -static CLASS_DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL); -static CLASS_DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL); -static CLASS_DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL); -static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); -static CLASS_DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL); -static CLASS_DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL); +static ssize_t show_local_ib_port(struct class_device *cdev, char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(cdev)); + + return sprintf(buf, "%d\n", target->srp_host->port); +} + +static ssize_t show_local_ib_device(struct class_device *cdev, char *buf) +{ + struct srp_target_port *target = host_to_target(class_to_shost(cdev)); + + return sprintf(buf, "%s\n", target->srp_host->dev->dev->name); +} + +static CLASS_DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL); +static CLASS_DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL); +static CLASS_DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL); +static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); +static CLASS_DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL); +static CLASS_DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL); +static CLASS_DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL); +static CLASS_DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL); static struct class_device_attribute *srp_host_attrs[] = { &class_device_attr_id_ext, @@ -1463,6 +1482,8 @@ static struct class_device_attribute *srp_host_attrs[] = { &class_device_attr_pkey, &class_device_attr_dgid, &class_device_attr_zero_req_lim, + &class_device_attr_local_ib_port, + &class_device_attr_local_ib_device, NULL }; @@ -1881,7 +1902,7 @@ static void srp_add_one(struct ib_device *device) if (IS_ERR(srp_dev->fmr_pool)) srp_dev->fmr_pool = NULL; - if (device->node_type == IB_NODE_SWITCH) { + if (device->node_type == RDMA_NODE_IB_SWITCH) { s = 0; e = 0; } else { @@ -1980,9 +2001,12 @@ static int __init srp_init_module(void) return ret; } + ib_sa_register_client(&srp_sa_client); + ret = ib_register_client(&srp_client); if (ret) { printk(KERN_ERR PFX "couldn't register IB client\n"); + ib_sa_unregister_client(&srp_sa_client); class_unregister(&srp_class); return ret; } @@ -1993,6 +2017,7 @@ static int __init srp_init_module(void) static void __exit srp_cleanup_module(void) { ib_unregister_client(&srp_client); + ib_sa_unregister_client(&srp_sa_client); class_unregister(&srp_class); } diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c index 82657bc86d19..d56216067549 100644 --- a/drivers/macintosh/macio_asic.c +++ b/drivers/macintosh/macio_asic.c @@ -139,7 +139,9 @@ static int macio_uevent(struct device *dev, char **envp, int num_envp, { struct macio_dev * macio_dev; struct of_device * of; - char *scratch, *compat, *compat2; + char *scratch; + const char *compat, *compat2; + int i = 0; int length, cplen, cplen2, seen = 0; @@ -173,7 +175,7 @@ static int macio_uevent(struct device *dev, char **envp, int num_envp, * it's not really legal to split it out with commas. We split it * up using a number of environment variables instead. 
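 * (Typically one OF_COMPATIBLE_<n> variable per string plus an
 * OF_COMPATIBLE_N count, so userspace can reassemble the list without
 * re-parsing.)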
*/ - compat = (char *) get_property(of->node, "compatible", &cplen); + compat = get_property(of->node, "compatible", &cplen); compat2 = compat; cplen2= cplen; while (compat && cplen > 0) { @@ -454,7 +456,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip, struct resource *parent_res) { struct macio_dev *dev; - u32 *reg; + const u32 *reg; if (np == NULL) return NULL; @@ -489,7 +491,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip, #endif MAX_NODE_NAME_SIZE, np->name); } else { - reg = (u32 *)get_property(np, "reg", NULL); + reg = get_property(np, "reg", NULL); sprintf(dev->ofdev.dev.bus_id, "%1d.%08x:%.*s", chip->lbus.index, reg ? *reg : 0, MAX_NODE_NAME_SIZE, np->name); diff --git a/drivers/macintosh/macio_sysfs.c b/drivers/macintosh/macio_sysfs.c index cae24a13526a..8566bdfdd4b8 100644 --- a/drivers/macintosh/macio_sysfs.c +++ b/drivers/macintosh/macio_sysfs.c @@ -16,12 +16,12 @@ static ssize_t compatible_show (struct device *dev, struct device_attribute *attr, char *buf) { struct of_device *of; - char *compat; + const char *compat; int cplen; int length = 0; of = &to_macio_device (dev)->ofdev; - compat = (char *) get_property(of->node, "compatible", &cplen); + compat = get_property(of->node, "compatible", &cplen); if (!compat) { *buf = '\0'; return 0; @@ -42,12 +42,12 @@ static ssize_t modalias_show (struct device *dev, struct device_attribute *attr, char *buf) { struct of_device *of; - char *compat; + const char *compat; int cplen; int length; of = &to_macio_device (dev)->ofdev; - compat = (char *) get_property (of->node, "compatible", &cplen); + compat = get_property(of->node, "compatible", &cplen); if (!compat) compat = "", cplen = 1; length = sprintf (buf, "of:N%sT%s", of->node->name, of->node->type); buf += length; diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index 00ef46898147..090e40fc5013 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c @@ -454,7 +454,7 @@ EXPORT_SYMBOL(smu_present); int __init smu_init (void) { struct device_node *np; - u32 *data; + const u32 *data; np = of_find_node_by_type(NULL, "smu"); if (np == NULL) @@ -490,7 +490,7 @@ int __init smu_init (void) printk(KERN_ERR "SMU: Can't find doorbell GPIO !\n"); goto fail; } - data = (u32 *)get_property(smu->db_node, "reg", NULL); + data = get_property(smu->db_node, "reg", NULL); if (data == NULL) { of_node_put(smu->db_node); smu->db_node = NULL; @@ -511,7 +511,7 @@ int __init smu_init (void) smu->msg_node = of_find_node_by_name(NULL, "smu-interrupt"); if (smu->msg_node == NULL) break; - data = (u32 *)get_property(smu->msg_node, "reg", NULL); + data = get_property(smu->msg_node, "reg", NULL); if (data == NULL) { of_node_put(smu->msg_node); smu->msg_node = NULL; @@ -982,11 +982,11 @@ static struct smu_sdbp_header *smu_create_sdb_partition(int id) /* Note: Only allowed to return error code in pointers (using ERR_PTR) * when interruptible is 1 */ -struct smu_sdbp_header *__smu_get_sdb_partition(int id, unsigned int *size, - int interruptible) +const struct smu_sdbp_header *__smu_get_sdb_partition(int id, + unsigned int *size, int interruptible) { char pname[32]; - struct smu_sdbp_header *part; + const struct smu_sdbp_header *part; if (!smu) return NULL; @@ -1003,8 +1003,7 @@ struct smu_sdbp_header *__smu_get_sdb_partition(int id, unsigned int *size, } else mutex_lock(&smu_part_access); - part = (struct smu_sdbp_header *)get_property(smu->of_node, - pname, size); + part = get_property(smu->of_node, pname, size); if (part == NULL) { 
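The recurring change across these macintosh hunks is const-correctness: get_property() now returns a const pointer, so callers stop casting it away. The before/after in a few lines, assuming the updated prototype ('addr' is a hypothetical destination):

	/* old: the cast silently discards const */
	u32 *reg = (u32 *)get_property(np, "reg", NULL);

	/* new: receive it const; device-tree data was never writable anyway */
	const u32 *reg = get_property(np, "reg", NULL);
	if (reg)
		addr = *reg;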
DPRINTK("trying to extract from SMU ...\n"); part = smu_create_sdb_partition(id); @@ -1015,7 +1014,7 @@ struct smu_sdbp_header *__smu_get_sdb_partition(int id, unsigned int *size, return part; } -struct smu_sdbp_header *smu_get_sdb_partition(int id, unsigned int *size) +const struct smu_sdbp_header *smu_get_sdb_partition(int id, unsigned int *size) { return __smu_get_sdb_partition(id, size, 0); } @@ -1094,7 +1093,7 @@ static ssize_t smu_write(struct file *file, const char __user *buf, pp->mode = smu_file_events; return 0; } else if (hdr.cmdtype == SMU_CMDTYPE_GET_PARTITION) { - struct smu_sdbp_header *part; + const struct smu_sdbp_header *part; part = __smu_get_sdb_partition(hdr.cmd, NULL, 1); if (part == NULL) return -EINVAL; diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c index 7f86478bdd36..a0f30d0853ea 100644 --- a/drivers/macintosh/therm_adt746x.c +++ b/drivers/macintosh/therm_adt746x.c @@ -47,7 +47,7 @@ static u8 FAN_SPD_SET[2] = {0x30, 0x31}; static u8 default_limits_local[3] = {70, 50, 70}; /* local, sensor1, sensor2 */ static u8 default_limits_chip[3] = {80, 65, 80}; /* local, sensor1, sensor2 */ -static char *sensor_location[3] = {NULL, NULL, NULL}; +static const char *sensor_location[3] = {NULL, NULL, NULL}; static int limit_adjust = 0; static int fan_speed = -1; @@ -553,7 +553,7 @@ static int __init thermostat_init(void) { struct device_node* np; - u32 *prop; + const u32 *prop; int i = 0, offset = 0; np = of_find_node_by_name(NULL, "fan"); @@ -566,13 +566,13 @@ thermostat_init(void) else return -ENODEV; - prop = (u32 *)get_property(np, "hwsensor-params-version", NULL); + prop = get_property(np, "hwsensor-params-version", NULL); printk(KERN_INFO "adt746x: version %d (%ssupported)\n", *prop, (*prop == 1)?"":"un"); if (*prop != 1) return -ENODEV; - prop = (u32 *)get_property(np, "reg", NULL); + prop = get_property(np, "reg", NULL); if (!prop) return -ENODEV; diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c index 20bf67244e2c..d00c0c37a12e 100644 --- a/drivers/macintosh/therm_pm72.c +++ b/drivers/macintosh/therm_pm72.c @@ -660,7 +660,7 @@ static int read_eeprom(int cpu, struct mpu_data *out) { struct device_node *np; char nodename[64]; - u8 *data; + const u8 *data; int len; /* prom.c routine for finding a node by path is a bit brain dead @@ -673,7 +673,7 @@ static int read_eeprom(int cpu, struct mpu_data *out) printk(KERN_ERR "therm_pm72: Failed to retrieve cpuid node from device-tree\n"); return -ENODEV; } - data = (u8 *)get_property(np, "cpuid", &len); + data = get_property(np, "cpuid", &len); if (data == NULL) { printk(KERN_ERR "therm_pm72: Failed to retrieve cpuid property from device-tree\n"); of_node_put(np); @@ -1336,7 +1336,7 @@ static int init_backside_state(struct backside_pid_state *state) */ u3 = of_find_node_by_path("/u3@0,f8000000"); if (u3 != NULL) { - u32 *vers = (u32 *)get_property(u3, "device-rev", NULL); + const u32 *vers = get_property(u3, "device-rev", NULL); if (vers) if (((*vers) & 0x3f) < 0x34) u3h = 0; @@ -2111,8 +2111,8 @@ static void fcu_lookup_fans(struct device_node *fcu_node) while ((np = of_get_next_child(fcu_node, np)) != NULL) { int type = -1; - char *loc; - u32 *reg; + const char *loc; + const u32 *reg; DBG(" control: %s, type: %s\n", np->name, np->type); @@ -2128,8 +2128,8 @@ static void fcu_lookup_fans(struct device_node *fcu_node) continue; /* Lookup for a matching location */ - loc = (char *)get_property(np, "location", NULL); - reg = (u32 *)get_property(np, "reg", NULL); + loc = 
get_property(np, "location", NULL); + reg = get_property(np, "reg", NULL); if (loc == NULL || reg == NULL) continue; DBG(" matching location: %s, reg: 0x%08x\n", loc, *reg); diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c index c7d1c290cb0c..738faab1b22c 100644 --- a/drivers/macintosh/therm_windtunnel.c +++ b/drivers/macintosh/therm_windtunnel.c @@ -484,14 +484,14 @@ struct apple_thermal_info { static int __init g4fan_init( void ) { - struct apple_thermal_info *info; + const struct apple_thermal_info *info; struct device_node *np; init_MUTEX( &x.lock ); if( !(np=of_find_node_by_name(NULL, "power-mgt")) ) return -ENODEV; - info = (struct apple_thermal_info*)get_property(np, "thermal-info", NULL); + info = get_property(np, "thermal-info", NULL); of_node_put(np); if( !info || !machine_is_compatible("PowerMac3,6") ) diff --git a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c index 69d5452fd22f..7512d1c15207 100644 --- a/drivers/macintosh/via-cuda.c +++ b/drivers/macintosh/via-cuda.c @@ -123,7 +123,7 @@ int __init find_via_cuda(void) { struct adb_request req; phys_addr_t taddr; - u32 *reg; + const u32 *reg; int err; if (vias != 0) @@ -132,7 +132,7 @@ int __init find_via_cuda(void) if (vias == 0) return 0; - reg = (u32 *)get_property(vias, "reg", NULL); + reg = get_property(vias, "reg", NULL); if (reg == NULL) { printk(KERN_ERR "via-cuda: No \"reg\" property !\n"); goto fail; diff --git a/drivers/macintosh/via-pmu-led.c b/drivers/macintosh/via-pmu-led.c index 5189d5454b1f..179af10105d9 100644 --- a/drivers/macintosh/via-pmu-led.c +++ b/drivers/macintosh/via-pmu-led.c @@ -120,7 +120,7 @@ static int __init via_pmu_led_init(void) dt = of_find_node_by_path("/"); if (dt == NULL) return -ENODEV; - model = (const char *)get_property(dt, "model", NULL); + model = get_property(dt, "model", NULL); if (model == NULL) return -ENODEV; if (strncmp(model, "PowerBook", strlen("PowerBook")) != 0 && diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index 14610a63f580..dda03985dcf5 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c @@ -280,7 +280,7 @@ static char *pbook_type[] = { int __init find_via_pmu(void) { u64 taddr; - u32 *reg; + const u32 *reg; if (via != 0) return 1; @@ -288,7 +288,7 @@ int __init find_via_pmu(void) if (vias == NULL) return 0; - reg = (u32 *)get_property(vias, "reg", NULL); + reg = get_property(vias, "reg", NULL); if (reg == NULL) { printk(KERN_ERR "via-pmu: No \"reg\" property !\n"); goto fail; @@ -330,7 +330,7 @@ int __init find_via_pmu(void) gpiop = of_find_node_by_name(NULL, "gpio"); if (gpiop) { - reg = (u32 *)get_property(gpiop, "reg", NULL); + reg = get_property(gpiop, "reg", NULL); if (reg) gaddr = of_translate_address(gpiop, reg); if (gaddr != OF_BAD_ADDR) @@ -479,9 +479,9 @@ static int __init via_pmu_dev_init(void) pmu_batteries[1].flags |= PMU_BATT_TYPE_SMART; } else { struct device_node* prim = find_devices("power-mgt"); - u32 *prim_info = NULL; + const u32 *prim_info = NULL; if (prim) - prim_info = (u32 *)get_property(prim, "prim-info", NULL); + prim_info = get_property(prim, "prim-info", NULL); if (prim_info) { /* Other stuffs here yet unknown */ pmu_battery_count = (prim_info[6] >> 16) & 0xff; diff --git a/drivers/macintosh/windfarm_pm81.c b/drivers/macintosh/windfarm_pm81.c index f1df6efcbe68..2ff546e4c92f 100644 --- a/drivers/macintosh/windfarm_pm81.c +++ b/drivers/macintosh/windfarm_pm81.c @@ -396,7 +396,7 @@ static void wf_smu_sys_fans_tick(struct wf_smu_sys_fans_state 
*st) static void wf_smu_create_cpu_fans(void) { struct wf_cpu_pid_param pid_param; - struct smu_sdbp_header *hdr; + const struct smu_sdbp_header *hdr; struct smu_sdbp_cpupiddata *piddata; struct smu_sdbp_fvt *fvt; s32 tmax, tdelta, maxpow, powadj; @@ -702,7 +702,7 @@ static struct notifier_block wf_smu_events = { static int wf_init_pm(void) { - struct smu_sdbp_header *hdr; + const struct smu_sdbp_header *hdr; hdr = smu_get_sdb_partition(SMU_SDB_SENSORTREE_ID, NULL); if (hdr != 0) { diff --git a/drivers/macintosh/windfarm_pm91.c b/drivers/macintosh/windfarm_pm91.c index 0d6372e96d32..59e9ffe37c39 100644 --- a/drivers/macintosh/windfarm_pm91.c +++ b/drivers/macintosh/windfarm_pm91.c @@ -144,7 +144,7 @@ static struct wf_smu_slots_fans_state *wf_smu_slots_fans; static void wf_smu_create_cpu_fans(void) { struct wf_cpu_pid_param pid_param; - struct smu_sdbp_header *hdr; + const struct smu_sdbp_header *hdr; struct smu_sdbp_cpupiddata *piddata; struct smu_sdbp_fvt *fvt; s32 tmax, tdelta, maxpow, powadj; diff --git a/drivers/macintosh/windfarm_smu_controls.c b/drivers/macintosh/windfarm_smu_controls.c index a9e88edc0c72..bff1f372f188 100644 --- a/drivers/macintosh/windfarm_smu_controls.c +++ b/drivers/macintosh/windfarm_smu_controls.c @@ -159,14 +159,15 @@ static struct smu_fan_control *smu_fan_create(struct device_node *node, int pwm_fan) { struct smu_fan_control *fct; - s32 *v; u32 *reg; - char *l; + const s32 *v; + const u32 *reg; + const char *l; fct = kmalloc(sizeof(struct smu_fan_control), GFP_KERNEL); if (fct == NULL) return NULL; fct->ctrl.ops = &smu_fan_ops; - l = (char *)get_property(node, "location", NULL); + l = get_property(node, "location", NULL); if (l == NULL) goto fail; @@ -223,17 +224,17 @@ static struct smu_fan_control *smu_fan_create(struct device_node *node, goto fail; /* Get min & max values*/ - v = (s32 *)get_property(node, "min-value", NULL); + v = get_property(node, "min-value", NULL); if (v == NULL) goto fail; fct->min = *v; - v = (s32 *)get_property(node, "max-value", NULL); + v = get_property(node, "max-value", NULL); if (v == NULL) goto fail; fct->max = *v; /* Get "reg" value */ - reg = (u32 *)get_property(node, "reg", NULL); + reg = get_property(node, "reg", NULL); if (reg == NULL) goto fail; fct->reg = *reg; diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c index e295a07a1ebc..aceb61d9fbc8 100644 --- a/drivers/macintosh/windfarm_smu_sat.c +++ b/drivers/macintosh/windfarm_smu_sat.c @@ -233,15 +233,15 @@ static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev) { struct wf_sat *sat; struct wf_sat_sensor *sens; - u32 *reg; - char *loc, *type; + const u32 *reg; + const char *loc, *type; u8 addr, chip, core; struct device_node *child; int shift, cpu, index; char *name; int vsens[2], isens[2]; - reg = (u32 *) get_property(dev, "reg", NULL); + reg = get_property(dev, "reg", NULL); if (reg == NULL) return; addr = *reg; @@ -268,7 +268,7 @@ static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev) isens[0] = isens[1] = -1; child = NULL; while ((child = of_get_next_child(dev, child)) != NULL) { - reg = (u32 *) get_property(child, "reg", NULL); + reg = get_property(child, "reg", NULL); type = get_property(child, "device_type", NULL); loc = get_property(child, "location", NULL); if (reg == NULL || loc == NULL) diff --git a/drivers/macintosh/windfarm_smu_sensors.c b/drivers/macintosh/windfarm_smu_sensors.c index bed25dcf8a1e..defe9922ebd1 100644 --- a/drivers/macintosh/windfarm_smu_sensors.c +++ 
b/drivers/macintosh/windfarm_smu_sensors.c @@ -198,14 +198,14 @@ static struct wf_sensor_ops smu_slotspow_ops = { static struct smu_ad_sensor *smu_ads_create(struct device_node *node) { struct smu_ad_sensor *ads; - char *c, *l; - u32 *v; + const char *c, *l; + const u32 *v; ads = kmalloc(sizeof(struct smu_ad_sensor), GFP_KERNEL); if (ads == NULL) return NULL; - c = (char *)get_property(node, "device_type", NULL); - l = (char *)get_property(node, "location", NULL); + c = get_property(node, "device_type", NULL); + l = get_property(node, "location", NULL); if (c == NULL || l == NULL) goto fail; @@ -255,7 +255,7 @@ static struct smu_ad_sensor *smu_ads_create(struct device_node *node) } else goto fail; - v = (u32 *)get_property(node, "reg", NULL); + v = get_property(node, "reg", NULL); if (v == NULL) goto fail; ads->reg = *v; @@ -382,7 +382,7 @@ smu_cpu_power_create(struct wf_sensor *volts, struct wf_sensor *amps) static void smu_fetch_param_partitions(void) { - struct smu_sdbp_header *hdr; + const struct smu_sdbp_header *hdr; /* Get CPU voltage/current/power calibration data */ hdr = smu_get_sdb_partition(SMU_SDB_CPUVCP_ID, NULL); diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 6022ed12a795..bdbd34993a80 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -5,6 +5,7 @@ * This file is released under the GPL. */ +#include <linux/err.h> #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> @@ -78,11 +79,13 @@ struct crypt_config { */ struct crypt_iv_operations *iv_gen_ops; char *iv_mode; - void *iv_gen_private; + struct crypto_cipher *iv_gen_private; sector_t iv_offset; unsigned int iv_size; - struct crypto_tfm *tfm; + char cipher[CRYPTO_MAX_ALG_NAME]; + char chainmode[CRYPTO_MAX_ALG_NAME]; + struct crypto_blkcipher *tfm; unsigned int key_size; u8 key[0]; }; @@ -96,12 +99,12 @@ static kmem_cache_t *_crypt_io_pool; /* * Different IV generation algorithms: * - * plain: the initial vector is the 32-bit low-endian version of the sector + * plain: the initial vector is the 32-bit little-endian version of the sector * number, padded with zeros if neccessary. * - * ess_iv: "encrypted sector|salt initial vector", the sector number is - * encrypted with the bulk cipher using a salt as key. The salt - * should be derived from the bulk cipher's key via hashing. + * essiv: "encrypted sector|salt initial vector", the sector number is + * encrypted with the bulk cipher using a salt as key. The salt + * should be derived from the bulk cipher's key via hashing. 
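 *
 *         (a sketch of what the generator reduces to after this patch,
 *          using the cipher handle that was keyed with the hashed salt:
 *              memset(iv, 0, cc->iv_size);
 *              *(u64 *)iv = cpu_to_le64(sector);
 *              crypto_cipher_encrypt_one(cc->iv_gen_private, iv, iv);
 *          i.e. the IV is the sector number encrypted under H(key).)
 *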
* * plumb: unimplemented, see: * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454 @@ -118,11 +121,13 @@ static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector) static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, const char *opts) { - struct crypto_tfm *essiv_tfm; - struct crypto_tfm *hash_tfm; + struct crypto_cipher *essiv_tfm; + struct crypto_hash *hash_tfm; + struct hash_desc desc; struct scatterlist sg; unsigned int saltsize; u8 *salt; + int err; if (opts == NULL) { ti->error = "Digest algorithm missing for ESSIV mode"; @@ -130,76 +135,70 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, } /* Hash the cipher key with the given hash algorithm */ - hash_tfm = crypto_alloc_tfm(opts, CRYPTO_TFM_REQ_MAY_SLEEP); - if (hash_tfm == NULL) { + hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(hash_tfm)) { ti->error = "Error initializing ESSIV hash"; - return -EINVAL; + return PTR_ERR(hash_tfm); } - if (crypto_tfm_alg_type(hash_tfm) != CRYPTO_ALG_TYPE_DIGEST) { - ti->error = "Expected digest algorithm for ESSIV hash"; - crypto_free_tfm(hash_tfm); - return -EINVAL; - } - - saltsize = crypto_tfm_alg_digestsize(hash_tfm); + saltsize = crypto_hash_digestsize(hash_tfm); salt = kmalloc(saltsize, GFP_KERNEL); if (salt == NULL) { ti->error = "Error kmallocing salt storage in ESSIV"; - crypto_free_tfm(hash_tfm); + crypto_free_hash(hash_tfm); return -ENOMEM; } sg_set_buf(&sg, cc->key, cc->key_size); - crypto_digest_digest(hash_tfm, &sg, 1, salt); - crypto_free_tfm(hash_tfm); + desc.tfm = hash_tfm; + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; + err = crypto_hash_digest(&desc, &sg, cc->key_size, salt); + crypto_free_hash(hash_tfm); + + if (err) { + ti->error = "Error calculating hash in ESSIV"; + return err; + } /* Setup the essiv_tfm with the given salt */ - essiv_tfm = crypto_alloc_tfm(crypto_tfm_alg_name(cc->tfm), - CRYPTO_TFM_MODE_ECB | - CRYPTO_TFM_REQ_MAY_SLEEP); - if (essiv_tfm == NULL) { + essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(essiv_tfm)) { ti->error = "Error allocating crypto tfm for ESSIV"; kfree(salt); - return -EINVAL; + return PTR_ERR(essiv_tfm); } - if (crypto_tfm_alg_blocksize(essiv_tfm) - != crypto_tfm_alg_ivsize(cc->tfm)) { + if (crypto_cipher_blocksize(essiv_tfm) != + crypto_blkcipher_ivsize(cc->tfm)) { ti->error = "Block size of ESSIV cipher does " "not match IV size of block cipher"; - crypto_free_tfm(essiv_tfm); + crypto_free_cipher(essiv_tfm); kfree(salt); return -EINVAL; } - if (crypto_cipher_setkey(essiv_tfm, salt, saltsize) < 0) { + err = crypto_cipher_setkey(essiv_tfm, salt, saltsize); + if (err) { ti->error = "Failed to set key for ESSIV cipher"; - crypto_free_tfm(essiv_tfm); + crypto_free_cipher(essiv_tfm); kfree(salt); - return -EINVAL; + return err; } kfree(salt); - cc->iv_gen_private = (void *)essiv_tfm; + cc->iv_gen_private = essiv_tfm; return 0; } static void crypt_iv_essiv_dtr(struct crypt_config *cc) { - crypto_free_tfm((struct crypto_tfm *)cc->iv_gen_private); + crypto_free_cipher(cc->iv_gen_private); cc->iv_gen_private = NULL; } static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector) { - struct scatterlist sg; - memset(iv, 0, cc->iv_size); *(u64 *)iv = cpu_to_le64(sector); - - sg_set_buf(&sg, iv, cc->iv_size); - crypto_cipher_encrypt((struct crypto_tfm *)cc->iv_gen_private, - &sg, &sg, cc->iv_size); - + crypto_cipher_encrypt_one(cc->iv_gen_private, iv, iv); return 0; } @@ -220,6 +219,11 @@ 
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out, int write, sector_t sector) { u8 iv[cc->iv_size]; + struct blkcipher_desc desc = { + .tfm = cc->tfm, + .info = iv, + .flags = CRYPTO_TFM_REQ_MAY_SLEEP, + }; int r; if (cc->iv_gen_ops) { @@ -228,14 +232,14 @@ crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out, return r; if (write) - r = crypto_cipher_encrypt_iv(cc->tfm, out, in, length, iv); + r = crypto_blkcipher_encrypt_iv(&desc, out, in, length); else - r = crypto_cipher_decrypt_iv(cc->tfm, out, in, length, iv); + r = crypto_blkcipher_decrypt_iv(&desc, out, in, length); } else { if (write) - r = crypto_cipher_encrypt(cc->tfm, out, in, length); + r = crypto_blkcipher_encrypt(&desc, out, in, length); else - r = crypto_cipher_decrypt(cc->tfm, out, in, length); + r = crypto_blkcipher_decrypt(&desc, out, in, length); } return r; @@ -510,13 +514,12 @@ static void crypt_encode_key(char *hex, u8 *key, unsigned int size) static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) { struct crypt_config *cc; - struct crypto_tfm *tfm; + struct crypto_blkcipher *tfm; char *tmp; char *cipher; char *chainmode; char *ivmode; char *ivopts; - unsigned int crypto_flags; unsigned int key_size; unsigned long long tmpll; @@ -556,31 +559,25 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) ivmode = "plain"; } - /* Choose crypto_flags according to chainmode */ - if (strcmp(chainmode, "cbc") == 0) - crypto_flags = CRYPTO_TFM_MODE_CBC; - else if (strcmp(chainmode, "ecb") == 0) - crypto_flags = CRYPTO_TFM_MODE_ECB; - else { - ti->error = "Unknown chaining mode"; + if (strcmp(chainmode, "ecb") && !ivmode) { + ti->error = "This chaining mode requires an IV mechanism"; goto bad1; } - if (crypto_flags != CRYPTO_TFM_MODE_ECB && !ivmode) { - ti->error = "This chaining mode requires an IV mechanism"; + if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)", chainmode, + cipher) >= CRYPTO_MAX_ALG_NAME) { + ti->error = "Chain mode + cipher name is too long"; goto bad1; } - tfm = crypto_alloc_tfm(cipher, crypto_flags | CRYPTO_TFM_REQ_MAY_SLEEP); - if (!tfm) { + tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) { ti->error = "Error allocating crypto tfm"; goto bad1; } - if (crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER) { - ti->error = "Expected cipher algorithm"; - goto bad2; - } + strcpy(cc->cipher, cipher); + strcpy(cc->chainmode, chainmode); cc->tfm = tfm; /* @@ -603,12 +600,12 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0) goto bad2; - if (tfm->crt_cipher.cit_decrypt_iv && tfm->crt_cipher.cit_encrypt_iv) + cc->iv_size = crypto_blkcipher_ivsize(tfm); + if (cc->iv_size) /* at least a 64 bit sector number should fit in our buffer */ - cc->iv_size = max(crypto_tfm_alg_ivsize(tfm), + cc->iv_size = max(cc->iv_size, (unsigned int)(sizeof(u64) / sizeof(u8))); else { - cc->iv_size = 0; if (cc->iv_gen_ops) { DMWARN("Selected cipher does not support IVs"); if (cc->iv_gen_ops->dtr) @@ -629,7 +626,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad4; } - if (tfm->crt_cipher.cit_setkey(tfm, cc->key, key_size) < 0) { + if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) { ti->error = "Error setting key"; goto bad5; } @@ -675,7 +672,7 @@ bad3: if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) cc->iv_gen_ops->dtr(cc); bad2: - crypto_free_tfm(tfm); + crypto_free_blkcipher(tfm); bad1: /* Must zero key 
material before freeing */ memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8)); @@ -693,7 +690,7 @@ static void crypt_dtr(struct dm_target *ti) kfree(cc->iv_mode); if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) cc->iv_gen_ops->dtr(cc); - crypto_free_tfm(cc->tfm); + crypto_free_blkcipher(cc->tfm); dm_put_device(ti, cc->dev); /* Must zero key material before freeing */ @@ -858,18 +855,9 @@ static int crypt_status(struct dm_target *ti, status_type_t type, break; case STATUSTYPE_TABLE: - cipher = crypto_tfm_alg_name(cc->tfm); + cipher = crypto_blkcipher_name(cc->tfm); - switch(cc->tfm->crt_cipher.cit_mode) { - case CRYPTO_TFM_MODE_CBC: - chainmode = "cbc"; - break; - case CRYPTO_TFM_MODE_ECB: - chainmode = "ecb"; - break; - default: - BUG(); - } + chainmode = cc->chainmode; if (cc->iv_mode) DMEMIT("%s-%s-%s ", cipher, chainmode, cc->iv_mode); diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig index 1344ad7a4b14..717e90448fc6 100644 --- a/drivers/mtd/Kconfig +++ b/drivers/mtd/Kconfig @@ -263,6 +263,14 @@ config RFD_FTL http://www.gensw.com/pages/prod/bios/rfd.htm +config SSFDC + bool "NAND SSFDC (SmartMedia) read only translation layer" + depends on MTD + default n + help + This enables read only access to SmartMedia formatted NAND + flash. You can mount it with FAT file system. + source "drivers/mtd/chips/Kconfig" source "drivers/mtd/maps/Kconfig" diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile index fc9374407c2b..1e36b9aed98b 100644 --- a/drivers/mtd/Makefile +++ b/drivers/mtd/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_FTL) += ftl.o mtd_blkdevs.o obj-$(CONFIG_NFTL) += nftl.o mtd_blkdevs.o obj-$(CONFIG_INFTL) += inftl.o mtd_blkdevs.o obj-$(CONFIG_RFD_FTL) += rfd_ftl.o mtd_blkdevs.o +obj-$(CONFIG_SSFDC) += ssfdc.o mtd_blkdevs.o nftl-objs := nftlcore.o nftlmount.o inftl-objs := inftlcore.o inftlmount.o diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index a482e8922de1..702ae4cd8691 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -212,6 +212,7 @@ static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param) { mtd->lock = cfi_atmel_lock; mtd->unlock = cfi_atmel_unlock; + mtd->flags |= MTD_STUPID_LOCK; } static struct cfi_fixup cfi_fixup_table[] = { diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c index 2c0149708739..354e1657cc26 100644 --- a/drivers/mtd/devices/pmc551.c +++ b/drivers/mtd/devices/pmc551.c @@ -4,82 +4,82 @@ * PMC551 PCI Mezzanine Ram Device * * Author: - * Mark Ferrell <mferrell@mvista.com> - * Copyright 1999,2000 Nortel Networks + * Mark Ferrell <mferrell@mvista.com> + * Copyright 1999,2000 Nortel Networks * * License: - * As part of this driver was derived from the slram.c driver it - * falls under the same license, which is GNU General Public - * License v2 + * As part of this driver was derived from the slram.c driver it + * falls under the same license, which is GNU General Public + * License v2 * * Description: - * This driver is intended to support the PMC551 PCI Ram device - * from Ramix Inc. The PMC551 is a PMC Mezzanine module for - * cPCI embedded systems. The device contains a single SROM - * that initially programs the V370PDC chipset onboard the - * device, and various banks of DRAM/SDRAM onboard. This driver - * implements this PCI Ram device as an MTD (Memory Technology - * Device) so that it can be used to hold a file system, or for - * added swap space in embedded systems. 
Since the memory on - * this board isn't as fast as main memory we do not try to hook - * it into main memory as that would simply reduce performance - * on the system. Using it as a block device allows us to use - * it as high speed swap or for a high speed disk device of some - * sort. Which becomes very useful on diskless systems in the - * embedded market I might add. + * This driver is intended to support the PMC551 PCI Ram device + * from Ramix Inc. The PMC551 is a PMC Mezzanine module for + * cPCI embedded systems. The device contains a single SROM + * that initially programs the V370PDC chipset onboard the + * device, and various banks of DRAM/SDRAM onboard. This driver + * implements this PCI Ram device as an MTD (Memory Technology + * Device) so that it can be used to hold a file system, or for + * added swap space in embedded systems. Since the memory on + * this board isn't as fast as main memory we do not try to hook + * it into main memory as that would simply reduce performance + * on the system. Using it as a block device allows us to use + * it as high speed swap or for a high speed disk device of some + * sort. Which becomes very useful on diskless systems in the + * embedded market I might add. * * Notes: - * Due to what I assume is more buggy SROM, the 64M PMC551 I - * have available claims that all 4 of it's DRAM banks have 64M - * of ram configured (making a grand total of 256M onboard). - * This is slightly annoying since the BAR0 size reflects the - * aperture size, not the dram size, and the V370PDC supplies no - * other method for memory size discovery. This problem is - * mostly only relevant when compiled as a module, as the - * unloading of the module with an aperture size smaller then - * the ram will cause the driver to detect the onboard memory - * size to be equal to the aperture size when the module is - * reloaded. Soooo, to help, the module supports an msize - * option to allow the specification of the onboard memory, and - * an asize option, to allow the specification of the aperture - * size. The aperture must be equal to or less then the memory - * size, the driver will correct this if you screw it up. This - * problem is not relevant for compiled in drivers as compiled - * in drivers only init once. + * Due to what I assume is more buggy SROM, the 64M PMC551 I + * have available claims that all 4 of it's DRAM banks have 64M + * of ram configured (making a grand total of 256M onboard). + * This is slightly annoying since the BAR0 size reflects the + * aperture size, not the dram size, and the V370PDC supplies no + * other method for memory size discovery. This problem is + * mostly only relevant when compiled as a module, as the + * unloading of the module with an aperture size smaller then + * the ram will cause the driver to detect the onboard memory + * size to be equal to the aperture size when the module is + * reloaded. Soooo, to help, the module supports an msize + * option to allow the specification of the onboard memory, and + * an asize option, to allow the specification of the aperture + * size. The aperture must be equal to or less then the memory + * size, the driver will correct this if you screw it up. This + * problem is not relevant for compiled in drivers as compiled + * in drivers only init once. * * Credits: - * Saeed Karamooz <saeed@ramix.com> of Ramix INC. for the - * initial example code of how to initialize this device and for - * help with questions I had concerning operation of the device. 
+ * Saeed Karamooz <saeed@ramix.com> of Ramix INC. for the + * initial example code of how to initialize this device and for + * help with questions I had concerning operation of the device. * - * Most of the MTD code for this driver was originally written - * for the slram.o module in the MTD drivers package which - * allows the mapping of system memory into an MTD device. - * Since the PMC551 memory module is accessed in the same - * fashion as system memory, the slram.c code became a very nice - * fit to the needs of this driver. All we added was PCI - * detection/initialization to the driver and automatically figure - * out the size via the PCI detection.o, later changes by Corey - * Minyard set up the card to utilize a 1M sliding apature. + * Most of the MTD code for this driver was originally written + * for the slram.o module in the MTD drivers package which + * allows the mapping of system memory into an MTD device. + * Since the PMC551 memory module is accessed in the same + * fashion as system memory, the slram.c code became a very nice + * fit to the needs of this driver. All we added was PCI + * detection/initialization to the driver and automatically figure + * out the size via the PCI detection.o, later changes by Corey + * Minyard set up the card to utilize a 1M sliding apature. * - * Corey Minyard <minyard@nortelnetworks.com> - * * Modified driver to utilize a sliding aperture instead of - * mapping all memory into kernel space which turned out to - * be very wasteful. - * * Located a bug in the SROM's initialization sequence that - * made the memory unusable, added a fix to code to touch up - * the DRAM some. + * Corey Minyard <minyard@nortelnetworks.com> + * * Modified driver to utilize a sliding aperture instead of + * mapping all memory into kernel space which turned out to + * be very wasteful. + * * Located a bug in the SROM's initialization sequence that + * made the memory unusable, added a fix to code to touch up + * the DRAM some. * * Bugs/FIXME's: - * * MUST fix the init function to not spin on a register - * waiting for it to set .. this does not safely handle busted - * devices that never reset the register correctly which will - * cause the system to hang w/ a reboot being the only chance at - * recover. [sort of fixed, could be better] - * * Add I2C handling of the SROM so we can read the SROM's information - * about the aperture size. This should always accurately reflect the - * onboard memory size. - * * Comb the init routine. It's still a bit cludgy on a few things. + * * MUST fix the init function to not spin on a register + * waiting for it to set .. this does not safely handle busted + * devices that never reset the register correctly which will + * cause the system to hang w/ a reboot being the only chance at + * recover. [sort of fixed, could be better] + * * Add I2C handling of the SROM so we can read the SROM's information + * about the aperture size. This should always accurately reflect the + * onboard memory size. + * * Comb the init routine. It's still a bit cludgy on a few things. 
*/ #include <linux/kernel.h> @@ -105,74 +105,77 @@ static struct mtd_info *pmc551list; -static int pmc551_erase (struct mtd_info *mtd, struct erase_info *instr) +static int pmc551_erase(struct mtd_info *mtd, struct erase_info *instr) { - struct mypriv *priv = mtd->priv; - u32 soff_hi, soff_lo; /* start address offset hi/lo */ - u32 eoff_hi, eoff_lo; /* end address offset hi/lo */ - unsigned long end; + struct mypriv *priv = mtd->priv; + u32 soff_hi, soff_lo; /* start address offset hi/lo */ + u32 eoff_hi, eoff_lo; /* end address offset hi/lo */ + unsigned long end; u_char *ptr; size_t retlen; #ifdef CONFIG_MTD_PMC551_DEBUG - printk(KERN_DEBUG "pmc551_erase(pos:%ld, len:%ld)\n", (long)instr->addr, (long)instr->len); + printk(KERN_DEBUG "pmc551_erase(pos:%ld, len:%ld)\n", (long)instr->addr, + (long)instr->len); #endif - end = instr->addr + instr->len - 1; + end = instr->addr + instr->len - 1; - /* Is it past the end? */ - if ( end > mtd->size ) { + /* Is it past the end? */ + if (end > mtd->size) { #ifdef CONFIG_MTD_PMC551_DEBUG - printk(KERN_DEBUG "pmc551_erase() out of bounds (%ld > %ld)\n", (long)end, (long)mtd->size); + printk(KERN_DEBUG "pmc551_erase() out of bounds (%ld > %ld)\n", + (long)end, (long)mtd->size); #endif - return -EINVAL; - } - - eoff_hi = end & ~(priv->asize - 1); - soff_hi = instr->addr & ~(priv->asize - 1); - eoff_lo = end & (priv->asize - 1); - soff_lo = instr->addr & (priv->asize - 1); - - pmc551_point (mtd, instr->addr, instr->len, &retlen, &ptr); - - if ( soff_hi == eoff_hi || mtd->size == priv->asize) { - /* The whole thing fits within one access, so just one shot - will do it. */ - memset(ptr, 0xff, instr->len); - } else { - /* We have to do multiple writes to get all the data - written. */ - while (soff_hi != eoff_hi) { + return -EINVAL; + } + + eoff_hi = end & ~(priv->asize - 1); + soff_hi = instr->addr & ~(priv->asize - 1); + eoff_lo = end & (priv->asize - 1); + soff_lo = instr->addr & (priv->asize - 1); + + pmc551_point(mtd, instr->addr, instr->len, &retlen, &ptr); + + if (soff_hi == eoff_hi || mtd->size == priv->asize) { + /* The whole thing fits within one access, so just one shot + will do it. */ + memset(ptr, 0xff, instr->len); + } else { + /* We have to do multiple writes to get all the data + written. 
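 * (The loop exists because the PMC551 exposes its RAM through a small
 *  sliding aperture; a sketch of the windowing math pmc551_point()
 *  applies, assuming a power-of-two aperture size:
 *      hi = offset & ~(asize - 1);     window base
 *      lo = offset & (asize - 1);      offset inside the window
 *  remap PMC551_PCI_MEM_MAP0 to (base_map0 | hi) whenever the window
 *  changes, then touch at most asize - lo bytes before remapping.)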
*/ + while (soff_hi != eoff_hi) { #ifdef CONFIG_MTD_PMC551_DEBUG - printk( KERN_DEBUG "pmc551_erase() soff_hi: %ld, eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi); + printk(KERN_DEBUG "pmc551_erase() soff_hi: %ld, " + "eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi); #endif - memset(ptr, 0xff, priv->asize); - if (soff_hi + priv->asize >= mtd->size) { - goto out; - } - soff_hi += priv->asize; - pmc551_point (mtd,(priv->base_map0|soff_hi), - priv->asize, &retlen, &ptr); - } - memset (ptr, 0xff, eoff_lo); - } - -out: + memset(ptr, 0xff, priv->asize); + if (soff_hi + priv->asize >= mtd->size) { + goto out; + } + soff_hi += priv->asize; + pmc551_point(mtd, (priv->base_map0 | soff_hi), + priv->asize, &retlen, &ptr); + } + memset(ptr, 0xff, eoff_lo); + } + + out: instr->state = MTD_ERASE_DONE; #ifdef CONFIG_MTD_PMC551_DEBUG printk(KERN_DEBUG "pmc551_erase() done\n"); #endif - mtd_erase_callback(instr); - return 0; + mtd_erase_callback(instr); + return 0; } - -static int pmc551_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf) +static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len, + size_t * retlen, u_char ** mtdbuf) { - struct mypriv *priv = mtd->priv; - u32 soff_hi; - u32 soff_lo; + struct mypriv *priv = mtd->priv; + u32 soff_hi; + u32 soff_lo; #ifdef CONFIG_MTD_PMC551_DEBUG printk(KERN_DEBUG "pmc551_point(%ld, %ld)\n", (long)from, (long)len); @@ -180,18 +183,19 @@ static int pmc551_point (struct mtd_info *mtd, loff_t from, size_t len, size_t * if (from + len > mtd->size) { #ifdef CONFIG_MTD_PMC551_DEBUG - printk(KERN_DEBUG "pmc551_point() out of bounds (%ld > %ld)\n", (long)from+len, (long)mtd->size); + printk(KERN_DEBUG "pmc551_point() out of bounds (%ld > %ld)\n", + (long)from + len, (long)mtd->size); #endif return -EINVAL; } - soff_hi = from & ~(priv->asize - 1); - soff_lo = from & (priv->asize - 1); + soff_hi = from & ~(priv->asize - 1); + soff_lo = from & (priv->asize - 1); /* Cheap hack optimization */ - if( priv->curr_map0 != from ) { - pci_write_config_dword ( priv->dev, PMC551_PCI_MEM_MAP0, - (priv->base_map0 | soff_hi) ); + if (priv->curr_map0 != from) { + pci_write_config_dword(priv->dev, PMC551_PCI_MEM_MAP0, + (priv->base_map0 | soff_hi)); priv->curr_map0 = soff_hi; } @@ -200,137 +204,144 @@ static int pmc551_point (struct mtd_info *mtd, loff_t from, size_t len, size_t * return 0; } - -static void pmc551_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len) +static void pmc551_unpoint(struct mtd_info *mtd, u_char * addr, loff_t from, + size_t len) { #ifdef CONFIG_MTD_PMC551_DEBUG printk(KERN_DEBUG "pmc551_unpoint()\n"); #endif } - -static int pmc551_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) +static int pmc551_read(struct mtd_info *mtd, loff_t from, size_t len, + size_t * retlen, u_char * buf) { - struct mypriv *priv = mtd->priv; - u32 soff_hi, soff_lo; /* start address offset hi/lo */ - u32 eoff_hi, eoff_lo; /* end address offset hi/lo */ - unsigned long end; + struct mypriv *priv = mtd->priv; + u32 soff_hi, soff_lo; /* start address offset hi/lo */ + u32 eoff_hi, eoff_lo; /* end address offset hi/lo */ + unsigned long end; u_char *ptr; - u_char *copyto = buf; + u_char *copyto = buf; #ifdef CONFIG_MTD_PMC551_DEBUG - printk(KERN_DEBUG "pmc551_read(pos:%ld, len:%ld) asize: %ld\n", (long)from, (long)len, (long)priv->asize); + printk(KERN_DEBUG "pmc551_read(pos:%ld, len:%ld) asize: %ld\n", + (long)from, (long)len, (long)priv->asize); #endif - end = from + len - 1; + end = from + 
len - 1; - /* Is it past the end? */ - if (end > mtd->size) { + /* Is it past the end? */ + if (end > mtd->size) { #ifdef CONFIG_MTD_PMC551_DEBUG - printk(KERN_DEBUG "pmc551_read() out of bounds (%ld > %ld)\n", (long) end, (long)mtd->size); + printk(KERN_DEBUG "pmc551_read() out of bounds (%ld > %ld)\n", + (long)end, (long)mtd->size); #endif - return -EINVAL; - } - - soff_hi = from & ~(priv->asize - 1); - eoff_hi = end & ~(priv->asize - 1); - soff_lo = from & (priv->asize - 1); - eoff_lo = end & (priv->asize - 1); - - pmc551_point (mtd, from, len, retlen, &ptr); - - if (soff_hi == eoff_hi) { - /* The whole thing fits within one access, so just one shot - will do it. */ - memcpy(copyto, ptr, len); - copyto += len; - } else { - /* We have to do multiple writes to get all the data - written. */ - while (soff_hi != eoff_hi) { + return -EINVAL; + } + + soff_hi = from & ~(priv->asize - 1); + eoff_hi = end & ~(priv->asize - 1); + soff_lo = from & (priv->asize - 1); + eoff_lo = end & (priv->asize - 1); + + pmc551_point(mtd, from, len, retlen, &ptr); + + if (soff_hi == eoff_hi) { + /* The whole thing fits within one access, so just one shot + will do it. */ + memcpy(copyto, ptr, len); + copyto += len; + } else { + /* We have to do multiple writes to get all the data + written. */ + while (soff_hi != eoff_hi) { #ifdef CONFIG_MTD_PMC551_DEBUG - printk( KERN_DEBUG "pmc551_read() soff_hi: %ld, eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi); + printk(KERN_DEBUG "pmc551_read() soff_hi: %ld, " + "eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi); #endif - memcpy(copyto, ptr, priv->asize); - copyto += priv->asize; - if (soff_hi + priv->asize >= mtd->size) { - goto out; - } - soff_hi += priv->asize; - pmc551_point (mtd, soff_hi, priv->asize, retlen, &ptr); - } - memcpy(copyto, ptr, eoff_lo); - copyto += eoff_lo; - } - -out: + memcpy(copyto, ptr, priv->asize); + copyto += priv->asize; + if (soff_hi + priv->asize >= mtd->size) { + goto out; + } + soff_hi += priv->asize; + pmc551_point(mtd, soff_hi, priv->asize, retlen, &ptr); + } + memcpy(copyto, ptr, eoff_lo); + copyto += eoff_lo; + } + + out: #ifdef CONFIG_MTD_PMC551_DEBUG printk(KERN_DEBUG "pmc551_read() done\n"); #endif - *retlen = copyto - buf; - return 0; + *retlen = copyto - buf; + return 0; } -static int pmc551_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) +static int pmc551_write(struct mtd_info *mtd, loff_t to, size_t len, + size_t * retlen, const u_char * buf) { - struct mypriv *priv = mtd->priv; - u32 soff_hi, soff_lo; /* start address offset hi/lo */ - u32 eoff_hi, eoff_lo; /* end address offset hi/lo */ - unsigned long end; + struct mypriv *priv = mtd->priv; + u32 soff_hi, soff_lo; /* start address offset hi/lo */ + u32 eoff_hi, eoff_lo; /* end address offset hi/lo */ + unsigned long end; u_char *ptr; - const u_char *copyfrom = buf; - + const u_char *copyfrom = buf; #ifdef CONFIG_MTD_PMC551_DEBUG - printk(KERN_DEBUG "pmc551_write(pos:%ld, len:%ld) asize:%ld\n", (long)to, (long)len, (long)priv->asize); + printk(KERN_DEBUG "pmc551_write(pos:%ld, len:%ld) asize:%ld\n", + (long)to, (long)len, (long)priv->asize); #endif - end = to + len - 1; - /* Is it past the end? or did the u32 wrap? */ - if (end > mtd->size ) { + end = to + len - 1; + /* Is it past the end? or did the u32 wrap? 
*/ + if (end > mtd->size) { #ifdef CONFIG_MTD_PMC551_DEBUG - printk(KERN_DEBUG "pmc551_write() out of bounds (end: %ld, size: %ld, to: %ld)\n", (long) end, (long)mtd->size, (long)to); + printk(KERN_DEBUG "pmc551_write() out of bounds (end: %ld, " + "size: %ld, to: %ld)\n", (long)end, (long)mtd->size, + (long)to); #endif - return -EINVAL; - } - - soff_hi = to & ~(priv->asize - 1); - eoff_hi = end & ~(priv->asize - 1); - soff_lo = to & (priv->asize - 1); - eoff_lo = end & (priv->asize - 1); - - pmc551_point (mtd, to, len, retlen, &ptr); - - if (soff_hi == eoff_hi) { - /* The whole thing fits within one access, so just one shot - will do it. */ - memcpy(ptr, copyfrom, len); - copyfrom += len; - } else { - /* We have to do multiple writes to get all the data - written. */ - while (soff_hi != eoff_hi) { + return -EINVAL; + } + + soff_hi = to & ~(priv->asize - 1); + eoff_hi = end & ~(priv->asize - 1); + soff_lo = to & (priv->asize - 1); + eoff_lo = end & (priv->asize - 1); + + pmc551_point(mtd, to, len, retlen, &ptr); + + if (soff_hi == eoff_hi) { + /* The whole thing fits within one access, so just one shot + will do it. */ + memcpy(ptr, copyfrom, len); + copyfrom += len; + } else { + /* We have to do multiple writes to get all the data + written. */ + while (soff_hi != eoff_hi) { #ifdef CONFIG_MTD_PMC551_DEBUG - printk( KERN_DEBUG "pmc551_write() soff_hi: %ld, eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi); + printk(KERN_DEBUG "pmc551_write() soff_hi: %ld, " + "eoff_hi: %ld\n", (long)soff_hi, (long)eoff_hi); #endif - memcpy(ptr, copyfrom, priv->asize); - copyfrom += priv->asize; - if (soff_hi >= mtd->size) { - goto out; - } - soff_hi += priv->asize; - pmc551_point (mtd, soff_hi, priv->asize, retlen, &ptr); - } - memcpy(ptr, copyfrom, eoff_lo); - copyfrom += eoff_lo; - } - -out: + memcpy(ptr, copyfrom, priv->asize); + copyfrom += priv->asize; + if (soff_hi >= mtd->size) { + goto out; + } + soff_hi += priv->asize; + pmc551_point(mtd, soff_hi, priv->asize, retlen, &ptr); + } + memcpy(ptr, copyfrom, eoff_lo); + copyfrom += eoff_lo; + } + + out: #ifdef CONFIG_MTD_PMC551_DEBUG printk(KERN_DEBUG "pmc551_write() done\n"); #endif - *retlen = copyfrom - buf; - return 0; + *retlen = copyfrom - buf; + return 0; } /* @@ -345,58 +356,58 @@ out: * mechanism * returns the size of the memory region found. 
*/ -static u32 fixup_pmc551 (struct pci_dev *dev) +static u32 fixup_pmc551(struct pci_dev *dev) { #ifdef CONFIG_MTD_PMC551_BUGFIX - u32 dram_data; + u32 dram_data; #endif - u32 size, dcmd, cfg, dtmp; - u16 cmd, tmp, i; + u32 size, dcmd, cfg, dtmp; + u16 cmd, tmp, i; u8 bcmd, counter; - /* Sanity Check */ - if(!dev) { - return -ENODEV; - } + /* Sanity Check */ + if (!dev) { + return -ENODEV; + } /* * Attempt to reset the card * FIXME: Stop Spinning registers */ - counter=0; + counter = 0; /* unlock registers */ - pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, 0xA5 ); + pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, 0xA5); /* read in old data */ - pci_read_config_byte(dev, PMC551_SYS_CTRL_REG, &bcmd ); + pci_read_config_byte(dev, PMC551_SYS_CTRL_REG, &bcmd); /* bang the reset line up and down for a few */ - for(i=0;i<10;i++) { - counter=0; + for (i = 0; i < 10; i++) { + counter = 0; bcmd &= ~0x80; - while(counter++ < 100) { + while (counter++ < 100) { pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, bcmd); } - counter=0; + counter = 0; bcmd |= 0x80; - while(counter++ < 100) { + while (counter++ < 100) { pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, bcmd); } } - bcmd |= (0x40|0x20); + bcmd |= (0x40 | 0x20); pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, bcmd); - /* + /* * Take care and turn off the memory on the device while we * tweak the configurations */ - pci_read_config_word(dev, PCI_COMMAND, &cmd); - tmp = cmd & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY); - pci_write_config_word(dev, PCI_COMMAND, tmp); + pci_read_config_word(dev, PCI_COMMAND, &cmd); + tmp = cmd & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY); + pci_write_config_word(dev, PCI_COMMAND, tmp); /* * Disable existing aperture before probing memory size */ pci_read_config_dword(dev, PMC551_PCI_MEM_MAP0, &dcmd); - dtmp=(dcmd|PMC551_PCI_MEM_MAP_ENABLE|PMC551_PCI_MEM_MAP_REG_EN); + dtmp = (dcmd | PMC551_PCI_MEM_MAP_ENABLE | PMC551_PCI_MEM_MAP_REG_EN); pci_write_config_dword(dev, PMC551_PCI_MEM_MAP0, dtmp); /* * Grab old BAR0 config so that we can figure out memory size @@ -407,220 +418,230 @@ static u32 fixup_pmc551 (struct pci_dev *dev) * then write all 1's to the memory space, read back the result into * "size", and then write back all the old config. */ - pci_read_config_dword( dev, PCI_BASE_ADDRESS_0, &cfg ); + pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &cfg); #ifndef CONFIG_MTD_PMC551_BUGFIX - pci_write_config_dword( dev, PCI_BASE_ADDRESS_0, ~0 ); - pci_read_config_dword( dev, PCI_BASE_ADDRESS_0, &size ); - size = (size&PCI_BASE_ADDRESS_MEM_MASK); - size &= ~(size-1); - pci_write_config_dword( dev, PCI_BASE_ADDRESS_0, cfg ); + pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, ~0); + pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &size); + size = (size & PCI_BASE_ADDRESS_MEM_MASK); + size &= ~(size - 1); + pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, cfg); #else - /* - * Get the size of the memory by reading all the DRAM size values - * and adding them up. - * - * KLUDGE ALERT: the boards we are using have invalid column and - * row mux values. We fix them here, but this will break other - * memory configurations. 
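 * (The #ifndef branch above is the classic PCI BAR sizing probe,
 *  condensed:
 *      pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, ~0);
 *      pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &size);
 *      size &= PCI_BASE_ADDRESS_MEM_MASK;      drop the flag bits
 *      size &= ~(size - 1);                    lowest set bit = window size
 *  with the original BAR value saved first and restored afterwards.)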
- */ - pci_read_config_dword(dev, PMC551_DRAM_BLK0, &dram_data); - size = PMC551_DRAM_BLK_GET_SIZE(dram_data); - dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5); - dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9); - pci_write_config_dword(dev, PMC551_DRAM_BLK0, dram_data); - - pci_read_config_dword(dev, PMC551_DRAM_BLK1, &dram_data); - size += PMC551_DRAM_BLK_GET_SIZE(dram_data); - dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5); - dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9); - pci_write_config_dword(dev, PMC551_DRAM_BLK1, dram_data); - - pci_read_config_dword(dev, PMC551_DRAM_BLK2, &dram_data); - size += PMC551_DRAM_BLK_GET_SIZE(dram_data); - dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5); - dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9); - pci_write_config_dword(dev, PMC551_DRAM_BLK2, dram_data); - - pci_read_config_dword(dev, PMC551_DRAM_BLK3, &dram_data); - size += PMC551_DRAM_BLK_GET_SIZE(dram_data); - dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5); - dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9); - pci_write_config_dword(dev, PMC551_DRAM_BLK3, dram_data); - - /* - * Oops .. something went wrong - */ - if( (size &= PCI_BASE_ADDRESS_MEM_MASK) == 0) { - return -ENODEV; - } -#endif /* CONFIG_MTD_PMC551_BUGFIX */ - - if ((cfg&PCI_BASE_ADDRESS_SPACE) != PCI_BASE_ADDRESS_SPACE_MEMORY) { - return -ENODEV; + /* + * Get the size of the memory by reading all the DRAM size values + * and adding them up. + * + * KLUDGE ALERT: the boards we are using have invalid column and + * row mux values. We fix them here, but this will break other + * memory configurations. + */ + pci_read_config_dword(dev, PMC551_DRAM_BLK0, &dram_data); + size = PMC551_DRAM_BLK_GET_SIZE(dram_data); + dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5); + dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9); + pci_write_config_dword(dev, PMC551_DRAM_BLK0, dram_data); + + pci_read_config_dword(dev, PMC551_DRAM_BLK1, &dram_data); + size += PMC551_DRAM_BLK_GET_SIZE(dram_data); + dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5); + dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9); + pci_write_config_dword(dev, PMC551_DRAM_BLK1, dram_data); + + pci_read_config_dword(dev, PMC551_DRAM_BLK2, &dram_data); + size += PMC551_DRAM_BLK_GET_SIZE(dram_data); + dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5); + dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9); + pci_write_config_dword(dev, PMC551_DRAM_BLK2, dram_data); + + pci_read_config_dword(dev, PMC551_DRAM_BLK3, &dram_data); + size += PMC551_DRAM_BLK_GET_SIZE(dram_data); + dram_data = PMC551_DRAM_BLK_SET_COL_MUX(dram_data, 0x5); + dram_data = PMC551_DRAM_BLK_SET_ROW_MUX(dram_data, 0x9); + pci_write_config_dword(dev, PMC551_DRAM_BLK3, dram_data); + + /* + * Oops .. 
something went wrong + */ + if ((size &= PCI_BASE_ADDRESS_MEM_MASK) == 0) { + return -ENODEV; + } +#endif /* CONFIG_MTD_PMC551_BUGFIX */ + + if ((cfg & PCI_BASE_ADDRESS_SPACE) != PCI_BASE_ADDRESS_SPACE_MEMORY) { + return -ENODEV; } - /* - * Precharge Dram - */ - pci_write_config_word( dev, PMC551_SDRAM_MA, 0x0400 ); - pci_write_config_word( dev, PMC551_SDRAM_CMD, 0x00bf ); - - /* - * Wait until command has gone through - * FIXME: register spinning issue - */ - do { pci_read_config_word( dev, PMC551_SDRAM_CMD, &cmd ); - if(counter++ > 100)break; - } while ( (PCI_COMMAND_IO) & cmd ); - - /* + /* + * Precharge Dram + */ + pci_write_config_word(dev, PMC551_SDRAM_MA, 0x0400); + pci_write_config_word(dev, PMC551_SDRAM_CMD, 0x00bf); + + /* + * Wait until command has gone through + * FIXME: register spinning issue + */ + do { + pci_read_config_word(dev, PMC551_SDRAM_CMD, &cmd); + if (counter++ > 100) + break; + } while ((PCI_COMMAND_IO) & cmd); + + /* * Turn on auto refresh * The loop is taken directly from Ramix's example code. I assume that * this must be held high for some duration of time, but I can find no * documentation refrencing the reasons why. - */ - for ( i = 1; i<=8 ; i++) { - pci_write_config_word (dev, PMC551_SDRAM_CMD, 0x0df); - - /* - * Make certain command has gone through - * FIXME: register spinning issue - */ - counter=0; - do { pci_read_config_word(dev, PMC551_SDRAM_CMD, &cmd); - if(counter++ > 100)break; - } while ( (PCI_COMMAND_IO) & cmd ); - } - - pci_write_config_word ( dev, PMC551_SDRAM_MA, 0x0020); - pci_write_config_word ( dev, PMC551_SDRAM_CMD, 0x0ff); - - /* - * Wait until command completes - * FIXME: register spinning issue - */ - counter=0; - do { pci_read_config_word ( dev, PMC551_SDRAM_CMD, &cmd); - if(counter++ > 100)break; - } while ( (PCI_COMMAND_IO) & cmd ); - - pci_read_config_dword ( dev, PMC551_DRAM_CFG, &dcmd); - dcmd |= 0x02000000; - pci_write_config_dword ( dev, PMC551_DRAM_CFG, dcmd); - - /* - * Check to make certain fast back-to-back, if not - * then set it so - */ - pci_read_config_word( dev, PCI_STATUS, &cmd); - if((cmd&PCI_COMMAND_FAST_BACK) == 0) { - cmd |= PCI_COMMAND_FAST_BACK; - pci_write_config_word( dev, PCI_STATUS, cmd); - } - - /* - * Check to make certain the DEVSEL is set correctly, this device - * has a tendancy to assert DEVSEL and TRDY when a write is performed - * to the memory when memory is read-only - */ - if((cmd&PCI_STATUS_DEVSEL_MASK) != 0x0) { - cmd &= ~PCI_STATUS_DEVSEL_MASK; - pci_write_config_word( dev, PCI_STATUS, cmd ); - } - /* - * Set to be prefetchable and put everything back based on old cfg. 
+ */ + for (i = 1; i <= 8; i++) { + pci_write_config_word(dev, PMC551_SDRAM_CMD, 0x0df); + + /* + * Make certain command has gone through + * FIXME: register spinning issue + */ + counter = 0; + do { + pci_read_config_word(dev, PMC551_SDRAM_CMD, &cmd); + if (counter++ > 100) + break; + } while ((PCI_COMMAND_IO) & cmd); + } + + pci_write_config_word(dev, PMC551_SDRAM_MA, 0x0020); + pci_write_config_word(dev, PMC551_SDRAM_CMD, 0x0ff); + + /* + * Wait until command completes + * FIXME: register spinning issue + */ + counter = 0; + do { + pci_read_config_word(dev, PMC551_SDRAM_CMD, &cmd); + if (counter++ > 100) + break; + } while ((PCI_COMMAND_IO) & cmd); + + pci_read_config_dword(dev, PMC551_DRAM_CFG, &dcmd); + dcmd |= 0x02000000; + pci_write_config_dword(dev, PMC551_DRAM_CFG, dcmd); + + /* + * Check to make certain fast back-to-back, if not + * then set it so + */ + pci_read_config_word(dev, PCI_STATUS, &cmd); + if ((cmd & PCI_COMMAND_FAST_BACK) == 0) { + cmd |= PCI_COMMAND_FAST_BACK; + pci_write_config_word(dev, PCI_STATUS, cmd); + } + + /* + * Check to make certain the DEVSEL is set correctly, this device + * has a tendancy to assert DEVSEL and TRDY when a write is performed + * to the memory when memory is read-only + */ + if ((cmd & PCI_STATUS_DEVSEL_MASK) != 0x0) { + cmd &= ~PCI_STATUS_DEVSEL_MASK; + pci_write_config_word(dev, PCI_STATUS, cmd); + } + /* + * Set to be prefetchable and put everything back based on old cfg. * it's possible that the reset of the V370PDC nuked the original * setup - */ + */ + /* + cfg |= PCI_BASE_ADDRESS_MEM_PREFETCH; + pci_write_config_dword( dev, PCI_BASE_ADDRESS_0, cfg ); + */ + /* - cfg |= PCI_BASE_ADDRESS_MEM_PREFETCH; - pci_write_config_dword( dev, PCI_BASE_ADDRESS_0, cfg ); - */ - - /* - * Turn PCI memory and I/O bus access back on - */ - pci_write_config_word( dev, PCI_COMMAND, - PCI_COMMAND_MEMORY | PCI_COMMAND_IO ); + * Turn PCI memory and I/O bus access back on + */ + pci_write_config_word(dev, PCI_COMMAND, + PCI_COMMAND_MEMORY | PCI_COMMAND_IO); #ifdef CONFIG_MTD_PMC551_DEBUG - /* - * Some screen fun - */ - printk(KERN_DEBUG "pmc551: %d%c (0x%x) of %sprefetchable memory at 0x%llx\n", - (size<1024)?size:(size<1048576)?size>>10:size>>20, - (size<1024)?'B':(size<1048576)?'K':'M', - size, ((dcmd&(0x1<<3)) == 0)?"non-":"", - (unsigned long long)((dev->resource[0].start)&PCI_BASE_ADDRESS_MEM_MASK)); - - /* - * Check to see the state of the memory - */ - pci_read_config_dword( dev, PMC551_DRAM_BLK0, &dcmd ); - printk(KERN_DEBUG "pmc551: DRAM_BLK0 Flags: %s,%s\n" - "pmc551: DRAM_BLK0 Size: %d at %d\n" - "pmc551: DRAM_BLK0 Row MUX: %d, Col MUX: %d\n", - (((0x1<<1)&dcmd) == 0)?"RW":"RO", - (((0x1<<0)&dcmd) == 0)?"Off":"On", - PMC551_DRAM_BLK_GET_SIZE(dcmd), - ((dcmd>>20)&0x7FF), ((dcmd>>13)&0x7), ((dcmd>>9)&0xF) ); - - pci_read_config_dword( dev, PMC551_DRAM_BLK1, &dcmd ); - printk(KERN_DEBUG "pmc551: DRAM_BLK1 Flags: %s,%s\n" - "pmc551: DRAM_BLK1 Size: %d at %d\n" - "pmc551: DRAM_BLK1 Row MUX: %d, Col MUX: %d\n", - (((0x1<<1)&dcmd) == 0)?"RW":"RO", - (((0x1<<0)&dcmd) == 0)?"Off":"On", - PMC551_DRAM_BLK_GET_SIZE(dcmd), - ((dcmd>>20)&0x7FF), ((dcmd>>13)&0x7), ((dcmd>>9)&0xF) ); - - pci_read_config_dword( dev, PMC551_DRAM_BLK2, &dcmd ); - printk(KERN_DEBUG "pmc551: DRAM_BLK2 Flags: %s,%s\n" - "pmc551: DRAM_BLK2 Size: %d at %d\n" - "pmc551: DRAM_BLK2 Row MUX: %d, Col MUX: %d\n", - (((0x1<<1)&dcmd) == 0)?"RW":"RO", - (((0x1<<0)&dcmd) == 0)?"Off":"On", - PMC551_DRAM_BLK_GET_SIZE(dcmd), - ((dcmd>>20)&0x7FF), ((dcmd>>13)&0x7), ((dcmd>>9)&0xF) ); - - 
pci_read_config_dword( dev, PMC551_DRAM_BLK3, &dcmd ); - printk(KERN_DEBUG "pmc551: DRAM_BLK3 Flags: %s,%s\n" - "pmc551: DRAM_BLK3 Size: %d at %d\n" - "pmc551: DRAM_BLK3 Row MUX: %d, Col MUX: %d\n", - (((0x1<<1)&dcmd) == 0)?"RW":"RO", - (((0x1<<0)&dcmd) == 0)?"Off":"On", - PMC551_DRAM_BLK_GET_SIZE(dcmd), - ((dcmd>>20)&0x7FF), ((dcmd>>13)&0x7), ((dcmd>>9)&0xF) ); - - pci_read_config_word( dev, PCI_COMMAND, &cmd ); - printk( KERN_DEBUG "pmc551: Memory Access %s\n", - (((0x1<<1)&cmd) == 0)?"off":"on" ); - printk( KERN_DEBUG "pmc551: I/O Access %s\n", - (((0x1<<0)&cmd) == 0)?"off":"on" ); - - pci_read_config_word( dev, PCI_STATUS, &cmd ); - printk( KERN_DEBUG "pmc551: Devsel %s\n", - ((PCI_STATUS_DEVSEL_MASK&cmd)==0x000)?"Fast": - ((PCI_STATUS_DEVSEL_MASK&cmd)==0x200)?"Medium": - ((PCI_STATUS_DEVSEL_MASK&cmd)==0x400)?"Slow":"Invalid" ); - - printk( KERN_DEBUG "pmc551: %sFast Back-to-Back\n", - ((PCI_COMMAND_FAST_BACK&cmd) == 0)?"Not ":"" ); - - pci_read_config_byte(dev, PMC551_SYS_CTRL_REG, &bcmd ); - printk( KERN_DEBUG "pmc551: EEPROM is under %s control\n" - "pmc551: System Control Register is %slocked to PCI access\n" - "pmc551: System Control Register is %slocked to EEPROM access\n", - (bcmd&0x1)?"software":"hardware", - (bcmd&0x20)?"":"un", (bcmd&0x40)?"":"un"); + /* + * Some screen fun + */ + printk(KERN_DEBUG "pmc551: %d%c (0x%x) of %sprefetchable memory at " + "0x%llx\n", (size < 1024) ? size : (size < 1048576) ? + size >> 10 : size >> 20, + (size < 1024) ? 'B' : (size < 1048576) ? 'K' : 'M', size, + ((dcmd & (0x1 << 3)) == 0) ? "non-" : "", + (unsigned long long)pci_resource_start(dev, 0)); + + /* + * Check to see the state of the memory + */ + pci_read_config_dword(dev, PMC551_DRAM_BLK0, &dcmd); + printk(KERN_DEBUG "pmc551: DRAM_BLK0 Flags: %s,%s\n" + "pmc551: DRAM_BLK0 Size: %d at %d\n" + "pmc551: DRAM_BLK0 Row MUX: %d, Col MUX: %d\n", + (((0x1 << 1) & dcmd) == 0) ? "RW" : "RO", + (((0x1 << 0) & dcmd) == 0) ? "Off" : "On", + PMC551_DRAM_BLK_GET_SIZE(dcmd), + ((dcmd >> 20) & 0x7FF), ((dcmd >> 13) & 0x7), + ((dcmd >> 9) & 0xF)); + + pci_read_config_dword(dev, PMC551_DRAM_BLK1, &dcmd); + printk(KERN_DEBUG "pmc551: DRAM_BLK1 Flags: %s,%s\n" + "pmc551: DRAM_BLK1 Size: %d at %d\n" + "pmc551: DRAM_BLK1 Row MUX: %d, Col MUX: %d\n", + (((0x1 << 1) & dcmd) == 0) ? "RW" : "RO", + (((0x1 << 0) & dcmd) == 0) ? "Off" : "On", + PMC551_DRAM_BLK_GET_SIZE(dcmd), + ((dcmd >> 20) & 0x7FF), ((dcmd >> 13) & 0x7), + ((dcmd >> 9) & 0xF)); + + pci_read_config_dword(dev, PMC551_DRAM_BLK2, &dcmd); + printk(KERN_DEBUG "pmc551: DRAM_BLK2 Flags: %s,%s\n" + "pmc551: DRAM_BLK2 Size: %d at %d\n" + "pmc551: DRAM_BLK2 Row MUX: %d, Col MUX: %d\n", + (((0x1 << 1) & dcmd) == 0) ? "RW" : "RO", + (((0x1 << 0) & dcmd) == 0) ? "Off" : "On", + PMC551_DRAM_BLK_GET_SIZE(dcmd), + ((dcmd >> 20) & 0x7FF), ((dcmd >> 13) & 0x7), + ((dcmd >> 9) & 0xF)); + + pci_read_config_dword(dev, PMC551_DRAM_BLK3, &dcmd); + printk(KERN_DEBUG "pmc551: DRAM_BLK3 Flags: %s,%s\n" + "pmc551: DRAM_BLK3 Size: %d at %d\n" + "pmc551: DRAM_BLK3 Row MUX: %d, Col MUX: %d\n", + (((0x1 << 1) & dcmd) == 0) ? "RW" : "RO", + (((0x1 << 0) & dcmd) == 0) ? "Off" : "On", + PMC551_DRAM_BLK_GET_SIZE(dcmd), + ((dcmd >> 20) & 0x7FF), ((dcmd >> 13) & 0x7), + ((dcmd >> 9) & 0xF)); + + pci_read_config_word(dev, PCI_COMMAND, &cmd); + printk(KERN_DEBUG "pmc551: Memory Access %s\n", + (((0x1 << 1) & cmd) == 0) ? "off" : "on"); + printk(KERN_DEBUG "pmc551: I/O Access %s\n", + (((0x1 << 0) & cmd) == 0) ? 
"off" : "on"); + + pci_read_config_word(dev, PCI_STATUS, &cmd); + printk(KERN_DEBUG "pmc551: Devsel %s\n", + ((PCI_STATUS_DEVSEL_MASK & cmd) == 0x000) ? "Fast" : + ((PCI_STATUS_DEVSEL_MASK & cmd) == 0x200) ? "Medium" : + ((PCI_STATUS_DEVSEL_MASK & cmd) == 0x400) ? "Slow" : "Invalid"); + + printk(KERN_DEBUG "pmc551: %sFast Back-to-Back\n", + ((PCI_COMMAND_FAST_BACK & cmd) == 0) ? "Not " : ""); + + pci_read_config_byte(dev, PMC551_SYS_CTRL_REG, &bcmd); + printk(KERN_DEBUG "pmc551: EEPROM is under %s control\n" + "pmc551: System Control Register is %slocked to PCI access\n" + "pmc551: System Control Register is %slocked to EEPROM access\n", + (bcmd & 0x1) ? "software" : "hardware", + (bcmd & 0x20) ? "" : "un", (bcmd & 0x40) ? "" : "un"); #endif - return size; + return size; } /* * Kernel version specific module stuffages */ - MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mark Ferrell <mferrell@mvista.com>"); MODULE_DESCRIPTION(PMC551_VERSION); @@ -628,11 +649,11 @@ MODULE_DESCRIPTION(PMC551_VERSION); /* * Stuff these outside the ifdef so as to not bust compiled in driver support */ -static int msize=0; +static int msize = 0; #if defined(CONFIG_MTD_PMC551_APERTURE_SIZE) -static int asize=CONFIG_MTD_PMC551_APERTURE_SIZE +static int asize = CONFIG_MTD_PMC551_APERTURE_SIZE #else -static int asize=0; +static int asize = 0; #endif module_param(msize, int, 0); @@ -645,164 +666,174 @@ MODULE_PARM_DESC(asize, "aperture size, must be <= memsize [1-1024]"); */ static int __init init_pmc551(void) { - struct pci_dev *PCI_Device = NULL; - struct mypriv *priv; - int count, found=0; - struct mtd_info *mtd; - u32 length = 0; - - if(msize) { - msize = (1 << (ffs(msize) - 1))<<20; - if (msize > (1<<30)) { - printk(KERN_NOTICE "pmc551: Invalid memory size [%d]\n", msize); + struct pci_dev *PCI_Device = NULL; + struct mypriv *priv; + int count, found = 0; + struct mtd_info *mtd; + u32 length = 0; + + if (msize) { + msize = (1 << (ffs(msize) - 1)) << 20; + if (msize > (1 << 30)) { + printk(KERN_NOTICE "pmc551: Invalid memory size [%d]\n", + msize); return -EINVAL; } } - if(asize) { - asize = (1 << (ffs(asize) - 1))<<20; - if (asize > (1<<30) ) { - printk(KERN_NOTICE "pmc551: Invalid aperture size [%d]\n", asize); + if (asize) { + asize = (1 << (ffs(asize) - 1)) << 20; + if (asize > (1 << 30)) { + printk(KERN_NOTICE "pmc551: Invalid aperture size " + "[%d]\n", asize); return -EINVAL; } } - printk(KERN_INFO PMC551_VERSION); - - /* - * PCU-bus chipset probe. - */ - for( count = 0; count < MAX_MTD_DEVICES; count++ ) { - - if ((PCI_Device = pci_find_device(PCI_VENDOR_ID_V3_SEMI, - PCI_DEVICE_ID_V3_SEMI_V370PDC, - PCI_Device ) ) == NULL) { - break; - } - - printk(KERN_NOTICE "pmc551: Found PCI V370PDC at 0x%llx\n", - (unsigned long long)PCI_Device->resource[0].start); - - /* - * The PMC551 device acts VERY weird if you don't init it - * first. i.e. it will not correctly report devsel. If for - * some reason the sdram is in a wrote-protected state the - * device will DEVSEL when it is written to causing problems - * with the oldproc.c driver in - * some kernels (2.2.*) - */ - if((length = fixup_pmc551(PCI_Device)) <= 0) { - printk(KERN_NOTICE "pmc551: Cannot init SDRAM\n"); - break; - } + printk(KERN_INFO PMC551_VERSION); + + /* + * PCU-bus chipset probe. 
+ */ + for (count = 0; count < MAX_MTD_DEVICES; count++) { + + if ((PCI_Device = pci_get_device(PCI_VENDOR_ID_V3_SEMI, + PCI_DEVICE_ID_V3_SEMI_V370PDC, + PCI_Device)) == NULL) { + break; + } + + printk(KERN_NOTICE "pmc551: Found PCI V370PDC at 0x%llx\n", + (unsigned long long)pci_resource_start(PCI_Device, 0)); + + /* + * The PMC551 device acts VERY weird if you don't init it + * first. i.e. it will not correctly report devsel. If for + * some reason the sdram is in a wrote-protected state the + * device will DEVSEL when it is written to causing problems + * with the oldproc.c driver in + * some kernels (2.2.*) + */ + if ((length = fixup_pmc551(PCI_Device)) <= 0) { + printk(KERN_NOTICE "pmc551: Cannot init SDRAM\n"); + break; + } /* * This is needed until the driver is capable of reading the * onboard I2C SROM to discover the "real" memory size. */ - if(msize) { + if (msize) { length = msize; - printk(KERN_NOTICE "pmc551: Using specified memory size 0x%x\n", length); + printk(KERN_NOTICE "pmc551: Using specified memory " + "size 0x%x\n", length); } else { msize = length; } - mtd = kmalloc(sizeof(struct mtd_info), GFP_KERNEL); - if (!mtd) { - printk(KERN_NOTICE "pmc551: Cannot allocate new MTD device.\n"); - break; - } - - memset(mtd, 0, sizeof(struct mtd_info)); - - priv = kmalloc (sizeof(struct mypriv), GFP_KERNEL); - if (!priv) { - printk(KERN_NOTICE "pmc551: Cannot allocate new MTD device.\n"); - kfree(mtd); - break; - } - memset(priv, 0, sizeof(*priv)); - mtd->priv = priv; - priv->dev = PCI_Device; - - if(asize > length) { - printk(KERN_NOTICE "pmc551: reducing aperture size to fit %dM\n",length>>20); + mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL); + if (!mtd) { + printk(KERN_NOTICE "pmc551: Cannot allocate new MTD " + "device.\n"); + break; + } + + priv = kzalloc(sizeof(struct mypriv), GFP_KERNEL); + if (!priv) { + printk(KERN_NOTICE "pmc551: Cannot allocate new MTD " + "device.\n"); + kfree(mtd); + break; + } + mtd->priv = priv; + priv->dev = PCI_Device; + + if (asize > length) { + printk(KERN_NOTICE "pmc551: reducing aperture size to " + "fit %dM\n", length >> 20); priv->asize = asize = length; } else if (asize == 0 || asize == length) { - printk(KERN_NOTICE "pmc551: Using existing aperture size %dM\n", length>>20); + printk(KERN_NOTICE "pmc551: Using existing aperture " + "size %dM\n", length >> 20); priv->asize = asize = length; } else { - printk(KERN_NOTICE "pmc551: Using specified aperture size %dM\n", asize>>20); + printk(KERN_NOTICE "pmc551: Using specified aperture " + "size %dM\n", asize >> 20); priv->asize = asize; } - priv->start = ioremap(((PCI_Device->resource[0].start) - & PCI_BASE_ADDRESS_MEM_MASK), - priv->asize); + priv->start = pci_iomap(PCI_Device, 0, priv->asize); if (!priv->start) { printk(KERN_NOTICE "pmc551: Unable to map IO space\n"); - kfree(mtd->priv); - kfree(mtd); + kfree(mtd->priv); + kfree(mtd); break; } - #ifdef CONFIG_MTD_PMC551_DEBUG - printk( KERN_DEBUG "pmc551: setting aperture to %d\n", - ffs(priv->asize>>20)-1); + printk(KERN_DEBUG "pmc551: setting aperture to %d\n", + ffs(priv->asize >> 20) - 1); #endif - priv->base_map0 = ( PMC551_PCI_MEM_MAP_REG_EN - | PMC551_PCI_MEM_MAP_ENABLE - | (ffs(priv->asize>>20)-1)<<4 ); - priv->curr_map0 = priv->base_map0; - pci_write_config_dword ( priv->dev, PMC551_PCI_MEM_MAP0, - priv->curr_map0 ); + priv->base_map0 = (PMC551_PCI_MEM_MAP_REG_EN + | PMC551_PCI_MEM_MAP_ENABLE + | (ffs(priv->asize >> 20) - 1) << 4); + priv->curr_map0 = priv->base_map0; + pci_write_config_dword(priv->dev, PMC551_PCI_MEM_MAP0, + 
priv->curr_map0); #ifdef CONFIG_MTD_PMC551_DEBUG - printk( KERN_DEBUG "pmc551: aperture set to %d\n", - (priv->base_map0 & 0xF0)>>4 ); + printk(KERN_DEBUG "pmc551: aperture set to %d\n", + (priv->base_map0 & 0xF0) >> 4); #endif - mtd->size = msize; - mtd->flags = MTD_CAP_RAM; - mtd->erase = pmc551_erase; - mtd->read = pmc551_read; - mtd->write = pmc551_write; - mtd->point = pmc551_point; - mtd->unpoint = pmc551_unpoint; - mtd->type = MTD_RAM; - mtd->name = "PMC551 RAM board"; - mtd->erasesize = 0x10000; - mtd->writesize = 1; - mtd->owner = THIS_MODULE; - - if (add_mtd_device(mtd)) { - printk(KERN_NOTICE "pmc551: Failed to register new device\n"); - iounmap(priv->start); - kfree(mtd->priv); - kfree(mtd); - break; - } - printk(KERN_NOTICE "Registered pmc551 memory device.\n"); - printk(KERN_NOTICE "Mapped %dM of memory from 0x%p to 0x%p\n", - priv->asize>>20, - priv->start, - priv->start + priv->asize); - printk(KERN_NOTICE "Total memory is %d%c\n", - (length<1024)?length: - (length<1048576)?length>>10:length>>20, - (length<1024)?'B':(length<1048576)?'K':'M'); + mtd->size = msize; + mtd->flags = MTD_CAP_RAM; + mtd->erase = pmc551_erase; + mtd->read = pmc551_read; + mtd->write = pmc551_write; + mtd->point = pmc551_point; + mtd->unpoint = pmc551_unpoint; + mtd->type = MTD_RAM; + mtd->name = "PMC551 RAM board"; + mtd->erasesize = 0x10000; + mtd->writesize = 1; + mtd->owner = THIS_MODULE; + + if (add_mtd_device(mtd)) { + printk(KERN_NOTICE "pmc551: Failed to register new " + "device\n"); + pci_iounmap(PCI_Device, priv->start); + kfree(mtd->priv); + kfree(mtd); + break; + } + + /* Keep a reference as the add_mtd_device worked */ + pci_dev_get(PCI_Device); + + printk(KERN_NOTICE "Registered pmc551 memory device.\n"); + printk(KERN_NOTICE "Mapped %dM of memory from 0x%p to 0x%p\n", + priv->asize >> 20, + priv->start, priv->start + priv->asize); + printk(KERN_NOTICE "Total memory is %d%c\n", + (length < 1024) ? length : + (length < 1048576) ? length >> 10 : length >> 20, + (length < 1024) ? 'B' : (length < 1048576) ? 'K' : 'M'); priv->nextpmc551 = pmc551list; pmc551list = mtd; found++; - } + } + + /* Exited early, reference left over */ + if (PCI_Device) + pci_dev_put(PCI_Device); - if( !pmc551list ) { - printk(KERN_NOTICE "pmc551: not detected\n"); - return -ENODEV; - } else { + if (!pmc551list) { + printk(KERN_NOTICE "pmc551: not detected\n"); + return -ENODEV; + } else { printk(KERN_NOTICE "pmc551: %d pmc551 devices loaded\n", found); - return 0; + return 0; } } @@ -811,23 +842,24 @@ static int __init init_pmc551(void) */ static void __exit cleanup_pmc551(void) { - int found=0; - struct mtd_info *mtd; + int found = 0; + struct mtd_info *mtd; struct mypriv *priv; - while((mtd=pmc551list)) { + while ((mtd = pmc551list)) { priv = mtd->priv; pmc551list = priv->nextpmc551; - if(priv->start) { - printk (KERN_DEBUG "pmc551: unmapping %dM starting at 0x%p\n", - priv->asize>>20, priv->start); - iounmap (priv->start); + if (priv->start) { + printk(KERN_DEBUG "pmc551: unmapping %dM starting at " + "0x%p\n", priv->asize >> 20, priv->start); + pci_iounmap(priv->dev, priv->start); } + pci_dev_put(priv->dev); - kfree (mtd->priv); - del_mtd_device (mtd); - kfree (mtd); + kfree(mtd->priv); + del_mtd_device(mtd); + kfree(mtd); found++; } diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index 64d1b6a6c920..24747bdc3e19 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig @@ -447,14 +447,6 @@ config MTD_DC21285 21285 bridge used with Intel's StrongARM processors. 
More info at <http://www.intel.com/design/bridge/docs/21285_documentation.htm>. -config MTD_IQ80310 - tristate "CFI Flash device mapped on the XScale IQ80310 board" - depends on MTD_CFI && ARCH_IQ80310 - help - This enables access routines for the flash chips on the Intel XScale - IQ80310 evaluation board. If you have one of these boards and would - like to use the flash chips on it, say 'Y'. - config MTD_IXP4XX tristate "CFI Flash device mapped on Intel IXP4xx based systems" depends on MTD_CFI && MTD_COMPLEX_MAPPINGS && ARCH_IXP4XX diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile index ab71f172eb77..191c1928bbec 100644 --- a/drivers/mtd/maps/Makefile +++ b/drivers/mtd/maps/Makefile @@ -15,7 +15,6 @@ obj-$(CONFIG_MTD_CFI_FLAGADM) += cfi_flagadm.o obj-$(CONFIG_MTD_CSTM_MIPS_IXX) += cstm_mips_ixx.o obj-$(CONFIG_MTD_DC21285) += dc21285.o obj-$(CONFIG_MTD_DILNETPC) += dilnetpc.o -obj-$(CONFIG_MTD_IQ80310) += iq80310.o obj-$(CONFIG_MTD_L440GX) += l440gx.o obj-$(CONFIG_MTD_AMD76XROM) += amd76xrom.o obj-$(CONFIG_MTD_ICHXROM) += ichxrom.o diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c index 447955be18af..797caffb20b1 100644 --- a/drivers/mtd/maps/amd76xrom.c +++ b/drivers/mtd/maps/amd76xrom.c @@ -57,6 +57,7 @@ static void amd76xrom_cleanup(struct amd76xrom_window *window) /* Disable writes through the rom window */ pci_read_config_byte(window->pdev, 0x40, &byte); pci_write_config_byte(window->pdev, 0x40, byte & ~1); + pci_dev_put(window->pdev); } /* Free all of the mtd devices */ @@ -91,7 +92,7 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev, struct amd76xrom_map_info *map = NULL; unsigned long map_top; - /* Remember the pci dev I find the window in */ + /* Remember the pci dev I find the window in - already have a ref */ window->pdev = pdev; /* Assume the rom window is properly setup, and find it's size */ @@ -302,7 +303,7 @@ static int __init init_amd76xrom(void) struct pci_device_id *id; pdev = NULL; for(id = amd76xrom_pci_tbl; id->vendor; id++) { - pdev = pci_find_device(id->vendor, id->device, NULL); + pdev = pci_get_device(id->vendor, id->device, NULL); if (pdev) { break; } diff --git a/drivers/mtd/maps/arctic-mtd.c b/drivers/mtd/maps/arctic-mtd.c index d95ae582fbe9..642d96bc8919 100644 --- a/drivers/mtd/maps/arctic-mtd.c +++ b/drivers/mtd/maps/arctic-mtd.c @@ -96,6 +96,8 @@ static struct mtd_partition arctic_partitions[PARTITIONS] = { static int __init init_arctic_mtd(void) { + int err = 0; + printk("%s: 0x%08x at 0x%08x\n", NAME, SIZE, PADDR); arctic_mtd_map.virt = ioremap(PADDR, SIZE); @@ -109,12 +111,20 @@ init_arctic_mtd(void) printk("%s: probing %d-bit flash bus\n", NAME, BUSWIDTH * 8); arctic_mtd = do_map_probe("cfi_probe", &arctic_mtd_map); - if (!arctic_mtd) + if (!arctic_mtd) { + iounmap((void *) arctic_mtd_map.virt); return -ENXIO; + } arctic_mtd->owner = THIS_MODULE; - return add_mtd_partitions(arctic_mtd, arctic_partitions, PARTITIONS); + err = add_mtd_partitions(arctic_mtd, arctic_partitions, PARTITIONS); + if (err) { + printk("%s: add_mtd_partitions failed\n", NAME); + iounmap((void *) arctic_mtd_map.virt); + } + + return err; } static void __exit diff --git a/drivers/mtd/maps/beech-mtd.c b/drivers/mtd/maps/beech-mtd.c index 5df7361d1407..a64b1a5ab316 100644 --- a/drivers/mtd/maps/beech-mtd.c +++ b/drivers/mtd/maps/beech-mtd.c @@ -72,6 +72,8 @@ static struct mtd_partition beech_partitions[2] = { static int __init init_beech_mtd(void) { + int err = 0; + printk("%s: 0x%08x at 0x%08x\n", NAME, SIZE, PADDR); 
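
A pattern that repeats across this series (amd76xrom above; ichxrom, l440gx and scx200_docflash below) is the switch from pci_find_device() to pci_get_device(): the latter returns a counted reference that the caller must release. A minimal sketch of the resulting lifetime rules, with vendor, device and probe_one() as placeholders:

    /* Minimal sketch of pci_get_device() reference handling; not taken
     * verbatim from any driver in this series. */
    struct pci_dev *pdev = NULL;

    while ((pdev = pci_get_device(vendor, device, pdev)) != NULL) {
            /* the reference on the previous pdev has been dropped for
             * us; the returned pdev carries a fresh one */
            if (probe_one(pdev) == 0) {
                    pci_dev_get(pdev);      /* keep one past the loop */
                    break;
            }
    }
    if (pdev)
            pci_dev_put(pdev);      /* balance the loop's reference */

The teardown paths gain the matching pci_dev_put() calls, which is why amd76xrom_cleanup() and ichxrom_cleanup() now drop window->pdev.
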
beech_mtd_map.virt = ioremap(PADDR, SIZE); @@ -86,12 +88,20 @@ init_beech_mtd(void) printk("%s: probing %d-bit flash bus\n", NAME, BUSWIDTH * 8); beech_mtd = do_map_probe("cfi_probe", &beech_mtd_map); - if (!beech_mtd) + if (!beech_mtd) { + iounmap((void *) beech_mtd_map.virt); return -ENXIO; + } beech_mtd->owner = THIS_MODULE; - return add_mtd_partitions(beech_mtd, beech_partitions, 2); + err = add_mtd_partitions(beech_mtd, beech_partitions, 2); + if (err) { + printk("%s: add_mtd_partitions failed\n", NAME); + iounmap((void *) beech_mtd_map.virt); + } + + return err; } static void __exit diff --git a/drivers/mtd/maps/cstm_mips_ixx.c b/drivers/mtd/maps/cstm_mips_ixx.c index aa56defb94c8..d6bef100d69a 100644 --- a/drivers/mtd/maps/cstm_mips_ixx.c +++ b/drivers/mtd/maps/cstm_mips_ixx.c @@ -171,7 +171,14 @@ int __init init_cstm_mips_ixx(void) cstm_mips_ixx_map[i].phys = cstm_mips_ixx_board_desc[i].window_addr; cstm_mips_ixx_map[i].virt = ioremap(cstm_mips_ixx_board_desc[i].window_addr, cstm_mips_ixx_board_desc[i].window_size); if (!cstm_mips_ixx_map[i].virt) { + int j = 0; printk(KERN_WARNING "Failed to ioremap\n"); + for (j = 0; j < i; j++) { + if (cstm_mips_ixx_map[j].virt) { + iounmap((void *)cstm_mips_ixx_map[j].virt); + cstm_mips_ixx_map[j].virt = 0; + } + } return -EIO; } cstm_mips_ixx_map[i].name = cstm_mips_ixx_board_desc[i].name; @@ -204,8 +211,15 @@ int __init init_cstm_mips_ixx(void) cstm_mips_ixx_map[i].map_priv_2 = (unsigned long)mymtd; add_mtd_partitions(mymtd, parts, cstm_mips_ixx_board_desc[i].num_partitions); } - else - return -ENXIO; + else { + for (i = 0; i < PHYSMAP_NUMBER; i++) { + if (cstm_mips_ixx_map[i].virt) { + iounmap((void *)cstm_mips_ixx_map[i].virt); + cstm_mips_ixx_map[i].virt = 0; + } + } + return -ENXIO; + } } return 0; } diff --git a/drivers/mtd/maps/ebony.c b/drivers/mtd/maps/ebony.c index 641e1dd8479e..1488bb92f26f 100644 --- a/drivers/mtd/maps/ebony.c +++ b/drivers/mtd/maps/ebony.c @@ -108,6 +108,7 @@ int __init init_ebony(void) ARRAY_SIZE(ebony_small_partitions)); } else { printk("map probe failed for flash\n"); + iounmap(ebony_small_map.virt); return -ENXIO; } @@ -117,6 +118,7 @@ int __init init_ebony(void) if (!ebony_large_map.virt) { printk("Failed to ioremap flash\n"); + iounmap(ebony_small_map.virt); return -EIO; } @@ -129,6 +131,8 @@ int __init init_ebony(void) ARRAY_SIZE(ebony_large_partitions)); } else { printk("map probe failed for flash\n"); + iounmap(ebony_small_map.virt); + iounmap(ebony_large_map.virt); return -ENXIO; } diff --git a/drivers/mtd/maps/fortunet.c b/drivers/mtd/maps/fortunet.c index c6bf4e1219ef..7c50c271651c 100644 --- a/drivers/mtd/maps/fortunet.c +++ b/drivers/mtd/maps/fortunet.c @@ -218,8 +218,11 @@ int __init init_fortunet(void) map_regions[ix].map_info.size); if(!map_regions[ix].map_info.virt) { + int j = 0; printk(MTD_FORTUNET_PK "%s flash failed to ioremap!\n", map_regions[ix].map_info.name); + for (j = 0 ; j < ix; j++) + iounmap(map_regions[j].map_info.virt); return -ENXIO; } simple_map_init(&map_regions[ix].map_info); diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c index db4b570d874a..2bb3e63606e5 100644 --- a/drivers/mtd/maps/ichxrom.c +++ b/drivers/mtd/maps/ichxrom.c @@ -61,6 +61,7 @@ static void ichxrom_cleanup(struct ichxrom_window *window) /* Disable writes through the rom window */ pci_read_config_word(window->pdev, BIOS_CNTL, &word); pci_write_config_word(window->pdev, BIOS_CNTL, word & ~1); + pci_dev_put(window->pdev); /* Free all of the mtd devices */ list_for_each_entry_safe(map, scratch, 
&window->maps, list) { @@ -355,7 +356,7 @@ static int __init init_ichxrom(void) pdev = NULL; for (id = ichxrom_pci_tbl; id->vendor; id++) { - pdev = pci_find_device(id->vendor, id->device, NULL); + pdev = pci_get_device(id->vendor, id->device, NULL); if (pdev) { break; } diff --git a/drivers/mtd/maps/iq80310.c b/drivers/mtd/maps/iq80310.c deleted file mode 100644 index 62d9e87d84e2..000000000000 --- a/drivers/mtd/maps/iq80310.c +++ /dev/null @@ -1,118 +0,0 @@ -/* - * $Id: iq80310.c,v 1.21 2005/11/07 11:14:27 gleixner Exp $ - * - * Mapping for the Intel XScale IQ80310 evaluation board - * - * Author: Nicolas Pitre - * Copyright: (C) 2001 MontaVista Software Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include <linux/module.h> -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/slab.h> -#include <asm/io.h> -#include <linux/mtd/mtd.h> -#include <linux/mtd/map.h> -#include <linux/mtd/partitions.h> - - -#define WINDOW_ADDR 0 -#define WINDOW_SIZE 8*1024*1024 -#define BUSWIDTH 1 - -static struct mtd_info *mymtd; - -static struct map_info iq80310_map = { - .name = "IQ80310 flash", - .size = WINDOW_SIZE, - .bankwidth = BUSWIDTH, - .phys = WINDOW_ADDR -}; - -static struct mtd_partition iq80310_partitions[4] = { - { - .name = "Firmware", - .size = 0x00080000, - .offset = 0, - .mask_flags = MTD_WRITEABLE /* force read-only */ - },{ - .name = "Kernel", - .size = 0x000a0000, - .offset = 0x00080000, - },{ - .name = "Filesystem", - .size = 0x00600000, - .offset = 0x00120000 - },{ - .name = "RedBoot", - .size = 0x000e0000, - .offset = 0x00720000, - .mask_flags = MTD_WRITEABLE - } -}; - -static struct mtd_info *mymtd; -static struct mtd_partition *parsed_parts; -static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; - -static int __init init_iq80310(void) -{ - struct mtd_partition *parts; - int nb_parts = 0; - int parsed_nr_parts = 0; - int ret; - - iq80310_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE); - if (!iq80310_map.virt) { - printk("Failed to ioremap\n"); - return -EIO; - } - simple_map_init(&iq80310_map); - - mymtd = do_map_probe("cfi_probe", &iq80310_map); - if (!mymtd) { - iounmap((void *)iq80310_map.virt); - return -ENXIO; - } - mymtd->owner = THIS_MODULE; - - ret = parse_mtd_partitions(mymtd, probes, &parsed_parts, 0); - - if (ret > 0) - parsed_nr_parts = ret; - - if (parsed_nr_parts > 0) { - parts = parsed_parts; - nb_parts = parsed_nr_parts; - } else { - parts = iq80310_partitions; - nb_parts = ARRAY_SIZE(iq80310_partitions); - } - add_mtd_partitions(mymtd, parts, nb_parts); - return 0; -} - -static void __exit cleanup_iq80310(void) -{ - if (mymtd) { - del_mtd_partitions(mymtd); - map_destroy(mymtd); - kfree(parsed_parts); - } - if (iq80310_map.virt) - iounmap((void *)iq80310_map.virt); -} - -module_init(init_iq80310); -module_exit(cleanup_iq80310); - - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Nicolas Pitre <nico@cam.org>"); -MODULE_DESCRIPTION("MTD map driver for Intel XScale IQ80310 evaluation board"); diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c index 986c58628390..7a828e3e6446 100644 --- a/drivers/mtd/maps/ixp4xx.c +++ b/drivers/mtd/maps/ixp4xx.c @@ -253,7 +253,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev) /* Use the fast version */ info->map.write = ixp4xx_write16, - err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0); + 
err = parse_mtd_partitions(info->mtd, probes, &info->partitions, dev->resource->start); if (err > 0) { err = add_mtd_partitions(info->mtd, info->partitions, err); if(err) diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c index 6b784ef5ee70..67620adf4811 100644 --- a/drivers/mtd/maps/l440gx.c +++ b/drivers/mtd/maps/l440gx.c @@ -61,14 +61,17 @@ static int __init init_l440gx(void) struct resource *pm_iobase; __u16 word; - dev = pci_find_device(PCI_VENDOR_ID_INTEL, + dev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0, NULL); - pm_dev = pci_find_device(PCI_VENDOR_ID_INTEL, + pm_dev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, NULL); + pci_dev_put(dev); + if (!dev || !pm_dev) { printk(KERN_NOTICE "L440GX flash mapping: failed to find PIIX4 ISA bridge, cannot continue\n"); + pci_dev_put(pm_dev); return -ENODEV; } @@ -76,6 +79,7 @@ static int __init init_l440gx(void) if (!l440gx_map.virt) { printk(KERN_WARNING "Failed to ioremap L440GX flash region\n"); + pci_dev_put(pm_dev); return -ENOMEM; } simple_map_init(&l440gx_map); @@ -99,8 +103,12 @@ static int __init init_l440gx(void) pm_iobase->start += iobase & ~1; pm_iobase->end += iobase & ~1; + pci_dev_put(pm_dev); + /* Allocate the resource region */ if (pci_assign_resource(pm_dev, PIIXE_IOBASE_RESOURCE) != 0) { + pci_dev_put(dev); + pci_dev_put(pm_dev); printk(KERN_WARNING "Could not allocate pm iobase resource\n"); iounmap(l440gx_map.virt); return -ENXIO; diff --git a/drivers/mtd/maps/lasat.c b/drivers/mtd/maps/lasat.c index 1c13d2dc0cdf..e34376321050 100644 --- a/drivers/mtd/maps/lasat.c +++ b/drivers/mtd/maps/lasat.c @@ -79,6 +79,7 @@ static int __init init_lasat(void) return 0; } + iounmap(lasat_map.virt); return -ENXIO; } @@ -89,6 +90,7 @@ static void __exit cleanup_lasat(void) map_destroy(lasat_mtd); } if (lasat_map.virt) { + iounmap(lasat_map.virt); lasat_map.virt = 0; } } diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c index 0994b5b2e331..198e840ff6db 100644 --- a/drivers/mtd/maps/nettel.c +++ b/drivers/mtd/maps/nettel.c @@ -277,6 +277,7 @@ int __init nettel_init(void) nettel_amd_map.virt = ioremap_nocache(amdaddr, maxsize); if (!nettel_amd_map.virt) { printk("SNAPGEAR: failed to ioremap() BOOTCS\n"); + iounmap(nettel_mmcrp); return(-EIO); } simple_map_init(&nettel_amd_map); @@ -337,7 +338,8 @@ int __init nettel_init(void) nettel_amd_map.virt = NULL; #else /* Only AMD flash supported */ - return(-ENXIO); + rc = -ENXIO; + goto out_unmap2; #endif } @@ -361,14 +363,15 @@ int __init nettel_init(void) nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize); if (!nettel_intel_map.virt) { printk("SNAPGEAR: failed to ioremap() ROMCS1\n"); - return(-EIO); + rc = -EIO; + goto out_unmap2; } simple_map_init(&nettel_intel_map); intel_mtd = do_map_probe("cfi_probe", &nettel_intel_map); if (!intel_mtd) { - iounmap(nettel_intel_map.virt); - return(-ENXIO); + rc = -ENXIO; + goto out_unmap1; } /* Set PAR to the detected size */ @@ -394,13 +397,14 @@ int __init nettel_init(void) nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize); if (!nettel_intel_map.virt) { printk("SNAPGEAR: failed to ioremap() ROMCS1/2\n"); - return(-EIO); + rc = -EIO; + goto out_unmap2; } intel_mtd = do_map_probe("cfi_probe", &nettel_intel_map); if (! 
intel_mtd) { - iounmap((void *) nettel_intel_map.virt); - return(-ENXIO); + rc = -ENXIO; + goto out_unmap1; } intel1size = intel_mtd->size - intel0size; @@ -456,6 +460,18 @@ int __init nettel_init(void) #endif return(rc); + +#ifdef CONFIG_MTD_CFI_INTELEXT +out_unmap1: + iounmap((void *) nettel_intel_map.virt); +#endif + +out_unmap2: + iounmap(nettel_mmcrp); + iounmap(nettel_amd_map.virt); + + return(rc); + } /****************************************************************************/ @@ -469,6 +485,10 @@ void __exit nettel_cleanup(void) del_mtd_partitions(amd_mtd); map_destroy(amd_mtd); } + if (nettel_mmcrp) { + iounmap(nettel_mmcrp); + nettel_mmcrp = NULL; + } if (nettel_amd_map.virt) { iounmap(nettel_amd_map.virt); nettel_amd_map.virt = NULL; diff --git a/drivers/mtd/maps/ocotea.c b/drivers/mtd/maps/ocotea.c index 2f07602ba940..5522eac8c980 100644 --- a/drivers/mtd/maps/ocotea.c +++ b/drivers/mtd/maps/ocotea.c @@ -97,6 +97,7 @@ int __init init_ocotea(void) ARRAY_SIZE(ocotea_small_partitions)); } else { printk("map probe failed for flash\n"); + iounmap(ocotea_small_map.virt); return -ENXIO; } @@ -106,6 +107,7 @@ int __init init_ocotea(void) if (!ocotea_large_map.virt) { printk("Failed to ioremap flash\n"); + iounmap(ocotea_small_map.virt); return -EIO; } @@ -118,6 +120,8 @@ int __init init_ocotea(void) ARRAY_SIZE(ocotea_large_partitions)); } else { printk("map probe failed for flash\n"); + iounmap(ocotea_small_map.virt); + iounmap(ocotea_large_map.virt); return -ENXIO; } diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c index c861134cbc48..995347b1beba 100644 --- a/drivers/mtd/maps/pcmciamtd.c +++ b/drivers/mtd/maps/pcmciamtd.c @@ -602,6 +602,10 @@ static int pcmciamtd_config(struct pcmcia_device *link) ret = pcmcia_request_configuration(link, &link->conf); if(ret != CS_SUCCESS) { cs_error(link, RequestConfiguration, ret); + if (dev->win_base) { + iounmap(dev->win_base); + dev->win_base = NULL; + } return -ENODEV; } diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c index 7799a25a7f2a..bc7cc71788bc 100644 --- a/drivers/mtd/maps/physmap.c +++ b/drivers/mtd/maps/physmap.c @@ -158,9 +158,42 @@ err_out: return err; } +#ifdef CONFIG_PM +static int physmap_flash_suspend(struct platform_device *dev, pm_message_t state) +{ + struct physmap_flash_info *info = platform_get_drvdata(dev); + int ret = 0; + + if (info) + ret = info->mtd->suspend(info->mtd); + + return ret; +} + +static int physmap_flash_resume(struct platform_device *dev) +{ + struct physmap_flash_info *info = platform_get_drvdata(dev); + if (info) + info->mtd->resume(info->mtd); + return 0; +} + +static void physmap_flash_shutdown(struct platform_device *dev) +{ + struct physmap_flash_info *info = platform_get_drvdata(dev); + if (info && info->mtd->suspend(info->mtd) == 0) + info->mtd->resume(info->mtd); +} +#endif + static struct platform_driver physmap_flash_driver = { .probe = physmap_flash_probe, .remove = physmap_flash_remove, +#ifdef CONFIG_PM + .suspend = physmap_flash_suspend, + .resume = physmap_flash_resume, + .shutdown = physmap_flash_shutdown, +#endif .driver = { .name = "physmap-flash", }, diff --git a/drivers/mtd/maps/redwood.c b/drivers/mtd/maps/redwood.c index ec8fdae1dd99..2257d2b500c0 100644 --- a/drivers/mtd/maps/redwood.c +++ b/drivers/mtd/maps/redwood.c @@ -126,6 +126,8 @@ static struct mtd_info *redwood_mtd; int __init init_redwood_flash(void) { + int err = 0; + printk(KERN_NOTICE "redwood: flash mapping: %x at %x\n", WINDOW_SIZE, WINDOW_ADDR); @@ -141,11 +143,18 @@ int 
__init init_redwood_flash(void) if (redwood_mtd) { redwood_mtd->owner = THIS_MODULE; - return add_mtd_partitions(redwood_mtd, + err = add_mtd_partitions(redwood_mtd, redwood_flash_partitions, NUM_REDWOOD_FLASH_PARTITIONS); + if (err) { + printk("init_redwood_flash: add_mtd_partitions failed\n"); + iounmap(redwood_flash_map.virt); + } + return err; + } + iounmap(redwood_flash_map.virt); return -ENXIO; } diff --git a/drivers/mtd/maps/sbc8240.c b/drivers/mtd/maps/sbc8240.c index 7d0fcf8f4f33..b8c1331b7a04 100644 --- a/drivers/mtd/maps/sbc8240.c +++ b/drivers/mtd/maps/sbc8240.c @@ -156,7 +156,7 @@ int __init init_sbc8240_mtd (void) }; int devicesfound = 0; - int i; + int i,j; for (i = 0; i < NUM_FLASH_BANKS; i++) { printk (KERN_NOTICE MSG_PREFIX @@ -166,6 +166,10 @@ int __init init_sbc8240_mtd (void) (unsigned long) ioremap (pt[i].addr, pt[i].size); if (!sbc8240_map[i].map_priv_1) { printk (MSG_PREFIX "failed to ioremap\n"); + for (j = 0; j < i; j++) { + iounmap((void *) sbc8240_map[j].map_priv_1); + sbc8240_map[j].map_priv_1 = 0; + } return -EIO; } simple_map_init(&sbc8240_mtd[i]); @@ -175,6 +179,11 @@ int __init init_sbc8240_mtd (void) if (sbc8240_mtd[i]) { sbc8240_mtd[i]->module = THIS_MODULE; devicesfound++; + } else { + if (sbc8240_map[i].map_priv_1) { + iounmap((void *) sbc8240_map[i].map_priv_1); + sbc8240_map[i].map_priv_1 = 0; + } } } diff --git a/drivers/mtd/maps/scx200_docflash.c b/drivers/mtd/maps/scx200_docflash.c index 7391fd544e86..5e2bce22f37c 100644 --- a/drivers/mtd/maps/scx200_docflash.c +++ b/drivers/mtd/maps/scx200_docflash.c @@ -87,19 +87,23 @@ static int __init init_scx200_docflash(void) printk(KERN_DEBUG NAME ": NatSemi SCx200 DOCCS Flash Driver\n"); - if ((bridge = pci_find_device(PCI_VENDOR_ID_NS, + if ((bridge = pci_get_device(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE, NULL)) == NULL) return -ENODEV; /* check that we have found the configuration block */ - if (!scx200_cb_present()) + if (!scx200_cb_present()) { + pci_dev_put(bridge); return -ENODEV; + } if (probe) { /* Try to use the present flash mapping if any */ pci_read_config_dword(bridge, SCx200_DOCCS_BASE, &base); pci_read_config_dword(bridge, SCx200_DOCCS_CTRL, &ctrl); + pci_dev_put(bridge); + pmr = inl(scx200_cb_base + SCx200_PMR); if (base == 0 @@ -127,6 +131,7 @@ static int __init init_scx200_docflash(void) return -ENOMEM; } } else { + pci_dev_put(bridge); for (u = size; u > 1; u >>= 1) ; if (u != 1) { diff --git a/drivers/mtd/maps/walnut.c b/drivers/mtd/maps/walnut.c index ec80eec376bf..ca932122fb64 100644 --- a/drivers/mtd/maps/walnut.c +++ b/drivers/mtd/maps/walnut.c @@ -68,6 +68,7 @@ int __init init_walnut(void) if (WALNUT_FLASH_ONBD_N(fpga_brds1)) { printk("The on-board flash is disabled (U79 sw 5)!"); + iounmap(fpga_status_adr); return -EIO; } if (WALNUT_FLASH_SRAM_SEL(fpga_brds1)) @@ -81,6 +82,7 @@ int __init init_walnut(void) if (!walnut_map.virt) { printk("Failed to ioremap flash.\n"); + iounmap(fpga_status_adr); return -EIO; } @@ -93,9 +95,11 @@ int __init init_walnut(void) ARRAY_SIZE(walnut_partitions)); } else { printk("map probe failed for flash\n"); + iounmap(fpga_status_adr); return -ENXIO; } + iounmap(fpga_status_adr); return 0; } diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 168d3ba063c3..c4d26de74349 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -57,6 +57,16 @@ int add_mtd_device(struct mtd_info *mtd) mtd->index = i; mtd->usecount = 0; + /* Some chips always power up locked. 
Unlock them now */ + if ((mtd->flags & MTD_WRITEABLE) + && (mtd->flags & MTD_STUPID_LOCK) && mtd->unlock) { + if (mtd->unlock(mtd, 0, mtd->size)) + printk(KERN_WARNING + "%s: unlock failed, " + "writes may not work\n", + mtd->name); + } + DEBUG(0, "mtd: Giving out device %d to %s\n",i, mtd->name); /* No need to get a refcount on the module containing the notifier, since we hold the mtd_table_mutex */ diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c index 31228334da12..09e421a96893 100644 --- a/drivers/mtd/nand/au1550nd.c +++ b/drivers/mtd/nand/au1550nd.c @@ -21,18 +21,7 @@ #include <linux/version.h> #include <asm/io.h> -/* fixme: this is ugly */ -#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 0) #include <asm/mach-au1x00/au1xxx.h> -#else -#include <asm/au1000.h> -#ifdef CONFIG_MIPS_PB1550 -#include <asm/pb1550.h> -#endif -#ifdef CONFIG_MIPS_DB1550 -#include <asm/db1x00.h> -#endif -#endif /* * MTD structure for NAND controller diff --git a/drivers/mtd/nand/edb7312.c b/drivers/mtd/nand/edb7312.c index 516c0e5e564c..12017f3c6bd6 100644 --- a/drivers/mtd/nand/edb7312.c +++ b/drivers/mtd/nand/edb7312.c @@ -198,6 +198,9 @@ static void __exit ep7312_cleanup(void) /* Release resources, unregister device */ nand_release(ap7312_mtd); + /* Release io resource */ + iounmap((void *)this->IO_ADDR_R); + /* Free the MTD device structure */ kfree(ep7312_mtd); } diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c index e5bd88f2d560..039c759cfbfc 100644 --- a/drivers/mtd/nand/ndfc.c +++ b/drivers/mtd/nand/ndfc.c @@ -168,7 +168,7 @@ static void ndfc_chip_init(struct ndfc_nand_mtd *mtd) chip->ecc.mode = NAND_ECC_HW; chip->ecc.size = 256; chip->ecc.bytes = 3; - chip->ecclayout = mtd->pl_chip->ecclayout; + chip->ecclayout = chip->ecc.layout = mtd->pl_chip->ecclayout; mtd->mtd.priv = chip; mtd->mtd.owner = THIS_MODULE; } diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c index 22fa65c12ab9..eb7d4d443deb 100644 --- a/drivers/mtd/nand/ppchameleonevb.c +++ b/drivers/mtd/nand/ppchameleonevb.c @@ -276,6 +276,7 @@ static int __init ppchameleonevb_init(void) /* Scan to find existence of the device (it could not be mounted) */ if (nand_scan(ppchameleon_mtd, 1)) { iounmap((void *)ppchameleon_fio_base); + ppchameleon_fio_base = NULL; kfree(ppchameleon_mtd); goto nand_evb_init; } @@ -314,6 +315,8 @@ static int __init ppchameleonevb_init(void) ppchameleonevb_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); if (!ppchameleonevb_mtd) { printk("Unable to allocate PPChameleonEVB NAND MTD device structure.\n"); + if (ppchameleon_fio_base) + iounmap(ppchameleon_fio_base); return -ENOMEM; } @@ -322,6 +325,8 @@ static int __init ppchameleonevb_init(void) if (!ppchameleonevb_fio_base) { printk("ioremap PPChameleonEVB NAND flash failed\n"); kfree(ppchameleonevb_mtd); + if (ppchameleon_fio_base) + iounmap(ppchameleon_fio_base); return -EIO; } @@ -378,6 +383,8 @@ static int __init ppchameleonevb_init(void) if (nand_scan(ppchameleonevb_mtd, 1)) { iounmap((void *)ppchameleonevb_fio_base); kfree(ppchameleonevb_mtd); + if (ppchameleon_fio_base) + iounmap(ppchameleon_fio_base); return -ENXIO; } #ifdef CONFIG_MTD_PARTITIONS diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c new file mode 100644 index 000000000000..ddbf015f4119 --- /dev/null +++ b/drivers/mtd/ssfdc.c @@ -0,0 +1,468 @@ +/* + * Linux driver for SSFDC Flash Translation Layer (Read only) + * (c) 2005 Eptar srl + * Author: Claudio Lanconelli <lanconelli.claudio@eptar.com> + * + * Based 
on NTFL and MTDBLOCK_RO drivers + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/config.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/hdreg.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/nand.h> +#include <linux/mtd/blktrans.h> + +struct ssfdcr_record { + struct mtd_blktrans_dev mbd; + int usecount; + unsigned char heads; + unsigned char sectors; + unsigned short cylinders; + int cis_block; /* block n. containing CIS/IDI */ + int erase_size; /* phys_block_size */ + unsigned short *logic_block_map; /* all zones (max 8192 phys blocks on + the 128MB) */ + int map_len; /* n. phys_blocks on the card */ +}; + +#define SSFDCR_MAJOR 257 +#define SSFDCR_PARTN_BITS 3 + +#define SECTOR_SIZE 512 +#define SECTOR_SHIFT 9 +#define OOB_SIZE 16 + +#define MAX_LOGIC_BLK_PER_ZONE 1000 +#define MAX_PHYS_BLK_PER_ZONE 1024 + +#define KB(x) ( (x) * 1024L ) +#define MB(x) ( KB(x) * 1024L ) + +/** CHS Table + 1MB 2MB 4MB 8MB 16MB 32MB 64MB 128MB +NCylinder 125 125 250 250 500 500 500 500 +NHead 4 4 4 4 4 8 8 16 +NSector 4 8 8 16 16 16 32 32 +SumSector 2,000 4,000 8,000 16,000 32,000 64,000 128,000 256,000 +SectorSize 512 512 512 512 512 512 512 512 +**/ + +typedef struct { + unsigned long size; + unsigned short cyl; + unsigned char head; + unsigned char sec; +} chs_entry_t; + +/* Must be ordered by size */ +static const chs_entry_t chs_table[] = { + { MB( 1), 125, 4, 4 }, + { MB( 2), 125, 4, 8 }, + { MB( 4), 250, 4, 8 }, + { MB( 8), 250, 4, 16 }, + { MB( 16), 500, 4, 16 }, + { MB( 32), 500, 8, 16 }, + { MB( 64), 500, 8, 32 }, + { MB(128), 500, 16, 32 }, + { 0 }, +}; + +static int get_chs(unsigned long size, unsigned short *cyl, unsigned char *head, + unsigned char *sec) +{ + int k; + int found = 0; + + k = 0; + while (chs_table[k].size > 0 && size > chs_table[k].size) + k++; + + if (chs_table[k].size > 0) { + if (cyl) + *cyl = chs_table[k].cyl; + if (head) + *head = chs_table[k].head; + if (sec) + *sec = chs_table[k].sec; + found = 1; + } + + return found; +} + +/* These bytes are the signature for the CIS/IDI sector */ +static const uint8_t cis_numbers[] = { + 0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20 +}; + +/* Read and check for a valid CIS sector */ +static int get_valid_cis_sector(struct mtd_info *mtd) +{ + int ret, k, cis_sector; + size_t retlen; + loff_t offset; + uint8_t sect_buf[SECTOR_SIZE]; + + /* + * Look for CIS/IDI sector on the first GOOD block (give up after 4 bad + * blocks). 
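
A worked example of the CHS lookup above: get_chs() walks chs_table[] to the first entry whose size is at least the requested one, so a 64MB card matches { MB(64), 500, 8, 32 } and yields 500 * 8 * 32 = 128,000 sectors of 512 bytes (62.5 MiB addressable), matching the SumSector row of the table:

    unsigned short cyl;
    unsigned char head, sec;

    /* 64MB card: fills in cyl=500, head=8, sec=32 and returns 1 */
    if (get_chs(MB(64), &cyl, &head, &sec))
            printk(KERN_DEBUG "C/H/S = %u/%u/%u\n", cyl, head, sec);
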
If the first good block doesn't contain CIS number the flash + * is not SSFDC formatted + */ + cis_sector = -1; + for (k = 0, offset = 0; k < 4; k++, offset += mtd->erasesize) { + if (!mtd->block_isbad(mtd, offset)) { + ret = mtd->read(mtd, offset, SECTOR_SIZE, &retlen, + sect_buf); + + /* CIS pattern match on the sector buffer */ + if ( ret < 0 || retlen != SECTOR_SIZE ) { + printk(KERN_WARNING + "SSFDC_RO:can't read CIS/IDI sector\n"); + } else if ( !memcmp(sect_buf, cis_numbers, + sizeof(cis_numbers)) ) { + /* Found */ + cis_sector = (int)(offset >> SECTOR_SHIFT); + } else { + DEBUG(MTD_DEBUG_LEVEL1, + "SSFDC_RO: CIS/IDI sector not found" + " on %s (mtd%d)\n", mtd->name, + mtd->index); + } + break; + } + } + + return cis_sector; +} + +/* Read physical sector (wrapper to MTD_READ) */ +static int read_physical_sector(struct mtd_info *mtd, uint8_t *sect_buf, + int sect_no) +{ + int ret; + size_t retlen; + loff_t offset = (loff_t)sect_no << SECTOR_SHIFT; + + ret = mtd->read(mtd, offset, SECTOR_SIZE, &retlen, sect_buf); + if (ret < 0 || retlen != SECTOR_SIZE) + return -1; + + return 0; +} + +/* Read redundancy area (wrapper to MTD_READ_OOB */ +static int read_raw_oob(struct mtd_info *mtd, loff_t offs, uint8_t *buf) +{ + struct mtd_oob_ops ops; + int ret; + + ops.mode = MTD_OOB_RAW; + ops.ooboffs = 0; + ops.ooblen = mtd->oobsize; + ops.len = OOB_SIZE; + ops.oobbuf = buf; + ops.datbuf = NULL; + + ret = mtd->read_oob(mtd, offs, &ops); + if (ret < 0 || ops.retlen != OOB_SIZE) + return -1; + + return 0; +} + +/* Parity calculator on a word of n bit size */ +static int get_parity(int number, int size) +{ + int k; + int parity; + + parity = 1; + for (k = 0; k < size; k++) { + parity += (number >> k); + parity &= 1; + } + return parity; +} + +/* Read and validate the logical block address field stored in the OOB */ +static int get_logical_address(uint8_t *oob_buf) +{ + int block_address, parity; + int offset[2] = {6, 11}; /* offset of the 2 address fields within OOB */ + int j; + int ok = 0; + + /* + * Look for the first valid logical address + * Valid address has fixed pattern on most significant bits and + * parity check + */ + for (j = 0; j < ARRAY_SIZE(offset); j++) { + block_address = ((int)oob_buf[offset[j]] << 8) | + oob_buf[offset[j]+1]; + + /* Check for the signature bits in the address field (MSBits) */ + if ((block_address & ~0x7FF) == 0x1000) { + parity = block_address & 0x01; + block_address &= 0x7FF; + block_address >>= 1; + + if (get_parity(block_address, 10) != parity) { + DEBUG(MTD_DEBUG_LEVEL0, + "SSFDC_RO: logical address field%d" + "parity error(0x%04X)\n", j+1, + block_address); + } else { + ok = 1; + break; + } + } + } + + if ( !ok ) + block_address = -2; + + DEBUG(MTD_DEBUG_LEVEL3, "SSFDC_RO: get_logical_address() %d\n", + block_address); + + return block_address; +} + +/* Build the logic block map */ +static int build_logical_block_map(struct ssfdcr_record *ssfdc) +{ + unsigned long offset; + uint8_t oob_buf[OOB_SIZE]; + int ret, block_address, phys_block; + struct mtd_info *mtd = ssfdc->mbd.mtd; + + DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: build_block_map() nblks=%d (%luK)\n", + ssfdc->map_len, (unsigned long)ssfdc->map_len * + ssfdc->erase_size / 1024 ); + + /* Scan every physical block, skip CIS block */ + for (phys_block = ssfdc->cis_block + 1; phys_block < ssfdc->map_len; + phys_block++) { + offset = (unsigned long)phys_block * ssfdc->erase_size; + if (mtd->block_isbad(mtd, offset)) + continue; /* skip bad blocks */ + + ret = read_raw_oob(mtd, offset, oob_buf); + if (ret < 0) 
{ + DEBUG(MTD_DEBUG_LEVEL0, + "SSFDC_RO: mtd read_oob() failed at %lu\n", + offset); + return -1; + } + block_address = get_logical_address(oob_buf); + + /* Skip invalid addresses */ + if (block_address >= 0 && + block_address < MAX_LOGIC_BLK_PER_ZONE) { + int zone_index; + + zone_index = phys_block / MAX_PHYS_BLK_PER_ZONE; + block_address += zone_index * MAX_LOGIC_BLK_PER_ZONE; + ssfdc->logic_block_map[block_address] = + (unsigned short)phys_block; + + DEBUG(MTD_DEBUG_LEVEL2, + "SSFDC_RO: build_block_map() phys_block=%d," + "logic_block_addr=%d, zone=%d\n", + phys_block, block_address, zone_index); + } + } + return 0; +} + +static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) +{ + struct ssfdcr_record *ssfdc; + int cis_sector; + + /* Check for small page NAND flash */ + if (mtd->type != MTD_NANDFLASH || mtd->oobsize != OOB_SIZE) + return; + + /* Check for SSDFC format by reading CIS/IDI sector */ + cis_sector = get_valid_cis_sector(mtd); + if (cis_sector == -1) + return; + + ssfdc = kzalloc(sizeof(struct ssfdcr_record), GFP_KERNEL); + if (!ssfdc) { + printk(KERN_WARNING + "SSFDC_RO: out of memory for data structures\n"); + return; + } + + ssfdc->mbd.mtd = mtd; + ssfdc->mbd.devnum = -1; + ssfdc->mbd.blksize = SECTOR_SIZE; + ssfdc->mbd.tr = tr; + ssfdc->mbd.readonly = 1; + + ssfdc->cis_block = cis_sector / (mtd->erasesize >> SECTOR_SHIFT); + ssfdc->erase_size = mtd->erasesize; + ssfdc->map_len = mtd->size / mtd->erasesize; + + DEBUG(MTD_DEBUG_LEVEL1, + "SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n", + ssfdc->cis_block, ssfdc->erase_size, ssfdc->map_len, + (ssfdc->map_len + MAX_PHYS_BLK_PER_ZONE - 1) / + MAX_PHYS_BLK_PER_ZONE); + + /* Set geometry */ + ssfdc->heads = 16; + ssfdc->sectors = 32; + get_chs( mtd->size, NULL, &ssfdc->heads, &ssfdc->sectors); + ssfdc->cylinders = (unsigned short)((mtd->size >> SECTOR_SHIFT) / + ((long)ssfdc->sectors * (long)ssfdc->heads)); + + DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n", + ssfdc->cylinders, ssfdc->heads , ssfdc->sectors, + (long)ssfdc->cylinders * (long)ssfdc->heads * + (long)ssfdc->sectors ); + + ssfdc->mbd.size = (long)ssfdc->heads * (long)ssfdc->cylinders * + (long)ssfdc->sectors; + + /* Allocate logical block map */ + ssfdc->logic_block_map = kmalloc( sizeof(ssfdc->logic_block_map[0]) * + ssfdc->map_len, GFP_KERNEL); + if (!ssfdc->logic_block_map) { + printk(KERN_WARNING + "SSFDC_RO: out of memory for data structures\n"); + goto out_err; + } + memset(ssfdc->logic_block_map, 0xff, sizeof(ssfdc->logic_block_map[0]) * + ssfdc->map_len); + + /* Build logical block map */ + if (build_logical_block_map(ssfdc) < 0) + goto out_err; + + /* Register device + partitions */ + if (add_mtd_blktrans_dev(&ssfdc->mbd)) + goto out_err; + + printk(KERN_INFO "SSFDC_RO: Found ssfdc%c on mtd%d (%s)\n", + ssfdc->mbd.devnum + 'a', mtd->index, mtd->name); + return; + +out_err: + kfree(ssfdc->logic_block_map); + kfree(ssfdc); +} + +static void ssfdcr_remove_dev(struct mtd_blktrans_dev *dev) +{ + struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev; + + DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: remove_dev (i=%d)\n", dev->devnum); + + del_mtd_blktrans_dev(dev); + kfree(ssfdc->logic_block_map); + kfree(ssfdc); +} + +static int ssfdcr_readsect(struct mtd_blktrans_dev *dev, + unsigned long logic_sect_no, char *buf) +{ + struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev; + int sectors_per_block, offset, block_address; + + sectors_per_block = ssfdc->erase_size >> SECTOR_SHIFT; + offset = 
(int)(logic_sect_no % sectors_per_block); + block_address = (int)(logic_sect_no / sectors_per_block); + + DEBUG(MTD_DEBUG_LEVEL3, + "SSFDC_RO: ssfdcr_readsect(%lu) sec_per_blk=%d, ofst=%d," + " block_addr=%d\n", logic_sect_no, sectors_per_block, offset, + block_address); + + if (block_address >= ssfdc->map_len) + BUG(); + + block_address = ssfdc->logic_block_map[block_address]; + + DEBUG(MTD_DEBUG_LEVEL3, + "SSFDC_RO: ssfdcr_readsect() phys_block_addr=%d\n", + block_address); + + if (block_address < 0xffff) { + unsigned long sect_no; + + sect_no = (unsigned long)block_address * sectors_per_block + + offset; + + DEBUG(MTD_DEBUG_LEVEL3, + "SSFDC_RO: ssfdcr_readsect() phys_sect_no=%lu\n", + sect_no); + + if (read_physical_sector( ssfdc->mbd.mtd, buf, sect_no ) < 0) + return -EIO; + } else { + memset(buf, 0xff, SECTOR_SIZE); + } + + return 0; +} + +static int ssfdcr_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo) +{ + struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev; + + DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: ssfdcr_getgeo() C=%d, H=%d, S=%d\n", + ssfdc->cylinders, ssfdc->heads, ssfdc->sectors); + + geo->heads = ssfdc->heads; + geo->sectors = ssfdc->sectors; + geo->cylinders = ssfdc->cylinders; + + return 0; +} + +/**************************************************************************** + * + * Module stuff + * + ****************************************************************************/ + +static struct mtd_blktrans_ops ssfdcr_tr = { + .name = "ssfdc", + .major = SSFDCR_MAJOR, + .part_bits = SSFDCR_PARTN_BITS, + .getgeo = ssfdcr_getgeo, + .readsect = ssfdcr_readsect, + .add_mtd = ssfdcr_add_mtd, + .remove_dev = ssfdcr_remove_dev, + .owner = THIS_MODULE, +}; + +static int __init init_ssfdcr(void) +{ + printk(KERN_INFO "SSFDC read-only Flash Translation layer\n"); + + return register_mtd_blktrans(&ssfdcr_tr); +} + +static void __exit cleanup_ssfdcr(void) +{ + deregister_mtd_blktrans(&ssfdcr_tr); +} + +module_init(init_ssfdcr); +module_exit(cleanup_ssfdcr); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Claudio Lanconelli <lanconelli.claudio@eptar.com>"); +MODULE_DESCRIPTION("Flash Translation Layer for read-only SSFDC SmartMedia card"); diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c index b0c19292b60d..4adfe7b77031 100644 --- a/drivers/net/bmac.c +++ b/drivers/net/bmac.c @@ -1264,7 +1264,8 @@ static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_i { int j, rev, ret; struct bmac_data *bp; - unsigned char *addr; + const unsigned char *prop_addr; + unsigned char addr[6]; struct net_device *dev; int is_bmac_plus = ((int)match->data) != 0; @@ -1272,14 +1273,16 @@ static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_i printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n"); return -ENODEV; } - addr = get_property(macio_get_of_node(mdev), "mac-address", NULL); - if (addr == NULL) { - addr = get_property(macio_get_of_node(mdev), "local-mac-address", NULL); - if (addr == NULL) { + prop_addr = get_property(macio_get_of_node(mdev), "mac-address", NULL); + if (prop_addr == NULL) { + prop_addr = get_property(macio_get_of_node(mdev), + "local-mac-address", NULL); + if (prop_addr == NULL) { printk(KERN_ERR "BMAC: Can't get mac-address\n"); return -ENODEV; } } + memcpy(addr, prop_addr, sizeof(addr)); dev = alloc_etherdev(PRIV_BYTES); if (!dev) { diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index 48e68f5d1651..767203d35bc2 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c @@ -702,7 +702,8 @@ static int 
ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev) desc[3].desc, desc[4].desc, desc[5].desc, - correlator); + correlator, + &correlator); } while ((lpar_rc == H_BUSY) && (retry_count--)); if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) { diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h index 8385bf836507..f5b25bff1540 100644 --- a/drivers/net/ibmveth.h +++ b/drivers/net/ibmveth.h @@ -41,16 +41,6 @@ #define IbmVethMcastRemoveFilter 0x2UL #define IbmVethMcastClearFilterTable 0x3UL -/* hcall numbers */ -#define H_VIO_SIGNAL 0x104 -#define H_REGISTER_LOGICAL_LAN 0x114 -#define H_FREE_LOGICAL_LAN 0x118 -#define H_ADD_LOGICAL_LAN_BUFFER 0x11C -#define H_SEND_LOGICAL_LAN 0x120 -#define H_MULTICAST_CTRL 0x130 -#define H_CHANGE_LOGICAL_LAN_MAC 0x14C -#define H_FREE_LOGICAL_LAN_BUFFER 0x1D4 - /* hcall macros */ #define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \ plpar_hcall_norets(H_REGISTER_LOGICAL_LAN, ua, buflst, rxq, fltlst, mac) @@ -61,8 +51,21 @@ #define h_add_logical_lan_buffer(ua, buf) \ plpar_hcall_norets(H_ADD_LOGICAL_LAN_BUFFER, ua, buf) -#define h_send_logical_lan(ua, buf1, buf2, buf3, buf4, buf5, buf6, correlator) \ - plpar_hcall_8arg_2ret(H_SEND_LOGICAL_LAN, ua, buf1, buf2, buf3, buf4, buf5, buf6, correlator, &correlator) +static inline long h_send_logical_lan(unsigned long unit_address, + unsigned long desc1, unsigned long desc2, unsigned long desc3, + unsigned long desc4, unsigned long desc5, unsigned long desc6, + unsigned long corellator_in, unsigned long *corellator_out) +{ + long rc; + unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; + + rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address, desc1, + desc2, desc3, desc4, desc5, desc6, corellator_in); + + *corellator_out = retbuf[0]; + + return rc; +} #define h_multicast_ctrl(ua, cmd, mac) \ plpar_hcall_norets(H_MULTICAST_CTRL, ua, cmd, mac) diff --git a/drivers/net/mace.c b/drivers/net/mace.c index 47d7850da47b..27c24eaa2414 100644 --- a/drivers/net/mace.c +++ b/drivers/net/mace.c @@ -113,7 +113,7 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i struct device_node *mace = macio_get_of_node(mdev); struct net_device *dev; struct mace_data *mp; - unsigned char *addr; + const unsigned char *addr; int j, rev, rc = -EBUSY; if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) { diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c index 51ff9a9d1bb5..f3655fd772f5 100644 --- a/drivers/net/ppp_mppe.c +++ b/drivers/net/ppp_mppe.c @@ -43,6 +43,7 @@ * deprecated in 2.6 */ +#include <linux/err.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/version.h> @@ -64,12 +65,13 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE)); MODULE_VERSION("1.0.2"); -static void +static unsigned int setup_sg(struct scatterlist *sg, const void *address, unsigned int length) { sg[0].page = virt_to_page(address); sg[0].offset = offset_in_page(address); sg[0].length = length; + return length; } #define SHA1_PAD_SIZE 40 @@ -95,8 +97,8 @@ static inline void sha_pad_init(struct sha_pad *shapad) * State for an MPPE (de)compressor. 
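
setup_sg() now returns the length it mapped because the new crypto_hash_digest() interface takes a total byte count rather than a scatterlist entry count, so callers accumulate nbytes as they fill the list. A minimal sketch of the resulting call pattern, assuming sha1_tfm was obtained with crypto_alloc_hash() and key/pad are arbitrary buffers:

    struct hash_desc desc = { .tfm = sha1_tfm, .flags = 0 };
    struct scatterlist sg[2];
    unsigned int nbytes = 0;
    u8 out[20];     /* SHA-1 digest */

    nbytes += setup_sg(&sg[0], key, key_len);
    nbytes += setup_sg(&sg[1], pad, pad_len);

    if (crypto_hash_digest(&desc, sg, nbytes, out))
            printk(KERN_WARNING "sha1 digest failed\n");
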
*/ struct ppp_mppe_state { - struct crypto_tfm *arc4; - struct crypto_tfm *sha1; + struct crypto_blkcipher *arc4; + struct crypto_hash *sha1; unsigned char *sha1_digest; unsigned char master_key[MPPE_MAX_KEY_LEN]; unsigned char session_key[MPPE_MAX_KEY_LEN]; @@ -136,14 +138,21 @@ struct ppp_mppe_state { */ static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *InterimKey) { + struct hash_desc desc; struct scatterlist sg[4]; + unsigned int nbytes; - setup_sg(&sg[0], state->master_key, state->keylen); - setup_sg(&sg[1], sha_pad->sha_pad1, sizeof(sha_pad->sha_pad1)); - setup_sg(&sg[2], state->session_key, state->keylen); - setup_sg(&sg[3], sha_pad->sha_pad2, sizeof(sha_pad->sha_pad2)); + nbytes = setup_sg(&sg[0], state->master_key, state->keylen); + nbytes += setup_sg(&sg[1], sha_pad->sha_pad1, + sizeof(sha_pad->sha_pad1)); + nbytes += setup_sg(&sg[2], state->session_key, state->keylen); + nbytes += setup_sg(&sg[3], sha_pad->sha_pad2, + sizeof(sha_pad->sha_pad2)); - crypto_digest_digest (state->sha1, sg, 4, state->sha1_digest); + desc.tfm = state->sha1; + desc.flags = 0; + + crypto_hash_digest(&desc, sg, nbytes, state->sha1_digest); memcpy(InterimKey, state->sha1_digest, state->keylen); } @@ -156,14 +165,15 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key) { unsigned char InterimKey[MPPE_MAX_KEY_LEN]; struct scatterlist sg_in[1], sg_out[1]; + struct blkcipher_desc desc = { .tfm = state->arc4 }; get_new_key_from_sha(state, InterimKey); if (!initial_key) { - crypto_cipher_setkey(state->arc4, InterimKey, state->keylen); + crypto_blkcipher_setkey(state->arc4, InterimKey, state->keylen); setup_sg(sg_in, InterimKey, state->keylen); setup_sg(sg_out, state->session_key, state->keylen); - if (crypto_cipher_encrypt(state->arc4, sg_out, sg_in, - state->keylen) != 0) { + if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, + state->keylen) != 0) { printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n"); } } else { @@ -175,7 +185,7 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key) state->session_key[1] = 0x26; state->session_key[2] = 0x9e; } - crypto_cipher_setkey(state->arc4, state->session_key, state->keylen); + crypto_blkcipher_setkey(state->arc4, state->session_key, state->keylen); } /* @@ -196,15 +206,19 @@ static void *mppe_alloc(unsigned char *options, int optlen) memset(state, 0, sizeof(*state)); - state->arc4 = crypto_alloc_tfm("arc4", 0); - if (!state->arc4) + state->arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(state->arc4)) { + state->arc4 = NULL; goto out_free; + } - state->sha1 = crypto_alloc_tfm("sha1", 0); - if (!state->sha1) + state->sha1 = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(state->sha1)) { + state->sha1 = NULL; goto out_free; + } - digestsize = crypto_tfm_alg_digestsize(state->sha1); + digestsize = crypto_hash_digestsize(state->sha1); if (digestsize < MPPE_MAX_KEY_LEN) goto out_free; @@ -229,9 +243,9 @@ static void *mppe_alloc(unsigned char *options, int optlen) if (state->sha1_digest) kfree(state->sha1_digest); if (state->sha1) - crypto_free_tfm(state->sha1); + crypto_free_hash(state->sha1); if (state->arc4) - crypto_free_tfm(state->arc4); + crypto_free_blkcipher(state->arc4); kfree(state); out: return NULL; @@ -247,9 +261,9 @@ static void mppe_free(void *arg) if (state->sha1_digest) kfree(state->sha1_digest); if (state->sha1) - crypto_free_tfm(state->sha1); + crypto_free_hash(state->sha1); if (state->arc4) - crypto_free_tfm(state->arc4); + 
crypto_free_blkcipher(state->arc4); kfree(state); } } @@ -356,6 +370,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf, int isize, int osize) { struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; + struct blkcipher_desc desc = { .tfm = state->arc4 }; int proto; struct scatterlist sg_in[1], sg_out[1]; @@ -413,7 +428,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf, /* Encrypt packet */ setup_sg(sg_in, ibuf, isize); setup_sg(sg_out, obuf, osize); - if (crypto_cipher_encrypt(state->arc4, sg_out, sg_in, isize) != 0) { + if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, isize) != 0) { printk(KERN_DEBUG "crypto_cypher_encrypt failed\n"); return -1; } @@ -462,6 +477,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, int osize) { struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; + struct blkcipher_desc desc = { .tfm = state->arc4 }; unsigned ccount; int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED; int sanity = 0; @@ -599,7 +615,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, */ setup_sg(sg_in, ibuf, 1); setup_sg(sg_out, obuf, 1); - if (crypto_cipher_decrypt(state->arc4, sg_out, sg_in, 1) != 0) { + if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, 1) != 0) { printk(KERN_DEBUG "crypto_cypher_decrypt failed\n"); return DECOMP_ERROR; } @@ -619,7 +635,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, /* And finally, decrypt the rest of the packet. */ setup_sg(sg_in, ibuf + 1, isize - 1); setup_sg(sg_out, obuf + 1, osize - 1); - if (crypto_cipher_decrypt(state->arc4, sg_out, sg_in, isize - 1) != 0) { + if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, isize - 1)) { printk(KERN_DEBUG "crypto_cypher_decrypt failed\n"); return DECOMP_ERROR; } @@ -694,8 +710,8 @@ static struct compressor ppp_mppe = { static int __init ppp_mppe_init(void) { int answer; - if (!(crypto_alg_available("arc4", 0) && - crypto_alg_available("sha1", 0))) + if (!(crypto_has_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) && + crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC))) return -ENODEV; sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL); diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index 88907218457a..d64e718afbd2 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c @@ -1697,10 +1697,10 @@ spider_net_setup_phy(struct spider_net_card *card) */ static int spider_net_download_firmware(struct spider_net_card *card, - u8 *firmware_ptr) + const void *firmware_ptr) { int sequencer, i; - u32 *fw_ptr = (u32 *)firmware_ptr; + const u32 *fw_ptr = firmware_ptr; /* stop sequencers */ spider_net_write_reg(card, SPIDER_NET_GSINIT, @@ -1757,7 +1757,7 @@ spider_net_init_firmware(struct spider_net_card *card) { struct firmware *firmware = NULL; struct device_node *dn; - u8 *fw_prop = NULL; + const u8 *fw_prop = NULL; int err = -ENOENT; int fw_size; @@ -1783,7 +1783,7 @@ try_host_fw: if (!dn) goto out_err; - fw_prop = (u8 *)get_property(dn, "firmware", &fw_size); + fw_prop = get_property(dn, "firmware", &fw_size); if (!fw_prop) goto out_err; @@ -1986,7 +1986,7 @@ spider_net_setup_netdev(struct spider_net_card *card) struct net_device *netdev = card->netdev; struct device_node *dn; struct sockaddr addr; - u8 *mac; + const u8 *mac; SET_MODULE_OWNER(netdev); SET_NETDEV_DEV(netdev, &card->pdev->dev); @@ -2019,7 +2019,7 @@ spider_net_setup_netdev(struct spider_net_card *card) if (!dn) return -EIO; - mac = (u8 *)get_property(dn, "local-mac-address", 
NULL); + mac = get_property(dn, "local-mac-address", NULL); if (!mac) return -EIO; memcpy(addr.sa_data, mac, ETH_ALEN); diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c index e567d0ae77ee..e06c59d4dd62 100644 --- a/drivers/net/sungem.c +++ b/drivers/net/sungem.c @@ -2896,7 +2896,7 @@ static int __devinit gem_get_device_address(struct gem *gp) if (use_idprom) memcpy(dev->dev_addr, idprom->id_ethaddr, 6); #elif defined(CONFIG_PPC_PMAC) - unsigned char *addr; + const unsigned char *addr; addr = get_property(gp->of_node, "local-mac-address", NULL); if (addr == NULL) { diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index e088ceefb4a3..bff04cba3fed 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c @@ -19,6 +19,7 @@ ======================================================================*/ +#include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> @@ -1203,7 +1204,7 @@ struct airo_info { struct iw_spy_data spy_data; struct iw_public_data wireless_data; /* MIC stuff */ - struct crypto_tfm *tfm; + struct crypto_cipher *tfm; mic_module mod[2]; mic_statistics micstats; HostRxDesc rxfids[MPI_MAX_FIDS]; // rx/tx/config MPI350 descriptors @@ -1271,7 +1272,8 @@ static int flashrestart(struct airo_info *ai,struct net_device *dev); static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSeq); static void MoveWindow(miccntx *context, u32 micSeq); -static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct crypto_tfm *); +static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, + struct crypto_cipher *tfm); static void emmh32_init(emmh32_context *context); static void emmh32_update(emmh32_context *context, u8 *pOctets, int len); static void emmh32_final(emmh32_context *context, u8 digest[4]); @@ -1339,10 +1341,11 @@ static int micsetup(struct airo_info *ai) { int i; if (ai->tfm == NULL) - ai->tfm = crypto_alloc_tfm("aes", CRYPTO_TFM_REQ_MAY_SLEEP); + ai->tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); - if (ai->tfm == NULL) { + if (IS_ERR(ai->tfm)) { airo_print_err(ai->dev->name, "failed to load transform for AES"); + ai->tfm = NULL; return ERROR; } @@ -1608,7 +1611,8 @@ static void MoveWindow(miccntx *context, u32 micSeq) static unsigned char aes_counter[16]; /* expand the key to fill the MMH coefficient array */ -static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct crypto_tfm *tfm) +static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, + struct crypto_cipher *tfm) { /* take the keying material, expand if necessary, truncate at 16-bytes */ /* run through AES counter mode to generate context->coeff[] */ @@ -1616,7 +1620,6 @@ static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct int i,j; u32 counter; u8 *cipher, plain[16]; - struct scatterlist sg[1]; crypto_cipher_setkey(tfm, pkey, 16); counter = 0; @@ -1627,9 +1630,8 @@ static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct aes_counter[12] = (u8)(counter >> 24); counter++; memcpy (plain, aes_counter, 16); - sg_set_buf(sg, plain, 16); - crypto_cipher_encrypt(tfm, sg, sg, 16); - cipher = kmap(sg->page) + sg->offset; + crypto_cipher_encrypt_one(tfm, plain, plain); + cipher = plain; for (j=0; (j<16) && (i< (sizeof(context->coeff)/sizeof(context->coeff[0]))); ) { context->coeff[i++] = ntohl(*(u32 *)&cipher[j]); j += 4; @@ -2431,7 +2433,7 @@ void stop_airo_card( struct net_device *dev, int freeres ) ai->shared, ai->shared_dma); } 
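For reference, the conversion in emmh32_setseed() above shows the new single-block cipher interface, which operates on plain buffers and so needs neither a scatterlist nor kmap(). A minimal sketch of that interface; the helper name and the fixed 16-byte key are illustrative assumptions, not part of the patch:

    #include <linux/crypto.h>
    #include <linux/err.h>

    /* Illustrative helper: encrypt one 16-byte block in place with AES. */
    static int example_aes_encrypt_block(const u8 *key, u8 block[16])
    {
            struct crypto_cipher *tfm;
            int rc;

            tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);
            rc = crypto_cipher_setkey(tfm, key, 16);
            if (!rc)
                    crypto_cipher_encrypt_one(tfm, block, block);
            crypto_free_cipher(tfm);
            return rc;
    }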
} - crypto_free_tfm(ai->tfm); + crypto_free_cipher(ai->tfm); del_airo_dev( dev ); free_netdev( dev ); } diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c index 076bd6dcafae..7288a3eccfb3 100644 --- a/drivers/pci/hotplug/rpaphp_core.c +++ b/drivers/pci/hotplug/rpaphp_core.c @@ -176,16 +176,16 @@ static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_spe return 0; } -static int get_children_props(struct device_node *dn, int **drc_indexes, - int **drc_names, int **drc_types, int **drc_power_domains) +static int get_children_props(struct device_node *dn, const int **drc_indexes, + const int **drc_names, const int **drc_types, + const int **drc_power_domains) { - int *indexes, *names; - int *types, *domains; + const int *indexes, *names, *types, *domains; - indexes = (int *) get_property(dn, "ibm,drc-indexes", NULL); - names = (int *) get_property(dn, "ibm,drc-names", NULL); - types = (int *) get_property(dn, "ibm,drc-types", NULL); - domains = (int *) get_property(dn, "ibm,drc-power-domains", NULL); + indexes = get_property(dn, "ibm,drc-indexes", NULL); + names = get_property(dn, "ibm,drc-names", NULL); + types = get_property(dn, "ibm,drc-types", NULL); + domains = get_property(dn, "ibm,drc-power-domains", NULL); if (!indexes || !names || !types || !domains) { /* Slot does not have dynamically-removable children */ @@ -212,13 +212,13 @@ static int get_children_props(struct device_node *dn, int **drc_indexes, int rpaphp_get_drc_props(struct device_node *dn, int *drc_index, char **drc_name, char **drc_type, int *drc_power_domain) { - int *indexes, *names; - int *types, *domains; - unsigned int *my_index; + const int *indexes, *names; + const int *types, *domains; + const unsigned int *my_index; char *name_tmp, *type_tmp; int i, rc; - my_index = (int *) get_property(dn, "ibm,my-drc-index", NULL); + my_index = get_property(dn, "ibm,my-drc-index", NULL); if (!my_index) { /* Node isn't DLPAR/hotplug capable */ return -EINVAL; @@ -265,10 +265,10 @@ static int is_php_type(char *drc_type) return 1; } -static int is_php_dn(struct device_node *dn, int **indexes, int **names, - int **types, int **power_domains) +static int is_php_dn(struct device_node *dn, const int **indexes, + const int **names, const int **types, const int **power_domains) { - int *drc_types; + const int *drc_types; int rc; rc = get_children_props(dn, indexes, names, &drc_types, power_domains); @@ -296,7 +296,7 @@ int rpaphp_add_slot(struct device_node *dn) struct slot *slot; int retval = 0; int i; - int *indexes, *names, *types, *power_domains; + const int *indexes, *names, *types, *power_domains; char *name, *type; dbg("Entry %s: dn->full_name=%s\n", __FUNCTION__, dn->full_name); diff --git a/drivers/s390/Kconfig b/drivers/s390/Kconfig index 4d36208ff8de..ae89b9b88743 100644 --- a/drivers/s390/Kconfig +++ b/drivers/s390/Kconfig @@ -213,17 +213,35 @@ config MONREADER help Character device driver for reading z/VM monitor service records +config MONWRITER + tristate "API for writing z/VM monitor service records" + default "m" + help + Character device driver for writing z/VM monitor service records + endmenu menu "Cryptographic devices" -config Z90CRYPT +config ZCRYPT tristate "Support for PCI-attached cryptographic adapters" - default "m" - help + select ZCRYPT_MONOLITHIC if ZCRYPT="y" + default "m" + help Select this option if you want to use a PCI-attached cryptographic - adapter like the PCI Cryptographic Accelerator (PCICA) or the PCI - Cryptographic Coprocessor (PCICC). 
This option is also available - as a module called z90crypt.ko. + adapter like: + + PCI Cryptographic Accelerator (PCICA) + + PCI Cryptographic Coprocessor (PCICC) + + PCI-X Cryptographic Coprocessor (PCIXCC) + + Crypto Express2 Coprocessor (CEX2C) + + Crypto Express2 Accelerator (CEX2A) + +config ZCRYPT_MONOLITHIC + bool "Monolithic zcrypt module" + depends on ZCRYPT="m" + help + Select this option if you want to have a single module z90crypt.ko + that contains all parts of the crypto device driver (ap bus, + request router and all the card drivers). endmenu diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 25c1ef6dfd44..d0647d116eaa 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -184,7 +184,7 @@ dasd_state_known_to_basic(struct dasd_device * device) device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2, 8 * sizeof (long)); debug_register_view(device->debug_area, &debug_sprintf_view); - debug_set_level(device->debug_area, DBF_EMERG); + debug_set_level(device->debug_area, DBF_WARNING); DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created"); device->state = DASD_STATE_BASIC; @@ -893,7 +893,7 @@ dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm) device = (struct dasd_device *) cqr->device; if (device == NULL || - device != dasd_device_from_cdev(cdev) || + device != dasd_device_from_cdev_locked(cdev) || strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s", cdev->dev.bus_id); @@ -970,7 +970,7 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, /* first of all check for state change pending interrupt */ mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; if ((irb->scsw.dstat & mask) == mask) { - device = dasd_device_from_cdev(cdev); + device = dasd_device_from_cdev_locked(cdev); if (!IS_ERR(device)) { dasd_handle_state_change_pending(device); dasd_put_device(device); @@ -2169,7 +2169,7 @@ dasd_init(void) goto failed; } debug_register_view(dasd_debug_area, &debug_sprintf_view); - debug_set_level(dasd_debug_area, DBF_EMERG); + debug_set_level(dasd_debug_area, DBF_WARNING); DBF_EVENT(DBF_EMERG, "%s", "debug area created"); diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 9af02c79ce8a..91cf971f0652 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -258,8 +258,12 @@ dasd_parse_keyword( char *parsestring ) { return residual_str; } if (strncmp("nopav", parsestring, length) == 0) { - dasd_nopav = 1; - MESSAGE(KERN_INFO, "%s", "disable PAV mode"); + if (MACHINE_IS_VM) + MESSAGE(KERN_INFO, "%s", "'nopav' not supported on VM"); + else { + dasd_nopav = 1; + MESSAGE(KERN_INFO, "%s", "disable PAV mode"); + } return residual_str; } if (strncmp("fixedbuffers", parsestring, length) == 0) { @@ -523,17 +527,17 @@ dasd_create_device(struct ccw_device *cdev) { struct dasd_devmap *devmap; struct dasd_device *device; + unsigned long flags; int rc; devmap = dasd_devmap_from_cdev(cdev); if (IS_ERR(devmap)) return (void *) devmap; - cdev->dev.driver_data = devmap; device = dasd_alloc_device(); if (IS_ERR(device)) return device; - atomic_set(&device->ref_count, 2); + atomic_set(&device->ref_count, 3); spin_lock(&dasd_devmap_lock); if (!devmap->device) { @@ -552,6 +556,11 @@ dasd_create_device(struct ccw_device *cdev) dasd_free_device(device); return ERR_PTR(rc); } + + spin_lock_irqsave(get_ccwdev_lock(cdev), flags); + cdev->dev.driver_data = device; + 
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); + return device; } @@ -569,6 +578,7 @@ dasd_delete_device(struct dasd_device *device) { struct ccw_device *cdev; struct dasd_devmap *devmap; + unsigned long flags; /* First remove device pointer from devmap. */ devmap = dasd_find_busid(device->cdev->dev.bus_id); @@ -582,9 +592,16 @@ dasd_delete_device(struct dasd_device *device) devmap->device = NULL; spin_unlock(&dasd_devmap_lock); - /* Drop ref_count by 2, one for the devmap reference and - * one for the passed reference. */ - atomic_sub(2, &device->ref_count); + /* Disconnect dasd_device structure from ccw_device structure. */ + spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); + device->cdev->dev.driver_data = NULL; + spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); + + /* + * Drop ref_count by 3, one for the devmap reference, one for + * the cdev reference and one for the passed reference. + */ + atomic_sub(3, &device->ref_count); /* Wait for reference counter to drop to zero. */ wait_event(dasd_delete_wq, atomic_read(&device->ref_count) == 0); @@ -593,9 +610,6 @@ dasd_delete_device(struct dasd_device *device) cdev = device->cdev; device->cdev = NULL; - /* Disconnect dasd_devmap structure from ccw_device structure. */ - cdev->dev.driver_data = NULL; - /* Put ccw_device structure. */ put_device(&cdev->dev); @@ -615,21 +629,32 @@ dasd_put_device_wake(struct dasd_device *device) /* * Return dasd_device structure associated with cdev. + * This function needs to be called with the ccw device + * lock held. It can be used from interrupt context. + */ +struct dasd_device * +dasd_device_from_cdev_locked(struct ccw_device *cdev) +{ + struct dasd_device *device = cdev->dev.driver_data; + + if (!device) + return ERR_PTR(-ENODEV); + dasd_get_device(device); + return device; +} + +/* + * Return dasd_device structure associated with cdev. 
*/ struct dasd_device * dasd_device_from_cdev(struct ccw_device *cdev) { - struct dasd_devmap *devmap; struct dasd_device *device; + unsigned long flags; - device = ERR_PTR(-ENODEV); - spin_lock(&dasd_devmap_lock); - devmap = cdev->dev.driver_data; - if (devmap && devmap->device) { - device = devmap->device; - dasd_get_device(device); - } - spin_unlock(&dasd_devmap_lock); + spin_lock_irqsave(get_ccwdev_lock(cdev), flags); + device = dasd_device_from_cdev_locked(cdev); + spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); return device; } @@ -730,16 +755,17 @@ static ssize_t dasd_discipline_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct dasd_devmap *devmap; - char *dname; + struct dasd_device *device; + ssize_t len; - spin_lock(&dasd_devmap_lock); - dname = "none"; - devmap = dev->driver_data; - if (devmap && devmap->device && devmap->device->discipline) - dname = devmap->device->discipline->name; - spin_unlock(&dasd_devmap_lock); - return snprintf(buf, PAGE_SIZE, "%s\n", dname); + device = dasd_device_from_cdev(to_ccwdev(dev)); + if (!IS_ERR(device) && device->discipline) { + len = snprintf(buf, PAGE_SIZE, "%s\n", + device->discipline->name); + dasd_put_device(device); + } else + len = snprintf(buf, PAGE_SIZE, "none\n"); + return len; } static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL); diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index da65f1b032f5..e0bf30ebb215 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c @@ -678,7 +678,7 @@ int __init dasd_eer_init(void) return 0; } -void __exit dasd_eer_exit(void) +void dasd_eer_exit(void) { WARN_ON(misc_deregister(&dasd_eer_dev) != 0); } diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 3ccf06d28ba1..9f52004f6fc2 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -534,6 +534,7 @@ int dasd_add_sysfs_files(struct ccw_device *); void dasd_remove_sysfs_files(struct ccw_device *); struct dasd_device *dasd_device_from_cdev(struct ccw_device *); +struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *); struct dasd_device *dasd_device_from_devindex(int); int dasd_parse(void); diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index ca7d51f7eccc..cab2c736683a 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -453,7 +453,7 @@ static int __init xpram_init(void) PRINT_WARN("No expanded memory available\n"); return -ENODEV; } - xpram_pages = xpram_highest_page_index(); + xpram_pages = xpram_highest_page_index() + 1; PRINT_INFO(" %u pages expanded memory found (%lu KB).\n", xpram_pages, (unsigned long) xpram_pages*4); rc = xpram_setup_sizes(xpram_pages); diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index 0c0162ff6c0c..c3e97b4fc186 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile @@ -28,3 +28,4 @@ obj-$(CONFIG_S390_TAPE) += tape.o tape_class.o obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o obj-$(CONFIG_MONREADER) += monreader.o +obj-$(CONFIG_MONWRITER) += monwriter.o diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c new file mode 100644 index 000000000000..1e3939aeb8ab --- /dev/null +++ b/drivers/s390/char/monwriter.c @@ -0,0 +1,292 @@ +/* + * drivers/s390/char/monwriter.c + * + * Character device driver for writing z/VM *MONITOR service records. + * + * Copyright (C) IBM Corp. 
2006 + * + * Author(s): Melissa Howland <Melissa.Howland@us.ibm.com> + */ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/miscdevice.h> +#include <linux/ctype.h> +#include <linux/poll.h> +#include <asm/uaccess.h> +#include <asm/ebcdic.h> +#include <asm/io.h> +#include <asm/appldata.h> +#include <asm/monwriter.h> + +#define MONWRITE_MAX_DATALEN 4024 + +static int mon_max_bufs = 255; + +struct mon_buf { + struct list_head list; + struct monwrite_hdr hdr; + int diag_done; + char *data; +}; + +struct mon_private { + struct list_head list; + struct monwrite_hdr hdr; + size_t hdr_to_read; + size_t data_to_read; + struct mon_buf *current_buf; + int mon_buf_count; +}; + +/* + * helper functions + */ + +static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn) +{ + struct appldata_product_id id; + int rc; + + strcpy(id.prod_nr, "LNXAPPL"); + id.prod_fn = myhdr->applid; + id.record_nr = myhdr->record_num; + id.version_nr = myhdr->version; + id.release_nr = myhdr->release; + id.mod_lvl = myhdr->mod_level; + rc = appldata_asm(&id, fcn, (void *) buffer, myhdr->datalen); + if (rc <= 0) + return rc; + if (rc == 5) + return -EPERM; + printk("DIAG X'DC' error with return code: %i\n", rc); + return -EINVAL; +} + +static inline struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv, + struct monwrite_hdr *monhdr) +{ + struct mon_buf *entry, *next; + + list_for_each_entry_safe(entry, next, &monpriv->list, list) + if (entry->hdr.applid == monhdr->applid && + entry->hdr.record_num == monhdr->record_num && + entry->hdr.version == monhdr->version && + entry->hdr.release == monhdr->release && + entry->hdr.mod_level == monhdr->mod_level) + return entry; + return NULL; +} + +static int monwrite_new_hdr(struct mon_private *monpriv) +{ + struct monwrite_hdr *monhdr = &monpriv->hdr; + struct mon_buf *monbuf; + int rc; + + if (monhdr->datalen > MONWRITE_MAX_DATALEN || + monhdr->mon_function > MONWRITE_START_CONFIG || + monhdr->hdrlen != sizeof(struct monwrite_hdr)) + return -EINVAL; + monbuf = monwrite_find_hdr(monpriv, monhdr); + if (monbuf) { + if (monhdr->mon_function == MONWRITE_STOP_INTERVAL) { + monhdr->datalen = monbuf->hdr.datalen; + rc = monwrite_diag(monhdr, monbuf->data, + APPLDATA_STOP_REC); + list_del(&monbuf->list); + monpriv->mon_buf_count--; + kfree(monbuf->data); + kfree(monbuf); + monbuf = NULL; + } + } else { + if (monpriv->mon_buf_count >= mon_max_bufs) + return -ENOSPC; + monbuf = kzalloc(sizeof(struct mon_buf), GFP_KERNEL); + if (!monbuf) + return -ENOMEM; + monbuf->data = kzalloc(monhdr->datalen, + GFP_KERNEL | GFP_DMA); + if (!monbuf->data) { + kfree(monbuf); + return -ENOMEM; + } + monbuf->hdr = *monhdr; + list_add_tail(&monbuf->list, &monpriv->list); + monpriv->mon_buf_count++; + } + monpriv->current_buf = monbuf; + return 0; +} + +static int monwrite_new_data(struct mon_private *monpriv) +{ + struct monwrite_hdr *monhdr = &monpriv->hdr; + struct mon_buf *monbuf = monpriv->current_buf; + int rc = 0; + + switch (monhdr->mon_function) { + case MONWRITE_START_INTERVAL: + if (!monbuf->diag_done) { + rc = monwrite_diag(monhdr, monbuf->data, + APPLDATA_START_INTERVAL_REC); + monbuf->diag_done = 1; + } + break; + case MONWRITE_START_CONFIG: + if (!monbuf->diag_done) { + rc = monwrite_diag(monhdr, monbuf->data, + APPLDATA_START_CONFIG_REC); + monbuf->diag_done = 1; + } + break; + case MONWRITE_GEN_EVENT: + rc = monwrite_diag(monhdr,
monbuf->data, + APPLDATA_GEN_EVENT_REC); + list_del(&monpriv->current_buf->list); + kfree(monpriv->current_buf->data); + kfree(monpriv->current_buf); + monpriv->current_buf = NULL; + break; + default: + /* monhdr->mon_function is checked in monwrite_new_hdr */ + BUG(); + } + return rc; +} + +/* + * file operations + */ + +static int monwrite_open(struct inode *inode, struct file *filp) +{ + struct mon_private *monpriv; + + monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL); + if (!monpriv) + return -ENOMEM; + INIT_LIST_HEAD(&monpriv->list); + monpriv->hdr_to_read = sizeof(monpriv->hdr); + filp->private_data = monpriv; + return nonseekable_open(inode, filp); +} + +static int monwrite_close(struct inode *inode, struct file *filp) +{ + struct mon_private *monpriv = filp->private_data; + struct mon_buf *entry, *next; + + list_for_each_entry_safe(entry, next, &monpriv->list, list) { + if (entry->hdr.mon_function != MONWRITE_GEN_EVENT) + monwrite_diag(&entry->hdr, entry->data, + APPLDATA_STOP_REC); + monpriv->mon_buf_count--; + list_del(&entry->list); + kfree(entry->data); + kfree(entry); + } + kfree(monpriv); + return 0; +} + +static ssize_t monwrite_write(struct file *filp, const char __user *data, + size_t count, loff_t *ppos) +{ + struct mon_private *monpriv = filp->private_data; + size_t len, written; + void *to; + int rc; + + for (written = 0; written < count; ) { + if (monpriv->hdr_to_read) { + len = min(count - written, monpriv->hdr_to_read); + to = (char *) &monpriv->hdr + + sizeof(monpriv->hdr) - monpriv->hdr_to_read; + if (copy_from_user(to, data + written, len)) { + rc = -EFAULT; + goto out_error; + } + monpriv->hdr_to_read -= len; + written += len; + if (monpriv->hdr_to_read > 0) + continue; + rc = monwrite_new_hdr(monpriv); + if (rc) + goto out_error; + monpriv->data_to_read = monpriv->current_buf ? 
+ monpriv->current_buf->hdr.datalen : 0; + } + + if (monpriv->data_to_read) { + len = min(count - written, monpriv->data_to_read); + to = monpriv->current_buf->data + + monpriv->hdr.datalen - monpriv->data_to_read; + if (copy_from_user(to, data + written, len)) { + rc = -EFAULT; + goto out_error; + } + monpriv->data_to_read -= len; + written += len; + if (monpriv->data_to_read > 0) + continue; + rc = monwrite_new_data(monpriv); + if (rc) + goto out_error; + } + monpriv->hdr_to_read = sizeof(monpriv->hdr); + } + return written; + +out_error: + monpriv->data_to_read = 0; + monpriv->hdr_to_read = sizeof(struct monwrite_hdr); + return rc; +} + +static struct file_operations monwrite_fops = { + .owner = THIS_MODULE, + .open = &monwrite_open, + .release = &monwrite_close, + .write = &monwrite_write, +}; + +static struct miscdevice mon_dev = { + .name = "monwriter", + .fops = &monwrite_fops, + .minor = MISC_DYNAMIC_MINOR, +}; + +/* + * module init/exit + */ + +static int __init mon_init(void) +{ + if (MACHINE_IS_VM) + return misc_register(&mon_dev); + else + return -ENODEV; +} + +static void __exit mon_exit(void) +{ + WARN_ON(misc_deregister(&mon_dev) != 0); +} + +module_init(mon_init); +module_exit(mon_exit); + +module_param_named(max_bufs, mon_max_bufs, int, 0644); +MODULE_PARM_DESC(max_bufs, "Maximum number of sample monitor data buffers " "that can be active at one time"); + +MODULE_AUTHOR("Melissa Howland <Melissa.Howland@us.ibm.com>"); +MODULE_DESCRIPTION("Character device driver for writing z/VM " "APPLDATA monitor records."); +MODULE_LICENSE("GPL"); diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c index 19762f3476aa..1678b6c757ec 100644 --- a/drivers/s390/char/vmcp.c +++ b/drivers/s390/char/vmcp.c @@ -1,6 +1,6 @@ /* * Copyright (C) 2004,2005 IBM Corporation - * Interface implementation for communication with the v/VM control program + * Interface implementation for communication with the z/VM control program * Author(s): Christian Borntraeger <cborntra@de.ibm.com> * * diff --git a/drivers/s390/char/vmcp.h b/drivers/s390/char/vmcp.h index 87389e730465..8a5975f3dad7 100644 --- a/drivers/s390/char/vmcp.h +++ b/drivers/s390/char/vmcp.h @@ -1,6 +1,6 @@ /* * Copyright (C) 2004, 2005 IBM Corporation - * Interface implementation for communication with the v/VM control program + * Interface implementation for communication with the z/VM control program * Version 1.0 * Author(s): Christian Borntraeger <cborntra@de.ibm.com> * diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index c28444af0919..3bb4e472d73d 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -256,7 +256,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data) /* trigger path verification. */ if (sch->driver && sch->driver->verify) sch->driver->verify(&sch->dev); - else if (sch->vpm == mask) + else if (sch->lpm == mask) goto out_unreg; out_unlock: spin_unlock_irq(&sch->lock); @@ -378,6 +378,7 @@ __s390_process_res_acc(struct subchannel_id schid, void *data) if (chp_mask == 0) { spin_unlock_irq(&sch->lock); + put_device(&sch->dev); return 0; } old_lpm = sch->lpm; @@ -392,7 +393,7 @@ __s390_process_res_acc(struct subchannel_id schid, void *data) spin_unlock_irq(&sch->lock); put_device(&sch->dev); - return (res_data->fla_mask == 0xffff) ?
-ENODEV : 0; + return 0; } diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 89320c1ad825..2e2882daefbb 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -16,11 +16,10 @@ #include <linux/device.h> #include <linux/kernel_stat.h> #include <linux/interrupt.h> - #include <asm/cio.h> #include <asm/delay.h> #include <asm/irq.h> - +#include <asm/setup.h> #include "airq.h" #include "cio.h" #include "css.h" @@ -192,7 +191,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */ sch->orb.pfch = sch->options.prefetch == 0; sch->orb.spnd = sch->options.suspend; sch->orb.ssic = sch->options.suspend && sch->options.inter; - sch->orb.lpm = (lpm != 0) ? (lpm & sch->opm) : sch->lpm; + sch->orb.lpm = (lpm != 0) ? lpm : sch->lpm; #ifdef CONFIG_64BIT /* * for 64 bit we always support 64 bit IDAWs with 4k page size only @@ -570,10 +569,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) sch->opm = 0xff; if (!cio_is_console(sch->schid)) chsc_validate_chpids(sch); - sch->lpm = sch->schib.pmcw.pim & - sch->schib.pmcw.pam & - sch->schib.pmcw.pom & - sch->opm; + sch->lpm = sch->schib.pmcw.pam & sch->opm; CIO_DEBUG(KERN_INFO, 0, "Detected device %04x on subchannel 0.%x.%04X" @@ -841,14 +837,26 @@ __clear_subchannel_easy(struct subchannel_id schid) return -EBUSY; } -extern void do_reipl(unsigned long devno); -static int -__shutdown_subchannel_easy(struct subchannel_id schid, void *data) +struct sch_match_id { + struct subchannel_id schid; + struct ccw_dev_id devid; + int rc; +}; + +static int __shutdown_subchannel_easy_and_match(struct subchannel_id schid, + void *data) { struct schib schib; + struct sch_match_id *match_id = data; if (stsch_err(schid, &schib)) return -ENXIO; + if (match_id && schib.pmcw.dnv && + (schib.pmcw.dev == match_id->devid.devno) && + (schid.ssid == match_id->devid.ssid)) { + match_id->schid = schid; + match_id->rc = 0; + } if (!schib.pmcw.ena) return 0; switch(__disable_subchannel_easy(schid, &schib)) { @@ -864,18 +872,71 @@ __shutdown_subchannel_easy(struct subchannel_id schid, void *data) return 0; } -void -clear_all_subchannels(void) +static int clear_all_subchannels_and_match(struct ccw_dev_id *devid, + struct subchannel_id *schid) { + struct sch_match_id match_id; + + match_id.devid = *devid; + match_id.rc = -ENODEV; local_irq_disable(); - for_each_subchannel(__shutdown_subchannel_easy, NULL); + for_each_subchannel(__shutdown_subchannel_easy_and_match, &match_id); + if (match_id.rc == 0) + *schid = match_id.schid; + return match_id.rc; } + +void clear_all_subchannels(void) +{ + local_irq_disable(); + for_each_subchannel(__shutdown_subchannel_easy_and_match, NULL); +} + +extern void do_reipl_asm(__u32 schid); + /* Make sure all subchannels are quiet before we re-ipl an lpar. */ -void -reipl(unsigned long devno) +void reipl_ccw_dev(struct ccw_dev_id *devid) { - clear_all_subchannels(); + struct subchannel_id schid; + + if (clear_all_subchannels_and_match(devid, &schid)) + panic("IPL Device not found\n"); cio_reset_channel_paths(); - do_reipl(devno); + do_reipl_asm(*((__u32*)&schid)); +} + +extern struct schib ipl_schib; + +/* + * ipl_save_parameters gets called very early. It is not allowed to access + * anything in the bss section at all. The bss section is not cleared yet, + * but may contain some ipl parameters written by the firmware. + * These parameters (if present) are copied to 0x2000. 
+ * To avoid corruption of the ipl parameters, all variables used by this + * function must reside on the stack or in the data section. + */ +void ipl_save_parameters(void) +{ + struct subchannel_id schid; + unsigned int *ipl_ptr; + void *src, *dst; + + schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID; + if (!schid.one) + return; + if (stsch(schid, &ipl_schib)) + return; + if (!ipl_schib.pmcw.dnv) + return; + ipl_devno = ipl_schib.pmcw.dev; + ipl_flags |= IPL_DEVNO_VALID; + if (!ipl_schib.pmcw.qf) + return; + ipl_flags |= IPL_PARMBLOCK_VALID; + ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR; + src = (void *)(unsigned long)*ipl_ptr; + dst = (void *)IPL_PARMBLOCK_ORIGIN; + memmove(dst, src, PAGE_SIZE); + *ipl_ptr = IPL_PARMBLOCK_ORIGIN; } diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 13eeea3d547f..7086a74e9871 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -182,136 +182,141 @@ get_subchannel_by_schid(struct subchannel_id schid) return dev ? to_subchannel(dev) : NULL; } - -static inline int -css_get_subchannel_status(struct subchannel *sch, struct subchannel_id schid) +static inline int css_get_subchannel_status(struct subchannel *sch) { struct schib schib; - int cc; - cc = stsch(schid, &schib); - if (cc) - return CIO_GONE; - if (!schib.pmcw.dnv) + if (stsch(sch->schid, &schib) || !schib.pmcw.dnv) return CIO_GONE; - if (sch && sch->schib.pmcw.dnv && - (schib.pmcw.dev != sch->schib.pmcw.dev)) + if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev)) return CIO_REVALIDATE; - if (sch && !sch->lpm) + if (!sch->lpm) return CIO_NO_PATH; return CIO_OPER; } - -static int -css_evaluate_subchannel(struct subchannel_id schid, int slow) + +static int css_evaluate_known_subchannel(struct subchannel *sch, int slow) { int event, ret, disc; - struct subchannel *sch; unsigned long flags; + enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action; - sch = get_subchannel_by_schid(schid); - disc = sch ? device_is_disconnected(sch) : 0; + spin_lock_irqsave(&sch->lock, flags); + disc = device_is_disconnected(sch); if (disc && slow) { - if (sch) - put_device(&sch->dev); - return 0; /* Already processed. */ + /* Disconnected devices are evaluated directly only.*/ + spin_unlock_irqrestore(&sch->lock, flags); + return 0; } - /* - * We've got a machine check, so running I/O won't get an interrupt. - * Kill any pending timers. - */ - if (sch) - device_kill_pending_timer(sch); + /* No interrupt after machine check - kill pending timers. */ + device_kill_pending_timer(sch); if (!disc && !slow) { - if (sch) - put_device(&sch->dev); - return -EAGAIN; /* Will be done on the slow path. */ + /* Non-disconnected devices are evaluated on the slow path. */ + spin_unlock_irqrestore(&sch->lock, flags); + return -EAGAIN; } - event = css_get_subchannel_status(sch, schid); + event = css_get_subchannel_status(sch); CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n", - schid.ssid, schid.sch_no, event, - sch?(disc?"disconnected":"normal"):"unknown", - slow?"slow":"fast"); + sch->schid.ssid, sch->schid.sch_no, event, + disc ? "disconnected" : "normal", + slow ? "slow" : "fast"); + /* Analyze subchannel status. */ + action = NONE; switch (event) { case CIO_NO_PATH: - case CIO_GONE: - if (!sch) { - /* Never used this subchannel. Ignore. */ - ret = 0; + if (disc) { + /* Check if paths have become available. */ + action = REPROBE; break; } - if (disc && (event == CIO_NO_PATH)) { - /* - * Uargh, hack again. 
Because we don't get a machine - * check on configure on, our path bookkeeping can - * be out of date here (it's fine while we only do - * logical varying or get chsc machine checks). We - * need to force reprobing or we might miss devices - * coming operational again. It won't do harm in real - * no path situations. - */ - spin_lock_irqsave(&sch->lock, flags); - device_trigger_reprobe(sch); + /* fall through */ + case CIO_GONE: + /* Prevent unwanted effects when opening lock. */ + cio_disable_subchannel(sch); + device_set_disconnected(sch); + /* Ask driver what to do with device. */ + action = UNREGISTER; + if (sch->driver && sch->driver->notify) { spin_unlock_irqrestore(&sch->lock, flags); - ret = 0; - break; - } - if (sch->driver && sch->driver->notify && - sch->driver->notify(&sch->dev, event)) { - cio_disable_subchannel(sch); - device_set_disconnected(sch); - ret = 0; - break; + ret = sch->driver->notify(&sch->dev, event); + spin_lock_irqsave(&sch->lock, flags); + if (ret) + action = NONE; } - /* - * Unregister subchannel. - * The device will be killed automatically. - */ - cio_disable_subchannel(sch); - css_sch_device_unregister(sch); - /* Reset intparm to zeroes. */ - sch->schib.pmcw.intparm = 0; - cio_modify(sch); - put_device(&sch->dev); - ret = 0; break; case CIO_REVALIDATE: - /* - * Revalidation machine check. Sick. - * We don't notify the driver since we have to throw the device - * away in any case. - */ - if (!disc) { - css_sch_device_unregister(sch); - /* Reset intparm to zeroes. */ - sch->schib.pmcw.intparm = 0; - cio_modify(sch); - put_device(&sch->dev); - ret = css_probe_device(schid); - } else { - /* - * We can't immediately deregister the disconnected - * device since it might block. - */ - spin_lock_irqsave(&sch->lock, flags); - device_trigger_reprobe(sch); - spin_unlock_irqrestore(&sch->lock, flags); - ret = 0; - } + /* Device will be removed, so no notify necessary. */ + if (disc) + /* Reprobe because immediate unregister might block. */ + action = REPROBE; + else + action = UNREGISTER_PROBE; break; case CIO_OPER: - if (disc) { - spin_lock_irqsave(&sch->lock, flags); + if (disc) /* Get device operational again. */ - device_trigger_reprobe(sch); - spin_unlock_irqrestore(&sch->lock, flags); - } - ret = sch ? 0 : css_probe_device(schid); + action = REPROBE; + break; + } + /* Perform action. */ + ret = 0; + switch (action) { + case UNREGISTER: + case UNREGISTER_PROBE: + /* Unregister device (will use subchannel lock). */ + spin_unlock_irqrestore(&sch->lock, flags); + css_sch_device_unregister(sch); + spin_lock_irqsave(&sch->lock, flags); + + /* Reset intparm to zeroes. */ + sch->schib.pmcw.intparm = 0; + cio_modify(sch); + + /* Probe if necessary. */ + if (action == UNREGISTER_PROBE) + ret = css_probe_device(sch->schid); + break; + case REPROBE: + device_trigger_reprobe(sch); break; default: - BUG(); - ret = 0; + break; + } + spin_unlock_irqrestore(&sch->lock, flags); + + return ret; +} + +static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) +{ + struct schib schib; + + if (!slow) { + /* Will be done on the slow path. */ + return -EAGAIN; } + if (stsch(schid, &schib) || !schib.pmcw.dnv) { + /* Unusable - ignore. 
*/ + return 0; + } + CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, " + "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER); + + return css_probe_device(schid); +} + +static int css_evaluate_subchannel(struct subchannel_id schid, int slow) +{ + struct subchannel *sch; + int ret; + + sch = get_subchannel_by_schid(schid); + if (sch) { + ret = css_evaluate_known_subchannel(sch, slow); + put_device(&sch->dev); + } else + ret = css_evaluate_new_subchannel(schid, slow); + return ret; } diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 646da5640401..688945662c15 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -52,53 +52,81 @@ ccw_bus_match (struct device * dev, struct device_driver * drv) return 1; } -/* - * Hotplugging interface for ccw devices. - * Heavily modeled on pci and usb hotplug. - */ -static int -ccw_uevent (struct device *dev, char **envp, int num_envp, - char *buffer, int buffer_size) +/* Store modalias string delimited by prefix/suffix string into buffer with + * specified size. Return length of resulting string (excluding trailing '\0') + * even if string doesn't fit buffer (snprintf semantics). */ +static int snprint_alias(char *buf, size_t size, const char *prefix, + struct ccw_device_id *id, const char *suffix) { - struct ccw_device *cdev = to_ccwdev(dev); - int i = 0; - int length = 0; + int len; - if (!cdev) - return -ENODEV; + len = snprintf(buf, size, "%sccw:t%04Xm%02X", prefix, id->cu_type, + id->cu_model); + if (len > size) + return len; + buf += len; + size -= len; - /* what we want to pass to /sbin/hotplug */ + if (id->dev_type != 0) + len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type, + id->dev_model, suffix); + else + len += snprintf(buf, size, "dtdm%s", suffix); - envp[i++] = buffer; - length += scnprintf(buffer, buffer_size - length, "CU_TYPE=%04X", - cdev->id.cu_type); - if ((buffer_size - length <= 0) || (i >= num_envp)) - return -ENOMEM; - ++length; - buffer += length; + return len; +} +/* Set up environment variables for ccw device uevent. Return 0 on success, + * non-zero otherwise. 
*/ +static int ccw_uevent(struct device *dev, char **envp, int num_envp, + char *buffer, int buffer_size) +{ + struct ccw_device *cdev = to_ccwdev(dev); + struct ccw_device_id *id = &(cdev->id); + int i = 0; + int len; + + /* CU_TYPE= */ + len = snprintf(buffer, buffer_size, "CU_TYPE=%04X", id->cu_type) + 1; + if (len > buffer_size || i >= num_envp) + return -ENOMEM; envp[i++] = buffer; - length += scnprintf(buffer, buffer_size - length, "CU_MODEL=%02X", - cdev->id.cu_model); - if ((buffer_size - length <= 0) || (i >= num_envp)) + buffer += len; + buffer_size -= len; + + /* CU_MODEL= */ + len = snprintf(buffer, buffer_size, "CU_MODEL=%02X", id->cu_model) + 1; + if (len > buffer_size || i >= num_envp) return -ENOMEM; - ++length; - buffer += length; + envp[i++] = buffer; + buffer += len; + buffer_size -= len; /* The next two can be zero, that's ok for us */ - envp[i++] = buffer; - length += scnprintf(buffer, buffer_size - length, "DEV_TYPE=%04X", - cdev->id.dev_type); - if ((buffer_size - length <= 0) || (i >= num_envp)) + /* DEV_TYPE= */ + len = snprintf(buffer, buffer_size, "DEV_TYPE=%04X", id->dev_type) + 1; + if (len > buffer_size || i >= num_envp) return -ENOMEM; - ++length; - buffer += length; + envp[i++] = buffer; + buffer += len; + buffer_size -= len; + /* DEV_MODEL= */ + len = snprintf(buffer, buffer_size, "DEV_MODEL=%02X", + (unsigned char) id->dev_model) + 1; + if (len > buffer_size || i >= num_envp) + return -ENOMEM; envp[i++] = buffer; - length += scnprintf(buffer, buffer_size - length, "DEV_MODEL=%02X", - cdev->id.dev_model); - if ((buffer_size - length <= 0) || (i >= num_envp)) + buffer += len; + buffer_size -= len; + + /* MODALIAS= */ + len = snprint_alias(buffer, buffer_size, "MODALIAS=", id, "") + 1; + if (len > buffer_size || i >= num_envp) return -ENOMEM; + envp[i++] = buffer; + buffer += len; + buffer_size -= len; envp[i] = NULL; @@ -251,16 +279,11 @@ modalias_show (struct device *dev, struct device_attribute *attr, char *buf) { struct ccw_device *cdev = to_ccwdev(dev); struct ccw_device_id *id = &(cdev->id); - int ret; + int len; - ret = sprintf(buf, "ccw:t%04Xm%02X", - id->cu_type, id->cu_model); - if (id->dev_type != 0) - ret += sprintf(buf + ret, "dt%04Xdm%02X\n", - id->dev_type, id->dev_model); - else - ret += sprintf(buf + ret, "dtdm\n"); - return ret; + len = snprint_alias(buf, PAGE_SIZE, "", id, "\n") + 1; + + return len > PAGE_SIZE ? PAGE_SIZE : len; } static ssize_t diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 35e162ba6d54..dace46fc32e8 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -232,10 +232,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) */ old_lpm = sch->lpm; stsch(sch->schid, &sch->schib); - sch->lpm = sch->schib.pmcw.pim & - sch->schib.pmcw.pam & - sch->schib.pmcw.pom & - sch->opm; + sch->lpm = sch->schib.pmcw.pam & sch->opm; /* Check since device may again have become not operational. */ if (!sch->schib.pmcw.dnv) state = DEV_STATE_NOT_OPER; @@ -267,6 +264,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) notify = 1; } /* fill out sense information */ + memset(&cdev->id, 0, sizeof(cdev->id)); cdev->id.cu_type = cdev->private->senseid.cu_type; cdev->id.cu_model = cdev->private->senseid.cu_model; cdev->id.dev_type = cdev->private->senseid.dev_type; @@ -454,8 +452,8 @@ ccw_device_sense_pgid_done(struct ccw_device *cdev, int err) return; } /* Start Path Group verification. */ - sch->vpm = 0; /* Start with no path groups set. 
*/ cdev->private->state = DEV_STATE_VERIFY; + cdev->private->flags.doverify = 0; ccw_device_verify_start(cdev); } @@ -555,7 +553,19 @@ ccw_device_nopath_notify(void *data) void ccw_device_verify_done(struct ccw_device *cdev, int err) { - cdev->private->flags.doverify = 0; + struct subchannel *sch; + + sch = to_subchannel(cdev->dev.parent); + /* Update schib - pom may have changed. */ + stsch(sch->schid, &sch->schib); + /* Update lpm with verified path mask. */ + sch->lpm = sch->vpm; + /* Repeat path verification? */ + if (cdev->private->flags.doverify) { + cdev->private->flags.doverify = 0; + ccw_device_verify_start(cdev); + return; + } switch (err) { case -EOPNOTSUPP: /* path grouping not supported, just set online. */ cdev->private->options.pgroup = 0; @@ -613,6 +623,7 @@ ccw_device_online(struct ccw_device *cdev) if (!cdev->private->options.pgroup) { /* Start initial path verification. */ cdev->private->state = DEV_STATE_VERIFY; + cdev->private->flags.doverify = 0; ccw_device_verify_start(cdev); return 0; } @@ -659,7 +670,6 @@ ccw_device_offline(struct ccw_device *cdev) /* Are we doing path grouping? */ if (!cdev->private->options.pgroup) { /* No, set state offline immediately. */ - sch->vpm = 0; ccw_device_done(cdev, DEV_STATE_OFFLINE); return 0; } @@ -780,6 +790,7 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event) } /* Device is idle, we can do the path verification. */ cdev->private->state = DEV_STATE_VERIFY; + cdev->private->flags.doverify = 0; ccw_device_verify_start(cdev); } @@ -1042,9 +1053,9 @@ ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event) } static void -ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event) +ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event) { - /* When the I/O has terminated, we have to start verification. */ + /* Start verification after current task finished. */ cdev->private->flags.doverify = 1; } @@ -1110,10 +1121,7 @@ device_trigger_reprobe(struct subchannel *sch) * The pim, pam, pom values may not be accurate, but they are the best * we have before performing device selection :/ */ - sch->lpm = sch->schib.pmcw.pim & - sch->schib.pmcw.pam & - sch->schib.pmcw.pom & - sch->opm; + sch->lpm = sch->schib.pmcw.pam & sch->opm; /* Re-set some bits in the pmcw that were lost. 
*/ sch->schib.pmcw.isc = 3; sch->schib.pmcw.csense = 1; @@ -1237,7 +1245,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { [DEV_EVENT_NOTOPER] = ccw_device_online_notoper, [DEV_EVENT_INTERRUPT] = ccw_device_verify_irq, [DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout, - [DEV_EVENT_VERIFY] = ccw_device_nop, + [DEV_EVENT_VERIFY] = ccw_device_delay_verify, }, [DEV_STATE_ONLINE] = { [DEV_EVENT_NOTOPER] = ccw_device_online_notoper, @@ -1280,7 +1288,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { [DEV_EVENT_NOTOPER] = ccw_device_online_notoper, [DEV_EVENT_INTERRUPT] = ccw_device_wait4io_irq, [DEV_EVENT_TIMEOUT] = ccw_device_wait4io_timeout, - [DEV_EVENT_VERIFY] = ccw_device_wait4io_verify, + [DEV_EVENT_VERIFY] = ccw_device_delay_verify, }, [DEV_STATE_QUIESCE] = { [DEV_EVENT_NOTOPER] = ccw_device_quiesce_done, @@ -1293,7 +1301,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = { [DEV_EVENT_NOTOPER] = ccw_device_nop, [DEV_EVENT_INTERRUPT] = ccw_device_start_id, [DEV_EVENT_TIMEOUT] = ccw_device_bug, - [DEV_EVENT_VERIFY] = ccw_device_nop, + [DEV_EVENT_VERIFY] = ccw_device_start_id, }, [DEV_STATE_DISCONNECTED_SENSE_ID] = { [DEV_EVENT_NOTOPER] = ccw_device_recog_notoper, diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index 9e3de0bd59b5..93a897eebfff 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -96,6 +96,12 @@ ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, ret = cio_set_options (sch, flags); if (ret) return ret; + /* Adjust requested path mask to exclude varied-off paths. */ + if (lpm) { + lpm &= sch->opm; + if (lpm == 0) + return -EACCES; + } ret = cio_start_key (sch, cpa, lpm, key); if (ret == 0) cdev->private->intparm = intparm; @@ -250,7 +256,7 @@ ccw_device_get_path_mask(struct ccw_device *cdev) if (!sch) return 0; else - return sch->vpm; + return sch->lpm; } static void @@ -304,7 +310,7 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, _ sch = to_subchannel(cdev->dev.parent); do { ret = cio_start (sch, ccw, lpm); - if ((ret == -EBUSY) || (ret == -EACCES)) { + if (ret == -EBUSY) { /* Try again later. */ spin_unlock_irq(&sch->lock); msleep(10); @@ -433,6 +439,13 @@ read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lp if (!ciw || ciw->cmd == 0) return -EOPNOTSUPP; + /* Adjust requested path mask to exclude varied-off paths. */ + if (lpm) { + lpm &= sch->opm; + if (lpm == 0) + return -EACCES; + } + rcd_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); if (!rcd_ccw) return -ENOMEM; diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c index 1693a102dcfe..8ca2d078848c 100644 --- a/drivers/s390/cio/device_pgid.c +++ b/drivers/s390/cio/device_pgid.c @@ -245,18 +245,17 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func) memset(&cdev->private->irb, 0, sizeof(struct irb)); /* Try multiple times. */ - ret = -ENODEV; + ret = -EACCES; if (cdev->private->iretry > 0) { cdev->private->iretry--; ret = cio_start (sch, cdev->private->iccws, cdev->private->imask); - /* ret is 0, -EBUSY, -EACCES or -ENODEV */ - if ((ret != -EACCES) && (ret != -ENODEV)) + /* We expect an interrupt in case of success or busy + * indication. */ + if ((ret == 0) || (ret == -EBUSY)) return ret; } - /* PGID command failed on this path. Switch it off. */ - sch->lpm &= ~cdev->private->imask; - sch->vpm &= ~cdev->private->imask; + /* PGID command failed on this path.
*/ CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel " "0.%x.%04x, lpm %02X, became 'not operational'\n", cdev->private->devno, sch->schid.ssid, @@ -286,18 +285,17 @@ static int __ccw_device_do_nop(struct ccw_device *cdev) memset(&cdev->private->irb, 0, sizeof(struct irb)); /* Try multiple times. */ - ret = -ENODEV; + ret = -EACCES; if (cdev->private->iretry > 0) { cdev->private->iretry--; ret = cio_start (sch, cdev->private->iccws, cdev->private->imask); - /* ret is 0, -EBUSY, -EACCES or -ENODEV */ - if ((ret != -EACCES) && (ret != -ENODEV)) + /* We expect an interrupt in case of success or busy + * indication. */ + if ((ret == 0) || (ret == -EBUSY)) return ret; } - /* nop command failed on this path. Switch it off. */ - sch->lpm &= ~cdev->private->imask; - sch->vpm &= ~cdev->private->imask; + /* nop command failed on this path. */ CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel " "0.%x.%04x, lpm %02X, became 'not operational'\n", cdev->private->devno, sch->schid.ssid, @@ -372,27 +370,32 @@ static void __ccw_device_verify_start(struct ccw_device *cdev) { struct subchannel *sch; - __u8 imask, func; + __u8 func; int ret; sch = to_subchannel(cdev->dev.parent); - while (sch->vpm != sch->lpm) { - /* Find first unequal bit in vpm vs. lpm */ - for (imask = 0x80; imask != 0; imask >>= 1) - if ((sch->vpm & imask) != (sch->lpm & imask)) - break; - cdev->private->imask = imask; + /* Repeat for all paths. */ + for (; cdev->private->imask; cdev->private->imask >>= 1, + cdev->private->iretry = 5) { + if ((cdev->private->imask & sch->schib.pmcw.pam) == 0) + /* Path not available, try next. */ + continue; if (cdev->private->options.pgroup) { - func = (sch->vpm & imask) ? - SPID_FUNC_RESIGN : SPID_FUNC_ESTABLISH; + if (sch->opm & cdev->private->imask) + func = SPID_FUNC_ESTABLISH; + else + func = SPID_FUNC_RESIGN; ret = __ccw_device_do_pgid(cdev, func); } else ret = __ccw_device_do_nop(cdev); + /* We expect an interrupt in case of success or busy + * indication. */ if (ret == 0 || ret == -EBUSY) return; - cdev->private->iretry = 5; + /* Permanent path failure, try next. */ } - ccw_device_verify_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV); + /* Done with all paths. */ + ccw_device_verify_done(cdev, (sch->vpm != 0) ? 0 : -ENODEV); } /* @@ -421,14 +424,14 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event) else ret = __ccw_device_check_nop(cdev); memset(&cdev->private->irb, 0, sizeof(struct irb)); + switch (ret) { /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */ case 0: - /* Establish or Resign Path Group done. Update vpm. */ - if ((sch->lpm & cdev->private->imask) != 0) - sch->vpm |= cdev->private->imask; - else - sch->vpm &= ~cdev->private->imask; + /* Path verification ccw finished successfully, update lpm. */ + sch->vpm |= sch->opm & cdev->private->imask; + /* Go on with next path. */ + cdev->private->imask >>= 1; cdev->private->iretry = 5; __ccw_device_verify_start(cdev); break; @@ -441,6 +444,10 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event) cdev->private->options.pgroup = 0; else cdev->private->flags.pgid_single = 1; + /* Retry */ + sch->vpm = 0; + cdev->private->imask = 0x80; + cdev->private->iretry = 5; /* fall through. */ case -EAGAIN: /* Try again. */ __ccw_device_verify_start(cdev); @@ -449,8 +456,7 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event) ccw_device_verify_done(cdev, -ETIME); break; case -EACCES: /* channel is not operational. 
*/ - sch->lpm &= ~cdev->private->imask; - sch->vpm &= ~cdev->private->imask; + cdev->private->imask >>= 1; cdev->private->iretry = 5; __ccw_device_verify_start(cdev); break; @@ -463,19 +469,17 @@ ccw_device_verify_start(struct ccw_device *cdev) struct subchannel *sch = to_subchannel(cdev->dev.parent); cdev->private->flags.pgid_single = 0; + cdev->private->imask = 0x80; cdev->private->iretry = 5; - /* - * Update sch->lpm with current values to catch paths becoming - * available again. - */ + + /* Start with empty vpm. */ + sch->vpm = 0; + + /* Get current pam. */ if (stsch(sch->schid, &sch->schib)) { ccw_device_verify_done(cdev, -ENODEV); return; } - sch->lpm = sch->schib.pmcw.pim & - sch->schib.pmcw.pam & - sch->schib.pmcw.pom & - sch->opm; __ccw_device_verify_start(cdev); } @@ -524,7 +528,6 @@ ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event) switch (ret) { /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */ case 0: /* disband successful. */ - sch->vpm = 0; ccw_device_disband_done(cdev, ret); break; case -EOPNOTSUPP: diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c index 7c93a8798d23..cde822d8b5c8 100644 --- a/drivers/s390/cio/qdio.c +++ b/drivers/s390/cio/qdio.c @@ -115,7 +115,7 @@ qdio_min(int a,int b) static inline __u64 qdio_get_micros(void) { - return (get_clock() >> 10); /* time>>12 is microseconds */ + return (get_clock() >> 12); /* time>>12 is microseconds */ } /* @@ -1129,7 +1129,7 @@ out: #ifdef QDIO_USE_PROCESSING_STATE if (last_position>=0) - set_slsb(q, &last_position, SLSB_P_INPUT_NOT_INIT, &count); + set_slsb(q, &last_position, SLSB_P_INPUT_PROCESSING, &count); #endif /* QDIO_USE_PROCESSING_STATE */ QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int)); diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index ceb3ab31ee08..124569362f02 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -191,49 +191,49 @@ enum qdio_irq_states { #if QDIO_VERBOSE_LEVEL>8 #define QDIO_PRINT_STUPID(x...) printk( KERN_DEBUG QDIO_PRINTK_HEADER x) #else -#define QDIO_PRINT_STUPID(x...) +#define QDIO_PRINT_STUPID(x...) do { } while (0) #endif #if QDIO_VERBOSE_LEVEL>7 #define QDIO_PRINT_ALL(x...) printk( QDIO_PRINTK_HEADER x) #else -#define QDIO_PRINT_ALL(x...) +#define QDIO_PRINT_ALL(x...) do { } while (0) #endif #if QDIO_VERBOSE_LEVEL>6 #define QDIO_PRINT_INFO(x...) printk( QDIO_PRINTK_HEADER x) #else -#define QDIO_PRINT_INFO(x...) +#define QDIO_PRINT_INFO(x...) do { } while (0) #endif #if QDIO_VERBOSE_LEVEL>5 #define QDIO_PRINT_WARN(x...) printk( QDIO_PRINTK_HEADER x) #else -#define QDIO_PRINT_WARN(x...) +#define QDIO_PRINT_WARN(x...) do { } while (0) #endif #if QDIO_VERBOSE_LEVEL>4 #define QDIO_PRINT_ERR(x...) printk( QDIO_PRINTK_HEADER x) #else -#define QDIO_PRINT_ERR(x...) +#define QDIO_PRINT_ERR(x...) do { } while (0) #endif #if QDIO_VERBOSE_LEVEL>3 #define QDIO_PRINT_CRIT(x...) printk( QDIO_PRINTK_HEADER x) #else -#define QDIO_PRINT_CRIT(x...) +#define QDIO_PRINT_CRIT(x...) do { } while (0) #endif #if QDIO_VERBOSE_LEVEL>2 #define QDIO_PRINT_ALERT(x...) printk( QDIO_PRINTK_HEADER x) #else -#define QDIO_PRINT_ALERT(x...) +#define QDIO_PRINT_ALERT(x...) do { } while (0) #endif #if QDIO_VERBOSE_LEVEL>1 #define QDIO_PRINT_EMERG(x...) printk( QDIO_PRINTK_HEADER x) #else -#define QDIO_PRINT_EMERG(x...) +#define QDIO_PRINT_EMERG(x...) 
do { } while (0) #endif #define HEXDUMP16(importance,header,ptr) \ diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile index 15edebbead7f..f0a12d2eb780 100644 --- a/drivers/s390/crypto/Makefile +++ b/drivers/s390/crypto/Makefile @@ -2,5 +2,16 @@ # S/390 crypto devices # -z90crypt-objs := z90main.o z90hardware.o -obj-$(CONFIG_Z90CRYPT) += z90crypt.o +ifdef CONFIG_ZCRYPT_MONOLITHIC + +z90crypt-objs := zcrypt_mono.o ap_bus.o zcrypt_api.o \ + zcrypt_pcica.o zcrypt_pcicc.o zcrypt_pcixcc.o zcrypt_cex2a.o +obj-$(CONFIG_ZCRYPT) += z90crypt.o + +else + +ap-objs := ap_bus.o +obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o zcrypt_pcicc.o zcrypt_pcixcc.o +obj-$(CONFIG_ZCRYPT) += zcrypt_pcica.o zcrypt_cex2a.o + +endif diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c new file mode 100644 index 000000000000..6ed0985c0c91 --- /dev/null +++ b/drivers/s390/crypto/ap_bus.c @@ -0,0 +1,1221 @@ +/* + * linux/drivers/s390/crypto/ap_bus.c + * + * Copyright (C) 2006 IBM Corporation + * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> + * Martin Schwidefsky <schwidefsky@de.ibm.com> + * Ralph Wuerthner <rwuerthn@de.ibm.com> + * + * Adjunct processor bus. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> +#include <linux/notifier.h> +#include <linux/kthread.h> +#include <linux/mutex.h> +#include <asm/s390_rdev.h> + +#include "ap_bus.h" + +/* Some prototypes. */ +static void ap_scan_bus(void *); +static void ap_poll_all(unsigned long); +static void ap_poll_timeout(unsigned long); +static int ap_poll_thread_start(void); +static void ap_poll_thread_stop(void); + +/** + * Module description. + */ +MODULE_AUTHOR("IBM Corporation"); +MODULE_DESCRIPTION("Adjunct Processor Bus driver, " + "Copyright 2006 IBM Corporation"); +MODULE_LICENSE("GPL"); + +/** + * Module parameter + */ +int ap_domain_index = -1; /* Adjunct Processor Domain Index */ +module_param_named(domain, ap_domain_index, int, 0000); +MODULE_PARM_DESC(domain, "domain index for ap devices"); +EXPORT_SYMBOL(ap_domain_index); + +static int ap_thread_flag = 1; +module_param_named(poll_thread, ap_thread_flag, int, 0000); +MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 1 (on)."); + +static struct device *ap_root_device = NULL; + +/** + * Workqueue & timer for bus rescan. + */ +static struct workqueue_struct *ap_work_queue; +static struct timer_list ap_config_timer; +static int ap_config_time = AP_CONFIG_TIME; +static DECLARE_WORK(ap_config_work, ap_scan_bus, NULL); + +/** + * Tasklet & timer for AP request polling. 
+ */ +static struct timer_list ap_poll_timer = TIMER_INITIALIZER(ap_poll_timeout,0,0); +static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0); +static atomic_t ap_poll_requests = ATOMIC_INIT(0); +static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait); +static struct task_struct *ap_poll_kthread = NULL; +static DEFINE_MUTEX(ap_poll_thread_mutex); + +/** + * Test if ap instructions are available. + * + * Returns 0 if the ap instructions are installed. + */ +static inline int ap_instructions_available(void) +{ + register unsigned long reg0 asm ("0") = AP_MKQID(0,0); + register unsigned long reg1 asm ("1") = -ENODEV; + register unsigned long reg2 asm ("2") = 0UL; + + asm volatile( + " .long 0xb2af0000\n" /* PQAP(TAPQ) */ + "0: la %1,0\n" + "1:\n" + EX_TABLE(0b, 1b) + : "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" ); + return reg1; +} + +/** + * Test adjunct processor queue. + * @qid: the ap queue number + * @queue_depth: pointer to queue depth value + * @device_type: pointer to device type value + * + * Returns ap queue status structure. + */ +static inline struct ap_queue_status +ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type) +{ + register unsigned long reg0 asm ("0") = qid; + register struct ap_queue_status reg1 asm ("1"); + register unsigned long reg2 asm ("2") = 0UL; + + asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */ + : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc"); + *device_type = (int) (reg2 >> 24); + *queue_depth = (int) (reg2 & 0xff); + return reg1; +} + +/** + * Reset adjunct processor queue. + * @qid: the ap queue number + * + * Returns ap queue status structure. + */ +static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid) +{ + register unsigned long reg0 asm ("0") = qid | 0x01000000UL; + register struct ap_queue_status reg1 asm ("1"); + register unsigned long reg2 asm ("2") = 0UL; + + asm volatile( + ".long 0xb2af0000" /* PQAP(RAPQ) */ + : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc"); + return reg1; +} + +/** + * Send message to adjunct processor queue. + * @qid: the ap queue number + * @psmid: the program supplied message identifier + * @msg: the message text + * @length: the message length + * + * Returns ap queue status structure. + * + * Condition code 1 on NQAP can't happen because the L bit is 1. + * + * Condition code 2 on NQAP also means the send is incomplete, + * because a segment boundary was reached. The NQAP is repeated. + */ +static inline struct ap_queue_status +__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length) +{ + typedef struct { char _[length]; } msgblock; + register unsigned long reg0 asm ("0") = qid | 0x40000000UL; + register struct ap_queue_status reg1 asm ("1"); + register unsigned long reg2 asm ("2") = (unsigned long) msg; + register unsigned long reg3 asm ("3") = (unsigned long) length; + register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32); + register unsigned long reg5 asm ("5") = (unsigned int) psmid; + + asm volatile ( + "0: .long 0xb2ad0042\n" /* NQAP */ + " brc 2,0b" + : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3) + : "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg) + : "cc" ); + return reg1; +} + +int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length) +{ + struct ap_queue_status status; + + status = __ap_send(qid, psmid, msg, length); + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + return 0; + case AP_RESPONSE_Q_FULL: + return -EBUSY; + default: /* Device is gone.
*/ + return -ENODEV; + } +} +EXPORT_SYMBOL(ap_send); + +/** + * Receive message from adjunct processor queue. + * @qid: the ap queue number + * @psmid: pointer to program supplied message identifier + * @msg: the message text + * @length: the message length + * + * Returns ap queue status structure. + * + * Condition code 1 on DQAP means the receive has taken place + * but only partially. The response is incomplete, hence the + * DQAP is repeated. + * + * Condition code 2 on DQAP also means the receive is incomplete, + * this time because a segment boundary was reached. Again, the + * DQAP is repeated. + * + * Note that gpr2 is used by the DQAP instruction to keep track of + * any 'residual' length, in case the instruction gets interrupted. + * Hence it gets zeroed before the instruction. + */ +static inline struct ap_queue_status +__ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length) +{ + typedef struct { char _[length]; } msgblock; + register unsigned long reg0 asm("0") = qid | 0x80000000UL; + register struct ap_queue_status reg1 asm ("1"); + register unsigned long reg2 asm("2") = 0UL; + register unsigned long reg4 asm("4") = (unsigned long) msg; + register unsigned long reg5 asm("5") = (unsigned long) length; + register unsigned long reg6 asm("6") = 0UL; + register unsigned long reg7 asm("7") = 0UL; + + + asm volatile( + "0: .long 0xb2ae0064\n" + " brc 6,0b\n" + : "+d" (reg0), "=d" (reg1), "+d" (reg2), + "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7), + "=m" (*(msgblock *) msg) : : "cc" ); + *psmid = (((unsigned long long) reg6) << 32) + reg7; + return reg1; +} + +int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length) +{ + struct ap_queue_status status; + + status = __ap_recv(qid, psmid, msg, length); + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + return 0; + case AP_RESPONSE_NO_PENDING_REPLY: + if (status.queue_empty) + return -ENOENT; + return -EBUSY; + default: + return -ENODEV; + } +} +EXPORT_SYMBOL(ap_recv); + +/** + * Check if an AP queue is available. The test is repeated up to + * AP_MAX_RESET times. + * @qid: the ap queue number + * @queue_depth: pointer to queue depth value + * @device_type: pointer to device type value + */ +static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type) +{ + struct ap_queue_status status; + int t_depth, t_device_type, rc, i; + + rc = -EBUSY; + for (i = 0; i < AP_MAX_RESET; i++) { + status = ap_test_queue(qid, &t_depth, &t_device_type); + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + *queue_depth = t_depth + 1; + *device_type = t_device_type; + rc = 0; + break; + case AP_RESPONSE_Q_NOT_AVAIL: + rc = -ENODEV; + break; + case AP_RESPONSE_RESET_IN_PROGRESS: + break; + case AP_RESPONSE_DECONFIGURED: + rc = -ENODEV; + break; + case AP_RESPONSE_CHECKSTOPPED: + rc = -ENODEV; + break; + case AP_RESPONSE_BUSY: + break; + default: + BUG(); + } + if (rc != -EBUSY) + break; + if (i < AP_MAX_RESET - 1) + udelay(5); + } + return rc; +} + +/** + * Reset an AP queue and wait for it to become available again.
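+ * The reset state is re-checked with ap_test_queue() up to AP_MAX_RESET + * times, with a 5 microsecond delay between the checks.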
+ * @qid: the ap queue number + */ +static int ap_init_queue(ap_qid_t qid) +{ + struct ap_queue_status status; + int rc, dummy, i; + + rc = -ENODEV; + status = ap_reset_queue(qid); + for (i = 0; i < AP_MAX_RESET; i++) { + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + if (status.queue_empty) + rc = 0; + break; + case AP_RESPONSE_Q_NOT_AVAIL: + case AP_RESPONSE_DECONFIGURED: + case AP_RESPONSE_CHECKSTOPPED: + i = AP_MAX_RESET; /* return with -ENODEV */ + break; + case AP_RESPONSE_RESET_IN_PROGRESS: + case AP_RESPONSE_BUSY: + default: + break; + } + if (rc != -ENODEV) + break; + if (i < AP_MAX_RESET - 1) { + udelay(5); + status = ap_test_queue(qid, &dummy, &dummy); + } + } + return rc; +} + +/** + * AP device related attributes. + */ +static ssize_t ap_hwtype_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ap_device *ap_dev = to_ap_dev(dev); + return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type); +} +static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL); + +static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct ap_device *ap_dev = to_ap_dev(dev); + return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth); +} +static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL); + +static ssize_t ap_request_count_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ap_device *ap_dev = to_ap_dev(dev); + int rc; + + spin_lock_bh(&ap_dev->lock); + rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count); + spin_unlock_bh(&ap_dev->lock); + return rc; +} + +static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL); + +static ssize_t ap_modalias_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type); +} + +static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL); + +static struct attribute *ap_dev_attrs[] = { + &dev_attr_hwtype.attr, + &dev_attr_depth.attr, + &dev_attr_request_count.attr, + &dev_attr_modalias.attr, + NULL +}; +static struct attribute_group ap_dev_attr_group = { + .attrs = ap_dev_attrs +}; + +/** + * AP bus driver registration/unregistration. + */ +static int ap_bus_match(struct device *dev, struct device_driver *drv) +{ + struct ap_device *ap_dev = to_ap_dev(dev); + struct ap_driver *ap_drv = to_ap_drv(drv); + struct ap_device_id *id; + + /** + * Compare device type of the device with the list of + * supported types of the device_driver. + */ + for (id = ap_drv->ids; id->match_flags; id++) { + if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) && + (id->dev_type != ap_dev->device_type)) + continue; + return 1; + } + return 0; +} + +/** + * uevent function for AP devices. It sets up a single environment + * variable DEV_TYPE which contains the hardware device type. + */ +static int ap_uevent (struct device *dev, char **envp, int num_envp, + char *buffer, int buffer_size) +{ + struct ap_device *ap_dev = to_ap_dev(dev); + int length; + + if (!ap_dev) + return -ENODEV; + + /* Set up DEV_TYPE environment variable. 
*/ + envp[0] = buffer; + length = scnprintf(buffer, buffer_size, "DEV_TYPE=%04X", + ap_dev->device_type); + if (buffer_size - length <= 0) + return -ENOMEM; + envp[1] = 0; + return 0; +} + +static struct bus_type ap_bus_type = { + .name = "ap", + .match = &ap_bus_match, + .uevent = &ap_uevent, +}; + +static int ap_device_probe(struct device *dev) +{ + struct ap_device *ap_dev = to_ap_dev(dev); + struct ap_driver *ap_drv = to_ap_drv(dev->driver); + int rc; + + ap_dev->drv = ap_drv; + rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; + if (rc) + ap_dev->unregistered = 1; + return rc; +} + +/** + * Flush all requests from the request/pending queue of an AP device. + * @ap_dev: pointer to the AP device. + */ +static inline void __ap_flush_queue(struct ap_device *ap_dev) +{ + struct ap_message *ap_msg, *next; + + list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) { + list_del_init(&ap_msg->list); + ap_dev->pendingq_count--; + ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV)); + } + list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) { + list_del_init(&ap_msg->list); + ap_dev->requestq_count--; + ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV)); + } +} + +void ap_flush_queue(struct ap_device *ap_dev) +{ + spin_lock_bh(&ap_dev->lock); + __ap_flush_queue(ap_dev); + spin_unlock_bh(&ap_dev->lock); +} +EXPORT_SYMBOL(ap_flush_queue); + +static int ap_device_remove(struct device *dev) +{ + struct ap_device *ap_dev = to_ap_dev(dev); + struct ap_driver *ap_drv = ap_dev->drv; + + spin_lock_bh(&ap_dev->lock); + __ap_flush_queue(ap_dev); + /** + * Set ->unregistered to 1 while holding the lock. This prevents + * new messages from being put on the queue from now on. + */ + ap_dev->unregistered = 1; + spin_unlock_bh(&ap_dev->lock); + if (ap_drv->remove) + ap_drv->remove(ap_dev); + return 0; +} + +int ap_driver_register(struct ap_driver *ap_drv, struct module *owner, + char *name) +{ + struct device_driver *drv = &ap_drv->driver; + + drv->bus = &ap_bus_type; + drv->probe = ap_device_probe; + drv->remove = ap_device_remove; + drv->owner = owner; + drv->name = name; + return driver_register(drv); +} +EXPORT_SYMBOL(ap_driver_register); + +void ap_driver_unregister(struct ap_driver *ap_drv) +{ + driver_unregister(&ap_drv->driver); +} +EXPORT_SYMBOL(ap_driver_unregister); + +/** + * AP bus attributes. + */ +static ssize_t ap_domain_show(struct bus_type *bus, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index); +} + +static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL); + +static ssize_t ap_config_time_show(struct bus_type *bus, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time); +} + +static ssize_t ap_config_time_store(struct bus_type *bus, + const char *buf, size_t count) +{ + int time; + + if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120) + return -EINVAL; + ap_config_time = time; + if (!timer_pending(&ap_config_timer) || + !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) { + ap_config_timer.expires = jiffies + ap_config_time * HZ; + add_timer(&ap_config_timer); + } + return count; +} + +static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store); + +static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ?
1 : 0); +} + +static ssize_t ap_poll_thread_store(struct bus_type *bus, + const char *buf, size_t count) +{ + int flag, rc; + + if (sscanf(buf, "%d\n", &flag) != 1) + return -EINVAL; + if (flag) { + rc = ap_poll_thread_start(); + if (rc) + return rc; + } + else + ap_poll_thread_stop(); + return count; +} + +static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store); + +static struct bus_attribute *const ap_bus_attrs[] = { + &bus_attr_ap_domain, + &bus_attr_config_time, + &bus_attr_poll_thread, + NULL +}; + +/** + * Pick one of the 16 ap domains. + */ +static inline int ap_select_domain(void) +{ + int queue_depth, device_type, count, max_count, best_domain; + int rc, i, j; + + /** + * We want to use a single domain. Either the one specified with + * the "domain=" parameter or the domain with the maximum number + * of devices. + */ + if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS) + /* Domain has already been selected. */ + return 0; + best_domain = -1; + max_count = 0; + for (i = 0; i < AP_DOMAINS; i++) { + count = 0; + for (j = 0; j < AP_DEVICES; j++) { + ap_qid_t qid = AP_MKQID(j, i); + rc = ap_query_queue(qid, &queue_depth, &device_type); + if (rc) + continue; + count++; + } + if (count > max_count) { + max_count = count; + best_domain = i; + } + } + if (best_domain >= 0){ + ap_domain_index = best_domain; + return 0; + } + return -ENODEV; +} + +/** + * Find the device type if query queue returned a device type of 0. + * @ap_dev: pointer to the AP device. + */ +static int ap_probe_device_type(struct ap_device *ap_dev) +{ + static unsigned char msg[] = { + 0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50, + 0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01, + 0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00, + 0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00, + 0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20, + 0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53, + 0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22, + 0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00, + 0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88, + 0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66, + 0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44, + 0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22, + 0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00, + 0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77, + 0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00, + 0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00, + 0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01, + 0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c, + 0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68, + 0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66, + 0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0, + 0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8, + 0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04, + 0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57, + 0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d, + }; + 
struct ap_queue_status status; + unsigned long long psmid; + char *reply; + int rc, i; + + reply = (void *) get_zeroed_page(GFP_KERNEL); + if (!reply) { + rc = -ENOMEM; + goto out; + } + + status = __ap_send(ap_dev->qid, 0x0102030405060708ULL, + msg, sizeof(msg)); + if (status.response_code != AP_RESPONSE_NORMAL) { + rc = -ENODEV; + goto out_free; + } + + /* Wait for the test message to complete. */ + for (i = 0; i < 6; i++) { + mdelay(300); + status = __ap_recv(ap_dev->qid, &psmid, reply, 4096); + if (status.response_code == AP_RESPONSE_NORMAL && + psmid == 0x0102030405060708ULL) + break; + } + if (i < 6) { + /* Got an answer. */ + if (reply[0] == 0x00 && reply[1] == 0x86) + ap_dev->device_type = AP_DEVICE_TYPE_PCICC; + else + ap_dev->device_type = AP_DEVICE_TYPE_PCICA; + rc = 0; + } else + rc = -ENODEV; + +out_free: + free_page((unsigned long) reply); +out: + return rc; +} + +/** + * Scan the ap bus for new devices. + */ +static int __ap_scan_bus(struct device *dev, void *data) +{ + return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data; +} + +static void ap_device_release(struct device *dev) +{ + struct ap_device *ap_dev = to_ap_dev(dev); + + kfree(ap_dev); +} + +static void ap_scan_bus(void *data) +{ + struct ap_device *ap_dev; + struct device *dev; + ap_qid_t qid; + int queue_depth, device_type; + int rc, i; + + if (ap_select_domain() != 0) + return; + for (i = 0; i < AP_DEVICES; i++) { + qid = AP_MKQID(i, ap_domain_index); + dev = bus_find_device(&ap_bus_type, NULL, + (void *)(unsigned long)qid, + __ap_scan_bus); + if (dev) { + put_device(dev); + continue; + } + rc = ap_query_queue(qid, &queue_depth, &device_type); + if (rc) + continue; + rc = ap_init_queue(qid); + if (rc) + continue; + ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL); + if (!ap_dev) + break; + ap_dev->qid = qid; + ap_dev->queue_depth = queue_depth; + spin_lock_init(&ap_dev->lock); + INIT_LIST_HEAD(&ap_dev->pendingq); + INIT_LIST_HEAD(&ap_dev->requestq); + if (device_type == 0) + ap_probe_device_type(ap_dev); + else + ap_dev->device_type = device_type; + + ap_dev->device.bus = &ap_bus_type; + ap_dev->device.parent = ap_root_device; + snprintf(ap_dev->device.bus_id, BUS_ID_SIZE, "card%02x", + AP_QID_DEVICE(ap_dev->qid)); + ap_dev->device.release = ap_device_release; + rc = device_register(&ap_dev->device); + if (rc) { + kfree(ap_dev); + continue; + } + /* Add device attributes. */ + rc = sysfs_create_group(&ap_dev->device.kobj, + &ap_dev_attr_group); + if (rc) + device_unregister(&ap_dev->device); + } +} + +static void +ap_config_timeout(unsigned long ptr) +{ + queue_work(ap_work_queue, &ap_config_work); + ap_config_timer.expires = jiffies + ap_config_time * HZ; + add_timer(&ap_config_timer); +} + +/** + * Set up the timer to run the poll tasklet + */ +static inline void ap_schedule_poll_timer(void) +{ + if (timer_pending(&ap_poll_timer)) + return; + mod_timer(&ap_poll_timer, jiffies + AP_POLL_TIME); +} + +/** + * Receive pending reply messages from an AP device. + * @ap_dev: pointer to the AP device + * @flags: pointer to control flags, bit 2^0 is set if another poll is + * required, bit 2^1 is set if the poll timer needs to get armed + * Returns 0 if the device is still present, -ENODEV if not. 
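+ * + * Bit 2^0 asks the caller to poll again, bit 2^1 asks it to re-arm the poll + * timer. A minimal caller sketch, mirroring what ap_poll_all() does below + * (device unregistration on -ENODEV is elided): + * + * unsigned long flags = 0; + * if (ap_poll_read(ap_dev, &flags) == 0) { + * if (flags & 1) + * tasklet_schedule(&ap_tasklet); + * if (flags & 2) + * ap_schedule_poll_timer(); + * }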
+ */ +static inline int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags) +{ + struct ap_queue_status status; + struct ap_message *ap_msg; + + if (ap_dev->queue_count <= 0) + return 0; + status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid, + ap_dev->reply->message, ap_dev->reply->length); + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + atomic_dec(&ap_poll_requests); + ap_dev->queue_count--; + list_for_each_entry(ap_msg, &ap_dev->pendingq, list) { + if (ap_msg->psmid != ap_dev->reply->psmid) + continue; + list_del_init(&ap_msg->list); + ap_dev->pendingq_count--; + ap_dev->drv->receive(ap_dev, ap_msg, ap_dev->reply); + break; + } + if (ap_dev->queue_count > 0) + *flags |= 1; + break; + case AP_RESPONSE_NO_PENDING_REPLY: + if (status.queue_empty) { + /* The card shouldn't forget requests but who knows. */ + ap_dev->queue_count = 0; + list_splice_init(&ap_dev->pendingq, &ap_dev->requestq); + ap_dev->requestq_count += ap_dev->pendingq_count; + ap_dev->pendingq_count = 0; + } else + *flags |= 2; + break; + default: + return -ENODEV; + } + return 0; +} + +/** + * Send messages from the request queue to an AP device. + * @ap_dev: pointer to the AP device + * @flags: pointer to control flags, bit 2^0 is set if another poll is + * required, bit 2^1 is set if the poll timer needs to get armed + * Returns 0 if the device is still present, -ENODEV if not. + */ +static inline int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags) +{ + struct ap_queue_status status; + struct ap_message *ap_msg; + + if (ap_dev->requestq_count <= 0 || + ap_dev->queue_count >= ap_dev->queue_depth) + return 0; + /* Start the next request on the queue. */ + ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list); + status = __ap_send(ap_dev->qid, ap_msg->psmid, + ap_msg->message, ap_msg->length); + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + atomic_inc(&ap_poll_requests); + ap_dev->queue_count++; + list_move_tail(&ap_msg->list, &ap_dev->pendingq); + ap_dev->requestq_count--; + ap_dev->pendingq_count++; + if (ap_dev->queue_count < ap_dev->queue_depth && + ap_dev->requestq_count > 0) + *flags |= 1; + *flags |= 2; + break; + case AP_RESPONSE_Q_FULL: + *flags |= 2; + break; + case AP_RESPONSE_MESSAGE_TOO_BIG: + return -EINVAL; + default: + return -ENODEV; + } + return 0; +} + +/** + * Poll AP device for pending replies and send new messages. If either + * ap_poll_read or ap_poll_write returns -ENODEV, unregister the device. + * @ap_dev: pointer to the AP device + * @flags: pointer to control flags, bit 2^0 is set if another poll is + * required, bit 2^1 is set if the poll timer needs to get armed + * Returns 0 on success, -EINVAL or -ENODEV on failure. + */ +static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags) +{ + int rc; + + rc = ap_poll_read(ap_dev, flags); + if (rc) + return rc; + return ap_poll_write(ap_dev, flags); +} + +/** + * Queue a message to a device.
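+ * Must be called with ap_dev->lock held; ap_queue_message() below takes + * the lock and also handles the case of an unregistered device.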
+ * @ap_dev: pointer to the AP device + * @ap_msg: the message to be queued + */ +static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg) +{ + struct ap_queue_status status; + + if (list_empty(&ap_dev->requestq) && + ap_dev->queue_count < ap_dev->queue_depth) { + status = __ap_send(ap_dev->qid, ap_msg->psmid, + ap_msg->message, ap_msg->length); + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + list_add_tail(&ap_msg->list, &ap_dev->pendingq); + atomic_inc(&ap_poll_requests); + ap_dev->pendingq_count++; + ap_dev->queue_count++; + ap_dev->total_request_count++; + break; + case AP_RESPONSE_Q_FULL: + list_add_tail(&ap_msg->list, &ap_dev->requestq); + ap_dev->requestq_count++; + ap_dev->total_request_count++; + return -EBUSY; + case AP_RESPONSE_MESSAGE_TOO_BIG: + ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL)); + return -EINVAL; + default: /* Device is gone. */ + ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV)); + return -ENODEV; + } + } else { + list_add_tail(&ap_msg->list, &ap_dev->requestq); + ap_dev->requestq_count++; + ap_dev->total_request_count++; + return -EBUSY; + } + ap_schedule_poll_timer(); + return 0; +} + +void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg) +{ + unsigned long flags; + int rc; + + spin_lock_bh(&ap_dev->lock); + if (!ap_dev->unregistered) { + /* Make room on the queue by polling for finished requests. */ + rc = ap_poll_queue(ap_dev, &flags); + if (!rc) + rc = __ap_queue_message(ap_dev, ap_msg); + if (!rc) + wake_up(&ap_poll_wait); + } else { + ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV)); + rc = 0; + } + spin_unlock_bh(&ap_dev->lock); + if (rc == -ENODEV) + device_unregister(&ap_dev->device); +} +EXPORT_SYMBOL(ap_queue_message); + +/** + * Cancel a crypto request. This is done by removing the request + * from the device pendingq or requestq queue. Note that the + * request stays on the AP queue. When it finishes the message + * reply will be discarded because the psmid can't be found. + * @ap_dev: AP device that has the message queued + * @ap_msg: the message that is to be removed + */ +void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg) +{ + struct ap_message *tmp; + + spin_lock_bh(&ap_dev->lock); + if (!list_empty(&ap_msg->list)) { + list_for_each_entry(tmp, &ap_dev->pendingq, list) + if (tmp->psmid == ap_msg->psmid) { + ap_dev->pendingq_count--; + goto found; + } + ap_dev->requestq_count--; + found: + list_del_init(&ap_msg->list); + } + spin_unlock_bh(&ap_dev->lock); +} +EXPORT_SYMBOL(ap_cancel_message); + +/** + * AP receive polling for finished AP requests + */ +static void ap_poll_timeout(unsigned long unused) +{ + tasklet_schedule(&ap_tasklet); +} + +/** + * Poll all AP devices on the bus in a round robin fashion. Continue + * polling until bit 2^0 of the control flags is not set. If bit 2^1 + * of the control flags has been set arm the poll timer.
+ */ +static int __ap_poll_all(struct device *dev, void *data) +{ + struct ap_device *ap_dev = to_ap_dev(dev); + int rc; + + spin_lock(&ap_dev->lock); + if (!ap_dev->unregistered) { + rc = ap_poll_queue(to_ap_dev(dev), (unsigned long *) data); + } else + rc = 0; + spin_unlock(&ap_dev->lock); + if (rc) + device_unregister(&ap_dev->device); + return 0; +} + +static void ap_poll_all(unsigned long dummy) +{ + unsigned long flags; + + do { + flags = 0; + bus_for_each_dev(&ap_bus_type, NULL, &flags, __ap_poll_all); + } while (flags & 1); + if (flags & 2) + ap_schedule_poll_timer(); +} + +/** + * AP bus poll thread. The purpose of this thread is to poll for + * finished requests in a loop if there is a "free" cpu - that is + * a cpu that doesn't have anything better to do. The polling stops + * as soon as there is another task or if all messages have been + * delivered. + */ +static int ap_poll_thread(void *data) +{ + DECLARE_WAITQUEUE(wait, current); + unsigned long flags; + int requests; + + set_user_nice(current, -20); + while (1) { + if (need_resched()) { + schedule(); + continue; + } + add_wait_queue(&ap_poll_wait, &wait); + set_current_state(TASK_INTERRUPTIBLE); + if (kthread_should_stop()) + break; + requests = atomic_read(&ap_poll_requests); + if (requests <= 0) + schedule(); + set_current_state(TASK_RUNNING); + remove_wait_queue(&ap_poll_wait, &wait); + + local_bh_disable(); + flags = 0; + bus_for_each_dev(&ap_bus_type, NULL, &flags, __ap_poll_all); + local_bh_enable(); + } + set_current_state(TASK_RUNNING); + remove_wait_queue(&ap_poll_wait, &wait); + return 0; +} + +static int ap_poll_thread_start(void) +{ + int rc; + + mutex_lock(&ap_poll_thread_mutex); + if (!ap_poll_kthread) { + ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll"); + rc = IS_ERR(ap_poll_kthread) ? PTR_ERR(ap_poll_kthread) : 0; + if (rc) + ap_poll_kthread = NULL; + } + else + rc = 0; + mutex_unlock(&ap_poll_thread_mutex); + return rc; +} + +static void ap_poll_thread_stop(void) +{ + mutex_lock(&ap_poll_thread_mutex); + if (ap_poll_kthread) { + kthread_stop(ap_poll_kthread); + ap_poll_kthread = NULL; + } + mutex_unlock(&ap_poll_thread_mutex); +} + +/** + * The module initialization code. + */ +int __init ap_module_init(void) +{ + int rc, i; + + if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) { + printk(KERN_WARNING "Invalid param: domain = %d. " + " Not loading.\n", ap_domain_index); + return -EINVAL; + } + if (ap_instructions_available() != 0) { + printk(KERN_WARNING "AP instructions not installed.\n"); + return -ENODEV; + } + + /* Create /sys/bus/ap. */ + rc = bus_register(&ap_bus_type); + if (rc) + goto out; + for (i = 0; ap_bus_attrs[i]; i++) { + rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]); + if (rc) + goto out_bus; + } + + /* Create /sys/devices/ap. */ + ap_root_device = s390_root_dev_register("ap"); + rc = IS_ERR(ap_root_device) ? PTR_ERR(ap_root_device) : 0; + if (rc) + goto out_bus; + + ap_work_queue = create_singlethread_workqueue("kapwork"); + if (!ap_work_queue) { + rc = -ENOMEM; + goto out_root; + } + + if (ap_select_domain() == 0) + ap_scan_bus(NULL); + + /* Setup the ap bus rescan timer. */ + init_timer(&ap_config_timer); + ap_config_timer.function = ap_config_timeout; + ap_config_timer.data = 0; + ap_config_timer.expires = jiffies + ap_config_time * HZ; + add_timer(&ap_config_timer); + + /* Start the low priority AP bus poll thread. 
*/ + if (ap_thread_flag) { + rc = ap_poll_thread_start(); + if (rc) + goto out_work; + } + + return 0; + +out_work: + del_timer_sync(&ap_config_timer); + del_timer_sync(&ap_poll_timer); + destroy_workqueue(ap_work_queue); +out_root: + s390_root_dev_unregister(ap_root_device); +out_bus: + while (i--) + bus_remove_file(&ap_bus_type, ap_bus_attrs[i]); + bus_unregister(&ap_bus_type); +out: + return rc; +} + +static int __ap_match_all(struct device *dev, void *data) +{ + return 1; +} + +/** + * The module termination code + */ +void ap_module_exit(void) +{ + int i; + struct device *dev; + + ap_poll_thread_stop(); + del_timer_sync(&ap_config_timer); + del_timer_sync(&ap_poll_timer); + destroy_workqueue(ap_work_queue); + s390_root_dev_unregister(ap_root_device); + while ((dev = bus_find_device(&ap_bus_type, NULL, NULL, + __ap_match_all))) + { + device_unregister(dev); + put_device(dev); + } + for (i = 0; ap_bus_attrs[i]; i++) + bus_remove_file(&ap_bus_type, ap_bus_attrs[i]); + bus_unregister(&ap_bus_type); +} + +#ifndef CONFIG_ZCRYPT_MONOLITHIC +module_init(ap_module_init); +module_exit(ap_module_exit); +#endif diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h new file mode 100644 index 000000000000..83b69c01cd6e --- /dev/null +++ b/drivers/s390/crypto/ap_bus.h @@ -0,0 +1,158 @@ +/* + * linux/drivers/s390/crypto/ap_bus.h + * + * Copyright (C) 2006 IBM Corporation + * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> + * Martin Schwidefsky <schwidefsky@de.ibm.com> + * Ralph Wuerthner <rwuerthn@de.ibm.com> + * + * Adjunct processor bus header file. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _AP_BUS_H_ +#define _AP_BUS_H_ + +#include <linux/device.h> +#include <linux/mod_devicetable.h> +#include <linux/types.h> + +#define AP_DEVICES 64 /* Number of AP devices. */ +#define AP_DOMAINS 16 /* Number of AP domains. */ +#define AP_MAX_RESET 90 /* Maximum number of resets. */ +#define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */ +#define AP_POLL_TIME 1 /* Time in ticks between receive polls. */ + +extern int ap_domain_index; + +/** + * The ap_qid_t identifier of an ap queue. It contains a + * 6 bit device index and a 4 bit queue index (domain). + */ +typedef unsigned int ap_qid_t; + +#define AP_MKQID(_device,_queue) (((_device) & 63) << 8 | ((_queue) & 15)) +#define AP_QID_DEVICE(_qid) (((_qid) >> 8) & 63) +#define AP_QID_QUEUE(_qid) ((_qid) & 15) + +/** + * The ap queue status word is returned by all three AP functions + * (PQAP, NQAP and DQAP). There's a set of flags in the first + * byte, followed by a 1 byte response code. 
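+ * The response_code byte is compared against the AP_RESPONSE_* values + * defined below; for AP_RESPONSE_NO_PENDING_REPLY the queue_empty flag + * distinguishes an empty queue (-ENOENT from ap_recv) from a busy one + * (-EBUSY).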
+ */ +struct ap_queue_status { + unsigned int queue_empty : 1; + unsigned int replies_waiting : 1; + unsigned int queue_full : 1; + unsigned int pad1 : 5; + unsigned int response_code : 8; + unsigned int pad2 : 16; +}; + +#define AP_RESPONSE_NORMAL 0x00 +#define AP_RESPONSE_Q_NOT_AVAIL 0x01 +#define AP_RESPONSE_RESET_IN_PROGRESS 0x02 +#define AP_RESPONSE_DECONFIGURED 0x03 +#define AP_RESPONSE_CHECKSTOPPED 0x04 +#define AP_RESPONSE_BUSY 0x05 +#define AP_RESPONSE_Q_FULL 0x10 +#define AP_RESPONSE_NO_PENDING_REPLY 0x10 +#define AP_RESPONSE_INDEX_TOO_BIG 0x11 +#define AP_RESPONSE_NO_FIRST_PART 0x13 +#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15 + +/** + * Known device types + */ +#define AP_DEVICE_TYPE_PCICC 3 +#define AP_DEVICE_TYPE_PCICA 4 +#define AP_DEVICE_TYPE_PCIXCC 5 +#define AP_DEVICE_TYPE_CEX2A 6 +#define AP_DEVICE_TYPE_CEX2C 7 + +struct ap_device; +struct ap_message; + +struct ap_driver { + struct device_driver driver; + struct ap_device_id *ids; + + int (*probe)(struct ap_device *); + void (*remove)(struct ap_device *); + /* receive is called from tasklet context */ + void (*receive)(struct ap_device *, struct ap_message *, + struct ap_message *); +}; + +#define to_ap_drv(x) container_of((x), struct ap_driver, driver) + +int ap_driver_register(struct ap_driver *, struct module *, char *); +void ap_driver_unregister(struct ap_driver *); + +struct ap_device { + struct device device; + struct ap_driver *drv; /* Pointer to AP device driver. */ + spinlock_t lock; /* Per device lock. */ + + ap_qid_t qid; /* AP queue id. */ + int queue_depth; /* AP queue depth. */ + int device_type; /* AP device type. */ + int unregistered; /* marks AP device as unregistered */ + + int queue_count; /* # messages currently on AP queue. */ + + struct list_head pendingq; /* List of messages sent to AP queue. */ + int pendingq_count; /* # requests on pendingq list. */ + struct list_head requestq; /* List of messages yet to be sent. */ + int requestq_count; /* # requests on requestq list. */ + int total_request_count; /* # requests ever for this AP device. */ + + struct ap_message *reply; /* Per device reply message. */ + + void *private; /* ap driver private pointer. */ +}; + +#define to_ap_dev(x) container_of((x), struct ap_device, device) + +struct ap_message { + struct list_head list; /* Request queueing. */ + unsigned long long psmid; /* Message id. */ + void *message; /* Pointer to message buffer. */ + size_t length; /* Message length. */ + + void *private; /* ap driver private pointer. */ +}; + +#define AP_DEVICE(dt) \ + .dev_type=(dt), \ + .match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE, + +/** + * Note: don't use ap_send/ap_recv after using ap_queue_message + * for the first time. Otherwise the ap message queue will get + * confused.
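+ * + * An illustrative submit sequence for a driver built on the asynchronous + * interface; psmid, buffer and buflen are placeholders, and the reply is + * delivered through the driver's ->receive callback: + * + * struct ap_message ap_msg; + * + * ap_msg.psmid = psmid; + * ap_msg.message = buffer; + * ap_msg.length = buflen; + * ap_queue_message(ap_dev, &ap_msg); + * + * A request that has not been answered yet can be withdrawn with + * ap_cancel_message(ap_dev, &ap_msg).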
+ */ +int ap_send(ap_qid_t, unsigned long long, void *, size_t); +int ap_recv(ap_qid_t, unsigned long long *, void *, size_t); + +void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg); +void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg); +void ap_flush_queue(struct ap_device *ap_dev); + +int ap_module_init(void); +void ap_module_exit(void); + +#endif /* _AP_BUS_H_ */ diff --git a/drivers/s390/crypto/z90common.h b/drivers/s390/crypto/z90common.h deleted file mode 100644 index dbbcda3c846a..000000000000 --- a/drivers/s390/crypto/z90common.h +++ /dev/null @@ -1,166 +0,0 @@ -/* - * linux/drivers/s390/crypto/z90common.h - * - * z90crypt 1.3.3 - * - * Copyright (C) 2001, 2005 IBM Corporation - * Author(s): Robert Burroughs (burrough@us.ibm.com) - * Eric Rossman (edrossma@us.ibm.com) - * - * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -#ifndef _Z90COMMON_H_ -#define _Z90COMMON_H_ - - -#define RESPBUFFSIZE 256 -#define PCI_FUNC_KEY_DECRYPT 0x5044 -#define PCI_FUNC_KEY_ENCRYPT 0x504B -extern int ext_bitlens; - -enum devstat { - DEV_GONE, - DEV_ONLINE, - DEV_QUEUE_FULL, - DEV_EMPTY, - DEV_NO_WORK, - DEV_BAD_MESSAGE, - DEV_TSQ_EXCEPTION, - DEV_RSQ_EXCEPTION, - DEV_SEN_EXCEPTION, - DEV_REC_EXCEPTION -}; - -enum hdstat { - HD_NOT_THERE, - HD_BUSY, - HD_DECONFIGURED, - HD_CHECKSTOPPED, - HD_ONLINE, - HD_TSQ_EXCEPTION -}; - -#define Z90C_NO_DEVICES 1 -#define Z90C_AMBIGUOUS_DOMAIN 2 -#define Z90C_INCORRECT_DOMAIN 3 -#define ENOTINIT 4 - -#define SEN_BUSY 7 -#define SEN_USER_ERROR 8 -#define SEN_QUEUE_FULL 11 -#define SEN_NOT_AVAIL 16 -#define SEN_PAD_ERROR 17 -#define SEN_RETRY 18 -#define SEN_RELEASED 24 - -#define REC_EMPTY 4 -#define REC_BUSY 6 -#define REC_OPERAND_INV 8 -#define REC_OPERAND_SIZE 9 -#define REC_EVEN_MOD 10 -#define REC_NO_WORK 11 -#define REC_HARDWAR_ERR 12 -#define REC_NO_RESPONSE 13 -#define REC_RETRY_DEV 14 -#define REC_USER_GONE 15 -#define REC_BAD_MESSAGE 16 -#define REC_INVALID_PAD 17 -#define REC_USE_PCICA 18 - -#define WRONG_DEVICE_TYPE 20 - -#define REC_FATAL_ERROR 32 -#define SEN_FATAL_ERROR 33 -#define TSQ_FATAL_ERROR 34 -#define RSQ_FATAL_ERROR 35 - -#define Z90CRYPT_NUM_TYPES 6 -#define PCICA 0 -#define PCICC 1 -#define PCIXCC_MCL2 2 -#define PCIXCC_MCL3 3 -#define CEX2C 4 -#define CEX2A 5 -#define NILDEV -1 -#define ANYDEV -1 -#define PCIXCC_UNK -2 - -enum hdevice_type { - PCICC_HW = 3, - PCICA_HW = 4, - PCIXCC_HW = 5, - CEX2A_HW = 6, - CEX2C_HW = 7 -}; - -struct CPRBX { - unsigned short cprb_len; - unsigned char cprb_ver_id; - unsigned char pad_000[3]; - unsigned char func_id[2]; - unsigned char cprb_flags[4]; - unsigned int req_parml; - unsigned int req_datal; - unsigned int rpl_msgbl; - unsigned int rpld_parml; - unsigned int rpl_datal; - unsigned int rpld_datal; - unsigned int req_extbl; - unsigned char 
pad_001[4]; - unsigned int rpld_extbl; - unsigned char req_parmb[16]; - unsigned char req_datab[16]; - unsigned char rpl_parmb[16]; - unsigned char rpl_datab[16]; - unsigned char req_extb[16]; - unsigned char rpl_extb[16]; - unsigned short ccp_rtcode; - unsigned short ccp_rscode; - unsigned int mac_data_len; - unsigned char logon_id[8]; - unsigned char mac_value[8]; - unsigned char mac_content_flgs; - unsigned char pad_002; - unsigned short domain; - unsigned char pad_003[12]; - unsigned char pad_004[36]; -}; - -#ifndef DEV_NAME -#define DEV_NAME "z90crypt" -#endif -#define PRINTK(fmt, args...) \ - printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args) -#define PRINTKN(fmt, args...) \ - printk(KERN_DEBUG DEV_NAME ": " fmt, ## args) -#define PRINTKW(fmt, args...) \ - printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args) -#define PRINTKC(fmt, args...) \ - printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args) - -#ifdef Z90CRYPT_DEBUG -#define PDEBUG(fmt, args...) \ - printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args) -#else -#define PDEBUG(fmt, args...) do {} while (0) -#endif - -#define UMIN(a,b) ((a) < (b) ? (a) : (b)) -#define IS_EVEN(x) ((x) == (2 * ((x) / 2))) - -#endif diff --git a/drivers/s390/crypto/z90crypt.h b/drivers/s390/crypto/z90crypt.h deleted file mode 100644 index 0ca1d126ccb6..000000000000 --- a/drivers/s390/crypto/z90crypt.h +++ /dev/null @@ -1,71 +0,0 @@ -/* - * linux/drivers/s390/crypto/z90crypt.h - * - * z90crypt 1.3.3 (kernel-private header) - * - * Copyright (C) 2001, 2005 IBM Corporation - * Author(s): Robert Burroughs (burrough@us.ibm.com) - * Eric Rossman (edrossma@us.ibm.com) - * - * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -#ifndef _Z90CRYPT_H_ -#define _Z90CRYPT_H_ - -#include <asm/z90crypt.h> - -/** - * local errno definitions - */ -#define ENOBUFF 129 // filp->private_data->...>work_elem_p->buffer is NULL -#define EWORKPEND 130 // user issues ioctl while another pending -#define ERELEASED 131 // user released while ioctl pending -#define EQUIESCE 132 // z90crypt quiescing (no more work allowed) -#define ETIMEOUT 133 // request timed out -#define EUNKNOWN 134 // some unrecognized error occured (retry may succeed) -#define EGETBUFF 135 // Error getting buffer or hardware lacks capability - // (retry in software) - -/** - * DEPRECATED STRUCTURES - */ - -/** - * This structure is DEPRECATED and the corresponding ioctl() has been - * replaced with individual ioctl()s for each piece of data! - * This structure will NOT survive past version 1.3.1, so switch to the - * new ioctl()s. 
- */ -#define MASK_LENGTH 64 // mask length -struct ica_z90_status { - int totalcount; - int leedslitecount; // PCICA - int leeds2count; // PCICC - // int PCIXCCCount; is not in struct for backward compatibility - int requestqWaitCount; - int pendingqWaitCount; - int totalOpenCount; - int cryptoDomain; - // status: 0=not there, 1=PCICA, 2=PCICC, 3=PCIXCC_MCL2, 4=PCIXCC_MCL3, - // 5=CEX2C - unsigned char status[MASK_LENGTH]; - // qdepth: # work elements waiting for each device - unsigned char qdepth[MASK_LENGTH]; -}; - -#endif /* _Z90CRYPT_H_ */ diff --git a/drivers/s390/crypto/z90hardware.c b/drivers/s390/crypto/z90hardware.c deleted file mode 100644 index be60795f4a74..000000000000 --- a/drivers/s390/crypto/z90hardware.c +++ /dev/null @@ -1,2531 +0,0 @@ -/* - * linux/drivers/s390/crypto/z90hardware.c - * - * z90crypt 1.3.3 - * - * Copyright (C) 2001, 2005 IBM Corporation - * Author(s): Robert Burroughs (burrough@us.ibm.com) - * Eric Rossman (edrossma@us.ibm.com) - * - * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -#include <asm/uaccess.h> -#include <linux/compiler.h> -#include <linux/delay.h> -#include <linux/init.h> -#include <linux/module.h> -#include "z90crypt.h" -#include "z90common.h" - -struct cca_token_hdr { - unsigned char token_identifier; - unsigned char version; - unsigned short token_length; - unsigned char reserved[4]; -}; - -#define CCA_TKN_HDR_ID_EXT 0x1E - -struct cca_private_ext_ME_sec { - unsigned char section_identifier; - unsigned char version; - unsigned short section_length; - unsigned char private_key_hash[20]; - unsigned char reserved1[4]; - unsigned char key_format; - unsigned char reserved2; - unsigned char key_name_hash[20]; - unsigned char key_use_flags[4]; - unsigned char reserved3[6]; - unsigned char reserved4[24]; - unsigned char confounder[24]; - unsigned char exponent[128]; - unsigned char modulus[128]; -}; - -#define CCA_PVT_USAGE_ALL 0x80 - -struct cca_public_sec { - unsigned char section_identifier; - unsigned char version; - unsigned short section_length; - unsigned char reserved[2]; - unsigned short exponent_len; - unsigned short modulus_bit_len; - unsigned short modulus_byte_len; - unsigned char exponent[3]; -}; - -struct cca_private_ext_ME { - struct cca_token_hdr pvtMEHdr; - struct cca_private_ext_ME_sec pvtMESec; - struct cca_public_sec pubMESec; -}; - -struct cca_public_key { - struct cca_token_hdr pubHdr; - struct cca_public_sec pubSec; -}; - -struct cca_pvt_ext_CRT_sec { - unsigned char section_identifier; - unsigned char version; - unsigned short section_length; - unsigned char private_key_hash[20]; - unsigned char reserved1[4]; - unsigned char key_format; - unsigned char reserved2; - unsigned char key_name_hash[20]; - unsigned char key_use_flags[4]; - unsigned short p_len; - unsigned short q_len; - unsigned short dp_len; - 
unsigned short dq_len; - unsigned short u_len; - unsigned short mod_len; - unsigned char reserved3[4]; - unsigned short pad_len; - unsigned char reserved4[52]; - unsigned char confounder[8]; -}; - -#define CCA_PVT_EXT_CRT_SEC_ID_PVT 0x08 -#define CCA_PVT_EXT_CRT_SEC_FMT_CL 0x40 - -struct cca_private_ext_CRT { - struct cca_token_hdr pvtCrtHdr; - struct cca_pvt_ext_CRT_sec pvtCrtSec; - struct cca_public_sec pubCrtSec; -}; - -struct ap_status_word { - unsigned char q_stat_flags; - unsigned char response_code; - unsigned char reserved[2]; -}; - -#define AP_Q_STATUS_EMPTY 0x80 -#define AP_Q_STATUS_REPLIES_WAITING 0x40 -#define AP_Q_STATUS_ARRAY_FULL 0x20 - -#define AP_RESPONSE_NORMAL 0x00 -#define AP_RESPONSE_Q_NOT_AVAIL 0x01 -#define AP_RESPONSE_RESET_IN_PROGRESS 0x02 -#define AP_RESPONSE_DECONFIGURED 0x03 -#define AP_RESPONSE_CHECKSTOPPED 0x04 -#define AP_RESPONSE_BUSY 0x05 -#define AP_RESPONSE_Q_FULL 0x10 -#define AP_RESPONSE_NO_PENDING_REPLY 0x10 -#define AP_RESPONSE_INDEX_TOO_BIG 0x11 -#define AP_RESPONSE_NO_FIRST_PART 0x13 -#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15 - -#define AP_MAX_CDX_BITL 4 -#define AP_RQID_RESERVED_BITL 4 -#define SKIP_BITL (AP_MAX_CDX_BITL + AP_RQID_RESERVED_BITL) - -struct type4_hdr { - unsigned char reserved1; - unsigned char msg_type_code; - unsigned short msg_len; - unsigned char request_code; - unsigned char msg_fmt; - unsigned short reserved2; -}; - -#define TYPE4_TYPE_CODE 0x04 -#define TYPE4_REQU_CODE 0x40 - -#define TYPE4_SME_LEN 0x0188 -#define TYPE4_LME_LEN 0x0308 -#define TYPE4_SCR_LEN 0x01E0 -#define TYPE4_LCR_LEN 0x03A0 - -#define TYPE4_SME_FMT 0x00 -#define TYPE4_LME_FMT 0x10 -#define TYPE4_SCR_FMT 0x40 -#define TYPE4_LCR_FMT 0x50 - -struct type4_sme { - struct type4_hdr header; - unsigned char message[128]; - unsigned char exponent[128]; - unsigned char modulus[128]; -}; - -struct type4_lme { - struct type4_hdr header; - unsigned char message[256]; - unsigned char exponent[256]; - unsigned char modulus[256]; -}; - -struct type4_scr { - struct type4_hdr header; - unsigned char message[128]; - unsigned char dp[72]; - unsigned char dq[64]; - unsigned char p[72]; - unsigned char q[64]; - unsigned char u[72]; -}; - -struct type4_lcr { - struct type4_hdr header; - unsigned char message[256]; - unsigned char dp[136]; - unsigned char dq[128]; - unsigned char p[136]; - unsigned char q[128]; - unsigned char u[136]; -}; - -union type4_msg { - struct type4_sme sme; - struct type4_lme lme; - struct type4_scr scr; - struct type4_lcr lcr; -}; - -struct type84_hdr { - unsigned char reserved1; - unsigned char code; - unsigned short len; - unsigned char reserved2[4]; -}; - -#define TYPE84_RSP_CODE 0x84 - -struct type6_hdr { - unsigned char reserved1; - unsigned char type; - unsigned char reserved2[2]; - unsigned char right[4]; - unsigned char reserved3[2]; - unsigned char reserved4[2]; - unsigned char apfs[4]; - unsigned int offset1; - unsigned int offset2; - unsigned int offset3; - unsigned int offset4; - unsigned char agent_id[16]; - unsigned char rqid[2]; - unsigned char reserved5[2]; - unsigned char function_code[2]; - unsigned char reserved6[2]; - unsigned int ToCardLen1; - unsigned int ToCardLen2; - unsigned int ToCardLen3; - unsigned int ToCardLen4; - unsigned int FromCardLen1; - unsigned int FromCardLen2; - unsigned int FromCardLen3; - unsigned int FromCardLen4; -}; - -struct CPRB { - unsigned char cprb_len[2]; - unsigned char cprb_ver_id; - unsigned char pad_000; - unsigned char srpi_rtcode[4]; - unsigned char srpi_verb; - unsigned char flags; - unsigned char 
func_id[2]; - unsigned char checkpoint_flag; - unsigned char resv2; - unsigned char req_parml[2]; - unsigned char req_parmp[4]; - unsigned char req_datal[4]; - unsigned char req_datap[4]; - unsigned char rpl_parml[2]; - unsigned char pad_001[2]; - unsigned char rpl_parmp[4]; - unsigned char rpl_datal[4]; - unsigned char rpl_datap[4]; - unsigned char ccp_rscode[2]; - unsigned char ccp_rtcode[2]; - unsigned char repd_parml[2]; - unsigned char mac_data_len[2]; - unsigned char repd_datal[4]; - unsigned char req_pc[2]; - unsigned char res_origin[8]; - unsigned char mac_value[8]; - unsigned char logon_id[8]; - unsigned char usage_domain[2]; - unsigned char resv3[18]; - unsigned char svr_namel[2]; - unsigned char svr_name[8]; -}; - -struct type6_msg { - struct type6_hdr header; - struct CPRB CPRB; -}; - -struct type86_hdr { - unsigned char reserved1; - unsigned char type; - unsigned char format; - unsigned char reserved2; - unsigned char reply_code; - unsigned char reserved3[3]; -}; - -#define TYPE86_RSP_CODE 0x86 -#define TYPE86_FMT2 0x02 - -struct type86_fmt2_msg { - struct type86_hdr header; - unsigned char reserved[4]; - unsigned char apfs[4]; - unsigned int count1; - unsigned int offset1; - unsigned int count2; - unsigned int offset2; - unsigned int count3; - unsigned int offset3; - unsigned int count4; - unsigned int offset4; -}; - -static struct type6_hdr static_type6_hdr = { - 0x00, - 0x06, - {0x00,0x00}, - {0x00,0x00,0x00,0x00}, - {0x00,0x00}, - {0x00,0x00}, - {0x00,0x00,0x00,0x00}, - 0x00000058, - 0x00000000, - 0x00000000, - 0x00000000, - {0x01,0x00,0x43,0x43,0x41,0x2D,0x41,0x50, - 0x50,0x4C,0x20,0x20,0x20,0x01,0x01,0x01}, - {0x00,0x00}, - {0x00,0x00}, - {0x50,0x44}, - {0x00,0x00}, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000 -}; - -static struct type6_hdr static_type6_hdrX = { - 0x00, - 0x06, - {0x00,0x00}, - {0x00,0x00,0x00,0x00}, - {0x00,0x00}, - {0x00,0x00}, - {0x00,0x00,0x00,0x00}, - 0x00000058, - 0x00000000, - 0x00000000, - 0x00000000, - {0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}, - {0x00,0x00}, - {0x00,0x00}, - {0x50,0x44}, - {0x00,0x00}, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000 -}; - -static struct CPRB static_cprb = { - {0x70,0x00}, - 0x41, - 0x00, - {0x00,0x00,0x00,0x00}, - 0x00, - 0x00, - {0x54,0x32}, - 0x01, - 0x00, - {0x00,0x00}, - {0x00,0x00,0x00,0x00}, - {0x00,0x00,0x00,0x00}, - {0x00,0x00,0x00,0x00}, - {0x00,0x00}, - {0x00,0x00}, - {0x00,0x00,0x00,0x00}, - {0x00,0x00,0x00,0x00}, - {0x00,0x00,0x00,0x00}, - {0x00,0x00}, - {0x00,0x00}, - {0x00,0x00}, - {0x00,0x00}, - {0x00,0x00,0x00,0x00}, - {0x00,0x00}, - {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}, - {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}, - {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}, - {0x00,0x00}, - {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00}, - {0x08,0x00}, - {0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20} -}; - -struct function_and_rules_block { - unsigned char function_code[2]; - unsigned char ulen[2]; - unsigned char only_rule[8]; -}; - -static struct function_and_rules_block static_pkd_function_and_rules = { - {0x50,0x44}, - {0x0A,0x00}, - {'P','K','C','S','-','1','.','2'} -}; - -static struct function_and_rules_block static_pke_function_and_rules = { - {0x50,0x4B}, - {0x0A,0x00}, - {'P','K','C','S','-','1','.','2'} -}; - -struct T6_keyBlock_hdr { - unsigned char blen[2]; - unsigned char 
ulen[2]; - unsigned char flags[2]; -}; - -static struct T6_keyBlock_hdr static_T6_keyBlock_hdr = { - {0x89,0x01}, - {0x87,0x01}, - {0x00} -}; - -static struct CPRBX static_cprbx = { - 0x00DC, - 0x02, - {0x00,0x00,0x00}, - {0x54,0x32}, - {0x00,0x00,0x00,0x00}, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - {0x00,0x00,0x00,0x00}, - 0x00000000, - {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}, - {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}, - {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}, - {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}, - {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}, - {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}, - 0x0000, - 0x0000, - 0x00000000, - {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}, - {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}, - 0x00, - 0x00, - 0x0000, - {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}, - {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00} -}; - -static struct function_and_rules_block static_pkd_function_and_rulesX_MCL2 = { - {0x50,0x44}, - {0x00,0x0A}, - {'P','K','C','S','-','1','.','2'} -}; - -static struct function_and_rules_block static_pke_function_and_rulesX_MCL2 = { - {0x50,0x4B}, - {0x00,0x0A}, - {'Z','E','R','O','-','P','A','D'} -}; - -static struct function_and_rules_block static_pkd_function_and_rulesX = { - {0x50,0x44}, - {0x00,0x0A}, - {'Z','E','R','O','-','P','A','D'} -}; - -static struct function_and_rules_block static_pke_function_and_rulesX = { - {0x50,0x4B}, - {0x00,0x0A}, - {'M','R','P',' ',' ',' ',' ',' '} -}; - -static unsigned char static_PKE_function_code[2] = {0x50, 0x4B}; - -struct T6_keyBlock_hdrX { - unsigned short blen; - unsigned short ulen; - unsigned char flags[2]; -}; - -static unsigned char static_pad[256] = { -0x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD,0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57, -0x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B,0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39, -0xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5,0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D, -0x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB,0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F, -0x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9,0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45, -0x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9,0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F, -0x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD,0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D, -0xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD,0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9, -0x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B,0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B, -0x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B,0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD, -0x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7,0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1, -0x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3,0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23, -0x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55,0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43, -0x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F,0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F, -0x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5,0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD, -0x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41,0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09 -}; - -static struct cca_private_ext_ME static_pvt_me_key = { - { - 0x1E, - 0x00, - 0x0183, - 
{0x00,0x00,0x00,0x00} - }, - - { - 0x02, - 0x00, - 0x016C, - {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00}, - {0x00,0x00,0x00,0x00}, - 0x00, - 0x00, - {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00}, - {0x80,0x00,0x00,0x00}, - {0x00,0x00,0x00,0x00,0x00,0x00}, - {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}, - {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}, - {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00}, - {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00} - }, - - { - 0x04, - 0x00, - 0x000F, - {0x00,0x00}, - 0x0003, - 0x0000, - 0x0000, - {0x01,0x00,0x01} - } -}; - -static struct cca_public_key static_public_key = { - { - 0x1E, - 0x00, - 0x0000, - {0x00,0x00,0x00,0x00} - }, - - { - 0x04, - 0x00, - 0x0000, - {0x00,0x00}, - 0x0000, - 0x0000, - 0x0000, - {0x01,0x00,0x01} - } -}; - -#define FIXED_TYPE6_ME_LEN 0x0000025F - -#define FIXED_TYPE6_ME_EN_LEN 0x000000F0 - -#define FIXED_TYPE6_ME_LENX 0x000002CB - -#define FIXED_TYPE6_ME_EN_LENX 0x0000015C - -static struct cca_public_sec static_cca_pub_sec = { - 0x04, - 0x00, - 0x000f, - {0x00,0x00}, - 0x0003, - 0x0000, - 0x0000, - {0x01,0x00,0x01} -}; - -#define FIXED_TYPE6_CR_LEN 0x00000177 - -#define FIXED_TYPE6_CR_LENX 0x000001E3 - -#define MAX_RESPONSE_SIZE 0x00000710 - -#define MAX_RESPONSEX_SIZE 0x0000077C - -#define RESPONSE_CPRB_SIZE 0x000006B8 -#define RESPONSE_CPRBX_SIZE 0x00000724 - -struct type50_hdr { - u8 reserved1; - u8 msg_type_code; - u16 msg_len; - u8 reserved2; - u8 ignored; - u16 reserved3; -}; - -#define TYPE50_TYPE_CODE 0x50 - -#define TYPE50_MEB1_LEN (sizeof(struct type50_meb1_msg)) -#define TYPE50_MEB2_LEN (sizeof(struct type50_meb2_msg)) -#define TYPE50_CRB1_LEN (sizeof(struct type50_crb1_msg)) -#define TYPE50_CRB2_LEN (sizeof(struct type50_crb2_msg)) - -#define TYPE50_MEB1_FMT 0x0001 -#define TYPE50_MEB2_FMT 0x0002 -#define TYPE50_CRB1_FMT 0x0011 -#define TYPE50_CRB2_FMT 0x0012 - -struct type50_meb1_msg { - struct type50_hdr header; - u16 keyblock_type; - u8 reserved[6]; - u8 exponent[128]; - 
u8 modulus[128]; - u8 message[128]; -}; - -struct type50_meb2_msg { - struct type50_hdr header; - u16 keyblock_type; - u8 reserved[6]; - u8 exponent[256]; - u8 modulus[256]; - u8 message[256]; -}; - -struct type50_crb1_msg { - struct type50_hdr header; - u16 keyblock_type; - u8 reserved[6]; - u8 p[64]; - u8 q[64]; - u8 dp[64]; - u8 dq[64]; - u8 u[64]; - u8 message[128]; -}; - -struct type50_crb2_msg { - struct type50_hdr header; - u16 keyblock_type; - u8 reserved[6]; - u8 p[128]; - u8 q[128]; - u8 dp[128]; - u8 dq[128]; - u8 u[128]; - u8 message[256]; -}; - -union type50_msg { - struct type50_meb1_msg meb1; - struct type50_meb2_msg meb2; - struct type50_crb1_msg crb1; - struct type50_crb2_msg crb2; -}; - -struct type80_hdr { - u8 reserved1; - u8 type; - u16 len; - u8 code; - u8 reserved2[3]; - u8 reserved3[8]; -}; - -#define TYPE80_RSP_CODE 0x80 - -struct error_hdr { - unsigned char reserved1; - unsigned char type; - unsigned char reserved2[2]; - unsigned char reply_code; - unsigned char reserved3[3]; -}; - -#define TYPE82_RSP_CODE 0x82 -#define TYPE88_RSP_CODE 0x88 - -#define REP82_ERROR_MACHINE_FAILURE 0x10 -#define REP82_ERROR_PREEMPT_FAILURE 0x12 -#define REP82_ERROR_CHECKPT_FAILURE 0x14 -#define REP82_ERROR_MESSAGE_TYPE 0x20 -#define REP82_ERROR_INVALID_COMM_CD 0x21 -#define REP82_ERROR_INVALID_MSG_LEN 0x23 -#define REP82_ERROR_RESERVD_FIELD 0x24 -#define REP82_ERROR_FORMAT_FIELD 0x29 -#define REP82_ERROR_INVALID_COMMAND 0x30 -#define REP82_ERROR_MALFORMED_MSG 0x40 -#define REP82_ERROR_RESERVED_FIELDO 0x50 -#define REP82_ERROR_WORD_ALIGNMENT 0x60 -#define REP82_ERROR_MESSAGE_LENGTH 0x80 -#define REP82_ERROR_OPERAND_INVALID 0x82 -#define REP82_ERROR_OPERAND_SIZE 0x84 -#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85 -#define REP82_ERROR_RESERVED_FIELD 0x88 -#define REP82_ERROR_TRANSPORT_FAIL 0x90 -#define REP82_ERROR_PACKET_TRUNCATED 0xA0 -#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0 - -#define REP88_ERROR_MODULE_FAILURE 0x10 -#define REP88_ERROR_MODULE_TIMEOUT 0x11 -#define REP88_ERROR_MODULE_NOTINIT 0x13 -#define REP88_ERROR_MODULE_NOTAVAIL 0x14 -#define REP88_ERROR_MODULE_DISABLED 0x15 -#define REP88_ERROR_MODULE_IN_DIAGN 0x17 -#define REP88_ERROR_FASTPATH_DISABLD 0x19 -#define REP88_ERROR_MESSAGE_TYPE 0x20 -#define REP88_ERROR_MESSAGE_MALFORMD 0x22 -#define REP88_ERROR_MESSAGE_LENGTH 0x23 -#define REP88_ERROR_RESERVED_FIELD 0x24 -#define REP88_ERROR_KEY_TYPE 0x34 -#define REP88_ERROR_INVALID_KEY 0x82 -#define REP88_ERROR_OPERAND 0x84 -#define REP88_ERROR_OPERAND_EVEN_MOD 0x85 - -#define CALLER_HEADER 12 - -static inline int -testq(int q_nr, int *q_depth, int *dev_type, struct ap_status_word *stat) -{ - int ccode; - - asm volatile -#ifdef CONFIG_64BIT - (" llgfr 0,%4 \n" - " slgr 1,1 \n" - " lgr 2,1 \n" - "0: .long 0xb2af0000 \n" - "1: ipm %0 \n" - " srl %0,28 \n" - " iihh %0,0 \n" - " iihl %0,0 \n" - " lgr %1,1 \n" - " lgr %3,2 \n" - " srl %3,24 \n" - " sll 2,24 \n" - " srl 2,24 \n" - " lgr %2,2 \n" - "2: \n" - ".section .fixup,\"ax\" \n" - "3: \n" - " lhi %0,%h5 \n" - " jg 2b \n" - ".previous \n" - ".section __ex_table,\"a\" \n" - " .align 8 \n" - " .quad 0b,3b \n" - " .quad 1b,3b \n" - ".previous" - :"=d" (ccode),"=d" (*stat),"=d" (*q_depth), "=d" (*dev_type) - :"d" (q_nr), "K" (DEV_TSQ_EXCEPTION) - :"cc","0","1","2","memory"); -#else - (" lr 0,%4 \n" - " slr 1,1 \n" - " lr 2,1 \n" - "0: .long 0xb2af0000 \n" - "1: ipm %0 \n" - " srl %0,28 \n" - " lr %1,1 \n" - " lr %3,2 \n" - " srl %3,24 \n" - " sll 2,24 \n" - " srl 2,24 \n" - " lr %2,2 \n" - "2: \n" - ".section .fixup,\"ax\" \n" - "3: \n" 
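
For orientation (an aside, not patch content): a reply that begins with TYPE82_RSP_CODE or TYPE88_RSP_CODE is an error_hdr, and reply_code selects the failure class from the tables above. The helper below is hypothetical and only illustrates how the two tables share their key values; the driver's real handling is in convert_response() further down:

	static const char *decode_reply(const struct error_hdr *eh)
	{
		switch (eh->reply_code) {
		case REP82_ERROR_MACHINE_FAILURE:	/* 0x10 in both tables */
			return "machine/module failure";
		case REP82_ERROR_OPERAND_INVALID:	/* 0x82, invalid key for type 88 */
			return "invalid operand";
		case REP82_ERROR_OPERAND_SIZE:		/* 0x84 in both tables */
			return "operand size";
		case REP82_ERROR_EVEN_MOD_IN_OPND:	/* 0x85 in both tables */
			return "even modulus";
		default:
			return "other failure";
		}
	}
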
- " lhi %0,%h5 \n" - " bras 1,4f \n" - " .long 2b \n" - "4: \n" - " l 1,0(1) \n" - " br 1 \n" - ".previous \n" - ".section __ex_table,\"a\" \n" - " .align 4 \n" - " .long 0b,3b \n" - " .long 1b,3b \n" - ".previous" - :"=d" (ccode),"=d" (*stat),"=d" (*q_depth), "=d" (*dev_type) - :"d" (q_nr), "K" (DEV_TSQ_EXCEPTION) - :"cc","0","1","2","memory"); -#endif - return ccode; -} - -static inline int -resetq(int q_nr, struct ap_status_word *stat_p) -{ - int ccode; - - asm volatile -#ifdef CONFIG_64BIT - (" llgfr 0,%2 \n" - " lghi 1,1 \n" - " sll 1,24 \n" - " or 0,1 \n" - " slgr 1,1 \n" - " lgr 2,1 \n" - "0: .long 0xb2af0000 \n" - "1: ipm %0 \n" - " srl %0,28 \n" - " iihh %0,0 \n" - " iihl %0,0 \n" - " lgr %1,1 \n" - "2: \n" - ".section .fixup,\"ax\" \n" - "3: \n" - " lhi %0,%h3 \n" - " jg 2b \n" - ".previous \n" - ".section __ex_table,\"a\" \n" - " .align 8 \n" - " .quad 0b,3b \n" - " .quad 1b,3b \n" - ".previous" - :"=d" (ccode),"=d" (*stat_p) - :"d" (q_nr), "K" (DEV_RSQ_EXCEPTION) - :"cc","0","1","2","memory"); -#else - (" lr 0,%2 \n" - " lhi 1,1 \n" - " sll 1,24 \n" - " or 0,1 \n" - " slr 1,1 \n" - " lr 2,1 \n" - "0: .long 0xb2af0000 \n" - "1: ipm %0 \n" - " srl %0,28 \n" - " lr %1,1 \n" - "2: \n" - ".section .fixup,\"ax\" \n" - "3: \n" - " lhi %0,%h3 \n" - " bras 1,4f \n" - " .long 2b \n" - "4: \n" - " l 1,0(1) \n" - " br 1 \n" - ".previous \n" - ".section __ex_table,\"a\" \n" - " .align 4 \n" - " .long 0b,3b \n" - " .long 1b,3b \n" - ".previous" - :"=d" (ccode),"=d" (*stat_p) - :"d" (q_nr), "K" (DEV_RSQ_EXCEPTION) - :"cc","0","1","2","memory"); -#endif - return ccode; -} - -static inline int -sen(int msg_len, unsigned char *msg_ext, struct ap_status_word *stat) -{ - int ccode; - - asm volatile -#ifdef CONFIG_64BIT - (" lgr 6,%3 \n" - " llgfr 7,%2 \n" - " llgt 0,0(6) \n" - " lghi 1,64 \n" - " sll 1,24 \n" - " or 0,1 \n" - " la 6,4(6) \n" - " llgt 2,0(6) \n" - " llgt 3,4(6) \n" - " la 6,8(6) \n" - " slr 1,1 \n" - "0: .long 0xb2ad0026 \n" - "1: brc 2,0b \n" - " ipm %0 \n" - " srl %0,28 \n" - " iihh %0,0 \n" - " iihl %0,0 \n" - " lgr %1,1 \n" - "2: \n" - ".section .fixup,\"ax\" \n" - "3: \n" - " lhi %0,%h4 \n" - " jg 2b \n" - ".previous \n" - ".section __ex_table,\"a\" \n" - " .align 8 \n" - " .quad 0b,3b \n" - " .quad 1b,3b \n" - ".previous" - :"=d" (ccode),"=d" (*stat) - :"d" (msg_len),"a" (msg_ext), "K" (DEV_SEN_EXCEPTION) - :"cc","0","1","2","3","6","7","memory"); -#else - (" lr 6,%3 \n" - " lr 7,%2 \n" - " l 0,0(6) \n" - " lhi 1,64 \n" - " sll 1,24 \n" - " or 0,1 \n" - " la 6,4(6) \n" - " l 2,0(6) \n" - " l 3,4(6) \n" - " la 6,8(6) \n" - " slr 1,1 \n" - "0: .long 0xb2ad0026 \n" - "1: brc 2,0b \n" - " ipm %0 \n" - " srl %0,28 \n" - " lr %1,1 \n" - "2: \n" - ".section .fixup,\"ax\" \n" - "3: \n" - " lhi %0,%h4 \n" - " bras 1,4f \n" - " .long 2b \n" - "4: \n" - " l 1,0(1) \n" - " br 1 \n" - ".previous \n" - ".section __ex_table,\"a\" \n" - " .align 4 \n" - " .long 0b,3b \n" - " .long 1b,3b \n" - ".previous" - :"=d" (ccode),"=d" (*stat) - :"d" (msg_len),"a" (msg_ext), "K" (DEV_SEN_EXCEPTION) - :"cc","0","1","2","3","6","7","memory"); -#endif - return ccode; -} - -static inline int -rec(int q_nr, int buff_l, unsigned char *rsp, unsigned char *id, - struct ap_status_word *st) -{ - int ccode; - - asm volatile -#ifdef CONFIG_64BIT - (" llgfr 0,%2 \n" - " lgr 3,%4 \n" - " lgr 6,%3 \n" - " llgfr 7,%5 \n" - " lghi 1,128 \n" - " sll 1,24 \n" - " or 0,1 \n" - " slgr 1,1 \n" - " lgr 2,1 \n" - " lgr 4,1 \n" - " lgr 5,1 \n" - "0: .long 0xb2ae0046 \n" - "1: brc 2,0b \n" - " brc 4,0b \n" - " ipm %0 \n" - " 
srl %0,28 \n" - " iihh %0,0 \n" - " iihl %0,0 \n" - " lgr %1,1 \n" - " st 4,0(3) \n" - " st 5,4(3) \n" - "2: \n" - ".section .fixup,\"ax\" \n" - "3: \n" - " lhi %0,%h6 \n" - " jg 2b \n" - ".previous \n" - ".section __ex_table,\"a\" \n" - " .align 8 \n" - " .quad 0b,3b \n" - " .quad 1b,3b \n" - ".previous" - :"=d"(ccode),"=d"(*st) - :"d" (q_nr), "d" (rsp), "d" (id), "d" (buff_l), "K" (DEV_REC_EXCEPTION) - :"cc","0","1","2","3","4","5","6","7","memory"); -#else - (" lr 0,%2 \n" - " lr 3,%4 \n" - " lr 6,%3 \n" - " lr 7,%5 \n" - " lhi 1,128 \n" - " sll 1,24 \n" - " or 0,1 \n" - " slr 1,1 \n" - " lr 2,1 \n" - " lr 4,1 \n" - " lr 5,1 \n" - "0: .long 0xb2ae0046 \n" - "1: brc 2,0b \n" - " brc 4,0b \n" - " ipm %0 \n" - " srl %0,28 \n" - " lr %1,1 \n" - " st 4,0(3) \n" - " st 5,4(3) \n" - "2: \n" - ".section .fixup,\"ax\" \n" - "3: \n" - " lhi %0,%h6 \n" - " bras 1,4f \n" - " .long 2b \n" - "4: \n" - " l 1,0(1) \n" - " br 1 \n" - ".previous \n" - ".section __ex_table,\"a\" \n" - " .align 4 \n" - " .long 0b,3b \n" - " .long 1b,3b \n" - ".previous" - :"=d"(ccode),"=d"(*st) - :"d" (q_nr), "d" (rsp), "d" (id), "d" (buff_l), "K" (DEV_REC_EXCEPTION) - :"cc","0","1","2","3","4","5","6","7","memory"); -#endif - return ccode; -} - -static inline void -itoLe2(int *i_p, unsigned char *lechars) -{ - *lechars = *((unsigned char *) i_p + sizeof(int) - 1); - *(lechars + 1) = *((unsigned char *) i_p + sizeof(int) - 2); -} - -static inline void -le2toI(unsigned char *lechars, int *i_p) -{ - unsigned char *ic_p; - *i_p = 0; - ic_p = (unsigned char *) i_p; - *(ic_p + 2) = *(lechars + 1); - *(ic_p + 3) = *(lechars); -} - -static inline int -is_empty(unsigned char *ptr, int len) -{ - return !memcmp(ptr, (unsigned char *) &static_pvt_me_key+60, len); -} - -enum hdstat -query_online(int deviceNr, int cdx, int resetNr, int *q_depth, int *dev_type) -{ - int q_nr, i, t_depth, t_dev_type; - enum devstat ccode; - struct ap_status_word stat_word; - enum hdstat stat; - int break_out; - - q_nr = (deviceNr << SKIP_BITL) + cdx; - stat = HD_BUSY; - ccode = testq(q_nr, &t_depth, &t_dev_type, &stat_word); - PDEBUG("ccode %d response_code %02X\n", ccode, stat_word.response_code); - break_out = 0; - for (i = 0; i < resetNr; i++) { - if (ccode > 3) { - PRINTKC("Exception testing device %d\n", i); - return HD_TSQ_EXCEPTION; - } - switch (ccode) { - case 0: - PDEBUG("t_dev_type %d\n", t_dev_type); - break_out = 1; - stat = HD_ONLINE; - *q_depth = t_depth + 1; - switch (t_dev_type) { - case PCICA_HW: - *dev_type = PCICA; - break; - case PCICC_HW: - *dev_type = PCICC; - break; - case PCIXCC_HW: - *dev_type = PCIXCC_UNK; - break; - case CEX2C_HW: - *dev_type = CEX2C; - break; - case CEX2A_HW: - *dev_type = CEX2A; - break; - default: - *dev_type = NILDEV; - break; - } - PDEBUG("available device %d: Q depth = %d, dev " - "type = %d, stat = %02X%02X%02X%02X\n", - deviceNr, *q_depth, *dev_type, - stat_word.q_stat_flags, - stat_word.response_code, - stat_word.reserved[0], - stat_word.reserved[1]); - break; - case 3: - switch (stat_word.response_code) { - case AP_RESPONSE_NORMAL: - stat = HD_ONLINE; - break_out = 1; - *q_depth = t_depth + 1; - *dev_type = t_dev_type; - PDEBUG("cc3, available device " - "%d: Q depth = %d, dev " - "type = %d, stat = " - "%02X%02X%02X%02X\n", - deviceNr, *q_depth, - *dev_type, - stat_word.q_stat_flags, - stat_word.response_code, - stat_word.reserved[0], - stat_word.reserved[1]); - break; - case AP_RESPONSE_Q_NOT_AVAIL: - stat = HD_NOT_THERE; - break_out = 1; - break; - case AP_RESPONSE_RESET_IN_PROGRESS: - 
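
The itoLe2()/le2toI() helpers above convert the low 16 bits of a (big-endian, s390) int to and from the little-endian two-byte length fields embedded in a CPRB. A portable arithmetic equivalent, as a sketch assuming the value fits in 16 bits:

	static inline void itole2(int v, unsigned char *le)
	{
		le[0] = v & 0xff;		/* least significant byte first */
		le[1] = (v >> 8) & 0xff;
	}

	static inline int le2toi(const unsigned char *le)
	{
		return le[0] | (le[1] << 8);
	}
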
PDEBUG("device %d in reset\n", - deviceNr); - break; - case AP_RESPONSE_DECONFIGURED: - stat = HD_DECONFIGURED; - break_out = 1; - break; - case AP_RESPONSE_CHECKSTOPPED: - stat = HD_CHECKSTOPPED; - break_out = 1; - break; - case AP_RESPONSE_BUSY: - PDEBUG("device %d busy\n", - deviceNr); - break; - default: - break; - } - break; - default: - stat = HD_NOT_THERE; - break_out = 1; - break; - } - if (break_out) - break; - - udelay(5); - - ccode = testq(q_nr, &t_depth, &t_dev_type, &stat_word); - } - return stat; -} - -enum devstat -reset_device(int deviceNr, int cdx, int resetNr) -{ - int q_nr, ccode = 0, dummy_qdepth, dummy_devType, i; - struct ap_status_word stat_word; - enum devstat stat; - int break_out; - - q_nr = (deviceNr << SKIP_BITL) + cdx; - stat = DEV_GONE; - ccode = resetq(q_nr, &stat_word); - if (ccode > 3) - return DEV_RSQ_EXCEPTION; - - break_out = 0; - for (i = 0; i < resetNr; i++) { - switch (ccode) { - case 0: - stat = DEV_ONLINE; - if (stat_word.q_stat_flags & AP_Q_STATUS_EMPTY) - break_out = 1; - break; - case 3: - switch (stat_word.response_code) { - case AP_RESPONSE_NORMAL: - stat = DEV_ONLINE; - if (stat_word.q_stat_flags & AP_Q_STATUS_EMPTY) - break_out = 1; - break; - case AP_RESPONSE_Q_NOT_AVAIL: - case AP_RESPONSE_DECONFIGURED: - case AP_RESPONSE_CHECKSTOPPED: - stat = DEV_GONE; - break_out = 1; - break; - case AP_RESPONSE_RESET_IN_PROGRESS: - case AP_RESPONSE_BUSY: - default: - break; - } - break; - default: - stat = DEV_GONE; - break_out = 1; - break; - } - if (break_out == 1) - break; - udelay(5); - - ccode = testq(q_nr, &dummy_qdepth, &dummy_devType, &stat_word); - if (ccode > 3) { - stat = DEV_TSQ_EXCEPTION; - break; - } - } - PDEBUG("Number of testq's needed for reset: %d\n", i); - - if (i >= resetNr) { - stat = DEV_GONE; - } - - return stat; -} - -#ifdef DEBUG_HYDRA_MSGS -static inline void -print_buffer(unsigned char *buffer, int bufflen) -{ - int i; - for (i = 0; i < bufflen; i += 16) { - PRINTK("%04X: %02X%02X%02X%02X %02X%02X%02X%02X " - "%02X%02X%02X%02X %02X%02X%02X%02X\n", i, - buffer[i+0], buffer[i+1], buffer[i+2], buffer[i+3], - buffer[i+4], buffer[i+5], buffer[i+6], buffer[i+7], - buffer[i+8], buffer[i+9], buffer[i+10], buffer[i+11], - buffer[i+12], buffer[i+13], buffer[i+14], buffer[i+15]); - } -} -#endif - -enum devstat -send_to_AP(int dev_nr, int cdx, int msg_len, unsigned char *msg_ext) -{ - struct ap_status_word stat_word; - enum devstat stat; - int ccode; - u32 *q_nr_p = (u32 *)msg_ext; - - *q_nr_p = (dev_nr << SKIP_BITL) + cdx; - PDEBUG("msg_len passed to sen: %d\n", msg_len); - PDEBUG("q number passed to sen: %02x%02x%02x%02x\n", - msg_ext[0], msg_ext[1], msg_ext[2], msg_ext[3]); - stat = DEV_GONE; - -#ifdef DEBUG_HYDRA_MSGS - PRINTK("Request header: %02X%02X%02X%02X %02X%02X%02X%02X " - "%02X%02X%02X%02X\n", - msg_ext[0], msg_ext[1], msg_ext[2], msg_ext[3], - msg_ext[4], msg_ext[5], msg_ext[6], msg_ext[7], - msg_ext[8], msg_ext[9], msg_ext[10], msg_ext[11]); - print_buffer(msg_ext+CALLER_HEADER, msg_len); -#endif - - ccode = sen(msg_len, msg_ext, &stat_word); - if (ccode > 3) - return DEV_SEN_EXCEPTION; - - PDEBUG("nq cc: %u, st: %02x%02x%02x%02x\n", - ccode, stat_word.q_stat_flags, stat_word.response_code, - stat_word.reserved[0], stat_word.reserved[1]); - switch (ccode) { - case 0: - stat = DEV_ONLINE; - break; - case 1: - stat = DEV_GONE; - break; - case 3: - switch (stat_word.response_code) { - case AP_RESPONSE_NORMAL: - stat = DEV_ONLINE; - break; - case AP_RESPONSE_Q_FULL: - stat = DEV_QUEUE_FULL; - break; - default: - stat = 
DEV_GONE; - break; - } - break; - default: - stat = DEV_GONE; - break; - } - - return stat; -} - -enum devstat -receive_from_AP(int dev_nr, int cdx, int resplen, unsigned char *resp, - unsigned char *psmid) -{ - int ccode; - struct ap_status_word stat_word; - enum devstat stat; - - memset(resp, 0x00, 8); - - ccode = rec((dev_nr << SKIP_BITL) + cdx, resplen, resp, psmid, - &stat_word); - if (ccode > 3) - return DEV_REC_EXCEPTION; - - PDEBUG("dq cc: %u, st: %02x%02x%02x%02x\n", - ccode, stat_word.q_stat_flags, stat_word.response_code, - stat_word.reserved[0], stat_word.reserved[1]); - - stat = DEV_GONE; - switch (ccode) { - case 0: - stat = DEV_ONLINE; -#ifdef DEBUG_HYDRA_MSGS - print_buffer(resp, resplen); -#endif - break; - case 3: - switch (stat_word.response_code) { - case AP_RESPONSE_NORMAL: - stat = DEV_ONLINE; - break; - case AP_RESPONSE_NO_PENDING_REPLY: - if (stat_word.q_stat_flags & AP_Q_STATUS_EMPTY) - stat = DEV_EMPTY; - else - stat = DEV_NO_WORK; - break; - case AP_RESPONSE_INDEX_TOO_BIG: - case AP_RESPONSE_NO_FIRST_PART: - case AP_RESPONSE_MESSAGE_TOO_BIG: - stat = DEV_BAD_MESSAGE; - break; - default: - break; - } - break; - default: - break; - } - - return stat; -} - -static inline int -pad_msg(unsigned char *buffer, int totalLength, int msgLength) -{ - int pad_len; - - for (pad_len = 0; pad_len < (totalLength - msgLength); pad_len++) - if (buffer[pad_len] != 0x00) - break; - pad_len -= 3; - if (pad_len < 8) - return SEN_PAD_ERROR; - - buffer[0] = 0x00; - buffer[1] = 0x02; - - memcpy(buffer+2, static_pad, pad_len); - - buffer[pad_len + 2] = 0x00; - - return 0; -} - -static inline int -is_common_public_key(unsigned char *key, int len) -{ - int i; - - for (i = 0; i < len; i++) - if (key[i]) - break; - key += i; - len -= i; - if (((len == 1) && (key[0] == 3)) || - ((len == 3) && (key[0] == 1) && (key[1] == 0) && (key[2] == 1))) - return 1; - - return 0; -} - -static int -ICAMEX_msg_to_type4MEX_msg(struct ica_rsa_modexpo *icaMex_p, int *z90cMsg_l_p, - union type4_msg *z90cMsg_p) -{ - int mod_len, msg_size, mod_tgt_len, exp_tgt_len, inp_tgt_len; - unsigned char *mod_tgt, *exp_tgt, *inp_tgt; - union type4_msg *tmp_type4_msg; - - mod_len = icaMex_p->inputdatalength; - - msg_size = ((mod_len <= 128) ? 
TYPE4_SME_LEN : TYPE4_LME_LEN) + - CALLER_HEADER; - - memset(z90cMsg_p, 0, msg_size); - - tmp_type4_msg = (union type4_msg *) - ((unsigned char *) z90cMsg_p + CALLER_HEADER); - - tmp_type4_msg->sme.header.msg_type_code = TYPE4_TYPE_CODE; - tmp_type4_msg->sme.header.request_code = TYPE4_REQU_CODE; - - if (mod_len <= 128) { - tmp_type4_msg->sme.header.msg_fmt = TYPE4_SME_FMT; - tmp_type4_msg->sme.header.msg_len = TYPE4_SME_LEN; - mod_tgt = tmp_type4_msg->sme.modulus; - mod_tgt_len = sizeof(tmp_type4_msg->sme.modulus); - exp_tgt = tmp_type4_msg->sme.exponent; - exp_tgt_len = sizeof(tmp_type4_msg->sme.exponent); - inp_tgt = tmp_type4_msg->sme.message; - inp_tgt_len = sizeof(tmp_type4_msg->sme.message); - } else { - tmp_type4_msg->lme.header.msg_fmt = TYPE4_LME_FMT; - tmp_type4_msg->lme.header.msg_len = TYPE4_LME_LEN; - mod_tgt = tmp_type4_msg->lme.modulus; - mod_tgt_len = sizeof(tmp_type4_msg->lme.modulus); - exp_tgt = tmp_type4_msg->lme.exponent; - exp_tgt_len = sizeof(tmp_type4_msg->lme.exponent); - inp_tgt = tmp_type4_msg->lme.message; - inp_tgt_len = sizeof(tmp_type4_msg->lme.message); - } - - mod_tgt += (mod_tgt_len - mod_len); - if (copy_from_user(mod_tgt, icaMex_p->n_modulus, mod_len)) - return SEN_RELEASED; - if (is_empty(mod_tgt, mod_len)) - return SEN_USER_ERROR; - exp_tgt += (exp_tgt_len - mod_len); - if (copy_from_user(exp_tgt, icaMex_p->b_key, mod_len)) - return SEN_RELEASED; - if (is_empty(exp_tgt, mod_len)) - return SEN_USER_ERROR; - inp_tgt += (inp_tgt_len - mod_len); - if (copy_from_user(inp_tgt, icaMex_p->inputdata, mod_len)) - return SEN_RELEASED; - if (is_empty(inp_tgt, mod_len)) - return SEN_USER_ERROR; - - *z90cMsg_l_p = msg_size - CALLER_HEADER; - - return 0; -} - -static int -ICACRT_msg_to_type4CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p, - int *z90cMsg_l_p, union type4_msg *z90cMsg_p) -{ - int mod_len, short_len, long_len, tmp_size, p_tgt_len, q_tgt_len, - dp_tgt_len, dq_tgt_len, u_tgt_len, inp_tgt_len; - unsigned char *p_tgt, *q_tgt, *dp_tgt, *dq_tgt, *u_tgt, *inp_tgt; - union type4_msg *tmp_type4_msg; - - mod_len = icaMsg_p->inputdatalength; - short_len = mod_len / 2; - long_len = mod_len / 2 + 8; - - tmp_size = ((mod_len <= 128) ? 
TYPE4_SCR_LEN : TYPE4_LCR_LEN) + - CALLER_HEADER; - - memset(z90cMsg_p, 0, tmp_size); - - tmp_type4_msg = (union type4_msg *) - ((unsigned char *) z90cMsg_p + CALLER_HEADER); - - tmp_type4_msg->scr.header.msg_type_code = TYPE4_TYPE_CODE; - tmp_type4_msg->scr.header.request_code = TYPE4_REQU_CODE; - if (mod_len <= 128) { - tmp_type4_msg->scr.header.msg_fmt = TYPE4_SCR_FMT; - tmp_type4_msg->scr.header.msg_len = TYPE4_SCR_LEN; - p_tgt = tmp_type4_msg->scr.p; - p_tgt_len = sizeof(tmp_type4_msg->scr.p); - q_tgt = tmp_type4_msg->scr.q; - q_tgt_len = sizeof(tmp_type4_msg->scr.q); - dp_tgt = tmp_type4_msg->scr.dp; - dp_tgt_len = sizeof(tmp_type4_msg->scr.dp); - dq_tgt = tmp_type4_msg->scr.dq; - dq_tgt_len = sizeof(tmp_type4_msg->scr.dq); - u_tgt = tmp_type4_msg->scr.u; - u_tgt_len = sizeof(tmp_type4_msg->scr.u); - inp_tgt = tmp_type4_msg->scr.message; - inp_tgt_len = sizeof(tmp_type4_msg->scr.message); - } else { - tmp_type4_msg->lcr.header.msg_fmt = TYPE4_LCR_FMT; - tmp_type4_msg->lcr.header.msg_len = TYPE4_LCR_LEN; - p_tgt = tmp_type4_msg->lcr.p; - p_tgt_len = sizeof(tmp_type4_msg->lcr.p); - q_tgt = tmp_type4_msg->lcr.q; - q_tgt_len = sizeof(tmp_type4_msg->lcr.q); - dp_tgt = tmp_type4_msg->lcr.dp; - dp_tgt_len = sizeof(tmp_type4_msg->lcr.dp); - dq_tgt = tmp_type4_msg->lcr.dq; - dq_tgt_len = sizeof(tmp_type4_msg->lcr.dq); - u_tgt = tmp_type4_msg->lcr.u; - u_tgt_len = sizeof(tmp_type4_msg->lcr.u); - inp_tgt = tmp_type4_msg->lcr.message; - inp_tgt_len = sizeof(tmp_type4_msg->lcr.message); - } - - p_tgt += (p_tgt_len - long_len); - if (copy_from_user(p_tgt, icaMsg_p->np_prime, long_len)) - return SEN_RELEASED; - if (is_empty(p_tgt, long_len)) - return SEN_USER_ERROR; - q_tgt += (q_tgt_len - short_len); - if (copy_from_user(q_tgt, icaMsg_p->nq_prime, short_len)) - return SEN_RELEASED; - if (is_empty(q_tgt, short_len)) - return SEN_USER_ERROR; - dp_tgt += (dp_tgt_len - long_len); - if (copy_from_user(dp_tgt, icaMsg_p->bp_key, long_len)) - return SEN_RELEASED; - if (is_empty(dp_tgt, long_len)) - return SEN_USER_ERROR; - dq_tgt += (dq_tgt_len - short_len); - if (copy_from_user(dq_tgt, icaMsg_p->bq_key, short_len)) - return SEN_RELEASED; - if (is_empty(dq_tgt, short_len)) - return SEN_USER_ERROR; - u_tgt += (u_tgt_len - long_len); - if (copy_from_user(u_tgt, icaMsg_p->u_mult_inv, long_len)) - return SEN_RELEASED; - if (is_empty(u_tgt, long_len)) - return SEN_USER_ERROR; - inp_tgt += (inp_tgt_len - mod_len); - if (copy_from_user(inp_tgt, icaMsg_p->inputdata, mod_len)) - return SEN_RELEASED; - if (is_empty(inp_tgt, mod_len)) - return SEN_USER_ERROR; - - *z90cMsg_l_p = tmp_size - CALLER_HEADER; - - return 0; -} - -static int -ICAMEX_msg_to_type6MEX_de_msg(struct ica_rsa_modexpo *icaMsg_p, int cdx, - int *z90cMsg_l_p, struct type6_msg *z90cMsg_p) -{ - int mod_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l; - unsigned char *temp; - struct type6_hdr *tp6Hdr_p; - struct CPRB *cprb_p; - struct cca_private_ext_ME *key_p; - static int deprecated_msg_count = 0; - - mod_len = icaMsg_p->inputdatalength; - tmp_size = FIXED_TYPE6_ME_LEN + mod_len; - total_CPRB_len = tmp_size - sizeof(struct type6_hdr); - parmBlock_l = total_CPRB_len - sizeof(struct CPRB); - tmp_size = 4*((tmp_size + 3)/4) + CALLER_HEADER; - - memset(z90cMsg_p, 0, tmp_size); - - temp = (unsigned char *)z90cMsg_p + CALLER_HEADER; - memcpy(temp, &static_type6_hdr, sizeof(struct type6_hdr)); - tp6Hdr_p = (struct type6_hdr *)temp; - tp6Hdr_p->ToCardLen1 = 4*((total_CPRB_len+3)/4); - tp6Hdr_p->FromCardLen1 = RESPONSE_CPRB_SIZE; - - temp += sizeof(struct 
type6_hdr); - memcpy(temp, &static_cprb, sizeof(struct CPRB)); - cprb_p = (struct CPRB *) temp; - cprb_p->usage_domain[0]= (unsigned char)cdx; - itoLe2(&parmBlock_l, cprb_p->req_parml); - itoLe2((int *)&(tp6Hdr_p->FromCardLen1), cprb_p->rpl_parml); - - temp += sizeof(struct CPRB); - memcpy(temp, &static_pkd_function_and_rules, - sizeof(struct function_and_rules_block)); - - temp += sizeof(struct function_and_rules_block); - vud_len = 2 + icaMsg_p->inputdatalength; - itoLe2(&vud_len, temp); - - temp += 2; - if (copy_from_user(temp, icaMsg_p->inputdata, mod_len)) - return SEN_RELEASED; - if (is_empty(temp, mod_len)) - return SEN_USER_ERROR; - - temp += mod_len; - memcpy(temp, &static_T6_keyBlock_hdr, sizeof(struct T6_keyBlock_hdr)); - - temp += sizeof(struct T6_keyBlock_hdr); - memcpy(temp, &static_pvt_me_key, sizeof(struct cca_private_ext_ME)); - key_p = (struct cca_private_ext_ME *)temp; - temp = key_p->pvtMESec.exponent + sizeof(key_p->pvtMESec.exponent) - - mod_len; - if (copy_from_user(temp, icaMsg_p->b_key, mod_len)) - return SEN_RELEASED; - if (is_empty(temp, mod_len)) - return SEN_USER_ERROR; - - if (is_common_public_key(temp, mod_len)) { - if (deprecated_msg_count < 20) { - PRINTK("Common public key used for modex decrypt\n"); - deprecated_msg_count++; - if (deprecated_msg_count == 20) - PRINTK("No longer issuing messages about common" - " public key for modex decrypt.\n"); - } - return SEN_NOT_AVAIL; - } - - temp = key_p->pvtMESec.modulus + sizeof(key_p->pvtMESec.modulus) - - mod_len; - if (copy_from_user(temp, icaMsg_p->n_modulus, mod_len)) - return SEN_RELEASED; - if (is_empty(temp, mod_len)) - return SEN_USER_ERROR; - - key_p->pubMESec.modulus_bit_len = 8 * mod_len; - - *z90cMsg_l_p = tmp_size - CALLER_HEADER; - - return 0; -} - -static int -ICAMEX_msg_to_type6MEX_en_msg(struct ica_rsa_modexpo *icaMsg_p, int cdx, - int *z90cMsg_l_p, struct type6_msg *z90cMsg_p) -{ - int mod_len, vud_len, exp_len, key_len; - int pad_len, tmp_size, total_CPRB_len, parmBlock_l, i; - unsigned char *temp_exp, *exp_p, *temp; - struct type6_hdr *tp6Hdr_p; - struct CPRB *cprb_p; - struct cca_public_key *key_p; - struct T6_keyBlock_hdr *keyb_p; - - temp_exp = kmalloc(256, GFP_KERNEL); - if (!temp_exp) - return EGETBUFF; - mod_len = icaMsg_p->inputdatalength; - if (copy_from_user(temp_exp, icaMsg_p->b_key, mod_len)) { - kfree(temp_exp); - return SEN_RELEASED; - } - if (is_empty(temp_exp, mod_len)) { - kfree(temp_exp); - return SEN_USER_ERROR; - } - - exp_p = temp_exp; - for (i = 0; i < mod_len; i++) - if (exp_p[i]) - break; - if (i >= mod_len) { - kfree(temp_exp); - return SEN_USER_ERROR; - } - - exp_len = mod_len - i; - exp_p += i; - - PDEBUG("exp_len after computation: %08x\n", exp_len); - tmp_size = FIXED_TYPE6_ME_EN_LEN + 2 * mod_len + exp_len; - total_CPRB_len = tmp_size - sizeof(struct type6_hdr); - parmBlock_l = total_CPRB_len - sizeof(struct CPRB); - tmp_size = 4*((tmp_size + 3)/4) + CALLER_HEADER; - - vud_len = 2 + mod_len; - memset(z90cMsg_p, 0, tmp_size); - - temp = (unsigned char *)z90cMsg_p + CALLER_HEADER; - memcpy(temp, &static_type6_hdr, sizeof(struct type6_hdr)); - tp6Hdr_p = (struct type6_hdr *)temp; - tp6Hdr_p->ToCardLen1 = 4*((total_CPRB_len+3)/4); - tp6Hdr_p->FromCardLen1 = RESPONSE_CPRB_SIZE; - memcpy(tp6Hdr_p->function_code, static_PKE_function_code, - sizeof(static_PKE_function_code)); - temp += sizeof(struct type6_hdr); - memcpy(temp, &static_cprb, sizeof(struct CPRB)); - cprb_p = (struct CPRB *) temp; - cprb_p->usage_domain[0]= (unsigned char)cdx; - itoLe2((int 
*)&(tp6Hdr_p->FromCardLen1), cprb_p->rpl_parml); - temp += sizeof(struct CPRB); - memcpy(temp, &static_pke_function_and_rules, - sizeof(struct function_and_rules_block)); - temp += sizeof(struct function_and_rules_block); - temp += 2; - if (copy_from_user(temp, icaMsg_p->inputdata, mod_len)) { - kfree(temp_exp); - return SEN_RELEASED; - } - if (is_empty(temp, mod_len)) { - kfree(temp_exp); - return SEN_USER_ERROR; - } - if ((temp[0] != 0x00) || (temp[1] != 0x02)) { - kfree(temp_exp); - return SEN_NOT_AVAIL; - } - for (i = 2; i < mod_len; i++) - if (temp[i] == 0x00) - break; - if ((i < 9) || (i > (mod_len - 2))) { - kfree(temp_exp); - return SEN_NOT_AVAIL; - } - pad_len = i + 1; - vud_len = mod_len - pad_len; - memmove(temp, temp+pad_len, vud_len); - temp -= 2; - vud_len += 2; - itoLe2(&vud_len, temp); - temp += (vud_len); - keyb_p = (struct T6_keyBlock_hdr *)temp; - temp += sizeof(struct T6_keyBlock_hdr); - memcpy(temp, &static_public_key, sizeof(static_public_key)); - key_p = (struct cca_public_key *)temp; - temp = key_p->pubSec.exponent; - memcpy(temp, exp_p, exp_len); - kfree(temp_exp); - temp += exp_len; - if (copy_from_user(temp, icaMsg_p->n_modulus, mod_len)) - return SEN_RELEASED; - if (is_empty(temp, mod_len)) - return SEN_USER_ERROR; - key_p->pubSec.modulus_bit_len = 8 * mod_len; - key_p->pubSec.modulus_byte_len = mod_len; - key_p->pubSec.exponent_len = exp_len; - key_p->pubSec.section_length = CALLER_HEADER + mod_len + exp_len; - key_len = key_p->pubSec.section_length + sizeof(struct cca_token_hdr); - key_p->pubHdr.token_length = key_len; - key_len += 4; - itoLe2(&key_len, keyb_p->ulen); - key_len += 2; - itoLe2(&key_len, keyb_p->blen); - parmBlock_l -= pad_len; - itoLe2(&parmBlock_l, cprb_p->req_parml); - *z90cMsg_l_p = tmp_size - CALLER_HEADER; - - return 0; -} - -static int -ICACRT_msg_to_type6CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p, int cdx, - int *z90cMsg_l_p, struct type6_msg *z90cMsg_p) -{ - int mod_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l, short_len; - int long_len, pad_len, keyPartsLen, tmp_l; - unsigned char *tgt_p, *temp; - struct type6_hdr *tp6Hdr_p; - struct CPRB *cprb_p; - struct cca_token_hdr *keyHdr_p; - struct cca_pvt_ext_CRT_sec *pvtSec_p; - struct cca_public_sec *pubSec_p; - - mod_len = icaMsg_p->inputdatalength; - short_len = mod_len / 2; - long_len = 8 + short_len; - keyPartsLen = 3 * long_len + 2 * short_len; - pad_len = (8 - (keyPartsLen % 8)) % 8; - keyPartsLen += pad_len + mod_len; - tmp_size = FIXED_TYPE6_CR_LEN + keyPartsLen + mod_len; - total_CPRB_len = tmp_size - sizeof(struct type6_hdr); - parmBlock_l = total_CPRB_len - sizeof(struct CPRB); - vud_len = 2 + mod_len; - tmp_size = 4*((tmp_size + 3)/4) + CALLER_HEADER; - - memset(z90cMsg_p, 0, tmp_size); - tgt_p = (unsigned char *)z90cMsg_p + CALLER_HEADER; - memcpy(tgt_p, &static_type6_hdr, sizeof(struct type6_hdr)); - tp6Hdr_p = (struct type6_hdr *)tgt_p; - tp6Hdr_p->ToCardLen1 = 4*((total_CPRB_len+3)/4); - tp6Hdr_p->FromCardLen1 = RESPONSE_CPRB_SIZE; - tgt_p += sizeof(struct type6_hdr); - cprb_p = (struct CPRB *) tgt_p; - memcpy(tgt_p, &static_cprb, sizeof(struct CPRB)); - cprb_p->usage_domain[0]= *((unsigned char *)(&(cdx))+3); - itoLe2(&parmBlock_l, cprb_p->req_parml); - memcpy(cprb_p->rpl_parml, cprb_p->req_parml, - sizeof(cprb_p->req_parml)); - tgt_p += sizeof(struct CPRB); - memcpy(tgt_p, &static_pkd_function_and_rules, - sizeof(struct function_and_rules_block)); - tgt_p += sizeof(struct function_and_rules_block); - itoLe2(&vud_len, tgt_p); - tgt_p += 2; - if 
(copy_from_user(tgt_p, icaMsg_p->inputdata, mod_len)) - return SEN_RELEASED; - if (is_empty(tgt_p, mod_len)) - return SEN_USER_ERROR; - tgt_p += mod_len; - tmp_l = sizeof(struct T6_keyBlock_hdr) + sizeof(struct cca_token_hdr) + - sizeof(struct cca_pvt_ext_CRT_sec) + 0x0F + keyPartsLen; - itoLe2(&tmp_l, tgt_p); - temp = tgt_p + 2; - tmp_l -= 2; - itoLe2(&tmp_l, temp); - tgt_p += sizeof(struct T6_keyBlock_hdr); - keyHdr_p = (struct cca_token_hdr *)tgt_p; - keyHdr_p->token_identifier = CCA_TKN_HDR_ID_EXT; - tmp_l -= 4; - keyHdr_p->token_length = tmp_l; - tgt_p += sizeof(struct cca_token_hdr); - pvtSec_p = (struct cca_pvt_ext_CRT_sec *)tgt_p; - pvtSec_p->section_identifier = CCA_PVT_EXT_CRT_SEC_ID_PVT; - pvtSec_p->section_length = - sizeof(struct cca_pvt_ext_CRT_sec) + keyPartsLen; - pvtSec_p->key_format = CCA_PVT_EXT_CRT_SEC_FMT_CL; - pvtSec_p->key_use_flags[0] = CCA_PVT_USAGE_ALL; - pvtSec_p->p_len = long_len; - pvtSec_p->q_len = short_len; - pvtSec_p->dp_len = long_len; - pvtSec_p->dq_len = short_len; - pvtSec_p->u_len = long_len; - pvtSec_p->mod_len = mod_len; - pvtSec_p->pad_len = pad_len; - tgt_p += sizeof(struct cca_pvt_ext_CRT_sec); - if (copy_from_user(tgt_p, icaMsg_p->np_prime, long_len)) - return SEN_RELEASED; - if (is_empty(tgt_p, long_len)) - return SEN_USER_ERROR; - tgt_p += long_len; - if (copy_from_user(tgt_p, icaMsg_p->nq_prime, short_len)) - return SEN_RELEASED; - if (is_empty(tgt_p, short_len)) - return SEN_USER_ERROR; - tgt_p += short_len; - if (copy_from_user(tgt_p, icaMsg_p->bp_key, long_len)) - return SEN_RELEASED; - if (is_empty(tgt_p, long_len)) - return SEN_USER_ERROR; - tgt_p += long_len; - if (copy_from_user(tgt_p, icaMsg_p->bq_key, short_len)) - return SEN_RELEASED; - if (is_empty(tgt_p, short_len)) - return SEN_USER_ERROR; - tgt_p += short_len; - if (copy_from_user(tgt_p, icaMsg_p->u_mult_inv, long_len)) - return SEN_RELEASED; - if (is_empty(tgt_p, long_len)) - return SEN_USER_ERROR; - tgt_p += long_len; - tgt_p += pad_len; - memset(tgt_p, 0xFF, mod_len); - tgt_p += mod_len; - memcpy(tgt_p, &static_cca_pub_sec, sizeof(struct cca_public_sec)); - pubSec_p = (struct cca_public_sec *) tgt_p; - pubSec_p->modulus_bit_len = 8 * mod_len; - *z90cMsg_l_p = tmp_size - CALLER_HEADER; - - return 0; -} - -static int -ICAMEX_msg_to_type6MEX_msgX(struct ica_rsa_modexpo *icaMsg_p, int cdx, - int *z90cMsg_l_p, struct type6_msg *z90cMsg_p, - int dev_type) -{ - int mod_len, exp_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l; - int key_len, i; - unsigned char *temp_exp, *tgt_p, *temp, *exp_p; - struct type6_hdr *tp6Hdr_p; - struct CPRBX *cprbx_p; - struct cca_public_key *key_p; - struct T6_keyBlock_hdrX *keyb_p; - - temp_exp = kmalloc(256, GFP_KERNEL); - if (!temp_exp) - return EGETBUFF; - mod_len = icaMsg_p->inputdatalength; - if (copy_from_user(temp_exp, icaMsg_p->b_key, mod_len)) { - kfree(temp_exp); - return SEN_RELEASED; - } - if (is_empty(temp_exp, mod_len)) { - kfree(temp_exp); - return SEN_USER_ERROR; - } - exp_p = temp_exp; - for (i = 0; i < mod_len; i++) - if (exp_p[i]) - break; - if (i >= mod_len) { - kfree(temp_exp); - return SEN_USER_ERROR; - } - exp_len = mod_len - i; - exp_p += i; - PDEBUG("exp_len after computation: %08x\n", exp_len); - tmp_size = FIXED_TYPE6_ME_EN_LENX + 2 * mod_len + exp_len; - total_CPRB_len = tmp_size - sizeof(struct type6_hdr); - parmBlock_l = total_CPRB_len - sizeof(struct CPRBX); - tmp_size = tmp_size + CALLER_HEADER; - vud_len = 2 + mod_len; - memset(z90cMsg_p, 0, tmp_size); - tgt_p = (unsigned char *)z90cMsg_p + CALLER_HEADER; - 
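
Worked numbers for the CRT sizing used in these builders (an aside, not from the patch): a mod_len-byte private key is split into five parts -- p, dp and u take the "long" half, q and dq the "short" half -- and the total is padded to a multiple of 8. For a 1024-bit key:

	int mod_len   = 128;				/* 1024-bit modulus */
	int short_len = mod_len / 2;			/*  64 */
	int long_len  = 8 + short_len;			/*  72 */
	int key_parts = 3 * long_len + 2 * short_len;	/* 344 */
	int pad_len   = (8 - (key_parts % 8)) % 8;	/*   0 */

	key_parts += pad_len + mod_len;			/* 472 bytes in all */
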
memcpy(tgt_p, &static_type6_hdrX, sizeof(struct type6_hdr)); - tp6Hdr_p = (struct type6_hdr *)tgt_p; - tp6Hdr_p->ToCardLen1 = total_CPRB_len; - tp6Hdr_p->FromCardLen1 = RESPONSE_CPRBX_SIZE; - memcpy(tp6Hdr_p->function_code, static_PKE_function_code, - sizeof(static_PKE_function_code)); - tgt_p += sizeof(struct type6_hdr); - memcpy(tgt_p, &static_cprbx, sizeof(struct CPRBX)); - cprbx_p = (struct CPRBX *) tgt_p; - cprbx_p->domain = (unsigned short)cdx; - cprbx_p->rpl_msgbl = RESPONSE_CPRBX_SIZE; - tgt_p += sizeof(struct CPRBX); - if (dev_type == PCIXCC_MCL2) - memcpy(tgt_p, &static_pke_function_and_rulesX_MCL2, - sizeof(struct function_and_rules_block)); - else - memcpy(tgt_p, &static_pke_function_and_rulesX, - sizeof(struct function_and_rules_block)); - tgt_p += sizeof(struct function_and_rules_block); - - tgt_p += 2; - if (copy_from_user(tgt_p, icaMsg_p->inputdata, mod_len)) { - kfree(temp_exp); - return SEN_RELEASED; - } - if (is_empty(tgt_p, mod_len)) { - kfree(temp_exp); - return SEN_USER_ERROR; - } - tgt_p -= 2; - *((short *)tgt_p) = (short) vud_len; - tgt_p += vud_len; - keyb_p = (struct T6_keyBlock_hdrX *)tgt_p; - tgt_p += sizeof(struct T6_keyBlock_hdrX); - memcpy(tgt_p, &static_public_key, sizeof(static_public_key)); - key_p = (struct cca_public_key *)tgt_p; - temp = key_p->pubSec.exponent; - memcpy(temp, exp_p, exp_len); - kfree(temp_exp); - temp += exp_len; - if (copy_from_user(temp, icaMsg_p->n_modulus, mod_len)) - return SEN_RELEASED; - if (is_empty(temp, mod_len)) - return SEN_USER_ERROR; - key_p->pubSec.modulus_bit_len = 8 * mod_len; - key_p->pubSec.modulus_byte_len = mod_len; - key_p->pubSec.exponent_len = exp_len; - key_p->pubSec.section_length = CALLER_HEADER + mod_len + exp_len; - key_len = key_p->pubSec.section_length + sizeof(struct cca_token_hdr); - key_p->pubHdr.token_length = key_len; - key_len += 4; - keyb_p->ulen = (unsigned short)key_len; - key_len += 2; - keyb_p->blen = (unsigned short)key_len; - cprbx_p->req_parml = parmBlock_l; - *z90cMsg_l_p = tmp_size - CALLER_HEADER; - - return 0; -} - -static int -ICACRT_msg_to_type6CRT_msgX(struct ica_rsa_modexpo_crt *icaMsg_p, int cdx, - int *z90cMsg_l_p, struct type6_msg *z90cMsg_p, - int dev_type) -{ - int mod_len, vud_len, tmp_size, total_CPRB_len, parmBlock_l, short_len; - int long_len, pad_len, keyPartsLen, tmp_l; - unsigned char *tgt_p, *temp; - struct type6_hdr *tp6Hdr_p; - struct CPRBX *cprbx_p; - struct cca_token_hdr *keyHdr_p; - struct cca_pvt_ext_CRT_sec *pvtSec_p; - struct cca_public_sec *pubSec_p; - - mod_len = icaMsg_p->inputdatalength; - short_len = mod_len / 2; - long_len = 8 + short_len; - keyPartsLen = 3 * long_len + 2 * short_len; - pad_len = (8 - (keyPartsLen % 8)) % 8; - keyPartsLen += pad_len + mod_len; - tmp_size = FIXED_TYPE6_CR_LENX + keyPartsLen + mod_len; - total_CPRB_len = tmp_size - sizeof(struct type6_hdr); - parmBlock_l = total_CPRB_len - sizeof(struct CPRBX); - vud_len = 2 + mod_len; - tmp_size = tmp_size + CALLER_HEADER; - memset(z90cMsg_p, 0, tmp_size); - tgt_p = (unsigned char *)z90cMsg_p + CALLER_HEADER; - memcpy(tgt_p, &static_type6_hdrX, sizeof(struct type6_hdr)); - tp6Hdr_p = (struct type6_hdr *)tgt_p; - tp6Hdr_p->ToCardLen1 = total_CPRB_len; - tp6Hdr_p->FromCardLen1 = RESPONSE_CPRBX_SIZE; - tgt_p += sizeof(struct type6_hdr); - cprbx_p = (struct CPRBX *) tgt_p; - memcpy(tgt_p, &static_cprbx, sizeof(struct CPRBX)); - cprbx_p->domain = (unsigned short)cdx; - cprbx_p->req_parml = parmBlock_l; - cprbx_p->rpl_msgbl = parmBlock_l; - tgt_p += sizeof(struct CPRBX); - if (dev_type == 
PCIXCC_MCL2) - memcpy(tgt_p, &static_pkd_function_and_rulesX_MCL2, - sizeof(struct function_and_rules_block)); - else - memcpy(tgt_p, &static_pkd_function_and_rulesX, - sizeof(struct function_and_rules_block)); - tgt_p += sizeof(struct function_and_rules_block); - *((short *)tgt_p) = (short) vud_len; - tgt_p += 2; - if (copy_from_user(tgt_p, icaMsg_p->inputdata, mod_len)) - return SEN_RELEASED; - if (is_empty(tgt_p, mod_len)) - return SEN_USER_ERROR; - tgt_p += mod_len; - tmp_l = sizeof(struct T6_keyBlock_hdr) + sizeof(struct cca_token_hdr) + - sizeof(struct cca_pvt_ext_CRT_sec) + 0x0F + keyPartsLen; - *((short *)tgt_p) = (short) tmp_l; - temp = tgt_p + 2; - tmp_l -= 2; - *((short *)temp) = (short) tmp_l; - tgt_p += sizeof(struct T6_keyBlock_hdr); - keyHdr_p = (struct cca_token_hdr *)tgt_p; - keyHdr_p->token_identifier = CCA_TKN_HDR_ID_EXT; - tmp_l -= 4; - keyHdr_p->token_length = tmp_l; - tgt_p += sizeof(struct cca_token_hdr); - pvtSec_p = (struct cca_pvt_ext_CRT_sec *)tgt_p; - pvtSec_p->section_identifier = CCA_PVT_EXT_CRT_SEC_ID_PVT; - pvtSec_p->section_length = - sizeof(struct cca_pvt_ext_CRT_sec) + keyPartsLen; - pvtSec_p->key_format = CCA_PVT_EXT_CRT_SEC_FMT_CL; - pvtSec_p->key_use_flags[0] = CCA_PVT_USAGE_ALL; - pvtSec_p->p_len = long_len; - pvtSec_p->q_len = short_len; - pvtSec_p->dp_len = long_len; - pvtSec_p->dq_len = short_len; - pvtSec_p->u_len = long_len; - pvtSec_p->mod_len = mod_len; - pvtSec_p->pad_len = pad_len; - tgt_p += sizeof(struct cca_pvt_ext_CRT_sec); - if (copy_from_user(tgt_p, icaMsg_p->np_prime, long_len)) - return SEN_RELEASED; - if (is_empty(tgt_p, long_len)) - return SEN_USER_ERROR; - tgt_p += long_len; - if (copy_from_user(tgt_p, icaMsg_p->nq_prime, short_len)) - return SEN_RELEASED; - if (is_empty(tgt_p, short_len)) - return SEN_USER_ERROR; - tgt_p += short_len; - if (copy_from_user(tgt_p, icaMsg_p->bp_key, long_len)) - return SEN_RELEASED; - if (is_empty(tgt_p, long_len)) - return SEN_USER_ERROR; - tgt_p += long_len; - if (copy_from_user(tgt_p, icaMsg_p->bq_key, short_len)) - return SEN_RELEASED; - if (is_empty(tgt_p, short_len)) - return SEN_USER_ERROR; - tgt_p += short_len; - if (copy_from_user(tgt_p, icaMsg_p->u_mult_inv, long_len)) - return SEN_RELEASED; - if (is_empty(tgt_p, long_len)) - return SEN_USER_ERROR; - tgt_p += long_len; - tgt_p += pad_len; - memset(tgt_p, 0xFF, mod_len); - tgt_p += mod_len; - memcpy(tgt_p, &static_cca_pub_sec, sizeof(struct cca_public_sec)); - pubSec_p = (struct cca_public_sec *) tgt_p; - pubSec_p->modulus_bit_len = 8 * mod_len; - *z90cMsg_l_p = tmp_size - CALLER_HEADER; - - return 0; -} - -static int -ICAMEX_msg_to_type50MEX_msg(struct ica_rsa_modexpo *icaMex_p, int *z90cMsg_l_p, - union type50_msg *z90cMsg_p) -{ - int mod_len, msg_size, mod_tgt_len, exp_tgt_len, inp_tgt_len; - unsigned char *mod_tgt, *exp_tgt, *inp_tgt; - union type50_msg *tmp_type50_msg; - - mod_len = icaMex_p->inputdatalength; - - msg_size = ((mod_len <= 128) ? 
TYPE50_MEB1_LEN : TYPE50_MEB2_LEN) + - CALLER_HEADER; - - memset(z90cMsg_p, 0, msg_size); - - tmp_type50_msg = (union type50_msg *) - ((unsigned char *) z90cMsg_p + CALLER_HEADER); - - tmp_type50_msg->meb1.header.msg_type_code = TYPE50_TYPE_CODE; - - if (mod_len <= 128) { - tmp_type50_msg->meb1.header.msg_len = TYPE50_MEB1_LEN; - tmp_type50_msg->meb1.keyblock_type = TYPE50_MEB1_FMT; - mod_tgt = tmp_type50_msg->meb1.modulus; - mod_tgt_len = sizeof(tmp_type50_msg->meb1.modulus); - exp_tgt = tmp_type50_msg->meb1.exponent; - exp_tgt_len = sizeof(tmp_type50_msg->meb1.exponent); - inp_tgt = tmp_type50_msg->meb1.message; - inp_tgt_len = sizeof(tmp_type50_msg->meb1.message); - } else { - tmp_type50_msg->meb2.header.msg_len = TYPE50_MEB2_LEN; - tmp_type50_msg->meb2.keyblock_type = TYPE50_MEB2_FMT; - mod_tgt = tmp_type50_msg->meb2.modulus; - mod_tgt_len = sizeof(tmp_type50_msg->meb2.modulus); - exp_tgt = tmp_type50_msg->meb2.exponent; - exp_tgt_len = sizeof(tmp_type50_msg->meb2.exponent); - inp_tgt = tmp_type50_msg->meb2.message; - inp_tgt_len = sizeof(tmp_type50_msg->meb2.message); - } - - mod_tgt += (mod_tgt_len - mod_len); - if (copy_from_user(mod_tgt, icaMex_p->n_modulus, mod_len)) - return SEN_RELEASED; - if (is_empty(mod_tgt, mod_len)) - return SEN_USER_ERROR; - exp_tgt += (exp_tgt_len - mod_len); - if (copy_from_user(exp_tgt, icaMex_p->b_key, mod_len)) - return SEN_RELEASED; - if (is_empty(exp_tgt, mod_len)) - return SEN_USER_ERROR; - inp_tgt += (inp_tgt_len - mod_len); - if (copy_from_user(inp_tgt, icaMex_p->inputdata, mod_len)) - return SEN_RELEASED; - if (is_empty(inp_tgt, mod_len)) - return SEN_USER_ERROR; - - *z90cMsg_l_p = msg_size - CALLER_HEADER; - - return 0; -} - -static int -ICACRT_msg_to_type50CRT_msg(struct ica_rsa_modexpo_crt *icaMsg_p, - int *z90cMsg_l_p, union type50_msg *z90cMsg_p) -{ - int mod_len, short_len, long_len, tmp_size, p_tgt_len, q_tgt_len, - dp_tgt_len, dq_tgt_len, u_tgt_len, inp_tgt_len, long_offset; - unsigned char *p_tgt, *q_tgt, *dp_tgt, *dq_tgt, *u_tgt, *inp_tgt, - temp[8]; - union type50_msg *tmp_type50_msg; - - mod_len = icaMsg_p->inputdatalength; - short_len = mod_len / 2; - long_len = mod_len / 2 + 8; - long_offset = 0; - - if (long_len > 128) { - memset(temp, 0x00, sizeof(temp)); - if (copy_from_user(temp, icaMsg_p->np_prime, long_len-128)) - return SEN_RELEASED; - if (!is_empty(temp, 8)) - return SEN_NOT_AVAIL; - if (copy_from_user(temp, icaMsg_p->bp_key, long_len-128)) - return SEN_RELEASED; - if (!is_empty(temp, 8)) - return SEN_NOT_AVAIL; - if (copy_from_user(temp, icaMsg_p->u_mult_inv, long_len-128)) - return SEN_RELEASED; - if (!is_empty(temp, 8)) - return SEN_NOT_AVAIL; - long_offset = long_len - 128; - long_len = 128; - } - - tmp_size = ((long_len <= 64) ? 
TYPE50_CRB1_LEN : TYPE50_CRB2_LEN) + - CALLER_HEADER; - - memset(z90cMsg_p, 0, tmp_size); - - tmp_type50_msg = (union type50_msg *) - ((unsigned char *) z90cMsg_p + CALLER_HEADER); - - tmp_type50_msg->crb1.header.msg_type_code = TYPE50_TYPE_CODE; - if (long_len <= 64) { - tmp_type50_msg->crb1.header.msg_len = TYPE50_CRB1_LEN; - tmp_type50_msg->crb1.keyblock_type = TYPE50_CRB1_FMT; - p_tgt = tmp_type50_msg->crb1.p; - p_tgt_len = sizeof(tmp_type50_msg->crb1.p); - q_tgt = tmp_type50_msg->crb1.q; - q_tgt_len = sizeof(tmp_type50_msg->crb1.q); - dp_tgt = tmp_type50_msg->crb1.dp; - dp_tgt_len = sizeof(tmp_type50_msg->crb1.dp); - dq_tgt = tmp_type50_msg->crb1.dq; - dq_tgt_len = sizeof(tmp_type50_msg->crb1.dq); - u_tgt = tmp_type50_msg->crb1.u; - u_tgt_len = sizeof(tmp_type50_msg->crb1.u); - inp_tgt = tmp_type50_msg->crb1.message; - inp_tgt_len = sizeof(tmp_type50_msg->crb1.message); - } else { - tmp_type50_msg->crb2.header.msg_len = TYPE50_CRB2_LEN; - tmp_type50_msg->crb2.keyblock_type = TYPE50_CRB2_FMT; - p_tgt = tmp_type50_msg->crb2.p; - p_tgt_len = sizeof(tmp_type50_msg->crb2.p); - q_tgt = tmp_type50_msg->crb2.q; - q_tgt_len = sizeof(tmp_type50_msg->crb2.q); - dp_tgt = tmp_type50_msg->crb2.dp; - dp_tgt_len = sizeof(tmp_type50_msg->crb2.dp); - dq_tgt = tmp_type50_msg->crb2.dq; - dq_tgt_len = sizeof(tmp_type50_msg->crb2.dq); - u_tgt = tmp_type50_msg->crb2.u; - u_tgt_len = sizeof(tmp_type50_msg->crb2.u); - inp_tgt = tmp_type50_msg->crb2.message; - inp_tgt_len = sizeof(tmp_type50_msg->crb2.message); - } - - p_tgt += (p_tgt_len - long_len); - if (copy_from_user(p_tgt, icaMsg_p->np_prime + long_offset, long_len)) - return SEN_RELEASED; - if (is_empty(p_tgt, long_len)) - return SEN_USER_ERROR; - q_tgt += (q_tgt_len - short_len); - if (copy_from_user(q_tgt, icaMsg_p->nq_prime, short_len)) - return SEN_RELEASED; - if (is_empty(q_tgt, short_len)) - return SEN_USER_ERROR; - dp_tgt += (dp_tgt_len - long_len); - if (copy_from_user(dp_tgt, icaMsg_p->bp_key + long_offset, long_len)) - return SEN_RELEASED; - if (is_empty(dp_tgt, long_len)) - return SEN_USER_ERROR; - dq_tgt += (dq_tgt_len - short_len); - if (copy_from_user(dq_tgt, icaMsg_p->bq_key, short_len)) - return SEN_RELEASED; - if (is_empty(dq_tgt, short_len)) - return SEN_USER_ERROR; - u_tgt += (u_tgt_len - long_len); - if (copy_from_user(u_tgt, icaMsg_p->u_mult_inv + long_offset, long_len)) - return SEN_RELEASED; - if (is_empty(u_tgt, long_len)) - return SEN_USER_ERROR; - inp_tgt += (inp_tgt_len - mod_len); - if (copy_from_user(inp_tgt, icaMsg_p->inputdata, mod_len)) - return SEN_RELEASED; - if (is_empty(inp_tgt, mod_len)) - return SEN_USER_ERROR; - - *z90cMsg_l_p = tmp_size - CALLER_HEADER; - - return 0; -} - -int -convert_request(unsigned char *buffer, int func, unsigned short function, - int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p) -{ - if (dev_type == PCICA) { - if (func == ICARSACRT) - return ICACRT_msg_to_type4CRT_msg( - (struct ica_rsa_modexpo_crt *) buffer, - msg_l_p, (union type4_msg *) msg_p); - else - return ICAMEX_msg_to_type4MEX_msg( - (struct ica_rsa_modexpo *) buffer, - msg_l_p, (union type4_msg *) msg_p); - } - if (dev_type == PCICC) { - if (func == ICARSACRT) - return ICACRT_msg_to_type6CRT_msg( - (struct ica_rsa_modexpo_crt *) buffer, - cdx, msg_l_p, (struct type6_msg *)msg_p); - if (function == PCI_FUNC_KEY_ENCRYPT) - return ICAMEX_msg_to_type6MEX_en_msg( - (struct ica_rsa_modexpo *) buffer, - cdx, msg_l_p, (struct type6_msg *) msg_p); - else - return ICAMEX_msg_to_type6MEX_de_msg( - (struct ica_rsa_modexpo *) buffer, - 
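
Every operand copy in these builders uses the same right-justification idiom: the value is written at the end of its fixed-size field, so a shorter big-endian number picks up leading zero bytes from the earlier memset. In isolation, as a sketch with a hypothetical kernel-space source (hence plain memcpy instead of copy_from_user):

	/* field was zeroed beforehand; val_len <= field_len is assumed */
	static void copy_right_justified(unsigned char *field, int field_len,
					 const unsigned char *val, int val_len)
	{
		memcpy(field + field_len - val_len, val, val_len);
	}
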
cdx, msg_l_p, (struct type6_msg *) msg_p); - } - if ((dev_type == PCIXCC_MCL2) || - (dev_type == PCIXCC_MCL3) || - (dev_type == CEX2C)) { - if (func == ICARSACRT) - return ICACRT_msg_to_type6CRT_msgX( - (struct ica_rsa_modexpo_crt *) buffer, - cdx, msg_l_p, (struct type6_msg *) msg_p, - dev_type); - else - return ICAMEX_msg_to_type6MEX_msgX( - (struct ica_rsa_modexpo *) buffer, - cdx, msg_l_p, (struct type6_msg *) msg_p, - dev_type); - } - if (dev_type == CEX2A) { - if (func == ICARSACRT) - return ICACRT_msg_to_type50CRT_msg( - (struct ica_rsa_modexpo_crt *) buffer, - msg_l_p, (union type50_msg *) msg_p); - else - return ICAMEX_msg_to_type50MEX_msg( - (struct ica_rsa_modexpo *) buffer, - msg_l_p, (union type50_msg *) msg_p); - } - - return 0; -} - -int ext_bitlens_msg_count = 0; -static inline void -unset_ext_bitlens(void) -{ - if (!ext_bitlens_msg_count) { - PRINTK("Unable to use coprocessors for extended bitlengths. " - "Using PCICAs/CEX2As (if present) for extended " - "bitlengths. This is not an error.\n"); - ext_bitlens_msg_count++; - } - ext_bitlens = 0; -} - -int -convert_response(unsigned char *response, unsigned char *buffer, - int *respbufflen_p, unsigned char *resp_buff) -{ - struct ica_rsa_modexpo *icaMsg_p = (struct ica_rsa_modexpo *) buffer; - struct error_hdr *errh_p = (struct error_hdr *) response; - struct type80_hdr *t80h_p = (struct type80_hdr *) response; - struct type84_hdr *t84h_p = (struct type84_hdr *) response; - struct type86_fmt2_msg *t86m_p = (struct type86_fmt2_msg *) response; - int reply_code, service_rc, service_rs, src_l; - unsigned char *src_p, *tgt_p; - struct CPRB *cprb_p; - struct CPRBX *cprbx_p; - - src_p = 0; - reply_code = 0; - service_rc = 0; - service_rs = 0; - src_l = 0; - switch (errh_p->type) { - case TYPE82_RSP_CODE: - case TYPE88_RSP_CODE: - reply_code = errh_p->reply_code; - src_p = (unsigned char *)errh_p; - PRINTK("Hardware error: Type %02X Message Header: " - "%02x%02x%02x%02x%02x%02x%02x%02x\n", - errh_p->type, - src_p[0], src_p[1], src_p[2], src_p[3], - src_p[4], src_p[5], src_p[6], src_p[7]); - break; - case TYPE80_RSP_CODE: - src_l = icaMsg_p->outputdatalength; - src_p = response + (int)t80h_p->len - src_l; - break; - case TYPE84_RSP_CODE: - src_l = icaMsg_p->outputdatalength; - src_p = response + (int)t84h_p->len - src_l; - break; - case TYPE86_RSP_CODE: - reply_code = t86m_p->header.reply_code; - if (reply_code != 0) - break; - cprb_p = (struct CPRB *) - (response + sizeof(struct type86_fmt2_msg)); - cprbx_p = (struct CPRBX *) cprb_p; - if (cprb_p->cprb_ver_id != 0x02) { - le2toI(cprb_p->ccp_rtcode, &service_rc); - if (service_rc != 0) { - le2toI(cprb_p->ccp_rscode, &service_rs); - if ((service_rc == 8) && (service_rs == 66)) - PDEBUG("Bad block format on PCICC\n"); - else if ((service_rc == 8) && (service_rs == 65)) - PDEBUG("Probably an even modulus on " - "PCICC\n"); - else if ((service_rc == 8) && (service_rs == 770)) { - PDEBUG("Invalid key length on PCICC\n"); - unset_ext_bitlens(); - return REC_USE_PCICA; - } - else if ((service_rc == 8) && (service_rs == 783)) { - PDEBUG("Extended bitlengths not enabled" - "on PCICC\n"); - unset_ext_bitlens(); - return REC_USE_PCICA; - } - else - PRINTK("service rc/rs (PCICC): %d/%d\n", - service_rc, service_rs); - return REC_OPERAND_INV; - } - src_p = (unsigned char *)cprb_p + sizeof(struct CPRB); - src_p += 4; - le2toI(src_p, &src_l); - src_l -= 2; - src_p += 2; - } else { - service_rc = (int)cprbx_p->ccp_rtcode; - if (service_rc != 0) { - service_rs = (int) cprbx_p->ccp_rscode; - if 
((service_rc == 8) && (service_rs == 66)) - PDEBUG("Bad block format on PCIXCC\n"); - else if ((service_rc == 8) && (service_rs == 65)) - PDEBUG("Probably an even modulus on " - "PCIXCC\n"); - else if ((service_rc == 8) && (service_rs == 770)) { - PDEBUG("Invalid key length on PCIXCC\n"); - unset_ext_bitlens(); - return REC_USE_PCICA; - } - else if ((service_rc == 8) && (service_rs == 783)) { - PDEBUG("Extended bitlengths not enabled" - "on PCIXCC\n"); - unset_ext_bitlens(); - return REC_USE_PCICA; - } - else - PRINTK("service rc/rs (PCIXCC): %d/%d\n", - service_rc, service_rs); - return REC_OPERAND_INV; - } - src_p = (unsigned char *) - cprbx_p + sizeof(struct CPRBX); - src_p += 4; - src_l = (int)(*((short *) src_p)); - src_l -= 2; - src_p += 2; - } - break; - default: - src_p = (unsigned char *)errh_p; - PRINTK("Unrecognized Message Header: " - "%02x%02x%02x%02x%02x%02x%02x%02x\n", - src_p[0], src_p[1], src_p[2], src_p[3], - src_p[4], src_p[5], src_p[6], src_p[7]); - return REC_BAD_MESSAGE; - } - - if (reply_code) - switch (reply_code) { - case REP82_ERROR_MACHINE_FAILURE: - if (errh_p->type == TYPE82_RSP_CODE) - PRINTKW("Machine check failure\n"); - else - PRINTKW("Module failure\n"); - return REC_HARDWAR_ERR; - case REP82_ERROR_OPERAND_INVALID: - return REC_OPERAND_INV; - case REP88_ERROR_MESSAGE_MALFORMD: - PRINTKW("Message malformed\n"); - return REC_OPERAND_INV; - case REP82_ERROR_OPERAND_SIZE: - return REC_OPERAND_SIZE; - case REP82_ERROR_EVEN_MOD_IN_OPND: - return REC_EVEN_MOD; - case REP82_ERROR_MESSAGE_TYPE: - return WRONG_DEVICE_TYPE; - case REP82_ERROR_TRANSPORT_FAIL: - PRINTKW("Transport failed (APFS = %02X%02X%02X%02X)\n", - t86m_p->apfs[0], t86m_p->apfs[1], - t86m_p->apfs[2], t86m_p->apfs[3]); - return REC_HARDWAR_ERR; - default: - PRINTKW("reply code = %d\n", reply_code); - return REC_HARDWAR_ERR; - } - - if (service_rc != 0) - return REC_OPERAND_INV; - - if ((src_l > icaMsg_p->outputdatalength) || - (src_l > RESPBUFFSIZE) || - (src_l <= 0)) - return REC_OPERAND_SIZE; - - PDEBUG("Length returned = %d\n", src_l); - tgt_p = resp_buff + icaMsg_p->outputdatalength - src_l; - memcpy(tgt_p, src_p, src_l); - if ((errh_p->type == TYPE86_RSP_CODE) && (resp_buff < tgt_p)) { - memset(resp_buff, 0, icaMsg_p->outputdatalength - src_l); - if (pad_msg(resp_buff, icaMsg_p->outputdatalength, src_l)) - return REC_INVALID_PAD; - } - *respbufflen_p = icaMsg_p->outputdatalength; - if (*respbufflen_p == 0) - PRINTK("Zero *respbufflen_p\n"); - - return 0; -} - diff --git a/drivers/s390/crypto/z90main.c b/drivers/s390/crypto/z90main.c deleted file mode 100644 index b2f20ab8431a..000000000000 --- a/drivers/s390/crypto/z90main.c +++ /dev/null @@ -1,3379 +0,0 @@ -/* - * linux/drivers/s390/crypto/z90main.c - * - * z90crypt 1.3.3 - * - * Copyright (C) 2001, 2005 IBM Corporation - * Author(s): Robert Burroughs (burrough@us.ibm.com) - * Eric Rossman (edrossma@us.ibm.com) - * - * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
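
One detail of convert_response() worth spelling out (an aside, not patch content): the cleartext in a TYPE86 reply may be shorter than the caller's outputdatalength, so the result is right-aligned in the response buffer and the freed leading bytes are rebuilt as PKCS#1 block-type-02 framing via pad_msg(). Condensed, with outlen standing for icaMsg_p->outputdatalength:

	unsigned char *tgt = resp_buff + outlen - src_l;

	memcpy(tgt, src_p, src_l);		/* right-align the payload */
	if (tgt > resp_buff) {			/* reply was shorter */
		memset(resp_buff, 0, outlen - src_l);
		if (pad_msg(resp_buff, outlen, src_l))
			return REC_INVALID_PAD;
	}
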
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -#include <asm/uaccess.h> // copy_(from|to)_user -#include <linux/compat.h> -#include <linux/compiler.h> -#include <linux/delay.h> // mdelay -#include <linux/init.h> -#include <linux/interrupt.h> // for tasklets -#include <linux/miscdevice.h> -#include <linux/module.h> -#include <linux/moduleparam.h> -#include <linux/proc_fs.h> -#include <linux/syscalls.h> -#include "z90crypt.h" -#include "z90common.h" - -/** - * Defaults that may be modified. - */ - -/** - * You can specify a different minor at compile time. - */ -#ifndef Z90CRYPT_MINOR -#define Z90CRYPT_MINOR MISC_DYNAMIC_MINOR -#endif - -/** - * You can specify a different domain at compile time or on the insmod - * command line. - */ -#ifndef DOMAIN_INDEX -#define DOMAIN_INDEX -1 -#endif - -/** - * This is the name under which the device is registered in /proc/modules. - */ -#define REG_NAME "z90crypt" - -/** - * Cleanup should run every CLEANUPTIME seconds and should clean up requests - * older than CLEANUPTIME seconds in the past. - */ -#ifndef CLEANUPTIME -#define CLEANUPTIME 15 -#endif - -/** - * Config should run every CONFIGTIME seconds - */ -#ifndef CONFIGTIME -#define CONFIGTIME 30 -#endif - -/** - * The first execution of the config task should take place - * immediately after initialization - */ -#ifndef INITIAL_CONFIGTIME -#define INITIAL_CONFIGTIME 1 -#endif - -/** - * Reader should run every READERTIME milliseconds - * With the 100Hz patch for s390, z90crypt can lock the system solid while - * under heavy load. We'll try to avoid that. - */ -#ifndef READERTIME -#if HZ > 1000 -#define READERTIME 2 -#else -#define READERTIME 10 -#endif -#endif - -/** - * turn long device array index into device pointer - */ -#define LONG2DEVPTR(ndx) (z90crypt.device_p[(ndx)]) - -/** - * turn short device array index into long device array index - */ -#define SHRT2LONG(ndx) (z90crypt.overall_device_x.device_index[(ndx)]) - -/** - * turn short device array index into device pointer - */ -#define SHRT2DEVPTR(ndx) LONG2DEVPTR(SHRT2LONG(ndx)) - -/** - * Status for a work-element - */ -#define STAT_DEFAULT 0x00 // request has not been processed - -#define STAT_ROUTED 0x80 // bit 7: requests get routed to specific device - // else, device is determined each write -#define STAT_FAILED 0x40 // bit 6: this bit is set if the request failed - // before being sent to the hardware. -#define STAT_WRITTEN 0x30 // bits 5-4: work to be done, not sent to device -// 0x20 // UNUSED state -#define STAT_READPEND 0x10 // bits 5-4: work done, we're returning data now -#define STAT_NOWORK 0x00 // bits off: no work on any queue -#define STAT_RDWRMASK 0x30 // mask for bits 5-4 - -/** - * Macros to check the status RDWRMASK - */ -#define CHK_RDWRMASK(statbyte) ((statbyte) & STAT_RDWRMASK) -#define SET_RDWRMASK(statbyte, newval) \ - {(statbyte) &= ~STAT_RDWRMASK; (statbyte) |= newval;} - -/** - * Audit Trail. 
Progress of a Work element - * audit[0]: Unless noted otherwise, these bits are all set by the process - */ -#define FP_COPYFROM 0x80 // Caller's buffer has been copied to work element -#define FP_BUFFREQ 0x40 // Low Level buffer requested -#define FP_BUFFGOT 0x20 // Low Level buffer obtained -#define FP_SENT 0x10 // Work element sent to a crypto device - // (may be set by process or by reader task) -#define FP_PENDING 0x08 // Work element placed on pending queue - // (may be set by process or by reader task) -#define FP_REQUEST 0x04 // Work element placed on request queue -#define FP_ASLEEP 0x02 // Work element about to sleep -#define FP_AWAKE 0x01 // Work element has been awakened - -/** - * audit[1]: These bits are set by the reader task and/or the cleanup task - */ -#define FP_NOTPENDING 0x80 // Work element removed from pending queue -#define FP_AWAKENING 0x40 // Caller about to be awakened -#define FP_TIMEDOUT 0x20 // Caller timed out -#define FP_RESPSIZESET 0x10 // Response size copied to work element -#define FP_RESPADDRCOPIED 0x08 // Response address copied to work element -#define FP_RESPBUFFCOPIED 0x04 // Response buffer copied to work element -#define FP_REMREQUEST 0x02 // Work element removed from request queue -#define FP_SIGNALED 0x01 // Work element was awakened by a signal - -/** - * audit[2]: unused - */ - -/** - * state of the file handle in private_data.status - */ -#define STAT_OPEN 0 -#define STAT_CLOSED 1 - -/** - * PID() expands to the process ID of the current process - */ -#define PID() (current->pid) - -/** - * Selected Constants. The number of APs and the number of devices - */ -#ifndef Z90CRYPT_NUM_APS -#define Z90CRYPT_NUM_APS 64 -#endif -#ifndef Z90CRYPT_NUM_DEVS -#define Z90CRYPT_NUM_DEVS Z90CRYPT_NUM_APS -#endif - -/** - * Buffer size for receiving responses. The maximum Response Size - * is actually the maximum request size, since in an error condition - * the request itself may be returned unchanged. - */ -#define MAX_RESPONSE_SIZE 0x0000077C - -/** - * A count and status-byte mask - */ -struct status { - int st_count; // # of enabled devices - int disabled_count; // # of disabled devices - int user_disabled_count; // # of devices disabled via proc fs - unsigned char st_mask[Z90CRYPT_NUM_APS]; // current status mask -}; - -/** - * The array of device indexes is a mechanism for fast indexing into - * a long (and sparse) array. For instance, if APs 3, 9 and 47 are - * installed, z90CDeviceIndex[0] is 3, z90CDeviceIndex[1] is 9, and - * z90CDeviceIndex[2] is 47. - */ -struct device_x { - int device_index[Z90CRYPT_NUM_DEVS]; -}; - -/** - * All devices are arranged in a single array: 64 APs - */ -struct device { - int dev_type; // PCICA, PCICC, PCIXCC_MCL2, - // PCIXCC_MCL3, CEX2C, CEX2A - enum devstat dev_stat; // current device status - int dev_self_x; // Index in array - int disabled; // Set when device is in error - int user_disabled; // Set when device is disabled by user - int dev_q_depth; // q depth - unsigned char * dev_resp_p; // Response buffer address - int dev_resp_l; // Response Buffer length - int dev_caller_count; // Number of callers - int dev_total_req_cnt; // # requests for device since load - struct list_head dev_caller_list; // List of callers -}; - -/** - * There's a struct status and a struct device_x for each device type. 
 - */ -struct hdware_block { - struct status hdware_mask; - struct status type_mask[Z90CRYPT_NUM_TYPES]; - struct device_x type_x_addr[Z90CRYPT_NUM_TYPES]; - unsigned char device_type_array[Z90CRYPT_NUM_APS]; -}; - -/** - * z90crypt is the topmost data structure in the hierarchy. - */ -struct z90crypt { - int max_count; // Nr of possible crypto devices - struct status mask; - int q_depth_array[Z90CRYPT_NUM_DEVS]; - int dev_type_array[Z90CRYPT_NUM_DEVS]; - struct device_x overall_device_x; // array device indexes - struct device * device_p[Z90CRYPT_NUM_DEVS]; - int terminating; - int domain_established;// TRUE: domain has been found - int cdx; // Crypto Domain Index - int len; // Length of this data structure - struct hdware_block *hdware_info; -}; - -/** - * An array of these structures is pointed to from dev_caller. - * The length of the array depends on the device type. For APs, - * there are 8. - * - * The caller buffer is allocated to the user at OPEN. At WRITE, - * it contains the request; at READ, the response. The function - * send_to_crypto_device converts the request to device-dependent - * form and uses the caller's OPEN-allocated buffer for the response. - * - * For the contents of caller_dev_dep_req (and caller_dev_dep_req_p, - * which points to it), see the discussion in z90hardware.c. - * Search for "extended request message block". - */ -struct caller { - int caller_buf_l; // length of original request - unsigned char * caller_buf_p; // Original request on WRITE - int caller_dev_dep_req_l; // len device dependent request - unsigned char * caller_dev_dep_req_p; // Device dependent form - unsigned char caller_id[8]; // caller-supplied message id - struct list_head caller_liste; - unsigned char caller_dev_dep_req[MAX_RESPONSE_SIZE]; -}; - -/** - * Function prototypes from z90hardware.c - */ -enum hdstat query_online(int deviceNr, int cdx, int resetNr, int *q_depth, - int *dev_type); -enum devstat reset_device(int deviceNr, int cdx, int resetNr); -enum devstat send_to_AP(int dev_nr, int cdx, int msg_len, unsigned char *msg_ext); -enum devstat receive_from_AP(int dev_nr, int cdx, int resplen, - unsigned char *resp, unsigned char *psmid); -int convert_request(unsigned char *buffer, int func, unsigned short function, - int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p); -int convert_response(unsigned char *response, unsigned char *buffer, - int *respbufflen_p, unsigned char *resp_buff); - -/** - * Low level function prototypes - */ -static int create_z90crypt(int *cdx_p); -static int refresh_z90crypt(int *cdx_p); -static int find_crypto_devices(struct status *deviceMask); -static int create_crypto_device(int index); -static int destroy_crypto_device(int index); -static void destroy_z90crypt(void); -static int refresh_index_array(struct status *status_str, - struct device_x *index_array); -static int probe_device_type(struct device *devPtr); -static int probe_PCIXCC_type(struct device *devPtr); - -/** - * proc fs definitions - */ -static struct proc_dir_entry *z90crypt_entry; - -/** - * data structures - */ - -/** - * work_element.opener points back to this structure - */ -struct priv_data { - pid_t opener_pid; - unsigned char status; // 0: open 1: closed -}; - -/** - * A work element is allocated for each request - */ -struct work_element { - struct priv_data *priv_data; - pid_t pid; - int devindex; // index of device processing this w_e - // (If request did not specify device, - // -1 until placed onto a queue) - int devtype; - struct list_head liste; // used for requestq 
and pendingq - char buffer[128]; // local copy of user request - int buff_size; // size of the buffer for the request - char resp_buff[RESPBUFFSIZE]; - int resp_buff_size; - char __user * resp_addr; // address of response in user space - unsigned int funccode; // function code of request - wait_queue_head_t waitq; - unsigned long requestsent; // time at which the request was sent - atomic_t alarmrung; // wake-up signal - unsigned char caller_id[8]; // pid + counter, for this w_e - unsigned char status[1]; // bits to mark status of the request - unsigned char audit[3]; // record of work element's progress - unsigned char * requestptr; // address of request buffer - int retcode; // return code of request -}; - -/** - * High level function prototypes - */ -static int z90crypt_open(struct inode *, struct file *); -static int z90crypt_release(struct inode *, struct file *); -static ssize_t z90crypt_read(struct file *, char __user *, size_t, loff_t *); -static ssize_t z90crypt_write(struct file *, const char __user *, - size_t, loff_t *); -static long z90crypt_unlocked_ioctl(struct file *, unsigned int, unsigned long); -static long z90crypt_compat_ioctl(struct file *, unsigned int, unsigned long); - -static void z90crypt_reader_task(unsigned long); -static void z90crypt_schedule_reader_task(unsigned long); -static void z90crypt_config_task(unsigned long); -static void z90crypt_cleanup_task(unsigned long); - -static int z90crypt_status(char *, char **, off_t, int, int *, void *); -static int z90crypt_status_write(struct file *, const char __user *, - unsigned long, void *); - -/** - * Storage allocated at initialization and used throughout the life of - * this insmod - */ -static int domain = DOMAIN_INDEX; -static struct z90crypt z90crypt; -static int quiesce_z90crypt; -static spinlock_t queuespinlock; -static struct list_head request_list; -static int requestq_count; -static struct list_head pending_list; -static int pendingq_count; - -static struct tasklet_struct reader_tasklet; -static struct timer_list reader_timer; -static struct timer_list config_timer; -static struct timer_list cleanup_timer; -static atomic_t total_open; -static atomic_t z90crypt_step; - -static struct file_operations z90crypt_fops = { - .owner = THIS_MODULE, - .read = z90crypt_read, - .write = z90crypt_write, - .unlocked_ioctl = z90crypt_unlocked_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = z90crypt_compat_ioctl, -#endif - .open = z90crypt_open, - .release = z90crypt_release -}; - -static struct miscdevice z90crypt_misc_device = { - .minor = Z90CRYPT_MINOR, - .name = DEV_NAME, - .fops = &z90crypt_fops, -}; - -/** - * Documentation values. - */ -MODULE_AUTHOR("zSeries Linux Crypto Team: Robert H. Burroughs, Eric D. 
Rossman" - "and Jochen Roehrig"); -MODULE_DESCRIPTION("zSeries Linux Cryptographic Coprocessor device driver, " - "Copyright 2001, 2005 IBM Corporation"); -MODULE_LICENSE("GPL"); -module_param(domain, int, 0); -MODULE_PARM_DESC(domain, "domain index for device"); - -#ifdef CONFIG_COMPAT -/** - * ioctl32 conversion routines - */ -struct ica_rsa_modexpo_32 { // For 32-bit callers - compat_uptr_t inputdata; - unsigned int inputdatalength; - compat_uptr_t outputdata; - unsigned int outputdatalength; - compat_uptr_t b_key; - compat_uptr_t n_modulus; -}; - -static long -trans_modexpo32(struct file *filp, unsigned int cmd, unsigned long arg) -{ - struct ica_rsa_modexpo_32 __user *mex32u = compat_ptr(arg); - struct ica_rsa_modexpo_32 mex32k; - struct ica_rsa_modexpo __user *mex64; - long ret = 0; - unsigned int i; - - if (!access_ok(VERIFY_WRITE, mex32u, sizeof(struct ica_rsa_modexpo_32))) - return -EFAULT; - mex64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo)); - if (!access_ok(VERIFY_WRITE, mex64, sizeof(struct ica_rsa_modexpo))) - return -EFAULT; - if (copy_from_user(&mex32k, mex32u, sizeof(struct ica_rsa_modexpo_32))) - return -EFAULT; - if (__put_user(compat_ptr(mex32k.inputdata), &mex64->inputdata) || - __put_user(mex32k.inputdatalength, &mex64->inputdatalength) || - __put_user(compat_ptr(mex32k.outputdata), &mex64->outputdata) || - __put_user(mex32k.outputdatalength, &mex64->outputdatalength) || - __put_user(compat_ptr(mex32k.b_key), &mex64->b_key) || - __put_user(compat_ptr(mex32k.n_modulus), &mex64->n_modulus)) - return -EFAULT; - ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)mex64); - if (!ret) - if (__get_user(i, &mex64->outputdatalength) || - __put_user(i, &mex32u->outputdatalength)) - ret = -EFAULT; - return ret; -} - -struct ica_rsa_modexpo_crt_32 { // For 32-bit callers - compat_uptr_t inputdata; - unsigned int inputdatalength; - compat_uptr_t outputdata; - unsigned int outputdatalength; - compat_uptr_t bp_key; - compat_uptr_t bq_key; - compat_uptr_t np_prime; - compat_uptr_t nq_prime; - compat_uptr_t u_mult_inv; -}; - -static long -trans_modexpo_crt32(struct file *filp, unsigned int cmd, unsigned long arg) -{ - struct ica_rsa_modexpo_crt_32 __user *crt32u = compat_ptr(arg); - struct ica_rsa_modexpo_crt_32 crt32k; - struct ica_rsa_modexpo_crt __user *crt64; - long ret = 0; - unsigned int i; - - if (!access_ok(VERIFY_WRITE, crt32u, - sizeof(struct ica_rsa_modexpo_crt_32))) - return -EFAULT; - crt64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo_crt)); - if (!access_ok(VERIFY_WRITE, crt64, sizeof(struct ica_rsa_modexpo_crt))) - return -EFAULT; - if (copy_from_user(&crt32k, crt32u, - sizeof(struct ica_rsa_modexpo_crt_32))) - return -EFAULT; - if (__put_user(compat_ptr(crt32k.inputdata), &crt64->inputdata) || - __put_user(crt32k.inputdatalength, &crt64->inputdatalength) || - __put_user(compat_ptr(crt32k.outputdata), &crt64->outputdata) || - __put_user(crt32k.outputdatalength, &crt64->outputdatalength) || - __put_user(compat_ptr(crt32k.bp_key), &crt64->bp_key) || - __put_user(compat_ptr(crt32k.bq_key), &crt64->bq_key) || - __put_user(compat_ptr(crt32k.np_prime), &crt64->np_prime) || - __put_user(compat_ptr(crt32k.nq_prime), &crt64->nq_prime) || - __put_user(compat_ptr(crt32k.u_mult_inv), &crt64->u_mult_inv)) - return -EFAULT; - ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)crt64); - if (!ret) - if (__get_user(i, &crt64->outputdatalength) || - __put_user(i, &crt32u->outputdatalength)) - ret = -EFAULT; - return ret; -} - -static long 
-z90crypt_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) -{ - switch (cmd) { - case ICAZ90STATUS: - case Z90QUIESCE: - case Z90STAT_TOTALCOUNT: - case Z90STAT_PCICACOUNT: - case Z90STAT_PCICCCOUNT: - case Z90STAT_PCIXCCCOUNT: - case Z90STAT_PCIXCCMCL2COUNT: - case Z90STAT_PCIXCCMCL3COUNT: - case Z90STAT_CEX2CCOUNT: - case Z90STAT_REQUESTQ_COUNT: - case Z90STAT_PENDINGQ_COUNT: - case Z90STAT_TOTALOPEN_COUNT: - case Z90STAT_DOMAIN_INDEX: - case Z90STAT_STATUS_MASK: - case Z90STAT_QDEPTH_MASK: - case Z90STAT_PERDEV_REQCNT: - return z90crypt_unlocked_ioctl(filp, cmd, arg); - case ICARSAMODEXPO: - return trans_modexpo32(filp, cmd, arg); - case ICARSACRT: - return trans_modexpo_crt32(filp, cmd, arg); - default: - return -ENOIOCTLCMD; - } -} -#endif - -/** - * The module initialization code. - */ -static int __init -z90crypt_init_module(void) -{ - int result, nresult; - struct proc_dir_entry *entry; - - PDEBUG("PID %d\n", PID()); - - if ((domain < -1) || (domain > 15)) { - PRINTKW("Invalid param: domain = %d. Not loading.\n", domain); - return -EINVAL; - } - - /* Register as misc device with given minor (or get a dynamic one). */ - result = misc_register(&z90crypt_misc_device); - if (result < 0) { - PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n", - z90crypt_misc_device.minor, result); - return result; - } - - PDEBUG("Registered " DEV_NAME " with result %d\n", result); - - result = create_z90crypt(&domain); - if (result != 0) { - PRINTKW("create_z90crypt (domain index %d) failed with %d.\n", - domain, result); - result = -ENOMEM; - goto init_module_cleanup; - } - - if (result == 0) { - PRINTKN("Version %d.%d.%d loaded, built on %s %s\n", - z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT, - __DATE__, __TIME__); - PDEBUG("create_z90crypt (domain index %d) successful.\n", - domain); - } else - PRINTK("No devices at startup\n"); - - /* Initialize globals. */ - spin_lock_init(&queuespinlock); - - INIT_LIST_HEAD(&pending_list); - pendingq_count = 0; - - INIT_LIST_HEAD(&request_list); - requestq_count = 0; - - quiesce_z90crypt = 0; - - atomic_set(&total_open, 0); - atomic_set(&z90crypt_step, 0); - - /* Set up the cleanup task. */ - init_timer(&cleanup_timer); - cleanup_timer.function = z90crypt_cleanup_task; - cleanup_timer.data = 0; - cleanup_timer.expires = jiffies + (CLEANUPTIME * HZ); - add_timer(&cleanup_timer); - - /* Set up the proc file system */ - entry = create_proc_entry("driver/z90crypt", 0644, 0); - if (entry) { - entry->nlink = 1; - entry->data = 0; - entry->read_proc = z90crypt_status; - entry->write_proc = z90crypt_status_write; - } - else - PRINTK("Couldn't create z90crypt proc entry\n"); - z90crypt_entry = entry; - - /* Set up the configuration task. 
*/ - init_timer(&config_timer); - config_timer.function = z90crypt_config_task; - config_timer.data = 0; - config_timer.expires = jiffies + (INITIAL_CONFIGTIME * HZ); - add_timer(&config_timer); - - /* Set up the reader task */ - tasklet_init(&reader_tasklet, z90crypt_reader_task, 0); - init_timer(&reader_timer); - reader_timer.function = z90crypt_schedule_reader_task; - reader_timer.data = 0; - reader_timer.expires = jiffies + (READERTIME * HZ / 1000); - add_timer(&reader_timer); - - return 0; // success - -init_module_cleanup: - if ((nresult = misc_deregister(&z90crypt_misc_device))) - PRINTK("misc_deregister failed with %d.\n", nresult); - else - PDEBUG("misc_deregister successful.\n"); - - return result; // failure -} - -/** - * The module termination code - */ -static void __exit -z90crypt_cleanup_module(void) -{ - int nresult; - - PDEBUG("PID %d\n", PID()); - - remove_proc_entry("driver/z90crypt", 0); - - if ((nresult = misc_deregister(&z90crypt_misc_device))) - PRINTK("misc_deregister failed with %d.\n", nresult); - else - PDEBUG("misc_deregister successful.\n"); - - /* Remove the tasks */ - tasklet_kill(&reader_tasklet); - del_timer(&reader_timer); - del_timer(&config_timer); - del_timer(&cleanup_timer); - - destroy_z90crypt(); - - PRINTKN("Unloaded.\n"); -} - -/** - * Functions running under a process id - * - * The I/O functions: - * z90crypt_open - * z90crypt_release - * z90crypt_read - * z90crypt_write - * z90crypt_unlocked_ioctl - * z90crypt_status - * z90crypt_status_write - * disable_card - * enable_card - * - * Helper functions: - * z90crypt_rsa - * z90crypt_prepare - * z90crypt_send - * z90crypt_process_results - * - */ -static int -z90crypt_open(struct inode *inode, struct file *filp) -{ - struct priv_data *private_data_p; - - if (quiesce_z90crypt) - return -EQUIESCE; - - private_data_p = kzalloc(sizeof(struct priv_data), GFP_KERNEL); - if (!private_data_p) { - PRINTK("Memory allocate failed\n"); - return -ENOMEM; - } - - private_data_p->status = STAT_OPEN; - private_data_p->opener_pid = PID(); - filp->private_data = private_data_p; - atomic_inc(&total_open); - - return 0; -} - -static int -z90crypt_release(struct inode *inode, struct file *filp) -{ - struct priv_data *private_data_p = filp->private_data; - - PDEBUG("PID %d (filp %p)\n", PID(), filp); - - private_data_p->status = STAT_CLOSED; - memset(private_data_p, 0, sizeof(struct priv_data)); - kfree(private_data_p); - atomic_dec(&total_open); - - return 0; -} - -/* - * there are two read functions, of which compile options will choose one - * without USE_GET_RANDOM_BYTES - * => read() always returns -EPERM; - * otherwise - * => read() uses get_random_bytes() kernel function - */ -#ifndef USE_GET_RANDOM_BYTES -/** - * z90crypt_read will not be supported beyond z90crypt 1.3.1 - */ -static ssize_t -z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos) -{ - PDEBUG("filp %p (PID %d)\n", filp, PID()); - return -EPERM; -} -#else // we want to use get_random_bytes -/** - * read() just returns a string of random bytes. Since we have no way - * to generate these cryptographically, we just execute get_random_bytes - * for the length specified. 
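 * (Illustrative user-space view, assuming the usual /dev/z90crypt node;
 * only builds compiled with USE_GET_RANDOM_BYTES behave this way:)
 *
 *   fd = open("/dev/z90crypt", O_RDONLY);
 *   n = read(fd, buf, 64);   // up to RESPBUFFSIZE bytes; -EPERM otherwise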
 - */ -#include <linux/random.h> -static ssize_t -z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos) -{ - unsigned char *temp_buff; - - PDEBUG("filp %p (PID %d)\n", filp, PID()); - - if (quiesce_z90crypt) - return -EQUIESCE; - if (count < 0) { - PRINTK("Requested random byte count negative: %ld\n", count); - return -EINVAL; - } - if (count > RESPBUFFSIZE) { - PDEBUG("count[%d] > RESPBUFFSIZE", count); - return -EINVAL; - } - if (count == 0) - return 0; - temp_buff = kmalloc(RESPBUFFSIZE, GFP_KERNEL); - if (!temp_buff) { - PRINTK("Memory allocate failed\n"); - return -ENOMEM; - } - get_random_bytes(temp_buff, count); - - if (copy_to_user(buf, temp_buff, count) != 0) { - kfree(temp_buff); - return -EFAULT; - } - kfree(temp_buff); - return count; -} -#endif - -/** - * Write is not allowed - */ -static ssize_t -z90crypt_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos) -{ - PDEBUG("filp %p (PID %d)\n", filp, PID()); - return -EPERM; -} - -/** - * New status functions - */ -static inline int -get_status_totalcount(void) -{ - return z90crypt.hdware_info->hdware_mask.st_count; -} - -static inline int -get_status_PCICAcount(void) -{ - return z90crypt.hdware_info->type_mask[PCICA].st_count; -} - -static inline int -get_status_PCICCcount(void) -{ - return z90crypt.hdware_info->type_mask[PCICC].st_count; -} - -static inline int -get_status_PCIXCCcount(void) -{ - return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count + - z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count; -} - -static inline int -get_status_PCIXCCMCL2count(void) -{ - return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count; -} - -static inline int -get_status_PCIXCCMCL3count(void) -{ - return z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count; -} - -static inline int -get_status_CEX2Ccount(void) -{ - return z90crypt.hdware_info->type_mask[CEX2C].st_count; -} - -static inline int -get_status_CEX2Acount(void) -{ - return z90crypt.hdware_info->type_mask[CEX2A].st_count; -} - -static inline int -get_status_requestq_count(void) -{ - return requestq_count; -} - -static inline int -get_status_pendingq_count(void) -{ - return pendingq_count; -} - -static inline int -get_status_totalopen_count(void) -{ - return atomic_read(&total_open); -} - -static inline int -get_status_domain_index(void) -{ - return z90crypt.cdx; -} - -static inline unsigned char * -get_status_status_mask(unsigned char status[Z90CRYPT_NUM_APS]) -{ - int i, ix; - - memcpy(status, z90crypt.hdware_info->device_type_array, - Z90CRYPT_NUM_APS); - - for (i = 0; i < get_status_totalcount(); i++) { - ix = SHRT2LONG(i); - if (LONG2DEVPTR(ix)->user_disabled) - status[ix] = 0x0d; - } - - return status; -} - -static inline unsigned char * -get_status_qdepth_mask(unsigned char qdepth[Z90CRYPT_NUM_APS]) -{ - int i, ix; - - memset(qdepth, 0, Z90CRYPT_NUM_APS); - - for (i = 0; i < get_status_totalcount(); i++) { - ix = SHRT2LONG(i); - qdepth[ix] = LONG2DEVPTR(ix)->dev_caller_count; - } - - return qdepth; -} - -static inline unsigned int * -get_status_perdevice_reqcnt(unsigned int reqcnt[Z90CRYPT_NUM_APS]) -{ - int i, ix; - - memset(reqcnt, 0, Z90CRYPT_NUM_APS * sizeof(int)); - - for (i = 0; i < get_status_totalcount(); i++) { - ix = SHRT2LONG(i); - reqcnt[ix] = LONG2DEVPTR(ix)->dev_total_req_cnt; - } - - return reqcnt; -} - -static inline void -init_work_element(struct work_element *we_p, - struct priv_data *priv_data, pid_t pid) -{ - int step; - - we_p->requestptr = (unsigned char *)we_p + sizeof(struct 
work_element); - /* Come up with a unique id for this caller. */ - step = atomic_inc_return(&z90crypt_step); - memcpy(we_p->caller_id+0, (void *) &pid, sizeof(pid)); - memcpy(we_p->caller_id+4, (void *) &step, sizeof(step)); - we_p->pid = pid; - we_p->priv_data = priv_data; - we_p->status[0] = STAT_DEFAULT; - we_p->audit[0] = 0x00; - we_p->audit[1] = 0x00; - we_p->audit[2] = 0x00; - we_p->resp_buff_size = 0; - we_p->retcode = 0; - we_p->devindex = -1; - we_p->devtype = -1; - atomic_set(&we_p->alarmrung, 0); - init_waitqueue_head(&we_p->waitq); - INIT_LIST_HEAD(&(we_p->liste)); -} - -static inline int -allocate_work_element(struct work_element **we_pp, - struct priv_data *priv_data_p, pid_t pid) -{ - struct work_element *we_p; - - we_p = (struct work_element *) get_zeroed_page(GFP_KERNEL); - if (!we_p) - return -ENOMEM; - init_work_element(we_p, priv_data_p, pid); - *we_pp = we_p; - return 0; -} - -static inline void -remove_device(struct device *device_p) -{ - if (!device_p || (device_p->disabled != 0)) - return; - device_p->disabled = 1; - z90crypt.hdware_info->type_mask[device_p->dev_type].disabled_count++; - z90crypt.hdware_info->hdware_mask.disabled_count++; -} - -/** - * Bitlength limits for each card - * - * There are new MCLs which allow more bitlengths. See the table for details. - * The MCL must be applied and the newer bitlengths enabled for these to work. - * - * Card Type Old limit New limit - * PCICA ??-2048 same (the lower limit is less than 128 bit...) - * PCICC 512-1024 512-2048 - * PCIXCC_MCL2 512-2048 ----- (applying any GA LIC will make an MCL3 card) - * PCIXCC_MCL3 ----- 128-2048 - * CEX2C 512-2048 128-2048 - * CEX2A ??-2048 same (the lower limit is less than 128 bit...) - * - * ext_bitlens (extended bitlengths) is a global, since you should not apply an - * MCL to just one card in a machine. We assume, at first, that all cards have - * these capabilities. - */ -int ext_bitlens = 1; // This is global -#define PCIXCC_MIN_MOD_SIZE 16 // 128 bits -#define OLD_PCIXCC_MIN_MOD_SIZE 64 // 512 bits -#define PCICC_MIN_MOD_SIZE 64 // 512 bits -#define OLD_PCICC_MAX_MOD_SIZE 128 // 1024 bits -#define MAX_MOD_SIZE 256 // 2048 bits - -static inline int -select_device_type(int *dev_type_p, int bytelength) -{ - static int count = 0; - int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, CEX2A_avail, - index_to_use; - struct status *stat; - if ((*dev_type_p != PCICC) && (*dev_type_p != PCICA) && - (*dev_type_p != PCIXCC_MCL2) && (*dev_type_p != PCIXCC_MCL3) && - (*dev_type_p != CEX2C) && (*dev_type_p != CEX2A) && - (*dev_type_p != ANYDEV)) - return -1; - if (*dev_type_p != ANYDEV) { - stat = &z90crypt.hdware_info->type_mask[*dev_type_p]; - if (stat->st_count > - (stat->disabled_count + stat->user_disabled_count)) - return 0; - return -1; - } - - /** - * Assumption: PCICA, PCIXCC_MCL3, CEX2C, and CEX2A are all similar in - * speed. - * - * PCICA and CEX2A do NOT co-exist, so it would be either one or the - * other present. 
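 * (Illustrative reading of the bitlength table above: the effective
 * lower bound for the coprocessor pool is)
 *
 *   min = ext_bitlens ? PCIXCC_MIN_MOD_SIZE      // 16 bytes = 128 bits
 *                     : OLD_PCIXCC_MIN_MOD_SIZE; // 64 bytes = 512 bits
 *
 * and anything shorter is steered to a PCICA or CEX2A below.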
- */ - stat = &z90crypt.hdware_info->type_mask[PCICA]; - PCICA_avail = stat->st_count - - (stat->disabled_count + stat->user_disabled_count); - stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL3]; - PCIXCC_MCL3_avail = stat->st_count - - (stat->disabled_count + stat->user_disabled_count); - stat = &z90crypt.hdware_info->type_mask[CEX2C]; - CEX2C_avail = stat->st_count - - (stat->disabled_count + stat->user_disabled_count); - stat = &z90crypt.hdware_info->type_mask[CEX2A]; - CEX2A_avail = stat->st_count - - (stat->disabled_count + stat->user_disabled_count); - if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail || CEX2A_avail) { - /** - * bitlength is a factor, PCICA or CEX2A are the most capable, - * even with the new MCL for PCIXCC. - */ - if ((bytelength < PCIXCC_MIN_MOD_SIZE) || - (!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) { - if (PCICA_avail) { - *dev_type_p = PCICA; - return 0; - } - if (CEX2A_avail) { - *dev_type_p = CEX2A; - return 0; - } - return -1; - } - - index_to_use = count % (PCICA_avail + PCIXCC_MCL3_avail + - CEX2C_avail + CEX2A_avail); - if (index_to_use < PCICA_avail) - *dev_type_p = PCICA; - else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail)) - *dev_type_p = PCIXCC_MCL3; - else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail + - CEX2C_avail)) - *dev_type_p = CEX2C; - else - *dev_type_p = CEX2A; - count++; - return 0; - } - - /* Less than OLD_PCIXCC_MIN_MOD_SIZE cannot go to a PCIXCC_MCL2 */ - if (bytelength < OLD_PCIXCC_MIN_MOD_SIZE) - return -1; - stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL2]; - if (stat->st_count > - (stat->disabled_count + stat->user_disabled_count)) { - *dev_type_p = PCIXCC_MCL2; - return 0; - } - - /** - * Less than PCICC_MIN_MOD_SIZE or more than OLD_PCICC_MAX_MOD_SIZE - * (if we don't have the MCL applied and the newer bitlengths enabled) - * cannot go to a PCICC - */ - if ((bytelength < PCICC_MIN_MOD_SIZE) || - (!ext_bitlens && (bytelength > OLD_PCICC_MAX_MOD_SIZE))) { - return -1; - } - stat = &z90crypt.hdware_info->type_mask[PCICC]; - if (stat->st_count > - (stat->disabled_count + stat->user_disabled_count)) { - *dev_type_p = PCICC; - return 0; - } - - return -1; -} - -/** - * Try the selected number, then the selected type (can be ANYDEV) - */ -static inline int -select_device(int *dev_type_p, int *device_nr_p, int bytelength) -{ - int i, indx, devTp, low_count, low_indx; - struct device_x *index_p; - struct device *dev_ptr; - - PDEBUG("device type = %d, index = %d\n", *dev_type_p, *device_nr_p); - if ((*device_nr_p >= 0) && (*device_nr_p < Z90CRYPT_NUM_DEVS)) { - PDEBUG("trying index = %d\n", *device_nr_p); - dev_ptr = z90crypt.device_p[*device_nr_p]; - - if (dev_ptr && - (dev_ptr->dev_stat != DEV_GONE) && - (dev_ptr->disabled == 0) && - (dev_ptr->user_disabled == 0)) { - PDEBUG("selected by number, index = %d\n", - *device_nr_p); - *dev_type_p = dev_ptr->dev_type; - return *device_nr_p; - } - } - *device_nr_p = -1; - PDEBUG("trying type = %d\n", *dev_type_p); - devTp = *dev_type_p; - if (select_device_type(&devTp, bytelength) == -1) { - PDEBUG("failed to select by type\n"); - return -1; - } - PDEBUG("selected type = %d\n", devTp); - index_p = &z90crypt.hdware_info->type_x_addr[devTp]; - low_count = 0x0000FFFF; - low_indx = -1; - for (i = 0; i < z90crypt.hdware_info->type_mask[devTp].st_count; i++) { - indx = index_p->device_index[i]; - dev_ptr = z90crypt.device_p[indx]; - if (dev_ptr && - (dev_ptr->dev_stat != DEV_GONE) && - (dev_ptr->disabled == 0) && - (dev_ptr->user_disabled == 0) && - (devTp == 
dev_ptr->dev_type) && - (low_count > dev_ptr->dev_caller_count)) { - low_count = dev_ptr->dev_caller_count; - low_indx = indx; - } - } - *device_nr_p = low_indx; - return low_indx; -} - -static inline int -send_to_crypto_device(struct work_element *we_p) -{ - struct caller *caller_p; - struct device *device_p; - int dev_nr; - int bytelen = ((struct ica_rsa_modexpo *)we_p->buffer)->inputdatalength; - - if (!we_p->requestptr) - return SEN_FATAL_ERROR; - caller_p = (struct caller *)we_p->requestptr; - dev_nr = we_p->devindex; - if (select_device(&we_p->devtype, &dev_nr, bytelen) == -1) { - if (z90crypt.hdware_info->hdware_mask.st_count != 0) - return SEN_RETRY; - else - return SEN_NOT_AVAIL; - } - we_p->devindex = dev_nr; - device_p = z90crypt.device_p[dev_nr]; - if (!device_p) - return SEN_NOT_AVAIL; - if (device_p->dev_type != we_p->devtype) - return SEN_RETRY; - if (device_p->dev_caller_count >= device_p->dev_q_depth) - return SEN_QUEUE_FULL; - PDEBUG("device number prior to send: %d\n", dev_nr); - switch (send_to_AP(dev_nr, z90crypt.cdx, - caller_p->caller_dev_dep_req_l, - caller_p->caller_dev_dep_req_p)) { - case DEV_SEN_EXCEPTION: - PRINTKC("Exception during send to device %d\n", dev_nr); - z90crypt.terminating = 1; - return SEN_FATAL_ERROR; - case DEV_GONE: - PRINTK("Device %d not available\n", dev_nr); - remove_device(device_p); - return SEN_NOT_AVAIL; - case DEV_EMPTY: - return SEN_NOT_AVAIL; - case DEV_NO_WORK: - return SEN_FATAL_ERROR; - case DEV_BAD_MESSAGE: - return SEN_USER_ERROR; - case DEV_QUEUE_FULL: - return SEN_QUEUE_FULL; - default: - case DEV_ONLINE: - break; - } - list_add_tail(&(caller_p->caller_liste), &(device_p->dev_caller_list)); - device_p->dev_caller_count++; - return 0; -} - -/** - * Send puts the user's work on one of two queues: - * the pending queue if the send was successful - * the request queue if the send failed because device full or busy - */ -static inline int -z90crypt_send(struct work_element *we_p, const char *buf) -{ - int rv; - - PDEBUG("PID %d\n", PID()); - - if (CHK_RDWRMASK(we_p->status[0]) != STAT_NOWORK) { - PDEBUG("PID %d tried to send more work but has outstanding " - "work.\n", PID()); - return -EWORKPEND; - } - we_p->devindex = -1; // Reset device number - spin_lock_irq(&queuespinlock); - rv = send_to_crypto_device(we_p); - switch (rv) { - case 0: - we_p->requestsent = jiffies; - we_p->audit[0] |= FP_SENT; - list_add_tail(&we_p->liste, &pending_list); - ++pendingq_count; - we_p->audit[0] |= FP_PENDING; - break; - case SEN_BUSY: - case SEN_QUEUE_FULL: - rv = 0; - we_p->devindex = -1; // any device will do - we_p->requestsent = jiffies; - list_add_tail(&we_p->liste, &request_list); - ++requestq_count; - we_p->audit[0] |= FP_REQUEST; - break; - case SEN_RETRY: - rv = -ERESTARTSYS; - break; - case SEN_NOT_AVAIL: - PRINTK("*** No devices available.\n"); - rv = we_p->retcode = -ENODEV; - we_p->status[0] |= STAT_FAILED; - break; - case REC_OPERAND_INV: - case REC_OPERAND_SIZE: - case REC_EVEN_MOD: - case REC_INVALID_PAD: - rv = we_p->retcode = -EINVAL; - we_p->status[0] |= STAT_FAILED; - break; - default: - we_p->retcode = rv; - we_p->status[0] |= STAT_FAILED; - break; - } - if (rv != -ERESTARTSYS) - SET_RDWRMASK(we_p->status[0], STAT_WRITTEN); - spin_unlock_irq(&queuespinlock); - if (rv == 0) - tasklet_schedule(&reader_tasklet); - return rv; -} - -/** - * process_results copies the user's work from kernel space. 
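 * (For orientation, the matching user-space round trip; illustrative,
 * with error handling elided:)
 *
 *   struct ica_rsa_modexpo mex;
 *   // fill inputdata/inputdatalength, outputdata/outputdatalength,
 *   // b_key and n_modulus, then:
 *   rc = ioctl(fd, ICARSAMODEXPO, &mex);
 *   // rc == 0: outputdata holds inputdatalength result bytes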
- */ -static inline int -z90crypt_process_results(struct work_element *we_p, char __user *buf) -{ - int rv; - - PDEBUG("we_p %p (PID %d)\n", we_p, PID()); - - LONG2DEVPTR(we_p->devindex)->dev_total_req_cnt++; - SET_RDWRMASK(we_p->status[0], STAT_READPEND); - - rv = 0; - if (!we_p->buffer) { - PRINTK("we_p %p PID %d in STAT_READPEND: buffer NULL.\n", - we_p, PID()); - rv = -ENOBUFF; - } - - if (!rv) - if ((rv = copy_to_user(buf, we_p->buffer, we_p->buff_size))) { - PDEBUG("copy_to_user failed: rv = %d\n", rv); - rv = -EFAULT; - } - - if (!rv) - rv = we_p->retcode; - if (!rv) - if (we_p->resp_buff_size - && copy_to_user(we_p->resp_addr, we_p->resp_buff, - we_p->resp_buff_size)) - rv = -EFAULT; - - SET_RDWRMASK(we_p->status[0], STAT_NOWORK); - return rv; -} - -static unsigned char NULL_psmid[8] = -{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; - -/** - * Used in device configuration functions - */ -#define MAX_RESET 90 - -/** - * This is used only for PCICC support - */ -static inline int -is_PKCS11_padded(unsigned char *buffer, int length) -{ - int i; - if ((buffer[0] != 0x00) || (buffer[1] != 0x01)) - return 0; - for (i = 2; i < length; i++) - if (buffer[i] != 0xFF) - break; - if ((i < 10) || (i == length)) - return 0; - if (buffer[i] != 0x00) - return 0; - return 1; -} - -/** - * This is used only for PCICC support - */ -static inline int -is_PKCS12_padded(unsigned char *buffer, int length) -{ - int i; - if ((buffer[0] != 0x00) || (buffer[1] != 0x02)) - return 0; - for (i = 2; i < length; i++) - if (buffer[i] == 0x00) - break; - if ((i < 10) || (i == length)) - return 0; - if (buffer[i] != 0x00) - return 0; - return 1; -} - -/** - * builds struct caller and converts message from generic format to - * device-dependent format - * func is ICARSAMODEXPO or ICARSACRT - * function is PCI_FUNC_KEY_ENCRYPT or PCI_FUNC_KEY_DECRYPT - */ -static inline int -build_caller(struct work_element *we_p, short function) -{ - int rv; - struct caller *caller_p = (struct caller *)we_p->requestptr; - - if ((we_p->devtype != PCICC) && (we_p->devtype != PCICA) && - (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) && - (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A)) - return SEN_NOT_AVAIL; - - memcpy(caller_p->caller_id, we_p->caller_id, - sizeof(caller_p->caller_id)); - caller_p->caller_dev_dep_req_p = caller_p->caller_dev_dep_req; - caller_p->caller_dev_dep_req_l = MAX_RESPONSE_SIZE; - caller_p->caller_buf_p = we_p->buffer; - INIT_LIST_HEAD(&(caller_p->caller_liste)); - - rv = convert_request(we_p->buffer, we_p->funccode, function, - z90crypt.cdx, we_p->devtype, - &caller_p->caller_dev_dep_req_l, - caller_p->caller_dev_dep_req_p); - if (rv) { - if (rv == SEN_NOT_AVAIL) - PDEBUG("request can't be processed on hdwr avail\n"); - else - PRINTK("Error from convert_request: %d\n", rv); - } - else - memcpy(&(caller_p->caller_dev_dep_req_p[4]), we_p->caller_id,8); - return rv; -} - -static inline void -unbuild_caller(struct device *device_p, struct caller *caller_p) -{ - if (!caller_p) - return; - if (caller_p->caller_liste.next && caller_p->caller_liste.prev) - if (!list_empty(&caller_p->caller_liste)) { - list_del_init(&caller_p->caller_liste); - device_p->dev_caller_count--; - } - memset(caller_p->caller_id, 0, sizeof(caller_p->caller_id)); -} - -static inline int -get_crypto_request_buffer(struct work_element *we_p) -{ - struct ica_rsa_modexpo *mex_p; - struct ica_rsa_modexpo_crt *crt_p; - unsigned char *temp_buffer; - short function; - int rv; - - mex_p = (struct ica_rsa_modexpo *) 
we_p->buffer; - crt_p = (struct ica_rsa_modexpo_crt *) we_p->buffer; - - PDEBUG("device type input = %d\n", we_p->devtype); - - if (z90crypt.terminating) - return REC_NO_RESPONSE; - if (memcmp(we_p->caller_id, NULL_psmid, 8) == 0) { - PRINTK("psmid zeroes\n"); - return SEN_FATAL_ERROR; - } - if (!we_p->buffer) { - PRINTK("buffer pointer NULL\n"); - return SEN_USER_ERROR; - } - if (!we_p->requestptr) { - PRINTK("caller pointer NULL\n"); - return SEN_USER_ERROR; - } - - if ((we_p->devtype != PCICA) && (we_p->devtype != PCICC) && - (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) && - (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A) && - (we_p->devtype != ANYDEV)) { - PRINTK("invalid device type\n"); - return SEN_USER_ERROR; - } - - if ((mex_p->inputdatalength < 1) || - (mex_p->inputdatalength > MAX_MOD_SIZE)) { - PRINTK("inputdatalength[%d] is not valid\n", - mex_p->inputdatalength); - return SEN_USER_ERROR; - } - - if (mex_p->outputdatalength < mex_p->inputdatalength) { - PRINTK("outputdatalength[%d] < inputdatalength[%d]\n", - mex_p->outputdatalength, mex_p->inputdatalength); - return SEN_USER_ERROR; - } - - if (!mex_p->inputdata || !mex_p->outputdata) { - PRINTK("inputdata[%p] or outputdata[%p] is NULL\n", - mex_p->outputdata, mex_p->inputdata); - return SEN_USER_ERROR; - } - - /** - * As long as outputdatalength is big enough, we can set the - * outputdatalength equal to the inputdatalength, since that is the - * number of bytes we will copy in any case - */ - mex_p->outputdatalength = mex_p->inputdatalength; - - rv = 0; - switch (we_p->funccode) { - case ICARSAMODEXPO: - if (!mex_p->b_key || !mex_p->n_modulus) - rv = SEN_USER_ERROR; - break; - case ICARSACRT: - if (!IS_EVEN(crt_p->inputdatalength)) { - PRINTK("inputdatalength[%d] is odd, CRT form\n", - crt_p->inputdatalength); - rv = SEN_USER_ERROR; - break; - } - if (!crt_p->bp_key || - !crt_p->bq_key || - !crt_p->np_prime || - !crt_p->nq_prime || - !crt_p->u_mult_inv) { - PRINTK("CRT form, bad data: %p/%p/%p/%p/%p\n", - crt_p->bp_key, crt_p->bq_key, - crt_p->np_prime, crt_p->nq_prime, - crt_p->u_mult_inv); - rv = SEN_USER_ERROR; - } - break; - default: - PRINTK("bad func = %d\n", we_p->funccode); - rv = SEN_USER_ERROR; - break; - } - if (rv != 0) - return rv; - - if (select_device_type(&we_p->devtype, mex_p->inputdatalength) < 0) - return SEN_NOT_AVAIL; - - temp_buffer = (unsigned char *)we_p + sizeof(struct work_element) + - sizeof(struct caller); - if (copy_from_user(temp_buffer, mex_p->inputdata, - mex_p->inputdatalength) != 0) - return SEN_RELEASED; - - function = PCI_FUNC_KEY_ENCRYPT; - switch (we_p->devtype) { - /* PCICA and CEX2A do everything with a simple RSA mod-expo operation */ - case PCICA: - case CEX2A: - function = PCI_FUNC_KEY_ENCRYPT; - break; - /** - * PCIXCC_MCL2 does all Mod-Expo form with a simple RSA mod-expo - * operation, and all CRT forms with a PKCS-1.2 format decrypt. 
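 * (For reference, the PKCS#1 block formats the helpers above test for;
 * example bytes, illustrative:)
 *
 *   block type 01: 00 01 FF FF FF FF FF FF FF FF 00 <data>  (>= 8 pad FFs)
 *   block type 02: 00 02 <at least 8 nonzero pad bytes> 00 <data>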
- * PCIXCC_MCL3 and CEX2C do all Mod-Expo and CRT forms with a simple RSA - * mod-expo operation - */ - case PCIXCC_MCL2: - if (we_p->funccode == ICARSAMODEXPO) - function = PCI_FUNC_KEY_ENCRYPT; - else - function = PCI_FUNC_KEY_DECRYPT; - break; - case PCIXCC_MCL3: - case CEX2C: - if (we_p->funccode == ICARSAMODEXPO) - function = PCI_FUNC_KEY_ENCRYPT; - else - function = PCI_FUNC_KEY_DECRYPT; - break; - /** - * PCICC does everything as a PKCS-1.2 format request - */ - case PCICC: - /* PCICC cannot handle input that is PKCS#1.1 padded */ - if (is_PKCS11_padded(temp_buffer, mex_p->inputdatalength)) { - return SEN_NOT_AVAIL; - } - if (we_p->funccode == ICARSAMODEXPO) { - if (is_PKCS12_padded(temp_buffer, - mex_p->inputdatalength)) - function = PCI_FUNC_KEY_ENCRYPT; - else - function = PCI_FUNC_KEY_DECRYPT; - } else - /* all CRT forms are decrypts */ - function = PCI_FUNC_KEY_DECRYPT; - break; - } - PDEBUG("function: %04x\n", function); - rv = build_caller(we_p, function); - PDEBUG("rv from build_caller = %d\n", rv); - return rv; -} - -static inline int -z90crypt_prepare(struct work_element *we_p, unsigned int funccode, - const char __user *buffer) -{ - int rv; - - we_p->devindex = -1; - if (funccode == ICARSAMODEXPO) - we_p->buff_size = sizeof(struct ica_rsa_modexpo); - else - we_p->buff_size = sizeof(struct ica_rsa_modexpo_crt); - - if (copy_from_user(we_p->buffer, buffer, we_p->buff_size)) - return -EFAULT; - - we_p->audit[0] |= FP_COPYFROM; - SET_RDWRMASK(we_p->status[0], STAT_WRITTEN); - we_p->funccode = funccode; - we_p->devtype = -1; - we_p->audit[0] |= FP_BUFFREQ; - rv = get_crypto_request_buffer(we_p); - switch (rv) { - case 0: - we_p->audit[0] |= FP_BUFFGOT; - break; - case SEN_USER_ERROR: - rv = -EINVAL; - break; - case SEN_QUEUE_FULL: - rv = 0; - break; - case SEN_RELEASED: - rv = -EFAULT; - break; - case REC_NO_RESPONSE: - rv = -ENODEV; - break; - case SEN_NOT_AVAIL: - case EGETBUFF: - rv = -EGETBUFF; - break; - default: - PRINTK("rv = %d\n", rv); - rv = -EGETBUFF; - break; - } - if (CHK_RDWRMASK(we_p->status[0]) == STAT_WRITTEN) - SET_RDWRMASK(we_p->status[0], STAT_DEFAULT); - return rv; -} - -static inline void -purge_work_element(struct work_element *we_p) -{ - struct list_head *lptr; - - spin_lock_irq(&queuespinlock); - list_for_each(lptr, &request_list) { - if (lptr == &we_p->liste) { - list_del_init(lptr); - requestq_count--; - break; - } - } - list_for_each(lptr, &pending_list) { - if (lptr == &we_p->liste) { - list_del_init(lptr); - pendingq_count--; - break; - } - } - spin_unlock_irq(&queuespinlock); -} - -/** - * Build the request and send it. 
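 * (Lifecycle sketch of the function below, using names from this file:
 * allocate_work_element -> z90crypt_prepare -> z90crypt_send ->
 * wait_event(waitq, alarmrung) -> z90crypt_process_results. The reader
 * task fills resp_buff and rings alarmrung when the card answers; note
 * the remapping of post-receive -EINVAL to -EGETBUFF so coprocessor
 * padding errors can fall back to software.)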
- */ -static inline int -z90crypt_rsa(struct priv_data *private_data_p, pid_t pid, - unsigned int cmd, unsigned long arg) -{ - struct work_element *we_p; - int rv; - - if ((rv = allocate_work_element(&we_p, private_data_p, pid))) { - PDEBUG("PID %d: allocate_work_element returned ENOMEM\n", pid); - return rv; - } - if ((rv = z90crypt_prepare(we_p, cmd, (const char __user *)arg))) - PDEBUG("PID %d: rv = %d from z90crypt_prepare\n", pid, rv); - if (!rv) - if ((rv = z90crypt_send(we_p, (const char *)arg))) - PDEBUG("PID %d: rv %d from z90crypt_send.\n", pid, rv); - if (!rv) { - we_p->audit[0] |= FP_ASLEEP; - wait_event(we_p->waitq, atomic_read(&we_p->alarmrung)); - we_p->audit[0] |= FP_AWAKE; - rv = we_p->retcode; - } - if (!rv) - rv = z90crypt_process_results(we_p, (char __user *)arg); - - if ((we_p->status[0] & STAT_FAILED)) { - switch (rv) { - /** - * EINVAL *after* receive is almost always a padding error or - * length error issued by a coprocessor (not an accelerator). - * We convert this return value to -EGETBUFF which should - * trigger a fallback to software. - */ - case -EINVAL: - if ((we_p->devtype != PCICA) && - (we_p->devtype != CEX2A)) - rv = -EGETBUFF; - break; - case -ETIMEOUT: - if (z90crypt.mask.st_count > 0) - rv = -ERESTARTSYS; // retry with another - else - rv = -ENODEV; // no cards left - /* fall through to clean up request queue */ - case -ERESTARTSYS: - case -ERELEASED: - switch (CHK_RDWRMASK(we_p->status[0])) { - case STAT_WRITTEN: - purge_work_element(we_p); - break; - case STAT_READPEND: - case STAT_NOWORK: - default: - break; - } - break; - default: - we_p->status[0] ^= STAT_FAILED; - break; - } - } - free_page((long)we_p); - return rv; -} - -/** - * This function is a little long, but it's really just one large switch - * statement. - */ -static long -z90crypt_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) -{ - struct priv_data *private_data_p = filp->private_data; - unsigned char *status; - unsigned char *qdepth; - unsigned int *reqcnt; - struct ica_z90_status *pstat; - int ret, i, loopLim, tempstat; - static int deprecated_msg_count1 = 0; - static int deprecated_msg_count2 = 0; - - PDEBUG("filp %p (PID %d), cmd 0x%08X\n", filp, PID(), cmd); - PDEBUG("cmd 0x%08X: dir %s, size 0x%04X, type 0x%02X, nr 0x%02X\n", - cmd, - !_IOC_DIR(cmd) ? "NO" - : ((_IOC_DIR(cmd) == (_IOC_READ|_IOC_WRITE)) ? "RW" - : ((_IOC_DIR(cmd) == _IOC_READ) ? 
"RD" - : "WR")), - _IOC_SIZE(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd)); - - if (_IOC_TYPE(cmd) != Z90_IOCTL_MAGIC) { - PRINTK("cmd 0x%08X contains bad magic\n", cmd); - return -ENOTTY; - } - - ret = 0; - switch (cmd) { - case ICARSAMODEXPO: - case ICARSACRT: - if (quiesce_z90crypt) { - ret = -EQUIESCE; - break; - } - ret = -ENODEV; // Default if no devices - loopLim = z90crypt.hdware_info->hdware_mask.st_count - - (z90crypt.hdware_info->hdware_mask.disabled_count + - z90crypt.hdware_info->hdware_mask.user_disabled_count); - for (i = 0; i < loopLim; i++) { - ret = z90crypt_rsa(private_data_p, PID(), cmd, arg); - if (ret != -ERESTARTSYS) - break; - } - if (ret == -ERESTARTSYS) - ret = -ENODEV; - break; - - case Z90STAT_TOTALCOUNT: - tempstat = get_status_totalcount(); - if (copy_to_user((int __user *)arg, &tempstat,sizeof(int)) != 0) - ret = -EFAULT; - break; - - case Z90STAT_PCICACOUNT: - tempstat = get_status_PCICAcount(); - if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0) - ret = -EFAULT; - break; - - case Z90STAT_PCICCCOUNT: - tempstat = get_status_PCICCcount(); - if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0) - ret = -EFAULT; - break; - - case Z90STAT_PCIXCCMCL2COUNT: - tempstat = get_status_PCIXCCMCL2count(); - if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0) - ret = -EFAULT; - break; - - case Z90STAT_PCIXCCMCL3COUNT: - tempstat = get_status_PCIXCCMCL3count(); - if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0) - ret = -EFAULT; - break; - - case Z90STAT_CEX2CCOUNT: - tempstat = get_status_CEX2Ccount(); - if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0) - ret = -EFAULT; - break; - - case Z90STAT_CEX2ACOUNT: - tempstat = get_status_CEX2Acount(); - if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0) - ret = -EFAULT; - break; - - case Z90STAT_REQUESTQ_COUNT: - tempstat = get_status_requestq_count(); - if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0) - ret = -EFAULT; - break; - - case Z90STAT_PENDINGQ_COUNT: - tempstat = get_status_pendingq_count(); - if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0) - ret = -EFAULT; - break; - - case Z90STAT_TOTALOPEN_COUNT: - tempstat = get_status_totalopen_count(); - if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0) - ret = -EFAULT; - break; - - case Z90STAT_DOMAIN_INDEX: - tempstat = get_status_domain_index(); - if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0) - ret = -EFAULT; - break; - - case Z90STAT_STATUS_MASK: - status = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL); - if (!status) { - PRINTK("kmalloc for status failed!\n"); - ret = -ENOMEM; - break; - } - get_status_status_mask(status); - if (copy_to_user((char __user *) arg, status, Z90CRYPT_NUM_APS) - != 0) - ret = -EFAULT; - kfree(status); - break; - - case Z90STAT_QDEPTH_MASK: - qdepth = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL); - if (!qdepth) { - PRINTK("kmalloc for qdepth failed!\n"); - ret = -ENOMEM; - break; - } - get_status_qdepth_mask(qdepth); - if (copy_to_user((char __user *) arg, qdepth, Z90CRYPT_NUM_APS) != 0) - ret = -EFAULT; - kfree(qdepth); - break; - - case Z90STAT_PERDEV_REQCNT: - reqcnt = kmalloc(sizeof(int) * Z90CRYPT_NUM_APS, GFP_KERNEL); - if (!reqcnt) { - PRINTK("kmalloc for reqcnt failed!\n"); - ret = -ENOMEM; - break; - } - get_status_perdevice_reqcnt(reqcnt); - if (copy_to_user((char __user *) arg, reqcnt, - Z90CRYPT_NUM_APS * sizeof(int)) != 0) - ret = -EFAULT; - kfree(reqcnt); - break; - - /* THIS IS DEPRECATED. 
USE THE NEW STATUS CALLS */ - case ICAZ90STATUS: - if (deprecated_msg_count1 < 20) { - PRINTK("deprecated call to ioctl (ICAZ90STATUS)!\n"); - deprecated_msg_count1++; - if (deprecated_msg_count1 == 20) - PRINTK("No longer issuing messages related to " - "deprecated call to ICAZ90STATUS.\n"); - } - - pstat = kmalloc(sizeof(struct ica_z90_status), GFP_KERNEL); - if (!pstat) { - PRINTK("kmalloc for pstat failed!\n"); - ret = -ENOMEM; - break; - } - - pstat->totalcount = get_status_totalcount(); - pstat->leedslitecount = get_status_PCICAcount(); - pstat->leeds2count = get_status_PCICCcount(); - pstat->requestqWaitCount = get_status_requestq_count(); - pstat->pendingqWaitCount = get_status_pendingq_count(); - pstat->totalOpenCount = get_status_totalopen_count(); - pstat->cryptoDomain = get_status_domain_index(); - get_status_status_mask(pstat->status); - get_status_qdepth_mask(pstat->qdepth); - - if (copy_to_user((struct ica_z90_status __user *) arg, pstat, - sizeof(struct ica_z90_status)) != 0) - ret = -EFAULT; - kfree(pstat); - break; - - /* THIS IS DEPRECATED. USE THE NEW STATUS CALLS */ - case Z90STAT_PCIXCCCOUNT: - if (deprecated_msg_count2 < 20) { - PRINTK("deprecated ioctl (Z90STAT_PCIXCCCOUNT)!\n"); - deprecated_msg_count2++; - if (deprecated_msg_count2 == 20) - PRINTK("No longer issuing messages about depre" - "cated ioctl Z90STAT_PCIXCCCOUNT.\n"); - } - - tempstat = get_status_PCIXCCcount(); - if (copy_to_user((int *)arg, &tempstat, sizeof(int)) != 0) - ret = -EFAULT; - break; - - case Z90QUIESCE: - if (current->euid != 0) { - PRINTK("QUIESCE fails: euid %d\n", - current->euid); - ret = -EACCES; - } else { - PRINTK("QUIESCE device from PID %d\n", PID()); - quiesce_z90crypt = 1; - } - break; - - default: - /* user passed an invalid IOCTL number */ - PDEBUG("cmd 0x%08X contains invalid ioctl code\n", cmd); - ret = -ENOTTY; - break; - } - - return ret; -} - -static inline int -sprintcl(unsigned char *outaddr, unsigned char *addr, unsigned int len) -{ - int hl, i; - - hl = 0; - for (i = 0; i < len; i++) - hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]); - hl += sprintf(outaddr+hl, " "); - - return hl; -} - -static inline int -sprintrw(unsigned char *outaddr, unsigned char *addr, unsigned int len) -{ - int hl, inl, c, cx; - - hl = sprintf(outaddr, " "); - inl = 0; - for (c = 0; c < (len / 16); c++) { - hl += sprintcl(outaddr+hl, addr+inl, 16); - inl += 16; - } - - cx = len%16; - if (cx) { - hl += sprintcl(outaddr+hl, addr+inl, cx); - inl += cx; - } - - hl += sprintf(outaddr+hl, "\n"); - - return hl; -} - -static inline int -sprinthx(unsigned char *title, unsigned char *outaddr, - unsigned char *addr, unsigned int len) -{ - int hl, inl, r, rx; - - hl = sprintf(outaddr, "\n%s\n", title); - inl = 0; - for (r = 0; r < (len / 64); r++) { - hl += sprintrw(outaddr+hl, addr+inl, 64); - inl += 64; - } - rx = len % 64; - if (rx) { - hl += sprintrw(outaddr+hl, addr+inl, rx); - inl += rx; - } - - hl += sprintf(outaddr+hl, "\n"); - - return hl; -} - -static inline int -sprinthx4(unsigned char *title, unsigned char *outaddr, - unsigned int *array, unsigned int len) -{ - int hl, r; - - hl = sprintf(outaddr, "\n%s\n", title); - - for (r = 0; r < len; r++) { - if ((r % 8) == 0) - hl += sprintf(outaddr+hl, " "); - hl += sprintf(outaddr+hl, "%08X ", array[r]); - if ((r % 8) == 7) - hl += sprintf(outaddr+hl, "\n"); - } - - hl += sprintf(outaddr+hl, "\n"); - - return hl; -} - -static int -z90crypt_status(char *resp_buff, char **start, off_t offset, - int count, int *eof, void *data) -{ - unsigned 
char *workarea; - int len; - - /* resp_buff is a page. Use the right half for a work area */ - workarea = resp_buff+2000; - len = 0; - len += sprintf(resp_buff+len, "\nz90crypt version: %d.%d.%d\n", - z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT); - len += sprintf(resp_buff+len, "Cryptographic domain: %d\n", - get_status_domain_index()); - len += sprintf(resp_buff+len, "Total device count: %d\n", - get_status_totalcount()); - len += sprintf(resp_buff+len, "PCICA count: %d\n", - get_status_PCICAcount()); - len += sprintf(resp_buff+len, "PCICC count: %d\n", - get_status_PCICCcount()); - len += sprintf(resp_buff+len, "PCIXCC MCL2 count: %d\n", - get_status_PCIXCCMCL2count()); - len += sprintf(resp_buff+len, "PCIXCC MCL3 count: %d\n", - get_status_PCIXCCMCL3count()); - len += sprintf(resp_buff+len, "CEX2C count: %d\n", - get_status_CEX2Ccount()); - len += sprintf(resp_buff+len, "CEX2A count: %d\n", - get_status_CEX2Acount()); - len += sprintf(resp_buff+len, "requestq count: %d\n", - get_status_requestq_count()); - len += sprintf(resp_buff+len, "pendingq count: %d\n", - get_status_pendingq_count()); - len += sprintf(resp_buff+len, "Total open handles: %d\n\n", - get_status_totalopen_count()); - len += sprinthx( - "Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) " - "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A", - resp_buff+len, - get_status_status_mask(workarea), - Z90CRYPT_NUM_APS); - len += sprinthx("Waiting work element counts", - resp_buff+len, - get_status_qdepth_mask(workarea), - Z90CRYPT_NUM_APS); - len += sprinthx4( - "Per-device successfully completed request counts", - resp_buff+len, - get_status_perdevice_reqcnt((unsigned int *)workarea), - Z90CRYPT_NUM_APS); - *eof = 1; - memset(workarea, 0, Z90CRYPT_NUM_APS * sizeof(unsigned int)); - return len; -} - -static inline void -disable_card(int card_index) -{ - struct device *devp; - - devp = LONG2DEVPTR(card_index); - if (!devp || devp->user_disabled) - return; - devp->user_disabled = 1; - z90crypt.hdware_info->hdware_mask.user_disabled_count++; - if (devp->dev_type == -1) - return; - z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count++; -} - -static inline void -enable_card(int card_index) -{ - struct device *devp; - - devp = LONG2DEVPTR(card_index); - if (!devp || !devp->user_disabled) - return; - devp->user_disabled = 0; - z90crypt.hdware_info->hdware_mask.user_disabled_count--; - if (devp->dev_type == -1) - return; - z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count--; -} - -static int -z90crypt_status_write(struct file *file, const char __user *buffer, - unsigned long count, void *data) -{ - int j, eol; - unsigned char *lbuf, *ptr; - unsigned int local_count; - -#define LBUFSIZE 1200 - lbuf = kmalloc(LBUFSIZE, GFP_KERNEL); - if (!lbuf) { - PRINTK("kmalloc failed!\n"); - return 0; - } - - if (count <= 0) - return 0; - - local_count = UMIN((unsigned int)count, LBUFSIZE-1); - - if (copy_from_user(lbuf, buffer, local_count) != 0) { - kfree(lbuf); - return -EFAULT; - } - - lbuf[local_count] = '\0'; - - ptr = strstr(lbuf, "Online devices"); - if (ptr == 0) { - PRINTK("Unable to parse data (missing \"Online devices\")\n"); - kfree(lbuf); - return count; - } - - ptr = strstr(ptr, "\n"); - if (ptr == 0) { - PRINTK("Unable to parse data (missing newline after \"Online devices\")\n"); - kfree(lbuf); - return count; - } - ptr++; - - if (strstr(ptr, "Waiting work element counts") == NULL) { - PRINTK("Unable to parse data (missing \"Waiting work element counts\")\n"); - kfree(lbuf); - return count; - } - - j = 0; - 
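	/* (Illustrative note: the loop below scans the 64 characters that
	 * follow the "Online devices" banner; digits 0-6 just advance to
	 * the next AP slot, while 'd'/'D' disables and 'e'/'E' enables the
	 * AP at that position, so writing a modified status dump back into
	 * /proc/driver/z90crypt toggles individual cards.) */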
eol = 0; - while ((j < 64) && (*ptr != '\0')) { - switch (*ptr) { - case '\t': - case ' ': - break; - case '\n': - default: - eol = 1; - break; - case '0': // no device - case '1': // PCICA - case '2': // PCICC - case '3': // PCIXCC_MCL2 - case '4': // PCIXCC_MCL3 - case '5': // CEX2C - case '6': // CEX2A - j++; - break; - case 'd': - case 'D': - disable_card(j); - j++; - break; - case 'e': - case 'E': - enable_card(j); - j++; - break; - } - if (eol) - break; - ptr++; - } - - kfree(lbuf); - return count; -} - -/** - * Functions that run under a timer, with no process id - * - * The task functions: - * z90crypt_reader_task - * helper_send_work - * helper_handle_work_element - * helper_receive_rc - * z90crypt_config_task - * z90crypt_cleanup_task - * - * Helper functions: - * z90crypt_schedule_reader_timer - * z90crypt_schedule_reader_task - * z90crypt_schedule_config_task - * z90crypt_schedule_cleanup_task - */ -static inline int -receive_from_crypto_device(int index, unsigned char *psmid, int *buff_len_p, - unsigned char *buff, unsigned char __user **dest_p_p) -{ - int dv, rv; - struct device *dev_ptr; - struct caller *caller_p; - struct ica_rsa_modexpo *icaMsg_p; - struct list_head *ptr, *tptr; - - memcpy(psmid, NULL_psmid, sizeof(NULL_psmid)); - - if (z90crypt.terminating) - return REC_FATAL_ERROR; - - caller_p = 0; - dev_ptr = z90crypt.device_p[index]; - rv = 0; - do { - if (!dev_ptr || dev_ptr->disabled) { - rv = REC_NO_WORK; // a disabled device can't return work - break; - } - if (dev_ptr->dev_self_x != index) { - PRINTKC("Corrupt dev ptr\n"); - z90crypt.terminating = 1; - rv = REC_FATAL_ERROR; - break; - } - if (!dev_ptr->dev_resp_l || !dev_ptr->dev_resp_p) { - dv = DEV_REC_EXCEPTION; - PRINTK("dev_resp_l = %d, dev_resp_p = %p\n", - dev_ptr->dev_resp_l, dev_ptr->dev_resp_p); - } else { - PDEBUG("Dequeue called for device %d\n", index); - dv = receive_from_AP(index, z90crypt.cdx, - dev_ptr->dev_resp_l, - dev_ptr->dev_resp_p, psmid); - } - switch (dv) { - case DEV_REC_EXCEPTION: - rv = REC_FATAL_ERROR; - z90crypt.terminating = 1; - PRINTKC("Exception in receive from device %d\n", - index); - break; - case DEV_ONLINE: - rv = 0; - break; - case DEV_EMPTY: - rv = REC_EMPTY; - break; - case DEV_NO_WORK: - rv = REC_NO_WORK; - break; - case DEV_BAD_MESSAGE: - case DEV_GONE: - case REC_HARDWAR_ERR: - default: - rv = REC_NO_RESPONSE; - break; - } - if (rv) - break; - if (dev_ptr->dev_caller_count <= 0) { - rv = REC_USER_GONE; - break; - } - - list_for_each_safe(ptr, tptr, &dev_ptr->dev_caller_list) { - caller_p = list_entry(ptr, struct caller, caller_liste); - if (!memcmp(caller_p->caller_id, psmid, - sizeof(caller_p->caller_id))) { - if (!list_empty(&caller_p->caller_liste)) { - list_del_init(ptr); - dev_ptr->dev_caller_count--; - break; - } - } - caller_p = 0; - } - if (!caller_p) { - PRINTKW("Unable to locate PSMID %02X%02X%02X%02X%02X" - "%02X%02X%02X in device list\n", - psmid[0], psmid[1], psmid[2], psmid[3], - psmid[4], psmid[5], psmid[6], psmid[7]); - rv = REC_USER_GONE; - break; - } - - PDEBUG("caller_p after successful receive: %p\n", caller_p); - rv = convert_response(dev_ptr->dev_resp_p, - caller_p->caller_buf_p, buff_len_p, buff); - switch (rv) { - case REC_USE_PCICA: - break; - case REC_OPERAND_INV: - case REC_OPERAND_SIZE: - case REC_EVEN_MOD: - case REC_INVALID_PAD: - PDEBUG("device %d: 'user error' %d\n", index, rv); - break; - case WRONG_DEVICE_TYPE: - case REC_HARDWAR_ERR: - case REC_BAD_MESSAGE: - PRINTKW("device %d: hardware error %d\n", index, rv); - rv = 
REC_NO_RESPONSE; - break; - default: - PDEBUG("device %d: rv = %d\n", index, rv); - break; - } - } while (0); - - switch (rv) { - case 0: - PDEBUG("Successful receive from device %d\n", index); - icaMsg_p = (struct ica_rsa_modexpo *)caller_p->caller_buf_p; - *dest_p_p = icaMsg_p->outputdata; - if (*buff_len_p == 0) - PRINTK("Zero *buff_len_p\n"); - break; - case REC_NO_RESPONSE: - PRINTKW("Removing device %d from availability\n", index); - remove_device(dev_ptr); - break; - } - - if (caller_p) - unbuild_caller(dev_ptr, caller_p); - - return rv; -} - -static inline void -helper_send_work(int index) -{ - struct work_element *rq_p; - int rv; - - if (list_empty(&request_list)) - return; - requestq_count--; - rq_p = list_entry(request_list.next, struct work_element, liste); - list_del_init(&rq_p->liste); - rq_p->audit[1] |= FP_REMREQUEST; - if (rq_p->devtype == SHRT2DEVPTR(index)->dev_type) { - rq_p->devindex = SHRT2LONG(index); - rv = send_to_crypto_device(rq_p); - if (rv == 0) { - rq_p->requestsent = jiffies; - rq_p->audit[0] |= FP_SENT; - list_add_tail(&rq_p->liste, &pending_list); - ++pendingq_count; - rq_p->audit[0] |= FP_PENDING; - } else { - switch (rv) { - case REC_OPERAND_INV: - case REC_OPERAND_SIZE: - case REC_EVEN_MOD: - case REC_INVALID_PAD: - rq_p->retcode = -EINVAL; - break; - case SEN_NOT_AVAIL: - case SEN_RETRY: - case REC_NO_RESPONSE: - default: - if (z90crypt.mask.st_count > 1) - rq_p->retcode = - -ERESTARTSYS; - else - rq_p->retcode = -ENODEV; - break; - } - rq_p->status[0] |= STAT_FAILED; - rq_p->audit[1] |= FP_AWAKENING; - atomic_set(&rq_p->alarmrung, 1); - wake_up(&rq_p->waitq); - } - } else { - if (z90crypt.mask.st_count > 1) - rq_p->retcode = -ERESTARTSYS; - else - rq_p->retcode = -ENODEV; - rq_p->status[0] |= STAT_FAILED; - rq_p->audit[1] |= FP_AWAKENING; - atomic_set(&rq_p->alarmrung, 1); - wake_up(&rq_p->waitq); - } -} - -static inline void -helper_handle_work_element(int index, unsigned char psmid[8], int rc, - int buff_len, unsigned char *buff, - unsigned char __user *resp_addr) -{ - struct work_element *pq_p; - struct list_head *lptr, *tptr; - - pq_p = 0; - list_for_each_safe(lptr, tptr, &pending_list) { - pq_p = list_entry(lptr, struct work_element, liste); - if (!memcmp(pq_p->caller_id, psmid, sizeof(pq_p->caller_id))) { - list_del_init(lptr); - pendingq_count--; - pq_p->audit[1] |= FP_NOTPENDING; - break; - } - pq_p = 0; - } - - if (!pq_p) { - PRINTK("device %d has work but no caller exists on pending Q\n", - SHRT2LONG(index)); - return; - } - - switch (rc) { - case 0: - pq_p->resp_buff_size = buff_len; - pq_p->audit[1] |= FP_RESPSIZESET; - if (buff_len) { - pq_p->resp_addr = resp_addr; - pq_p->audit[1] |= FP_RESPADDRCOPIED; - memcpy(pq_p->resp_buff, buff, buff_len); - pq_p->audit[1] |= FP_RESPBUFFCOPIED; - } - break; - case REC_OPERAND_INV: - case REC_OPERAND_SIZE: - case REC_EVEN_MOD: - case REC_INVALID_PAD: - PDEBUG("-EINVAL after application error %d\n", rc); - pq_p->retcode = -EINVAL; - pq_p->status[0] |= STAT_FAILED; - break; - case REC_USE_PCICA: - pq_p->retcode = -ERESTARTSYS; - pq_p->status[0] |= STAT_FAILED; - break; - case REC_NO_RESPONSE: - default: - if (z90crypt.mask.st_count > 1) - pq_p->retcode = -ERESTARTSYS; - else - pq_p->retcode = -ENODEV; - pq_p->status[0] |= STAT_FAILED; - break; - } - if ((pq_p->status[0] != STAT_FAILED) || (pq_p->retcode != -ERELEASED)) { - pq_p->audit[1] |= FP_AWAKENING; - atomic_set(&pq_p->alarmrung, 1); - wake_up(&pq_p->waitq); - } -} - -/** - * return TRUE if the work element should be removed from the queue - */ 
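/* (Summary of the triage below, editorial: 0, the four operand errors,
 * REC_USE_PCICA and REC_NO_RESPONSE are final, so the element is
 * dequeued; REC_BUSY, REC_NO_WORK, REC_EMPTY, REC_RETRY_DEV and
 * REC_FATAL_ERROR leave it queued; any other value is coerced to
 * REC_NO_RESPONSE.) */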
-static inline int -helper_receive_rc(int index, int *rc_p) -{ - switch (*rc_p) { - case 0: - case REC_OPERAND_INV: - case REC_OPERAND_SIZE: - case REC_EVEN_MOD: - case REC_INVALID_PAD: - case REC_USE_PCICA: - break; - - case REC_BUSY: - case REC_NO_WORK: - case REC_EMPTY: - case REC_RETRY_DEV: - case REC_FATAL_ERROR: - return 0; - - case REC_NO_RESPONSE: - break; - - default: - PRINTK("rc %d, device %d converted to REC_NO_RESPONSE\n", - *rc_p, SHRT2LONG(index)); - *rc_p = REC_NO_RESPONSE; - break; - } - return 1; -} - -static inline void -z90crypt_schedule_reader_timer(void) -{ - if (timer_pending(&reader_timer)) - return; - if (mod_timer(&reader_timer, jiffies+(READERTIME*HZ/1000)) != 0) - PRINTK("Timer pending while modifying reader timer\n"); -} - -static void -z90crypt_reader_task(unsigned long ptr) -{ - int workavail, index, rc, buff_len; - unsigned char psmid[8]; - unsigned char __user *resp_addr; - static unsigned char buff[1024]; - - /** - * we use workavail = 2 to ensure 2 passes with nothing dequeued before - * exiting the loop. If (pendingq_count+requestq_count) == 0 after the - * loop, there is no work remaining on the queues. - */ - resp_addr = 0; - workavail = 2; - buff_len = 0; - while (workavail) { - workavail--; - rc = 0; - spin_lock_irq(&queuespinlock); - memset(buff, 0x00, sizeof(buff)); - - /* Dequeue once from each device in round robin. */ - for (index = 0; index < z90crypt.mask.st_count; index++) { - PDEBUG("About to receive.\n"); - rc = receive_from_crypto_device(SHRT2LONG(index), - psmid, - &buff_len, - buff, - &resp_addr); - PDEBUG("Dequeued: rc = %d.\n", rc); - - if (helper_receive_rc(index, &rc)) { - if (rc != REC_NO_RESPONSE) { - helper_send_work(index); - workavail = 2; - } - - helper_handle_work_element(index, psmid, rc, - buff_len, buff, - resp_addr); - } - - if (rc == REC_FATAL_ERROR) - PRINTKW("REC_FATAL_ERROR from device %d!\n", - SHRT2LONG(index)); - } - spin_unlock_irq(&queuespinlock); - } - - if (pendingq_count + requestq_count) - z90crypt_schedule_reader_timer(); -} - -static inline void -z90crypt_schedule_config_task(unsigned int expiration) -{ - if (timer_pending(&config_timer)) - return; - if (mod_timer(&config_timer, jiffies+(expiration*HZ)) != 0) - PRINTK("Timer pending while modifying config timer\n"); -} - -static void -z90crypt_config_task(unsigned long ptr) -{ - int rc; - - PDEBUG("jiffies %ld\n", jiffies); - - if ((rc = refresh_z90crypt(&z90crypt.cdx))) - PRINTK("Error %d detected in refresh_z90crypt.\n", rc); - /* If return was fatal, don't bother reconfiguring */ - if ((rc != TSQ_FATAL_ERROR) && (rc != RSQ_FATAL_ERROR)) - z90crypt_schedule_config_task(CONFIGTIME); -} - -static inline void -z90crypt_schedule_cleanup_task(void) -{ - if (timer_pending(&cleanup_timer)) - return; - if (mod_timer(&cleanup_timer, jiffies+(CLEANUPTIME*HZ)) != 0) - PRINTK("Timer pending while modifying cleanup timer\n"); -} - -static inline void -helper_drain_queues(void) -{ - struct work_element *pq_p; - struct list_head *lptr, *tptr; - - list_for_each_safe(lptr, tptr, &pending_list) { - pq_p = list_entry(lptr, struct work_element, liste); - pq_p->retcode = -ENODEV; - pq_p->status[0] |= STAT_FAILED; - unbuild_caller(LONG2DEVPTR(pq_p->devindex), - (struct caller *)pq_p->requestptr); - list_del_init(lptr); - pendingq_count--; - pq_p->audit[1] |= FP_NOTPENDING; - pq_p->audit[1] |= FP_AWAKENING; - atomic_set(&pq_p->alarmrung, 1); - wake_up(&pq_p->waitq); - } - - list_for_each_safe(lptr, tptr, &request_list) { - pq_p = list_entry(lptr, struct work_element, liste); - 
pq_p->retcode = -ENODEV; - pq_p->status[0] |= STAT_FAILED; - list_del_init(lptr); - requestq_count--; - pq_p->audit[1] |= FP_REMREQUEST; - pq_p->audit[1] |= FP_AWAKENING; - atomic_set(&pq_p->alarmrung, 1); - wake_up(&pq_p->waitq); - } -} - -static inline void -helper_timeout_requests(void) -{ - struct work_element *pq_p; - struct list_head *lptr, *tptr; - long timelimit; - - timelimit = jiffies - (CLEANUPTIME * HZ); - /* The list is in strict chronological order */ - list_for_each_safe(lptr, tptr, &pending_list) { - pq_p = list_entry(lptr, struct work_element, liste); - if (pq_p->requestsent >= timelimit) - break; - PRINTKW("Purging(PQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n", - ((struct caller *)pq_p->requestptr)->caller_id[0], - ((struct caller *)pq_p->requestptr)->caller_id[1], - ((struct caller *)pq_p->requestptr)->caller_id[2], - ((struct caller *)pq_p->requestptr)->caller_id[3], - ((struct caller *)pq_p->requestptr)->caller_id[4], - ((struct caller *)pq_p->requestptr)->caller_id[5], - ((struct caller *)pq_p->requestptr)->caller_id[6], - ((struct caller *)pq_p->requestptr)->caller_id[7]); - pq_p->retcode = -ETIMEOUT; - pq_p->status[0] |= STAT_FAILED; - /* get this off any caller queue it may be on */ - unbuild_caller(LONG2DEVPTR(pq_p->devindex), - (struct caller *) pq_p->requestptr); - list_del_init(lptr); - pendingq_count--; - pq_p->audit[1] |= FP_TIMEDOUT; - pq_p->audit[1] |= FP_NOTPENDING; - pq_p->audit[1] |= FP_AWAKENING; - atomic_set(&pq_p->alarmrung, 1); - wake_up(&pq_p->waitq); - } - - /** - * If pending count is zero, items left on the request queue may - * never be processed. - */ - if (pendingq_count <= 0) { - list_for_each_safe(lptr, tptr, &request_list) { - pq_p = list_entry(lptr, struct work_element, liste); - if (pq_p->requestsent >= timelimit) - break; - PRINTKW("Purging(RQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n", - ((struct caller *)pq_p->requestptr)->caller_id[0], - ((struct caller *)pq_p->requestptr)->caller_id[1], - ((struct caller *)pq_p->requestptr)->caller_id[2], - ((struct caller *)pq_p->requestptr)->caller_id[3], - ((struct caller *)pq_p->requestptr)->caller_id[4], - ((struct caller *)pq_p->requestptr)->caller_id[5], - ((struct caller *)pq_p->requestptr)->caller_id[6], - ((struct caller *)pq_p->requestptr)->caller_id[7]); - pq_p->retcode = -ETIMEOUT; - pq_p->status[0] |= STAT_FAILED; - list_del_init(lptr); - requestq_count--; - pq_p->audit[1] |= FP_TIMEDOUT; - pq_p->audit[1] |= FP_REMREQUEST; - pq_p->audit[1] |= FP_AWAKENING; - atomic_set(&pq_p->alarmrung, 1); - wake_up(&pq_p->waitq); - } - } -} - -static void -z90crypt_cleanup_task(unsigned long ptr) -{ - PDEBUG("jiffies %ld\n", jiffies); - spin_lock_irq(&queuespinlock); - if (z90crypt.mask.st_count <= 0) // no devices! 
- helper_drain_queues(); - else - helper_timeout_requests(); - spin_unlock_irq(&queuespinlock); - z90crypt_schedule_cleanup_task(); -} - -static void -z90crypt_schedule_reader_task(unsigned long ptr) -{ - tasklet_schedule(&reader_tasklet); -} - -/** - * Lowlevel Functions: - * - * create_z90crypt: creates and initializes basic data structures - * refresh_z90crypt: re-initializes basic data structures - * find_crypto_devices: returns a count and mask of hardware status - * create_crypto_device: builds the descriptor for a device - * destroy_crypto_device: unallocates the descriptor for a device - * destroy_z90crypt: drains all work, unallocates structs - */ - -/** - * build the z90crypt root structure using the given domain index - */ -static int -create_z90crypt(int *cdx_p) -{ - struct hdware_block *hdware_blk_p; - - memset(&z90crypt, 0x00, sizeof(struct z90crypt)); - z90crypt.domain_established = 0; - z90crypt.len = sizeof(struct z90crypt); - z90crypt.max_count = Z90CRYPT_NUM_DEVS; - z90crypt.cdx = *cdx_p; - - hdware_blk_p = kzalloc(sizeof(struct hdware_block), GFP_ATOMIC); - if (!hdware_blk_p) { - PDEBUG("kmalloc for hardware block failed\n"); - return ENOMEM; - } - z90crypt.hdware_info = hdware_blk_p; - - return 0; -} - -static inline int -helper_scan_devices(int cdx_array[16], int *cdx_p, int *correct_cdx_found) -{ - enum hdstat hd_stat; - int q_depth, dev_type; - int indx, chkdom, numdomains; - - q_depth = dev_type = numdomains = 0; - for (chkdom = 0; chkdom <= 15; cdx_array[chkdom++] = -1); - for (indx = 0; indx < z90crypt.max_count; indx++) { - hd_stat = HD_NOT_THERE; - numdomains = 0; - for (chkdom = 0; chkdom <= 15; chkdom++) { - hd_stat = query_online(indx, chkdom, MAX_RESET, - &q_depth, &dev_type); - if (hd_stat == HD_TSQ_EXCEPTION) { - z90crypt.terminating = 1; - PRINTKC("exception taken!\n"); - break; - } - if (hd_stat == HD_ONLINE) { - cdx_array[numdomains++] = chkdom; - if (*cdx_p == chkdom) { - *correct_cdx_found = 1; - break; - } - } - } - if ((*correct_cdx_found == 1) || (numdomains != 0)) - break; - if (z90crypt.terminating) - break; - } - return numdomains; -} - -static inline int -probe_crypto_domain(int *cdx_p) -{ - int cdx_array[16]; - char cdx_array_text[53], temp[5]; - int correct_cdx_found, numdomains; - - correct_cdx_found = 0; - numdomains = helper_scan_devices(cdx_array, cdx_p, &correct_cdx_found); - - if (z90crypt.terminating) - return TSQ_FATAL_ERROR; - - if (correct_cdx_found) - return 0; - - if (numdomains == 0) { - PRINTKW("Unable to find crypto domain: No devices found\n"); - return Z90C_NO_DEVICES; - } - - if (numdomains == 1) { - if (*cdx_p == -1) { - *cdx_p = cdx_array[0]; - return 0; - } - PRINTKW("incorrect domain: specified = %d, found = %d\n", - *cdx_p, cdx_array[0]); - return Z90C_INCORRECT_DOMAIN; - } - - numdomains--; - sprintf(cdx_array_text, "%d", cdx_array[numdomains]); - while (numdomains) { - numdomains--; - sprintf(temp, ", %d", cdx_array[numdomains]); - strcat(cdx_array_text, temp); - } - - PRINTKW("ambiguous domain detected: specified = %d, found array = %s\n", - *cdx_p, cdx_array_text); - return Z90C_AMBIGUOUS_DOMAIN; -} - -static int -refresh_z90crypt(int *cdx_p) -{ - int i, j, indx, rv; - static struct status local_mask; - struct device *devPtr; - unsigned char oldStat, newStat; - int return_unchanged; - - if (z90crypt.len != sizeof(z90crypt)) - return ENOTINIT; - if (z90crypt.terminating) - return TSQ_FATAL_ERROR; - rv = 0; - if (!z90crypt.hdware_info->hdware_mask.st_count && - !z90crypt.domain_established) { - rv = 
probe_crypto_domain(cdx_p); - if (z90crypt.terminating) - return TSQ_FATAL_ERROR; - if (rv == Z90C_NO_DEVICES) - return 0; // try later - if (rv) - return rv; - z90crypt.cdx = *cdx_p; - z90crypt.domain_established = 1; - } - rv = find_crypto_devices(&local_mask); - if (rv) { - PRINTK("find crypto devices returned %d\n", rv); - return rv; - } - if (!memcmp(&local_mask, &z90crypt.hdware_info->hdware_mask, - sizeof(struct status))) { - return_unchanged = 1; - for (i = 0; i < Z90CRYPT_NUM_TYPES; i++) { - /** - * Check for disabled cards. If any device is marked - * disabled, destroy it. - */ - for (j = 0; - j < z90crypt.hdware_info->type_mask[i].st_count; - j++) { - indx = z90crypt.hdware_info->type_x_addr[i]. - device_index[j]; - devPtr = z90crypt.device_p[indx]; - if (devPtr && devPtr->disabled) { - local_mask.st_mask[indx] = HD_NOT_THERE; - return_unchanged = 0; - } - } - } - if (return_unchanged == 1) - return 0; - } - - spin_lock_irq(&queuespinlock); - for (i = 0; i < z90crypt.max_count; i++) { - oldStat = z90crypt.hdware_info->hdware_mask.st_mask[i]; - newStat = local_mask.st_mask[i]; - if ((oldStat == HD_ONLINE) && (newStat != HD_ONLINE)) - destroy_crypto_device(i); - else if ((oldStat != HD_ONLINE) && (newStat == HD_ONLINE)) { - rv = create_crypto_device(i); - if (rv >= REC_FATAL_ERROR) - return rv; - if (rv != 0) { - local_mask.st_mask[i] = HD_NOT_THERE; - local_mask.st_count--; - } - } - } - memcpy(z90crypt.hdware_info->hdware_mask.st_mask, local_mask.st_mask, - sizeof(local_mask.st_mask)); - z90crypt.hdware_info->hdware_mask.st_count = local_mask.st_count; - z90crypt.hdware_info->hdware_mask.disabled_count = - local_mask.disabled_count; - refresh_index_array(&z90crypt.mask, &z90crypt.overall_device_x); - for (i = 0; i < Z90CRYPT_NUM_TYPES; i++) - refresh_index_array(&(z90crypt.hdware_info->type_mask[i]), - &(z90crypt.hdware_info->type_x_addr[i])); - spin_unlock_irq(&queuespinlock); - - return rv; -} - -static int -find_crypto_devices(struct status *deviceMask) -{ - int i, q_depth, dev_type; - enum hdstat hd_stat; - - deviceMask->st_count = 0; - deviceMask->disabled_count = 0; - deviceMask->user_disabled_count = 0; - - for (i = 0; i < z90crypt.max_count; i++) { - hd_stat = query_online(i, z90crypt.cdx, MAX_RESET, &q_depth, - &dev_type); - if (hd_stat == HD_TSQ_EXCEPTION) { - z90crypt.terminating = 1; - PRINTKC("Exception during probe for crypto devices\n"); - return TSQ_FATAL_ERROR; - } - deviceMask->st_mask[i] = hd_stat; - if (hd_stat == HD_ONLINE) { - PDEBUG("Got an online crypto!: %d\n", i); - PDEBUG("Got a queue depth of %d\n", q_depth); - PDEBUG("Got a device type of %d\n", dev_type); - if (q_depth <= 0) - return TSQ_FATAL_ERROR; - deviceMask->st_count++; - z90crypt.q_depth_array[i] = q_depth; - z90crypt.dev_type_array[i] = dev_type; - } - } - - return 0; -} - -static int -refresh_index_array(struct status *status_str, struct device_x *index_array) -{ - int i, count; - enum devstat stat; - - i = -1; - count = 0; - do { - stat = status_str->st_mask[++i]; - if (stat == DEV_ONLINE) - index_array->device_index[count++] = i; - } while ((i < Z90CRYPT_NUM_DEVS) && (count < status_str->st_count)); - - return count; -} - -static int -create_crypto_device(int index) -{ - int rv, devstat, total_size; - struct device *dev_ptr; - struct status *type_str_p; - int deviceType; - - dev_ptr = z90crypt.device_p[index]; - if (!dev_ptr) { - total_size = sizeof(struct device) + - z90crypt.q_depth_array[index] * sizeof(int); - - dev_ptr = kzalloc(total_size, GFP_ATOMIC); - if (!dev_ptr) { - 
PRINTK("kmalloc device %d failed\n", index); - return ENOMEM; - } - dev_ptr->dev_resp_p = kmalloc(MAX_RESPONSE_SIZE, GFP_ATOMIC); - if (!dev_ptr->dev_resp_p) { - kfree(dev_ptr); - PRINTK("kmalloc device %d rec buffer failed\n", index); - return ENOMEM; - } - dev_ptr->dev_resp_l = MAX_RESPONSE_SIZE; - INIT_LIST_HEAD(&(dev_ptr->dev_caller_list)); - } - - devstat = reset_device(index, z90crypt.cdx, MAX_RESET); - if (devstat == DEV_RSQ_EXCEPTION) { - PRINTK("exception during reset device %d\n", index); - kfree(dev_ptr->dev_resp_p); - kfree(dev_ptr); - return RSQ_FATAL_ERROR; - } - if (devstat == DEV_ONLINE) { - dev_ptr->dev_self_x = index; - dev_ptr->dev_type = z90crypt.dev_type_array[index]; - if (dev_ptr->dev_type == NILDEV) { - rv = probe_device_type(dev_ptr); - if (rv) { - PRINTK("rv = %d from probe_device_type %d\n", - rv, index); - kfree(dev_ptr->dev_resp_p); - kfree(dev_ptr); - return rv; - } - } - if (dev_ptr->dev_type == PCIXCC_UNK) { - rv = probe_PCIXCC_type(dev_ptr); - if (rv) { - PRINTK("rv = %d from probe_PCIXCC_type %d\n", - rv, index); - kfree(dev_ptr->dev_resp_p); - kfree(dev_ptr); - return rv; - } - } - deviceType = dev_ptr->dev_type; - z90crypt.dev_type_array[index] = deviceType; - if (deviceType == PCICA) - z90crypt.hdware_info->device_type_array[index] = 1; - else if (deviceType == PCICC) - z90crypt.hdware_info->device_type_array[index] = 2; - else if (deviceType == PCIXCC_MCL2) - z90crypt.hdware_info->device_type_array[index] = 3; - else if (deviceType == PCIXCC_MCL3) - z90crypt.hdware_info->device_type_array[index] = 4; - else if (deviceType == CEX2C) - z90crypt.hdware_info->device_type_array[index] = 5; - else if (deviceType == CEX2A) - z90crypt.hdware_info->device_type_array[index] = 6; - else // No idea how this would happen. - z90crypt.hdware_info->device_type_array[index] = -1; - } - - /** - * 'q_depth' returned by the hardware is one less than - * the actual depth - */ - dev_ptr->dev_q_depth = z90crypt.q_depth_array[index]; - dev_ptr->dev_type = z90crypt.dev_type_array[index]; - dev_ptr->dev_stat = devstat; - dev_ptr->disabled = 0; - z90crypt.device_p[index] = dev_ptr; - - if (devstat == DEV_ONLINE) { - if (z90crypt.mask.st_mask[index] != DEV_ONLINE) { - z90crypt.mask.st_mask[index] = DEV_ONLINE; - z90crypt.mask.st_count++; - } - deviceType = dev_ptr->dev_type; - type_str_p = &z90crypt.hdware_info->type_mask[deviceType]; - if (type_str_p->st_mask[index] != DEV_ONLINE) { - type_str_p->st_mask[index] = DEV_ONLINE; - type_str_p->st_count++; - } - } - - return 0; -} - -static int -destroy_crypto_device(int index) -{ - struct device *dev_ptr; - int t, disabledFlag; - - dev_ptr = z90crypt.device_p[index]; - - /* remember device type; get rid of device struct */ - if (dev_ptr) { - disabledFlag = dev_ptr->disabled; - t = dev_ptr->dev_type; - kfree(dev_ptr->dev_resp_p); - kfree(dev_ptr); - } else { - disabledFlag = 0; - t = -1; - } - z90crypt.device_p[index] = 0; - - /* if the type is valid, remove the device from the type_mask */ - if ((t != -1) && z90crypt.hdware_info->type_mask[t].st_mask[index]) { - z90crypt.hdware_info->type_mask[t].st_mask[index] = 0x00; - z90crypt.hdware_info->type_mask[t].st_count--; - if (disabledFlag == 1) - z90crypt.hdware_info->type_mask[t].disabled_count--; - } - if (z90crypt.mask.st_mask[index] != DEV_GONE) { - z90crypt.mask.st_mask[index] = DEV_GONE; - z90crypt.mask.st_count--; - } - z90crypt.hdware_info->device_type_array[index] = 0; - - return 0; -} - -static void -destroy_z90crypt(void) -{ - int i; - - for (i = 0; i < z90crypt.max_count; 
i++) - if (z90crypt.device_p[i]) - destroy_crypto_device(i); - kfree(z90crypt.hdware_info); - memset((void *)&z90crypt, 0, sizeof(z90crypt)); -} - -static unsigned char static_testmsg[384] = { -0x00,0x00,0x00,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x00,0x06,0x00,0x00, -0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x58, -0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x43,0x43, -0x41,0x2d,0x41,0x50,0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,0x00,0x00,0x00,0x00, -0x50,0x4b,0x00,0x00,0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -0x00,0x00,0x00,0x00,0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -0x00,0x00,0x00,0x00,0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x54,0x32, -0x01,0x00,0xa0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -0xb8,0x05,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -0x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,0x49,0x43,0x53,0x46, -0x20,0x20,0x20,0x20,0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,0x2d,0x31,0x2e,0x32, -0x37,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44, -0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00, -0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66, -0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,0x88,0x1e,0x00,0x00, -0x57,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,0x03,0x02,0x00,0x00, -0x40,0x01,0x00,0x01,0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,0xf6,0xd2,0x7b,0x58, -0x4b,0xf9,0x28,0x68,0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,0x63,0x42,0xef,0xf8, -0xfd,0xa4,0xf8,0xb0,0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,0x53,0x8c,0x6f,0x4e, -0x72,0x8f,0x6c,0x04,0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,0xf7,0xdd,0xfd,0x4f, -0x11,0x36,0x95,0x5d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 -}; - -static int -probe_device_type(struct device *devPtr) -{ - int rv, dv, i, index, length; - unsigned char psmid[8]; - static unsigned char loc_testmsg[sizeof(static_testmsg)]; - - index = devPtr->dev_self_x; - rv = 0; - do { - memcpy(loc_testmsg, static_testmsg, sizeof(static_testmsg)); - length = sizeof(static_testmsg) - 24; - /* the -24 allows for the header */ - dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg); - if (dv) { - PDEBUG("dv returned by send during probe: %d\n", dv); - if (dv == DEV_SEN_EXCEPTION) { - rv = SEN_FATAL_ERROR; - PRINTKC("exception in send to AP %d\n", index); - break; - } - PDEBUG("return value from send_to_AP: %d\n", rv); - switch (dv) { - case DEV_GONE: - PDEBUG("dev %d not available\n", index); - rv = SEN_NOT_AVAIL; - break; - case DEV_ONLINE: - rv = 0; - break; - case DEV_EMPTY: - rv = SEN_NOT_AVAIL; - break; - case DEV_NO_WORK: - rv = SEN_FATAL_ERROR; - break; - case DEV_BAD_MESSAGE: - rv = SEN_USER_ERROR; - break; - case DEV_QUEUE_FULL: - rv = SEN_QUEUE_FULL; - break; - default: - PRINTK("unknown dv=%d for dev %d\n", dv, index); - rv = SEN_NOT_AVAIL; - break; - } - } - - if (rv) - break; - - for (i = 0; i < 6; i++) { - mdelay(300); - dv = receive_from_AP(index, z90crypt.cdx, - devPtr->dev_resp_l, - devPtr->dev_resp_p, psmid); - PDEBUG("dv returned by DQ = %d\n", dv); - if (dv == DEV_REC_EXCEPTION) { - rv = REC_FATAL_ERROR; - 
PRINTKC("exception in dequeue %d\n", - index); - break; - } - switch (dv) { - case DEV_ONLINE: - rv = 0; - break; - case DEV_EMPTY: - rv = REC_EMPTY; - break; - case DEV_NO_WORK: - rv = REC_NO_WORK; - break; - case DEV_BAD_MESSAGE: - case DEV_GONE: - default: - rv = REC_NO_RESPONSE; - break; - } - if ((rv != 0) && (rv != REC_NO_WORK)) - break; - if (rv == 0) - break; - } - if (rv) - break; - rv = (devPtr->dev_resp_p[0] == 0x00) && - (devPtr->dev_resp_p[1] == 0x86); - if (rv) - devPtr->dev_type = PCICC; - else - devPtr->dev_type = PCICA; - rv = 0; - } while (0); - /* In a general error case, the card is not marked online */ - return rv; -} - -static unsigned char MCL3_testmsg[] = { -0x00,0x00,0x00,0x00,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE, -0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00, -0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00, -0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32, -0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24, -0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20, -0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D, -0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD, -0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22, -0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54, -0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00, -0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C, -0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9, -0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5, -0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01, -0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91, -0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C, -0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96, -0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47, -0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,0xF1,0x3D,0x93,0x53 -}; - -static int 
-probe_PCIXCC_type(struct device *devPtr) -{ - int rv, dv, i, index, length; - unsigned char psmid[8]; - static unsigned char loc_testmsg[548]; - struct CPRBX *cprbx_p; - - index = devPtr->dev_self_x; - rv = 0; - do { - memcpy(loc_testmsg, MCL3_testmsg, sizeof(MCL3_testmsg)); - length = sizeof(MCL3_testmsg) - 0x0C; - dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg); - if (dv) { - PDEBUG("dv returned = %d\n", dv); - if (dv == DEV_SEN_EXCEPTION) { - rv = SEN_FATAL_ERROR; - PRINTKC("exception in send to AP %d\n", index); - break; - } - PDEBUG("return value from send_to_AP: %d\n", rv); - switch (dv) { - case DEV_GONE: - PDEBUG("dev %d not available\n", index); - rv = SEN_NOT_AVAIL; - break; - case DEV_ONLINE: - rv = 0; - break; - case DEV_EMPTY: - rv = SEN_NOT_AVAIL; - break; - case DEV_NO_WORK: - rv = SEN_FATAL_ERROR; - break; - case DEV_BAD_MESSAGE: - rv = SEN_USER_ERROR; - break; - case DEV_QUEUE_FULL: - rv = SEN_QUEUE_FULL; - break; - default: - PRINTK("unknown dv=%d for dev %d\n", dv, index); - rv = SEN_NOT_AVAIL; - break; - } - } - - if (rv) - break; - - for (i = 0; i < 6; i++) { - mdelay(300); - dv = receive_from_AP(index, z90crypt.cdx, - devPtr->dev_resp_l, - devPtr->dev_resp_p, psmid); - PDEBUG("dv returned by DQ = %d\n", dv); - if (dv == DEV_REC_EXCEPTION) { - rv = REC_FATAL_ERROR; - PRINTKC("exception in dequeue %d\n", - index); - break; - } - switch (dv) { - case DEV_ONLINE: - rv = 0; - break; - case DEV_EMPTY: - rv = REC_EMPTY; - break; - case DEV_NO_WORK: - rv = REC_NO_WORK; - break; - case DEV_BAD_MESSAGE: - case DEV_GONE: - default: - rv = REC_NO_RESPONSE; - break; - } - if ((rv != 0) && (rv != REC_NO_WORK)) - break; - if (rv == 0) - break; - } - if (rv) - break; - cprbx_p = (struct CPRBX *) (devPtr->dev_resp_p + 48); - if ((cprbx_p->ccp_rtcode == 8) && (cprbx_p->ccp_rscode == 33)) { - devPtr->dev_type = PCIXCC_MCL2; - PDEBUG("device %d is MCL2\n", index); - } else { - devPtr->dev_type = PCIXCC_MCL3; - PDEBUG("device %d is MCL3\n", index); - } - } while (0); - /* In a general error case, the card is not marked online */ - return rv; -} - -module_init(z90crypt_init_module); -module_exit(z90crypt_cleanup_module); diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c new file mode 100644 index 000000000000..1edc10a7a6f2 --- /dev/null +++ b/drivers/s390/crypto/zcrypt_api.c @@ -0,0 +1,1091 @@ +/* + * linux/drivers/s390/crypto/zcrypt_api.c + * + * zcrypt 2.1.0 + * + * Copyright (C) 2001, 2006 IBM Corporation + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * Cornelia Huck <cornelia.huck@de.ibm.com> + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> + * Ralph Wuerthner <rwuerthn@de.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/miscdevice.h> +#include <linux/fs.h> +#include <linux/proc_fs.h> +#include <linux/compat.h> +#include <asm/atomic.h> +#include <asm/uaccess.h> + +#include "zcrypt_api.h" + +/** + * Module description. + */ +MODULE_AUTHOR("IBM Corporation"); +MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " + "Copyright 2001, 2006 IBM Corporation"); +MODULE_LICENSE("GPL"); + +static DEFINE_SPINLOCK(zcrypt_device_lock); +static LIST_HEAD(zcrypt_device_list); +static int zcrypt_device_count = 0; +static atomic_t zcrypt_open_count = ATOMIC_INIT(0); + +/** + * Device attributes common for all crypto devices. + */ +static ssize_t zcrypt_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct zcrypt_device *zdev = to_ap_dev(dev)->private; + return snprintf(buf, PAGE_SIZE, "%s\n", zdev->type_string); +} + +static DEVICE_ATTR(type, 0444, zcrypt_type_show, NULL); + +static ssize_t zcrypt_online_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct zcrypt_device *zdev = to_ap_dev(dev)->private; + return snprintf(buf, PAGE_SIZE, "%d\n", zdev->online); +} + +static ssize_t zcrypt_online_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct zcrypt_device *zdev = to_ap_dev(dev)->private; + int online; + + if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1) + return -EINVAL; + zdev->online = online; + if (!online) + ap_flush_queue(zdev->ap_dev); + return count; +} + +static DEVICE_ATTR(online, 0644, zcrypt_online_show, zcrypt_online_store); + +static struct attribute * zcrypt_device_attrs[] = { + &dev_attr_type.attr, + &dev_attr_online.attr, + NULL, +}; + +static struct attribute_group zcrypt_device_attr_group = { + .attrs = zcrypt_device_attrs, +}; + +/** + * Move the device towards the head of the device list. + * Need to be called while holding the zcrypt device list lock. + * Note: cards with speed_rating of 0 are kept at the end of the list. + */ +static void __zcrypt_increase_preference(struct zcrypt_device *zdev) +{ + struct zcrypt_device *tmp; + struct list_head *l; + + if (zdev->speed_rating == 0) + return; + for (l = zdev->list.prev; l != &zcrypt_device_list; l = l->prev) { + tmp = list_entry(l, struct zcrypt_device, list); + if ((tmp->request_count + 1) * tmp->speed_rating <= + (zdev->request_count + 1) * zdev->speed_rating && + tmp->speed_rating != 0) + break; + } + if (l == zdev->list.prev) + return; + /* Move zdev behind l */ + list_del(&zdev->list); + list_add(&zdev->list, l); +} + +/** + * Move the device towards the tail of the device list. + * Need to be called while holding the zcrypt device list lock. + * Note: cards with speed_rating of 0 are kept at the end of the list. 
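+ *
+ * Together with __zcrypt_increase_preference() above, this keeps the
+ * list sorted by the weight (request_count + 1) * speed_rating: the
+ * head holds the device with the smallest weight, and the request
+ * routines below simply take the first suitable entry. For example, a
+ * card with speed_rating 100 and 3 requests in flight weighs 400 and
+ * sorts behind an idle card of the same rating (weight 100).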
+ */
+static void __zcrypt_decrease_preference(struct zcrypt_device *zdev)
+{
+	struct zcrypt_device *tmp;
+	struct list_head *l;
+
+	if (zdev->speed_rating == 0)
+		return;
+	for (l = zdev->list.next; l != &zcrypt_device_list; l = l->next) {
+		tmp = list_entry(l, struct zcrypt_device, list);
+		if ((tmp->request_count + 1) * tmp->speed_rating >
+		    (zdev->request_count + 1) * zdev->speed_rating ||
+		    tmp->speed_rating == 0)
+			break;
+	}
+	if (l == zdev->list.next)
+		return;
+	/* Move zdev before l */
+	list_del(&zdev->list);
+	list_add_tail(&zdev->list, l);
+}
+
+static void zcrypt_device_release(struct kref *kref)
+{
+	struct zcrypt_device *zdev =
+		container_of(kref, struct zcrypt_device, refcount);
+	zcrypt_device_free(zdev);
+}
+
+void zcrypt_device_get(struct zcrypt_device *zdev)
+{
+	kref_get(&zdev->refcount);
+}
+EXPORT_SYMBOL(zcrypt_device_get);
+
+int zcrypt_device_put(struct zcrypt_device *zdev)
+{
+	return kref_put(&zdev->refcount, zcrypt_device_release);
+}
+EXPORT_SYMBOL(zcrypt_device_put);
+
+struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size)
+{
+	struct zcrypt_device *zdev;
+
+	zdev = kzalloc(sizeof(struct zcrypt_device), GFP_KERNEL);
+	if (!zdev)
+		return NULL;
+	zdev->reply.message = kmalloc(max_response_size, GFP_KERNEL);
+	if (!zdev->reply.message)
+		goto out_free;
+	zdev->reply.length = max_response_size;
+	spin_lock_init(&zdev->lock);
+	INIT_LIST_HEAD(&zdev->list);
+	return zdev;
+
+out_free:
+	kfree(zdev);
+	return NULL;
+}
+EXPORT_SYMBOL(zcrypt_device_alloc);
+
+void zcrypt_device_free(struct zcrypt_device *zdev)
+{
+	kfree(zdev->reply.message);
+	kfree(zdev);
+}
+EXPORT_SYMBOL(zcrypt_device_free);
+
+/**
+ * Register a crypto device.
+ */
+int zcrypt_device_register(struct zcrypt_device *zdev)
+{
+	int rc;
+
+	rc = sysfs_create_group(&zdev->ap_dev->device.kobj,
+				&zcrypt_device_attr_group);
+	if (rc)
+		goto out;
+	get_device(&zdev->ap_dev->device);
+	kref_init(&zdev->refcount);
+	spin_lock_bh(&zcrypt_device_lock);
+	zdev->online = 1;	/* New devices are online by default. */
+	list_add_tail(&zdev->list, &zcrypt_device_list);
+	__zcrypt_increase_preference(zdev);
+	zcrypt_device_count++;
+	spin_unlock_bh(&zcrypt_device_lock);
+out:
+	return rc;
+}
+EXPORT_SYMBOL(zcrypt_device_register);
+
+/**
+ * Unregister a crypto device.
+ */
+void zcrypt_device_unregister(struct zcrypt_device *zdev)
+{
+	spin_lock_bh(&zcrypt_device_lock);
+	zcrypt_device_count--;
+	list_del_init(&zdev->list);
+	spin_unlock_bh(&zcrypt_device_lock);
+	sysfs_remove_group(&zdev->ap_dev->device.kobj,
+			   &zcrypt_device_attr_group);
+	put_device(&zdev->ap_dev->device);
+	zcrypt_device_put(zdev);
+}
+EXPORT_SYMBOL(zcrypt_device_unregister);
+
+/**
+ * zcrypt_read is not supported beyond zcrypt 1.3.1
+ */
+static ssize_t zcrypt_read(struct file *filp, char __user *buf,
+			   size_t count, loff_t *f_pos)
+{
+	return -EPERM;
+}
+
+/**
+ * Write is not allowed
+ */
+static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
+			    size_t count, loff_t *f_pos)
+{
+	return -EPERM;
+}
+
+/**
+ * Device open/close functions to count number of users.
+ */
+static int zcrypt_open(struct inode *inode, struct file *filp)
+{
+	atomic_inc(&zcrypt_open_count);
+	return 0;
+}
+
+static int zcrypt_release(struct inode *inode, struct file *filp)
+{
+	atomic_dec(&zcrypt_open_count);
+	return 0;
+}
+
+/**
+ * zcrypt ioctls.
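+ *
+ * Each request ioctl copies its argument structure from user space,
+ * hands it to zcrypt_rsa_modexpo(), zcrypt_rsa_crt() or
+ * zcrypt_send_cprb() (retrying while they return -EAGAIN, see
+ * zcrypt_unlocked_ioctl() below) and copies the result back. A minimal
+ * user space sketch, assuming the usual /dev/z90crypt misc device node,
+ * the standard fcntl/ioctl headers plus <asm/zcrypt.h>, and a mex
+ * structure whose buffers have already been filled in; process() stands
+ * in for the caller's own result handling:
+ *
+ *	struct ica_rsa_modexpo mex;
+ *	int fd;
+ *
+ *	fd = open("/dev/z90crypt", O_RDWR);
+ *	if (fd >= 0 && ioctl(fd, ICARSAMODEXPO, &mex) == 0)
+ *		process(mex.outputdata, mex.outputdatalength);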
+ */ +static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex) +{ + struct zcrypt_device *zdev; + int rc; + + if (mex->outputdatalength < mex->inputdatalength) + return -EINVAL; + /** + * As long as outputdatalength is big enough, we can set the + * outputdatalength equal to the inputdatalength, since that is the + * number of bytes we will copy in any case + */ + mex->outputdatalength = mex->inputdatalength; + + spin_lock_bh(&zcrypt_device_lock); + list_for_each_entry(zdev, &zcrypt_device_list, list) { + if (!zdev->online || + !zdev->ops->rsa_modexpo || + zdev->min_mod_size > mex->inputdatalength || + zdev->max_mod_size < mex->inputdatalength) + continue; + zcrypt_device_get(zdev); + get_device(&zdev->ap_dev->device); + zdev->request_count++; + __zcrypt_decrease_preference(zdev); + spin_unlock_bh(&zcrypt_device_lock); + if (try_module_get(zdev->ap_dev->drv->driver.owner)) { + rc = zdev->ops->rsa_modexpo(zdev, mex); + module_put(zdev->ap_dev->drv->driver.owner); + } + else + rc = -EAGAIN; + spin_lock_bh(&zcrypt_device_lock); + zdev->request_count--; + __zcrypt_increase_preference(zdev); + put_device(&zdev->ap_dev->device); + zcrypt_device_put(zdev); + spin_unlock_bh(&zcrypt_device_lock); + return rc; + } + spin_unlock_bh(&zcrypt_device_lock); + return -ENODEV; +} + +static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt) +{ + struct zcrypt_device *zdev; + unsigned long long z1, z2, z3; + int rc, copied; + + if (crt->outputdatalength < crt->inputdatalength || + (crt->inputdatalength & 1)) + return -EINVAL; + /** + * As long as outputdatalength is big enough, we can set the + * outputdatalength equal to the inputdatalength, since that is the + * number of bytes we will copy in any case + */ + crt->outputdatalength = crt->inputdatalength; + + copied = 0; + restart: + spin_lock_bh(&zcrypt_device_lock); + list_for_each_entry(zdev, &zcrypt_device_list, list) { + if (!zdev->online || + !zdev->ops->rsa_modexpo_crt || + zdev->min_mod_size > crt->inputdatalength || + zdev->max_mod_size < crt->inputdatalength) + continue; + if (zdev->short_crt && crt->inputdatalength > 240) { + /** + * Check inputdata for leading zeros for cards + * that can't handle np_prime, bp_key, or + * u_mult_inv > 128 bytes. + */ + if (copied == 0) { + int len; + spin_unlock_bh(&zcrypt_device_lock); + /* len is max 256 / 2 - 120 = 8 */ + len = crt->inputdatalength / 2 - 120; + z1 = z2 = z3 = 0; + if (copy_from_user(&z1, crt->np_prime, len) || + copy_from_user(&z2, crt->bp_key, len) || + copy_from_user(&z3, crt->u_mult_inv, len)) + return -EFAULT; + copied = 1; + /** + * We have to restart device lookup - + * the device list may have changed by now. + */ + goto restart; + } + if (z1 != 0ULL || z2 != 0ULL || z3 != 0ULL) + /* The device can't handle this request. 
*/ + continue; + } + zcrypt_device_get(zdev); + get_device(&zdev->ap_dev->device); + zdev->request_count++; + __zcrypt_decrease_preference(zdev); + spin_unlock_bh(&zcrypt_device_lock); + if (try_module_get(zdev->ap_dev->drv->driver.owner)) { + rc = zdev->ops->rsa_modexpo_crt(zdev, crt); + module_put(zdev->ap_dev->drv->driver.owner); + } + else + rc = -EAGAIN; + spin_lock_bh(&zcrypt_device_lock); + zdev->request_count--; + __zcrypt_increase_preference(zdev); + put_device(&zdev->ap_dev->device); + zcrypt_device_put(zdev); + spin_unlock_bh(&zcrypt_device_lock); + return rc; + } + spin_unlock_bh(&zcrypt_device_lock); + return -ENODEV; +} + +static long zcrypt_send_cprb(struct ica_xcRB *xcRB) +{ + struct zcrypt_device *zdev; + int rc; + + spin_lock_bh(&zcrypt_device_lock); + list_for_each_entry(zdev, &zcrypt_device_list, list) { + if (!zdev->online || !zdev->ops->send_cprb || + (xcRB->user_defined != AUTOSELECT && + AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined) + ) + continue; + zcrypt_device_get(zdev); + get_device(&zdev->ap_dev->device); + zdev->request_count++; + __zcrypt_decrease_preference(zdev); + spin_unlock_bh(&zcrypt_device_lock); + if (try_module_get(zdev->ap_dev->drv->driver.owner)) { + rc = zdev->ops->send_cprb(zdev, xcRB); + module_put(zdev->ap_dev->drv->driver.owner); + } + else + rc = -EAGAIN; + spin_lock_bh(&zcrypt_device_lock); + zdev->request_count--; + __zcrypt_increase_preference(zdev); + put_device(&zdev->ap_dev->device); + zcrypt_device_put(zdev); + spin_unlock_bh(&zcrypt_device_lock); + return rc; + } + spin_unlock_bh(&zcrypt_device_lock); + return -ENODEV; +} + +static void zcrypt_status_mask(char status[AP_DEVICES]) +{ + struct zcrypt_device *zdev; + + memset(status, 0, sizeof(char) * AP_DEVICES); + spin_lock_bh(&zcrypt_device_lock); + list_for_each_entry(zdev, &zcrypt_device_list, list) + status[AP_QID_DEVICE(zdev->ap_dev->qid)] = + zdev->online ? 
zdev->user_space_type : 0x0d; + spin_unlock_bh(&zcrypt_device_lock); +} + +static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES]) +{ + struct zcrypt_device *zdev; + + memset(qdepth, 0, sizeof(char) * AP_DEVICES); + spin_lock_bh(&zcrypt_device_lock); + list_for_each_entry(zdev, &zcrypt_device_list, list) { + spin_lock(&zdev->ap_dev->lock); + qdepth[AP_QID_DEVICE(zdev->ap_dev->qid)] = + zdev->ap_dev->pendingq_count + + zdev->ap_dev->requestq_count; + spin_unlock(&zdev->ap_dev->lock); + } + spin_unlock_bh(&zcrypt_device_lock); +} + +static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES]) +{ + struct zcrypt_device *zdev; + + memset(reqcnt, 0, sizeof(int) * AP_DEVICES); + spin_lock_bh(&zcrypt_device_lock); + list_for_each_entry(zdev, &zcrypt_device_list, list) { + spin_lock(&zdev->ap_dev->lock); + reqcnt[AP_QID_DEVICE(zdev->ap_dev->qid)] = + zdev->ap_dev->total_request_count; + spin_unlock(&zdev->ap_dev->lock); + } + spin_unlock_bh(&zcrypt_device_lock); +} + +static int zcrypt_pendingq_count(void) +{ + struct zcrypt_device *zdev; + int pendingq_count = 0; + + spin_lock_bh(&zcrypt_device_lock); + list_for_each_entry(zdev, &zcrypt_device_list, list) { + spin_lock(&zdev->ap_dev->lock); + pendingq_count += zdev->ap_dev->pendingq_count; + spin_unlock(&zdev->ap_dev->lock); + } + spin_unlock_bh(&zcrypt_device_lock); + return pendingq_count; +} + +static int zcrypt_requestq_count(void) +{ + struct zcrypt_device *zdev; + int requestq_count = 0; + + spin_lock_bh(&zcrypt_device_lock); + list_for_each_entry(zdev, &zcrypt_device_list, list) { + spin_lock(&zdev->ap_dev->lock); + requestq_count += zdev->ap_dev->requestq_count; + spin_unlock(&zdev->ap_dev->lock); + } + spin_unlock_bh(&zcrypt_device_lock); + return requestq_count; +} + +static int zcrypt_count_type(int type) +{ + struct zcrypt_device *zdev; + int device_count = 0; + + spin_lock_bh(&zcrypt_device_lock); + list_for_each_entry(zdev, &zcrypt_device_list, list) + if (zdev->user_space_type == type) + device_count++; + spin_unlock_bh(&zcrypt_device_lock); + return device_count; +} + +/** + * Old, deprecated combi status call. 
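+ *
+ * Gathers the device counts, queue lengths, open count and the status
+ * and qdepth masks into a single struct ica_z90_status and copies the
+ * whole thing to user space in one ioctl.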
+ */ +static long zcrypt_ica_status(struct file *filp, unsigned long arg) +{ + struct ica_z90_status *pstat; + int ret; + + pstat = kzalloc(sizeof(*pstat), GFP_KERNEL); + if (!pstat) + return -ENOMEM; + pstat->totalcount = zcrypt_device_count; + pstat->leedslitecount = zcrypt_count_type(ZCRYPT_PCICA); + pstat->leeds2count = zcrypt_count_type(ZCRYPT_PCICC); + pstat->requestqWaitCount = zcrypt_requestq_count(); + pstat->pendingqWaitCount = zcrypt_pendingq_count(); + pstat->totalOpenCount = atomic_read(&zcrypt_open_count); + pstat->cryptoDomain = ap_domain_index; + zcrypt_status_mask(pstat->status); + zcrypt_qdepth_mask(pstat->qdepth); + ret = 0; + if (copy_to_user((void __user *) arg, pstat, sizeof(*pstat))) + ret = -EFAULT; + kfree(pstat); + return ret; +} + +static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int rc; + + switch (cmd) { + case ICARSAMODEXPO: { + struct ica_rsa_modexpo __user *umex = (void __user *) arg; + struct ica_rsa_modexpo mex; + if (copy_from_user(&mex, umex, sizeof(mex))) + return -EFAULT; + do { + rc = zcrypt_rsa_modexpo(&mex); + } while (rc == -EAGAIN); + if (rc) + return rc; + return put_user(mex.outputdatalength, &umex->outputdatalength); + } + case ICARSACRT: { + struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg; + struct ica_rsa_modexpo_crt crt; + if (copy_from_user(&crt, ucrt, sizeof(crt))) + return -EFAULT; + do { + rc = zcrypt_rsa_crt(&crt); + } while (rc == -EAGAIN); + if (rc) + return rc; + return put_user(crt.outputdatalength, &ucrt->outputdatalength); + } + case ZSECSENDCPRB: { + struct ica_xcRB __user *uxcRB = (void __user *) arg; + struct ica_xcRB xcRB; + if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB))) + return -EFAULT; + do { + rc = zcrypt_send_cprb(&xcRB); + } while (rc == -EAGAIN); + if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB))) + return -EFAULT; + return rc; + } + case Z90STAT_STATUS_MASK: { + char status[AP_DEVICES]; + zcrypt_status_mask(status); + if (copy_to_user((char __user *) arg, status, + sizeof(char) * AP_DEVICES)) + return -EFAULT; + return 0; + } + case Z90STAT_QDEPTH_MASK: { + char qdepth[AP_DEVICES]; + zcrypt_qdepth_mask(qdepth); + if (copy_to_user((char __user *) arg, qdepth, + sizeof(char) * AP_DEVICES)) + return -EFAULT; + return 0; + } + case Z90STAT_PERDEV_REQCNT: { + int reqcnt[AP_DEVICES]; + zcrypt_perdev_reqcnt(reqcnt); + if (copy_to_user((int __user *) arg, reqcnt, + sizeof(int) * AP_DEVICES)) + return -EFAULT; + return 0; + } + case Z90STAT_REQUESTQ_COUNT: + return put_user(zcrypt_requestq_count(), (int __user *) arg); + case Z90STAT_PENDINGQ_COUNT: + return put_user(zcrypt_pendingq_count(), (int __user *) arg); + case Z90STAT_TOTALOPEN_COUNT: + return put_user(atomic_read(&zcrypt_open_count), + (int __user *) arg); + case Z90STAT_DOMAIN_INDEX: + return put_user(ap_domain_index, (int __user *) arg); + /** + * Deprecated ioctls. Don't add another device count ioctl, + * you can count them yourself in the user space with the + * output of the Z90STAT_STATUS_MASK ioctl. 
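+	 * A user space sketch of that counting, assuming AP_DEVICES is
+	 * 64 and fd is an open /dev/z90crypt descriptor (6 is the CEX2A
+	 * type value, see the ZCRYPT_* defines in zcrypt_api.h):
+	 *
+	 *	char status[64];
+	 *	int i, cex2a = 0;
+	 *
+	 *	if (ioctl(fd, Z90STAT_STATUS_MASK, status) == 0)
+	 *		for (i = 0; i < 64; i++)
+	 *			if (status[i] == 6)
+	 *				cex2a++;
+	 *
+	 * Note that this counts online cards only; an offline card
+	 * reports 0x0d instead of its type value.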
+ */ + case ICAZ90STATUS: + return zcrypt_ica_status(filp, arg); + case Z90STAT_TOTALCOUNT: + return put_user(zcrypt_device_count, (int __user *) arg); + case Z90STAT_PCICACOUNT: + return put_user(zcrypt_count_type(ZCRYPT_PCICA), + (int __user *) arg); + case Z90STAT_PCICCCOUNT: + return put_user(zcrypt_count_type(ZCRYPT_PCICC), + (int __user *) arg); + case Z90STAT_PCIXCCMCL2COUNT: + return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2), + (int __user *) arg); + case Z90STAT_PCIXCCMCL3COUNT: + return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL3), + (int __user *) arg); + case Z90STAT_PCIXCCCOUNT: + return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2) + + zcrypt_count_type(ZCRYPT_PCIXCC_MCL3), + (int __user *) arg); + case Z90STAT_CEX2CCOUNT: + return put_user(zcrypt_count_type(ZCRYPT_CEX2C), + (int __user *) arg); + case Z90STAT_CEX2ACOUNT: + return put_user(zcrypt_count_type(ZCRYPT_CEX2A), + (int __user *) arg); + default: + /* unknown ioctl number */ + return -ENOIOCTLCMD; + } +} + +#ifdef CONFIG_COMPAT +/** + * ioctl32 conversion routines + */ +struct compat_ica_rsa_modexpo { + compat_uptr_t inputdata; + unsigned int inputdatalength; + compat_uptr_t outputdata; + unsigned int outputdatalength; + compat_uptr_t b_key; + compat_uptr_t n_modulus; +}; + +static long trans_modexpo32(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg); + struct compat_ica_rsa_modexpo mex32; + struct ica_rsa_modexpo mex64; + long rc; + + if (copy_from_user(&mex32, umex32, sizeof(mex32))) + return -EFAULT; + mex64.inputdata = compat_ptr(mex32.inputdata); + mex64.inputdatalength = mex32.inputdatalength; + mex64.outputdata = compat_ptr(mex32.outputdata); + mex64.outputdatalength = mex32.outputdatalength; + mex64.b_key = compat_ptr(mex32.b_key); + mex64.n_modulus = compat_ptr(mex32.n_modulus); + do { + rc = zcrypt_rsa_modexpo(&mex64); + } while (rc == -EAGAIN); + if (!rc) + rc = put_user(mex64.outputdatalength, + &umex32->outputdatalength); + return rc; +} + +struct compat_ica_rsa_modexpo_crt { + compat_uptr_t inputdata; + unsigned int inputdatalength; + compat_uptr_t outputdata; + unsigned int outputdatalength; + compat_uptr_t bp_key; + compat_uptr_t bq_key; + compat_uptr_t np_prime; + compat_uptr_t nq_prime; + compat_uptr_t u_mult_inv; +}; + +static long trans_modexpo_crt32(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg); + struct compat_ica_rsa_modexpo_crt crt32; + struct ica_rsa_modexpo_crt crt64; + long rc; + + if (copy_from_user(&crt32, ucrt32, sizeof(crt32))) + return -EFAULT; + crt64.inputdata = compat_ptr(crt32.inputdata); + crt64.inputdatalength = crt32.inputdatalength; + crt64.outputdata= compat_ptr(crt32.outputdata); + crt64.outputdatalength = crt32.outputdatalength; + crt64.bp_key = compat_ptr(crt32.bp_key); + crt64.bq_key = compat_ptr(crt32.bq_key); + crt64.np_prime = compat_ptr(crt32.np_prime); + crt64.nq_prime = compat_ptr(crt32.nq_prime); + crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv); + do { + rc = zcrypt_rsa_crt(&crt64); + } while (rc == -EAGAIN); + if (!rc) + rc = put_user(crt64.outputdatalength, + &ucrt32->outputdatalength); + return rc; +} + +struct compat_ica_xcRB { + unsigned short agent_ID; + unsigned int user_defined; + unsigned short request_ID; + unsigned int request_control_blk_length; + unsigned char padding1[16 - sizeof (compat_uptr_t)]; + compat_uptr_t request_control_blk_addr; + unsigned int request_data_length; + char 
padding2[16 - sizeof (compat_uptr_t)]; + compat_uptr_t request_data_address; + unsigned int reply_control_blk_length; + char padding3[16 - sizeof (compat_uptr_t)]; + compat_uptr_t reply_control_blk_addr; + unsigned int reply_data_length; + char padding4[16 - sizeof (compat_uptr_t)]; + compat_uptr_t reply_data_addr; + unsigned short priority_window; + unsigned int status; +} __attribute__((packed)); + +static long trans_xcRB32(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg); + struct compat_ica_xcRB xcRB32; + struct ica_xcRB xcRB64; + long rc; + + if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32))) + return -EFAULT; + xcRB64.agent_ID = xcRB32.agent_ID; + xcRB64.user_defined = xcRB32.user_defined; + xcRB64.request_ID = xcRB32.request_ID; + xcRB64.request_control_blk_length = + xcRB32.request_control_blk_length; + xcRB64.request_control_blk_addr = + compat_ptr(xcRB32.request_control_blk_addr); + xcRB64.request_data_length = + xcRB32.request_data_length; + xcRB64.request_data_address = + compat_ptr(xcRB32.request_data_address); + xcRB64.reply_control_blk_length = + xcRB32.reply_control_blk_length; + xcRB64.reply_control_blk_addr = + compat_ptr(xcRB32.reply_control_blk_addr); + xcRB64.reply_data_length = xcRB32.reply_data_length; + xcRB64.reply_data_addr = + compat_ptr(xcRB32.reply_data_addr); + xcRB64.priority_window = xcRB32.priority_window; + xcRB64.status = xcRB32.status; + do { + rc = zcrypt_send_cprb(&xcRB64); + } while (rc == -EAGAIN); + xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length; + xcRB32.reply_data_length = xcRB64.reply_data_length; + xcRB32.status = xcRB64.status; + if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32))) + return -EFAULT; + return rc; +} + +long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + if (cmd == ICARSAMODEXPO) + return trans_modexpo32(filp, cmd, arg); + if (cmd == ICARSACRT) + return trans_modexpo_crt32(filp, cmd, arg); + if (cmd == ZSECSENDCPRB) + return trans_xcRB32(filp, cmd, arg); + return zcrypt_unlocked_ioctl(filp, cmd, arg); +} +#endif + +/** + * Misc device file operations. + */ +static struct file_operations zcrypt_fops = { + .owner = THIS_MODULE, + .read = zcrypt_read, + .write = zcrypt_write, + .unlocked_ioctl = zcrypt_unlocked_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = zcrypt_compat_ioctl, +#endif + .open = zcrypt_open, + .release = zcrypt_release +}; + +/** + * Misc device. + */ +static struct miscdevice zcrypt_misc_device = { + .minor = MISC_DYNAMIC_MINOR, + .name = "z90crypt", + .fops = &zcrypt_fops, +}; + +/** + * Deprecated /proc entry support. 
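+ *
+ * Reading /proc/driver/z90crypt returns the version, the per-type
+ * device counts and three hex matrices (online status, queue depths,
+ * per-device request counts). Writing an edited copy of that output
+ * back toggles cards: within the "Online devices" matrix a 'd'
+ * disables and an 'e' enables the card at that position, while the
+ * digits 0-6 leave it unchanged (see zcrypt_status_write() below).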
+ */ +static struct proc_dir_entry *zcrypt_entry; + +static inline int sprintcl(unsigned char *outaddr, unsigned char *addr, + unsigned int len) +{ + int hl, i; + + hl = 0; + for (i = 0; i < len; i++) + hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]); + hl += sprintf(outaddr+hl, " "); + return hl; +} + +static inline int sprintrw(unsigned char *outaddr, unsigned char *addr, + unsigned int len) +{ + int hl, inl, c, cx; + + hl = sprintf(outaddr, " "); + inl = 0; + for (c = 0; c < (len / 16); c++) { + hl += sprintcl(outaddr+hl, addr+inl, 16); + inl += 16; + } + cx = len%16; + if (cx) { + hl += sprintcl(outaddr+hl, addr+inl, cx); + inl += cx; + } + hl += sprintf(outaddr+hl, "\n"); + return hl; +} + +static inline int sprinthx(unsigned char *title, unsigned char *outaddr, + unsigned char *addr, unsigned int len) +{ + int hl, inl, r, rx; + + hl = sprintf(outaddr, "\n%s\n", title); + inl = 0; + for (r = 0; r < (len / 64); r++) { + hl += sprintrw(outaddr+hl, addr+inl, 64); + inl += 64; + } + rx = len % 64; + if (rx) { + hl += sprintrw(outaddr+hl, addr+inl, rx); + inl += rx; + } + hl += sprintf(outaddr+hl, "\n"); + return hl; +} + +static inline int sprinthx4(unsigned char *title, unsigned char *outaddr, + unsigned int *array, unsigned int len) +{ + int hl, r; + + hl = sprintf(outaddr, "\n%s\n", title); + for (r = 0; r < len; r++) { + if ((r % 8) == 0) + hl += sprintf(outaddr+hl, " "); + hl += sprintf(outaddr+hl, "%08X ", array[r]); + if ((r % 8) == 7) + hl += sprintf(outaddr+hl, "\n"); + } + hl += sprintf(outaddr+hl, "\n"); + return hl; +} + +static int zcrypt_status_read(char *resp_buff, char **start, off_t offset, + int count, int *eof, void *data) +{ + unsigned char *workarea; + int len; + + len = 0; + + /* resp_buff is a page. Use the right half for a work area */ + workarea = resp_buff + 2000; + len += sprintf(resp_buff + len, "\nzcrypt version: %d.%d.%d\n", + ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT); + len += sprintf(resp_buff + len, "Cryptographic domain: %d\n", + ap_domain_index); + len += sprintf(resp_buff + len, "Total device count: %d\n", + zcrypt_device_count); + len += sprintf(resp_buff + len, "PCICA count: %d\n", + zcrypt_count_type(ZCRYPT_PCICA)); + len += sprintf(resp_buff + len, "PCICC count: %d\n", + zcrypt_count_type(ZCRYPT_PCICC)); + len += sprintf(resp_buff + len, "PCIXCC MCL2 count: %d\n", + zcrypt_count_type(ZCRYPT_PCIXCC_MCL2)); + len += sprintf(resp_buff + len, "PCIXCC MCL3 count: %d\n", + zcrypt_count_type(ZCRYPT_PCIXCC_MCL3)); + len += sprintf(resp_buff + len, "CEX2C count: %d\n", + zcrypt_count_type(ZCRYPT_CEX2C)); + len += sprintf(resp_buff + len, "CEX2A count: %d\n", + zcrypt_count_type(ZCRYPT_CEX2A)); + len += sprintf(resp_buff + len, "requestq count: %d\n", + zcrypt_requestq_count()); + len += sprintf(resp_buff + len, "pendingq count: %d\n", + zcrypt_pendingq_count()); + len += sprintf(resp_buff + len, "Total open handles: %d\n\n", + atomic_read(&zcrypt_open_count)); + zcrypt_status_mask(workarea); + len += sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) " + "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A", + resp_buff+len, workarea, AP_DEVICES); + zcrypt_qdepth_mask(workarea); + len += sprinthx("Waiting work element counts", + resp_buff+len, workarea, AP_DEVICES); + zcrypt_perdev_reqcnt((unsigned int *) workarea); + len += sprinthx4("Per-device successfully completed request counts", + resp_buff+len,(unsigned int *) workarea, AP_DEVICES); + *eof = 1; + memset((void *) workarea, 0x00, AP_DEVICES * sizeof(unsigned int)); + return len; +} + +static void 
zcrypt_disable_card(int index) +{ + struct zcrypt_device *zdev; + + spin_lock_bh(&zcrypt_device_lock); + list_for_each_entry(zdev, &zcrypt_device_list, list) + if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) { + zdev->online = 0; + ap_flush_queue(zdev->ap_dev); + break; + } + spin_unlock_bh(&zcrypt_device_lock); +} + +static void zcrypt_enable_card(int index) +{ + struct zcrypt_device *zdev; + + spin_lock_bh(&zcrypt_device_lock); + list_for_each_entry(zdev, &zcrypt_device_list, list) + if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) { + zdev->online = 1; + break; + } + spin_unlock_bh(&zcrypt_device_lock); +} + +static int zcrypt_status_write(struct file *file, const char __user *buffer, + unsigned long count, void *data) +{ + unsigned char *lbuf, *ptr; + unsigned long local_count; + int j; + + if (count <= 0) + return 0; + +#define LBUFSIZE 1200UL + lbuf = kmalloc(LBUFSIZE, GFP_KERNEL); + if (!lbuf) { + PRINTK("kmalloc failed!\n"); + return 0; + } + + local_count = min(LBUFSIZE - 1, count); + if (copy_from_user(lbuf, buffer, local_count) != 0) { + kfree(lbuf); + return -EFAULT; + } + lbuf[local_count] = '\0'; + + ptr = strstr(lbuf, "Online devices"); + if (!ptr) { + PRINTK("Unable to parse data (missing \"Online devices\")\n"); + goto out; + } + ptr = strstr(ptr, "\n"); + if (!ptr) { + PRINTK("Unable to parse data (missing newline " + "after \"Online devices\")\n"); + goto out; + } + ptr++; + + if (strstr(ptr, "Waiting work element counts") == NULL) { + PRINTK("Unable to parse data (missing " + "\"Waiting work element counts\")\n"); + goto out; + } + + for (j = 0; j < 64 && *ptr; ptr++) { + /** + * '0' for no device, '1' for PCICA, '2' for PCICC, + * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3, + * '5' for CEX2C and '6' for CEX2A' + */ + if (*ptr >= '0' && *ptr <= '6') + j++; + else if (*ptr == 'd' || *ptr == 'D') + zcrypt_disable_card(j++); + else if (*ptr == 'e' || *ptr == 'E') + zcrypt_enable_card(j++); + else if (*ptr != ' ' && *ptr != '\t') + break; + } +out: + kfree(lbuf); + return count; +} + +/** + * The module initialization code. + */ +int __init zcrypt_api_init(void) +{ + int rc; + + /* Register the request sprayer. */ + rc = misc_register(&zcrypt_misc_device); + if (rc < 0) { + PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n", + zcrypt_misc_device.minor, rc); + goto out; + } + + /* Set up the proc file system */ + zcrypt_entry = create_proc_entry("driver/z90crypt", 0644, NULL); + if (!zcrypt_entry) { + PRINTK("Couldn't create z90crypt proc entry\n"); + rc = -ENOMEM; + goto out_misc; + } + zcrypt_entry->nlink = 1; + zcrypt_entry->data = NULL; + zcrypt_entry->read_proc = zcrypt_status_read; + zcrypt_entry->write_proc = zcrypt_status_write; + + return 0; + +out_misc: + misc_deregister(&zcrypt_misc_device); +out: + return rc; +} + +/** + * The module termination code. 
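+ * Removes the /proc entry first, then deregisters the misc device,
+ * i.e. the reverse of zcrypt_api_init().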
+ */ +void zcrypt_api_exit(void) +{ + remove_proc_entry("driver/z90crypt", NULL); + misc_deregister(&zcrypt_misc_device); +} + +#ifndef CONFIG_ZCRYPT_MONOLITHIC +module_init(zcrypt_api_init); +module_exit(zcrypt_api_exit); +#endif diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h new file mode 100644 index 000000000000..de4877ee618f --- /dev/null +++ b/drivers/s390/crypto/zcrypt_api.h @@ -0,0 +1,141 @@ +/* + * linux/drivers/s390/crypto/zcrypt_api.h + * + * zcrypt 2.1.0 + * + * Copyright (C) 2001, 2006 IBM Corporation + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * Cornelia Huck <cornelia.huck@de.ibm.com> + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> + * Ralph Wuerthner <rwuerthn@de.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _ZCRYPT_API_H_ +#define _ZCRYPT_API_H_ + +/** + * Macro definitions + * + * PDEBUG debugs in the form "zcrypt: function_name -> message" + * + * PRINTK is like PDEBUG, except that it is always enabled + * PRINTKN is like PRINTK, except that it does not include the function name + * PRINTKW is like PRINTK, except that it uses KERN_WARNING + * PRINTKC is like PRINTK, except that it uses KERN_CRIT + */ +#define DEV_NAME "zcrypt" + +#define PRINTK(fmt, args...) \ + printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args) +#define PRINTKN(fmt, args...) \ + printk(KERN_DEBUG DEV_NAME ": " fmt, ## args) +#define PRINTKW(fmt, args...) \ + printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args) +#define PRINTKC(fmt, args...) \ + printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args) + +#ifdef ZCRYPT_DEBUG +#define PDEBUG(fmt, args...) \ + printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args) +#else +#define PDEBUG(fmt, args...) do {} while (0) +#endif + +#include "ap_bus.h" +#include <asm/zcrypt.h> + +/* deprecated status calls */ +#define ICAZ90STATUS _IOR(ZCRYPT_IOCTL_MAGIC, 0x10, struct ica_z90_status) +#define Z90STAT_PCIXCCCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x43, int) + +/** + * This structure is deprecated and the corresponding ioctl() has been + * replaced with individual ioctl()s for each piece of data! 
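+ * (Z90STAT_TOTALCOUNT, Z90STAT_REQUESTQ_COUNT, Z90STAT_STATUS_MASK
+ * and the other Z90STAT_* ioctls handled in zcrypt_api.c.)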
+ */ +struct ica_z90_status { + int totalcount; + int leedslitecount; // PCICA + int leeds2count; // PCICC + // int PCIXCCCount; is not in struct for backward compatibility + int requestqWaitCount; + int pendingqWaitCount; + int totalOpenCount; + int cryptoDomain; + // status: 0=not there, 1=PCICA, 2=PCICC, 3=PCIXCC_MCL2, 4=PCIXCC_MCL3, + // 5=CEX2C + unsigned char status[64]; + // qdepth: # work elements waiting for each device + unsigned char qdepth[64]; +}; + +/** + * device type for an actual device is either PCICA, PCICC, PCIXCC_MCL2, + * PCIXCC_MCL3, CEX2C, or CEX2A + * + * NOTE: PCIXCC_MCL3 refers to a PCIXCC with May 2004 version of Licensed + * Internal Code (LIC) (EC J12220 level 29). + * PCIXCC_MCL2 refers to any LIC before this level. + */ +#define ZCRYPT_PCICA 1 +#define ZCRYPT_PCICC 2 +#define ZCRYPT_PCIXCC_MCL2 3 +#define ZCRYPT_PCIXCC_MCL3 4 +#define ZCRYPT_CEX2C 5 +#define ZCRYPT_CEX2A 6 + +struct zcrypt_device; + +struct zcrypt_ops { + long (*rsa_modexpo)(struct zcrypt_device *, struct ica_rsa_modexpo *); + long (*rsa_modexpo_crt)(struct zcrypt_device *, + struct ica_rsa_modexpo_crt *); + long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *); +}; + +struct zcrypt_device { + struct list_head list; /* Device list. */ + spinlock_t lock; /* Per device lock. */ + struct kref refcount; /* device refcounting */ + struct ap_device *ap_dev; /* The "real" ap device. */ + struct zcrypt_ops *ops; /* Crypto operations. */ + int online; /* User online/offline */ + + int user_space_type; /* User space device id. */ + char *type_string; /* User space device name. */ + int min_mod_size; /* Min number of bits. */ + int max_mod_size; /* Max number of bits. */ + int short_crt; /* Card has crt length restriction. */ + int speed_rating; /* Speed of the crypto device. */ + + int request_count; /* # current requests. */ + + struct ap_message reply; /* Per-device reply structure. */ +}; + +struct zcrypt_device *zcrypt_device_alloc(size_t); +void zcrypt_device_free(struct zcrypt_device *); +void zcrypt_device_get(struct zcrypt_device *); +int zcrypt_device_put(struct zcrypt_device *); +int zcrypt_device_register(struct zcrypt_device *); +void zcrypt_device_unregister(struct zcrypt_device *); +int zcrypt_api_init(void); +void zcrypt_api_exit(void); + +#endif /* _ZCRYPT_API_H_ */ diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h new file mode 100644 index 000000000000..8dbcf0eef3e5 --- /dev/null +++ b/drivers/s390/crypto/zcrypt_cca_key.h @@ -0,0 +1,350 @@ +/* + * linux/drivers/s390/crypto/zcrypt_cca_key.h + * + * zcrypt 2.1.0 + * + * Copyright (C) 2001, 2006 IBM Corporation + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _ZCRYPT_CCA_KEY_H_ +#define _ZCRYPT_CCA_KEY_H_ + +struct T6_keyBlock_hdr { + unsigned short blen; + unsigned short ulen; + unsigned short flags; +}; + +/** + * mapping for the cca private ME key token. + * Three parts of interest here: the header, the private section and + * the public section. + * + * mapping for the cca key token header + */ +struct cca_token_hdr { + unsigned char token_identifier; + unsigned char version; + unsigned short token_length; + unsigned char reserved[4]; +} __attribute__((packed)); + +#define CCA_TKN_HDR_ID_EXT 0x1E + +/** + * mapping for the cca private ME section + */ +struct cca_private_ext_ME_sec { + unsigned char section_identifier; + unsigned char version; + unsigned short section_length; + unsigned char private_key_hash[20]; + unsigned char reserved1[4]; + unsigned char key_format; + unsigned char reserved2; + unsigned char key_name_hash[20]; + unsigned char key_use_flags[4]; + unsigned char reserved3[6]; + unsigned char reserved4[24]; + unsigned char confounder[24]; + unsigned char exponent[128]; + unsigned char modulus[128]; +} __attribute__((packed)); + +#define CCA_PVT_USAGE_ALL 0x80 + +/** + * mapping for the cca public section + * In a private key, the modulus doesn't appear in the public + * section. So, an arbitrary public exponent of 0x010001 will be + * used, for a section length of 0x0F always. + */ +struct cca_public_sec { + unsigned char section_identifier; + unsigned char version; + unsigned short section_length; + unsigned char reserved[2]; + unsigned short exponent_len; + unsigned short modulus_bit_len; + unsigned short modulus_byte_len; /* In a private key, this is 0 */ +} __attribute__((packed)); + +/** + * mapping for the cca private CRT key 'token' + * The first three parts (the only parts considered in this release) + * are: the header, the private section and the public section. + * The header and public section are the same as for the + * struct cca_private_ext_ME + * + * Following the structure are the quantities p, q, dp, dq, u, pad, + * and modulus, in that order, where pad_len is the modulo 8 + * complement of the residue modulo 8 of the sum of + * (p_len + q_len + dp_len + dq_len + u_len). + */ +struct cca_pvt_ext_CRT_sec { + unsigned char section_identifier; + unsigned char version; + unsigned short section_length; + unsigned char private_key_hash[20]; + unsigned char reserved1[4]; + unsigned char key_format; + unsigned char reserved2; + unsigned char key_name_hash[20]; + unsigned char key_use_flags[4]; + unsigned short p_len; + unsigned short q_len; + unsigned short dp_len; + unsigned short dq_len; + unsigned short u_len; + unsigned short mod_len; + unsigned char reserved3[4]; + unsigned short pad_len; + unsigned char reserved4[52]; + unsigned char confounder[8]; +} __attribute__((packed)); + +#define CCA_PVT_EXT_CRT_SEC_ID_PVT 0x08 +#define CCA_PVT_EXT_CRT_SEC_FMT_CL 0x40 + +/** + * Set up private key fields of a type6 MEX message. + * Note that all numerics in the key token are big-endian, + * while the entries in the key block header are little-endian. 
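+ *
+ * The sizes here are fixed: the private ME token is always 0x0183
+ * bytes, so blen is 0x0183 + sizeof(struct T6_keyBlock_hdr) = 0x0189
+ * and ulen is blen - 2, matching the constants set up in the code
+ * below.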
+ * + * @mex: pointer to user input data + * @p: pointer to memory area for the key + * + * Returns the size of the key area or -EFAULT + */ +static inline int zcrypt_type6_mex_key_de(struct ica_rsa_modexpo *mex, + void *p, int big_endian) +{ + static struct cca_token_hdr static_pvt_me_hdr = { + .token_identifier = 0x1E, + .token_length = 0x0183, + }; + static struct cca_private_ext_ME_sec static_pvt_me_sec = { + .section_identifier = 0x02, + .section_length = 0x016C, + .key_use_flags = {0x80,0x00,0x00,0x00}, + }; + static struct cca_public_sec static_pub_me_sec = { + .section_identifier = 0x04, + .section_length = 0x000F, + .exponent_len = 0x0003, + }; + static char pk_exponent[3] = { 0x01, 0x00, 0x01 }; + struct { + struct T6_keyBlock_hdr t6_hdr; + struct cca_token_hdr pvtMeHdr; + struct cca_private_ext_ME_sec pvtMeSec; + struct cca_public_sec pubMeSec; + char exponent[3]; + } __attribute__((packed)) *key = p; + unsigned char *temp; + + memset(key, 0, sizeof(*key)); + + if (big_endian) { + key->t6_hdr.blen = cpu_to_be16(0x189); + key->t6_hdr.ulen = cpu_to_be16(0x189 - 2); + } else { + key->t6_hdr.blen = cpu_to_le16(0x189); + key->t6_hdr.ulen = cpu_to_le16(0x189 - 2); + } + key->pvtMeHdr = static_pvt_me_hdr; + key->pvtMeSec = static_pvt_me_sec; + key->pubMeSec = static_pub_me_sec; + /** + * In a private key, the modulus doesn't appear in the public + * section. So, an arbitrary public exponent of 0x010001 will be + * used. + */ + memcpy(key->exponent, pk_exponent, 3); + + /* key parameter block */ + temp = key->pvtMeSec.exponent + + sizeof(key->pvtMeSec.exponent) - mex->inputdatalength; + if (copy_from_user(temp, mex->b_key, mex->inputdatalength)) + return -EFAULT; + + /* modulus */ + temp = key->pvtMeSec.modulus + + sizeof(key->pvtMeSec.modulus) - mex->inputdatalength; + if (copy_from_user(temp, mex->n_modulus, mex->inputdatalength)) + return -EFAULT; + key->pubMeSec.modulus_bit_len = 8 * mex->inputdatalength; + return sizeof(*key); +} + +/** + * Set up private key fields of a type6 MEX message. The _pad variant + * strips leading zeroes from the b_key. + * Note that all numerics in the key token are big-endian, + * while the entries in the key block header are little-endian. + * + * @mex: pointer to user input data + * @p: pointer to memory area for the key + * + * Returns the size of the key area or -EFAULT + */ +static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, + void *p, int big_endian) +{ + static struct cca_token_hdr static_pub_hdr = { + .token_identifier = 0x1E, + }; + static struct cca_public_sec static_pub_sec = { + .section_identifier = 0x04, + }; + struct { + struct T6_keyBlock_hdr t6_hdr; + struct cca_token_hdr pubHdr; + struct cca_public_sec pubSec; + char exponent[0]; + } __attribute__((packed)) *key = p; + unsigned char *temp; + int i; + + memset(key, 0, sizeof(*key)); + + key->pubHdr = static_pub_hdr; + key->pubSec = static_pub_sec; + + /* key parameter block */ + temp = key->exponent; + if (copy_from_user(temp, mex->b_key, mex->inputdatalength)) + return -EFAULT; + /* Strip leading zeroes from b_key. 
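+	 * The public exponent is stored with an explicit length
+	 * (pubSec.exponent_len, set below), so leading zero bytes
+	 * would only inflate the token; an all-zero b_key is
+	 * rejected with -EINVAL.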
*/ + for (i = 0; i < mex->inputdatalength; i++) + if (temp[i]) + break; + if (i >= mex->inputdatalength) + return -EINVAL; + memmove(temp, temp + i, mex->inputdatalength - i); + temp += mex->inputdatalength - i; + /* modulus */ + if (copy_from_user(temp, mex->n_modulus, mex->inputdatalength)) + return -EFAULT; + + key->pubSec.modulus_bit_len = 8 * mex->inputdatalength; + key->pubSec.modulus_byte_len = mex->inputdatalength; + key->pubSec.exponent_len = mex->inputdatalength - i; + key->pubSec.section_length = sizeof(key->pubSec) + + 2*mex->inputdatalength - i; + key->pubHdr.token_length = + key->pubSec.section_length + sizeof(key->pubHdr); + if (big_endian) { + key->t6_hdr.ulen = cpu_to_be16(key->pubHdr.token_length + 4); + key->t6_hdr.blen = cpu_to_be16(key->pubHdr.token_length + 6); + } else { + key->t6_hdr.ulen = cpu_to_le16(key->pubHdr.token_length + 4); + key->t6_hdr.blen = cpu_to_le16(key->pubHdr.token_length + 6); + } + return sizeof(*key) + 2*mex->inputdatalength - i; +} + +/** + * Set up private key fields of a type6 CRT message. + * Note that all numerics in the key token are big-endian, + * while the entries in the key block header are little-endian. + * + * @mex: pointer to user input data + * @p: pointer to memory area for the key + * + * Returns the size of the key area or -EFAULT + */ +static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, + void *p, int big_endian) +{ + static struct cca_public_sec static_cca_pub_sec = { + .section_identifier = 4, + .section_length = 0x000f, + .exponent_len = 0x0003, + }; + static char pk_exponent[3] = { 0x01, 0x00, 0x01 }; + struct { + struct T6_keyBlock_hdr t6_hdr; + struct cca_token_hdr token; + struct cca_pvt_ext_CRT_sec pvt; + char key_parts[0]; + } __attribute__((packed)) *key = p; + struct cca_public_sec *pub; + int short_len, long_len, pad_len, key_len, size; + + memset(key, 0, sizeof(*key)); + + short_len = crt->inputdatalength / 2; + long_len = short_len + 8; + pad_len = -(3*long_len + 2*short_len) & 7; + key_len = 3*long_len + 2*short_len + pad_len + crt->inputdatalength; + size = sizeof(*key) + key_len + sizeof(*pub) + 3; + + /* parameter block.key block */ + if (big_endian) { + key->t6_hdr.blen = cpu_to_be16(size); + key->t6_hdr.ulen = cpu_to_be16(size - 2); + } else { + key->t6_hdr.blen = cpu_to_le16(size); + key->t6_hdr.ulen = cpu_to_le16(size - 2); + } + + /* key token header */ + key->token.token_identifier = CCA_TKN_HDR_ID_EXT; + key->token.token_length = size - 6; + + /* private section */ + key->pvt.section_identifier = CCA_PVT_EXT_CRT_SEC_ID_PVT; + key->pvt.section_length = sizeof(key->pvt) + key_len; + key->pvt.key_format = CCA_PVT_EXT_CRT_SEC_FMT_CL; + key->pvt.key_use_flags[0] = CCA_PVT_USAGE_ALL; + key->pvt.p_len = key->pvt.dp_len = key->pvt.u_len = long_len; + key->pvt.q_len = key->pvt.dq_len = short_len; + key->pvt.mod_len = crt->inputdatalength; + key->pvt.pad_len = pad_len; + + /* key parts */ + if (copy_from_user(key->key_parts, crt->np_prime, long_len) || + copy_from_user(key->key_parts + long_len, + crt->nq_prime, short_len) || + copy_from_user(key->key_parts + long_len + short_len, + crt->bp_key, long_len) || + copy_from_user(key->key_parts + 2*long_len + short_len, + crt->bq_key, short_len) || + copy_from_user(key->key_parts + 2*long_len + 2*short_len, + crt->u_mult_inv, long_len)) + return -EFAULT; + memset(key->key_parts + 3*long_len + 2*short_len + pad_len, + 0xff, crt->inputdatalength); + pub = (struct cca_public_sec *)(key->key_parts + key_len); + *pub = static_cca_pub_sec; + 
pub->modulus_bit_len = 8 * crt->inputdatalength; + /** + * In a private key, the modulus doesn't appear in the public + * section. So, an arbitrary public exponent of 0x010001 will be + * used. + */ + memcpy((char *) (pub + 1), pk_exponent, 3); + return size; +} + +#endif /* _ZCRYPT_CCA_KEY_H_ */ diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c new file mode 100644 index 000000000000..a62b00083d0c --- /dev/null +++ b/drivers/s390/crypto/zcrypt_cex2a.c @@ -0,0 +1,435 @@ +/* + * linux/drivers/s390/crypto/zcrypt_cex2a.c + * + * zcrypt 2.1.0 + * + * Copyright (C) 2001, 2006 IBM Corporation + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> + * Ralph Wuerthner <rwuerthn@de.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/err.h> +#include <asm/atomic.h> +#include <asm/uaccess.h> + +#include "ap_bus.h" +#include "zcrypt_api.h" +#include "zcrypt_error.h" +#include "zcrypt_cex2a.h" + +#define CEX2A_MIN_MOD_SIZE 1 /* 8 bits */ +#define CEX2A_MAX_MOD_SIZE 256 /* 2048 bits */ + +#define CEX2A_SPEED_RATING 970 + +#define CEX2A_MAX_MESSAGE_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */ +#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */ + +#define CEX2A_CLEANUP_TIME (15*HZ) + +static struct ap_device_id zcrypt_cex2a_ids[] = { + { AP_DEVICE(AP_DEVICE_TYPE_CEX2A) }, + { /* end of list */ }, +}; + +#ifndef CONFIG_ZCRYPT_MONOLITHIC +MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_ids); +MODULE_AUTHOR("IBM Corporation"); +MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, " + "Copyright 2001, 2006 IBM Corporation"); +MODULE_LICENSE("GPL"); +#endif + +static int zcrypt_cex2a_probe(struct ap_device *ap_dev); +static void zcrypt_cex2a_remove(struct ap_device *ap_dev); +static void zcrypt_cex2a_receive(struct ap_device *, struct ap_message *, + struct ap_message *); + +static struct ap_driver zcrypt_cex2a_driver = { + .probe = zcrypt_cex2a_probe, + .remove = zcrypt_cex2a_remove, + .receive = zcrypt_cex2a_receive, + .ids = zcrypt_cex2a_ids, +}; + +/** + * Convert a ICAMEX message to a type50 MEX message. + * + * @zdev: crypto device pointer + * @zreq: crypto request pointer + * @mex: pointer to user input data + * + * Returns 0 on success or -EFAULT. 
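+ *
+ * Illustrative sizing, following the layout below: a 1024-bit request
+ * (inputdatalength = 128) fits the meb1 format with its 128-byte
+ * operand fields, while a 2048-bit request (inputdatalength = 256)
+ * selects the meb2 format with 256-byte fields; operands shorter than
+ * a field are right-justified and left-padded with zeroes.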
+ */
+static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_device *zdev,
+				       struct ap_message *ap_msg,
+				       struct ica_rsa_modexpo *mex)
+{
+	unsigned char *mod, *exp, *inp;
+	int mod_len;
+
+	mod_len = mex->inputdatalength;
+
+	if (mod_len <= 128) {
+		struct type50_meb1_msg *meb1 = ap_msg->message;
+		memset(meb1, 0, sizeof(*meb1));
+		ap_msg->length = sizeof(*meb1);
+		meb1->header.msg_type_code = TYPE50_TYPE_CODE;
+		meb1->header.msg_len = sizeof(*meb1);
+		meb1->keyblock_type = TYPE50_MEB1_FMT;
+		mod = meb1->modulus + sizeof(meb1->modulus) - mod_len;
+		exp = meb1->exponent + sizeof(meb1->exponent) - mod_len;
+		inp = meb1->message + sizeof(meb1->message) - mod_len;
+	} else {
+		struct type50_meb2_msg *meb2 = ap_msg->message;
+		memset(meb2, 0, sizeof(*meb2));
+		ap_msg->length = sizeof(*meb2);
+		meb2->header.msg_type_code = TYPE50_TYPE_CODE;
+		meb2->header.msg_len = sizeof(*meb2);
+		meb2->keyblock_type = TYPE50_MEB2_FMT;
+		mod = meb2->modulus + sizeof(meb2->modulus) - mod_len;
+		exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
+		inp = meb2->message + sizeof(meb2->message) - mod_len;
+	}
+
+	if (copy_from_user(mod, mex->n_modulus, mod_len) ||
+	    copy_from_user(exp, mex->b_key, mod_len) ||
+	    copy_from_user(inp, mex->inputdata, mod_len))
+		return -EFAULT;
+	return 0;
+}
+
+/**
+ * Convert an ICACRT message to a type50 CRT message.
+ *
+ * @zdev: crypto device pointer
+ * @ap_msg: crypto request pointer
+ * @crt: pointer to user input data
+ *
+ * Returns 0 on success or -EFAULT.
+ */
+static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
+				       struct ap_message *ap_msg,
+				       struct ica_rsa_modexpo_crt *crt)
+{
+	int mod_len, short_len, long_len, long_offset;
+	unsigned char *p, *q, *dp, *dq, *u, *inp;
+
+	mod_len = crt->inputdatalength;
+	short_len = mod_len / 2;
+	long_len = mod_len / 2 + 8;
+
+	/*
+	 * CEX2A cannot handle p, dp, or U > 128 bytes.
+	 * If we have one of these, we need to do extra checking.
+	 */
+	if (long_len > 128) {
+		/*
+		 * zcrypt_rsa_crt already checked for the leading
+		 * zeroes of np_prime, bp_key and u_mult_inv.
+		 */
+		long_offset = long_len - 128;
+		long_len = 128;
+	} else
+		long_offset = 0;
+
+	/*
+	 * Instead of doing extra work for p, dp, U > 64 bytes, we'll just use
+	 * the larger message structure.
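+	 *
+	 * Example, assuming a 2048-bit CRT request: mod_len = 256 gives
+	 * short_len = 128 and long_len = 136 above; since 136 > 128 the
+	 * first long_offset = 8 bytes of np_prime, bp_key and u_mult_inv
+	 * (known to be zero) are skipped and long_len is clamped to 128.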
+ */ + if (long_len <= 64) { + struct type50_crb1_msg *crb1 = ap_msg->message; + memset(crb1, 0, sizeof(*crb1)); + ap_msg->length = sizeof(*crb1); + crb1->header.msg_type_code = TYPE50_TYPE_CODE; + crb1->header.msg_len = sizeof(*crb1); + crb1->keyblock_type = TYPE50_CRB1_FMT; + p = crb1->p + sizeof(crb1->p) - long_len; + q = crb1->q + sizeof(crb1->q) - short_len; + dp = crb1->dp + sizeof(crb1->dp) - long_len; + dq = crb1->dq + sizeof(crb1->dq) - short_len; + u = crb1->u + sizeof(crb1->u) - long_len; + inp = crb1->message + sizeof(crb1->message) - mod_len; + } else { + struct type50_crb2_msg *crb2 = ap_msg->message; + memset(crb2, 0, sizeof(*crb2)); + ap_msg->length = sizeof(*crb2); + crb2->header.msg_type_code = TYPE50_TYPE_CODE; + crb2->header.msg_len = sizeof(*crb2); + crb2->keyblock_type = TYPE50_CRB2_FMT; + p = crb2->p + sizeof(crb2->p) - long_len; + q = crb2->q + sizeof(crb2->q) - short_len; + dp = crb2->dp + sizeof(crb2->dp) - long_len; + dq = crb2->dq + sizeof(crb2->dq) - short_len; + u = crb2->u + sizeof(crb2->u) - long_len; + inp = crb2->message + sizeof(crb2->message) - mod_len; + } + + if (copy_from_user(p, crt->np_prime + long_offset, long_len) || + copy_from_user(q, crt->nq_prime, short_len) || + copy_from_user(dp, crt->bp_key + long_offset, long_len) || + copy_from_user(dq, crt->bq_key, short_len) || + copy_from_user(u, crt->u_mult_inv + long_offset, long_len) || + copy_from_user(inp, crt->inputdata, mod_len)) + return -EFAULT; + + + return 0; +} + +/** + * Copy results from a type 80 reply message back to user space. + * + * @zdev: crypto device pointer + * @reply: reply AP message. + * @data: pointer to user output data + * @length: size of user output data + * + * Returns 0 on success or -EFAULT. + */ +static int convert_type80(struct zcrypt_device *zdev, + struct ap_message *reply, + char __user *outputdata, + unsigned int outputdatalength) +{ + struct type80_hdr *t80h = reply->message; + unsigned char *data; + + if (t80h->len < sizeof(*t80h) + outputdatalength) { + /* The result is too short, the CEX2A card may not do that.. */ + zdev->online = 0; + return -EAGAIN; /* repeat the request on a different device. */ + } + BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE); + data = reply->message + t80h->len - outputdatalength; + if (copy_to_user(outputdata, data, outputdatalength)) + return -EFAULT; + return 0; +} + +static int convert_response(struct zcrypt_device *zdev, + struct ap_message *reply, + char __user *outputdata, + unsigned int outputdatalength) +{ + /* Response type byte is the second byte in the response. */ + switch (((unsigned char *) reply->message)[1]) { + case TYPE82_RSP_CODE: + case TYPE88_RSP_CODE: + return convert_error(zdev, reply); + case TYPE80_RSP_CODE: + return convert_type80(zdev, reply, + outputdata, outputdatalength); + default: /* Unknown response type, this should NEVER EVER happen */ + PRINTK("Unrecognized Message Header: %08x%08x\n", + *(unsigned int *) reply->message, + *(unsigned int *) (reply->message+4)); + zdev->online = 0; + return -EAGAIN; /* repeat the request on a different device. */ + } +} + +/** + * This function is called from the AP bus code after a crypto request + * "msg" has finished with the reply message "reply". + * It is called from tasklet context. 
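+ *
+ * The reply is copied into the request's own message buffer and the
+ * completion stashed in msg->private is signalled, waking the thread
+ * sleeping in zcrypt_cex2a_modexpo() or zcrypt_cex2a_modexpo_crt().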
+ * @ap_dev: pointer to the AP device + * @msg: pointer to the AP message + * @reply: pointer to the AP reply message + */ +static void zcrypt_cex2a_receive(struct ap_device *ap_dev, + struct ap_message *msg, + struct ap_message *reply) +{ + static struct error_hdr error_reply = { + .type = TYPE82_RSP_CODE, + .reply_code = REP82_ERROR_MACHINE_FAILURE, + }; + struct type80_hdr *t80h = reply->message; + int length; + + /* Copy the reply message to the request message buffer. */ + if (IS_ERR(reply)) + memcpy(msg->message, &error_reply, sizeof(error_reply)); + else if (t80h->type == TYPE80_RSP_CODE) { + length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len); + memcpy(msg->message, reply->message, length); + } else + memcpy(msg->message, reply->message, sizeof error_reply); + complete((struct completion *) msg->private); +} + +static atomic_t zcrypt_step = ATOMIC_INIT(0); + +/** + * The request distributor calls this function if it picked the CEX2A + * device to handle a modexpo request. + * @zdev: pointer to zcrypt_device structure that identifies the + * CEX2A device to the request distributor + * @mex: pointer to the modexpo request buffer + */ +static long zcrypt_cex2a_modexpo(struct zcrypt_device *zdev, + struct ica_rsa_modexpo *mex) +{ + struct ap_message ap_msg; + struct completion work; + int rc; + + ap_msg.message = (void *) kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL); + if (!ap_msg.message) + return -ENOMEM; + ap_msg.psmid = (((unsigned long long) current->pid) << 32) + + atomic_inc_return(&zcrypt_step); + ap_msg.private = &work; + rc = ICAMEX_msg_to_type50MEX_msg(zdev, &ap_msg, mex); + if (rc) + goto out_free; + init_completion(&work); + ap_queue_message(zdev->ap_dev, &ap_msg); + rc = wait_for_completion_interruptible_timeout( + &work, CEX2A_CLEANUP_TIME); + if (rc > 0) + rc = convert_response(zdev, &ap_msg, mex->outputdata, + mex->outputdatalength); + else { + /* Signal pending or message timed out. */ + ap_cancel_message(zdev->ap_dev, &ap_msg); + if (rc == 0) + /* Message timed out. */ + rc = -ETIME; + } +out_free: + kfree(ap_msg.message); + return rc; +} + +/** + * The request distributor calls this function if it picked the CEX2A + * device to handle a modexpo_crt request. + * @zdev: pointer to zcrypt_device structure that identifies the + * CEX2A device to the request distributor + * @crt: pointer to the modexpoc_crt request buffer + */ +static long zcrypt_cex2a_modexpo_crt(struct zcrypt_device *zdev, + struct ica_rsa_modexpo_crt *crt) +{ + struct ap_message ap_msg; + struct completion work; + int rc; + + ap_msg.message = (void *) kmalloc(CEX2A_MAX_MESSAGE_SIZE, GFP_KERNEL); + if (!ap_msg.message) + return -ENOMEM; + ap_msg.psmid = (((unsigned long long) current->pid) << 32) + + atomic_inc_return(&zcrypt_step); + ap_msg.private = &work; + rc = ICACRT_msg_to_type50CRT_msg(zdev, &ap_msg, crt); + if (rc) + goto out_free; + init_completion(&work); + ap_queue_message(zdev->ap_dev, &ap_msg); + rc = wait_for_completion_interruptible_timeout( + &work, CEX2A_CLEANUP_TIME); + if (rc > 0) + rc = convert_response(zdev, &ap_msg, crt->outputdata, + crt->outputdatalength); + else { + /* Signal pending or message timed out. */ + ap_cancel_message(zdev->ap_dev, &ap_msg); + if (rc == 0) + /* Message timed out. */ + rc = -ETIME; + } +out_free: + kfree(ap_msg.message); + return rc; +} + +/** + * The crypto operations for a CEX2A card. 
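+ *
+ * Only the clear-key RSA entry points are filled in; .send_cprb is
+ * left NULL, since the CEX2A accelerator has no secure-key CPRB path
+ * (an inference from the ops table; presumably the PCIXCC/CEX2C
+ * driver provides .send_cprb for that traffic).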
+ */ +static struct zcrypt_ops zcrypt_cex2a_ops = { + .rsa_modexpo = zcrypt_cex2a_modexpo, + .rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt, +}; + +/** + * Probe function for CEX2A cards. It always accepts the AP device + * since the bus_match already checked the hardware type. + * @ap_dev: pointer to the AP device. + */ +static int zcrypt_cex2a_probe(struct ap_device *ap_dev) +{ + struct zcrypt_device *zdev; + int rc; + + zdev = zcrypt_device_alloc(CEX2A_MAX_RESPONSE_SIZE); + if (!zdev) + return -ENOMEM; + zdev->ap_dev = ap_dev; + zdev->ops = &zcrypt_cex2a_ops; + zdev->online = 1; + zdev->user_space_type = ZCRYPT_CEX2A; + zdev->type_string = "CEX2A"; + zdev->min_mod_size = CEX2A_MIN_MOD_SIZE; + zdev->max_mod_size = CEX2A_MAX_MOD_SIZE; + zdev->short_crt = 1; + zdev->speed_rating = CEX2A_SPEED_RATING; + ap_dev->reply = &zdev->reply; + ap_dev->private = zdev; + rc = zcrypt_device_register(zdev); + if (rc) + goto out_free; + return 0; + +out_free: + ap_dev->private = NULL; + zcrypt_device_free(zdev); + return rc; +} + +/** + * This is called to remove the extended CEX2A driver information + * if an AP device is removed. + */ +static void zcrypt_cex2a_remove(struct ap_device *ap_dev) +{ + struct zcrypt_device *zdev = ap_dev->private; + + zcrypt_device_unregister(zdev); +} + +int __init zcrypt_cex2a_init(void) +{ + return ap_driver_register(&zcrypt_cex2a_driver, THIS_MODULE, "cex2a"); +} + +void __exit zcrypt_cex2a_exit(void) +{ + ap_driver_unregister(&zcrypt_cex2a_driver); +} + +#ifndef CONFIG_ZCRYPT_MONOLITHIC +module_init(zcrypt_cex2a_init); +module_exit(zcrypt_cex2a_exit); +#endif diff --git a/drivers/s390/crypto/zcrypt_cex2a.h b/drivers/s390/crypto/zcrypt_cex2a.h new file mode 100644 index 000000000000..8f69d1dacab8 --- /dev/null +++ b/drivers/s390/crypto/zcrypt_cex2a.h @@ -0,0 +1,126 @@ +/* + * linux/drivers/s390/crypto/zcrypt_cex2a.h + * + * zcrypt 2.1.0 + * + * Copyright (C) 2001, 2006 IBM Corporation + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _ZCRYPT_CEX2A_H_ +#define _ZCRYPT_CEX2A_H_ + +/** + * The type 50 message family is associated with a CEX2A card. + * + * The four members of the family are described below. + * + * Note that all unsigned char arrays are right-justified and left-padded + * with zeroes. + * + * Note that all reserved fields must be zeroes. 
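+ *
+ * For example, a 0x40 byte modulus in the meb1 format occupies the
+ * last 0x40 bytes of the 128-byte modulus field, with bytes
+ * 0..0x3f left as zeroes.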
+ */ +struct type50_hdr { + unsigned char reserved1; + unsigned char msg_type_code; /* 0x50 */ + unsigned short msg_len; + unsigned char reserved2; + unsigned char ignored; + unsigned short reserved3; +} __attribute__((packed)); + +#define TYPE50_TYPE_CODE 0x50 + +#define TYPE50_MEB1_FMT 0x0001 +#define TYPE50_MEB2_FMT 0x0002 +#define TYPE50_CRB1_FMT 0x0011 +#define TYPE50_CRB2_FMT 0x0012 + +/* Mod-Exp, with a small modulus */ +struct type50_meb1_msg { + struct type50_hdr header; + unsigned short keyblock_type; /* 0x0001 */ + unsigned char reserved[6]; + unsigned char exponent[128]; + unsigned char modulus[128]; + unsigned char message[128]; +} __attribute__((packed)); + +/* Mod-Exp, with a large modulus */ +struct type50_meb2_msg { + struct type50_hdr header; + unsigned short keyblock_type; /* 0x0002 */ + unsigned char reserved[6]; + unsigned char exponent[256]; + unsigned char modulus[256]; + unsigned char message[256]; +} __attribute__((packed)); + +/* CRT, with a small modulus */ +struct type50_crb1_msg { + struct type50_hdr header; + unsigned short keyblock_type; /* 0x0011 */ + unsigned char reserved[6]; + unsigned char p[64]; + unsigned char q[64]; + unsigned char dp[64]; + unsigned char dq[64]; + unsigned char u[64]; + unsigned char message[128]; +} __attribute__((packed)); + +/* CRT, with a large modulus */ +struct type50_crb2_msg { + struct type50_hdr header; + unsigned short keyblock_type; /* 0x0012 */ + unsigned char reserved[6]; + unsigned char p[128]; + unsigned char q[128]; + unsigned char dp[128]; + unsigned char dq[128]; + unsigned char u[128]; + unsigned char message[256]; +} __attribute__((packed)); + +/** + * The type 80 response family is associated with a CEX2A card. + * + * Note that all unsigned char arrays are right-justified and left-padded + * with zeroes. + * + * Note that all reserved fields must be zeroes. + */ + +#define TYPE80_RSP_CODE 0x80 + +struct type80_hdr { + unsigned char reserved1; + unsigned char type; /* 0x80 */ + unsigned short len; + unsigned char code; /* 0x00 */ + unsigned char reserved2[3]; + unsigned char reserved3[8]; +} __attribute__((packed)); + +int zcrypt_cex2a_init(void); +void zcrypt_cex2a_exit(void); + +#endif /* _ZCRYPT_CEX2A_H_ */ diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h new file mode 100644 index 000000000000..2cb616ba8bec --- /dev/null +++ b/drivers/s390/crypto/zcrypt_error.h @@ -0,0 +1,133 @@ +/* + * linux/drivers/s390/crypto/zcrypt_error.h + * + * zcrypt 2.1.0 + * + * Copyright (C) 2001, 2006 IBM Corporation + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */
+
+#ifndef _ZCRYPT_ERROR_H_
+#define _ZCRYPT_ERROR_H_
+
+#include "zcrypt_api.h"
+
+/**
+ * Reply Messages
+ *
+ * Error reply messages are of two types:
+ *    82: Error (see below)
+ *    88: Error (see below)
+ * Both type 82 and type 88 have the same structure in the header.
+ *
+ * Request reply messages are of three known types:
+ *    80: Reply from a Type 50 Request (see CEX2A-RELATED STRUCTS)
+ *    84: Reply from a Type 4 Request (see PCICA-RELATED STRUCTS)
+ *    86: Reply from a Type 6 Request (see PCICC/PCIXCC/CEX2C-RELATED STRUCTS)
+ *
+ */
+struct error_hdr {
+	unsigned char reserved1;	/* 0x00 */
+	unsigned char type;		/* 0x82 or 0x88 */
+	unsigned char reserved2[2];	/* 0x0000 */
+	unsigned char reply_code;	/* reply code */
+	unsigned char reserved3[3];	/* 0x000000 */
+};
+
+#define TYPE82_RSP_CODE 0x82
+#define TYPE88_RSP_CODE 0x88
+
+#define REP82_ERROR_MACHINE_FAILURE  0x10
+#define REP82_ERROR_PREEMPT_FAILURE  0x12
+#define REP82_ERROR_CHECKPT_FAILURE  0x14
+#define REP82_ERROR_MESSAGE_TYPE     0x20
+#define REP82_ERROR_INVALID_COMM_CD  0x21	/* Type 84 */
+#define REP82_ERROR_INVALID_MSG_LEN  0x23
+#define REP82_ERROR_RESERVD_FIELD    0x24	/* was 0x50 */
+#define REP82_ERROR_FORMAT_FIELD     0x29
+#define REP82_ERROR_INVALID_COMMAND  0x30
+#define REP82_ERROR_MALFORMED_MSG    0x40
+#define REP82_ERROR_RESERVED_FIELDO  0x50	/* old value */
+#define REP82_ERROR_WORD_ALIGNMENT   0x60
+#define REP82_ERROR_MESSAGE_LENGTH   0x80
+#define REP82_ERROR_OPERAND_INVALID  0x82
+#define REP82_ERROR_OPERAND_SIZE     0x84
+#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
+#define REP82_ERROR_RESERVED_FIELD   0x88
+#define REP82_ERROR_TRANSPORT_FAIL   0x90
+#define REP82_ERROR_PACKET_TRUNCATED 0xA0
+#define REP82_ERROR_ZERO_BUFFER_LEN  0xB0
+
+#define REP88_ERROR_MODULE_FAILURE   0x10
+
+#define REP88_ERROR_MESSAGE_TYPE     0x20
+#define REP88_ERROR_MESSAGE_MALFORMD 0x22
+#define REP88_ERROR_MESSAGE_LENGTH   0x23
+#define REP88_ERROR_RESERVED_FIELD   0x24
+#define REP88_ERROR_KEY_TYPE         0x34
+#define REP88_ERROR_INVALID_KEY      0x82	/* CEX2A */
+#define REP88_ERROR_OPERAND          0x84	/* CEX2A */
+#define REP88_ERROR_OPERAND_EVEN_MOD 0x85	/* CEX2A */
+
+static inline int convert_error(struct zcrypt_device *zdev,
+				struct ap_message *reply)
+{
+	struct error_hdr *ehdr = reply->message;
+
+	PRINTK("Hardware error: Type %02x Message Header: %08x%08x\n",
+	       ehdr->type, *(unsigned int *) reply->message,
+	       *(unsigned int *) (reply->message + 4));
+
+	switch (ehdr->reply_code) {
+	case REP82_ERROR_OPERAND_INVALID:
+	case REP82_ERROR_OPERAND_SIZE:
+	case REP82_ERROR_EVEN_MOD_IN_OPND:
+	case REP88_ERROR_MESSAGE_MALFORMD:
+	// REP88_ERROR_INVALID_KEY	// '82' CEX2A
+	// REP88_ERROR_OPERAND		// '84' CEX2A
+	// REP88_ERROR_OPERAND_EVEN_MOD	// '85' CEX2A
+		/* Invalid input data. */
+		return -EINVAL;
+	case REP82_ERROR_MESSAGE_TYPE:
+	// REP88_ERROR_MESSAGE_TYPE	// '20' CEX2A
+		/*
+		 * To send a message of the wrong type is a bug in the
+		 * device driver. Warn about it, disable the device
+		 * and then repeat the request.
+		 */
+		WARN_ON(1);
+		zdev->online = 0;
+		return -EAGAIN;
+	case REP82_ERROR_TRANSPORT_FAIL:
+	case REP82_ERROR_MACHINE_FAILURE:
+	// REP88_ERROR_MODULE_FAILURE	// '10' CEX2A
+		/* If a card fails, disable it and repeat the request. */
+		zdev->online = 0;
+		return -EAGAIN;
+	default:
+		PRINTKW("unknown type %02x, reply code = %d\n",
+			ehdr->type, ehdr->reply_code);
+		zdev->online = 0;
+		return -EAGAIN; /* repeat the request on a different device.
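+				 * The ioctl paths in zcrypt_api.c are
+				 * expected to retry a request that fails
+				 * with -EAGAIN, re-issuing it to the next
+				 * suitable online device.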
*/ + } +} + +#endif /* _ZCRYPT_ERROR_H_ */ diff --git a/drivers/s390/crypto/zcrypt_mono.c b/drivers/s390/crypto/zcrypt_mono.c new file mode 100644 index 000000000000..2a9349ad68b7 --- /dev/null +++ b/drivers/s390/crypto/zcrypt_mono.c @@ -0,0 +1,100 @@ +/* + * linux/drivers/s390/crypto/zcrypt_mono.c + * + * zcrypt 2.1.0 + * + * Copyright (C) 2001, 2006 IBM Corporation + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/miscdevice.h> +#include <linux/fs.h> +#include <linux/proc_fs.h> +#include <linux/compat.h> +#include <asm/atomic.h> +#include <asm/uaccess.h> + +#include "ap_bus.h" +#include "zcrypt_api.h" +#include "zcrypt_pcica.h" +#include "zcrypt_pcicc.h" +#include "zcrypt_pcixcc.h" +#include "zcrypt_cex2a.h" + +/** + * The module initialization code. + */ +int __init zcrypt_init(void) +{ + int rc; + + rc = ap_module_init(); + if (rc) + goto out; + rc = zcrypt_api_init(); + if (rc) + goto out_ap; + rc = zcrypt_pcica_init(); + if (rc) + goto out_api; + rc = zcrypt_pcicc_init(); + if (rc) + goto out_pcica; + rc = zcrypt_pcixcc_init(); + if (rc) + goto out_pcicc; + rc = zcrypt_cex2a_init(); + if (rc) + goto out_pcixcc; + return 0; + +out_pcixcc: + zcrypt_pcixcc_exit(); +out_pcicc: + zcrypt_pcicc_exit(); +out_pcica: + zcrypt_pcica_exit(); +out_api: + zcrypt_api_exit(); +out_ap: + ap_module_exit(); +out: + return rc; +} + +/** + * The module termination code. + */ +void __exit zcrypt_exit(void) +{ + zcrypt_cex2a_exit(); + zcrypt_pcixcc_exit(); + zcrypt_pcicc_exit(); + zcrypt_pcica_exit(); + zcrypt_api_exit(); + ap_module_exit(); +} + +module_init(zcrypt_init); +module_exit(zcrypt_exit); diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c new file mode 100644 index 000000000000..b6a4ecdc8025 --- /dev/null +++ b/drivers/s390/crypto/zcrypt_pcica.c @@ -0,0 +1,418 @@ +/* + * linux/drivers/s390/crypto/zcrypt_pcica.c + * + * zcrypt 2.1.0 + * + * Copyright (C) 2001, 2006 IBM Corporation + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> + * Ralph Wuerthner <rwuerthn@de.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/err.h> +#include <asm/atomic.h> +#include <asm/uaccess.h> + +#include "ap_bus.h" +#include "zcrypt_api.h" +#include "zcrypt_error.h" +#include "zcrypt_pcica.h" + +#define PCICA_MIN_MOD_SIZE 1 /* 8 bits */ +#define PCICA_MAX_MOD_SIZE 256 /* 2048 bits */ + +#define PCICA_SPEED_RATING 2800 + +#define PCICA_MAX_MESSAGE_SIZE 0x3a0 /* sizeof(struct type4_lcr) */ +#define PCICA_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */ + +#define PCICA_CLEANUP_TIME (15*HZ) + +static struct ap_device_id zcrypt_pcica_ids[] = { + { AP_DEVICE(AP_DEVICE_TYPE_PCICA) }, + { /* end of list */ }, +}; + +#ifndef CONFIG_ZCRYPT_MONOLITHIC +MODULE_DEVICE_TABLE(ap, zcrypt_pcica_ids); +MODULE_AUTHOR("IBM Corporation"); +MODULE_DESCRIPTION("PCICA Cryptographic Coprocessor device driver, " + "Copyright 2001, 2006 IBM Corporation"); +MODULE_LICENSE("GPL"); +#endif + +static int zcrypt_pcica_probe(struct ap_device *ap_dev); +static void zcrypt_pcica_remove(struct ap_device *ap_dev); +static void zcrypt_pcica_receive(struct ap_device *, struct ap_message *, + struct ap_message *); + +static struct ap_driver zcrypt_pcica_driver = { + .probe = zcrypt_pcica_probe, + .remove = zcrypt_pcica_remove, + .receive = zcrypt_pcica_receive, + .ids = zcrypt_pcica_ids, +}; + +/** + * Convert a ICAMEX message to a type4 MEX message. + * + * @zdev: crypto device pointer + * @zreq: crypto request pointer + * @mex: pointer to user input data + * + * Returns 0 on success or -EFAULT. + */ +static int ICAMEX_msg_to_type4MEX_msg(struct zcrypt_device *zdev, + struct ap_message *ap_msg, + struct ica_rsa_modexpo *mex) +{ + unsigned char *modulus, *exponent, *message; + int mod_len; + + mod_len = mex->inputdatalength; + + if (mod_len <= 128) { + struct type4_sme *sme = ap_msg->message; + memset(sme, 0, sizeof(*sme)); + ap_msg->length = sizeof(*sme); + sme->header.msg_fmt = TYPE4_SME_FMT; + sme->header.msg_len = sizeof(*sme); + sme->header.msg_type_code = TYPE4_TYPE_CODE; + sme->header.request_code = TYPE4_REQU_CODE; + modulus = sme->modulus + sizeof(sme->modulus) - mod_len; + exponent = sme->exponent + sizeof(sme->exponent) - mod_len; + message = sme->message + sizeof(sme->message) - mod_len; + } else { + struct type4_lme *lme = ap_msg->message; + memset(lme, 0, sizeof(*lme)); + ap_msg->length = sizeof(*lme); + lme->header.msg_fmt = TYPE4_LME_FMT; + lme->header.msg_len = sizeof(*lme); + lme->header.msg_type_code = TYPE4_TYPE_CODE; + lme->header.request_code = TYPE4_REQU_CODE; + modulus = lme->modulus + sizeof(lme->modulus) - mod_len; + exponent = lme->exponent + sizeof(lme->exponent) - mod_len; + message = lme->message + sizeof(lme->message) - mod_len; + } + + if (copy_from_user(modulus, mex->n_modulus, mod_len) || + copy_from_user(exponent, mex->b_key, mod_len) || + copy_from_user(message, mex->inputdata, mod_len)) + return -EFAULT; + return 0; +} + +/** + * Convert a ICACRT message to a type4 CRT message. 
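+ *
+ * As in the CEX2A variant, short_len is inputdatalength / 2 and
+ * long_len adds 8 bytes for p, dp and u; e.g. a 1024-bit request has
+ * short_len = 64 and long_len = 72, matching the 72/64-byte fields of
+ * the type4_scr layout.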
+ * + * @zdev: crypto device pointer + * @zreq: crypto request pointer + * @crt: pointer to user input data + * + * Returns 0 on success or -EFAULT. + */ +static int ICACRT_msg_to_type4CRT_msg(struct zcrypt_device *zdev, + struct ap_message *ap_msg, + struct ica_rsa_modexpo_crt *crt) +{ + unsigned char *p, *q, *dp, *dq, *u, *inp; + int mod_len, short_len, long_len; + + mod_len = crt->inputdatalength; + short_len = mod_len / 2; + long_len = mod_len / 2 + 8; + + if (mod_len <= 128) { + struct type4_scr *scr = ap_msg->message; + memset(scr, 0, sizeof(*scr)); + ap_msg->length = sizeof(*scr); + scr->header.msg_type_code = TYPE4_TYPE_CODE; + scr->header.request_code = TYPE4_REQU_CODE; + scr->header.msg_fmt = TYPE4_SCR_FMT; + scr->header.msg_len = sizeof(*scr); + p = scr->p + sizeof(scr->p) - long_len; + q = scr->q + sizeof(scr->q) - short_len; + dp = scr->dp + sizeof(scr->dp) - long_len; + dq = scr->dq + sizeof(scr->dq) - short_len; + u = scr->u + sizeof(scr->u) - long_len; + inp = scr->message + sizeof(scr->message) - mod_len; + } else { + struct type4_lcr *lcr = ap_msg->message; + memset(lcr, 0, sizeof(*lcr)); + ap_msg->length = sizeof(*lcr); + lcr->header.msg_type_code = TYPE4_TYPE_CODE; + lcr->header.request_code = TYPE4_REQU_CODE; + lcr->header.msg_fmt = TYPE4_LCR_FMT; + lcr->header.msg_len = sizeof(*lcr); + p = lcr->p + sizeof(lcr->p) - long_len; + q = lcr->q + sizeof(lcr->q) - short_len; + dp = lcr->dp + sizeof(lcr->dp) - long_len; + dq = lcr->dq + sizeof(lcr->dq) - short_len; + u = lcr->u + sizeof(lcr->u) - long_len; + inp = lcr->message + sizeof(lcr->message) - mod_len; + } + + if (copy_from_user(p, crt->np_prime, long_len) || + copy_from_user(q, crt->nq_prime, short_len) || + copy_from_user(dp, crt->bp_key, long_len) || + copy_from_user(dq, crt->bq_key, short_len) || + copy_from_user(u, crt->u_mult_inv, long_len) || + copy_from_user(inp, crt->inputdata, mod_len)) + return -EFAULT; + return 0; +} + +/** + * Copy results from a type 84 reply message back to user space. + * + * @zdev: crypto device pointer + * @reply: reply AP message. + * @data: pointer to user output data + * @length: size of user output data + * + * Returns 0 on success or -EFAULT. + */ +static inline int convert_type84(struct zcrypt_device *zdev, + struct ap_message *reply, + char __user *outputdata, + unsigned int outputdatalength) +{ + struct type84_hdr *t84h = reply->message; + char *data; + + if (t84h->len < sizeof(*t84h) + outputdatalength) { + /* The result is too short, the PCICA card may not do that.. */ + zdev->online = 0; + return -EAGAIN; /* repeat the request on a different device. */ + } + BUG_ON(t84h->len > PCICA_MAX_RESPONSE_SIZE); + data = reply->message + t84h->len - outputdatalength; + if (copy_to_user(outputdata, data, outputdatalength)) + return -EFAULT; + return 0; +} + +static int convert_response(struct zcrypt_device *zdev, + struct ap_message *reply, + char __user *outputdata, + unsigned int outputdatalength) +{ + /* Response type byte is the second byte in the response. */ + switch (((unsigned char *) reply->message)[1]) { + case TYPE82_RSP_CODE: + case TYPE88_RSP_CODE: + return convert_error(zdev, reply); + case TYPE84_RSP_CODE: + return convert_type84(zdev, reply, + outputdata, outputdatalength); + default: /* Unknown response type, this should NEVER EVER happen */ + PRINTK("Unrecognized Message Header: %08x%08x\n", + *(unsigned int *) reply->message, + *(unsigned int *) (reply->message+4)); + zdev->online = 0; + return -EAGAIN; /* repeat the request on a different device. 
*/ + } +} + +/** + * This function is called from the AP bus code after a crypto request + * "msg" has finished with the reply message "reply". + * It is called from tasklet context. + * @ap_dev: pointer to the AP device + * @msg: pointer to the AP message + * @reply: pointer to the AP reply message + */ +static void zcrypt_pcica_receive(struct ap_device *ap_dev, + struct ap_message *msg, + struct ap_message *reply) +{ + static struct error_hdr error_reply = { + .type = TYPE82_RSP_CODE, + .reply_code = REP82_ERROR_MACHINE_FAILURE, + }; + struct type84_hdr *t84h = reply->message; + int length; + + /* Copy the reply message to the request message buffer. */ + if (IS_ERR(reply)) + memcpy(msg->message, &error_reply, sizeof(error_reply)); + else if (t84h->code == TYPE84_RSP_CODE) { + length = min(PCICA_MAX_RESPONSE_SIZE, (int) t84h->len); + memcpy(msg->message, reply->message, length); + } else + memcpy(msg->message, reply->message, sizeof error_reply); + complete((struct completion *) msg->private); +} + +static atomic_t zcrypt_step = ATOMIC_INIT(0); + +/** + * The request distributor calls this function if it picked the PCICA + * device to handle a modexpo request. + * @zdev: pointer to zcrypt_device structure that identifies the + * PCICA device to the request distributor + * @mex: pointer to the modexpo request buffer + */ +static long zcrypt_pcica_modexpo(struct zcrypt_device *zdev, + struct ica_rsa_modexpo *mex) +{ + struct ap_message ap_msg; + struct completion work; + int rc; + + ap_msg.message = (void *) kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL); + if (!ap_msg.message) + return -ENOMEM; + ap_msg.psmid = (((unsigned long long) current->pid) << 32) + + atomic_inc_return(&zcrypt_step); + ap_msg.private = &work; + rc = ICAMEX_msg_to_type4MEX_msg(zdev, &ap_msg, mex); + if (rc) + goto out_free; + init_completion(&work); + ap_queue_message(zdev->ap_dev, &ap_msg); + rc = wait_for_completion_interruptible_timeout( + &work, PCICA_CLEANUP_TIME); + if (rc > 0) + rc = convert_response(zdev, &ap_msg, mex->outputdata, + mex->outputdatalength); + else { + /* Signal pending or message timed out. */ + ap_cancel_message(zdev->ap_dev, &ap_msg); + if (rc == 0) + /* Message timed out. */ + rc = -ETIME; + } +out_free: + kfree(ap_msg.message); + return rc; +} + +/** + * The request distributor calls this function if it picked the PCICA + * device to handle a modexpo_crt request. + * @zdev: pointer to zcrypt_device structure that identifies the + * PCICA device to the request distributor + * @crt: pointer to the modexpoc_crt request buffer + */ +static long zcrypt_pcica_modexpo_crt(struct zcrypt_device *zdev, + struct ica_rsa_modexpo_crt *crt) +{ + struct ap_message ap_msg; + struct completion work; + int rc; + + ap_msg.message = (void *) kmalloc(PCICA_MAX_MESSAGE_SIZE, GFP_KERNEL); + if (!ap_msg.message) + return -ENOMEM; + ap_msg.psmid = (((unsigned long long) current->pid) << 32) + + atomic_inc_return(&zcrypt_step); + ap_msg.private = &work; + rc = ICACRT_msg_to_type4CRT_msg(zdev, &ap_msg, crt); + if (rc) + goto out_free; + init_completion(&work); + ap_queue_message(zdev->ap_dev, &ap_msg); + rc = wait_for_completion_interruptible_timeout( + &work, PCICA_CLEANUP_TIME); + if (rc > 0) + rc = convert_response(zdev, &ap_msg, crt->outputdata, + crt->outputdatalength); + else { + /* Signal pending or message timed out. */ + ap_cancel_message(zdev->ap_dev, &ap_msg); + if (rc == 0) + /* Message timed out. 
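+			 * wait_for_completion_interruptible_timeout()
+			 * returned 0; a negative return would instead
+			 * mean a pending signal, handled by the same
+			 * cancel above.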
*/ + rc = -ETIME; + } +out_free: + kfree(ap_msg.message); + return rc; +} + +/** + * The crypto operations for a PCICA card. + */ +static struct zcrypt_ops zcrypt_pcica_ops = { + .rsa_modexpo = zcrypt_pcica_modexpo, + .rsa_modexpo_crt = zcrypt_pcica_modexpo_crt, +}; + +/** + * Probe function for PCICA cards. It always accepts the AP device + * since the bus_match already checked the hardware type. + * @ap_dev: pointer to the AP device. + */ +static int zcrypt_pcica_probe(struct ap_device *ap_dev) +{ + struct zcrypt_device *zdev; + int rc; + + zdev = zcrypt_device_alloc(PCICA_MAX_RESPONSE_SIZE); + if (!zdev) + return -ENOMEM; + zdev->ap_dev = ap_dev; + zdev->ops = &zcrypt_pcica_ops; + zdev->online = 1; + zdev->user_space_type = ZCRYPT_PCICA; + zdev->type_string = "PCICA"; + zdev->min_mod_size = PCICA_MIN_MOD_SIZE; + zdev->max_mod_size = PCICA_MAX_MOD_SIZE; + zdev->speed_rating = PCICA_SPEED_RATING; + ap_dev->reply = &zdev->reply; + ap_dev->private = zdev; + rc = zcrypt_device_register(zdev); + if (rc) + goto out_free; + return 0; + +out_free: + ap_dev->private = NULL; + zcrypt_device_free(zdev); + return rc; +} + +/** + * This is called to remove the extended PCICA driver information + * if an AP device is removed. + */ +static void zcrypt_pcica_remove(struct ap_device *ap_dev) +{ + struct zcrypt_device *zdev = ap_dev->private; + + zcrypt_device_unregister(zdev); +} + +int __init zcrypt_pcica_init(void) +{ + return ap_driver_register(&zcrypt_pcica_driver, THIS_MODULE, "pcica"); +} + +void zcrypt_pcica_exit(void) +{ + ap_driver_unregister(&zcrypt_pcica_driver); +} + +#ifndef CONFIG_ZCRYPT_MONOLITHIC +module_init(zcrypt_pcica_init); +module_exit(zcrypt_pcica_exit); +#endif diff --git a/drivers/s390/crypto/zcrypt_pcica.h b/drivers/s390/crypto/zcrypt_pcica.h new file mode 100644 index 000000000000..3be11187f6df --- /dev/null +++ b/drivers/s390/crypto/zcrypt_pcica.h @@ -0,0 +1,117 @@ +/* + * linux/drivers/s390/crypto/zcrypt_pcica.h + * + * zcrypt 2.1.0 + * + * Copyright (C) 2001, 2006 IBM Corporation + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _ZCRYPT_PCICA_H_ +#define _ZCRYPT_PCICA_H_ + +/** + * The type 4 message family is associated with a PCICA card. + * + * The four members of the family are described below. + * + * Note that all unsigned char arrays are right-justified and left-padded + * with zeroes. + * + * Note that all reserved fields must be zeroes. 
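+ *
+ * Unlike the type 50 (CEX2A) family, the operands here are laid out
+ * message first, then exponent, then modulus; the msg_fmt field in
+ * the header selects one of the four layouts below.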
+ */ +struct type4_hdr { + unsigned char reserved1; + unsigned char msg_type_code; /* 0x04 */ + unsigned short msg_len; + unsigned char request_code; /* 0x40 */ + unsigned char msg_fmt; + unsigned short reserved2; +} __attribute__((packed)); + +#define TYPE4_TYPE_CODE 0x04 +#define TYPE4_REQU_CODE 0x40 + +#define TYPE4_SME_FMT 0x00 +#define TYPE4_LME_FMT 0x10 +#define TYPE4_SCR_FMT 0x40 +#define TYPE4_LCR_FMT 0x50 + +/* Mod-Exp, with a small modulus */ +struct type4_sme { + struct type4_hdr header; + unsigned char message[128]; + unsigned char exponent[128]; + unsigned char modulus[128]; +} __attribute__((packed)); + +/* Mod-Exp, with a large modulus */ +struct type4_lme { + struct type4_hdr header; + unsigned char message[256]; + unsigned char exponent[256]; + unsigned char modulus[256]; +} __attribute__((packed)); + +/* CRT, with a small modulus */ +struct type4_scr { + struct type4_hdr header; + unsigned char message[128]; + unsigned char dp[72]; + unsigned char dq[64]; + unsigned char p[72]; + unsigned char q[64]; + unsigned char u[72]; +} __attribute__((packed)); + +/* CRT, with a large modulus */ +struct type4_lcr { + struct type4_hdr header; + unsigned char message[256]; + unsigned char dp[136]; + unsigned char dq[128]; + unsigned char p[136]; + unsigned char q[128]; + unsigned char u[136]; +} __attribute__((packed)); + +/** + * The type 84 response family is associated with a PCICA card. + * + * Note that all unsigned char arrays are right-justified and left-padded + * with zeroes. + * + * Note that all reserved fields must be zeroes. + */ + +struct type84_hdr { + unsigned char reserved1; + unsigned char code; + unsigned short len; + unsigned char reserved2[4]; +} __attribute__((packed)); + +#define TYPE84_RSP_CODE 0x84 + +int zcrypt_pcica_init(void); +void zcrypt_pcica_exit(void); + +#endif /* _ZCRYPT_PCICA_H_ */ diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c new file mode 100644 index 000000000000..f295a403b29a --- /dev/null +++ b/drivers/s390/crypto/zcrypt_pcicc.c @@ -0,0 +1,630 @@ +/* + * linux/drivers/s390/crypto/zcrypt_pcicc.c + * + * zcrypt 2.1.0 + * + * Copyright (C) 2001, 2006 IBM Corporation + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> + * Ralph Wuerthner <rwuerthn@de.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <asm/atomic.h>
+#include <asm/uaccess.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_error.h"
+#include "zcrypt_pcicc.h"
+#include "zcrypt_cca_key.h"
+
+#define PCICC_MIN_MOD_SIZE	 64	/*  512 bits */
+#define PCICC_MAX_MOD_SIZE_OLD	128	/* 1024 bits */
+#define PCICC_MAX_MOD_SIZE	256	/* 2048 bits */
+
+/**
+ * PCICC cards need a speed rating of 0. This keeps them at the end of
+ * the zcrypt device list (see zcrypt_api.c). PCICC cards are only
+ * used if no other cards are present because they are slow and can only
+ * cope with PKCS12 padded requests. The logic is somewhat convoluted:
+ * PKCS11 padded requests are rejected. The modexpo function encrypts
+ * PKCS12 padded data and decrypts any non-PKCS12 padded data (except
+ * PKCS11) on the assumption that it is encrypted PKCS12 data. The
+ * modexpo_crt function always decrypts the data on the assumption that
+ * it is PKCS12 encrypted data.
+ */
+#define PCICC_SPEED_RATING	0
+
+#define PCICC_MAX_MESSAGE_SIZE	0x710	/* max size type6 v1 crt message */
+#define PCICC_MAX_RESPONSE_SIZE 0x710	/* max size type86 v1 reply */
+
+#define PCICC_CLEANUP_TIME	(15*HZ)
+
+static struct ap_device_id zcrypt_pcicc_ids[] = {
+	{ AP_DEVICE(AP_DEVICE_TYPE_PCICC) },
+	{ /* end of list */ },
+};
+
+#ifndef CONFIG_ZCRYPT_MONOLITHIC
+MODULE_DEVICE_TABLE(ap, zcrypt_pcicc_ids);
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("PCICC Cryptographic Coprocessor device driver, "
+		   "Copyright 2001, 2006 IBM Corporation");
+MODULE_LICENSE("GPL");
+#endif
+
+static int zcrypt_pcicc_probe(struct ap_device *ap_dev);
+static void zcrypt_pcicc_remove(struct ap_device *ap_dev);
+static void zcrypt_pcicc_receive(struct ap_device *, struct ap_message *,
+				 struct ap_message *);
+
+static struct ap_driver zcrypt_pcicc_driver = {
+	.probe = zcrypt_pcicc_probe,
+	.remove = zcrypt_pcicc_remove,
+	.receive = zcrypt_pcicc_receive,
+	.ids = zcrypt_pcicc_ids,
+};
+
+/**
+ * The following is used to initialize the CPRB passed to the PCICC card
+ * in a type6 message. The 3 fields that must be filled in at execution
+ * time are req_parml, rpl_parml and usage_domain. Note that all three
+ * fields are *little*-endian. Actually, everything about this interface
+ * is ascii/little-endian, since the device has 'Intel inside'.
+ *
+ * The CPRB is followed immediately by the parm block.
+ * The parm block contains:
+ * - function code ('PD' 0x5044 or 'PK' 0x504B)
+ * - rule block (0x0A00 'PKCS-1.2' or 0x0A00 'ZERO-PAD')
+ * - VUD block
+ */
+static struct CPRB static_cprb = {
+	.cprb_len	= __constant_cpu_to_le16(0x0070),
+	.cprb_ver_id	= 0x41,
+	.func_id	= {0x54,0x32},
+	.checkpoint_flag= 0x01,
+	.svr_namel	= __constant_cpu_to_le16(0x0008),
+	.svr_name	= {'I','C','S','F',' ',' ',' ',' '}
+};
+
+/**
+ * Check the message for PKCS11 padding.
+ */
+static inline int is_PKCS11_padded(unsigned char *buffer, int length)
+{
+	int i;
+	if ((buffer[0] != 0x00) || (buffer[1] != 0x01))
+		return 0;
+	for (i = 2; i < length; i++)
+		if (buffer[i] != 0xFF)
+			break;
+	if (i < 10 || i == length)
+		return 0;
+	if (buffer[i] != 0x00)
+		return 0;
+	return 1;
+}
+
+/**
+ * Check the message for PKCS12 padding.
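+ *
+ * PKCS12 padding here means a PKCS #1 block type 2 frame:
+ *
+ *   0x00 | 0x02 | at least 8 non-zero padding bytes | 0x00 | data
+ *
+ * which is what the checks on buffer[0], buffer[1] and the scan for
+ * the 0x00 separator below implement.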
+ */
+static inline int is_PKCS12_padded(unsigned char *buffer, int length)
+{
+	int i;
+	if ((buffer[0] != 0x00) || (buffer[1] != 0x02))
+		return 0;
+	for (i = 2; i < length; i++)
+		if (buffer[i] == 0x00)
+			break;
+	if ((i < 10) || (i == length))
+		return 0;
+	if (buffer[i] != 0x00)
+		return 0;
+	return 1;
+}
+
+/**
+ * Convert an ICAMEX message to a type6 MEX message.
+ *
+ * @zdev: crypto device pointer
+ * @ap_msg: pointer to AP message
+ * @mex: pointer to user input data
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int ICAMEX_msg_to_type6MEX_msg(struct zcrypt_device *zdev,
+				      struct ap_message *ap_msg,
+				      struct ica_rsa_modexpo *mex)
+{
+	static struct type6_hdr static_type6_hdr = {
+		.type		=  0x06,
+		.offset1	=  0x00000058,
+		.agent_id	= {0x01,0x00,0x43,0x43,0x41,0x2D,0x41,0x50,
+				   0x50,0x4C,0x20,0x20,0x20,0x01,0x01,0x01},
+		.function_code	= {'P','K'},
+	};
+	static struct function_and_rules_block static_pke_function_and_rules ={
+		.function_code	= {'P','K'},
+		.ulen		= __constant_cpu_to_le16(10),
+		.only_rule	= {'P','K','C','S','-','1','.','2'}
+	};
+	struct {
+		struct type6_hdr hdr;
+		struct CPRB cprb;
+		struct function_and_rules_block fr;
+		unsigned short length;
+		char text[0];
+	} __attribute__((packed)) *msg = ap_msg->message;
+	int vud_len, pad_len, size;
+
+	/* VUD.ciphertext */
+	if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
+		return -EFAULT;
+
+	if (is_PKCS11_padded(msg->text, mex->inputdatalength))
+		return -EINVAL;
+
+	/* static message header and f&r */
+	msg->hdr = static_type6_hdr;
+	msg->fr = static_pke_function_and_rules;
+
+	if (is_PKCS12_padded(msg->text, mex->inputdatalength)) {
+		/* strip the padding and adjust the data length */
+		pad_len = strnlen(msg->text + 2, mex->inputdatalength - 2) + 3;
+		if (pad_len <= 9 || pad_len >= mex->inputdatalength)
+			return -ENODEV;
+		vud_len = mex->inputdatalength - pad_len;
+		memmove(msg->text, msg->text + pad_len, vud_len);
+		msg->length = cpu_to_le16(vud_len + 2);
+
+		/* Set up key after the variable length text. */
+		size = zcrypt_type6_mex_key_en(mex, msg->text + vud_len, 0);
+		if (size < 0)
+			return size;
+		size += sizeof(*msg) + vud_len;	/* total size of msg */
+	} else {
+		vud_len = mex->inputdatalength;
+		msg->length = cpu_to_le16(2 + vud_len);
+
+		msg->hdr.function_code[1] = 'D';
+		msg->fr.function_code[1] = 'D';
+
+		/* Set up key after the variable length text. */
+		size = zcrypt_type6_mex_key_de(mex, msg->text + vud_len, 0);
+		if (size < 0)
+			return size;
+		size += sizeof(*msg) + vud_len;	/* total size of msg */
+	}
+
+	/* message header, cprb and f&r */
+	msg->hdr.ToCardLen1 = (size - sizeof(msg->hdr) + 3) & -4;
+	msg->hdr.FromCardLen1 = PCICC_MAX_RESPONSE_SIZE - sizeof(msg->hdr);
+
+	msg->cprb = static_cprb;
+	msg->cprb.usage_domain[0]= AP_QID_QUEUE(zdev->ap_dev->qid);
+	msg->cprb.req_parml = cpu_to_le16(size - sizeof(msg->hdr) -
+					  sizeof(msg->cprb));
+	msg->cprb.rpl_parml = cpu_to_le16(msg->hdr.FromCardLen1);
+
+	ap_msg->length = (size + 3) & -4;
+	return 0;
+}
+
+/**
+ * Convert an ICACRT message to a type6 CRT message.
+ *
+ * @zdev: crypto device pointer
+ * @ap_msg: pointer to AP message
+ * @crt: pointer to user input data
+ *
+ * Returns 0 on success or a negative error code.
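+ *
+ * For reference, a buffer that the is_PKCS12_padded() check above
+ * accepts can be built like this (a sketch with made-up contents, not
+ * driver logic):
+ *
+ *	unsigned char buf[32] = { 0x00, 0x02 };	// block type 2
+ *	memset(buf + 2, 0xAA, 11);	// nonzero padding, at least 8 bytes
+ *	buf[13] = 0x00;			// separator octet
+ *	memset(buf + 14, 0x5A, 18);	// payload
+ *
+ * is_PKCS12_padded(buf, 32) returns 1 for this layout, while
+ * is_PKCS11_padded() instead expects 0x00 0x01 followed by 0xFF padding
+ * bytes and a 0x00 separator.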
+ */ +static int ICACRT_msg_to_type6CRT_msg(struct zcrypt_device *zdev, + struct ap_message *ap_msg, + struct ica_rsa_modexpo_crt *crt) +{ + static struct type6_hdr static_type6_hdr = { + .type = 0x06, + .offset1 = 0x00000058, + .agent_id = {0x01,0x00,0x43,0x43,0x41,0x2D,0x41,0x50, + 0x50,0x4C,0x20,0x20,0x20,0x01,0x01,0x01}, + .function_code = {'P','D'}, + }; + static struct function_and_rules_block static_pkd_function_and_rules ={ + .function_code = {'P','D'}, + .ulen = __constant_cpu_to_le16(10), + .only_rule = {'P','K','C','S','-','1','.','2'} + }; + struct { + struct type6_hdr hdr; + struct CPRB cprb; + struct function_and_rules_block fr; + unsigned short length; + char text[0]; + } __attribute__((packed)) *msg = ap_msg->message; + int size; + + /* VUD.ciphertext */ + msg->length = cpu_to_le16(2 + crt->inputdatalength); + if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength)) + return -EFAULT; + + if (is_PKCS11_padded(msg->text, crt->inputdatalength)) + return -EINVAL; + + /* Set up key after the variable length text. */ + size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 0); + if (size < 0) + return size; + size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */ + + /* message header, cprb and f&r */ + msg->hdr = static_type6_hdr; + msg->hdr.ToCardLen1 = (size - sizeof(msg->hdr) + 3) & -4; + msg->hdr.FromCardLen1 = PCICC_MAX_RESPONSE_SIZE - sizeof(msg->hdr); + + msg->cprb = static_cprb; + msg->cprb.usage_domain[0] = AP_QID_QUEUE(zdev->ap_dev->qid); + msg->cprb.req_parml = msg->cprb.rpl_parml = + cpu_to_le16(size - sizeof(msg->hdr) - sizeof(msg->cprb)); + + msg->fr = static_pkd_function_and_rules; + + ap_msg->length = (size + 3) & -4; + return 0; +} + +/** + * Copy results from a type 86 reply message back to user space. + * + * @zdev: crypto device pointer + * @reply: reply AP message. + * @data: pointer to user output data + * @length: size of user output data + * + * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. + */ +struct type86_reply { + struct type86_hdr hdr; + struct type86_fmt2_ext fmt2; + struct CPRB cprb; + unsigned char pad[4]; /* 4 byte function code/rules block ? 
*/ + unsigned short length; + char text[0]; +} __attribute__((packed)); + +static int convert_type86(struct zcrypt_device *zdev, + struct ap_message *reply, + char __user *outputdata, + unsigned int outputdatalength) +{ + static unsigned char static_pad[] = { + 0x00,0x02, + 0x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD, + 0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57, + 0x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B, + 0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39, + 0xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5, + 0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D, + 0x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB, + 0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F, + 0x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9, + 0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45, + 0x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9, + 0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F, + 0x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD, + 0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D, + 0xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD, + 0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9, + 0x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B, + 0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B, + 0x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B, + 0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD, + 0x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7, + 0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1, + 0x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3, + 0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23, + 0x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55, + 0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43, + 0x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F, + 0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F, + 0x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5, + 0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD, + 0x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41, + 0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09 + }; + struct type86_reply *msg = reply->message; + unsigned short service_rc, service_rs; + unsigned int reply_len, pad_len; + char *data; + + service_rc = le16_to_cpu(msg->cprb.ccp_rtcode); + if (unlikely(service_rc != 0)) { + service_rs = le16_to_cpu(msg->cprb.ccp_rscode); + if (service_rc == 8 && service_rs == 66) { + PDEBUG("Bad block format on PCICC\n"); + return -EINVAL; + } + if (service_rc == 8 && service_rs == 65) { + PDEBUG("Probably an even modulus on PCICC\n"); + return -EINVAL; + } + if (service_rc == 8 && service_rs == 770) { + PDEBUG("Invalid key length on PCICC\n"); + zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD; + return -EAGAIN; + } + if (service_rc == 8 && service_rs == 783) { + PDEBUG("Extended bitlengths not enabled on PCICC\n"); + zdev->max_mod_size = PCICC_MAX_MOD_SIZE_OLD; + return -EAGAIN; + } + PRINTK("Unknown service rc/rs (PCICC): %d/%d\n", + service_rc, service_rs); + zdev->online = 0; + return -EAGAIN; /* repeat the request on a different device. */ + } + data = msg->text; + reply_len = le16_to_cpu(msg->length) - 2; + if (reply_len > outputdatalength) + return -EINVAL; + /** + * For all encipher requests, the length of the ciphertext (reply_len) + * will always equal the modulus length. For MEX decipher requests + * the output needs to get padded. Minimum pad size is 10. + * + * Currently, the cases where padding will be added is for: + * - PCIXCC_MCL2 using a CRT form token (since PKD didn't support + * ZERO-PAD and CRT is only supported for PKD requests) + * - PCICC, always + */ + pad_len = outputdatalength - reply_len; + if (pad_len > 0) { + if (pad_len < 10) + return -EINVAL; + /* 'restore' padding left in the PCICC/PCIXCC card. */ + if (copy_to_user(outputdata, static_pad, pad_len - 1)) + return -EFAULT; + if (put_user(0, outputdata + pad_len - 1)) + return -EFAULT; + } + /* Copy the crypto response to user space. 
*/ + if (copy_to_user(outputdata + pad_len, data, reply_len)) + return -EFAULT; + return 0; +} + +static int convert_response(struct zcrypt_device *zdev, + struct ap_message *reply, + char __user *outputdata, + unsigned int outputdatalength) +{ + struct type86_reply *msg = reply->message; + + /* Response type byte is the second byte in the response. */ + switch (msg->hdr.type) { + case TYPE82_RSP_CODE: + case TYPE88_RSP_CODE: + return convert_error(zdev, reply); + case TYPE86_RSP_CODE: + if (msg->hdr.reply_code) + return convert_error(zdev, reply); + if (msg->cprb.cprb_ver_id == 0x01) + return convert_type86(zdev, reply, + outputdata, outputdatalength); + /* no break, incorrect cprb version is an unknown response */ + default: /* Unknown response type, this should NEVER EVER happen */ + PRINTK("Unrecognized Message Header: %08x%08x\n", + *(unsigned int *) reply->message, + *(unsigned int *) (reply->message+4)); + zdev->online = 0; + return -EAGAIN; /* repeat the request on a different device. */ + } +} + +/** + * This function is called from the AP bus code after a crypto request + * "msg" has finished with the reply message "reply". + * It is called from tasklet context. + * @ap_dev: pointer to the AP device + * @msg: pointer to the AP message + * @reply: pointer to the AP reply message + */ +static void zcrypt_pcicc_receive(struct ap_device *ap_dev, + struct ap_message *msg, + struct ap_message *reply) +{ + static struct error_hdr error_reply = { + .type = TYPE82_RSP_CODE, + .reply_code = REP82_ERROR_MACHINE_FAILURE, + }; + struct type86_reply *t86r = reply->message; + int length; + + /* Copy the reply message to the request message buffer. */ + if (IS_ERR(reply)) + memcpy(msg->message, &error_reply, sizeof(error_reply)); + else if (t86r->hdr.type == TYPE86_RSP_CODE && + t86r->cprb.cprb_ver_id == 0x01) { + length = sizeof(struct type86_reply) + t86r->length - 2; + length = min(PCICC_MAX_RESPONSE_SIZE, length); + memcpy(msg->message, reply->message, length); + } else + memcpy(msg->message, reply->message, sizeof error_reply); + complete((struct completion *) msg->private); +} + +static atomic_t zcrypt_step = ATOMIC_INIT(0); + +/** + * The request distributor calls this function if it picked the PCICC + * device to handle a modexpo request. + * @zdev: pointer to zcrypt_device structure that identifies the + * PCICC device to the request distributor + * @mex: pointer to the modexpo request buffer + */ +static long zcrypt_pcicc_modexpo(struct zcrypt_device *zdev, + struct ica_rsa_modexpo *mex) +{ + struct ap_message ap_msg; + struct completion work; + int rc; + + ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL); + if (!ap_msg.message) + return -ENOMEM; + ap_msg.length = PAGE_SIZE; + ap_msg.psmid = (((unsigned long long) current->pid) << 32) + + atomic_inc_return(&zcrypt_step); + ap_msg.private = &work; + rc = ICAMEX_msg_to_type6MEX_msg(zdev, &ap_msg, mex); + if (rc) + goto out_free; + init_completion(&work); + ap_queue_message(zdev->ap_dev, &ap_msg); + rc = wait_for_completion_interruptible_timeout( + &work, PCICC_CLEANUP_TIME); + if (rc > 0) + rc = convert_response(zdev, &ap_msg, mex->outputdata, + mex->outputdatalength); + else { + /* Signal pending or message timed out. */ + ap_cancel_message(zdev->ap_dev, &ap_msg); + if (rc == 0) + /* Message timed out. */ + rc = -ETIME; + } +out_free: + free_page((unsigned long) ap_msg.message); + return rc; +} + +/** + * The request distributor calls this function if it picked the PCICC + * device to handle a modexpo_crt request. 
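+ * Like zcrypt_pcicc_modexpo() above, it follows the driver's
+ * queue-and-wait pattern. A condensed sketch of that pattern (error
+ * unwinding trimmed; names as in the functions here):
+ *
+ *	init_completion(&work);
+ *	ap_queue_message(zdev->ap_dev, &ap_msg);
+ *	rc = wait_for_completion_interruptible_timeout(&work,
+ *						       PCICC_CLEANUP_TIME);
+ *	if (rc > 0)
+ *		rc = convert_response(zdev, &ap_msg, outputdata, length);
+ *	else {
+ *		ap_cancel_message(zdev->ap_dev, &ap_msg);
+ *		if (rc == 0)
+ *			rc = -ETIME;	// timed out; rc < 0 means interrupted
+ *	}
+ *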
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ *	  PCICC device to the request distributor
+ * @crt: pointer to the modexpo_crt request buffer
+ */
+static long zcrypt_pcicc_modexpo_crt(struct zcrypt_device *zdev,
+				     struct ica_rsa_modexpo_crt *crt)
+{
+	struct ap_message ap_msg;
+	struct completion work;
+	int rc;
+
+	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
+	if (!ap_msg.message)
+		return -ENOMEM;
+	ap_msg.length = PAGE_SIZE;
+	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+				atomic_inc_return(&zcrypt_step);
+	ap_msg.private = &work;
+	rc = ICACRT_msg_to_type6CRT_msg(zdev, &ap_msg, crt);
+	if (rc)
+		goto out_free;
+	init_completion(&work);
+	ap_queue_message(zdev->ap_dev, &ap_msg);
+	rc = wait_for_completion_interruptible_timeout(
+				&work, PCICC_CLEANUP_TIME);
+	if (rc > 0)
+		rc = convert_response(zdev, &ap_msg, crt->outputdata,
+				      crt->outputdatalength);
+	else {
+		/* Signal pending or message timed out. */
+		ap_cancel_message(zdev->ap_dev, &ap_msg);
+		if (rc == 0)
+			/* Message timed out. */
+			rc = -ETIME;
+	}
+out_free:
+	free_page((unsigned long) ap_msg.message);
+	return rc;
+}
+
+/**
+ * The crypto operations for a PCICC card.
+ */
+static struct zcrypt_ops zcrypt_pcicc_ops = {
+	.rsa_modexpo = zcrypt_pcicc_modexpo,
+	.rsa_modexpo_crt = zcrypt_pcicc_modexpo_crt,
+};
+
+/**
+ * Probe function for PCICC cards. It always accepts the AP device
+ * since the bus_match already checked the hardware type.
+ * @ap_dev: pointer to the AP device.
+ */
+static int zcrypt_pcicc_probe(struct ap_device *ap_dev)
+{
+	struct zcrypt_device *zdev;
+	int rc;
+
+	zdev = zcrypt_device_alloc(PCICC_MAX_RESPONSE_SIZE);
+	if (!zdev)
+		return -ENOMEM;
+	zdev->ap_dev = ap_dev;
+	zdev->ops = &zcrypt_pcicc_ops;
+	zdev->online = 1;
+	zdev->user_space_type = ZCRYPT_PCICC;
+	zdev->type_string = "PCICC";
+	zdev->min_mod_size = PCICC_MIN_MOD_SIZE;
+	zdev->max_mod_size = PCICC_MAX_MOD_SIZE;
+	zdev->speed_rating = PCICC_SPEED_RATING;
+	ap_dev->reply = &zdev->reply;
+	ap_dev->private = zdev;
+	rc = zcrypt_device_register(zdev);
+	if (rc)
+		goto out_free;
+	return 0;
+
+ out_free:
+	ap_dev->private = NULL;
+	zcrypt_device_free(zdev);
+	return rc;
+}
+
+/**
+ * This is called to remove the extended PCICC driver information
+ * if an AP device is removed.
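+ *
+ * Note the unwind order in zcrypt_pcicc_probe() above: if
+ * zcrypt_device_register() fails, ap_dev->private is reset to NULL
+ * before the device is freed, so this callback never sees a dangling
+ * pointer. In miniature (a sketch, not the driver code):
+ *
+ *	rc = zcrypt_device_register(zdev);
+ *	if (rc) {
+ *		ap_dev->private = NULL;
+ *		zcrypt_device_free(zdev);
+ *	}
+ *	return rc;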
+ */ +static void zcrypt_pcicc_remove(struct ap_device *ap_dev) +{ + struct zcrypt_device *zdev = ap_dev->private; + + zcrypt_device_unregister(zdev); +} + +int __init zcrypt_pcicc_init(void) +{ + return ap_driver_register(&zcrypt_pcicc_driver, THIS_MODULE, "pcicc"); +} + +void zcrypt_pcicc_exit(void) +{ + ap_driver_unregister(&zcrypt_pcicc_driver); +} + +#ifndef CONFIG_ZCRYPT_MONOLITHIC +module_init(zcrypt_pcicc_init); +module_exit(zcrypt_pcicc_exit); +#endif diff --git a/drivers/s390/crypto/zcrypt_pcicc.h b/drivers/s390/crypto/zcrypt_pcicc.h new file mode 100644 index 000000000000..6d4454846c8f --- /dev/null +++ b/drivers/s390/crypto/zcrypt_pcicc.h @@ -0,0 +1,176 @@ +/* + * linux/drivers/s390/crypto/zcrypt_pcicc.h + * + * zcrypt 2.1.0 + * + * Copyright (C) 2001, 2006 IBM Corporation + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _ZCRYPT_PCICC_H_ +#define _ZCRYPT_PCICC_H_ + +/** + * The type 6 message family is associated with PCICC or PCIXCC cards. + * + * It contains a message header followed by a CPRB, both of which + * are described below. + * + * Note that all reserved fields must be zeroes. + */ +struct type6_hdr { + unsigned char reserved1; /* 0x00 */ + unsigned char type; /* 0x06 */ + unsigned char reserved2[2]; /* 0x0000 */ + unsigned char right[4]; /* 0x00000000 */ + unsigned char reserved3[2]; /* 0x0000 */ + unsigned char reserved4[2]; /* 0x0000 */ + unsigned char apfs[4]; /* 0x00000000 */ + unsigned int offset1; /* 0x00000058 (offset to CPRB) */ + unsigned int offset2; /* 0x00000000 */ + unsigned int offset3; /* 0x00000000 */ + unsigned int offset4; /* 0x00000000 */ + unsigned char agent_id[16]; /* PCICC: */ + /* 0x0100 */ + /* 0x4343412d4150504c202020 */ + /* 0x010101 */ + /* PCIXCC: */ + /* 0x4341000000000000 */ + /* 0x0000000000000000 */ + unsigned char rqid[2]; /* rqid. internal to 603 */ + unsigned char reserved5[2]; /* 0x0000 */ + unsigned char function_code[2]; /* for PKD, 0x5044 (ascii 'PD') */ + unsigned char reserved6[2]; /* 0x0000 */ + unsigned int ToCardLen1; /* (request CPRB len + 3) & -4 */ + unsigned int ToCardLen2; /* db len 0x00000000 for PKD */ + unsigned int ToCardLen3; /* 0x00000000 */ + unsigned int ToCardLen4; /* 0x00000000 */ + unsigned int FromCardLen1; /* response buffer length */ + unsigned int FromCardLen2; /* db len 0x00000000 for PKD */ + unsigned int FromCardLen3; /* 0x00000000 */ + unsigned int FromCardLen4; /* 0x00000000 */ +} __attribute__((packed)); + +/** + * CPRB + * Note that all shorts, ints and longs are little-endian. + * All pointer fields are 32-bits long, and mean nothing + * + * A request CPRB is followed by a request_parameter_block. 
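+ * (The fixed type6_hdr above is 88 (0x58) bytes, which is exactly the
+ * offset1 value the driver stores, so the CPRB starts right after the
+ * header. A build-time check one could add as a sketch,
+ *
+ *	BUILD_BUG_ON(sizeof(struct type6_hdr) != 0x58);
+ *
+ * would pin this layout assumption down.)
+ *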
+ * + * The request (or reply) parameter block is organized thus: + * function code + * VUD block + * key block + */ +struct CPRB { + unsigned short cprb_len; /* CPRB length */ + unsigned char cprb_ver_id; /* CPRB version id. */ + unsigned char pad_000; /* Alignment pad byte. */ + unsigned char srpi_rtcode[4]; /* SRPI return code LELONG */ + unsigned char srpi_verb; /* SRPI verb type */ + unsigned char flags; /* flags */ + unsigned char func_id[2]; /* function id */ + unsigned char checkpoint_flag; /* */ + unsigned char resv2; /* reserved */ + unsigned short req_parml; /* request parameter buffer */ + /* length 16-bit little endian */ + unsigned char req_parmp[4]; /* request parameter buffer * + * pointer (means nothing: the * + * parameter buffer follows * + * the CPRB). */ + unsigned char req_datal[4]; /* request data buffer */ + /* length ULELONG */ + unsigned char req_datap[4]; /* request data buffer */ + /* pointer */ + unsigned short rpl_parml; /* reply parameter buffer */ + /* length 16-bit little endian */ + unsigned char pad_001[2]; /* Alignment pad bytes. ULESHORT */ + unsigned char rpl_parmp[4]; /* reply parameter buffer * + * pointer (means nothing: the * + * parameter buffer follows * + * the CPRB). */ + unsigned char rpl_datal[4]; /* reply data buffer len ULELONG */ + unsigned char rpl_datap[4]; /* reply data buffer */ + /* pointer */ + unsigned short ccp_rscode; /* server reason code ULESHORT */ + unsigned short ccp_rtcode; /* server return code ULESHORT */ + unsigned char repd_parml[2]; /* replied parameter len ULESHORT*/ + unsigned char mac_data_len[2]; /* Mac Data Length ULESHORT */ + unsigned char repd_datal[4]; /* replied data length ULELONG */ + unsigned char req_pc[2]; /* PC identifier */ + unsigned char res_origin[8]; /* resource origin */ + unsigned char mac_value[8]; /* Mac Value */ + unsigned char logon_id[8]; /* Logon Identifier */ + unsigned char usage_domain[2]; /* cdx */ + unsigned char resv3[18]; /* reserved for requestor */ + unsigned short svr_namel; /* server name length ULESHORT */ + unsigned char svr_name[8]; /* server name */ +} __attribute__((packed)); + +/** + * The type 86 message family is associated with PCICC and PCIXCC cards. + * + * It contains a message header followed by a CPRB. The CPRB is + * the same as the request CPRB, which is described above. + * + * If format is 1, an error condition exists and no data beyond + * the 8-byte message header is of interest. + * + * The non-error message is shown below. + * + * Note that all reserved fields must be zeroes. 
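+ *
+ * A sketch of triaging a reply from the fixed eight-byte header alone
+ * (illustrative only, using the names defined below):
+ *
+ *	struct type86_hdr *h = reply;
+ *	if (h->type != 0x86)
+ *		return -EINVAL;		// not a type 86 reply at all
+ *	if (h->format != 0x02)
+ *		return h->reply_code;	// format 1: error, header only
+ *	// otherwise a type86_fmt2_ext and the CPRB follow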
+ */ +struct type86_hdr { + unsigned char reserved1; /* 0x00 */ + unsigned char type; /* 0x86 */ + unsigned char format; /* 0x01 (error) or 0x02 (ok) */ + unsigned char reserved2; /* 0x00 */ + unsigned char reply_code; /* reply code (see above) */ + unsigned char reserved3[3]; /* 0x000000 */ +} __attribute__((packed)); + +#define TYPE86_RSP_CODE 0x86 +#define TYPE86_FMT2 0x02 + +struct type86_fmt2_ext { + unsigned char reserved[4]; /* 0x00000000 */ + unsigned char apfs[4]; /* final status */ + unsigned int count1; /* length of CPRB + parameters */ + unsigned int offset1; /* offset to CPRB */ + unsigned int count2; /* 0x00000000 */ + unsigned int offset2; /* db offset 0x00000000 for PKD */ + unsigned int count3; /* 0x00000000 */ + unsigned int offset3; /* 0x00000000 */ + unsigned int count4; /* 0x00000000 */ + unsigned int offset4; /* 0x00000000 */ +} __attribute__((packed)); + +struct function_and_rules_block { + unsigned char function_code[2]; + unsigned short ulen; + unsigned char only_rule[8]; +} __attribute__((packed)); + +int zcrypt_pcicc_init(void); +void zcrypt_pcicc_exit(void); + +#endif /* _ZCRYPT_PCICC_H_ */ diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c new file mode 100644 index 000000000000..2da8b9381407 --- /dev/null +++ b/drivers/s390/crypto/zcrypt_pcixcc.c @@ -0,0 +1,951 @@ +/* + * linux/drivers/s390/crypto/zcrypt_pcixcc.c + * + * zcrypt 2.1.0 + * + * Copyright (C) 2001, 2006 IBM Corporation + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> + * Ralph Wuerthner <rwuerthn@de.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <asm/atomic.h>
+#include <asm/uaccess.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_error.h"
+#include "zcrypt_pcicc.h"
+#include "zcrypt_pcixcc.h"
+#include "zcrypt_cca_key.h"
+
+#define PCIXCC_MIN_MOD_SIZE	 16	/*  128 bits	*/
+#define PCIXCC_MIN_MOD_SIZE_OLD	 64	/*  512 bits	*/
+#define PCIXCC_MAX_MOD_SIZE	256	/* 2048 bits	*/
+
+#define PCIXCC_MCL2_SPEED_RATING	7870	/* FIXME: needs finetuning */
+#define PCIXCC_MCL3_SPEED_RATING	7870
+#define CEX2C_SPEED_RATING		8540
+
+#define PCIXCC_MAX_ICA_MESSAGE_SIZE 0x77c  /* max size type6 v2 crt message */
+#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply	    */
+
+#define PCIXCC_MAX_XCRB_MESSAGE_SIZE (12*1024)
+#define PCIXCC_MAX_XCRB_RESPONSE_SIZE PCIXCC_MAX_XCRB_MESSAGE_SIZE
+#define PCIXCC_MAX_XCRB_DATA_SIZE (11*1024)
+#define PCIXCC_MAX_XCRB_REPLY_SIZE (5*1024)
+
+#define PCIXCC_MAX_RESPONSE_SIZE PCIXCC_MAX_XCRB_RESPONSE_SIZE
+
+#define PCIXCC_CLEANUP_TIME	(15*HZ)
+
+#define CEIL4(x) ((((x)+3)/4)*4)
+
+struct response_type {
+	struct completion work;
+	int type;
+};
+#define PCIXCC_RESPONSE_TYPE_ICA  0
+#define PCIXCC_RESPONSE_TYPE_XCRB 1
+
+static struct ap_device_id zcrypt_pcixcc_ids[] = {
+	{ AP_DEVICE(AP_DEVICE_TYPE_PCIXCC) },
+	{ AP_DEVICE(AP_DEVICE_TYPE_CEX2C) },
+	{ /* end of list */ },
+};
+
+#ifndef CONFIG_ZCRYPT_MONOLITHIC
+MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_ids);
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, "
+		   "Copyright 2001, 2006 IBM Corporation");
+MODULE_LICENSE("GPL");
+#endif
+
+static int zcrypt_pcixcc_probe(struct ap_device *ap_dev);
+static void zcrypt_pcixcc_remove(struct ap_device *ap_dev);
+static void zcrypt_pcixcc_receive(struct ap_device *, struct ap_message *,
+				  struct ap_message *);
+
+static struct ap_driver zcrypt_pcixcc_driver = {
+	.probe = zcrypt_pcixcc_probe,
+	.remove = zcrypt_pcixcc_remove,
+	.receive = zcrypt_pcixcc_receive,
+	.ids = zcrypt_pcixcc_ids,
+};
+
+/**
+ * The following is used to initialize the CPRBX passed to the PCIXCC/CEX2C
+ * card in a type6 message. The 3 fields that must be filled in at execution
+ * time are  req_parml, rpl_parml and usage_domain.
+ * Everything about this interface is ascii/big-endian, since the
+ * device does *not* have 'Intel inside'.
+ *
+ * The CPRBX is followed immediately by the parm block.
+ * The parm block contains:
+ * - function code ('PD' 0x5044 or 'PK' 0x504B)
+ * - rule block (one of:)
+ *   + 0x000A 'PKCS-1.2' (MCL2 'PD')
+ *   + 0x000A 'ZERO-PAD' (MCL2 'PK')
+ *   + 0x000A 'ZERO-PAD' (MCL3 'PD' or CEX2C 'PD')
+ *   + 0x000A 'MRP     ' (MCL3 'PK' or CEX2C 'PK')
+ * - VUD block
+ */
+static struct CPRBX static_cprbx = {
+	.cprb_len	=  0x00DC,
+	.cprb_ver_id	=  0x02,
+	.func_id	= {0x54,0x32},
+};
+
+/**
+ * Convert an ICAMEX message to a type6 MEX message.
+ *
+ * @zdev: crypto device pointer
+ * @ap_msg: pointer to AP message
+ * @mex: pointer to user input data
+ *
+ * Returns 0 on success or a negative error code.
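+ *
+ * Block lengths in these messages are rounded up to multiples of 4.
+ * The CEIL4() macro above is equivalent to the (x + 3) & -4 idiom the
+ * PCICC code uses; for example (a worked sketch):
+ *
+ *	CEIL4(5) == ((5 + 3) / 4) * 4 == 8 == (5 + 3) & -4
+ *	CEIL4(8) == ((8 + 3) / 4) * 4 == 8 == (8 + 3) & -4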
+ */
+static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_device *zdev,
+				       struct ap_message *ap_msg,
+				       struct ica_rsa_modexpo *mex)
+{
+	static struct type6_hdr static_type6_hdrX = {
+		.type		=  0x06,
+		.offset1	=  0x00000058,
+		.agent_id	= {'C','A',},
+		.function_code	= {'P','K'},
+	};
+	static struct function_and_rules_block static_pke_fnr = {
+		.function_code	= {'P','K'},
+		.ulen		= 10,
+		.only_rule	= {'M','R','P',' ',' ',' ',' ',' '}
+	};
+	static struct function_and_rules_block static_pke_fnr_MCL2 = {
+		.function_code	= {'P','K'},
+		.ulen		= 10,
+		.only_rule	= {'Z','E','R','O','-','P','A','D'}
+	};
+	struct {
+		struct type6_hdr hdr;
+		struct CPRBX cprbx;
+		struct function_and_rules_block fr;
+		unsigned short length;
+		char text[0];
+	} __attribute__((packed)) *msg = ap_msg->message;
+	int size;
+
+	/* VUD.ciphertext */
+	msg->length = mex->inputdatalength + 2;
+	if (copy_from_user(msg->text, mex->inputdata, mex->inputdatalength))
+		return -EFAULT;
+
+	/* Set up key which is located after the variable length text. */
+	size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength, 1);
+	if (size < 0)
+		return size;
+	size += sizeof(*msg) + mex->inputdatalength;
+
+	/* message header, cprbx and f&r */
+	msg->hdr = static_type6_hdrX;
+	msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
+	msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
+
+	msg->cprbx = static_cprbx;
+	msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid);
+	msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1;
+
+	msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ?
+		static_pke_fnr_MCL2 : static_pke_fnr;
+
+	msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx);
+
+	ap_msg->length = size;
+	return 0;
+}
+
+/**
+ * Convert an ICACRT message to a type6 CRT message.
+ *
+ * @zdev: crypto device pointer
+ * @ap_msg: pointer to AP message
+ * @crt: pointer to user input data
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
+				       struct ap_message *ap_msg,
+				       struct ica_rsa_modexpo_crt *crt)
+{
+	static struct type6_hdr static_type6_hdrX = {
+		.type		=  0x06,
+		.offset1	=  0x00000058,
+		.agent_id	= {'C','A',},
+		.function_code	= {'P','D'},
+	};
+	static struct function_and_rules_block static_pkd_fnr = {
+		.function_code	= {'P','D'},
+		.ulen		= 10,
+		.only_rule	= {'Z','E','R','O','-','P','A','D'}
+	};
+
+	static struct function_and_rules_block static_pkd_fnr_MCL2 = {
+		.function_code	= {'P','D'},
+		.ulen		= 10,
+		.only_rule	= {'P','K','C','S','-','1','.','2'}
+	};
+	struct {
+		struct type6_hdr hdr;
+		struct CPRBX cprbx;
+		struct function_and_rules_block fr;
+		unsigned short length;
+		char text[0];
+	} __attribute__((packed)) *msg = ap_msg->message;
+	int size;
+
+	/* VUD.ciphertext */
+	msg->length = crt->inputdatalength + 2;
+	if (copy_from_user(msg->text, crt->inputdata, crt->inputdatalength))
+		return -EFAULT;
+
+	/* Set up key which is located after the variable length text.
*/ + size = zcrypt_type6_crt_key(crt, msg->text + crt->inputdatalength, 1); + if (size < 0) + return size; + size += sizeof(*msg) + crt->inputdatalength; /* total size of msg */ + + /* message header, cprbx and f&r */ + msg->hdr = static_type6_hdrX; + msg->hdr.ToCardLen1 = size - sizeof(msg->hdr); + msg->hdr.FromCardLen1 = PCIXCC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr); + + msg->cprbx = static_cprbx; + msg->cprbx.domain = AP_QID_QUEUE(zdev->ap_dev->qid); + msg->cprbx.req_parml = msg->cprbx.rpl_msgbl = + size - sizeof(msg->hdr) - sizeof(msg->cprbx); + + msg->fr = (zdev->user_space_type == ZCRYPT_PCIXCC_MCL2) ? + static_pkd_fnr_MCL2 : static_pkd_fnr; + + ap_msg->length = size; + return 0; +} + +/** + * Convert a XCRB message to a type6 CPRB message. + * + * @zdev: crypto device pointer + * @ap_msg: pointer to AP message + * @xcRB: pointer to user input data + * + * Returns 0 on success or -EFAULT. + */ +struct type86_fmt2_msg { + struct type86_hdr hdr; + struct type86_fmt2_ext fmt2; +} __attribute__((packed)); + +static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev, + struct ap_message *ap_msg, + struct ica_xcRB *xcRB) +{ + static struct type6_hdr static_type6_hdrX = { + .type = 0x06, + .offset1 = 0x00000058, + }; + struct { + struct type6_hdr hdr; + struct ica_CPRBX cprbx; + } __attribute__((packed)) *msg = ap_msg->message; + + int rcblen = CEIL4(xcRB->request_control_blk_length); + int replylen; + char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen; + char *function_code; + + /* length checks */ + ap_msg->length = sizeof(struct type6_hdr) + + CEIL4(xcRB->request_control_blk_length) + + xcRB->request_data_length; + if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE) { + PRINTK("Combined message is too large (%ld/%d/%d).\n", + sizeof(struct type6_hdr), + xcRB->request_control_blk_length, + xcRB->request_data_length); + return -EFAULT; + } + if (CEIL4(xcRB->reply_control_blk_length) > + PCIXCC_MAX_XCRB_REPLY_SIZE) { + PDEBUG("Reply CPRB length is too large (%d).\n", + xcRB->request_control_blk_length); + return -EFAULT; + } + if (CEIL4(xcRB->reply_data_length) > PCIXCC_MAX_XCRB_DATA_SIZE) { + PDEBUG("Reply data block length is too large (%d).\n", + xcRB->reply_data_length); + return -EFAULT; + } + replylen = CEIL4(xcRB->reply_control_blk_length) + + CEIL4(xcRB->reply_data_length) + + sizeof(struct type86_fmt2_msg); + if (replylen > PCIXCC_MAX_XCRB_RESPONSE_SIZE) { + PDEBUG("Reply CPRB + data block > PCIXCC_MAX_XCRB_RESPONSE_SIZE" + " (%d/%d/%d).\n", + sizeof(struct type86_fmt2_msg), + xcRB->reply_control_blk_length, + xcRB->reply_data_length); + xcRB->reply_control_blk_length = PCIXCC_MAX_XCRB_RESPONSE_SIZE - + (sizeof(struct type86_fmt2_msg) + + CEIL4(xcRB->reply_data_length)); + PDEBUG("Capping Reply CPRB length at %d\n", + xcRB->reply_control_blk_length); + } + + /* prepare type6 header */ + msg->hdr = static_type6_hdrX; + memcpy(msg->hdr.agent_id , &(xcRB->agent_ID), sizeof(xcRB->agent_ID)); + msg->hdr.ToCardLen1 = xcRB->request_control_blk_length; + if (xcRB->request_data_length) { + msg->hdr.offset2 = msg->hdr.offset1 + rcblen; + msg->hdr.ToCardLen2 = xcRB->request_data_length; + } + msg->hdr.FromCardLen1 = xcRB->reply_control_blk_length; + msg->hdr.FromCardLen2 = xcRB->reply_data_length; + + /* prepare CPRB */ + if (copy_from_user(&(msg->cprbx), xcRB->request_control_blk_addr, + xcRB->request_control_blk_length)) + return -EFAULT; + if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) > + xcRB->request_control_blk_length) { + PDEBUG("cprb_len too large 
(%d/%d)\n", msg->cprbx.cprb_len, + xcRB->request_control_blk_length); + return -EFAULT; + } + function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len; + memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code)); + + /* copy data block */ + if (xcRB->request_data_length && + copy_from_user(req_data, xcRB->request_data_address, + xcRB->request_data_length)) + return -EFAULT; + return 0; +} + +/** + * Copy results from a type 86 ICA reply message back to user space. + * + * @zdev: crypto device pointer + * @reply: reply AP message. + * @data: pointer to user output data + * @length: size of user output data + * + * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error. + */ +struct type86x_reply { + struct type86_hdr hdr; + struct type86_fmt2_ext fmt2; + struct CPRBX cprbx; + unsigned char pad[4]; /* 4 byte function code/rules block ? */ + unsigned short length; + char text[0]; +} __attribute__((packed)); + +static int convert_type86_ica(struct zcrypt_device *zdev, + struct ap_message *reply, + char __user *outputdata, + unsigned int outputdatalength) +{ + static unsigned char static_pad[] = { + 0x00,0x02, + 0x1B,0x7B,0x5D,0xB5,0x75,0x01,0x3D,0xFD, + 0x8D,0xD1,0xC7,0x03,0x2D,0x09,0x23,0x57, + 0x89,0x49,0xB9,0x3F,0xBB,0x99,0x41,0x5B, + 0x75,0x21,0x7B,0x9D,0x3B,0x6B,0x51,0x39, + 0xBB,0x0D,0x35,0xB9,0x89,0x0F,0x93,0xA5, + 0x0B,0x47,0xF1,0xD3,0xBB,0xCB,0xF1,0x9D, + 0x23,0x73,0x71,0xFF,0xF3,0xF5,0x45,0xFB, + 0x61,0x29,0x23,0xFD,0xF1,0x29,0x3F,0x7F, + 0x17,0xB7,0x1B,0xA9,0x19,0xBD,0x57,0xA9, + 0xD7,0x95,0xA3,0xCB,0xED,0x1D,0xDB,0x45, + 0x7D,0x11,0xD1,0x51,0x1B,0xED,0x71,0xE9, + 0xB1,0xD1,0xAB,0xAB,0x21,0x2B,0x1B,0x9F, + 0x3B,0x9F,0xF7,0xF7,0xBD,0x63,0xEB,0xAD, + 0xDF,0xB3,0x6F,0x5B,0xDB,0x8D,0xA9,0x5D, + 0xE3,0x7D,0x77,0x49,0x47,0xF5,0xA7,0xFD, + 0xAB,0x2F,0x27,0x35,0x77,0xD3,0x49,0xC9, + 0x09,0xEB,0xB1,0xF9,0xBF,0x4B,0xCB,0x2B, + 0xEB,0xEB,0x05,0xFF,0x7D,0xC7,0x91,0x8B, + 0x09,0x83,0xB9,0xB9,0x69,0x33,0x39,0x6B, + 0x79,0x75,0x19,0xBF,0xBB,0x07,0x1D,0xBD, + 0x29,0xBF,0x39,0x95,0x93,0x1D,0x35,0xC7, + 0xC9,0x4D,0xE5,0x97,0x0B,0x43,0x9B,0xF1, + 0x16,0x93,0x03,0x1F,0xA5,0xFB,0xDB,0xF3, + 0x27,0x4F,0x27,0x61,0x05,0x1F,0xB9,0x23, + 0x2F,0xC3,0x81,0xA9,0x23,0x71,0x55,0x55, + 0xEB,0xED,0x41,0xE5,0xF3,0x11,0xF1,0x43, + 0x69,0x03,0xBD,0x0B,0x37,0x0F,0x51,0x8F, + 0x0B,0xB5,0x89,0x5B,0x67,0xA9,0xD9,0x4F, + 0x01,0xF9,0x21,0x77,0x37,0x73,0x79,0xC5, + 0x7F,0x51,0xC1,0xCF,0x97,0xA1,0x75,0xAD, + 0x35,0x9D,0xD3,0xD3,0xA7,0x9D,0x5D,0x41, + 0x6F,0x65,0x1B,0xCF,0xA9,0x87,0x91,0x09 + }; + struct type86x_reply *msg = reply->message; + unsigned short service_rc, service_rs; + unsigned int reply_len, pad_len; + char *data; + + service_rc = msg->cprbx.ccp_rtcode; + if (unlikely(service_rc != 0)) { + service_rs = msg->cprbx.ccp_rscode; + if (service_rc == 8 && service_rs == 66) { + PDEBUG("Bad block format on PCIXCC/CEX2C\n"); + return -EINVAL; + } + if (service_rc == 8 && service_rs == 65) { + PDEBUG("Probably an even modulus on PCIXCC/CEX2C\n"); + return -EINVAL; + } + if (service_rc == 8 && service_rs == 770) { + PDEBUG("Invalid key length on PCIXCC/CEX2C\n"); + zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD; + return -EAGAIN; + } + if (service_rc == 8 && service_rs == 783) { + PDEBUG("Extended bitlengths not enabled on PCIXCC/CEX2C\n"); + zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD; + return -EAGAIN; + } + PRINTK("Unknown service rc/rs (PCIXCC/CEX2C): %d/%d\n", + service_rc, service_rs); + zdev->online = 0; + return -EAGAIN; /* repeat the request on a different device. 
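+	 *
+	 * In summary, the rc/rs triage above maps (a sketch that mirrors
+	 * the branches, not extra logic):
+	 *
+	 *	rc 8, rs 66:  bad block format            -> -EINVAL
+	 *	rc 8, rs 65:  probably an even modulus    -> -EINVAL
+	 *	rc 8, rs 770: invalid key length          -> raise min_mod_size, -EAGAIN
+	 *	rc 8, rs 783: ext. bitlengths not enabled -> raise min_mod_size, -EAGAIN
+	 *	otherwise:    take the device offline     -> -EAGAIN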
+	 */
+	}
+	data = msg->text;
+	reply_len = msg->length - 2;
+	if (reply_len > outputdatalength)
+		return -EINVAL;
+	/**
+	 * For all encipher requests, the length of the ciphertext (reply_len)
+	 * will always equal the modulus length. For MEX decipher requests
+	 * the output needs to get padded. Minimum pad size is 10.
+	 *
+	 * Currently, the cases where padding will be added are:
+	 * - PCIXCC_MCL2 using a CRT form token (since PKD didn't support
+	 *   ZERO-PAD and CRT is only supported for PKD requests)
+	 * - PCICC, always
+	 */
+	pad_len = outputdatalength - reply_len;
+	if (pad_len > 0) {
+		if (pad_len < 10)
+			return -EINVAL;
+		/* 'restore' padding left in the PCICC/PCIXCC card. */
+		if (copy_to_user(outputdata, static_pad, pad_len - 1))
+			return -EFAULT;
+		if (put_user(0, outputdata + pad_len - 1))
+			return -EFAULT;
+	}
+	/* Copy the crypto response to user space. */
+	if (copy_to_user(outputdata + pad_len, data, reply_len))
+		return -EFAULT;
+	return 0;
+}
+
+/**
+ * Copy results from a type 86 XCRB reply message back to user space.
+ *
+ * @zdev: crypto device pointer
+ * @reply: reply AP message.
+ * @xcRB: pointer to XCRB
+ *
+ * Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
+ */
+static int convert_type86_xcrb(struct zcrypt_device *zdev,
+			       struct ap_message *reply,
+			       struct ica_xcRB *xcRB)
+{
+	struct type86_fmt2_msg *msg = reply->message;
+	char *data = reply->message;
+
+	/* Copy CPRB to user */
+	if (copy_to_user(xcRB->reply_control_blk_addr,
+			 data + msg->fmt2.offset1, msg->fmt2.count1))
+		return -EFAULT;
+	xcRB->reply_control_blk_length = msg->fmt2.count1;
+
+	/* Copy data buffer to user */
+	if (msg->fmt2.count2)
+		if (copy_to_user(xcRB->reply_data_addr,
+				 data + msg->fmt2.offset2, msg->fmt2.count2))
+			return -EFAULT;
+	xcRB->reply_data_length = msg->fmt2.count2;
+	return 0;
+}
+
+static int convert_response_ica(struct zcrypt_device *zdev,
+				struct ap_message *reply,
+				char __user *outputdata,
+				unsigned int outputdatalength)
+{
+	struct type86x_reply *msg = reply->message;
+
+	/* Response type byte is the second byte in the response. */
+	switch (((unsigned char *) reply->message)[1]) {
+	case TYPE82_RSP_CODE:
+	case TYPE88_RSP_CODE:
+		return convert_error(zdev, reply);
+	case TYPE86_RSP_CODE:
+		if (msg->hdr.reply_code)
+			return convert_error(zdev, reply);
+		if (msg->cprbx.cprb_ver_id == 0x02)
+			return convert_type86_ica(zdev, reply,
+						  outputdata, outputdatalength);
+		/* no break, incorrect cprb version is an unknown response */
+	default: /* Unknown response type, this should NEVER EVER happen */
+		PRINTK("Unrecognized Message Header: %08x%08x\n",
+		       *(unsigned int *) reply->message,
+		       *(unsigned int *) (reply->message+4));
+		zdev->online = 0;
+		return -EAGAIN;	/* repeat the request on a different device. */
+	}
+}
+
+static int convert_response_xcrb(struct zcrypt_device *zdev,
+				 struct ap_message *reply,
+				 struct ica_xcRB *xcRB)
+{
+	struct type86x_reply *msg = reply->message;
+
+	/* Response type byte is the second byte in the response.
*/ + switch (((unsigned char *) reply->message)[1]) { + case TYPE82_RSP_CODE: + case TYPE88_RSP_CODE: + xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ + return convert_error(zdev, reply); + case TYPE86_RSP_CODE: + if (msg->hdr.reply_code) { + memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32)); + return convert_error(zdev, reply); + } + if (msg->cprbx.cprb_ver_id == 0x02) + return convert_type86_xcrb(zdev, reply, xcRB); + /* no break, incorrect cprb version is an unknown response */ + default: /* Unknown response type, this should NEVER EVER happen */ + PRINTK("Unrecognized Message Header: %08x%08x\n", + *(unsigned int *) reply->message, + *(unsigned int *) (reply->message+4)); + xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ + zdev->online = 0; + return -EAGAIN; /* repeat the request on a different device. */ + } +} + +/** + * This function is called from the AP bus code after a crypto request + * "msg" has finished with the reply message "reply". + * It is called from tasklet context. + * @ap_dev: pointer to the AP device + * @msg: pointer to the AP message + * @reply: pointer to the AP reply message + */ +static void zcrypt_pcixcc_receive(struct ap_device *ap_dev, + struct ap_message *msg, + struct ap_message *reply) +{ + static struct error_hdr error_reply = { + .type = TYPE82_RSP_CODE, + .reply_code = REP82_ERROR_MACHINE_FAILURE, + }; + struct response_type *resp_type = + (struct response_type *) msg->private; + struct type86x_reply *t86r = reply->message; + int length; + + /* Copy the reply message to the request message buffer. */ + if (IS_ERR(reply)) + memcpy(msg->message, &error_reply, sizeof(error_reply)); + else if (t86r->hdr.type == TYPE86_RSP_CODE && + t86r->cprbx.cprb_ver_id == 0x02) { + switch (resp_type->type) { + case PCIXCC_RESPONSE_TYPE_ICA: + length = sizeof(struct type86x_reply) + + t86r->length - 2; + length = min(PCIXCC_MAX_ICA_RESPONSE_SIZE, length); + memcpy(msg->message, reply->message, length); + break; + case PCIXCC_RESPONSE_TYPE_XCRB: + length = t86r->fmt2.offset2 + t86r->fmt2.count2; + length = min(PCIXCC_MAX_XCRB_RESPONSE_SIZE, length); + memcpy(msg->message, reply->message, length); + break; + default: + PRINTK("Invalid internal response type: %i\n", + resp_type->type); + memcpy(msg->message, &error_reply, + sizeof error_reply); + } + } else + memcpy(msg->message, reply->message, sizeof error_reply); + complete(&(resp_type->work)); +} + +static atomic_t zcrypt_step = ATOMIC_INIT(0); + +/** + * The request distributor calls this function if it picked the PCIXCC/CEX2C + * device to handle a modexpo request. 
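+ * Replies are matched to requests through the 64-bit psmid built
+ * below: the caller's pid in the upper 32 bits plus a global sequence
+ * counter in the lower 32. As a sketch of the encoding (illustrative,
+ * not driver code):
+ *
+ *	u64 psmid = ((u64) current->pid << 32) +
+ *		    atomic_inc_return(&zcrypt_step);
+ *	pid_t origin = psmid >> 32;	// originating process
+ *	u32 seq = (u32) psmid;		// per-request sequence
+ *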
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ *	  PCIXCC/CEX2C device to the request distributor
+ * @mex: pointer to the modexpo request buffer
+ */
+static long zcrypt_pcixcc_modexpo(struct zcrypt_device *zdev,
+				  struct ica_rsa_modexpo *mex)
+{
+	struct ap_message ap_msg;
+	struct response_type resp_type = {
+		.type = PCIXCC_RESPONSE_TYPE_ICA,
+	};
+	int rc;
+
+	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
+	if (!ap_msg.message)
+		return -ENOMEM;
+	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+				atomic_inc_return(&zcrypt_step);
+	ap_msg.private = &resp_type;
+	rc = ICAMEX_msg_to_type6MEX_msgX(zdev, &ap_msg, mex);
+	if (rc)
+		goto out_free;
+	init_completion(&resp_type.work);
+	ap_queue_message(zdev->ap_dev, &ap_msg);
+	rc = wait_for_completion_interruptible_timeout(
+				&resp_type.work, PCIXCC_CLEANUP_TIME);
+	if (rc > 0)
+		rc = convert_response_ica(zdev, &ap_msg, mex->outputdata,
+					  mex->outputdatalength);
+	else {
+		/* Signal pending or message timed out. */
+		ap_cancel_message(zdev->ap_dev, &ap_msg);
+		if (rc == 0)
+			/* Message timed out. */
+			rc = -ETIME;
+	}
+out_free:
+	free_page((unsigned long) ap_msg.message);
+	return rc;
+}
+
+/**
+ * The request distributor calls this function if it picked the PCIXCC/CEX2C
+ * device to handle a modexpo_crt request.
+ * @zdev: pointer to zcrypt_device structure that identifies the
+ *	  PCIXCC/CEX2C device to the request distributor
+ * @crt: pointer to the modexpo_crt request buffer
+ */
+static long zcrypt_pcixcc_modexpo_crt(struct zcrypt_device *zdev,
+				      struct ica_rsa_modexpo_crt *crt)
+{
+	struct ap_message ap_msg;
+	struct response_type resp_type = {
+		.type = PCIXCC_RESPONSE_TYPE_ICA,
+	};
+	int rc;
+
+	ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
+	if (!ap_msg.message)
+		return -ENOMEM;
+	ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
+				atomic_inc_return(&zcrypt_step);
+	ap_msg.private = &resp_type;
+	rc = ICACRT_msg_to_type6CRT_msgX(zdev, &ap_msg, crt);
+	if (rc)
+		goto out_free;
+	init_completion(&resp_type.work);
+	ap_queue_message(zdev->ap_dev, &ap_msg);
+	rc = wait_for_completion_interruptible_timeout(
+				&resp_type.work, PCIXCC_CLEANUP_TIME);
+	if (rc > 0)
+		rc = convert_response_ica(zdev, &ap_msg, crt->outputdata,
+					  crt->outputdatalength);
+	else {
+		/* Signal pending or message timed out. */
+		ap_cancel_message(zdev->ap_dev, &ap_msg);
+		if (rc == 0)
+			/* Message timed out. */
+			rc = -ETIME;
+	}
+out_free:
+	free_page((unsigned long) ap_msg.message);
+	return rc;
+}
+
+/**
+ * The request distributor calls this function if it picked the PCIXCC/CEX2C
+ * device to handle a send_cprb request.
+ * @zdev: pointer to zcrypt_device structure that identifies the + * PCIXCC/CEX2C device to the request distributor + * @xcRB: pointer to the send_cprb request buffer + */ +long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev, struct ica_xcRB *xcRB) +{ + struct ap_message ap_msg; + struct response_type resp_type = { + .type = PCIXCC_RESPONSE_TYPE_XCRB, + }; + int rc; + + ap_msg.message = (void *) kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL); + if (!ap_msg.message) + return -ENOMEM; + ap_msg.psmid = (((unsigned long long) current->pid) << 32) + + atomic_inc_return(&zcrypt_step); + ap_msg.private = &resp_type; + rc = XCRB_msg_to_type6CPRB_msgX(zdev, &ap_msg, xcRB); + if (rc) + goto out_free; + init_completion(&resp_type.work); + ap_queue_message(zdev->ap_dev, &ap_msg); + rc = wait_for_completion_interruptible_timeout( + &resp_type.work, PCIXCC_CLEANUP_TIME); + if (rc > 0) + rc = convert_response_xcrb(zdev, &ap_msg, xcRB); + else { + /* Signal pending or message timed out. */ + ap_cancel_message(zdev->ap_dev, &ap_msg); + if (rc == 0) + /* Message timed out. */ + rc = -ETIME; + } +out_free: + memset(ap_msg.message, 0x0, ap_msg.length); + kfree(ap_msg.message); + return rc; +} + +/** + * The crypto operations for a PCIXCC/CEX2C card. + */ +static struct zcrypt_ops zcrypt_pcixcc_ops = { + .rsa_modexpo = zcrypt_pcixcc_modexpo, + .rsa_modexpo_crt = zcrypt_pcixcc_modexpo_crt, + .send_cprb = zcrypt_pcixcc_send_cprb, +}; + +/** + * Micro-code detection function. Its sends a message to a pcixcc card + * to find out the microcode level. + * @ap_dev: pointer to the AP device. + */ +static int zcrypt_pcixcc_mcl(struct ap_device *ap_dev) +{ + static unsigned char msg[] = { + 0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00, + 0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8, + 0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A, + 0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20, + 0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05, + 0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D, + 0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55, + 
0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD, + 0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA, + 0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22, + 0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB, + 0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54, + 0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00, + 0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00, + 0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40, + 0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C, + 0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF, + 0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9, + 0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63, + 0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5, + 0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A, + 0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01, + 0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28, + 0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91, + 0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5, + 0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C, + 0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98, + 0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96, + 0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19, + 0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47, + 0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36, + 0xF1,0x3D,0x93,0x53 + }; + unsigned long long psmid; + struct CPRBX *cprbx; + char *reply; + int rc, i; + + reply = (void *) get_zeroed_page(GFP_KERNEL); + if (!reply) + return -ENOMEM; + + rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, msg, sizeof(msg)); + if (rc) + goto out_free; + + /* Wait for the test message to complete. */ + for (i = 0; i < 6; i++) { + mdelay(300); + rc = ap_recv(ap_dev->qid, &psmid, reply, 4096); + if (rc == 0 && psmid == 0x0102030405060708ULL) + break; + } + + if (i >= 6) { + /* Got no answer. */ + rc = -ENODEV; + goto out_free; + } + + cprbx = (struct CPRBX *) (reply + 48); + if (cprbx->ccp_rtcode == 8 && cprbx->ccp_rscode == 33) + rc = ZCRYPT_PCIXCC_MCL2; + else + rc = ZCRYPT_PCIXCC_MCL3; +out_free: + free_page((unsigned long) reply); + return rc; +} + +/** + * Probe function for PCIXCC/CEX2C cards. It always accepts the AP device + * since the bus_match already checked the hardware type. The PCIXCC + * cards come in two flavours: micro code level 2 and micro code level 3. + * This is checked by sending a test message to the device. + * @ap_dev: pointer to the AP device. + */ +static int zcrypt_pcixcc_probe(struct ap_device *ap_dev) +{ + struct zcrypt_device *zdev; + int rc; + + zdev = zcrypt_device_alloc(PCIXCC_MAX_RESPONSE_SIZE); + if (!zdev) + return -ENOMEM; + zdev->ap_dev = ap_dev; + zdev->ops = &zcrypt_pcixcc_ops; + zdev->online = 1; + if (ap_dev->device_type == AP_DEVICE_TYPE_PCIXCC) { + rc = zcrypt_pcixcc_mcl(ap_dev); + if (rc < 0) { + zcrypt_device_free(zdev); + return rc; + } + zdev->user_space_type = rc; + if (rc == ZCRYPT_PCIXCC_MCL2) { + zdev->type_string = "PCIXCC_MCL2"; + zdev->speed_rating = PCIXCC_MCL2_SPEED_RATING; + zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE_OLD; + zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE; + } else { + zdev->type_string = "PCIXCC_MCL3"; + zdev->speed_rating = PCIXCC_MCL3_SPEED_RATING; + zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE; + zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE; + } + } else { + zdev->user_space_type = ZCRYPT_CEX2C; + zdev->type_string = "CEX2C"; + zdev->speed_rating = CEX2C_SPEED_RATING; + zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE; + zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE; + } + ap_dev->reply = &zdev->reply; + ap_dev->private = zdev; + rc = zcrypt_device_register(zdev); + if (rc) + goto out_free; + return 0; + + out_free: + ap_dev->private = NULL; + zcrypt_device_free(zdev); + return rc; +} + +/** + * This is called to remove the extended PCIXCC/CEX2C driver information + * if an AP device is removed. 
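+ *
+ * For reference, zcrypt_pcixcc_mcl() above decides the microcode level
+ * by bounded polling, roughly (a condensed sketch of the loop):
+ *
+ *	for (i = 0; i < 6; i++) {		// up to ~1.8 seconds
+ *		mdelay(300);
+ *		rc = ap_recv(ap_dev->qid, &psmid, reply, 4096);
+ *		if (rc == 0 && psmid == 0x0102030405060708ULL)
+ *			break;			// test reply arrived
+ *	}
+ *
+ * and a CPRBX return/reason code of 8/33 then selects MCL2, anything
+ * else MCL3.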
+ */ +static void zcrypt_pcixcc_remove(struct ap_device *ap_dev) +{ + struct zcrypt_device *zdev = ap_dev->private; + + zcrypt_device_unregister(zdev); +} + +int __init zcrypt_pcixcc_init(void) +{ + return ap_driver_register(&zcrypt_pcixcc_driver, THIS_MODULE, "pcixcc"); +} + +void zcrypt_pcixcc_exit(void) +{ + ap_driver_unregister(&zcrypt_pcixcc_driver); +} + +#ifndef CONFIG_ZCRYPT_MONOLITHIC +module_init(zcrypt_pcixcc_init); +module_exit(zcrypt_pcixcc_exit); +#endif diff --git a/drivers/s390/crypto/zcrypt_pcixcc.h b/drivers/s390/crypto/zcrypt_pcixcc.h new file mode 100644 index 000000000000..a78ff307fd19 --- /dev/null +++ b/drivers/s390/crypto/zcrypt_pcixcc.h @@ -0,0 +1,79 @@ +/* + * linux/drivers/s390/crypto/zcrypt_pcixcc.h + * + * zcrypt 2.1.0 + * + * Copyright (C) 2001, 2006 IBM Corporation + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * + * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _ZCRYPT_PCIXCC_H_ +#define _ZCRYPT_PCIXCC_H_ + +/** + * CPRBX + * Note that all shorts and ints are big-endian. + * All pointer fields are 16 bytes long, and mean nothing. + * + * A request CPRB is followed by a request_parameter_block. + * + * The request (or reply) parameter block is organized thus: + * function code + * VUD block + * key block + */ +struct CPRBX { + unsigned short cprb_len; /* CPRB length 220 */ + unsigned char cprb_ver_id; /* CPRB version id. 
0x02 */ + unsigned char pad_000[3]; /* Alignment pad bytes */ + unsigned char func_id[2]; /* function id 0x5432 */ + unsigned char cprb_flags[4]; /* Flags */ + unsigned int req_parml; /* request parameter buffer len */ + unsigned int req_datal; /* request data buffer */ + unsigned int rpl_msgbl; /* reply message block length */ + unsigned int rpld_parml; /* replied parameter block len */ + unsigned int rpl_datal; /* reply data block len */ + unsigned int rpld_datal; /* replied data block len */ + unsigned int req_extbl; /* request extension block len */ + unsigned char pad_001[4]; /* reserved */ + unsigned int rpld_extbl; /* replied extension block len */ + unsigned char req_parmb[16]; /* request parm block 'address' */ + unsigned char req_datab[16]; /* request data block 'address' */ + unsigned char rpl_parmb[16]; /* reply parm block 'address' */ + unsigned char rpl_datab[16]; /* reply data block 'address' */ + unsigned char req_extb[16]; /* request extension block 'addr'*/ + unsigned char rpl_extb[16]; /* reply extension block 'addres'*/ + unsigned short ccp_rtcode; /* server return code */ + unsigned short ccp_rscode; /* server reason code */ + unsigned int mac_data_len; /* Mac Data Length */ + unsigned char logon_id[8]; /* Logon Identifier */ + unsigned char mac_value[8]; /* Mac Value */ + unsigned char mac_content_flgs;/* Mac content flag byte */ + unsigned char pad_002; /* Alignment */ + unsigned short domain; /* Domain */ + unsigned char pad_003[12]; /* Domain masks */ + unsigned char pad_004[36]; /* reserved */ +} __attribute__((packed)); + +int zcrypt_pcixcc_init(void); +void zcrypt_pcixcc_exit(void); + +#endif /* _ZCRYPT_PCIXCC_H_ */ diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c index 5399c5d99b81..a914129a4da9 100644 --- a/drivers/s390/s390mach.c +++ b/drivers/s390/s390mach.c @@ -19,9 +19,6 @@ #include "s390mach.h" -#define DBG printk -// #define DBG(args,...) do {} while (0); - static struct semaphore m_sem; extern int css_process_crw(int, int); @@ -83,11 +80,11 @@ repeat: ccode = stcrw(&crw[chain]); if (ccode != 0) break; - DBG(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, " - "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", - crw[chain].slct, crw[chain].oflw, crw[chain].chn, - crw[chain].rsc, crw[chain].anc, crw[chain].erc, - crw[chain].rsid); + printk(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, " + "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n", + crw[chain].slct, crw[chain].oflw, crw[chain].chn, + crw[chain].rsc, crw[chain].anc, crw[chain].erc, + crw[chain].rsid); /* Check for overflows. */ if (crw[chain].oflw) { pr_debug("%s: crw overflow detected!\n", __FUNCTION__); @@ -117,8 +114,8 @@ repeat: * reported to the common I/O layer. */ if (crw[chain].slct) { - DBG(KERN_INFO"solicited machine check for " - "channel path %02X\n", crw[0].rsid); + pr_debug("solicited machine check for " + "channel path %02X\n", crw[0].rsid); break; } switch (crw[0].erc) { diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 94d1b74db356..7c84b3d4bd94 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -543,7 +543,7 @@ do { \ } while (0) #if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_NORMAL -# define ZFCP_LOG_NORMAL(fmt, args...) +# define ZFCP_LOG_NORMAL(fmt, args...) do { } while (0) #else # define ZFCP_LOG_NORMAL(fmt, args...) \ do { \ @@ -553,7 +553,7 @@ do { \ #endif #if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_INFO -# define ZFCP_LOG_INFO(fmt, args...) +# define ZFCP_LOG_INFO(fmt, args...) 
do { } while (0) #else # define ZFCP_LOG_INFO(fmt, args...) \ do { \ @@ -563,14 +563,14 @@ do { \ #endif #if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_DEBUG -# define ZFCP_LOG_DEBUG(fmt, args...) +# define ZFCP_LOG_DEBUG(fmt, args...) do { } while (0) #else # define ZFCP_LOG_DEBUG(fmt, args...) \ ZFCP_LOG(ZFCP_LOG_LEVEL_DEBUG, fmt , ##args) #endif #if ZFCP_LOG_LEVEL_LIMIT < ZFCP_LOG_LEVEL_TRACE -# define ZFCP_LOG_TRACE(fmt, args...) +# define ZFCP_LOG_TRACE(fmt, args...) do { } while (0) #else # define ZFCP_LOG_TRACE(fmt, args...) \ ZFCP_LOG(ZFCP_LOG_LEVEL_TRACE, fmt , ##args) diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c index d1c1e75bfd60..1e788e815ce7 100644 --- a/drivers/s390/sysinfo.c +++ b/drivers/s390/sysinfo.c @@ -11,19 +11,18 @@ #include <linux/init.h> #include <asm/ebcdic.h> -struct sysinfo_1_1_1 -{ +struct sysinfo_1_1_1 { char reserved_0[32]; char manufacturer[16]; char type[4]; char reserved_1[12]; - char model[16]; + char model_capacity[16]; char sequence[16]; char plant[4]; + char model[16]; }; -struct sysinfo_1_2_1 -{ +struct sysinfo_1_2_1 { char reserved_0[80]; char sequence[16]; char plant[4]; @@ -31,9 +30,12 @@ struct sysinfo_1_2_1 unsigned short cpu_address; }; -struct sysinfo_1_2_2 -{ - char reserved_0[32]; +struct sysinfo_1_2_2 { + char format; + char reserved_0[1]; + unsigned short acc_offset; + char reserved_1[24]; + unsigned int secondary_capability; unsigned int capability; unsigned short cpus_total; unsigned short cpus_configured; @@ -42,8 +44,12 @@ struct sysinfo_1_2_2 unsigned short adjustment[0]; }; -struct sysinfo_2_2_1 -{ +struct sysinfo_1_2_2_extension { + unsigned int alt_capability; + unsigned short alt_adjustment[0]; +}; + +struct sysinfo_2_2_1 { char reserved_0[80]; char sequence[16]; char plant[4]; @@ -51,15 +57,11 @@ struct sysinfo_2_2_1 unsigned short cpu_address; }; -struct sysinfo_2_2_2 -{ +struct sysinfo_2_2_2 { char reserved_0[32]; unsigned short lpar_number; char reserved_1; unsigned char characteristics; - #define LPAR_CHAR_DEDICATED (1 << 7) - #define LPAR_CHAR_SHARED (1 << 6) - #define LPAR_CHAR_LIMITED (1 << 5) unsigned short cpus_total; unsigned short cpus_configured; unsigned short cpus_standby; @@ -71,12 +73,14 @@ struct sysinfo_2_2_2 unsigned short cpus_shared; }; -struct sysinfo_3_2_2 -{ +#define LPAR_CHAR_DEDICATED (1 << 7) +#define LPAR_CHAR_SHARED (1 << 6) +#define LPAR_CHAR_LIMITED (1 << 5) + +struct sysinfo_3_2_2 { char reserved_0[31]; unsigned char count; - struct - { + struct { char reserved_0[4]; unsigned short cpus_total; unsigned short cpus_configured; @@ -90,136 +94,223 @@ struct sysinfo_3_2_2 } vm[8]; }; -union s390_sysinfo +static inline int stsi(void *sysinfo, int fc, int sel1, int sel2) { - struct sysinfo_1_1_1 sysinfo_1_1_1; - struct sysinfo_1_2_1 sysinfo_1_2_1; - struct sysinfo_1_2_2 sysinfo_1_2_2; - struct sysinfo_2_2_1 sysinfo_2_2_1; - struct sysinfo_2_2_2 sysinfo_2_2_2; - struct sysinfo_3_2_2 sysinfo_3_2_2; -}; - -static inline int stsi (void *sysinfo, - int fc, int sel1, int sel2) -{ - int cc, retv; - -#ifndef CONFIG_64BIT - __asm__ __volatile__ ( "lr\t0,%2\n" - "\tlr\t1,%3\n" - "\tstsi\t0(%4)\n" - "0:\tipm\t%0\n" - "\tsrl\t%0,28\n" - "1:lr\t%1,0\n" - ".section .fixup,\"ax\"\n" - "2:\tlhi\t%0,3\n" - "\tbras\t1,3f\n" - "\t.long 1b\n" - "3:\tl\t1,0(1)\n" - "\tbr\t1\n" - ".previous\n" - ".section __ex_table,\"a\"\n" - "\t.align 4\n" - "\t.long 0b,2b\n" - ".previous\n" - : "=d" (cc), "=d" (retv) - : "d" ((fc << 28) | sel1), "d" (sel2), "a" (sysinfo) - : "cc", "memory", "0", "1" ); -#else - __asm__ __volatile__ 
( "lr\t0,%2\n" - "lr\t1,%3\n" - "\tstsi\t0(%4)\n" - "0:\tipm\t%0\n" - "\tsrl\t%0,28\n" - "1:lr\t%1,0\n" - ".section .fixup,\"ax\"\n" - "2:\tlhi\t%0,3\n" - "\tjg\t1b\n" - ".previous\n" - ".section __ex_table,\"a\"\n" - "\t.align 8\n" - "\t.quad 0b,2b\n" - ".previous\n" - : "=d" (cc), "=d" (retv) - : "d" ((fc << 28) | sel1), "d" (sel2), "a" (sysinfo) - : "cc", "memory", "0", "1" ); -#endif - - return cc? -1 : retv; + register int r0 asm("0") = (fc << 28) | sel1; + register int r1 asm("1") = sel2; + + asm volatile( + " stsi 0(%2)\n" + "0: jz 2f\n" + "1: lhi %0,%3\n" + "2:\n" + EX_TABLE(0b,1b) + : "+d" (r0) : "d" (r1), "a" (sysinfo), "K" (-ENOSYS) + : "cc", "memory" ); + return r0; } -static inline int stsi_0 (void) +static inline int stsi_0(void) { int rc = stsi (NULL, 0, 0, 0); - return rc == -1 ? rc : (((unsigned int)rc) >> 28); + return rc == -ENOSYS ? rc : (((unsigned int) rc) >> 28); } -static inline int stsi_1_1_1 (struct sysinfo_1_1_1 *info) +static int stsi_1_1_1(struct sysinfo_1_1_1 *info, char *page, int len) { - int rc = stsi (info, 1, 1, 1); - if (rc != -1) - { - EBCASC (info->manufacturer, sizeof(info->manufacturer)); - EBCASC (info->type, sizeof(info->type)); - EBCASC (info->model, sizeof(info->model)); - EBCASC (info->sequence, sizeof(info->sequence)); - EBCASC (info->plant, sizeof(info->plant)); - } - return rc == -1 ? rc : 0; + if (stsi(info, 1, 1, 1) == -ENOSYS) + return len; + + EBCASC(info->manufacturer, sizeof(info->manufacturer)); + EBCASC(info->type, sizeof(info->type)); + EBCASC(info->model, sizeof(info->model)); + EBCASC(info->sequence, sizeof(info->sequence)); + EBCASC(info->plant, sizeof(info->plant)); + EBCASC(info->model_capacity, sizeof(info->model_capacity)); + len += sprintf(page + len, "Manufacturer: %-16.16s\n", + info->manufacturer); + len += sprintf(page + len, "Type: %-4.4s\n", + info->type); + if (info->model[0] != '\0') + /* + * Sigh: the model field has been renamed with System z9 + * to model_capacity and a new model field has been added + * after the plant field. To avoid confusing older programs + * the "Model:" prints "model_capacity model" or just + * "model_capacity" if the model string is empty . + */ + len += sprintf(page + len, + "Model: %-16.16s %-16.16s\n", + info->model_capacity, info->model); + else + len += sprintf(page + len, "Model: %-16.16s\n", + info->model_capacity); + len += sprintf(page + len, "Sequence Code: %-16.16s\n", + info->sequence); + len += sprintf(page + len, "Plant: %-4.4s\n", + info->plant); + len += sprintf(page + len, "Model Capacity: %-16.16s\n", + info->model_capacity); + return len; } -static inline int stsi_1_2_1 (struct sysinfo_1_2_1 *info) +#if 0 /* Currently unused */ +static int stsi_1_2_1(struct sysinfo_1_2_1 *info, char *page, int len) { - int rc = stsi (info, 1, 2, 1); - if (rc != -1) - { - EBCASC (info->sequence, sizeof(info->sequence)); - EBCASC (info->plant, sizeof(info->plant)); - } - return rc == -1 ? rc : 0; + if (stsi(info, 1, 2, 1) == -ENOSYS) + return len; + + len += sprintf(page + len, "\n"); + EBCASC(info->sequence, sizeof(info->sequence)); + EBCASC(info->plant, sizeof(info->plant)); + len += sprintf(page + len, "Sequence Code of CPU: %-16.16s\n", + info->sequence); + len += sprintf(page + len, "Plant of CPU: %-16.16s\n", + info->plant); + return len; } +#endif -static inline int stsi_1_2_2 (struct sysinfo_1_2_2 *info) +static int stsi_1_2_2(struct sysinfo_1_2_2 *info, char *page, int len) { - int rc = stsi (info, 1, 2, 2); - return rc == -1 ? 
rc : 0;
+	struct sysinfo_1_2_2_extension *ext;
+	int i;
+
+	if (stsi(info, 1, 2, 2) == -ENOSYS)
+		return len;
+	ext = (struct sysinfo_1_2_2_extension *)
+		((unsigned long) info + info->acc_offset);
+
+	len += sprintf(page + len, "\n");
+	len += sprintf(page + len, "CPUs Total: %d\n",
+		       info->cpus_total);
+	len += sprintf(page + len, "CPUs Configured: %d\n",
+		       info->cpus_configured);
+	len += sprintf(page + len, "CPUs Standby: %d\n",
+		       info->cpus_standby);
+	len += sprintf(page + len, "CPUs Reserved: %d\n",
+		       info->cpus_reserved);
+
+	if (info->format == 1) {
+		/*
+		 * Sigh 2. According to the specification the alternate
+		 * capability field is a 32 bit floating point number
+		 * if the higher order 8 bits are not zero. Printing
+		 * a floating point number in the kernel is a no-no,
+		 * always print the number as 32 bit unsigned integer.
+		 * The user-space needs to know about the strange
+		 * encoding of the alternate cpu capability.
+		 */
+		len += sprintf(page + len, "Capability: %u %u\n",
+			       info->capability, ext->alt_capability);
+		for (i = 2; i <= info->cpus_total; i++)
+			len += sprintf(page + len,
+				       "Adjustment %02d-way: %u %u\n",
+				       i, info->adjustment[i-2],
+				       ext->alt_adjustment[i-2]);
+
+	} else {
+		len += sprintf(page + len, "Capability: %u\n",
+			       info->capability);
+		for (i = 2; i <= info->cpus_total; i++)
+			len += sprintf(page + len,
+				       "Adjustment %02d-way: %u\n",
+				       i, info->adjustment[i-2]);
+	}
+
+	if (info->secondary_capability != 0)
+		len += sprintf(page + len, "Secondary Capability: %d\n",
+			       info->secondary_capability);
+
+	return len;
 }
-static inline int stsi_2_2_1 (struct sysinfo_2_2_1 *info)
+#if 0 /* Currently unused */
+static int stsi_2_2_1(struct sysinfo_2_2_1 *info, char *page, int len)
 {
-	int rc = stsi (info, 2, 2, 1);
-	if (rc != -1)
-	{
-		EBCASC (info->sequence, sizeof(info->sequence));
-		EBCASC (info->plant, sizeof(info->plant));
-	}
-	return rc == -1 ? rc : 0;
+	if (stsi(info, 2, 2, 1) == -ENOSYS)
+		return len;
+
+	len += sprintf(page + len, "\n");
+	EBCASC (info->sequence, sizeof(info->sequence));
+	EBCASC (info->plant, sizeof(info->plant));
+	len += sprintf(page + len, "Sequence Code of logical CPU: %-16.16s\n",
+		       info->sequence);
+	len += sprintf(page + len, "Plant of logical CPU: %-16.16s\n",
+		       info->plant);
+	return len;
 }
+#endif
-static inline int stsi_2_2_2 (struct sysinfo_2_2_2 *info)
+static int stsi_2_2_2(struct sysinfo_2_2_2 *info, char *page, int len)
 {
-	int rc = stsi (info, 2, 2, 2);
-	if (rc != -1)
-	{
-		EBCASC (info->name, sizeof(info->name));
-	}
-	return rc == -1 ?
rc : 0; + if (stsi(info, 2, 2, 2) == -ENOSYS) + return len; + + EBCASC (info->name, sizeof(info->name)); + + len += sprintf(page + len, "\n"); + len += sprintf(page + len, "LPAR Number: %d\n", + info->lpar_number); + + len += sprintf(page + len, "LPAR Characteristics: "); + if (info->characteristics & LPAR_CHAR_DEDICATED) + len += sprintf(page + len, "Dedicated "); + if (info->characteristics & LPAR_CHAR_SHARED) + len += sprintf(page + len, "Shared "); + if (info->characteristics & LPAR_CHAR_LIMITED) + len += sprintf(page + len, "Limited "); + len += sprintf(page + len, "\n"); + + len += sprintf(page + len, "LPAR Name: %-8.8s\n", + info->name); + + len += sprintf(page + len, "LPAR Adjustment: %d\n", + info->caf); + + len += sprintf(page + len, "LPAR CPUs Total: %d\n", + info->cpus_total); + len += sprintf(page + len, "LPAR CPUs Configured: %d\n", + info->cpus_configured); + len += sprintf(page + len, "LPAR CPUs Standby: %d\n", + info->cpus_standby); + len += sprintf(page + len, "LPAR CPUs Reserved: %d\n", + info->cpus_reserved); + len += sprintf(page + len, "LPAR CPUs Dedicated: %d\n", + info->cpus_dedicated); + len += sprintf(page + len, "LPAR CPUs Shared: %d\n", + info->cpus_shared); + return len; } -static inline int stsi_3_2_2 (struct sysinfo_3_2_2 *info) +static int stsi_3_2_2(struct sysinfo_3_2_2 *info, char *page, int len) { - int rc = stsi (info, 3, 2, 2); - if (rc != -1) - { - int i; - for (i = 0; i < info->count; i++) - { - EBCASC (info->vm[i].name, sizeof(info->vm[i].name)); - EBCASC (info->vm[i].cpi, sizeof(info->vm[i].cpi)); - } + int i; + + if (stsi(info, 3, 2, 2) == -ENOSYS) + return len; + for (i = 0; i < info->count; i++) { + EBCASC (info->vm[i].name, sizeof(info->vm[i].name)); + EBCASC (info->vm[i].cpi, sizeof(info->vm[i].cpi)); + len += sprintf(page + len, "\n"); + len += sprintf(page + len, "VM%02d Name: %-8.8s\n", + i, info->vm[i].name); + len += sprintf(page + len, "VM%02d Control Program: %-16.16s\n", + i, info->vm[i].cpi); + + len += sprintf(page + len, "VM%02d Adjustment: %d\n", + i, info->vm[i].caf); + + len += sprintf(page + len, "VM%02d CPUs Total: %d\n", + i, info->vm[i].cpus_total); + len += sprintf(page + len, "VM%02d CPUs Configured: %d\n", + i, info->vm[i].cpus_configured); + len += sprintf(page + len, "VM%02d CPUs Standby: %d\n", + i, info->vm[i].cpus_standby); + len += sprintf(page + len, "VM%02d CPUs Reserved: %d\n", + i, info->vm[i].cpus_reserved); } - return rc == -1 ? 
rc : 0; + return len; } @@ -227,118 +318,34 @@ static int proc_read_sysinfo(char *page, char **start, off_t off, int count, int *eof, void *data) { - unsigned long info_page = get_zeroed_page (GFP_KERNEL); - union s390_sysinfo *info = (union s390_sysinfo *) info_page; - int len = 0; - int level; - int i; + unsigned long info = get_zeroed_page (GFP_KERNEL); + int level, len; if (!info) return 0; - level = stsi_0 (); - - if (level >= 1 && stsi_1_1_1 (&info->sysinfo_1_1_1) == 0) - { - len += sprintf (page+len, "Manufacturer: %-16.16s\n", - info->sysinfo_1_1_1.manufacturer); - len += sprintf (page+len, "Type: %-4.4s\n", - info->sysinfo_1_1_1.type); - len += sprintf (page+len, "Model: %-16.16s\n", - info->sysinfo_1_1_1.model); - len += sprintf (page+len, "Sequence Code: %-16.16s\n", - info->sysinfo_1_1_1.sequence); - len += sprintf (page+len, "Plant: %-4.4s\n", - info->sysinfo_1_1_1.plant); - } - - if (level >= 1 && stsi_1_2_2 (&info->sysinfo_1_2_2) == 0) - { - len += sprintf (page+len, "\n"); - len += sprintf (page+len, "CPUs Total: %d\n", - info->sysinfo_1_2_2.cpus_total); - len += sprintf (page+len, "CPUs Configured: %d\n", - info->sysinfo_1_2_2.cpus_configured); - len += sprintf (page+len, "CPUs Standby: %d\n", - info->sysinfo_1_2_2.cpus_standby); - len += sprintf (page+len, "CPUs Reserved: %d\n", - info->sysinfo_1_2_2.cpus_reserved); - - len += sprintf (page+len, "Capability: %d\n", - info->sysinfo_1_2_2.capability); + len = 0; + level = stsi_0(); + if (level >= 1) + len = stsi_1_1_1((struct sysinfo_1_1_1 *) info, page, len); - for (i = 2; i <= info->sysinfo_1_2_2.cpus_total; i++) - len += sprintf (page+len, "Adjustment %02d-way: %d\n", - i, info->sysinfo_1_2_2.adjustment[i-2]); - } + if (level >= 1) + len = stsi_1_2_2((struct sysinfo_1_2_2 *) info, page, len); - if (level >= 2 && stsi_2_2_2 (&info->sysinfo_2_2_2) == 0) - { - len += sprintf (page+len, "\n"); - len += sprintf (page+len, "LPAR Number: %d\n", - info->sysinfo_2_2_2.lpar_number); - - len += sprintf (page+len, "LPAR Characteristics: "); - if (info->sysinfo_2_2_2.characteristics & LPAR_CHAR_DEDICATED) - len += sprintf (page+len, "Dedicated "); - if (info->sysinfo_2_2_2.characteristics & LPAR_CHAR_SHARED) - len += sprintf (page+len, "Shared "); - if (info->sysinfo_2_2_2.characteristics & LPAR_CHAR_LIMITED) - len += sprintf (page+len, "Limited "); - len += sprintf (page+len, "\n"); - - len += sprintf (page+len, "LPAR Name: %-8.8s\n", - info->sysinfo_2_2_2.name); - - len += sprintf (page+len, "LPAR Adjustment: %d\n", - info->sysinfo_2_2_2.caf); - - len += sprintf (page+len, "LPAR CPUs Total: %d\n", - info->sysinfo_2_2_2.cpus_total); - len += sprintf (page+len, "LPAR CPUs Configured: %d\n", - info->sysinfo_2_2_2.cpus_configured); - len += sprintf (page+len, "LPAR CPUs Standby: %d\n", - info->sysinfo_2_2_2.cpus_standby); - len += sprintf (page+len, "LPAR CPUs Reserved: %d\n", - info->sysinfo_2_2_2.cpus_reserved); - len += sprintf (page+len, "LPAR CPUs Dedicated: %d\n", - info->sysinfo_2_2_2.cpus_dedicated); - len += sprintf (page+len, "LPAR CPUs Shared: %d\n", - info->sysinfo_2_2_2.cpus_shared); - } + if (level >= 2) + len = stsi_2_2_2((struct sysinfo_2_2_2 *) info, page, len); - if (level >= 3 && stsi_3_2_2 (&info->sysinfo_3_2_2) == 0) - { - for (i = 0; i < info->sysinfo_3_2_2.count; i++) - { - len += sprintf (page+len, "\n"); - len += sprintf (page+len, "VM%02d Name: %-8.8s\n", - i, info->sysinfo_3_2_2.vm[i].name); - len += sprintf (page+len, "VM%02d Control Program: %-16.16s\n", - i, info->sysinfo_3_2_2.vm[i].cpi); - - len += 
sprintf (page+len, "VM%02d Adjustment: %d\n", - i, info->sysinfo_3_2_2.vm[i].caf); - - len += sprintf (page+len, "VM%02d CPUs Total: %d\n", - i, info->sysinfo_3_2_2.vm[i].cpus_total); - len += sprintf (page+len, "VM%02d CPUs Configured: %d\n", - i, info->sysinfo_3_2_2.vm[i].cpus_configured); - len += sprintf (page+len, "VM%02d CPUs Standby: %d\n", - i, info->sysinfo_3_2_2.vm[i].cpus_standby); - len += sprintf (page+len, "VM%02d CPUs Reserved: %d\n", - i, info->sysinfo_3_2_2.vm[i].cpus_reserved); - } - } + if (level >= 3) + len = stsi_3_2_2((struct sysinfo_3_2_2 *) info, page, len); - free_page (info_page); + free_page (info); return len; } static __init int create_proc_sysinfo(void) { - create_proc_read_entry ("sysinfo", 0444, NULL, - proc_read_sysinfo, NULL); + create_proc_read_entry("sysinfo", 0444, NULL, + proc_read_sysinfo, NULL); return 0; } diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c index ed22b96580c6..01b8ac641eb8 100644 --- a/drivers/scsi/ibmvscsi/rpa_vscsi.c +++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c @@ -156,8 +156,8 @@ static void gather_partition_info(void) { struct device_node *rootdn; - char *ppartition_name; - unsigned int *p_number_ptr; + const char *ppartition_name; + const unsigned int *p_number_ptr; /* Retrieve information about this partition */ rootdn = find_path_device("/"); @@ -165,14 +165,11 @@ static void gather_partition_info(void) return; } - ppartition_name = - get_property(rootdn, "ibm,partition-name", NULL); + ppartition_name = get_property(rootdn, "ibm,partition-name", NULL); if (ppartition_name) strncpy(partition_name, ppartition_name, sizeof(partition_name)); - p_number_ptr = - (unsigned int *)get_property(rootdn, "ibm,partition-no", - NULL); + p_number_ptr = get_property(rootdn, "ibm,partition-no", NULL); if (p_number_ptr) partition_number = *p_number_ptr; } diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 058f094f945a..66a1ae1d6982 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -26,6 +26,7 @@ * Zhenyu Wang */ +#include <linux/err.h> #include <linux/types.h> #include <linux/list.h> #include <linux/inet.h> @@ -107,8 +108,11 @@ iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf, u8* crc) { struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct hash_desc desc; - crypto_digest_digest(tcp_conn->tx_tfm, &buf->sg, 1, crc); + desc.tfm = tcp_conn->tx_tfm; + desc.flags = 0; + crypto_hash_digest(&desc, &buf->sg, buf->sg.length, crc); buf->sg.length += sizeof(uint32_t); } @@ -452,11 +456,14 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn) } if (conn->hdrdgst_en) { + struct hash_desc desc; struct scatterlist sg; sg_init_one(&sg, (u8 *)hdr, sizeof(struct iscsi_hdr) + ahslen); - crypto_digest_digest(tcp_conn->rx_tfm, &sg, 1, (u8 *)&cdgst); + desc.tfm = tcp_conn->rx_tfm; + desc.flags = 0; + crypto_hash_digest(&desc, &sg, sg.length, (u8 *)&cdgst); rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) + ahslen); if (cdgst != rdgst) { @@ -673,7 +680,7 @@ partial_sg_digest_update(struct iscsi_tcp_conn *tcp_conn, memcpy(&temp, sg, sizeof(struct scatterlist)); temp.offset = offset; temp.length = length; - crypto_digest_update(tcp_conn->data_rx_tfm, &temp, 1); + crypto_hash_update(&tcp_conn->data_rx_hash, &temp, length); } static void @@ -682,7 +689,7 @@ iscsi_recv_digest_update(struct iscsi_tcp_conn *tcp_conn, char* buf, int len) struct scatterlist tmp; sg_init_one(&tmp, buf, len); - crypto_digest_update(tcp_conn->data_rx_tfm, &tmp, 1); + 
crypto_hash_update(&tcp_conn->data_rx_hash, &tmp, len); } static int iscsi_scsi_data_in(struct iscsi_conn *conn) @@ -736,9 +743,9 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn) if (!rc) { if (conn->datadgst_en) { if (!offset) - crypto_digest_update( - tcp_conn->data_rx_tfm, - &sg[i], 1); + crypto_hash_update( + &tcp_conn->data_rx_hash, + &sg[i], sg[i].length); else partial_sg_digest_update(tcp_conn, &sg[i], @@ -877,8 +884,7 @@ more: rc = iscsi_tcp_hdr_recv(conn); if (!rc && tcp_conn->in.datalen) { if (conn->datadgst_en) { - BUG_ON(!tcp_conn->data_rx_tfm); - crypto_digest_init(tcp_conn->data_rx_tfm); + crypto_hash_init(&tcp_conn->data_rx_hash); } tcp_conn->in_progress = IN_PROGRESS_DATA_RECV; } else if (rc) { @@ -931,11 +937,11 @@ more: tcp_conn->in.padding); memset(pad, 0, tcp_conn->in.padding); sg_init_one(&sg, pad, tcp_conn->in.padding); - crypto_digest_update(tcp_conn->data_rx_tfm, - &sg, 1); + crypto_hash_update(&tcp_conn->data_rx_hash, + &sg, sg.length); } - crypto_digest_final(tcp_conn->data_rx_tfm, - (u8 *) & tcp_conn->in.datadgst); + crypto_hash_final(&tcp_conn->data_rx_hash, + (u8 *)&tcp_conn->in.datadgst); debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst); tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV; } else @@ -1181,8 +1187,7 @@ iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn, { struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data; - BUG_ON(!tcp_conn->data_tx_tfm); - crypto_digest_init(tcp_conn->data_tx_tfm); + crypto_hash_init(&tcp_conn->data_tx_hash); tcp_ctask->digest_count = 4; } @@ -1196,7 +1201,7 @@ iscsi_digest_final_send(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask, int sent = 0; if (final) - crypto_digest_final(tcp_conn->data_tx_tfm, (u8*)digest); + crypto_hash_final(&tcp_conn->data_tx_hash, (u8 *)digest); iscsi_buf_init_iov(buf, (char*)digest, 4); rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent); @@ -1491,16 +1496,17 @@ handle_xmstate_imm_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) if (rc) { tcp_ctask->xmstate |= XMSTATE_IMM_DATA; if (conn->datadgst_en) { - crypto_digest_final(tcp_conn->data_tx_tfm, - (u8*)&tcp_ctask->immdigest); + crypto_hash_final(&tcp_conn->data_tx_hash, + (u8 *)&tcp_ctask->immdigest); debug_tcp("tx imm sendpage fail 0x%x\n", tcp_ctask->datadigest); } return rc; } if (conn->datadgst_en) - crypto_digest_update(tcp_conn->data_tx_tfm, - &tcp_ctask->sendbuf.sg, 1); + crypto_hash_update(&tcp_conn->data_tx_hash, + &tcp_ctask->sendbuf.sg, + tcp_ctask->sendbuf.sg.length); if (!ctask->imm_count) break; @@ -1577,8 +1583,8 @@ handle_xmstate_uns_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) tcp_ctask->xmstate |= XMSTATE_UNS_DATA; /* will continue with this ctask later.. */ if (conn->datadgst_en) { - crypto_digest_final(tcp_conn->data_tx_tfm, - (u8 *)&dtask->digest); + crypto_hash_final(&tcp_conn->data_tx_hash, + (u8 *)&dtask->digest); debug_tcp("tx uns data fail 0x%x\n", dtask->digest); } @@ -1593,8 +1599,9 @@ handle_xmstate_uns_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) * so pass it */ if (conn->datadgst_en && tcp_ctask->sent - start > 0) - crypto_digest_update(tcp_conn->data_tx_tfm, - &tcp_ctask->sendbuf.sg, 1); + crypto_hash_update(&tcp_conn->data_tx_hash, + &tcp_ctask->sendbuf.sg, + tcp_ctask->sendbuf.sg.length); if (!ctask->data_count) break; @@ -1668,7 +1675,7 @@ solicit_again: tcp_ctask->xmstate |= XMSTATE_SOL_DATA; /* will continue with this ctask later.. 
*/ if (conn->datadgst_en) { - crypto_digest_final(tcp_conn->data_tx_tfm, + crypto_hash_final(&tcp_conn->data_tx_hash, (u8 *)&dtask->digest); debug_tcp("r2t data send fail 0x%x\n", dtask->digest); } @@ -1677,8 +1684,8 @@ solicit_again: BUG_ON(r2t->data_count < 0); if (conn->datadgst_en) - crypto_digest_update(tcp_conn->data_tx_tfm, &r2t->sendbuf.sg, - 1); + crypto_hash_update(&tcp_conn->data_tx_hash, &r2t->sendbuf.sg, + r2t->sendbuf.sg.length); if (r2t->data_count) { BUG_ON(ctask->sc->use_sg == 0); @@ -1766,8 +1773,9 @@ handle_xmstate_w_pad(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask) } if (conn->datadgst_en) { - crypto_digest_update(tcp_conn->data_tx_tfm, - &tcp_ctask->sendbuf.sg, 1); + crypto_hash_update(&tcp_conn->data_tx_hash, + &tcp_ctask->sendbuf.sg, + tcp_ctask->sendbuf.sg.length); /* imm data? */ if (!dtask) { rc = iscsi_digest_final_send(conn, ctask, @@ -1963,13 +1971,13 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn) /* now free tcp_conn */ if (digest) { if (tcp_conn->tx_tfm) - crypto_free_tfm(tcp_conn->tx_tfm); + crypto_free_hash(tcp_conn->tx_tfm); if (tcp_conn->rx_tfm) - crypto_free_tfm(tcp_conn->rx_tfm); - if (tcp_conn->data_tx_tfm) - crypto_free_tfm(tcp_conn->data_tx_tfm); - if (tcp_conn->data_rx_tfm) - crypto_free_tfm(tcp_conn->data_rx_tfm); + crypto_free_hash(tcp_conn->rx_tfm); + if (tcp_conn->data_tx_hash.tfm) + crypto_free_hash(tcp_conn->data_tx_hash.tfm); + if (tcp_conn->data_rx_hash.tfm) + crypto_free_hash(tcp_conn->data_rx_hash.tfm); } kfree(tcp_conn); @@ -2130,44 +2138,48 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, if (conn->hdrdgst_en) { tcp_conn->hdr_size += sizeof(__u32); if (!tcp_conn->tx_tfm) - tcp_conn->tx_tfm = crypto_alloc_tfm("crc32c", - 0); - if (!tcp_conn->tx_tfm) - return -ENOMEM; + tcp_conn->tx_tfm = + crypto_alloc_hash("crc32c", 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(tcp_conn->tx_tfm)) + return PTR_ERR(tcp_conn->tx_tfm); if (!tcp_conn->rx_tfm) - tcp_conn->rx_tfm = crypto_alloc_tfm("crc32c", - 0); - if (!tcp_conn->rx_tfm) { - crypto_free_tfm(tcp_conn->tx_tfm); - return -ENOMEM; + tcp_conn->rx_tfm = + crypto_alloc_hash("crc32c", 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(tcp_conn->rx_tfm)) { + crypto_free_hash(tcp_conn->tx_tfm); + return PTR_ERR(tcp_conn->rx_tfm); } } else { if (tcp_conn->tx_tfm) - crypto_free_tfm(tcp_conn->tx_tfm); + crypto_free_hash(tcp_conn->tx_tfm); if (tcp_conn->rx_tfm) - crypto_free_tfm(tcp_conn->rx_tfm); + crypto_free_hash(tcp_conn->rx_tfm); } break; case ISCSI_PARAM_DATADGST_EN: iscsi_set_param(cls_conn, param, buf, buflen); if (conn->datadgst_en) { - if (!tcp_conn->data_tx_tfm) - tcp_conn->data_tx_tfm = - crypto_alloc_tfm("crc32c", 0); - if (!tcp_conn->data_tx_tfm) - return -ENOMEM; - if (!tcp_conn->data_rx_tfm) - tcp_conn->data_rx_tfm = - crypto_alloc_tfm("crc32c", 0); - if (!tcp_conn->data_rx_tfm) { - crypto_free_tfm(tcp_conn->data_tx_tfm); - return -ENOMEM; + if (!tcp_conn->data_tx_hash.tfm) + tcp_conn->data_tx_hash.tfm = + crypto_alloc_hash("crc32c", 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(tcp_conn->data_tx_hash.tfm)) + return PTR_ERR(tcp_conn->data_tx_hash.tfm); + if (!tcp_conn->data_rx_hash.tfm) + tcp_conn->data_rx_hash.tfm = + crypto_alloc_hash("crc32c", 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(tcp_conn->data_rx_hash.tfm)) { + crypto_free_hash(tcp_conn->data_tx_hash.tfm); + return PTR_ERR(tcp_conn->data_rx_hash.tfm); } } else { - if (tcp_conn->data_tx_tfm) - crypto_free_tfm(tcp_conn->data_tx_tfm); - if (tcp_conn->data_rx_tfm) - crypto_free_tfm(tcp_conn->data_rx_tfm); + if 
(tcp_conn->data_tx_hash.tfm)
+				crypto_free_hash(tcp_conn->data_tx_hash.tfm);
+			if (tcp_conn->data_rx_hash.tfm)
+				crypto_free_hash(tcp_conn->data_rx_hash.tfm);
 		}
 		tcp_conn->sendpage = conn->datadgst_en ?
 			sock_no_sendpage : tcp_conn->sock->ops->sendpage;
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 6a4ee704e46e..e35701305fc9 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -51,6 +51,7 @@
 #define ISCSI_SG_TABLESIZE SG_ALL
 #define ISCSI_TCP_MAX_CMD_LEN 16
+struct crypto_hash;
 struct socket;
 /* Socket connection receive helper */
@@ -84,8 +85,8 @@ struct iscsi_tcp_conn {
 	/* iSCSI connection-wide sequencing */
 	int hdr_size;			/* PDU header size */
-	struct crypto_tfm *rx_tfm;	/* CRC32C (Rx) */
-	struct crypto_tfm *data_rx_tfm;	/* CRC32C (Rx) for data */
+	struct crypto_hash *rx_tfm;	/* CRC32C (Rx) */
+	struct hash_desc data_rx_hash;	/* CRC32C (Rx) for data */
 	/* control data */
 	struct iscsi_tcp_recv in;	/* TCP receive context */
@@ -97,8 +98,8 @@ struct iscsi_tcp_conn {
 	void (*old_write_space)(struct sock *);
 	/* xmit */
-	struct crypto_tfm *tx_tfm;	/* CRC32C (Tx) */
-	struct crypto_tfm *data_tx_tfm;	/* CRC32C (Tx) for data */
+	struct crypto_hash *tx_tfm;	/* CRC32C (Tx) */
+	struct hash_desc data_tx_hash;	/* CRC32C (Tx) for data */
 	/* MIB custom statistics */
 	uint32_t sendpage_failures_cnt;
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 89ef34df5a1d..6422de72bf43 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -431,7 +431,7 @@ static int mac53c94_probe(struct macio_dev *mdev, const struct of_device_id *mat
 	struct fsc_state *state;
 	struct Scsi_Host *host;
 	void *dma_cmd_space;
-	unsigned char *clkprop;
+	const unsigned char *clkprop;
 	int proplen, rc = -ENODEV;
 	if (macio_resource_count(mdev) != 2 || macio_irq_count(mdev) != 2) {
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 5572981a9f92..592b52afe658 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1850,7 +1850,8 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
 {
 	struct device_node *mesh = macio_get_of_node(mdev);
 	struct pci_dev* pdev = macio_get_pci_dev(mdev);
-	int tgt, *cfp, minper;
+	int tgt, minper;
+	const int *cfp;
 	struct mesh_state *ms;
 	struct Scsi_Host *mesh_host;
 	void *dma_cmd_space;
@@ -1939,7 +1940,7 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
 		ms->tgts[tgt].current_req = NULL;
 	}
-	if ((cfp = (int *) get_property(mesh, "clock-frequency", NULL)))
+	if ((cfp = get_property(mesh, "clock-frequency", NULL)))
 		ms->clk_freq = *cfp;
 	else {
 		printk(KERN_INFO "mesh: assuming 50MHz clock frequency\n");
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index 7d0858095e1f..6b70c3c76dfd 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -268,7 +268,7 @@ static int k2_sata_proc_info(struct Scsi_Host *shost, char *page, char **start,
 	/* Match it to a port node */
 	index = (ap == ap->host_set->ports[0]) ?
0 : 1; for (np = np->child; np != NULL; np = np->sibling) { - u32 *reg = (u32 *)get_property(np, "reg", NULL); + const u32 *reg = get_property(np, "reg", NULL); if (!reg) continue; if (index == *reg) diff --git a/drivers/serial/pmac_zilog.c b/drivers/serial/pmac_zilog.c index bfd2a22759eb..a3b99caf80e6 100644 --- a/drivers/serial/pmac_zilog.c +++ b/drivers/serial/pmac_zilog.c @@ -1400,8 +1400,8 @@ static struct uart_ops pmz_pops = { static int __init pmz_init_port(struct uart_pmac_port *uap) { struct device_node *np = uap->node; - char *conn; - struct slot_names_prop { + const char *conn; + const struct slot_names_prop { int count; char name[1]; } *slots; @@ -1458,7 +1458,7 @@ no_dma: uap->flags |= PMACZILOG_FLAG_IS_IRDA; uap->port_type = PMAC_SCC_ASYNC; /* 1999 Powerbook G3 has slot-names property instead */ - slots = (struct slot_names_prop *)get_property(np, "slot-names", &len); + slots = get_property(np, "slot-names", &len); if (slots && slots->count > 0) { if (strcmp(slots->name, "IrDA") == 0) uap->flags |= PMACZILOG_FLAG_IS_IRDA; @@ -1470,7 +1470,8 @@ no_dma: if (ZS_IS_INTMODEM(uap)) { struct device_node* i2c_modem = find_devices("i2c-modem"); if (i2c_modem) { - char* mid = get_property(i2c_modem, "modem-id", NULL); + const char* mid = + get_property(i2c_modem, "modem-id", NULL); if (mid) switch(*mid) { case 0x04 : case 0x05 : diff --git a/drivers/video/S3triofb.c b/drivers/video/S3triofb.c index afd146f5f683..397005eb392d 100644 --- a/drivers/video/S3triofb.c +++ b/drivers/video/S3triofb.c @@ -349,30 +349,30 @@ static void __init s3triofb_of_init(struct device_node *dp) s3trio_name[sizeof(s3trio_name)-1] = '\0'; strcpy(fb_fix.id, s3trio_name); - if((pp = (int *)get_property(dp, "vendor-id", &len)) != NULL + if((pp = get_property(dp, "vendor-id", &len)) != NULL && *pp!=PCI_VENDOR_ID_S3) { printk("%s: can't find S3 Trio board\n", dp->full_name); return; } - if((pp = (int *)get_property(dp, "device-id", &len)) != NULL + if((pp = get_property(dp, "device-id", &len)) != NULL && *pp!=PCI_DEVICE_ID_S3_TRIO) { printk("%s: can't find S3 Trio board\n", dp->full_name); return; } - if ((pp = (int *)get_property(dp, "depth", &len)) != NULL + if ((pp = get_property(dp, "depth", &len)) != NULL && len == sizeof(int) && *pp != 8) { printk("%s: can't use depth = %d\n", dp->full_name, *pp); return; } - if ((pp = (int *)get_property(dp, "width", &len)) != NULL + if ((pp = get_property(dp, "width", &len)) != NULL && len == sizeof(int)) fb_var.xres = fb_var.xres_virtual = *pp; - if ((pp = (int *)get_property(dp, "height", &len)) != NULL + if ((pp = get_property(dp, "height", &len)) != NULL && len == sizeof(int)) fb_var.yres = fb_var.yres_virtual = *pp; - if ((pp = (int *)get_property(dp, "linebytes", &len)) != NULL + if ((pp = get_property(dp, "linebytes", &len)) != NULL && len == sizeof(int)) fb_fix.line_length = *pp; else diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c index 8e3400d5dd21..0ed577e7cc21 100644 --- a/drivers/video/aty/radeon_base.c +++ b/drivers/video/aty/radeon_base.c @@ -413,11 +413,11 @@ static int __devinit radeon_find_mem_vbios(struct radeonfb_info *rinfo) static int __devinit radeon_read_xtal_OF (struct radeonfb_info *rinfo) { struct device_node *dp = rinfo->of_node; - u32 *val; + const u32 *val; if (dp == NULL) return -ENODEV; - val = (u32 *) get_property(dp, "ATY,RefCLK", NULL); + val = get_property(dp, "ATY,RefCLK", NULL); if (!val || !*val) { printk(KERN_WARNING "radeonfb: No ATY,RefCLK property !\n"); return -EINVAL; @@ -425,11 +425,11 @@ static 
int __devinit radeon_read_xtal_OF (struct radeonfb_info *rinfo) rinfo->pll.ref_clk = (*val) / 10; - val = (u32 *) get_property(dp, "ATY,SCLK", NULL); + val = get_property(dp, "ATY,SCLK", NULL); if (val && *val) rinfo->pll.sclk = (*val) / 10; - val = (u32 *) get_property(dp, "ATY,MCLK", NULL); + val = get_property(dp, "ATY,MCLK", NULL); if (val && *val) rinfo->pll.mclk = (*val) / 10; diff --git a/drivers/video/aty/radeon_monitor.c b/drivers/video/aty/radeon_monitor.c index 98c05bc0de44..ea531a6f45d1 100644 --- a/drivers/video/aty/radeon_monitor.c +++ b/drivers/video/aty/radeon_monitor.c @@ -64,13 +64,13 @@ static int __devinit radeon_parse_montype_prop(struct device_node *dp, u8 **out_ { static char *propnames[] = { "DFP,EDID", "LCD,EDID", "EDID", "EDID1", "EDID2", NULL }; - u8 *pedid = NULL; - u8 *pmt = NULL; + const u8 *pedid = NULL; + const u8 *pmt = NULL; u8 *tmp; int i, mt = MT_NONE; RTRACE("analyzing OF properties...\n"); - pmt = (u8 *)get_property(dp, "display-type", NULL); + pmt = get_property(dp, "display-type", NULL); if (!pmt) return MT_NONE; RTRACE("display-type: %s\n", pmt); @@ -89,7 +89,7 @@ static int __devinit radeon_parse_montype_prop(struct device_node *dp, u8 **out_ } for (i = 0; propnames[i] != NULL; ++i) { - pedid = (u8 *)get_property(dp, propnames[i], NULL); + pedid = get_property(dp, propnames[i], NULL); if (pedid != NULL) break; } @@ -124,14 +124,14 @@ static int __devinit radeon_probe_OF_head(struct radeonfb_info *rinfo, int head_ return MT_NONE; if (rinfo->has_CRTC2) { - char *pname; + const char *pname; int len, second = 0; dp = dp->child; do { if (!dp) return MT_NONE; - pname = (char *)get_property(dp, "name", NULL); + pname = get_property(dp, "name", NULL); if (!pname) return MT_NONE; len = strlen(pname); diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c index f31e606a2ded..e308ed2d249a 100644 --- a/drivers/video/aty/radeon_pm.c +++ b/drivers/video/aty/radeon_pm.c @@ -1268,7 +1268,7 @@ static void radeon_pm_full_reset_sdram(struct radeonfb_info *rinfo) 0x21320032, 0xa1320032, 0x21320032, 0xffffffff, 0x31320032 }; - u32 *mrtable = default_mrtable; + const u32 *mrtable = default_mrtable; int i, mrtable_size = ARRAY_SIZE(default_mrtable); mdelay(30); @@ -1287,7 +1287,7 @@ static void radeon_pm_full_reset_sdram(struct radeonfb_info *rinfo) if (rinfo->of_node != NULL) { int size; - mrtable = (u32 *)get_property(rinfo->of_node, "ATY,MRT", &size); + mrtable = get_property(rinfo->of_node, "ATY,MRT", &size); if (mrtable) mrtable_size = size >> 2; else diff --git a/drivers/video/nvidia/nv_of.c b/drivers/video/nvidia/nv_of.c index 8209106e26ee..d9af88c2b580 100644 --- a/drivers/video/nvidia/nv_of.c +++ b/drivers/video/nvidia/nv_of.c @@ -32,7 +32,7 @@ int nvidia_probe_of_connector(struct fb_info *info, int conn, u8 **out_edid) { struct nvidia_par *par = info->par; struct device_node *parent, *dp; - unsigned char *pedid = NULL; + const unsigned char *pedid = NULL; static char *propnames[] = { "DFP,EDID", "LCD,EDID", "EDID", "EDID1", "EDID,B", "EDID,A", NULL }; @@ -42,20 +42,19 @@ int nvidia_probe_of_connector(struct fb_info *info, int conn, u8 **out_edid) if (parent == NULL) return -1; if (par->twoHeads) { - char *pname; + const char *pname; int len; for (dp = NULL; (dp = of_get_next_child(parent, dp)) != NULL;) { - pname = (char *)get_property(dp, "name", NULL); + pname = get_property(dp, "name", NULL); if (!pname) continue; len = strlen(pname); if ((pname[len-1] == 'A' && conn == 1) || (pname[len-1] == 'B' && conn == 2)) { for (i = 0; propnames[i] 
!= NULL; ++i) { - pedid = (unsigned char *) - get_property(dp, propnames[i], + pedid = get_property(dp, propnames[i], NULL); if (pedid != NULL) break; @@ -67,8 +66,7 @@ int nvidia_probe_of_connector(struct fb_info *info, int conn, u8 **out_edid) } if (pedid == NULL) { for (i = 0; propnames[i] != NULL; ++i) { - pedid = (unsigned char *) - get_property(parent, propnames[i], NULL); + pedid = get_property(parent, propnames[i], NULL); if (pedid != NULL) break; } diff --git a/drivers/video/offb.c b/drivers/video/offb.c index 0013311e0564..bad0e98fb3b6 100644 --- a/drivers/video/offb.c +++ b/drivers/video/offb.c @@ -409,30 +409,30 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node) unsigned int flags, rsize, addr_prop = 0; unsigned long max_size = 0; u64 rstart, address = OF_BAD_ADDR; - u32 *pp, *addrp, *up; + const u32 *pp, *addrp, *up; u64 asize; - pp = (u32 *)get_property(dp, "linux,bootx-depth", &len); + pp = get_property(dp, "linux,bootx-depth", &len); if (pp == NULL) - pp = (u32 *)get_property(dp, "depth", &len); + pp = get_property(dp, "depth", &len); if (pp && len == sizeof(u32)) depth = *pp; - pp = (u32 *)get_property(dp, "linux,bootx-width", &len); + pp = get_property(dp, "linux,bootx-width", &len); if (pp == NULL) - pp = (u32 *)get_property(dp, "width", &len); + pp = get_property(dp, "width", &len); if (pp && len == sizeof(u32)) width = *pp; - pp = (u32 *)get_property(dp, "linux,bootx-height", &len); + pp = get_property(dp, "linux,bootx-height", &len); if (pp == NULL) - pp = (u32 *)get_property(dp, "height", &len); + pp = get_property(dp, "height", &len); if (pp && len == sizeof(u32)) height = *pp; - pp = (u32 *)get_property(dp, "linux,bootx-linebytes", &len); + pp = get_property(dp, "linux,bootx-linebytes", &len); if (pp == NULL) - pp = (u32 *)get_property(dp, "linebytes", &len); + pp = get_property(dp, "linebytes", &len); if (pp && len == sizeof(u32)) pitch = *pp; else @@ -450,9 +450,9 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node) * ranges and pick one that is both big enough and if possible encloses * the "address" property. 
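 * (Illustrative, with assumed numbers: given assigned ranges of
 * 0xa0000000 + 16MB and 0xb0000000 + 64MB, and an "address" property
 * of 0xb0080000, the second range is both large enough and encloses
 * the address, so it would be the one chosen.)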
If none match, we pick the biggest */ - up = (u32 *)get_property(dp, "linux,bootx-addr", &len); + up = get_property(dp, "linux,bootx-addr", &len); if (up == NULL) - up = (u32 *)get_property(dp, "address", &len); + up = get_property(dp, "address", &len); if (up && len == sizeof(u32)) addr_prop = *up; diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c index 8ddb47a56b07..67d1e1c8813d 100644 --- a/drivers/video/riva/fbdev.c +++ b/drivers/video/riva/fbdev.c @@ -1835,14 +1835,13 @@ static int __devinit riva_get_EDID_OF(struct fb_info *info, struct pci_dev *pd) NVTRACE_ENTER(); dp = pci_device_to_OF_node(pd); for (; dp != NULL; dp = dp->child) { - disptype = (unsigned char *)get_property(dp, "display-type", NULL); + disptype = get_property(dp, "display-type", NULL); if (disptype == NULL) continue; if (strncmp(disptype, "LCD", 3) != 0) continue; for (i = 0; propnames[i] != NULL; ++i) { - pedid = (unsigned char *) - get_property(dp, propnames[i], NULL); + pedid = get_property(dp, propnames[i], NULL); if (pedid != NULL) { par->EDID = pedid; NVTRACE("LCD found.\n"); diff --git a/fs/Kconfig b/fs/Kconfig index 3f00a9faabcb..530581628311 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -325,8 +325,8 @@ config FS_POSIX_ACL source "fs/xfs/Kconfig" config OCFS2_FS - tristate "OCFS2 file system support (EXPERIMENTAL)" - depends on NET && SYSFS && EXPERIMENTAL + tristate "OCFS2 file system support" + depends on NET && SYSFS select CONFIGFS_FS select JBD select CRC32 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index df025453dd97..816e8ef64560 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -86,6 +86,32 @@ static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent * pare return sd; } +/* + * + * Return -EEXIST if there is already a configfs element with the same + * name for the same parent. 
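+ *
+ * Illustrative sketch of the intended call pattern, mirroring the
+ * create_dir() caller below (variable names here are assumed for the
+ * example, not part of the interface):
+ *
+ *	error = configfs_dirent_exists(parent->d_fsdata, dentry->d_name.name);
+ *	if (!error)
+ *		error = configfs_make_dirent(parent->d_fsdata, dentry, item,
+ *					     mode, CONFIGFS_DIR);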
+ * + * called with parent inode's i_mutex held + */ +int configfs_dirent_exists(struct configfs_dirent *parent_sd, + const unsigned char *new) +{ + struct configfs_dirent * sd; + + list_for_each_entry(sd, &parent_sd->s_children, s_sibling) { + if (sd->s_element) { + const unsigned char *existing = configfs_get_name(sd); + if (strcmp(existing, new)) + continue; + else + return -EEXIST; + } + } + + return 0; +} + + int configfs_make_dirent(struct configfs_dirent * parent_sd, struct dentry * dentry, void * element, umode_t mode, int type) @@ -136,8 +162,10 @@ static int create_dir(struct config_item * k, struct dentry * p, int error; umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO; - error = configfs_make_dirent(p->d_fsdata, d, k, mode, - CONFIGFS_DIR); + error = configfs_dirent_exists(p->d_fsdata, d->d_name.name); + if (!error) + error = configfs_make_dirent(p->d_fsdata, d, k, mode, + CONFIGFS_DIR); if (!error) { error = configfs_create(d, mode, init_dir); if (!error) { diff --git a/fs/jffs2/jffs2_fs_i.h b/fs/jffs2/jffs2_fs_i.h index 2e0cc8e00b85..3a566077ac95 100644 --- a/fs/jffs2/jffs2_fs_i.h +++ b/fs/jffs2/jffs2_fs_i.h @@ -41,11 +41,7 @@ struct jffs2_inode_info { uint16_t flags; uint8_t usercompr; -#if !defined (__ECOS) -#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,2) struct inode vfs_inode; -#endif -#endif #ifdef CONFIG_JFFS2_FS_POSIX_ACL struct posix_acl *i_acl_access; struct posix_acl *i_acl_default; diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index 06da7506363c..e35d7e52fdeb 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -33,7 +33,7 @@ * */ - +#include <linux/err.h> #include <linux/sunrpc/svc.h> #include <linux/nfsd/nfsd.h> #include <linux/nfs4.h> @@ -87,34 +87,35 @@ int nfs4_make_rec_clidname(char *dname, struct xdr_netobj *clname) { struct xdr_netobj cksum; - struct crypto_tfm *tfm; + struct hash_desc desc; struct scatterlist sg[1]; int status = nfserr_resource; dprintk("NFSD: nfs4_make_rec_clidname for %.*s\n", clname->len, clname->data); - tfm = crypto_alloc_tfm("md5", CRYPTO_TFM_REQ_MAY_SLEEP); - if (tfm == NULL) - goto out; - cksum.len = crypto_tfm_alg_digestsize(tfm); + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; + desc.tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(desc.tfm)) + goto out_no_tfm; + cksum.len = crypto_hash_digestsize(desc.tfm); cksum.data = kmalloc(cksum.len, GFP_KERNEL); if (cksum.data == NULL) goto out; - crypto_digest_init(tfm); sg[0].page = virt_to_page(clname->data); sg[0].offset = offset_in_page(clname->data); sg[0].length = clname->len; - crypto_digest_update(tfm, sg, 1); - crypto_digest_final(tfm, cksum.data); + if (crypto_hash_digest(&desc, sg, sg->length, cksum.data)) + goto out; md5_to_hex(dname, cksum.data); kfree(cksum.data); status = nfs_ok; out: - crypto_free_tfm(tfm); + crypto_free_hash(desc.tfm); +out_no_tfm: return status; } diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile index 7d3be845a614..9fb8132f19b0 100644 --- a/fs/ocfs2/Makefile +++ b/fs/ocfs2/Makefile @@ -16,6 +16,7 @@ ocfs2-objs := \ file.o \ heartbeat.o \ inode.o \ + ioctl.o \ journal.o \ localalloc.o \ mmap.o \ diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index edaab05a93e0..f43bc5f18a35 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -1717,17 +1717,29 @@ static int ocfs2_do_truncate(struct ocfs2_super *osb, ocfs2_remove_from_cache(inode, eb_bh); - BUG_ON(eb->h_suballoc_slot); BUG_ON(el->l_recs[0].e_clusters); BUG_ON(el->l_recs[0].e_cpos); BUG_ON(el->l_recs[0].e_blkno); - status = ocfs2_free_extent_block(handle, - 
tc->tc_ext_alloc_inode, - tc->tc_ext_alloc_bh, - eb); - if (status < 0) { - mlog_errno(status); - goto bail; + if (eb->h_suballoc_slot == 0) { + /* + * This code only understands how to + * lock the suballocator in slot 0, + * which is fine because allocation is + * only ever done out of that + * suballocator too. A future version + * might change that however, so avoid + * a free if we don't know how to + * handle it. This way an fs incompat + * bit will not be necessary. + */ + status = ocfs2_free_extent_block(handle, + tc->tc_ext_alloc_inode, + tc->tc_ext_alloc_bh, + eb); + if (status < 0) { + mlog_errno(status); + goto bail; + } } } brelse(eb_bh); diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index f1d1c342ce01..3d7c082a8f58 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -391,31 +391,28 @@ out: static int ocfs2_commit_write(struct file *file, struct page *page, unsigned from, unsigned to) { - int ret, extending = 0, locklevel = 0; - loff_t new_i_size; + int ret; struct buffer_head *di_bh = NULL; struct inode *inode = page->mapping->host; struct ocfs2_journal_handle *handle = NULL; + struct ocfs2_dinode *di; mlog_entry("(0x%p, 0x%p, %u, %u)\n", file, page, from, to); /* NOTE: ocfs2_file_aio_write has ensured that it's safe for - * us to sample inode->i_size here without the metadata lock: + * us to continue here without rechecking the I/O against + * changed inode values. * * 1) We're currently holding the inode alloc lock, so no * nodes can change it underneath us. * * 2) We've had to take the metadata lock at least once - * already to check for extending writes, hence insuring - * that our current copy is also up to date. + * already to check for extending writes, suid removal, etc. + * The meta data update code then ensures that we don't get a + * stale inode allocation image (i_size, i_clusters, etc). */ - new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; - if (new_i_size > i_size_read(inode)) { - extending = 1; - locklevel = 1; - } - ret = ocfs2_meta_lock_with_page(inode, NULL, &di_bh, locklevel, page); + ret = ocfs2_meta_lock_with_page(inode, NULL, &di_bh, 1, page); if (ret != 0) { mlog_errno(ret); goto out; @@ -427,23 +424,20 @@ static int ocfs2_commit_write(struct file *file, struct page *page, goto out_unlock_meta; } - if (extending) { - handle = ocfs2_start_walk_page_trans(inode, page, from, to); - if (IS_ERR(handle)) { - ret = PTR_ERR(handle); - handle = NULL; - goto out_unlock_data; - } + handle = ocfs2_start_walk_page_trans(inode, page, from, to); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + goto out_unlock_data; + } - /* Mark our buffer early. We'd rather catch this error up here - * as opposed to after a successful commit_write which would - * require us to set back inode->i_size. */ - ret = ocfs2_journal_access(handle, inode, di_bh, - OCFS2_JOURNAL_ACCESS_WRITE); - if (ret < 0) { - mlog_errno(ret); - goto out_commit; - } + /* Mark our buffer early. We'd rather catch this error up here + * as opposed to after a successful commit_write which would + * require us to set back inode->i_size. 
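+	 *
+	 * (Sketch of the ordering relied on here, as used in the rest of
+	 * this function: ocfs2_journal_access() before the buffer is
+	 * modified, ocfs2_journal_dirty() after the update, and
+	 * ocfs2_commit_trans() last.)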
+	 */
+	ret = ocfs2_journal_access(handle, inode, di_bh,
+				   OCFS2_JOURNAL_ACCESS_WRITE);
+	if (ret < 0) {
+		mlog_errno(ret);
+		goto out_commit;
 	}
 	/* might update i_size */
@@ -453,37 +447,28 @@ static int ocfs2_commit_write(struct file *file, struct page *page,
 		goto out_commit;
 	}
-	if (extending) {
-		loff_t size = (u64) i_size_read(inode);
-		struct ocfs2_dinode *di =
-			(struct ocfs2_dinode *)di_bh->b_data;
+	di = (struct ocfs2_dinode *)di_bh->b_data;
-		/* ocfs2_mark_inode_dirty is too heavy to use here. */
-		inode->i_blocks = ocfs2_align_bytes_to_sectors(size);
-		inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+	/* ocfs2_mark_inode_dirty() is too heavy to use here. */
+	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
+	di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
-		di->i_size = cpu_to_le64(size);
-		di->i_ctime = di->i_mtime =
-			cpu_to_le64(inode->i_mtime.tv_sec);
-		di->i_ctime_nsec = di->i_mtime_nsec =
-			cpu_to_le32(inode->i_mtime.tv_nsec);
+	inode->i_blocks = ocfs2_align_bytes_to_sectors((u64)(i_size_read(inode)));
+	di->i_size = cpu_to_le64((u64)i_size_read(inode));
-		ret = ocfs2_journal_dirty(handle, di_bh);
-		if (ret < 0) {
-			mlog_errno(ret);
-			goto out_commit;
-		}
+	ret = ocfs2_journal_dirty(handle, di_bh);
+	if (ret < 0) {
+		mlog_errno(ret);
+		goto out_commit;
 	}
-	BUG_ON(extending && (i_size_read(inode) != new_i_size));
-
 out_commit:
-	if (handle)
-		ocfs2_commit_trans(handle);
+	ocfs2_commit_trans(handle);
 out_unlock_data:
 	ocfs2_data_unlock(inode, 1);
 out_unlock_meta:
-	ocfs2_meta_unlock(inode, locklevel);
+	ocfs2_meta_unlock(inode, 1);
 out:
 	if (di_bh)
 		brelse(di_bh);
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index 9a24adf9be6e..c9037414f4f6 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -100,6 +100,9 @@ int ocfs2_read_blocks(struct ocfs2_super *osb, u64 block, int nr,
 	mlog_entry("(block=(%llu), nr=(%d), flags=%d, inode=%p)\n",
 		   (unsigned long long)block, nr, flags, inode);
+	BUG_ON((flags & OCFS2_BH_READAHEAD) &&
+	       (!inode || !(flags & OCFS2_BH_CACHED)));
+
 	if (osb == NULL || osb->sb == NULL || bhs == NULL) {
 		status = -EINVAL;
 		mlog_errno(status);
@@ -140,6 +143,30 @@ int ocfs2_read_blocks(struct ocfs2_super *osb, u64 block, int nr,
 		bh = bhs[i];
 		ignore_cache = 0;
+		/* There are three read-ahead cases here which we need to
+		 * be concerned with. All three assume a buffer has
+		 * previously been submitted with OCFS2_BH_READAHEAD
+		 * and it hasn't yet completed I/O.
+		 *
+		 * 1) The current request is sync to disk. This rarely
+		 *    happens these days, and never when performance
+		 *    matters - the code can just wait on the buffer
+		 *    lock and re-submit.
+		 *
+		 * 2) The current request is cached, but not
+		 *    readahead. ocfs2_buffer_uptodate() will return
+		 *    false anyway, so we'll wind up waiting on the
+		 *    buffer lock to do I/O. We re-check the request
+		 *    after getting the lock to avoid a re-submit.
+		 *
+		 * 3) The current request is readahead (and so must
+		 *    also be a caching one). We short circuit if the
+		 *    buffer is locked (under I/O) and if it's in the
+		 *    uptodate cache. The re-check from #2 catches the
+		 *    case that the previous read-ahead completes just
+		 *    before our is-it-in-flight check.
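+		 *
+		 * Rough sketch of the resulting checks below (a
+		 * paraphrase, not literal code):
+		 *
+		 *   cached && !uptodate  -> lock_buffer(), re-check,
+		 *                           then submit_bh(READ, bh)
+		 *   readahead && buffer already under I/O or uptodate
+		 *                        -> short circuit, nothing to
+		 *                           submit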
+		 */
+
 		if (flags & OCFS2_BH_CACHED &&
 		    !ocfs2_buffer_uptodate(inode, bh)) {
 			mlog(ML_UPTODATE,
@@ -169,6 +196,14 @@ int ocfs2_read_blocks(struct ocfs2_super *osb, u64 block, int nr,
 				continue;
 			}
+			/* A read-ahead request was made - if the
+			 * buffer is already under read-ahead from a
+			 * previously submitted request then we are
+			 * done here. */
+			if ((flags & OCFS2_BH_READAHEAD)
+			    && ocfs2_buffer_read_ahead(inode, bh))
+				continue;
+
 			lock_buffer(bh);
 			if (buffer_jbd(bh)) {
#ifdef CATCH_BH_JBD_RACES
@@ -181,13 +216,22 @@ int ocfs2_read_blocks(struct ocfs2_super *osb, u64 block, int nr,
 				continue;
#endif
 			}
+
+			/* Re-check ocfs2_buffer_uptodate() as a
+			 * previously read-ahead buffer may have
+			 * completed I/O while we were waiting for the
+			 * buffer lock. */
+			if ((flags & OCFS2_BH_CACHED)
+			    && !(flags & OCFS2_BH_READAHEAD)
+			    && ocfs2_buffer_uptodate(inode, bh)) {
+				unlock_buffer(bh);
+				continue;
+			}
+
 			clear_buffer_uptodate(bh);
 			get_bh(bh); /* for end_buffer_read_sync() */
 			bh->b_end_io = end_buffer_read_sync;
-			if (flags & OCFS2_BH_READAHEAD)
-				submit_bh(READA, bh);
-			else
-				submit_bh(READ, bh);
+			submit_bh(READ, bh);
 			continue;
 		}
 	}
@@ -197,34 +241,39 @@ int ocfs2_read_blocks(struct ocfs2_super *osb, u64 block, int nr,
 	for (i = (nr - 1); i >= 0; i--) {
 		bh = bhs[i];
-		/* We know this can't have changed as we hold the
-		 * inode sem. Avoid doing any work on the bh if the
-		 * journal has it. */
-		if (!buffer_jbd(bh))
-			wait_on_buffer(bh);
-
-		if (!buffer_uptodate(bh)) {
-			/* Status won't be cleared from here on out,
-			 * so we can safely record this and loop back
-			 * to cleanup the other buffers. Don't need to
-			 * remove the clustered uptodate information
-			 * for this bh as it's not marked locally
-			 * uptodate. */
-			status = -EIO;
-			brelse(bh);
-			bhs[i] = NULL;
-			continue;
+		if (!(flags & OCFS2_BH_READAHEAD)) {
+			/* We know this can't have changed as we hold the
+			 * inode sem. Avoid doing any work on the bh if the
+			 * journal has it. */
+			if (!buffer_jbd(bh))
+				wait_on_buffer(bh);
+
+			if (!buffer_uptodate(bh)) {
+				/* Status won't be cleared from here on out,
+				 * so we can safely record this and loop back
+				 * to cleanup the other buffers. Don't need to
+				 * remove the clustered uptodate information
+				 * for this bh as it's not marked locally
+				 * uptodate. */
+				status = -EIO;
+				brelse(bh);
+				bhs[i] = NULL;
+				continue;
+			}
 		}
+		/* Always set the buffer in the cache, even if it was
+		 * a forced read, or read-ahead which hasn't yet
+		 * completed. */
 		if (inode)
 			ocfs2_set_buffer_uptodate(inode, bh);
 	}
 	if (inode)
 		mutex_unlock(&OCFS2_I(inode)->ip_io_mutex);
-	mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s\n",
+	mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n",
 	     (unsigned long long)block, nr,
-	     (!(flags & OCFS2_BH_CACHED) || ignore_cache) ? "no" : "yes");
+	     (!(flags & OCFS2_BH_CACHED) || ignore_cache) ? "no" : "yes", flags);
bail:
diff --git a/fs/ocfs2/buffer_head_io.h b/fs/ocfs2/buffer_head_io.h
index 6ecb90937b68..6cc20930fac3 100644
--- a/fs/ocfs2/buffer_head_io.h
+++ b/fs/ocfs2/buffer_head_io.h
@@ -49,7 +49,7 @@ int ocfs2_read_blocks(struct ocfs2_super *osb,
 #define OCFS2_BH_CACHED 1
-#define OCFS2_BH_READAHEAD 8 /* use this to pass READA down to submit_bh */
+#define OCFS2_BH_READAHEAD 8
 static inline int ocfs2_read_block(struct ocfs2_super * osb, u64 off,
 				   struct buffer_head **bh, int flags,
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 504595d6cf65..305cba3681fe 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -320,8 +320,12 @@ static int compute_max_sectors(struct block_device *bdev)
 		max_pages = q->max_hw_segments;
 	max_pages--; /* Handle I/Os that straddle a page */
-	max_sectors = max_pages << (PAGE_SHIFT - 9);
-
+	if (max_pages) {
+		max_sectors = max_pages << (PAGE_SHIFT - 9);
+	} else {
+		/* The BIO contains at most one page. */
+		max_sectors = q->max_sectors;
+	}
 	/* Why is fls() 1-based???? */
 	pow_two_sectors = 1 << (fls(max_sectors) - 1);
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 3d494d1a5f36..04e01915b86e 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -74,14 +74,14 @@ static int ocfs2_extend_dir(struct ocfs2_super *osb,
 int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir)
 {
 	int error = 0;
-	unsigned long offset, blk;
-	int i, num, stored;
+	unsigned long offset, blk, last_ra_blk = 0;
+	int i, stored;
 	struct buffer_head * bh, * tmp;
 	struct ocfs2_dir_entry * de;
 	int err;
 	struct inode *inode = filp->f_dentry->d_inode;
 	struct super_block * sb = inode->i_sb;
-	int have_disk_lock = 0;
+	unsigned int ra_sectors = 16;
 	mlog_entry("dirino=%llu\n",
 		   (unsigned long long)OCFS2_I(inode)->ip_blkno);
@@ -95,9 +95,8 @@ int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir)
 		mlog_errno(error);
 		/* we haven't got any yet, so propagate the error. */
 		stored = error;
-		goto bail;
+		goto bail_nolock;
 	}
-	have_disk_lock = 1;
 	offset = filp->f_pos & (sb->s_blocksize - 1);
@@ -113,16 +112,21 @@ int ocfs2_readdir(struct file * filp, void * dirent, filldir_t filldir)
 			continue;
 		}
-		/*
-		 * Do the readahead (8k)
-		 */
-		if (!offset) {
-			for (i = 16 >> (sb->s_blocksize_bits - 9), num = 0;
+		/* The idea here is to begin with 8k read-ahead and to stay
+		 * 4k ahead of our current position.
+		 *
+		 * TODO: Use the pagecache for this. We just need to
+		 * make sure it's cluster-safe... */
+		if (!last_ra_blk
+		    || (((last_ra_blk - blk) << 9) <= (ra_sectors / 2))) {
+			for (i = ra_sectors >> (sb->s_blocksize_bits - 9);
 			     i > 0; i--) {
 				tmp = ocfs2_bread(inode, ++blk, &err, 1);
 				if (tmp)
 					brelse(tmp);
 			}
+			last_ra_blk = blk;
+			ra_sectors = 8;
 		}
revalidate:
@@ -194,9 +198,9 @@ revalidate:
 	stored = 0;
bail:
-	if (have_disk_lock)
-		ocfs2_meta_unlock(inode, 0);
+	ocfs2_meta_unlock(inode, 0);
+bail_nolock:
 	mlog_exit(stored);
 	return stored;
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index 42775e2bbe2c..f13a4bac41f0 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -367,12 +367,10 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data)
 		goto do_ast;
 	}
-	mlog(ML_ERROR, "got %sast for unknown lock! cookie=%u:%llu, "
-	     "name=%.*s, namelen=%u\n",
-	     past->type == DLM_AST ? "" : "b",
-	     dlm_get_lock_cookie_node(cookie),
-	     dlm_get_lock_cookie_seq(cookie),
-	     locklen, name, locklen);
+	mlog(0, "got %sast for unknown lock!
cookie=%u:%llu, " + "name=%.*s, namelen=%u\n", past->type == DLM_AST ? "" : "b", + dlm_get_lock_cookie_node(cookie), dlm_get_lock_cookie_seq(cookie), + locklen, name, locklen); ret = DLM_NORMAL; unlock_out: diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 762eb1fbb34d..151b41781eab 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -1330,6 +1330,7 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode) cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime)); lvb->lvb_imtime_packed = cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime)); + lvb->lvb_iattr = cpu_to_be32(oi->ip_attr); mlog_meta_lvb(0, lockres); @@ -1360,6 +1361,9 @@ static void ocfs2_refresh_inode_from_lvb(struct inode *inode) oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters); i_size_write(inode, be64_to_cpu(lvb->lvb_isize)); + oi->ip_attr = be32_to_cpu(lvb->lvb_iattr); + ocfs2_set_inode_flags(inode); + /* fast-symlinks are a special case */ if (S_ISLNK(inode->i_mode) && !oi->ip_clusters) inode->i_blocks = 0; @@ -2899,8 +2903,9 @@ void ocfs2_dump_meta_lvb_info(u64 level, be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid), be16_to_cpu(lvb->lvb_imode)); mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, " - "mtime_packed 0x%llx\n", be16_to_cpu(lvb->lvb_inlink), + "mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink), (long long)be64_to_cpu(lvb->lvb_iatime_packed), (long long)be64_to_cpu(lvb->lvb_ictime_packed), - (long long)be64_to_cpu(lvb->lvb_imtime_packed)); + (long long)be64_to_cpu(lvb->lvb_imtime_packed), + be32_to_cpu(lvb->lvb_iattr)); } diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h index 8f2d1db2d9ea..243ae862ece5 100644 --- a/fs/ocfs2/dlmglue.h +++ b/fs/ocfs2/dlmglue.h @@ -27,7 +27,7 @@ #ifndef DLMGLUE_H #define DLMGLUE_H -#define OCFS2_LVB_VERSION 2 +#define OCFS2_LVB_VERSION 3 struct ocfs2_meta_lvb { __be32 lvb_version; @@ -40,7 +40,8 @@ struct ocfs2_meta_lvb { __be64 lvb_isize; __be16 lvb_imode; __be16 lvb_inlink; - __be32 lvb_reserved[3]; + __be32 lvb_iattr; + __be32 lvb_reserved[2]; }; /* ocfs2_meta_lock_full() and ocfs2_data_lock_full() 'arg_flags' flags */ diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index a9559c874530..2bbfa17090cf 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -44,6 +44,7 @@ #include "file.h" #include "sysfile.h" #include "inode.h" +#include "ioctl.h" #include "journal.h" #include "mmap.h" #include "suballoc.h" @@ -1227,10 +1228,12 @@ const struct file_operations ocfs2_fops = { .open = ocfs2_file_open, .aio_read = ocfs2_file_aio_read, .aio_write = ocfs2_file_aio_write, + .ioctl = ocfs2_ioctl, }; const struct file_operations ocfs2_dops = { .read = generic_read_dir, .readdir = ocfs2_readdir, .fsync = ocfs2_sync_file, + .ioctl = ocfs2_ioctl, }; diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index 327a5b7b86ed..7bcf69154592 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -71,6 +71,26 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb, struct inode *inode, struct buffer_head *fe_bh); +void ocfs2_set_inode_flags(struct inode *inode) +{ + unsigned int flags = OCFS2_I(inode)->ip_attr; + + inode->i_flags &= ~(S_IMMUTABLE | + S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC); + + if (flags & OCFS2_IMMUTABLE_FL) + inode->i_flags |= S_IMMUTABLE; + + if (flags & OCFS2_SYNC_FL) + inode->i_flags |= S_SYNC; + if (flags & OCFS2_APPEND_FL) + inode->i_flags |= S_APPEND; + if (flags & OCFS2_NOATIME_FL) + inode->i_flags |= S_NOATIME; + if (flags & OCFS2_DIRSYNC_FL) + inode->i_flags |= S_DIRSYNC; +} + struct inode *ocfs2_ilookup_for_vote(struct 
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index a9559c874530..2bbfa17090cf 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -44,6 +44,7 @@
 #include "file.h"
 #include "sysfile.h"
 #include "inode.h"
+#include "ioctl.h"
 #include "journal.h"
 #include "mmap.h"
 #include "suballoc.h"
@@ -1227,10 +1228,12 @@ const struct file_operations ocfs2_fops = {
 	.open		= ocfs2_file_open,
 	.aio_read	= ocfs2_file_aio_read,
 	.aio_write	= ocfs2_file_aio_write,
+	.ioctl		= ocfs2_ioctl,
 };
 
 const struct file_operations ocfs2_dops = {
 	.read		= generic_read_dir,
 	.readdir	= ocfs2_readdir,
 	.fsync		= ocfs2_sync_file,
+	.ioctl		= ocfs2_ioctl,
 };
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 327a5b7b86ed..7bcf69154592 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -71,6 +71,26 @@ static int ocfs2_truncate_for_delete(struct ocfs2_super *osb,
 				     struct inode *inode,
 				     struct buffer_head *fe_bh);
 
+void ocfs2_set_inode_flags(struct inode *inode)
+{
+	unsigned int flags = OCFS2_I(inode)->ip_attr;
+
+	inode->i_flags &= ~(S_IMMUTABLE |
+		S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
+
+	if (flags & OCFS2_IMMUTABLE_FL)
+		inode->i_flags |= S_IMMUTABLE;
+
+	if (flags & OCFS2_SYNC_FL)
+		inode->i_flags |= S_SYNC;
+	if (flags & OCFS2_APPEND_FL)
+		inode->i_flags |= S_APPEND;
+	if (flags & OCFS2_NOATIME_FL)
+		inode->i_flags |= S_NOATIME;
+	if (flags & OCFS2_DIRSYNC_FL)
+		inode->i_flags |= S_DIRSYNC;
+}
+
 struct inode *ocfs2_ilookup_for_vote(struct ocfs2_super *osb, u64 blkno,
 				     int delete_vote)
@@ -260,7 +280,6 @@ int ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
 	inode->i_blocks = ocfs2_align_bytes_to_sectors(le64_to_cpu(fe->i_size));
 	inode->i_mapping->a_ops = &ocfs2_aops;
-	inode->i_flags |= S_NOATIME;
 	inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime);
 	inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec);
 	inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime);
@@ -276,6 +295,7 @@ int ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
 
 	OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
 	OCFS2_I(inode)->ip_orphaned_slot = OCFS2_INVALID_SLOT;
+	OCFS2_I(inode)->ip_attr = le32_to_cpu(fe->i_attr);
 
 	if (create_ino)
 		inode->i_ino = ino_from_blkno(inode->i_sb,
@@ -330,6 +350,9 @@ int ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
 	ocfs2_inode_lock_res_init(&OCFS2_I(inode)->ip_data_lockres,
 				  OCFS2_LOCK_TYPE_DATA, inode);
 
+	ocfs2_set_inode_flags(inode);
+	inode->i_flags |= S_NOATIME;
+
 	status = 0;
 bail:
 	mlog_exit(status);
@@ -1027,12 +1050,8 @@ struct buffer_head *ocfs2_bread(struct inode *inode,
 	u64 p_blkno;
 	int readflags = OCFS2_BH_CACHED;
 
-#if 0
-	/* only turn this on if we know we can deal with read_block
-	 * returning nothing */
 	if (reada)
 		readflags |= OCFS2_BH_READAHEAD;
-#endif
 
 	if (((u64)block << inode->i_sb->s_blocksize_bits) >=
 	    i_size_read(inode)) {
@@ -1131,6 +1150,7 @@ int ocfs2_mark_inode_dirty(struct ocfs2_journal_handle *handle,
 
 	spin_lock(&OCFS2_I(inode)->ip_lock);
 	fe->i_clusters = cpu_to_le32(OCFS2_I(inode)->ip_clusters);
+	fe->i_attr = cpu_to_le32(OCFS2_I(inode)->ip_attr);
 	spin_unlock(&OCFS2_I(inode)->ip_lock);
 
 	fe->i_size = cpu_to_le64(i_size_read(inode));
@@ -1169,6 +1189,8 @@ void ocfs2_refresh_inode(struct inode *inode,
 	spin_lock(&OCFS2_I(inode)->ip_lock);
 
 	OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
+	OCFS2_I(inode)->ip_attr = le32_to_cpu(fe->i_attr);
+	ocfs2_set_inode_flags(inode);
 	i_size_write(inode, le64_to_cpu(fe->i_size));
 	inode->i_nlink = le16_to_cpu(fe->i_links_count);
 	inode->i_uid = le32_to_cpu(fe->i_uid);
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h
index 35140f6cf840..4d1e53992566 100644
--- a/fs/ocfs2/inode.h
+++ b/fs/ocfs2/inode.h
@@ -56,6 +56,7 @@ struct ocfs2_inode_info
 	struct ocfs2_journal_handle	*ip_handle;
 
 	u32				ip_flags; /* see below */
+	u32				ip_attr; /* inode attributes */
 
 	/* protected by recovery_lock. */
 	struct inode			*ip_next_orphan;
@@ -142,4 +143,6 @@ int ocfs2_mark_inode_dirty(struct ocfs2_journal_handle *handle,
 int ocfs2_aio_read(struct file *file, struct kiocb *req, struct iocb *iocb);
 int ocfs2_aio_write(struct file *file, struct kiocb *req, struct iocb *iocb);
 
+void ocfs2_set_inode_flags(struct inode *inode);
+
 #endif /* OCFS2_INODE_H */
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
new file mode 100644
index 000000000000..3663cef80689
--- /dev/null
+++ b/fs/ocfs2/ioctl.c
@@ -0,0 +1,136 @@
+/*
+ * linux/fs/ocfs2/ioctl.c
+ *
+ * Copyright (C) 2006 Herbert Poetzl
+ * adapted from Remy Card's ext2/ioctl.c
+ */
+
+#include <linux/fs.h>
+#include <linux/mount.h>
+
+#define MLOG_MASK_PREFIX ML_INODE
+#include <cluster/masklog.h>
+
+#include "ocfs2.h"
+#include "alloc.h"
+#include "dlmglue.h"
+#include "inode.h"
+#include "journal.h"
+
+#include "ocfs2_fs.h"
+#include "ioctl.h"
+
+#include <linux/ext2_fs.h>
+
+static int ocfs2_get_inode_attr(struct inode *inode, unsigned *flags)
+{
+	int status;
+
+	status = ocfs2_meta_lock(inode, NULL, NULL, 0);
+	if (status < 0) {
+		mlog_errno(status);
+		return status;
+	}
+	*flags = OCFS2_I(inode)->ip_attr;
+	ocfs2_meta_unlock(inode, 0);
+
+	mlog_exit(status);
+	return status;
+}
+
+static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
+				unsigned mask)
+{
+	struct ocfs2_inode_info *ocfs2_inode = OCFS2_I(inode);
+	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+	struct ocfs2_journal_handle *handle = NULL;
+	struct buffer_head *bh = NULL;
+	unsigned oldflags;
+	int status;
+
+	mutex_lock(&inode->i_mutex);
+
+	status = ocfs2_meta_lock(inode, NULL, &bh, 1);
+	if (status < 0) {
+		mlog_errno(status);
+		goto bail;
+	}
+
+	status = -EROFS;
+	if (IS_RDONLY(inode))
+		goto bail_unlock;
+
+	status = -EACCES;
+	if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+		goto bail_unlock;
+
+	if (!S_ISDIR(inode->i_mode))
+		flags &= ~OCFS2_DIRSYNC_FL;
+
+	handle = ocfs2_start_trans(osb, NULL, OCFS2_INODE_UPDATE_CREDITS);
+	if (IS_ERR(handle)) {
+		status = PTR_ERR(handle);
+		mlog_errno(status);
+		goto bail_unlock;
+	}
+
+	oldflags = ocfs2_inode->ip_attr;
+	flags = flags & mask;
+	flags |= oldflags & ~mask;
+
+	/*
+	 * The IMMUTABLE and APPEND_ONLY flags can only be changed by
+	 * the relevant capability.
+	 */
+	status = -EPERM;
+	if ((oldflags & OCFS2_IMMUTABLE_FL) || ((flags ^ oldflags) &
+		(OCFS2_APPEND_FL | OCFS2_IMMUTABLE_FL))) {
+		if (!capable(CAP_LINUX_IMMUTABLE))
+			goto bail_unlock;
+	}
+
+	ocfs2_inode->ip_attr = flags;
+	ocfs2_set_inode_flags(inode);
+
+	status = ocfs2_mark_inode_dirty(handle, inode, bh);
+	if (status < 0)
+		mlog_errno(status);
+
+	ocfs2_commit_trans(handle);
+bail_unlock:
+	ocfs2_meta_unlock(inode, 1);
+bail:
+	mutex_unlock(&inode->i_mutex);
+
+	if (bh)
+		brelse(bh);
+
+	mlog_exit(status);
+	return status;
+}
+
+int ocfs2_ioctl(struct inode * inode, struct file * filp,
+	unsigned int cmd, unsigned long arg)
+{
+	unsigned int flags;
+	int status;
+
+	switch (cmd) {
+	case OCFS2_IOC_GETFLAGS:
+		status = ocfs2_get_inode_attr(inode, &flags);
+		if (status < 0)
+			return status;
+
+		flags &= OCFS2_FL_VISIBLE;
+		return put_user(flags, (int __user *) arg);
+	case OCFS2_IOC_SETFLAGS:
+		if (get_user(flags, (int __user *) arg))
+			return -EFAULT;
+
+		return ocfs2_set_inode_attr(inode, flags,
+			OCFS2_FL_MODIFIABLE);
+	default:
+		return -ENOTTY;
+	}
+}
+
diff --git a/fs/ocfs2/ioctl.h b/fs/ocfs2/ioctl.h
new file mode 100644
index 000000000000..4a7c82931dba
--- /dev/null
+++ b/fs/ocfs2/ioctl.h
@@ -0,0 +1,16 @@
+/*
+ * ioctl.h
+ *
+ * Function prototypes
+ *
+ * Copyright (C) 2006 Herbert Poetzl
+ *
+ */
+
+#ifndef OCFS2_IOCTL_H
+#define OCFS2_IOCTL_H
+
+int ocfs2_ioctl(struct inode * inode, struct file * filp,
+	unsigned int cmd, unsigned long arg);
+
+#endif /* OCFS2_IOCTL_H */
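With ocfs2_ioctl() wired into both ocfs2_fops and ocfs2_dops in file.c, the attribute bits become reachable from userspace through the usual ext2-style flags interface. A minimal, hypothetical test program (the OCFS2_IOC_* and OCFS2_NOATIME_FL values are copied from the ocfs2_fs.h hunk below, which deliberately keeps them in sync with ext2):

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>

	#define OCFS2_IOC_GETFLAGS	_IOR('f', 1, long)
	#define OCFS2_IOC_SETFLAGS	_IOW('f', 2, long)
	#define OCFS2_NOATIME_FL	0x00000080

	int main(int argc, char *argv[])
	{
		int flags, fd = open(argv[1], O_RDONLY);

		if (fd < 0 || ioctl(fd, OCFS2_IOC_GETFLAGS, &flags) < 0)
			return 1;
		flags |= OCFS2_NOATIME_FL;	/* set one modifiable bit */
		if (ioctl(fd, OCFS2_IOC_SETFLAGS, &flags) < 0)
			return 1;
		close(fd);
		return 0;
	}

Note that the kernel side transfers an int even though the command encodes a long, the same long-standing quirk ext2 has.
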
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 0673862c8bdd..0d3e939b1f56 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -56,6 +56,7 @@
 #include "journal.h"
 #include "namei.h"
 #include "suballoc.h"
+#include "super.h"
 #include "symlink.h"
 #include "sysfile.h"
 #include "uptodate.h"
@@ -310,13 +311,6 @@ static int ocfs2_mknod(struct inode *dir,
 	/* get our super block */
 	osb = OCFS2_SB(dir->i_sb);
 
-	if (S_ISDIR(mode) && (dir->i_nlink >= OCFS2_LINK_MAX)) {
-		mlog(ML_ERROR, "inode %llu has i_nlink of %u\n",
-		     (unsigned long long)OCFS2_I(dir)->ip_blkno, dir->i_nlink);
-		status = -EMLINK;
-		goto leave;
-	}
-
 	handle = ocfs2_alloc_handle(osb);
 	if (handle == NULL) {
 		status = -ENOMEM;
@@ -331,6 +325,11 @@
 		goto leave;
 	}
 
+	if (S_ISDIR(mode) && (dir->i_nlink >= OCFS2_LINK_MAX)) {
+		status = -EMLINK;
+		goto leave;
+	}
+
 	dirfe = (struct ocfs2_dinode *) parent_fe_bh->b_data;
 	if (!dirfe->i_links_count) {
 		/* can't make a file in a deleted directory. */
@@ -643,11 +642,6 @@ static int ocfs2_link(struct dentry *old_dentry,
 		goto bail;
 	}
 
-	if (inode->i_nlink >= OCFS2_LINK_MAX) {
-		err = -EMLINK;
-		goto bail;
-	}
-
 	handle = ocfs2_alloc_handle(osb);
 	if (handle == NULL) {
 		err = -ENOMEM;
@@ -661,6 +655,11 @@
 		goto bail;
 	}
 
+	if (!dir->i_nlink) {
+		err = -ENOENT;
+		goto bail;
+	}
+
 	err = ocfs2_check_dir_for_entry(dir, dentry->d_name.name,
 					dentry->d_name.len);
 	if (err)
@@ -1964,13 +1963,8 @@ restart:
 			}
 			num++;
-			/* XXX: questionable readahead stuff here */
 			bh = ocfs2_bread(dir, b++, &err, 1);
 			bh_use[ra_max] = bh;
-#if 0		// ???
-			if (bh)
-				ll_rw_block(READ, 1, &bh);
-#endif
 		}
 	}
 	if ((bh = bh_use[ra_ptr++]) == NULL)
@@ -1978,6 +1972,10 @@
 		wait_on_buffer(bh);
 		if (!buffer_uptodate(bh)) {
 			/* read error, skip block & hope for the best */
+			ocfs2_error(dir->i_sb, "reading directory %llu, "
+				    "offset %lu\n",
+				    (unsigned long long)OCFS2_I(dir)->ip_blkno,
+				    block);
 			brelse(bh);
 			goto next;
 		}
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index c5b1ac547c15..3330a5dc6be2 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -114,6 +114,26 @@
 #define OCFS2_CHAIN_FL		(0x00000400)	/* Chain allocator */
 #define OCFS2_DEALLOC_FL	(0x00000800)	/* Truncate log */
 
+/* Inode attributes, keep in sync with EXT2 */
+#define OCFS2_SECRM_FL		(0x00000001)	/* Secure deletion */
+#define OCFS2_UNRM_FL		(0x00000002)	/* Undelete */
+#define OCFS2_COMPR_FL		(0x00000004)	/* Compress file */
+#define OCFS2_SYNC_FL		(0x00000008)	/* Synchronous updates */
+#define OCFS2_IMMUTABLE_FL	(0x00000010)	/* Immutable file */
+#define OCFS2_APPEND_FL		(0x00000020)	/* writes to file may only append */
+#define OCFS2_NODUMP_FL		(0x00000040)	/* do not dump file */
+#define OCFS2_NOATIME_FL	(0x00000080)	/* do not update atime */
+#define OCFS2_DIRSYNC_FL	(0x00010000)	/* dirsync behaviour (directories only) */
+
+#define OCFS2_FL_VISIBLE	(0x000100FF)	/* User visible flags */
+#define OCFS2_FL_MODIFIABLE	(0x000100FF)	/* User modifiable flags */
+
+/*
+ * ioctl commands
+ */
+#define OCFS2_IOC_GETFLAGS	_IOR('f', 1, long)
+#define OCFS2_IOC_SETFLAGS	_IOW('f', 2, long)
+
 /*
  * Journal Flags (ocfs2_dinode.id1.journal1.i_flags)
  */
@@ -399,7 +419,9 @@ struct ocfs2_dinode {
 	__le32		i_atime_nsec;
 	__le32		i_ctime_nsec;
 	__le32		i_mtime_nsec;
-/*70*/	__le64		i_reserved1[9];
+	__le32		i_attr;
+	__le32		i_reserved1;
+/*70*/	__le64		i_reserved2[8];
/*B8*/	union {
 		__le64	i_pad1;	/* Generic way to refer to this 64bit union */
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c
index b8a00a793326..9707ed7a3206 100644
--- a/fs/ocfs2/uptodate.c
+++ b/fs/ocfs2/uptodate.c
@@ -206,7 +206,10 @@ static int ocfs2_buffer_cached(struct ocfs2_inode_info *oi,
 }
 
 /* Warning: even if it returns true, this does *not* guarantee that
- * the block is stored in our inode metadata cache. */
+ * the block is stored in our inode metadata cache.
+ *
+ * This can be called under lock_buffer()
+ */
 int ocfs2_buffer_uptodate(struct inode *inode,
 			  struct buffer_head *bh)
 {
@@ -226,6 +229,16 @@ int ocfs2_buffer_uptodate(struct inode *inode,
 	return ocfs2_buffer_cached(OCFS2_I(inode), bh);
 }
 
+/*
+ * Determine whether a buffer is currently out on a read-ahead request.
+ * ip_io_sem should be held to serialize submitters with the logic here.
+ */
+int ocfs2_buffer_read_ahead(struct inode *inode,
+			    struct buffer_head *bh)
+{
+	return buffer_locked(bh) && ocfs2_buffer_cached(OCFS2_I(inode), bh);
+}
+
 /* Requires ip_lock */
 static void ocfs2_append_cache_array(struct ocfs2_caching_info *ci,
 				     sector_t block)
@@ -403,7 +416,11 @@ out_free:
  *
  * Note that this function may actually fail to insert the block if
  * memory cannot be allocated. This is not fatal however (but may
- * result in a performance penalty) */
+ * result in a performance penalty)
+ *
+ * Readahead buffers can be passed in here before the I/O request is
+ * completed.
+ */
 void ocfs2_set_buffer_uptodate(struct inode *inode,
 			       struct buffer_head *bh)
 {
diff --git a/fs/ocfs2/uptodate.h b/fs/ocfs2/uptodate.h
index 01cd32d26b06..2e73206059a8 100644
--- a/fs/ocfs2/uptodate.h
+++ b/fs/ocfs2/uptodate.h
@@ -40,5 +40,7 @@ void ocfs2_set_new_buffer_uptodate(struct inode *inode,
 				   struct buffer_head *bh);
 void ocfs2_remove_from_cache(struct inode *inode,
 			     struct buffer_head *bh);
+int ocfs2_buffer_read_ahead(struct inode *inode,
+			    struct buffer_head *bh);
 
 #endif /* OCFS2_UPTODATE_H */
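ocfs2_buffer_read_ahead() composes two existing tests: a buffer that is still locked (I/O in flight) and is also tracked in the inode's metadata cache can only be a read-ahead this node issued itself. A hedged sketch of the call pattern this enables in a read path (hypothetical caller; the real consumer belongs in the buffer I/O code and must hold ip_io_sem, as the comment above says):

	/* Hypothetical: reuse an in-flight readahead rather than
	 * resubmitting the same block. */
	if (ocfs2_buffer_read_ahead(inode, bh)) {
		wait_on_buffer(bh);		/* let the readahead finish */
		if (!buffer_uptodate(bh))
			status = -EIO;		/* readahead I/O failed */
	}
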
diff --git a/include/Kbuild b/include/Kbuild
index cb2534800b19..2d03f995865f 100644
--- a/include/Kbuild
+++ b/include/Kbuild
@@ -1,2 +1,9 @@
-header-y += asm-generic/ linux/ scsi/ sound/ mtd/ rdma/ video/
-header-y += asm-$(ARCH)/
+header-y += asm-generic/
+header-y += linux/
+header-y += scsi/
+header-y += sound/
+header-y += mtd/
+header-y += rdma/
+header-y += video/
+
+header-y += asm-$(ARCH)/
diff --git a/include/asm-alpha/Kbuild b/include/asm-alpha/Kbuild
index 2b06b3bad5ff..b7c8f188b313 100644
--- a/include/asm-alpha/Kbuild
+++ b/include/asm-alpha/Kbuild
@@ -1,5 +1,11 @@
 include include/asm-generic/Kbuild.asm
 
-unifdef-y += console.h fpu.h sysinfo.h compiler.h
+header-y += gentrap.h
+header-y += regdef.h
+header-y += pal.h
+header-y += reg.h
 
-header-y += gentrap.h regdef.h pal.h reg.h
+unifdef-y += console.h
+unifdef-y += fpu.h
+unifdef-y += sysinfo.h
+unifdef-y += compiler.h
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index 70594b275a6e..3c06be381701 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -1,3 +1,12 @@
-header-y += atomic.h errno-base.h errno.h fcntl.h ioctl.h ipc.h mman.h \
-	signal.h statfs.h
-unifdef-y := resource.h siginfo.h
+header-y += atomic.h
+header-y += errno-base.h
+header-y += errno.h
+header-y += fcntl.h
+header-y += ioctl.h
+header-y += ipc.h
+header-y += mman.h
+header-y += signal.h
+header-y += statfs.h
+
+unifdef-y += resource.h
+unifdef-y += siginfo.h
diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm
index c00de6028fa8..a84c3d88a189 100644
--- a/include/asm-generic/Kbuild.asm
+++ b/include/asm-generic/Kbuild.asm
@@ -1,8 +1,34 @@
-unifdef-y += a.out.h auxvec.h byteorder.h errno.h fcntl.h ioctl.h \
-	ioctls.h ipcbuf.h mman.h msgbuf.h param.h poll.h \
-	posix_types.h ptrace.h resource.h sembuf.h shmbuf.h shmparam.h \
-	sigcontext.h siginfo.h signal.h socket.h sockios.h stat.h \
-	statfs.h termbits.h termios.h types.h unistd.h user.h
+unifdef-y += a.out.h
+unifdef-y += auxvec.h
+unifdef-y += byteorder.h
+unifdef-y += errno.h
+unifdef-y += fcntl.h
+unifdef-y += ioctl.h
+unifdef-y += ioctls.h
+unifdef-y += ipcbuf.h
+unifdef-y += mman.h
+unifdef-y += msgbuf.h
+unifdef-y += param.h
+unifdef-y += poll.h
+unifdef-y += posix_types.h
+unifdef-y += ptrace.h
+unifdef-y += resource.h
+unifdef-y += sembuf.h
+unifdef-y += shmbuf.h
+unifdef-y += sigcontext.h
+unifdef-y += siginfo.h
+unifdef-y += signal.h
+unifdef-y += socket.h
+unifdef-y += sockios.h
+unifdef-y += stat.h
+unifdef-y += statfs.h
+unifdef-y += termbits.h
+unifdef-y += termios.h
+unifdef-y += types.h
+unifdef-y += unistd.h
+unifdef-y += user.h
 
 # These probably shouldn't be exported
-unifdef-y += elf.h page.h
+unifdef-y += shmparam.h
+unifdef-y += elf.h
+unifdef-y += page.h
diff --git a/include/asm-i386/Kbuild b/include/asm-i386/Kbuild
index 2308190321da..b75a348d0c1c 100644
--- a/include/asm-i386/Kbuild
+++ b/include/asm-i386/Kbuild
@@ -1,5 +1,10 @@
 include include/asm-generic/Kbuild.asm
 
-header-y += boot.h debugreg.h ldt.h ucontext.h
+header-y += boot.h
+header-y += debugreg.h
+header-y += ldt.h
+header-y += ucontext.h
 
-unifdef-y += mtrr.h setup.h vm86.h
+unifdef-y += mtrr.h
+unifdef-y += setup.h
+unifdef-y += vm86.h
diff --git a/include/asm-ia64/Kbuild b/include/asm-ia64/Kbuild
index f1cb00f39c22..15818a18bc52 100644
--- a/include/asm-ia64/Kbuild
+++ b/include/asm-ia64/Kbuild
@@ -1,7 +1,17 @@
 include include/asm-generic/Kbuild.asm
 
-header-y += break.h fpu.h fpswa.h gcc_intrin.h ia64regs.h \
-	intel_intrin.h intrinsics.h perfmon_default_smpl.h \
-	ptrace_offsets.h rse.h setup.h ucontext.h
+header-y += break.h
+header-y += fpu.h
+header-y += fpswa.h
+header-y += gcc_intrin.h
+header-y += ia64regs.h
+header-y += intel_intrin.h
+header-y += intrinsics.h
+header-y += perfmon_default_smpl.h
+header-y += ptrace_offsets.h
+header-y += rse.h
+header-y += setup.h
+header-y += ucontext.h
 
-unifdef-y += perfmon.h ustack.h
+unifdef-y += perfmon.h
+unifdef-y += ustack.h
diff --git a/include/asm-powerpc/Kbuild b/include/asm-powerpc/Kbuild
index ac61d7eb6021..9827849953a3 100644
--- a/include/asm-powerpc/Kbuild
+++ b/include/asm-powerpc/Kbuild
@@ -1,10 +1,41 @@
 include include/asm-generic/Kbuild.asm
 
-unifdef-y += a.out.h asm-compat.h bootx.h byteorder.h cputable.h elf.h \
-	nvram.h param.h posix_types.h ptrace.h seccomp.h signal.h \
-	termios.h types.h unistd.h
+header-y += auxvec.h
+header-y += ioctls.h
+header-y += mman.h
+header-y += sembuf.h
+header-y += siginfo.h
+header-y += stat.h
+header-y += errno.h
+header-y += ipcbuf.h
+header-y += msgbuf.h
+header-y += shmbuf.h
+header-y += socket.h
+header-y += termbits.h
+header-y += fcntl.h
+header-y += ipc.h
+header-y += poll.h
+header-y += shmparam.h
+header-y += sockios.h
+header-y += ucontext.h
+header-y += ioctl.h
+header-y += linkage.h
+header-y += resource.h
+header-y += sigcontext.h
+header-y += statfs.h
 
-header-y += auxvec.h ioctls.h mman.h sembuf.h siginfo.h stat.h errno.h \
-	ipcbuf.h msgbuf.h shmbuf.h socket.h termbits.h fcntl.h ipc.h \
-	poll.h shmparam.h sockios.h ucontext.h ioctl.h linkage.h \
-	resource.h sigcontext.h statfs.h
+unifdef-y += a.out.h
+unifdef-y += asm-compat.h
+unifdef-y += bootx.h
+unifdef-y += byteorder.h
+unifdef-y += cputable.h
+unifdef-y += elf.h
+unifdef-y += nvram.h
+unifdef-y += param.h
+unifdef-y += posix_types.h
+unifdef-y += ptrace.h
+unifdef-y += seccomp.h
+unifdef-y += signal.h
+unifdef-y += termios.h
+unifdef-y += types.h
+unifdef-y += unistd.h
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 1ba3c9983614..12707ab9dc98 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -23,6 +23,7 @@
 #define PPC_FEATURE_SMT			0x00004000
 #define PPC_FEATURE_ICACHE_SNOOP	0x00002000
 #define PPC_FEATURE_ARCH_2_05		0x00001000
+#define PPC_FEATURE_PA6T		0x00000800
 
 #define PPC_FEATURE_TRUE_LE		0x00000002
 #define PPC_FEATURE_PPC_LE		0x00000001
@@ -36,6 +37,7 @@ struct cpu_spec;
 
 typedef	void (*cpu_setup_t)(unsigned long offset, struct cpu_spec* spec);
+typedef	void (*cpu_restore_t)(void);
 
 enum powerpc_oprofile_type {
 	PPC_OPROFILE_INVALID = 0,
@@ -65,6 +67,8 @@ struct cpu_spec {
 	 * BHT, SPD, etc... from head.S before branching to identify_machine
 	 */
 	cpu_setup_t	cpu_setup;
+	/* Used to restore cpu setup on secondary processors and at resume */
+	cpu_restore_t	cpu_restore;
 
 	/* Used by oprofile userspace to select the right counters */
 	char		*oprofile_cpu_type;
@@ -145,7 +149,7 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
 
 #define CPU_FTR_PPCAS_ARCH_V2_BASE (CPU_FTR_SLB | \
 	CPU_FTR_TLBIEL | CPU_FTR_NOEXECUTE | \
-	CPU_FTR_NODSISRALIGN | CPU_FTR_CTRL)
+	CPU_FTR_NODSISRALIGN)
 
 /* iSeries doesn't support large pages */
 #ifdef CONFIG_PPC_ISERIES
@@ -310,24 +314,29 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | \
 	    CPU_FTR_MMCRA | CPU_FTR_CTRL)
 #define CPU_FTRS_POWER4	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
-	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_MMCRA)
+	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+	    CPU_FTR_MMCRA)
 #define CPU_FTRS_PPC970	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
-	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
+	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA)
 #define CPU_FTRS_POWER5	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
-	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
+	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
 	    CPU_FTR_PURR)
 #define CPU_FTRS_POWER6 (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
-	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
+	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
 	    CPU_FTR_PURR | CPU_FTR_CI_LARGE_PAGE | CPU_FTR_REAL_LE)
 #define CPU_FTRS_CELL	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
-	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
+	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
-	    CPU_FTR_CTRL | CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE)
+	    CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE)
+#define CPU_FTRS_PA6T (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
+	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
+	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \
+	    CPU_FTR_PURR | CPU_FTR_REAL_LE)
 #define CPU_FTRS_COMPATIBLE	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2)
 #endif
@@ -336,7 +345,7 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
 #define CPU_FTRS_POSSIBLE	\
 	    (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \
 	    CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \
-	    CPU_FTRS_CELL | CPU_FTR_CI_LARGE_PAGE)
+	    CPU_FTRS_CELL | CPU_FTRS_PA6T)
 #else
 enum {
 	CPU_FTRS_POSSIBLE =
@@ -375,7 +384,7 @@ enum {
 #define CPU_FTRS_ALWAYS	\
 	    (CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & \
 	    CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_POWER6 & \
-	    CPU_FTRS_CELL & CPU_FTRS_POSSIBLE)
+	    CPU_FTRS_CELL & CPU_FTRS_PA6T & CPU_FTRS_POSSIBLE)
 #else
 enum {
 	CPU_FTRS_ALWAYS =
diff --git a/include/asm-powerpc/hvcall.h b/include/asm-powerpc/hvcall.h
index 0d3c4e85711a..257d1cecb8c9 100644
--- a/include/asm-powerpc/hvcall.h
+++ b/include/asm-powerpc/hvcall.h
@@ -164,9 +164,15 @@
 #define H_VIO_SIGNAL		0x104
 #define H_SEND_CRQ		0x108
 #define H_COPY_RDMA		0x110
+#define H_REGISTER_LOGICAL_LAN	0x114
+#define H_FREE_LOGICAL_LAN	0x118
+#define H_ADD_LOGICAL_LAN_BUFFER 0x11C
+#define H_SEND_LOGICAL_LAN	0x120
+#define H_MULTICAST_CTRL	0x130
 #define H_SET_XDABR		0x134
 #define H_STUFF_TCE		0x138
 #define H_PUT_TCE_INDIRECT	0x13C
+#define H_CHANGE_LOGICAL_LAN_MAC 0x14C
 #define H_VTERM_PARTNER_INFO	0x150
 #define H_REGISTER_VTERM	0x154
 #define H_FREE_VTERM		0x158
@@ -196,102 +202,59 @@
 #define H_GET_HCA_INFO          0x1B8
 #define H_GET_PERF_COUNT        0x1BC
 #define H_MANAGE_TRACE          0x1C0
+#define H_FREE_LOGICAL_LAN_BUFFER 0x1D4
 #define H_QUERY_INT_STATE       0x1E4
 #define H_POLL_PENDING		0x1D8
 #define H_JOIN			0x298
 #define H_VASI_STATE            0x2A4
 #define H_ENABLE_CRQ		0x2B0
+#define MAX_HCALL_OPCODE	H_ENABLE_CRQ
 
 #ifndef __ASSEMBLY__
 
-/* plpar_hcall() -- Generic call interface using above opcodes
+/**
+ * plpar_hcall_norets: - Make a pseries hypervisor call with no return arguments
+ * @opcode: The hypervisor call to make.
  *
- * The actual call interface is a hypervisor call instruction with
- * the opcode in R3 and input args in R4-R7.
- * Status is returned in R3 with variable output values in R4-R11.
- * Only H_PTE_READ with H_READ_4 uses R6-R11 so we ignore it for now
- * and return only two out args which MUST ALWAYS BE PROVIDED.
- */
-long plpar_hcall(unsigned long opcode,
-		 unsigned long arg1,
-		 unsigned long arg2,
-		 unsigned long arg3,
-		 unsigned long arg4,
-		 unsigned long *out1,
-		 unsigned long *out2,
-		 unsigned long *out3);
-
-/* Same as plpar_hcall but for those opcodes that return no values
- * other than status. Slightly more efficient.
+ * This call supports up to 7 arguments and only returns the status of
+ * the hcall. Use this version where possible, it's slightly faster than
+ * the other plpar_hcalls.
  */
 long plpar_hcall_norets(unsigned long opcode, ...);
 
-/*
- * Special hcall interface for ibmveth support.
- * Takes 8 input parms. Returns a rc and stores the
- * R4 return value in *out1.
- */
-long plpar_hcall_8arg_2ret(unsigned long opcode,
-			   unsigned long arg1,
-			   unsigned long arg2,
-			   unsigned long arg3,
-			   unsigned long arg4,
-			   unsigned long arg5,
-			   unsigned long arg6,
-			   unsigned long arg7,
-			   unsigned long arg8,
-			   unsigned long *out1);
-
-/* plpar_hcall_4out()
+/**
+ * plpar_hcall: - Make a pseries hypervisor call
+ * @opcode: The hypervisor call to make.
+ * @retbuf: Buffer to store up to 4 return arguments in.
 *
- * same as plpar_hcall except with 4 output arguments.
+ * This call supports up to 6 arguments and 4 return arguments. Use
+ * PLPAR_HCALL_BUFSIZE to size the return argument buffer.
 *
+ * Used for all but the craziest of phyp interfaces (see plpar_hcall9)
 */
-long plpar_hcall_4out(unsigned long opcode,
-		      unsigned long arg1,
-		      unsigned long arg2,
-		      unsigned long arg3,
-		      unsigned long arg4,
-		      unsigned long *out1,
-		      unsigned long *out2,
-		      unsigned long *out3,
-		      unsigned long *out4);
+#define PLPAR_HCALL_BUFSIZE 4
+long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...);
 
-long plpar_hcall_7arg_7ret(unsigned long opcode,
-			   unsigned long arg1,
-			   unsigned long arg2,
-			   unsigned long arg3,
-			   unsigned long arg4,
-			   unsigned long arg5,
-			   unsigned long arg6,
-			   unsigned long arg7,
-			   unsigned long *out1,
-			   unsigned long *out2,
-			   unsigned long *out3,
-			   unsigned long *out4,
-			   unsigned long *out5,
-			   unsigned long *out6,
-			   unsigned long *out7);
+/**
+ * plpar_hcall9: - Make a pseries hypervisor call with up to 9 return arguments
+ * @opcode: The hypervisor call to make.
+ * @retbuf: Buffer to store up to 9 return arguments in.
+ *
+ * This call supports up to 9 arguments and 9 return arguments. Use
+ * PLPAR_HCALL9_BUFSIZE to size the return argument buffer.
+ */
+#define PLPAR_HCALL9_BUFSIZE 9
+long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...);
 
-long plpar_hcall_9arg_9ret(unsigned long opcode,
-			   unsigned long arg1,
-			   unsigned long arg2,
-			   unsigned long arg3,
-			   unsigned long arg4,
-			   unsigned long arg5,
-			   unsigned long arg6,
-			   unsigned long arg7,
-			   unsigned long arg8,
-			   unsigned long arg9,
-			   unsigned long *out1,
-			   unsigned long *out2,
-			   unsigned long *out3,
-			   unsigned long *out4,
-			   unsigned long *out5,
-			   unsigned long *out6,
-			   unsigned long *out7,
-			   unsigned long *out8,
-			   unsigned long *out9);
+/* For hcall instrumentation. One structure per-hcall, per-CPU */
+struct hcall_stats {
+	unsigned long	num_calls;	/* number of calls (on this CPU) */
+	unsigned long	tb_total;	/* total wall time (mftb) of calls. */
+	unsigned long	purr_total;	/* total cpu time (PURR) of calls. */
+};
+void update_hcall_stats(unsigned long opcode, unsigned long tb_delta,
+			unsigned long purr_delta);
+#define HCALL_STAT_ARRAY_SIZE	((MAX_HCALL_OPCODE >> 2) + 1)
 
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
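The two variadic entry points replace five fixed-arity helpers, so every caller now passes inputs as trailing arguments and reads outputs back from retbuf. A hedged sketch of a migrated call site (hypothetical; H_VIO_SIGNAL stands in for any opcode with a small number of outputs):

	/* Hypothetical call site using the new convention. */
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_VIO_SIGNAL, retbuf, unit_address, mode);
	if (rc == H_SUCCESS)
		/* outputs, formerly *out1/*out2, arrive in retbuf[0..] */
		process_outputs(retbuf[0], retbuf[1]);

unit_address, mode and process_outputs() are placeholders, not part of the patch.
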
diff --git a/include/asm-powerpc/ibmebus.h b/include/asm-powerpc/ibmebus.h
index 7a42723d107c..7ab195a27888 100644
--- a/include/asm-powerpc/ibmebus.h
+++ b/include/asm-powerpc/ibmebus.h
@@ -48,7 +48,7 @@ extern struct dma_mapping_ops ibmebus_dma_ops;
 extern struct bus_type ibmebus_bus_type;
 
 struct ibmebus_dev {
-	char *name;
+	const char *name;
 	struct of_device ofdev;
 };
 
diff --git a/include/asm-powerpc/ide.h b/include/asm-powerpc/ide.h
index b09b42af6a1e..c8390f9485de 100644
--- a/include/asm-powerpc/ide.h
+++ b/include/asm-powerpc/ide.h
@@ -12,6 +12,7 @@
 #include <linux/sched.h>
 #include <asm/mpc8xx.h>
 #endif
+#include <asm/io.h>
 
 #ifndef MAX_HWIFS
 #ifdef __powerpc64__
@@ -21,15 +22,14 @@
 #endif
 #endif
 
+#define __ide_mm_insw(p, a, c)	_insw_ns((volatile u16 __iomem *)(p), (a), (c))
+#define __ide_mm_insl(p, a, c)	_insl_ns((volatile u32 __iomem *)(p), (a), (c))
+#define __ide_mm_outsw(p, a, c)	_outsw_ns((volatile u16 __iomem *)(p), (a), (c))
+#define __ide_mm_outsl(p, a, c)	_outsl_ns((volatile u32 __iomem *)(p), (a), (c))
+
 #ifndef  __powerpc64__
 #include <linux/hdreg.h>
 #include <linux/ioport.h>
-#include <asm/io.h>
-
-extern void __ide_mm_insw(void __iomem *port, void *addr, u32 count);
-extern void __ide_mm_outsw(void __iomem *port, void *addr, u32 count);
-extern void __ide_mm_insl(void __iomem *port, void *addr, u32 count);
-extern void __ide_mm_outsl(void __iomem *port, void *addr, u32 count);
 
 struct ide_machdep_calls {
         int         (*default_irq)(unsigned long base);
diff --git a/include/asm-powerpc/io.h b/include/asm-powerpc/io.h
index 212428db0d8b..46bae1cf385b 100644
--- a/include/asm-powerpc/io.h
+++ b/include/asm-powerpc/io.h
@@ -20,20 +20,11 @@ extern int check_legacy_ioport(unsigned long base_port);
 #include <asm/page.h>
 #include <asm/byteorder.h>
 #include <asm/paca.h>
-#ifdef CONFIG_PPC_ISERIES
-#include <asm/iseries/iseries_io.h>
-#endif
 #include <asm/synch.h>
 #include <asm/delay.h>
 #include <asm-generic/iomap.h>
 
-#define __ide_mm_insw(p, a, c) _insw_ns((volatile u16 __iomem *)(p), (a), (c))
-#define __ide_mm_insl(p, a, c) _insl_ns((volatile u32 __iomem *)(p), (a), (c))
-#define __ide_mm_outsw(p, a, c) _outsw_ns((volatile u16 __iomem *)(p), (a), (c))
-#define __ide_mm_outsl(p, a, c) _outsl_ns((volatile u32 __iomem *)(p), (a), (c))
-
-
 #define SIO_CONFIG_RA	0x398
 #define SIO_CONFIG_RD	0x399
 
@@ -43,42 +34,53 @@ extern unsigned long isa_io_base;
 extern unsigned long pci_io_base;
 
 #ifdef CONFIG_PPC_ISERIES
-/* __raw_* accessors aren't supported on iSeries */
-#define __raw_readb(addr)	{ BUG(); 0; }
-#define __raw_readw(addr)	{ BUG(); 0; }
-#define __raw_readl(addr)	{ BUG(); 0; }
-#define __raw_readq(addr)	{ BUG(); 0; }
-#define __raw_writeb(v, addr)	{ BUG(); 0; }
-#define __raw_writew(v, addr)	{ BUG(); 0; }
-#define __raw_writel(v, addr)	{ BUG(); 0; }
-#define __raw_writeq(v, addr)	{ BUG(); 0; }
-#define readb(addr)		iSeries_Read_Byte(addr)
-#define readw(addr)		iSeries_Read_Word(addr)
-#define readl(addr)		iSeries_Read_Long(addr)
-#define writeb(data, addr)	iSeries_Write_Byte((data),(addr))
-#define writew(data, addr)	iSeries_Write_Word((data),(addr))
-#define writel(data, addr)	iSeries_Write_Long((data),(addr))
-#define memset_io(a,b,c)	iSeries_memset_io((a),(b),(c))
-#define memcpy_fromio(a,b,c)	iSeries_memcpy_fromio((a), (b), (c))
-#define memcpy_toio(a,b,c)	iSeries_memcpy_toio((a), (b), (c))
-
-#define inb(addr)		readb(((void __iomem *)(long)(addr)))
-#define inw(addr)		readw(((void __iomem *)(long)(addr)))
-#define inl(addr)		readl(((void __iomem *)(long)(addr)))
-#define outb(data,addr)		writeb(data,((void __iomem *)(long)(addr)))
-#define outw(data,addr)		writew(data,((void __iomem *)(long)(addr)))
-#define outl(data,addr)		writel(data,((void __iomem *)(long)(addr)))
-/*
- * The *_ns versions below don't do byte-swapping.
- * Neither do the standard versions now, these are just here
- * for older code.
- */
-#define insb(port, buf, ns)	_insb((u8 __iomem *)((port)+pci_io_base), (buf), (ns))
-#define insw(port, buf, ns)	_insw_ns((u8 __iomem *)((port)+pci_io_base), (buf), (ns))
-#define insl(port, buf, nl)	_insl_ns((u8 __iomem *)((port)+pci_io_base), (buf), (nl))
-#define insw_ns(port, buf, ns)	_insw_ns((u16 __iomem *)((port)+pci_io_base), (buf), (ns))
-#define insl_ns(port, buf, nl)	_insl_ns((u32 __iomem *)((port)+pci_io_base), (buf), (nl))
-#else
+
+extern int in_8(const volatile unsigned char __iomem *addr);
+extern void out_8(volatile unsigned char __iomem *addr, int val);
+extern int in_le16(const volatile unsigned short __iomem *addr);
+extern int in_be16(const volatile unsigned short __iomem *addr);
+extern void out_le16(volatile unsigned short __iomem *addr, int val);
+extern void out_be16(volatile unsigned short __iomem *addr, int val);
+extern unsigned in_le32(const volatile unsigned __iomem *addr);
+extern unsigned in_be32(const volatile unsigned __iomem *addr);
+extern void out_le32(volatile unsigned __iomem *addr, int val);
+extern void out_be32(volatile unsigned __iomem *addr, int val);
+extern unsigned long in_le64(const volatile unsigned long __iomem *addr);
+extern unsigned long in_be64(const volatile unsigned long __iomem *addr);
+extern void out_le64(volatile unsigned long __iomem *addr, unsigned long val);
+extern void out_be64(volatile unsigned long __iomem *addr, unsigned long val);
+
+extern unsigned char __raw_readb(const volatile void __iomem *addr);
+extern unsigned short __raw_readw(const volatile void __iomem *addr);
+extern unsigned int __raw_readl(const volatile void __iomem *addr);
+extern unsigned long __raw_readq(const volatile void __iomem *addr);
+extern void __raw_writeb(unsigned char v, volatile void __iomem *addr);
+extern void __raw_writew(unsigned short v, volatile void __iomem *addr);
+extern void __raw_writel(unsigned int v, volatile void __iomem *addr);
+extern void __raw_writeq(unsigned long v, volatile void __iomem *addr);
+
+extern void memset_io(volatile void __iomem *addr, int c, unsigned long n);
+extern void memcpy_fromio(void *dest, const volatile void __iomem *src,
+			  unsigned long n);
+extern void memcpy_toio(volatile void __iomem *dest, const void *src,
+			unsigned long n);
+
+#else /* CONFIG_PPC_ISERIES */
+
+#define in_8(addr)		__in_8((addr))
+#define out_8(addr, val)	__out_8((addr), (val))
+#define in_le16(addr)		__in_le16((addr))
+#define in_be16(addr)		__in_be16((addr))
+#define out_le16(addr, val)	__out_le16((addr), (val))
+#define out_be16(addr, val)	__out_be16((addr), (val))
+#define in_le32(addr)		__in_le32((addr))
+#define in_be32(addr)		__in_be32((addr))
+#define out_le32(addr, val)	__out_le32((addr), (val))
+#define out_be32(addr, val)	__out_be32((addr), (val))
+#define in_le64(addr)		__in_le64((addr))
+#define in_be64(addr)		__in_be64((addr))
+#define out_le64(addr, val)	__out_le64((addr), (val))
+#define out_be64(addr, val)	__out_be64((addr), (val))
 
 static inline unsigned char __raw_readb(const volatile void __iomem *addr)
 {
@@ -112,23 +114,11 @@ static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr)
 {
 	*(volatile unsigned long __force *)addr = v;
 }
-#define readb(addr)		eeh_readb(addr)
-#define readw(addr)		eeh_readw(addr)
-#define readl(addr)		eeh_readl(addr)
-#define readq(addr)		eeh_readq(addr)
-#define writeb(data, addr)	eeh_writeb((data), (addr))
-#define writew(data, addr)	eeh_writew((data), (addr))
-#define writel(data, addr)	eeh_writel((data), (addr))
-#define writeq(data, addr)	eeh_writeq((data), (addr))
 #define memset_io(a,b,c)	eeh_memset_io((a),(b),(c))
 #define memcpy_fromio(a,b,c)	eeh_memcpy_fromio((a),(b),(c))
 #define memcpy_toio(a,b,c)	eeh_memcpy_toio((a),(b),(c))
-#define inb(port)		eeh_inb((unsigned long)port)
-#define outb(val, port)		eeh_outb(val, (unsigned long)port)
-#define inw(port)		eeh_inw((unsigned long)port)
-#define outw(val, port)		eeh_outw(val, (unsigned long)port)
-#define inl(port)		eeh_inl((unsigned long)port)
-#define outl(val, port)		eeh_outl(val, (unsigned long)port)
+
+#endif /* CONFIG_PPC_ISERIES */
 
 /*
 * The insw/outsw/insl/outsl macros don't do byte-swapping.
@@ -138,30 +128,37 @@ static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr)
 #define insb(port, buf, ns)	eeh_insb((port), (buf), (ns))
 #define insw(port, buf, ns)	eeh_insw_ns((port), (buf), (ns))
 #define insl(port, buf, nl)	eeh_insl_ns((port), (buf), (nl))
-#define insw_ns(port, buf, ns)	eeh_insw_ns((port), (buf), (ns))
-#define insl_ns(port, buf, nl)	eeh_insl_ns((port), (buf), (nl))
-
-#endif
 
 #define outsb(port, buf, ns)  _outsb((u8 __iomem *)((port)+pci_io_base), (buf), (ns))
 #define outsw(port, buf, ns)  _outsw_ns((u16 __iomem *)((port)+pci_io_base), (buf), (ns))
 #define outsl(port, buf, nl)  _outsl_ns((u32 __iomem *)((port)+pci_io_base), (buf), (nl))
 
+#define readb(addr)		eeh_readb(addr)
+#define readw(addr)		eeh_readw(addr)
+#define readl(addr)		eeh_readl(addr)
+#define readq(addr)		eeh_readq(addr)
+#define writeb(data, addr)	eeh_writeb((data), (addr))
+#define writew(data, addr)	eeh_writew((data), (addr))
+#define writel(data, addr)	eeh_writel((data), (addr))
+#define writeq(data, addr)	eeh_writeq((data), (addr))
+#define inb(port)		eeh_inb((unsigned long)port)
+#define outb(val, port)		eeh_outb(val, (unsigned long)port)
+#define inw(port)		eeh_inw((unsigned long)port)
+#define outw(val, port)		eeh_outw(val, (unsigned long)port)
+#define inl(port)		eeh_inl((unsigned long)port)
+#define outl(val, port)		eeh_outl(val, (unsigned long)port)
+
 #define readb_relaxed(addr)	readb(addr)
 #define readw_relaxed(addr)	readw(addr)
 #define readl_relaxed(addr)	readl(addr)
 #define readq_relaxed(addr)	readq(addr)
 
-extern void _insb(volatile u8 __iomem *port, void *buf, int ns);
-extern void _outsb(volatile u8 __iomem *port, const void *buf, int ns);
-extern void _insw(volatile u16 __iomem *port, void *buf, int ns);
-extern void _outsw(volatile u16 __iomem *port, const void *buf, int ns);
-extern void _insl(volatile u32 __iomem *port, void *buf, int nl);
-extern void _outsl(volatile u32 __iomem *port, const void *buf, int nl);
-extern void _insw_ns(volatile u16 __iomem *port, void *buf, int ns);
-extern void _outsw_ns(volatile u16 __iomem *port, const void *buf, int ns);
-extern void _insl_ns(volatile u32 __iomem *port, void *buf, int nl);
-extern void _outsl_ns(volatile u32 __iomem *port, const void *buf, int nl);
+extern void _insb(volatile u8 __iomem *port, void *buf, long count);
+extern void _outsb(volatile u8 __iomem *port, const void *buf, long count);
+extern void _insw_ns(volatile u16 __iomem *port, void *buf, long count);
+extern void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count);
+extern void _insl_ns(volatile u32 __iomem *port, void *buf, long count);
+extern void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count);
 
 static inline void mmiowb(void)
 {
@@ -180,14 +177,6 @@ static inline void mmiowb(void)
 #define inl_p(port)             inl(port)
 #define outl_p(val, port)       (udelay(1), outl((val), (port)))
 
-/*
- * The *_ns versions below don't do byte-swapping.
- * Neither do the standard versions now, these are just here
- * for older code.
- */
-#define outsw_ns(port, buf, ns)	_outsw_ns((u16 __iomem *)((port)+pci_io_base), (buf), (ns))
-#define outsl_ns(port, buf, nl)	_outsl_ns((u32 __iomem *)((port)+pci_io_base), (buf), (nl))
-
 #define IO_SPACE_LIMIT ~(0UL)
 
@@ -279,7 +268,7 @@ static inline void iosync(void)
 * and should not be used directly by device drivers. Use inb/readb
 * instead.
 */
-static inline int in_8(const volatile unsigned char __iomem *addr)
+static inline int __in_8(const volatile unsigned char __iomem *addr)
 {
 	int ret;
 
@@ -288,14 +277,14 @@ static inline int in_8(const volatile unsigned char __iomem *addr)
 	return ret;
 }
 
-static inline void out_8(volatile unsigned char __iomem *addr, int val)
+static inline void __out_8(volatile unsigned char __iomem *addr, int val)
 {
 	__asm__ __volatile__("sync; stb%U0%X0 %1,%0"
 			     : "=m" (*addr) : "r" (val));
 	get_paca()->io_sync = 1;
 }
 
-static inline int in_le16(const volatile unsigned short __iomem *addr)
+static inline int __in_le16(const volatile unsigned short __iomem *addr)
 {
 	int ret;
 
@@ -304,7 +293,7 @@ static inline int in_le16(const volatile unsigned short __iomem *addr)
 	return ret;
 }
 
-static inline int in_be16(const volatile unsigned short __iomem *addr)
+static inline int __in_be16(const volatile unsigned short __iomem *addr)
 {
 	int ret;
 
@@ -313,21 +302,21 @@ static inline int in_be16(const volatile unsigned short __iomem *addr)
 	return ret;
 }
 
-static inline void out_le16(volatile unsigned short __iomem *addr, int val)
+static inline void __out_le16(volatile unsigned short __iomem *addr, int val)
 {
 	__asm__ __volatile__("sync; sthbrx %1,0,%2"
 			     : "=m" (*addr) : "r" (val), "r" (addr));
 	get_paca()->io_sync = 1;
 }
 
-static inline void out_be16(volatile unsigned short __iomem *addr, int val)
+static inline void __out_be16(volatile unsigned short __iomem *addr, int val)
 {
 	__asm__ __volatile__("sync; sth%U0%X0 %1,%0"
 			     : "=m" (*addr) : "r" (val));
 	get_paca()->io_sync = 1;
 }
 
-static inline unsigned in_le32(const volatile unsigned __iomem *addr)
+static inline unsigned __in_le32(const volatile unsigned __iomem *addr)
 {
 	unsigned ret;
 
@@ -336,7 +325,7 @@ static inline unsigned in_le32(const volatile unsigned __iomem *addr)
 	return ret;
 }
 
-static inline unsigned in_be32(const volatile unsigned __iomem *addr)
+static inline unsigned __in_be32(const volatile unsigned __iomem *addr)
 {
 	unsigned ret;
 
@@ -345,21 +334,21 @@ static inline unsigned in_be32(const volatile unsigned __iomem *addr)
 	return ret;
 }
 
-static inline void out_le32(volatile unsigned __iomem *addr, int val)
+static inline void __out_le32(volatile unsigned __iomem *addr, int val)
 {
 	__asm__ __volatile__("sync; stwbrx %1,0,%2" : "=m" (*addr)
 			     : "r" (val), "r" (addr));
 	get_paca()->io_sync = 1;
 }
 
-static inline void out_be32(volatile unsigned __iomem *addr, int val)
+static inline void __out_be32(volatile unsigned __iomem *addr, int val)
 {
 	__asm__ __volatile__("sync; stw%U0%X0 %1,%0"
 			     : "=m" (*addr) : "r" (val));
 	get_paca()->io_sync = 1;
 }
 
-static inline unsigned long in_le64(const volatile unsigned long __iomem *addr)
+static inline unsigned long __in_le64(const volatile unsigned long __iomem *addr)
 {
 	unsigned long tmp, ret;
 
@@ -379,7 +368,7 @@ static inline unsigned long in_le64(const volatile unsigned long __iomem *addr)
 	return ret;
 }
 
-static inline unsigned long in_be64(const volatile unsigned long __iomem *addr)
+static inline unsigned long __in_be64(const volatile unsigned long __iomem *addr)
 {
 	unsigned long ret;
 
@@ -388,7 +377,7 @@ static inline unsigned long in_be64(const volatile unsigned long __iomem *addr)
 	return ret;
 }
 
-static inline void out_le64(volatile unsigned long __iomem *addr, unsigned long val)
+static inline void __out_le64(volatile unsigned long __iomem *addr, unsigned long val)
 {
 	unsigned long tmp;
 
@@ -406,15 +395,13 @@ static inline void out_le64(volatile unsigned long __iomem *addr, unsigned long
 	get_paca()->io_sync = 1;
 }
 
-static inline void out_be64(volatile unsigned long __iomem *addr, unsigned long val)
+static inline void __out_be64(volatile unsigned long __iomem *addr, unsigned long val)
 {
 	__asm__ __volatile__("sync; std%U0%X0 %1,%0" : "=m" (*addr) : "r" (val));
 	get_paca()->io_sync = 1;
 }
 
-#ifndef CONFIG_PPC_ISERIES
 #include <asm/eeh.h>
-#endif
 
 /**
 * check_signature - find BIOS signatures
@@ -430,7 +417,6 @@ static inline int check_signature(const volatile void __iomem * io_addr,
 	const unsigned char *signature, int length)
 {
 	int retval = 0;
-#ifndef CONFIG_PPC_ISERIES
 	do {
 		if (readb(io_addr) != *signature)
 			goto out;
@@ -440,7 +426,6 @@ static inline int check_signature(const volatile void __iomem * io_addr,
 	} while (length);
 	retval = 1;
 out:
-#endif
 	return retval;
 }
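The net effect in io.h is that the eeh-backed readb()/writeb() family is defined once after the #ifdef block, with iSeries supplying out-of-line extern implementations and everyone else reaching the renamed __in_*/__out_* inlines through macros. Call sites stay the same either way; a brief hedged sketch of the unchanged pattern (hypothetical device and register offsets):

	/* Hypothetical MMIO access: resolves to the inline __in_be32()
	 * on most platforms and to an extern function on iSeries. */
	void __iomem *regs = ioremap(base_addr, 0x1000);

	u32 status = in_be32(regs + 0x10);	/* big-endian read */
	out_be32(regs + 0x14, 1);		/* kick the device */
	iounmap(regs);
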
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h
index d903a62959da..4da41efb1319 100644
--- a/include/asm-powerpc/irq.h
+++ b/include/asm-powerpc/irq.h
@@ -137,7 +137,7 @@ struct irq_map_entry {
 
 extern struct irq_map_entry irq_map[NR_IRQS];
 
-/***
+/**
 * irq_alloc_host - Allocate a new irq_host data structure
 * @node: device-tree node of the interrupt controller
 * @revmap_type: type of reverse mapping to use
@@ -159,14 +159,14 @@ extern struct irq_host *irq_alloc_host(unsigned int revmap_type,
				       irq_hw_number_t inval_irq);
 
-/***
+/**
 * irq_find_host - Locates a host for a given device node
 * @node: device-tree node of the interrupt controller
 */
 extern struct irq_host *irq_find_host(struct device_node *node);
 
-/***
+/**
 * irq_set_default_host - Set a "default" host
 * @host: default host pointer
 *
@@ -178,7 +178,7 @@ extern struct irq_host *irq_find_host(struct device_node *node);
 extern void irq_set_default_host(struct irq_host *host);
 
-/***
+/**
 * irq_set_virq_count - Set the maximum number of virt irqs
 * @count: number of linux virtual irqs, capped with NR_IRQS
 *
@@ -188,7 +188,7 @@ extern void irq_set_default_host(struct irq_host *host);
 extern void irq_set_virq_count(unsigned int count);
 
-/***
+/**
 * irq_create_mapping - Map a hardware interrupt into linux virq space
 * @host: host owning this hardware interrupt or NULL for default host
 * @hwirq: hardware irq number in that host space
@@ -202,13 +202,13 @@ extern unsigned int irq_create_mapping(struct irq_host *host,
				       irq_hw_number_t hwirq);
 
-/***
+/**
 * irq_dispose_mapping - Unmap an interrupt
 * @virq: linux virq number of the interrupt to unmap
 */
 extern void irq_dispose_mapping(unsigned int virq);
 
-/***
+/**
 * irq_find_mapping - Find a linux virq from an hw irq number.
 * @host: host owning this hardware interrupt
 * @hwirq: hardware irq number in that host space
@@ -221,7 +221,7 @@ extern unsigned int irq_find_mapping(struct irq_host *host,
				     irq_hw_number_t hwirq);
 
-/***
+/**
 * irq_radix_revmap - Find a linux virq from a hw irq number.
 * @host: host owning this hardware interrupt
 * @hwirq: hardware irq number in that host space
@@ -232,7 +232,7 @@ extern unsigned int irq_radix_revmap(struct irq_host *host,
				     irq_hw_number_t hwirq);
 
-/***
+/**
 * irq_linear_revmap - Find a linux virq from a hw irq number.
 * @host: host owning this hardware interrupt
 * @hwirq: hardware irq number in that host space
@@ -247,7 +247,7 @@ extern unsigned int irq_linear_revmap(struct irq_host *host,
 
-/***
+/**
 * irq_alloc_virt - Allocate virtual irq numbers
 * @host: host owning these new virtual irqs
 * @count: number of consecutive numbers to allocate
@@ -261,7 +261,7 @@ extern unsigned int irq_alloc_virt(struct irq_host *host,
				   unsigned int count,
				   unsigned int hint);
 
-/***
+/**
 * irq_free_virt - Free virtual irq numbers
 * @virq: virtual irq number of the first interrupt to free
 * @count: number of interrupts to free
@@ -300,7 +300,7 @@ extern unsigned int irq_of_parse_and_map(struct device_node *dev, int index);
 
 /* -- End OF helpers -- */
 
-/***
+/**
 * irq_early_init - Init irq remapping subsystem
 */
 extern void irq_early_init(void);
diff --git a/include/asm-powerpc/iseries/hv_call_xm.h b/include/asm-powerpc/iseries/hv_call_xm.h
index ca9202cb01ed..392ac3f54df0 100644
--- a/include/asm-powerpc/iseries/hv_call_xm.h
+++ b/include/asm-powerpc/iseries/hv_call_xm.h
@@ -16,23 +16,6 @@
 #define HvCallXmSetTce		HvCallXm + 11
 #define HvCallXmSetTces		HvCallXm + 13
 
-/*
- * Structure passed to HvCallXm_getTceTableParms
- */
-struct iommu_table_cb {
-	unsigned long	itc_busno;	/* Bus number for this tce table */
-	unsigned long	itc_start;	/* Will be NULL for secondary */
-	unsigned long	itc_totalsize;	/* Size (in pages) of whole table */
-	unsigned long	itc_offset;	/* Index into real tce table of the
-					   start of our section */
-	unsigned long	itc_size;	/* Size (in pages) of our section */
-	unsigned long	itc_index;	/* Index of this tce table */
-	unsigned short	itc_maxtables;	/* Max num of tables for partition */
-	unsigned char	itc_virtbus;	/* Flag to indicate virtual bus */
-	unsigned char	itc_slotno;	/* IOA Tce Slot Index */
-	unsigned char	itc_rsvd[4];
-};
-
 static inline void HvCallXm_getTceTableParms(u64 cb)
 {
 	HvCall1(HvCallXmGetTceTableParms, cb);
diff --git a/include/asm-powerpc/iseries/hv_lp_config.h b/include/asm-powerpc/iseries/hv_lp_config.h
index df8b20739719..a006fd1e4a2c 100644
--- a/include/asm-powerpc/iseries/hv_lp_config.h
+++ b/include/asm-powerpc/iseries/hv_lp_config.h
@@ -25,7 +25,6 @@
 
 #include <asm/iseries/hv_call_sc.h>
 #include <asm/iseries/hv_types.h>
-#include <asm/iseries/it_lp_naca.h>
 
 enum {
	HvCallCfg_Cur	= 0,
@@ -44,16 +43,8 @@ enum {
 #define HvCallCfgGetHostingLpIndex		HvCallCfg + 32
 
 extern HvLpIndex HvLpConfig_getLpIndex_outline(void);
-
-static inline HvLpIndex HvLpConfig_getLpIndex(void)
-{
-	return itLpNaca.xLpIndex;
-}
-
-static inline HvLpIndex HvLpConfig_getPrimaryLpIndex(void)
-{
-	return itLpNaca.xPrimaryLpIndex;
-}
+extern HvLpIndex HvLpConfig_getLpIndex(void);
+extern HvLpIndex HvLpConfig_getPrimaryLpIndex(void);
 
 static inline u64 HvLpConfig_getMsChunks(void)
 {
diff --git a/include/asm-powerpc/iseries/iseries_io.h b/include/asm-powerpc/iseries/iseries_io.h
deleted file mode 100644
index f29009bd63c9..000000000000
--- a/include/asm-powerpc/iseries/iseries_io.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef _ASM_POWERPC_ISERIES_ISERIES_IO_H
-#define _ASM_POWERPC_ISERIES_ISERIES_IO_H
-
-
-#ifdef CONFIG_PPC_ISERIES
-#include <linux/types.h>
-/*
- * Created by Allan Trautman on Thu Dec 28 2000.
- *
- * Remaps the io.h for the iSeries Io
- * Copyright (C) 2000 Allan H Trautman, IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the:
- * Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330,
- * Boston, MA 02111-1307 USA
- *
- * Change Activity:
- *   Created December 28, 2000
- * End Change Activity
- */
-
-#ifdef CONFIG_PCI
-extern u8 iSeries_Read_Byte(const volatile void __iomem * IoAddress);
-extern u16 iSeries_Read_Word(const volatile void __iomem * IoAddress);
-extern u32 iSeries_Read_Long(const volatile void __iomem * IoAddress);
-extern void iSeries_Write_Byte(u8 IoData, volatile void __iomem * IoAddress);
-extern void iSeries_Write_Word(u16 IoData, volatile void __iomem * IoAddress);
-extern void iSeries_Write_Long(u32 IoData, volatile void __iomem * IoAddress);
-
-extern void iSeries_memset_io(volatile void __iomem *dest, char x, size_t n);
-extern void iSeries_memcpy_toio(volatile void __iomem *dest, void *source,
-				size_t n);
-extern void iSeries_memcpy_fromio(void *dest,
-		const volatile void __iomem *source, size_t n);
-#else
-static inline u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress)
-{
-	return 0xff;
-}
-
-static inline void iSeries_Write_Byte(u8 IoData,
-		volatile void __iomem *IoAddress)
-{
-}
-#endif	/* CONFIG_PCI */
-
-#endif /* CONFIG_PPC_ISERIES */
-#endif /* _ASM_POWERPC_ISERIES_ISERIES_IO_H */
diff --git a/include/asm-powerpc/iseries/it_lp_queue.h b/include/asm-powerpc/iseries/it_lp_queue.h
index 284c5a7db3ac..3f6814769295 100644
--- a/include/asm-powerpc/iseries/it_lp_queue.h
+++ b/include/asm-powerpc/iseries/it_lp_queue.h
@@ -27,8 +27,6 @@
 #include <asm/types.h>
 #include <asm/ptrace.h>
 
-struct HvLpEvent;
-
 #define IT_LP_MAX_QUEUES	8
 
 #define IT_LP_NOT_USED		0	/* Queue will not be used by PLIC */
diff --git a/include/asm-powerpc/iseries/vio.h b/include/asm-powerpc/iseries/vio.h
index 72a97d37aac3..7a95d296abd1 100644
--- a/include/asm-powerpc/iseries/vio.h
+++ b/include/asm-powerpc/iseries/vio.h
@@ -122,6 +122,34 @@ enum viorc {
	viorc_openRejected = 0x0301
 };
 
+/*
+ * The structure of the events that flow between us and OS/400 for chario
+ * events. You can't mess with this unless the OS/400 side changes too.
+ */
+struct viocharlpevent {
+	struct HvLpEvent event;
+	u32 reserved;
+	u16 version;
+	u16 subtype_result_code;
+	u8 virtual_device;
+	u8 len;
+	u8 data[VIOCHAR_MAX_DATA];
+};
+
+#define VIOCHAR_WINDOW		10
+
+enum viocharsubtype {
+	viocharopen = 0x0001,
+	viocharclose = 0x0002,
+	viochardata = 0x0003,
+	viocharack = 0x0004,
+	viocharconfig = 0x0005
+};
+
+enum viochar_rc {
+	viochar_rc_ebusy = 1
+};
+
 struct device;
 
 extern struct device *iSeries_vio_dev;
diff --git a/include/asm-powerpc/lppaca.h b/include/asm-powerpc/lppaca.h
index 4dc514aabfe7..821ea0c512b4 100644
--- a/include/asm-powerpc/lppaca.h
+++ b/include/asm-powerpc/lppaca.h
@@ -27,7 +27,9 @@
 //
 //
 //----------------------------------------------------------------------------
+#include <linux/cache.h>
 #include <asm/types.h>
+#include <asm/mmu.h>
 
 /* The Hypervisor barfs if the lppaca crosses a page boundary.  A 1k
 * alignment is sufficient to prevent this */
@@ -114,7 +116,7 @@ struct lppaca {
 
 //=============================================================================
-// CACHE_LINE_3 0x0100 - 0x007F: This line is shared with other processors
+// CACHE_LINE_3 0x0100 - 0x017F: This line is shared with other processors
 //=============================================================================
	// This is the yield_count.  An "odd" value (low bit on) means that
	// the processor is yielded (either because of an OS yield or a PLIC
@@ -126,12 +128,29 @@ struct lppaca {
	u8	reserved6[124];		// Reserved                 x04-x7F
 
 //=============================================================================
-// CACHE_LINE_4-5 0x0100 - 0x01FF Contains PMC interrupt data
+// CACHE_LINE_4-5 0x0180 - 0x027F Contains PMC interrupt data
 //=============================================================================
	u8	pmc_save_area[256];	// PMC interrupt Area           x00-xFF
 } __attribute__((__aligned__(0x400)));
 
 extern struct lppaca lppaca[];
 
+/*
+ * SLB shadow buffer structure as defined in the PAPR.  The save_area
+ * contains adjacent ESID and VSID pairs for each shadowed SLB.  The
+ * ESID is stored in the lower 64bits, then the VSID.
+ */
+struct slb_shadow {
+	u32	persistent;		// Number of persistent SLBs	x00-x03
+	u32	buffer_length;		// Total shadow buffer length	x04-x07
+	u64	reserved;		// Alignment			x08-x0f
+	struct	{
+		u64     esid;
+		u64	vsid;
+	} save_area[SLB_NUM_BOLTED];	//				x10-x40
+} ____cacheline_aligned;
+
+extern struct slb_shadow slb_shadow[];
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_LPPACA_H */
diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h
index 3d5d590bc4b0..0a4e5c93e8e6 100644
--- a/include/asm-powerpc/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -23,6 +23,7 @@
 register struct paca_struct *local_paca asm("r13");
 #define get_paca()	local_paca
 #define get_lppaca()	(get_paca()->lppaca_ptr)
+#define get_slb_shadow()	(get_paca()->slb_shadow_ptr)
 
 struct task_struct;
 
@@ -99,6 +100,8 @@ struct paca_struct {
	u64 user_time;			/* accumulated usermode TB ticks */
	u64 system_time;		/* accumulated system TB ticks */
	u64 startpurr;			/* PURR/TB value snapshot */
+
+	struct slb_shadow *slb_shadow_ptr;
 };
 
 extern struct paca_struct paca[];
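paca.h then publishes the shadow buffer through get_slb_shadow(), mirroring get_lppaca(). A hedged sketch of how a bolted entry might be maintained (hypothetical helper; the real updater belongs with the SLB management code and must never let the hypervisor observe a half-written ESID/VSID pair):

	/* Hypothetical update of one bolted entry in the shadow. */
	static void slb_shadow_update(int entry, u64 esid, u64 vsid)
	{
		struct slb_shadow *s = get_slb_shadow();

		s->save_area[entry].esid = 0;	/* invalidate first */
		smp_wmb();
		s->save_area[entry].vsid = vsid;
		smp_wmb();
		s->save_area[entry].esid = esid;
		smp_wmb();
	}
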
diff --git a/include/asm-powerpc/page.h b/include/asm-powerpc/page.h
index fb597b37c2a2..b4d38b0b15f8 100644
--- a/include/asm-powerpc/page.h
+++ b/include/asm-powerpc/page.h
@@ -55,12 +55,6 @@
 #define PAGE_OFFSET     ASM_CONST(CONFIG_KERNEL_START)
 #define KERNELBASE      (PAGE_OFFSET + PHYSICAL_START)
 
-#ifdef CONFIG_DISCONTIGMEM
-#define page_to_pfn(page)	discontigmem_page_to_pfn(page)
-#define pfn_to_page(pfn)	discontigmem_pfn_to_page(pfn)
-#define pfn_valid(pfn)		discontigmem_pfn_valid(pfn)
-#endif
-
 #ifdef CONFIG_FLATMEM
 #define pfn_valid(pfn)		((pfn) < max_mapnr)
 #endif
diff --git a/include/asm-powerpc/ppc-pci.h b/include/asm-powerpc/ppc-pci.h
index cf79bc7ebb55..1115756c79f9 100644
--- a/include/asm-powerpc/ppc-pci.h
+++ b/include/asm-powerpc/ppc-pci.h
@@ -69,6 +69,17 @@ struct pci_dev *pci_get_device_by_addr(unsigned long addr);
 void eeh_slot_error_detail (struct pci_dn *pdn, int severity);
 
 /**
+ * rtas_pci_enable - enable IO transfers for this slot
+ * @pdn:       pci device node
+ * @function:  either EEH_THAW_MMIO or EEH_THAW_DMA
+ *
+ * Enable I/O transfers to this slot
+ */
+#define EEH_THAW_MMIO 2
+#define EEH_THAW_DMA  3
+int rtas_pci_enable(struct pci_dn *pdn, int function);
+
+/**
 * rtas_set_slot_reset -- unfreeze a frozen slot
 *
 * Clear the EEH-frozen condition on a slot.  This routine
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h
index 22e54a2a6604..6cb6fb19e57f 100644
--- a/include/asm-powerpc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -32,6 +32,7 @@
 #define _CHRP_Motorola	0x04	/* motorola chrp, the cobra */
 #define _CHRP_IBM	0x05	/* IBM chrp, the longtrail and longtrail 2 */
 #define _CHRP_Pegasos	0x06	/* Genesi/bplan's Pegasos and Pegasos2 */
+#define _CHRP_briq	0x07	/* TotalImpact's briQ */
 
 #if defined(__KERNEL__) && defined(CONFIG_PPC32)
 
diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h
index d0fa1b9aed35..524629769336 100644
--- a/include/asm-powerpc/prom.h
+++ b/include/asm-powerpc/prom.h
@@ -72,8 +72,8 @@ struct property {
 };
 
 struct device_node {
-	char	*name;
-	char	*type;
+	const char *name;
+	const char *type;
	phandle	node;
	phandle linux_phandle;
	char	*full_name;
@@ -160,7 +160,7 @@ extern void unflatten_device_tree(void);
 extern void early_init_devtree(void *);
 extern int device_is_compatible(struct device_node *device, const char *);
 extern int machine_is_compatible(const char *compat);
-extern void *get_property(struct device_node *node, const char *name,
+extern const void *get_property(struct device_node *node, const char *name,
		int *lenp);
 extern void print_properties(struct device_node *node);
 extern int prom_n_addr_cells(struct device_node* np);
@@ -197,8 +197,8 @@ extern int release_OF_resource(struct device_node* node, int index);
 */
 
-/* Helper to read a big number */
-static inline u64 of_read_number(u32 *cell, int size)
+/* Helper to read a big number; size is in cells (not bytes) */
+static inline u64 of_read_number(const u32 *cell, int size)
 {
	u64 r = 0;
	while (size--)
@@ -206,18 +206,28 @@
	return r;
 }
 
+/* Like of_read_number, but we want an unsigned long result */
+#ifdef CONFIG_PPC32
+static inline unsigned long of_read_ulong(const u32 *cell, int size)
+{
+	return cell[size-1];
+}
+#else
+#define of_read_ulong(cell, size)	of_read_number(cell, size)
+#endif
+
 /* Translate an OF address block into a CPU physical address
 */
 #define OF_BAD_ADDR	((u64)-1)
-extern u64 of_translate_address(struct device_node *np, u32 *addr);
+extern u64 of_translate_address(struct device_node *np, const u32 *addr);
 
 /* Extract an address from a device, returns the region size and
 * the address space flags too. The PCI version uses a BAR number
 * instead of an absolute index
 */
-extern u32 *of_get_address(struct device_node *dev, int index,
+extern const u32 *of_get_address(struct device_node *dev, int index,
			   u64 *size, unsigned int *flags);
-extern u32 *of_get_pci_address(struct device_node *dev, int bar_no,
+extern const u32 *of_get_pci_address(struct device_node *dev, int bar_no,
			       u64 *size, unsigned int *flags);
 
 /* Get an address as a resource. Note that if your address is
@@ -234,7 +244,7 @@ extern int of_pci_address_to_resource(struct device_node *dev, int bar,
 /* Parse the ibm,dma-window property of an OF node into the busno, phys and
 * size parameters.
 */
-void of_parse_dma_window(struct device_node *dn, unsigned char *dma_window_prop,
+void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
		unsigned long *busno, unsigned long *phys, unsigned long *size);
 
 extern void kdump_move_device_tree(void);
@@ -259,7 +269,7 @@ struct of_irq {
	u32 specifier[OF_MAX_IRQ_SPEC];	/* Specifier copy */
 };
 
-/***
+/**
 * of_irq_map_init - Initialize the irq remapper
 * @flags:	flags defining workarounds to enable
 *
@@ -272,7 +282,7 @@ struct of_irq {
 extern void of_irq_map_init(unsigned int flags);
 
-/***
+/**
 * of_irq_map_raw - Low level interrupt tree parsing
 * @parent:	the device interrupt parent
 * @intspec:	interrupt specifier ("interrupts" property of the device)
@@ -289,12 +299,12 @@ extern void of_irq_map_init(unsigned int flags);
 *
 */
 
-extern int of_irq_map_raw(struct device_node *parent, u32 *intspec,
-			  u32 ointsize, u32 *addr,
+extern int of_irq_map_raw(struct device_node *parent, const u32 *intspec,
			  u32 ointsize, const u32 *addr,
			  struct of_irq *out_irq);
 
-/***
+/**
 * of_irq_map_one - Resolve an interrupt for a device
 * @device:	the device whose interrupt is to be resolved
 * @index:	index of the interrupt to resolve
@@ -307,7 +317,7 @@ extern int of_irq_map_raw(struct device_node *parent, u32 *intspec,
 extern int of_irq_map_one(struct device_node *device, int index,
			  struct of_irq *out_irq);
 
-/***
+/**
 * of_irq_map_pci - Resolve the interrupt for a PCI device
 * @pdev:	the device whose interrupt is to be resolved
 * @out_irq:	structure of_irq filled by this function
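Constifying get_property() pushes const-correctness out to every caller: device-tree property data is read-only, and the of_read_number()/of_get_address() helpers above now take const pointers to match. The resulting caller pattern, sketched with hypothetical property handling:

	/* Hypothetical caller; the "reg" layout is device-specific. */
	const u32 *reg;
	int len;
	u64 base;

	reg = get_property(np, "reg", &len);
	if (reg == NULL || len < (int)sizeof(u32))
		return -ENODEV;
	base = of_read_number(reg, 1);	/* helpers accept const now */
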
*/ -void of_parse_dma_window(struct device_node *dn, unsigned char *dma_window_prop, +void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop, unsigned long *busno, unsigned long *phys, unsigned long *size); extern void kdump_move_device_tree(void); @@ -259,7 +269,7 @@ struct of_irq { u32 specifier[OF_MAX_IRQ_SPEC]; /* Specifier copy */ }; -/*** +/** * of_irq_map_init - Initialize the irq remapper * @flags: flags defining workarounds to enable * @@ -272,7 +282,7 @@ struct of_irq { extern void of_irq_map_init(unsigned int flags); -/*** +/** * of_irq_map_raw - Low level interrupt tree parsing * @parent: the device interrupt parent * @intspec: interrupt specifier ("interrupts" property of the device) @@ -289,12 +299,12 @@ extern void of_irq_map_init(unsigned int flags); * */ -extern int of_irq_map_raw(struct device_node *parent, u32 *intspec, - u32 ointsize, u32 *addr, +extern int of_irq_map_raw(struct device_node *parent, const u32 *intspec, + u32 ointsize, const u32 *addr, struct of_irq *out_irq); -/*** +/** * of_irq_map_one - Resolve an interrupt for a device * @device: the device whose interrupt is to be resolved * @index: index of the interrupt to resolve @@ -307,7 +317,7 @@ extern int of_irq_map_raw(struct device_node *parent, u32 *intspec, extern int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq); -/*** +/** * of_irq_map_pci - Resolve the interrupt for a PCI device * @pdev: the device whose interrupt is to be resolved * @out_irq: structure of_irq filled by this function diff --git a/include/asm-powerpc/ptrace.h b/include/asm-powerpc/ptrace.h index dc4cb9cc73a1..4435efe85d0e 100644 --- a/include/asm-powerpc/ptrace.h +++ b/include/asm-powerpc/ptrace.h @@ -215,12 +215,10 @@ do { \ #define PTRACE_GETVRREGS 18 #define PTRACE_SETVRREGS 19 -#ifndef __powerpc64__ /* Get/set all the upper 32-bits of the SPE registers, accumulator, and * spefscr, in one go */ #define PTRACE_GETEVRREGS 20 #define PTRACE_SETEVRREGS 21 -#endif /* __powerpc64__ */ /* * Get or set a debug register. The first 16 are DABR registers and the @@ -235,7 +233,6 @@ do { \ #define PPC_PTRACE_GETFPREGS 0x97 /* Get FPRs 0 - 31 */ #define PPC_PTRACE_SETFPREGS 0x96 /* Set FPRs 0 - 31 */ -#ifdef __powerpc64__ /* Calls to trace a 64bit program from a 32bit program */ #define PPC_PTRACE_PEEKTEXT_3264 0x95 #define PPC_PTRACE_PEEKDATA_3264 0x94 @@ -243,6 +240,5 @@ do { \ #define PPC_PTRACE_POKEDATA_3264 0x92 #define PPC_PTRACE_PEEKUSR_3264 0x91 #define PPC_PTRACE_POKEUSR_3264 0x90 -#endif /* __powerpc64__ */ #endif /* _ASM_POWERPC_PTRACE_H */ diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h index cf73475a0c69..3a9fcc15811b 100644 --- a/include/asm-powerpc/reg.h +++ b/include/asm-powerpc/reg.h @@ -592,6 +592,7 @@ #define PV_630p 0x0041 #define PV_970MP 0x0044 #define PV_BE 0x0070 +#define PV_PA6T 0x0090 /* * Number of entries in the SLB. If this ever changes we should handle diff --git a/include/asm-powerpc/rtas.h b/include/asm-powerpc/rtas.h index 82a27e9a041f..d34f9e1f242c 100644 --- a/include/asm-powerpc/rtas.h +++ b/include/asm-powerpc/rtas.h @@ -230,5 +230,21 @@ extern unsigned long rtas_rmo_buf; #define GLOBAL_INTERRUPT_QUEUE 9005 +/** + * rtas_config_addr - Format a busno, devfn and reg for RTAS. + * @busno: The bus number. + * @devfn: The device and function number as encoded by PCI_DEVFN(). + * @reg: The register number. 
+ * + * This function encodes the given busno, devfn and register number as + * required for RTAS calls that take a "config_addr" parameter. + * See PAPR requirement 7.3.4-1 for more info. + */ +static inline u32 rtas_config_addr(int busno, int devfn, int reg) +{ + return ((reg & 0xf00) << 20) | ((busno & 0xff) << 16) | + (devfn << 8) | (reg & 0xff); +} + #endif /* __KERNEL__ */ #endif /* _POWERPC_RTAS_H */ diff --git a/include/asm-powerpc/smu.h b/include/asm-powerpc/smu.h index 51e65fc46a03..e49f644ca63a 100644 --- a/include/asm-powerpc/smu.h +++ b/include/asm-powerpc/smu.h @@ -517,7 +517,7 @@ struct smu_sdbp_cpupiddata { * This returns the pointer to an SMU "sdb" partition data or NULL * if not found. The data format is described below */ -extern struct smu_sdbp_header *smu_get_sdb_partition(int id, +extern const struct smu_sdbp_header *smu_get_sdb_partition(int id, unsigned int *size); /* Get "sdb" partition data from an SMU satellite */ diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h index c02d105d8294..b42b53c40f5d 100644 --- a/include/asm-powerpc/spu.h +++ b/include/asm-powerpc/spu.h @@ -106,7 +106,7 @@ struct spu_context; struct spu_runqueue; struct spu { - char *name; + const char *name; unsigned long local_store_phys; u8 *local_store; unsigned long problem_phys; diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h index 4c9f5229e833..4b41deaa8d8d 100644 --- a/include/asm-powerpc/system.h +++ b/include/asm-powerpc/system.h @@ -177,11 +177,6 @@ extern u32 booke_wdt_enabled; extern u32 booke_wdt_period; #endif /* CONFIG_BOOKE_WDT */ -/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */ -extern unsigned char e2a(unsigned char); -extern unsigned char* strne2a(unsigned char *dest, - const unsigned char *src, size_t n); - struct device_node; extern void note_scsi_host(struct device_node *, void *); diff --git a/include/asm-powerpc/vio.h b/include/asm-powerpc/vio.h index dc9bd101ca14..4b51d42e1419 100644 --- a/include/asm-powerpc/vio.h +++ b/include/asm-powerpc/vio.h @@ -46,8 +46,8 @@ struct iommu_table; */ struct vio_dev { struct iommu_table *iommu_table; /* vio_map_* uses this */ - char *name; - char *type; + const char *name; + const char *type; uint32_t unit_address; unsigned int irq; struct device dev; diff --git a/include/asm-ppc/io.h b/include/asm-ppc/io.h index 680555be22ec..3d9a9e6f3321 100644 --- a/include/asm-ppc/io.h +++ b/include/asm-ppc/io.h @@ -327,26 +327,12 @@ __do_out_asm(outl, "stwbrx") #define inl_p(port) inl((port)) #define outl_p(val, port) outl((val), (port)) -extern void _insb(volatile u8 __iomem *port, void *buf, int ns); -extern void _outsb(volatile u8 __iomem *port, const void *buf, int ns); -extern void _insw(volatile u16 __iomem *port, void *buf, int ns); -extern void _outsw(volatile u16 __iomem *port, const void *buf, int ns); -extern void _insl(volatile u32 __iomem *port, void *buf, int nl); -extern void _outsl(volatile u32 __iomem *port, const void *buf, int nl); -extern void _insw_ns(volatile u16 __iomem *port, void *buf, int ns); -extern void _outsw_ns(volatile u16 __iomem *port, const void *buf, int ns); -extern void _insl_ns(volatile u32 __iomem *port, void *buf, int nl); -extern void _outsl_ns(volatile u32 __iomem *port, const void *buf, int nl); - -/* - * The *_ns versions below don't do byte-swapping. - * Neither do the standard versions now, these are just here - * for older code. 
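A worked example for rtas_config_addr() above, with illustrative values (bus 0x40, device 3, function 0, register 0x0c):

    /* PCI_DEVFN(3, 0) == 0x18, so:
     *   ((0x0c & 0xf00) << 20) == 0
     *   ((0x40 & 0xff) << 16)  == 0x00400000
     *   (0x18 << 8)            == 0x00001800
     *   (0x0c & 0xff)          == 0x0000000c
     * giving a config_addr of 0x0040180c */
    u32 addr = rtas_config_addr(0x40, PCI_DEVFN(3, 0), 0x0c);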
- */ -#define insw_ns(port, buf, ns) _insw_ns((port)+___IO_BASE, (buf), (ns)) -#define outsw_ns(port, buf, ns) _outsw_ns((port)+___IO_BASE, (buf), (ns)) -#define insl_ns(port, buf, nl) _insl_ns((port)+___IO_BASE, (buf), (nl)) -#define outsl_ns(port, buf, nl) _outsl_ns((port)+___IO_BASE, (buf), (nl)) +extern void _insb(volatile u8 __iomem *port, void *buf, long count); +extern void _outsb(volatile u8 __iomem *port, const void *buf, long count); +extern void _insw_ns(volatile u16 __iomem *port, void *buf, long count); +extern void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count); +extern void _insl_ns(volatile u32 __iomem *port, void *buf, long count); +extern void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count); #define IO_SPACE_LIMIT ~0 diff --git a/include/asm-ppc/mpc8260_pci9.h b/include/asm-ppc/mpc8260_pci9.h index 26b3f6e787bc..9f7176881c56 100644 --- a/include/asm-ppc/mpc8260_pci9.h +++ b/include/asm-ppc/mpc8260_pci9.h @@ -30,8 +30,6 @@ #undef inb #undef inw #undef inl -#undef insw_ns -#undef insl_ns #undef memcpy_fromio extern int readb(volatile unsigned char *addr); @@ -43,8 +41,6 @@ extern void insl(unsigned port, void *buf, int nl); extern int inb(unsigned port); extern int inw(unsigned port); extern unsigned inl(unsigned port); -extern void insw_ns(unsigned port, void *buf, int ns); -extern void insl_ns(unsigned port, void *buf, int nl); extern void *memcpy_fromio(void *dest, unsigned long src, size_t count); #endif /* !__CONFIG_8260_PCI9_DEFS */ diff --git a/include/asm-ppc/reg_booke.h b/include/asm-ppc/reg_booke.h index 4944c0fb8bea..602fbadeaf48 100644 --- a/include/asm-ppc/reg_booke.h +++ b/include/asm-ppc/reg_booke.h @@ -300,14 +300,14 @@ do { \ #define DBSR_IC 0x80000000 /* Instruction Completion */ #define DBSR_BT 0x40000000 /* Branch taken */ #define DBSR_TIE 0x10000000 /* Trap Instruction debug Event */ -#define DBSR_IAC1 0x00800000 /* Instruction Address Compare 1 Event */ -#define DBSR_IAC2 0x00400000 /* Instruction Address Compare 2 Event */ -#define DBSR_IAC3 0x00200000 /* Instruction Address Compare 3 Event */ -#define DBSR_IAC4 0x00100000 /* Instruction Address Compare 4 Event */ -#define DBSR_DAC1R 0x00080000 /* Data Address Compare 1 Read Event */ -#define DBSR_DAC1W 0x00040000 /* Data Address Compare 1 Write Event */ -#define DBSR_DAC2R 0x00020000 /* Data Address Compare 2 Read Event */ -#define DBSR_DAC2W 0x00010000 /* Data Address Compare 2 Write Event */ +#define DBSR_IAC1 0x04000000 /* Instruction Address Compare 1 Event */ +#define DBSR_IAC2 0x02000000 /* Instruction Address Compare 2 Event */ +#define DBSR_IAC3 0x00080000 /* Instruction Address Compare 3 Event */ +#define DBSR_IAC4 0x00040000 /* Instruction Address Compare 4 Event */ +#define DBSR_DAC1R 0x01000000 /* Data Address Compare 1 Read Event */ +#define DBSR_DAC1W 0x00800000 /* Data Address Compare 1 Write Event */ +#define DBSR_DAC2R 0x00400000 /* Data Address Compare 2 Read Event */ +#define DBSR_DAC2W 0x00200000 /* Data Address Compare 2 Write Event */ #endif /* Bit definitions related to the ESR. 
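The corrected DBSR bit values above matter to Book E debug exception handlers, which acknowledge an event by writing the bit back (the register is write-one-to-clear). A minimal, hypothetical handler fragment:

    unsigned long dbsr = mfspr(SPRN_DBSR);

    if (dbsr & DBSR_IAC1) {
            /* ... handle instruction address compare 1 ... */
            mtspr(SPRN_DBSR, DBSR_IAC1);    /* write 1 back to clear */
    }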
*/ diff --git a/include/asm-s390/Kbuild b/include/asm-s390/Kbuild index ed8955f49e47..088969d55e72 100644 --- a/include/asm-s390/Kbuild +++ b/include/asm-s390/Kbuild @@ -1,4 +1,12 @@ include include/asm-generic/Kbuild.asm -unifdef-y += cmb.h debug.h -header-y += dasd.h qeth.h tape390.h ucontext.h vtoc.h z90crypt.h +header-y += dasd.h +header-y += monwriter.h +header-y += qeth.h +header-y += tape390.h +header-y += ucontext.h +header-y += vtoc.h +header-y += z90crypt.h + +unifdef-y += cmb.h +unifdef-y += debug.h diff --git a/include/asm-s390/appldata.h b/include/asm-s390/appldata.h new file mode 100644 index 000000000000..b1770703b706 --- /dev/null +++ b/include/asm-s390/appldata.h @@ -0,0 +1,90 @@ +/* + * include/asm-s390/appldata.h + * + * Copyright (C) IBM Corp. 2006 + * + * Author(s): Melissa Howland <melissah@us.ibm.com> + */ + +#ifndef _ASM_S390_APPLDATA_H +#define _ASM_S390_APPLDATA_H + +#include <asm/io.h> + +#ifndef CONFIG_64BIT + +#define APPLDATA_START_INTERVAL_REC 0x00 /* Function codes for */ +#define APPLDATA_STOP_REC 0x01 /* DIAG 0xDC */ +#define APPLDATA_GEN_EVENT_REC 0x02 +#define APPLDATA_START_CONFIG_REC 0x03 + +/* + * Parameter list for DIAGNOSE X'DC' + */ +struct appldata_parameter_list { + u16 diag; /* The DIAGNOSE code X'00DC' */ + u8 function; /* The function code for the DIAGNOSE */ + u8 parlist_length; /* Length of the parameter list */ + u32 product_id_addr; /* Address of the 16-byte product ID */ + u16 reserved; + u16 buffer_length; /* Length of the application data buffer */ + u32 buffer_addr; /* Address of the application data buffer */ +} __attribute__ ((packed)); + +#else /* CONFIG_64BIT */ + +#define APPLDATA_START_INTERVAL_REC 0x80 +#define APPLDATA_STOP_REC 0x81 +#define APPLDATA_GEN_EVENT_REC 0x82 +#define APPLDATA_START_CONFIG_REC 0x83 + +/* + * Parameter list for DIAGNOSE X'DC' + */ +struct appldata_parameter_list { + u16 diag; + u8 function; + u8 parlist_length; + u32 unused01; + u16 reserved; + u16 buffer_length; + u32 unused02; + u64 product_id_addr; + u64 buffer_addr; +} __attribute__ ((packed)); + +#endif /* CONFIG_64BIT */ + +struct appldata_product_id { + char prod_nr[7]; /* product number */ + u16 prod_fn; /* product function */ + u8 record_nr; /* record number */ + u16 version_nr; /* version */ + u16 release_nr; /* release */ + u16 mod_lvl; /* modification level */ +} __attribute__ ((packed)); + +static inline int appldata_asm(struct appldata_product_id *id, + unsigned short fn, void *buffer, + unsigned short length) +{ + struct appldata_parameter_list parm_list; + int ry; + + if (!MACHINE_IS_VM) + return -ENOSYS; + parm_list.diag = 0xdc; + parm_list.function = fn; + parm_list.parlist_length = sizeof(parm_list); + parm_list.buffer_length = length; + parm_list.product_id_addr = (unsigned long) id; + parm_list.buffer_addr = virt_to_phys(buffer); + asm volatile( + "diag %1,%0,0xdc" + : "=d" (ry) + : "d" (&parm_list), "m" (parm_list), "m" (*id) + : "cc"); + return ry; +} + +#endif /* _ASM_S390_APPLDATA_H */ diff --git a/include/asm-s390/cio.h b/include/asm-s390/cio.h index 28fdd6e2b8ba..da063cd5f0a0 100644 --- a/include/asm-s390/cio.h +++ b/include/asm-s390/cio.h @@ -270,6 +270,11 @@ struct diag210 { __u32 vrdccrft : 8; /* real device feature (output) */ } __attribute__ ((packed,aligned(4))); +struct ccw_dev_id { + u8 ssid; + u16 devno; +}; + extern int diag210(struct diag210 *addr); extern void wait_cons_dev(void); @@ -280,6 +285,8 @@ extern void cio_reset_channel_paths(void); extern void css_schedule_reprobe(void); +extern void 
reipl_ccw_dev(struct ccw_dev_id *id); + #endif #endif diff --git a/include/asm-s390/dma.h b/include/asm-s390/dma.h index 02720c449cd8..7425c6af6cd4 100644 --- a/include/asm-s390/dma.h +++ b/include/asm-s390/dma.h @@ -11,6 +11,6 @@ #define MAX_DMA_ADDRESS 0x80000000 -#define free_dma(x) +#define free_dma(x) do { } while (0) #endif /* _ASM_DMA_H */ diff --git a/include/asm-s390/futex.h b/include/asm-s390/futex.h index ffedf14f89f6..5e261e1de671 100644 --- a/include/asm-s390/futex.h +++ b/include/asm-s390/futex.h @@ -7,75 +7,21 @@ #include <asm/errno.h> #include <asm/uaccess.h> -#ifndef __s390x__ -#define __futex_atomic_fixup \ - ".section __ex_table,\"a\"\n" \ - " .align 4\n" \ - " .long 0b,4b,2b,4b,3b,4b\n" \ - ".previous" -#else /* __s390x__ */ -#define __futex_atomic_fixup \ - ".section __ex_table,\"a\"\n" \ - " .align 8\n" \ - " .quad 0b,4b,2b,4b,3b,4b\n" \ - ".previous" -#endif /* __s390x__ */ - -#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \ - asm volatile(" sacf 256\n" \ - "0: l %1,0(%6)\n" \ - "1: " insn \ - "2: cs %1,%2,0(%6)\n" \ - "3: jl 1b\n" \ - " lhi %0,0\n" \ - "4: sacf 0\n" \ - __futex_atomic_fixup \ - : "=d" (ret), "=&d" (oldval), "=&d" (newval), \ - "=m" (*uaddr) \ - : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ - "m" (*uaddr) : "cc" ); - static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; int oparg = (encoded_op << 8) >> 20; int cmparg = (encoded_op << 20) >> 20; - int oldval = 0, newval, ret; + int oldval, ret; + if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; - inc_preempt_count(); - - switch (op) { - case FUTEX_OP_SET: - __futex_atomic_op("lr %2,%5\n", - ret, oldval, newval, uaddr, oparg); - break; - case FUTEX_OP_ADD: - __futex_atomic_op("lr %2,%1\nar %2,%5\n", - ret, oldval, newval, uaddr, oparg); - break; - case FUTEX_OP_OR: - __futex_atomic_op("lr %2,%1\nor %2,%5\n", - ret, oldval, newval, uaddr, oparg); - break; - case FUTEX_OP_ANDN: - __futex_atomic_op("lr %2,%1\nnr %2,%5\n", - ret, oldval, newval, uaddr, oparg); - break; - case FUTEX_OP_XOR: - __futex_atomic_op("lr %2,%1\nxr %2,%5\n", - ret, oldval, newval, uaddr, oparg); - break; - default: - ret = -ENOSYS; - } - - dec_preempt_count(); + ret = uaccess.futex_atomic_op(op, uaddr, oparg, &oldval); if (!ret) { switch (cmp) { @@ -91,32 +37,13 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) return ret; } -static inline int -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) +static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, + int oldval, int newval) { - int ret; - if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; - asm volatile(" sacf 256\n" - " cs %1,%4,0(%5)\n" - "0: lr %0,%1\n" - "1: sacf 0\n" -#ifndef __s390x__ - ".section __ex_table,\"a\"\n" - " .align 4\n" - " .long 0b,1b\n" - ".previous" -#else /* __s390x__ */ - ".section __ex_table,\"a\"\n" - " .align 8\n" - " .quad 0b,1b\n" - ".previous" -#endif /* __s390x__ */ - : "=d" (ret), "+d" (oldval), "=m" (*uaddr) - : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) - : "cc", "memory" ); - return oldval; + + return uaccess.futex_atomic_cmpxchg(uaddr, oldval, newval); } #endif /* __KERNEL__ */ diff --git a/include/asm-s390/io.h b/include/asm-s390/io.h index d4614b35f423..a6cc27e77007 100644 --- a/include/asm-s390/io.h +++ b/include/asm-s390/io.h @@ -116,7 +116,7 @@ extern void iounmap(void *addr); #define outb(x,addr) ((void) writeb(x,addr)) #define outb_p(x,addr) outb(x,addr) -#define mmiowb() +#define mmiowb() do { } while (0) /* * Convert a physical pointer to a virtual kernel pointer for /dev/mem diff --git a/include/asm-s390/kdebug.h b/include/asm-s390/kdebug.h new file mode 100644 index 000000000000..40cc68025e01 --- /dev/null +++ b/include/asm-s390/kdebug.h @@ -0,0 +1,59 @@ +#ifndef _S390_KDEBUG_H +#define _S390_KDEBUG_H + +/* + * Feb 2006 Ported to s390 <grundym@us.ibm.com> + */ +#include <linux/notifier.h> + +struct pt_regs; + +struct die_args { + struct pt_regs *regs; + const char *str; + long err; + int trapnr; + int signr; +}; + +/* Note - you should never unregister because that can race with NMIs. + * If you really want to do it first unregister - then synchronize_sched + * - then free. + */ +extern int register_die_notifier(struct notifier_block *); +extern int unregister_die_notifier(struct notifier_block *); +extern int register_page_fault_notifier(struct notifier_block *); +extern int unregister_page_fault_notifier(struct notifier_block *); +extern struct atomic_notifier_head s390die_chain; + + +enum die_val { + DIE_OOPS = 1, + DIE_BPT, + DIE_SSTEP, + DIE_PANIC, + DIE_NMI, + DIE_DIE, + DIE_NMIWATCHDOG, + DIE_KERNELDEBUG, + DIE_TRAP, + DIE_GPF, + DIE_CALL, + DIE_NMI_IPI, + DIE_PAGE_FAULT, +}; + +static inline int notify_die(enum die_val val, const char *str, + struct pt_regs *regs, long err, int trap, int sig) +{ + struct die_args args = { + .regs = regs, + .str = str, + .err = err, + .trapnr = trap, + .signr = sig + }; + return atomic_notifier_call_chain(&s390die_chain, val, &args); +} + +#endif diff --git a/include/asm-s390/kprobes.h b/include/asm-s390/kprobes.h new file mode 100644 index 000000000000..b847ff0ec3fa --- /dev/null +++ b/include/asm-s390/kprobes.h @@ -0,0 +1,114 @@ +#ifndef _ASM_S390_KPROBES_H +#define _ASM_S390_KPROBES_H +/* + * Kernel Probes (KProbes) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
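The new kdebug.h above gives s390 the same die-notifier chain other architectures already have. A hypothetical consumer, sketch only; the handler and notifier_block names are made up:

    static int example_die_handler(struct notifier_block *self,
                                   unsigned long val, void *data)
    {
            struct die_args *args = data;

            if (val == DIE_OOPS)
                    printk(KERN_WARNING "die: %s, err %ld\n",
                           args->str, args->err);
            return NOTIFY_DONE;
    }

    static struct notifier_block example_die_nb = {
            .notifier_call = example_die_handler,
    };

    /* registered once at init time:
     *      register_die_notifier(&example_die_nb);
     */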
+ * + * Copyright (C) IBM Corporation, 2002, 2006 + * + * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel + * Probes initial implementation ( includes suggestions from + * Rusty Russell). + * 2004-Nov Modified for PPC64 by Ananth N Mavinakayanahalli + * <ananth@in.ibm.com> + * 2005-Dec Used as a template for s390 by Mike Grundy + * <grundym@us.ibm.com> + */ +#include <linux/types.h> +#include <linux/ptrace.h> +#include <linux/percpu.h> + +#define __ARCH_WANT_KPROBES_INSN_SLOT +struct pt_regs; +struct kprobe; + +typedef u16 kprobe_opcode_t; +#define BREAKPOINT_INSTRUCTION 0x0002 + +/* Maximum instruction size is 3 (16bit) halfwords: */ +#define MAX_INSN_SIZE 0x0003 +#define MAX_STACK_SIZE 64 +#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \ + (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \ + ? (MAX_STACK_SIZE) \ + : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) + +#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)(pentry) + +#define ARCH_SUPPORTS_KRETPROBES +#define ARCH_INACTIVE_KPROBE_COUNT 0 + +#define KPROBE_SWAP_INST 0x10 + +#define FIXUP_PSW_NORMAL 0x08 +#define FIXUP_BRANCH_NOT_TAKEN 0x04 +#define FIXUP_RETURN_REGISTER 0x02 +#define FIXUP_NOT_REQUIRED 0x01 + +/* Architecture specific copy of original instruction */ +struct arch_specific_insn { + /* copy of original instruction */ + kprobe_opcode_t *insn; + int fixup; + int ilen; + int reg; +}; + +struct ins_replace_args { + kprobe_opcode_t *ptr; + kprobe_opcode_t old; + kprobe_opcode_t new; +}; +struct prev_kprobe { + struct kprobe *kp; + unsigned long status; + unsigned long saved_psw; + unsigned long kprobe_saved_imask; + unsigned long kprobe_saved_ctl[3]; +}; + +/* per-cpu kprobe control block */ +struct kprobe_ctlblk { + unsigned long kprobe_status; + unsigned long kprobe_saved_imask; + unsigned long kprobe_saved_ctl[3]; + struct pt_regs jprobe_saved_regs; + unsigned long jprobe_saved_r14; + unsigned long jprobe_saved_r15; + struct prev_kprobe prev_kprobe; + kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE]; +}; + +void arch_remove_kprobe(struct kprobe *p); +void kretprobe_trampoline(void); +int is_prohibited_opcode(kprobe_opcode_t *instruction); +void get_instruction_type(struct arch_specific_insn *ainsn); + +#define flush_insn_slot(p) do { } while (0) + +#endif /* _ASM_S390_KPROBES_H */ + +#ifdef CONFIG_KPROBES + +extern int kprobe_exceptions_notify(struct notifier_block *self, + unsigned long val, void *data); +#else /* !CONFIG_KPROBES */ +static inline int kprobe_exceptions_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + return 0; +} +#endif diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h index 596c8b172104..18695d10dedf 100644 --- a/include/asm-s390/lowcore.h +++ b/include/asm-s390/lowcore.h @@ -35,6 +35,7 @@ #define __LC_IO_NEW_PSW 0x01f0 #endif /* !__s390x__ */ +#define __LC_IPL_PARMBLOCK_PTR 0x014 #define __LC_EXT_PARAMS 0x080 #define __LC_CPU_ADDRESS 0x084 #define __LC_EXT_INT_CODE 0x086 @@ -47,6 +48,7 @@ #define __LC_PER_ATMID 0x096 #define __LC_PER_ADDRESS 0x098 #define __LC_PER_ACCESS_ID 0x0A1 +#define __LC_AR_MODE_ID 0x0A3 #define __LC_SUBCHANNEL_ID 0x0B8 #define __LC_SUBCHANNEL_NR 0x0BA @@ -106,18 +108,28 @@ #define __LC_INT_CLOCK 0xDE8 #endif /* __s390x__ */ -#define __LC_PANIC_MAGIC 0xE00 +#define __LC_PANIC_MAGIC 0xE00 #ifndef __s390x__ #define __LC_PFAULT_INTPARM 0x080 #define __LC_CPU_TIMER_SAVE_AREA 0x0D8 +#define __LC_CLOCK_COMP_SAVE_AREA 0x0E0 +#define __LC_PSW_SAVE_AREA 0x100 +#define 
__LC_PREFIX_SAVE_AREA 0x108 #define __LC_AREGS_SAVE_AREA 0x120 +#define __LC_FPREGS_SAVE_AREA 0x160 #define __LC_GPREGS_SAVE_AREA 0x180 #define __LC_CREGS_SAVE_AREA 0x1C0 #else /* __s390x__ */ #define __LC_PFAULT_INTPARM 0x11B8 +#define __LC_FPREGS_SAVE_AREA 0x1200 #define __LC_GPREGS_SAVE_AREA 0x1280 +#define __LC_PSW_SAVE_AREA 0x1300 +#define __LC_PREFIX_SAVE_AREA 0x1318 +#define __LC_FP_CREG_SAVE_AREA 0x131C +#define __LC_TODREG_SAVE_AREA 0x1324 #define __LC_CPU_TIMER_SAVE_AREA 0x1328 +#define __LC_CLOCK_COMP_SAVE_AREA 0x1331 #define __LC_AREGS_SAVE_AREA 0x1340 #define __LC_CREGS_SAVE_AREA 0x1380 #endif /* __s390x__ */ diff --git a/include/asm-s390/monwriter.h b/include/asm-s390/monwriter.h new file mode 100644 index 000000000000..f0cbf96c52e6 --- /dev/null +++ b/include/asm-s390/monwriter.h @@ -0,0 +1,33 @@ +/* + * include/asm-s390/monwriter.h + * + * Copyright (C) IBM Corp. 2006 + * Character device driver for writing z/VM APPLDATA monitor records + * Version 1.0 + * Author(s): Melissa Howland <melissah@us.ibm.com> + * + */ + +#ifndef _ASM_390_MONWRITER_H +#define _ASM_390_MONWRITER_H + +/* mon_function values */ +#define MONWRITE_START_INTERVAL 0x00 /* start interval recording */ +#define MONWRITE_STOP_INTERVAL 0x01 /* stop interval or config recording */ +#define MONWRITE_GEN_EVENT 0x02 /* generate event record */ +#define MONWRITE_START_CONFIG 0x03 /* start configuration recording */ + +/* the header the app uses in its write() data */ +struct monwrite_hdr { + unsigned char mon_function; + unsigned short applid; + unsigned char record_num; + unsigned short version; + unsigned short release; + unsigned short mod_level; + unsigned short datalen; + unsigned char hdrlen; + +} __attribute__((packed)); + +#endif /* _ASM_390_MONWRITER_H */ diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h index a78e853e0dd5..803bc7064418 100644 --- a/include/asm-s390/pgalloc.h +++ b/include/asm-s390/pgalloc.h @@ -22,6 +22,16 @@ extern void diag10(unsigned long addr); /* + * Page allocation orders. + */ +#ifndef __s390x__ +# define PGD_ALLOC_ORDER 1 +#else /* __s390x__ */ +# define PMD_ALLOC_ORDER 2 +# define PGD_ALLOC_ORDER 2 +#endif /* __s390x__ */ + +/* * Allocate and free page tables. The xxx_kernel() versions are * used to allocate a kernel page table - this turns on ASN bits * if any. 
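For the monwriter interface above, user space prepends a monwrite_hdr to the record data it write()s to the device. A sketch under stated assumptions: the applid value is illustrative, and fd is an already-opened file descriptor for the monitor-writer device, which this header does not name.

    #include <string.h>
    #include <unistd.h>
    #include <asm/monwriter.h>

    char payload[8] = "example";
    char buf[sizeof(struct monwrite_hdr) + sizeof(payload)];
    struct monwrite_hdr hdr = {
            .mon_function = MONWRITE_GEN_EVENT,
            .applid       = 0x1000,         /* illustrative */
            .datalen      = sizeof(payload),
            .hdrlen       = sizeof(struct monwrite_hdr),
    };

    memcpy(buf, &hdr, sizeof(hdr));
    memcpy(buf + sizeof(hdr), payload, sizeof(payload));
    write(fd, buf, sizeof(buf));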
@@ -29,30 +39,23 @@ extern void diag10(unsigned long addr); static inline pgd_t *pgd_alloc(struct mm_struct *mm) { - pgd_t *pgd; + pgd_t *pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER); int i; + if (!pgd) + return NULL; + for (i = 0; i < PTRS_PER_PGD; i++) #ifndef __s390x__ - pgd = (pgd_t *) __get_free_pages(GFP_KERNEL,1); - if (pgd != NULL) - for (i = 0; i < USER_PTRS_PER_PGD; i++) - pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE)); -#else /* __s390x__ */ - pgd = (pgd_t *) __get_free_pages(GFP_KERNEL,2); - if (pgd != NULL) - for (i = 0; i < PTRS_PER_PGD; i++) - pgd_clear(pgd + i); -#endif /* __s390x__ */ + pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE)); +#else + pgd_clear(pgd + i); +#endif return pgd; } static inline void pgd_free(pgd_t *pgd) { -#ifndef __s390x__ - free_pages((unsigned long) pgd, 1); -#else /* __s390x__ */ - free_pages((unsigned long) pgd, 2); -#endif /* __s390x__ */ + free_pages((unsigned long) pgd, PGD_ALLOC_ORDER); } #ifndef __s390x__ @@ -68,20 +71,19 @@ static inline void pgd_free(pgd_t *pgd) #else /* __s390x__ */ static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr) { - pmd_t *pmd; - int i; + pmd_t *pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER); + int i; - pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, 2); - if (pmd != NULL) { - for (i=0; i < PTRS_PER_PMD; i++) - pmd_clear(pmd+i); - } + if (!pmd) + return NULL; + for (i=0; i < PTRS_PER_PMD; i++) + pmd_clear(pmd + i); return pmd; } static inline void pmd_free (pmd_t *pmd) { - free_pages((unsigned long) pmd, 2); + free_pages((unsigned long) pmd, PMD_ALLOC_ORDER); } #define __pmd_free_tlb(tlb,pmd) \ @@ -123,15 +125,14 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page) static inline pte_t * pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr) { - pte_t *pte; - int i; - - pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); - if (pte != NULL) { - for (i=0; i < PTRS_PER_PTE; i++) { - pte_clear(mm, vmaddr, pte+i); - vmaddr += PAGE_SIZE; - } + pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT); + int i; + + if (!pte) + return NULL; + for (i=0; i < PTRS_PER_PTE; i++) { + pte_clear(mm, vmaddr, pte + i); + vmaddr += PAGE_SIZE; } return pte; } diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h index 24312387fa24..1a07028d575e 100644 --- a/include/asm-s390/pgtable.h +++ b/include/asm-s390/pgtable.h @@ -89,19 +89,6 @@ extern char empty_zero_page[PAGE_SIZE]; # define PTRS_PER_PGD 2048 #endif /* __s390x__ */ -/* - * pgd entries used up by user/kernel: - */ -#ifndef __s390x__ -# define USER_PTRS_PER_PGD 512 -# define USER_PGD_PTRS 512 -# define KERNEL_PGD_PTRS 512 -#else /* __s390x__ */ -# define USER_PTRS_PER_PGD 2048 -# define USER_PGD_PTRS 2048 -# define KERNEL_PGD_PTRS 2048 -#endif /* __s390x__ */ - #define FIRST_USER_ADDRESS 0 #define pte_ERROR(e) \ @@ -216,12 +203,14 @@ extern char empty_zero_page[PAGE_SIZE]; #define _PAGE_RO 0x200 /* HW read-only */ #define _PAGE_INVALID 0x400 /* HW invalid */ -/* Mask and four different kinds of invalid pages. */ -#define _PAGE_INVALID_MASK 0x601 -#define _PAGE_INVALID_EMPTY 0x400 -#define _PAGE_INVALID_NONE 0x401 -#define _PAGE_INVALID_SWAP 0x600 -#define _PAGE_INVALID_FILE 0x601 +/* Mask and six different types of pages. 
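The PGD_ALLOC_ORDER/PMD_ALLOC_ORDER constants introduced above only name the orders the old code hard-coded: on 64-bit, order 2 means 1 << 2 = 4 contiguous pages, a 16KB table, and pgd_free()/pmd_free() must pass the same order back. A hypothetical compile-time check, assuming BUILD_BUG_ON from linux/kernel.h and 4KB pages:

    static inline void pgalloc_order_check(void)
    {
            /* 64-bit case: pgd and pmd tables are both 16KB */
            BUILD_BUG_ON((PAGE_SIZE << PGD_ALLOC_ORDER) != 16384);
            BUILD_BUG_ON(PGD_ALLOC_ORDER != PMD_ALLOC_ORDER);
    }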
*/ +#define _PAGE_TYPE_MASK 0x601 +#define _PAGE_TYPE_EMPTY 0x400 +#define _PAGE_TYPE_NONE 0x401 +#define _PAGE_TYPE_SWAP 0x600 +#define _PAGE_TYPE_FILE 0x601 +#define _PAGE_TYPE_RO 0x200 +#define _PAGE_TYPE_RW 0x000 #ifndef __s390x__ @@ -280,15 +269,14 @@ extern char empty_zero_page[PAGE_SIZE]; #endif /* __s390x__ */ /* - * No mapping available + * Page protection definitions. */ -#define PAGE_NONE_SHARED __pgprot(_PAGE_INVALID_NONE) -#define PAGE_NONE_PRIVATE __pgprot(_PAGE_INVALID_NONE) -#define PAGE_RO_SHARED __pgprot(_PAGE_RO) -#define PAGE_RO_PRIVATE __pgprot(_PAGE_RO) -#define PAGE_COPY __pgprot(_PAGE_RO) -#define PAGE_SHARED __pgprot(0) -#define PAGE_KERNEL __pgprot(0) +#define PAGE_NONE __pgprot(_PAGE_TYPE_NONE) +#define PAGE_RO __pgprot(_PAGE_TYPE_RO) +#define PAGE_RW __pgprot(_PAGE_TYPE_RW) + +#define PAGE_KERNEL PAGE_RW +#define PAGE_COPY PAGE_RO /* * The S390 can't do page protection for execute, and considers that the @@ -296,23 +284,23 @@ extern char empty_zero_page[PAGE_SIZE]; * the closest we can get.. */ /*xwr*/ -#define __P000 PAGE_NONE_PRIVATE -#define __P001 PAGE_RO_PRIVATE -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY -#define __P100 PAGE_RO_PRIVATE -#define __P101 PAGE_RO_PRIVATE -#define __P110 PAGE_COPY -#define __P111 PAGE_COPY - -#define __S000 PAGE_NONE_SHARED -#define __S001 PAGE_RO_SHARED -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED -#define __S100 PAGE_RO_SHARED -#define __S101 PAGE_RO_SHARED -#define __S110 PAGE_SHARED -#define __S111 PAGE_SHARED +#define __P000 PAGE_NONE +#define __P001 PAGE_RO +#define __P010 PAGE_RO +#define __P011 PAGE_RO +#define __P100 PAGE_RO +#define __P101 PAGE_RO +#define __P110 PAGE_RO +#define __P111 PAGE_RO + +#define __S000 PAGE_NONE +#define __S001 PAGE_RO +#define __S010 PAGE_RW +#define __S011 PAGE_RW +#define __S100 PAGE_RO +#define __S101 PAGE_RO +#define __S110 PAGE_RW +#define __S111 PAGE_RW /* * Certain architectures need to do special things when PTEs @@ -377,18 +365,18 @@ static inline int pmd_bad(pmd_t pmd) static inline int pte_none(pte_t pte) { - return (pte_val(pte) & _PAGE_INVALID_MASK) == _PAGE_INVALID_EMPTY; + return (pte_val(pte) & _PAGE_TYPE_MASK) == _PAGE_TYPE_EMPTY; } static inline int pte_present(pte_t pte) { return !(pte_val(pte) & _PAGE_INVALID) || - (pte_val(pte) & _PAGE_INVALID_MASK) == _PAGE_INVALID_NONE; + (pte_val(pte) & _PAGE_TYPE_MASK) == _PAGE_TYPE_NONE; } static inline int pte_file(pte_t pte) { - return (pte_val(pte) & _PAGE_INVALID_MASK) == _PAGE_INVALID_FILE; + return (pte_val(pte) & _PAGE_TYPE_MASK) == _PAGE_TYPE_FILE; } #define pte_same(a,b) (pte_val(a) == pte_val(b)) @@ -461,7 +449,7 @@ static inline void pmd_clear(pmd_t * pmdp) static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - pte_val(*ptep) = _PAGE_INVALID_EMPTY; + pte_val(*ptep) = _PAGE_TYPE_EMPTY; } /* @@ -477,7 +465,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) static inline pte_t pte_wrprotect(pte_t pte) { - /* Do not clobber _PAGE_INVALID_NONE pages! */ + /* Do not clobber _PAGE_TYPE_NONE pages! 
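To see how the renamed type bits drive the helpers, trace two of the six types through the pte_none() and pte_present() definitions above (illustrative fragment, not from the patch):

    pte_t pte;

    pte_val(pte) = _PAGE_TYPE_EMPTY;        /* 0x400 */
    /* pte_none(pte) is true: 0x400 & 0x601 == 0x400 */

    pte_val(pte) = _PAGE_TYPE_NONE;         /* 0x401 */
    /* pte_present(pte) is true: the PROT_NONE case - present to
     * the VM, but _PAGE_INVALID makes every hardware access fault */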
*/ if (!(pte_val(pte) & _PAGE_INVALID)) pte_val(pte) |= _PAGE_RO; return pte; @@ -556,26 +544,30 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, return pte; } -static inline pte_t -ptep_clear_flush(struct vm_area_struct *vma, - unsigned long address, pte_t *ptep) +static inline void __ptep_ipte(unsigned long address, pte_t *ptep) { - pte_t pte = *ptep; + if (!(pte_val(*ptep) & _PAGE_INVALID)) { #ifndef __s390x__ - if (!(pte_val(pte) & _PAGE_INVALID)) { /* S390 has 1mb segments, we are emulating 4MB segments */ pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00); - __asm__ __volatile__ ("ipte %2,%3" - : "=m" (*ptep) : "m" (*ptep), - "a" (pto), "a" (address) ); +#else + /* ipte in zarch mode can do the math */ + pte_t *pto = ptep; +#endif + asm volatile ("ipte %2,%3" + : "=m" (*ptep) : "m" (*ptep), + "a" (pto), "a" (address) ); } -#else /* __s390x__ */ - if (!(pte_val(pte) & _PAGE_INVALID)) - __asm__ __volatile__ ("ipte %2,%3" - : "=m" (*ptep) : "m" (*ptep), - "a" (ptep), "a" (address) ); -#endif /* __s390x__ */ - pte_val(*ptep) = _PAGE_INVALID_EMPTY; + pte_val(*ptep) = _PAGE_TYPE_EMPTY; +} + +static inline pte_t +ptep_clear_flush(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep) +{ + pte_t pte = *ptep; + + __ptep_ipte(address, ptep); return pte; } @@ -755,7 +747,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) { pte_t pte; offset &= __SWP_OFFSET_MASK; - pte_val(pte) = _PAGE_INVALID_SWAP | ((type & 0x1f) << 2) | + pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) | ((offset & 1UL) << 7) | ((offset & ~1UL) << 11); return pte; } @@ -778,7 +770,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) #define pgoff_to_pte(__off) \ ((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \ - | _PAGE_INVALID_FILE }) + | _PAGE_TYPE_FILE }) #endif /* !__ASSEMBLY__ */ diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h index 5b71d3731723..a3a4e5fd30d7 100644 --- a/include/asm-s390/processor.h +++ b/include/asm-s390/processor.h @@ -339,4 +339,21 @@ int unregister_idle_notifier(struct notifier_block *nb); #endif +/* + * Helper macro for exception table entries + */ +#ifndef __s390x__ +#define EX_TABLE(_fault,_target) \ + ".section __ex_table,\"a\"\n" \ + " .align 4\n" \ + " .long " #_fault "," #_target "\n" \ + ".previous\n" +#else +#define EX_TABLE(_fault,_target) \ + ".section __ex_table,\"a\"\n" \ + " .align 8\n" \ + " .quad " #_fault "," #_target "\n" \ + ".previous\n" +#endif + #endif /* __ASM_S390_PROCESSOR_H */ diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h index 19e31979309a..f1959732b6fd 100644 --- a/include/asm-s390/setup.h +++ b/include/asm-s390/setup.h @@ -14,8 +14,6 @@ #define PARMAREA 0x10400 #define COMMAND_LINE_SIZE 896 -#define RAMDISK_ORIGIN 0x800000 -#define RAMDISK_SIZE 0x800000 #define MEMORY_CHUNKS 16 /* max 0x7fff */ #define IPL_PARMBLOCK_ORIGIN 0x2000 @@ -46,10 +44,12 @@ extern unsigned long machine_flags; #define MACHINE_HAS_IEEE (machine_flags & 2) #define MACHINE_HAS_CSP (machine_flags & 8) #define MACHINE_HAS_DIAG44 (1) +#define MACHINE_HAS_MVCOS (0) #else /* __s390x__ */ #define MACHINE_HAS_IEEE (1) #define MACHINE_HAS_CSP (1) #define MACHINE_HAS_DIAG44 (machine_flags & 32) +#define MACHINE_HAS_MVCOS (machine_flags & 512) #endif /* __s390x__ */ @@ -70,52 +70,76 @@ extern unsigned int console_irq; #define SET_CONSOLE_3215 do { console_mode = 2; } while (0) #define SET_CONSOLE_3270 do { console_mode = 3; } while (0) 
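The EX_TABLE() helper above standardizes exception-table entries in inline assembly. A minimal sketch of the intended usage; the function and the faulting instruction are illustrative only, and no error status is recovered here:

    static inline void example_poke(char *ptr)
    {
            asm volatile(
                    "0:     mvi     0(%1),0x00\n"   /* store that may fault */
                    "1:\n"
                    EX_TABLE(0b,1b)         /* a fault at 0: resumes at 1: */
                    : "=m" (*ptr)
                    : "a" (ptr));
    }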
-struct ipl_list_header { - u32 length; - u8 reserved[3]; + +struct ipl_list_hdr { + u32 len; + u8 reserved1[3]; u8 version; + u32 blk0_len; + u8 pbt; + u8 flags; + u16 reserved2; } __attribute__((packed)); struct ipl_block_fcp { - u32 length; - u8 pbt; - u8 reserved1[322-1]; + u8 reserved1[313-1]; + u8 opt; + u8 reserved2[3]; + u16 reserved3; u16 devno; - u8 reserved2[4]; + u8 reserved4[4]; u64 wwpn; u64 lun; u32 bootprog; - u8 reserved3[12]; + u8 reserved5[12]; u64 br_lba; u32 scp_data_len; - u8 reserved4[260]; + u8 reserved6[260]; u8 scp_data[]; } __attribute__((packed)); +struct ipl_block_ccw { + u8 load_param[8]; + u8 reserved1[84]; + u8 reserved2[2]; + u16 devno; + u8 vm_flags; + u8 reserved3[3]; + u32 vm_parm_len; +} __attribute__((packed)); + struct ipl_parameter_block { + struct ipl_list_hdr hdr; union { - u32 length; - struct ipl_list_header header; - } hdr; - struct ipl_block_fcp fcp; + struct ipl_block_fcp fcp; + struct ipl_block_ccw ccw; + } ipl_info; } __attribute__((packed)); -#define IPL_MAX_SUPPORTED_VERSION (0) +#define IPL_PARM_BLK_FCP_LEN (sizeof(struct ipl_list_hdr) + \ + sizeof(struct ipl_block_fcp)) -#define IPL_TYPE_FCP (0) +#define IPL_PARM_BLK_CCW_LEN (sizeof(struct ipl_list_hdr) + \ + sizeof(struct ipl_block_ccw)) + +#define IPL_MAX_SUPPORTED_VERSION (0) /* * IPL validity flags and parameters as detected in head.S */ -extern u32 ipl_parameter_flags; +extern u32 ipl_flags; extern u16 ipl_devno; -#define IPL_DEVNO_VALID (ipl_parameter_flags & 1) -#define IPL_PARMBLOCK_VALID (ipl_parameter_flags & 2) +void do_reipl(void); + +enum { + IPL_DEVNO_VALID = 1, + IPL_PARMBLOCK_VALID = 2, +}; #define IPL_PARMBLOCK_START ((struct ipl_parameter_block *) \ IPL_PARMBLOCK_ORIGIN) -#define IPL_PARMBLOCK_SIZE (IPL_PARMBLOCK_START->hdr.length) +#define IPL_PARMBLOCK_SIZE (IPL_PARMBLOCK_START->hdr.len) #else /* __ASSEMBLY__ */ diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h index 657646054c5e..9fb02e9779c9 100644 --- a/include/asm-s390/smp.h +++ b/include/asm-s390/smp.h @@ -104,7 +104,7 @@ smp_call_function_on(void (*func) (void *info), void *info, #define smp_cpu_not_running(cpu) 1 #define smp_get_cpu(cpu) ({ 0; }) #define smp_put_cpu(cpu) ({ 0; }) -#define smp_setup_cpu_possible_map() +#define smp_setup_cpu_possible_map() do { } while (0) #endif #endif diff --git a/include/asm-s390/uaccess.h b/include/asm-s390/uaccess.h index 0b7c0ca4c3d7..e2047b0c9092 100644 --- a/include/asm-s390/uaccess.h +++ b/include/asm-s390/uaccess.h @@ -47,7 +47,7 @@ S390_lowcore.user_asce : S390_lowcore.kernel_asce; \ asm volatile ("lctlg 7,7,%0" : : "m" (__pto) ); \ }) -#else +#else /* __s390x__ */ #define set_fs(x) \ ({ \ unsigned long __pto; \ @@ -56,7 +56,7 @@ S390_lowcore.user_asce : S390_lowcore.kernel_asce; \ asm volatile ("lctl 7,7,%0" : : "m" (__pto) ); \ }) -#endif +#endif /* __s390x__ */ #define segment_eq(a,b) ((a).ar4 == (b).ar4) @@ -85,76 +85,51 @@ struct exception_table_entry unsigned long insn, fixup; }; -#ifndef __s390x__ -#define __uaccess_fixup \ - ".section .fixup,\"ax\"\n" \ - "2: lhi %0,%4\n" \ - " bras 1,3f\n" \ - " .long 1b\n" \ - "3: l 1,0(1)\n" \ - " br 1\n" \ - ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - " .align 4\n" \ - " .long 0b,2b\n" \ - ".previous" -#define __uaccess_clobber "cc", "1" -#else /* __s390x__ */ -#define __uaccess_fixup \ - ".section .fixup,\"ax\"\n" \ - "2: lghi %0,%4\n" \ - " jg 1b\n" \ - ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - " .align 8\n" \ - " .quad 0b,2b\n" \ - ".previous" -#define __uaccess_clobber "cc" -#endif /* 
__s390x__ */ +struct uaccess_ops { + size_t (*copy_from_user)(size_t, const void __user *, void *); + size_t (*copy_from_user_small)(size_t, const void __user *, void *); + size_t (*copy_to_user)(size_t, void __user *, const void *); + size_t (*copy_to_user_small)(size_t, void __user *, const void *); + size_t (*copy_in_user)(size_t, void __user *, const void __user *); + size_t (*clear_user)(size_t, void __user *); + size_t (*strnlen_user)(size_t, const char __user *); + size_t (*strncpy_from_user)(size_t, const char __user *, char *); + int (*futex_atomic_op)(int op, int __user *, int oparg, int *old); + int (*futex_atomic_cmpxchg)(int __user *, int old, int new); +}; + +extern struct uaccess_ops uaccess; +extern struct uaccess_ops uaccess_std; +extern struct uaccess_ops uaccess_mvcos; + +static inline int __put_user_fn(size_t size, void __user *ptr, void *x) +{ + size = uaccess.copy_to_user_small(size, ptr, x); + return size ? -EFAULT : size; +} + +static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) +{ + size = uaccess.copy_from_user_small(size, ptr, x); + return size ? -EFAULT : size; +} /* * These are the main single-value transfer routines. They automatically * use the right size if we just have the right pointer type. */ -#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) -#define __put_user_asm(x, ptr, err) \ -({ \ - err = 0; \ - asm volatile( \ - "0: mvcs 0(%1,%2),%3,%0\n" \ - "1:\n" \ - __uaccess_fixup \ - : "+&d" (err) \ - : "d" (sizeof(*(ptr))), "a" (ptr), "Q" (x), \ - "K" (-EFAULT) \ - : __uaccess_clobber ); \ -}) -#else -#define __put_user_asm(x, ptr, err) \ -({ \ - err = 0; \ - asm volatile( \ - "0: mvcs 0(%1,%2),0(%3),%0\n" \ - "1:\n" \ - __uaccess_fixup \ - : "+&d" (err) \ - : "d" (sizeof(*(ptr))), "a" (ptr), "a" (&(x)), \ - "K" (-EFAULT), "m" (x) \ - : __uaccess_clobber ); \ -}) -#endif - #define __put_user(x, ptr) \ ({ \ __typeof__(*(ptr)) __x = (x); \ - int __pu_err; \ + int __pu_err = -EFAULT; \ __chk_user_ptr(ptr); \ switch (sizeof (*(ptr))) { \ case 1: \ case 2: \ case 4: \ case 8: \ - __put_user_asm(__x, ptr, __pu_err); \ + __pu_err = __put_user_fn(sizeof (*(ptr)), \ + ptr, &__x); \ break; \ default: \ __put_user_bad(); \ @@ -172,60 +147,36 @@ struct exception_table_entry extern int __put_user_bad(void) __attribute__((noreturn)); -#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) -#define __get_user_asm(x, ptr, err) \ -({ \ - err = 0; \ - asm volatile ( \ - "0: mvcp %O1(%2,%R1),0(%3),%0\n" \ - "1:\n" \ - __uaccess_fixup \ - : "+&d" (err), "=Q" (x) \ - : "d" (sizeof(*(ptr))), "a" (ptr), \ - "K" (-EFAULT) \ - : __uaccess_clobber ); \ -}) -#else -#define __get_user_asm(x, ptr, err) \ -({ \ - err = 0; \ - asm volatile ( \ - "0: mvcp 0(%2,%5),0(%3),%0\n" \ - "1:\n" \ - __uaccess_fixup \ - : "+&d" (err), "=m" (x) \ - : "d" (sizeof(*(ptr))), "a" (ptr), \ - "K" (-EFAULT), "a" (&(x)) \ - : __uaccess_clobber ); \ -}) -#endif - #define __get_user(x, ptr) \ ({ \ - int __gu_err; \ - __chk_user_ptr(ptr); \ + int __gu_err = -EFAULT; \ + __chk_user_ptr(ptr); \ switch (sizeof(*(ptr))) { \ case 1: { \ unsigned char __x; \ - __get_user_asm(__x, ptr, __gu_err); \ + __gu_err = __get_user_fn(sizeof (*(ptr)), \ + ptr, &__x); \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \ break; \ }; \ case 2: { \ unsigned short __x; \ - __get_user_asm(__x, ptr, __gu_err); \ + __gu_err = __get_user_fn(sizeof (*(ptr)), \ + ptr, &__x); \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \ break; \ }; \ case 4: { \ unsigned int __x; \ - __get_user_asm(__x, ptr, __gu_err); \ 
+ __gu_err = __get_user_fn(sizeof (*(ptr)), \ + ptr, &__x); \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \ break; \ }; \ case 8: { \ unsigned long long __x; \ - __get_user_asm(__x, ptr, __gu_err); \ + __gu_err = __get_user_fn(sizeof (*(ptr)), \ + ptr, &__x); \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \ break; \ }; \ @@ -247,8 +198,6 @@ extern int __get_user_bad(void) __attribute__((noreturn)); #define __put_user_unaligned __put_user #define __get_user_unaligned __get_user -extern long __copy_to_user_asm(const void *from, long n, void __user *to); - /** * __copy_to_user: - Copy a block of data into user space, with less checking. * @to: Destination address, in user space. @@ -266,7 +215,10 @@ extern long __copy_to_user_asm(const void *from, long n, void __user *to); static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) { - return __copy_to_user_asm(from, n, to); + if (__builtin_constant_p(n) && (n <= 256)) + return uaccess.copy_to_user_small(n, to, from); + else + return uaccess.copy_to_user(n, to, from); } #define __copy_to_user_inatomic __copy_to_user @@ -294,8 +246,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n) return n; } -extern long __copy_from_user_asm(void *to, long n, const void __user *from); - /** * __copy_from_user: - Copy a block of data from user space, with less checking. * @to: Destination address, in kernel space. @@ -316,7 +266,10 @@ extern long __copy_from_user_asm(void *to, long n, const void __user *from); static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) { - return __copy_from_user_asm(to, n, from); + if (__builtin_constant_p(n) && (n <= 256)) + return uaccess.copy_from_user_small(n, from, to); + else + return uaccess.copy_from_user(n, from, to); } /** @@ -346,13 +299,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n) return n; } -extern unsigned long __copy_in_user_asm(const void __user *from, long n, - void __user *to); - static inline unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n) { - return __copy_in_user_asm(from, n, to); + return uaccess.copy_in_user(n, to, from); } static inline unsigned long @@ -360,34 +310,28 @@ copy_in_user(void __user *to, const void __user *from, unsigned long n) { might_sleep(); if (__access_ok(from,n) && __access_ok(to,n)) - n = __copy_in_user_asm(from, n, to); + n = __copy_in_user(to, from, n); return n; } /* * Copy a null terminated string from userspace. 
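Everything here now dispatches through the uaccess_ops table, so the standard and MVCOS-based implementations become interchangeable at run time. Conceptually, the boot-time choice (made in arch setup code, not in these headers) reduces to:

    /* sketch only - the real selection site is outside these headers */
    uaccess = MACHINE_HAS_MVCOS ? uaccess_mvcos : uaccess_std;

Note the small-copy fast path: a compile-time-constant length of at most 256 bytes goes through copy_to_user_small()/copy_from_user_small(), as the __copy_to_user()/__copy_from_user() wrappers above show.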
*/ -extern long __strncpy_from_user_asm(long count, char *dst, - const char __user *src); - static inline long strncpy_from_user(char *dst, const char __user *src, long count) { long res = -EFAULT; might_sleep(); if (access_ok(VERIFY_READ, src, 1)) - res = __strncpy_from_user_asm(count, dst, src); + res = uaccess.strncpy_from_user(count, src, dst); return res; } - -extern long __strnlen_user_asm(long count, const char __user *src); - static inline unsigned long strnlen_user(const char __user * src, unsigned long n) { might_sleep(); - return __strnlen_user_asm(n, src); + return uaccess.strnlen_user(n, src); } /** @@ -410,12 +354,10 @@ strnlen_user(const char __user * src, unsigned long n) * Zero Userspace */ -extern long __clear_user_asm(void __user *to, long n); - static inline unsigned long __clear_user(void __user *to, unsigned long n) { - return __clear_user_asm(to, n); + return uaccess.clear_user(n, to); } static inline unsigned long @@ -423,7 +365,7 @@ clear_user(void __user *to, unsigned long n) { might_sleep(); if (access_ok(VERIFY_WRITE, to, n)) - n = __clear_user_asm(to, n); + n = uaccess.clear_user(n, to); return n; } diff --git a/include/asm-s390/unistd.h b/include/asm-s390/unistd.h index aa7a243862e1..02b942d85c37 100644 --- a/include/asm-s390/unistd.h +++ b/include/asm-s390/unistd.h @@ -25,17 +25,12 @@ #define __NR_unlink 10 #define __NR_execve 11 #define __NR_chdir 12 -#define __NR_time 13 #define __NR_mknod 14 #define __NR_chmod 15 -#define __NR_lchown 16 #define __NR_lseek 19 #define __NR_getpid 20 #define __NR_mount 21 #define __NR_umount 22 -#define __NR_setuid 23 -#define __NR_getuid 24 -#define __NR_stime 25 #define __NR_ptrace 26 #define __NR_alarm 27 #define __NR_pause 29 @@ -51,11 +46,7 @@ #define __NR_pipe 42 #define __NR_times 43 #define __NR_brk 45 -#define __NR_setgid 46 -#define __NR_getgid 47 #define __NR_signal 48 -#define __NR_geteuid 49 -#define __NR_getegid 50 #define __NR_acct 51 #define __NR_umount2 52 #define __NR_ioctl 54 @@ -69,18 +60,13 @@ #define __NR_getpgrp 65 #define __NR_setsid 66 #define __NR_sigaction 67 -#define __NR_setreuid 70 -#define __NR_setregid 71 #define __NR_sigsuspend 72 #define __NR_sigpending 73 #define __NR_sethostname 74 #define __NR_setrlimit 75 -#define __NR_getrlimit 76 #define __NR_getrusage 77 #define __NR_gettimeofday 78 #define __NR_settimeofday 79 -#define __NR_getgroups 80 -#define __NR_setgroups 81 #define __NR_symlink 83 #define __NR_readlink 85 #define __NR_uselib 86 @@ -92,12 +78,10 @@ #define __NR_truncate 92 #define __NR_ftruncate 93 #define __NR_fchmod 94 -#define __NR_fchown 95 #define __NR_getpriority 96 #define __NR_setpriority 97 #define __NR_statfs 99 #define __NR_fstatfs 100 -#define __NR_ioperm 101 #define __NR_socketcall 102 #define __NR_syslog 103 #define __NR_setitimer 104 @@ -131,11 +115,7 @@ #define __NR_sysfs 135 #define __NR_personality 136 #define __NR_afs_syscall 137 /* Syscall for Andrew File System */ -#define __NR_setfsuid 138 -#define __NR_setfsgid 139 -#define __NR__llseek 140 #define __NR_getdents 141 -#define __NR__newselect 142 #define __NR_flock 143 #define __NR_msync 144 #define __NR_readv 145 @@ -157,13 +137,9 @@ #define __NR_sched_rr_get_interval 161 #define __NR_nanosleep 162 #define __NR_mremap 163 -#define __NR_setresuid 164 -#define __NR_getresuid 165 #define __NR_query_module 167 #define __NR_poll 168 #define __NR_nfsservctl 169 -#define __NR_setresgid 170 -#define __NR_getresgid 171 #define __NR_prctl 172 #define __NR_rt_sigreturn 173 #define __NR_rt_sigaction 174 @@ -174,7 +150,6 
@@ #define __NR_rt_sigsuspend 179 #define __NR_pread64 180 #define __NR_pwrite64 181 -#define __NR_chown 182 #define __NR_getcwd 183 #define __NR_capget 184 #define __NR_capset 185 @@ -183,39 +158,11 @@ #define __NR_getpmsg 188 #define __NR_putpmsg 189 #define __NR_vfork 190 -#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */ -#define __NR_mmap2 192 -#define __NR_truncate64 193 -#define __NR_ftruncate64 194 -#define __NR_stat64 195 -#define __NR_lstat64 196 -#define __NR_fstat64 197 -#define __NR_lchown32 198 -#define __NR_getuid32 199 -#define __NR_getgid32 200 -#define __NR_geteuid32 201 -#define __NR_getegid32 202 -#define __NR_setreuid32 203 -#define __NR_setregid32 204 -#define __NR_getgroups32 205 -#define __NR_setgroups32 206 -#define __NR_fchown32 207 -#define __NR_setresuid32 208 -#define __NR_getresuid32 209 -#define __NR_setresgid32 210 -#define __NR_getresgid32 211 -#define __NR_chown32 212 -#define __NR_setuid32 213 -#define __NR_setgid32 214 -#define __NR_setfsuid32 215 -#define __NR_setfsgid32 216 #define __NR_pivot_root 217 #define __NR_mincore 218 #define __NR_madvise 219 #define __NR_getdents64 220 -#define __NR_fcntl64 221 #define __NR_readahead 222 -#define __NR_sendfile64 223 #define __NR_setxattr 224 #define __NR_lsetxattr 225 #define __NR_fsetxattr 226 @@ -256,7 +203,6 @@ #define __NR_clock_getres (__NR_timer_create+7) #define __NR_clock_nanosleep (__NR_timer_create+8) /* Number 263 is reserved for vserver */ -#define __NR_fadvise64_64 264 #define __NR_statfs64 265 #define __NR_fstatfs64 266 #define __NR_remap_file_pages 267 @@ -285,7 +231,6 @@ #define __NR_mknodat 290 #define __NR_fchownat 291 #define __NR_futimesat 292 -#define __NR_fstatat64 293 #define __NR_unlinkat 294 #define __NR_renameat 295 #define __NR_linkat 296 @@ -310,62 +255,65 @@ * have a different name although they do the same (e.g. __NR_chown32 * is __NR_chown on 64 bit). 
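The rework that follows turns the old #undef list inside-out: the 31-bit-only numbers are defined under #ifndef __s390x__, so on 64-bit builds they are simply absent and code can test for them with the preprocessor. A user-space sketch:

    #include <sys/resource.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    struct rlimit rl;
    long ret;

    #ifdef __NR_ugetrlimit          /* 31-bit: SuS getrlimit is 191 */
            ret = syscall(__NR_ugetrlimit, RLIMIT_NOFILE, &rl);
    #else                           /* 64-bit: __NR_getrlimit is 191 */
            ret = syscall(__NR_getrlimit, RLIMIT_NOFILE, &rl);
    #endif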
*/ -#ifdef __s390x__ -#undef __NR_time -#undef __NR_lchown -#undef __NR_setuid -#undef __NR_getuid -#undef __NR_stime -#undef __NR_setgid -#undef __NR_getgid -#undef __NR_geteuid -#undef __NR_getegid -#undef __NR_setreuid -#undef __NR_setregid -#undef __NR_getrlimit -#undef __NR_getgroups -#undef __NR_setgroups -#undef __NR_fchown -#undef __NR_ioperm -#undef __NR_setfsuid -#undef __NR_setfsgid -#undef __NR__llseek -#undef __NR__newselect -#undef __NR_setresuid -#undef __NR_getresuid -#undef __NR_setresgid -#undef __NR_getresgid -#undef __NR_chown -#undef __NR_ugetrlimit -#undef __NR_mmap2 -#undef __NR_truncate64 -#undef __NR_ftruncate64 -#undef __NR_stat64 -#undef __NR_lstat64 -#undef __NR_fstat64 -#undef __NR_lchown32 -#undef __NR_getuid32 -#undef __NR_getgid32 -#undef __NR_geteuid32 -#undef __NR_getegid32 -#undef __NR_setreuid32 -#undef __NR_setregid32 -#undef __NR_getgroups32 -#undef __NR_setgroups32 -#undef __NR_fchown32 -#undef __NR_setresuid32 -#undef __NR_getresuid32 -#undef __NR_setresgid32 -#undef __NR_getresgid32 -#undef __NR_chown32 -#undef __NR_setuid32 -#undef __NR_setgid32 -#undef __NR_setfsuid32 -#undef __NR_setfsgid32 -#undef __NR_fcntl64 -#undef __NR_sendfile64 -#undef __NR_fadvise64_64 -#undef __NR_fstatat64 +#ifndef __s390x__ + +#define __NR_time 13 +#define __NR_lchown 16 +#define __NR_setuid 23 +#define __NR_getuid 24 +#define __NR_stime 25 +#define __NR_setgid 46 +#define __NR_getgid 47 +#define __NR_geteuid 49 +#define __NR_getegid 50 +#define __NR_setreuid 70 +#define __NR_setregid 71 +#define __NR_getrlimit 76 +#define __NR_getgroups 80 +#define __NR_setgroups 81 +#define __NR_fchown 95 +#define __NR_ioperm 101 +#define __NR_setfsuid 138 +#define __NR_setfsgid 139 +#define __NR__llseek 140 +#define __NR__newselect 142 +#define __NR_setresuid 164 +#define __NR_getresuid 165 +#define __NR_setresgid 170 +#define __NR_getresgid 171 +#define __NR_chown 182 +#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */ +#define __NR_mmap2 192 +#define __NR_truncate64 193 +#define __NR_ftruncate64 194 +#define __NR_stat64 195 +#define __NR_lstat64 196 +#define __NR_fstat64 197 +#define __NR_lchown32 198 +#define __NR_getuid32 199 +#define __NR_getgid32 200 +#define __NR_geteuid32 201 +#define __NR_getegid32 202 +#define __NR_setreuid32 203 +#define __NR_setregid32 204 +#define __NR_getgroups32 205 +#define __NR_setgroups32 206 +#define __NR_fchown32 207 +#define __NR_setresuid32 208 +#define __NR_getresuid32 209 +#define __NR_setresgid32 210 +#define __NR_getresgid32 211 +#define __NR_chown32 212 +#define __NR_setuid32 213 +#define __NR_setgid32 214 +#define __NR_setfsuid32 215 +#define __NR_setfsgid32 216 +#define __NR_fcntl64 221 +#define __NR_sendfile64 223 +#define __NR_fadvise64_64 264 +#define __NR_fstatat64 293 + +#else #define __NR_select 142 #define __NR_getrlimit 191 /* SuS compliant getrlimit */ diff --git a/include/asm-s390/z90crypt.h b/include/asm-s390/z90crypt.h deleted file mode 100644 index 31a2439b07bd..000000000000 --- a/include/asm-s390/z90crypt.h +++ /dev/null @@ -1,212 +0,0 @@ -/* - * include/asm-s390/z90crypt.h - * - * z90crypt 1.3.3 (user-visible header) - * - * Copyright (C) 2001, 2005 IBM Corporation - * Author(s): Robert Burroughs - * Eric Rossman (edrossma@us.ibm.com) - * - * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at 
your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -#ifndef __ASM_S390_Z90CRYPT_H -#define __ASM_S390_Z90CRYPT_H -#include <linux/ioctl.h> - -#define z90crypt_VERSION 1 -#define z90crypt_RELEASE 3 // 2 = PCIXCC, 3 = rewrite for coding standards -#define z90crypt_VARIANT 3 // 3 = CEX2A support - -/** - * struct ica_rsa_modexpo - * - * Requirements: - * - outputdatalength is at least as large as inputdatalength. - * - All key parts are right justified in their fields, padded on - * the left with zeroes. - * - length(b_key) = inputdatalength - * - length(n_modulus) = inputdatalength - */ -struct ica_rsa_modexpo { - char __user * inputdata; - unsigned int inputdatalength; - char __user * outputdata; - unsigned int outputdatalength; - char __user * b_key; - char __user * n_modulus; -}; - -/** - * struct ica_rsa_modexpo_crt - * - * Requirements: - * - inputdatalength is even. - * - outputdatalength is at least as large as inputdatalength. - * - All key parts are right justified in their fields, padded on - * the left with zeroes. - * - length(bp_key) = inputdatalength/2 + 8 - * - length(bq_key) = inputdatalength/2 - * - length(np_key) = inputdatalength/2 + 8 - * - length(nq_key) = inputdatalength/2 - * - length(u_mult_inv) = inputdatalength/2 + 8 - */ -struct ica_rsa_modexpo_crt { - char __user * inputdata; - unsigned int inputdatalength; - char __user * outputdata; - unsigned int outputdatalength; - char __user * bp_key; - char __user * bq_key; - char __user * np_prime; - char __user * nq_prime; - char __user * u_mult_inv; -}; - -#define Z90_IOCTL_MAGIC 'z' // NOTE: Need to allocate from linux folks - -/** - * Interface notes: - * - * The ioctl()s which are implemented (along with relevant details) - * are: - * - * ICARSAMODEXPO - * Perform an RSA operation using a Modulus-Exponent pair - * This takes an ica_rsa_modexpo struct as its arg. - * - * NOTE: please refer to the comments preceding this structure - * for the implementation details for the contents of the - * block - * - * ICARSACRT - * Perform an RSA operation using a Chinese-Remainder Theorem key - * This takes an ica_rsa_modexpo_crt struct as its arg. - * - * NOTE: please refer to the comments preceding this structure - * for the implementation details for the contents of the - * block - * - * Z90STAT_TOTALCOUNT - * Return an integer count of all device types together. - * - * Z90STAT_PCICACOUNT - * Return an integer count of all PCICAs. - * - * Z90STAT_PCICCCOUNT - * Return an integer count of all PCICCs. - * - * Z90STAT_PCIXCCMCL2COUNT - * Return an integer count of all MCL2 PCIXCCs. - * - * Z90STAT_PCIXCCMCL3COUNT - * Return an integer count of all MCL3 PCIXCCs. - * - * Z90STAT_CEX2CCOUNT - * Return an integer count of all CEX2Cs. - * - * Z90STAT_CEX2ACOUNT - * Return an integer count of all CEX2As. - * - * Z90STAT_REQUESTQ_COUNT - * Return an integer count of the number of entries waiting to be - * sent to a device. - * - * Z90STAT_PENDINGQ_COUNT - * Return an integer count of the number of entries sent to a - * device awaiting the reply. 
- * - * Z90STAT_TOTALOPEN_COUNT - * Return an integer count of the number of open file handles. - * - * Z90STAT_DOMAIN_INDEX - * Return the integer value of the Cryptographic Domain. - * - * Z90STAT_STATUS_MASK - * Return an 64 element array of unsigned chars for the status of - * all devices. - * 0x01: PCICA - * 0x02: PCICC - * 0x03: PCIXCC_MCL2 - * 0x04: PCIXCC_MCL3 - * 0x05: CEX2C - * 0x06: CEX2A - * 0x0d: device is disabled via the proc filesystem - * - * Z90STAT_QDEPTH_MASK - * Return an 64 element array of unsigned chars for the queue - * depth of all devices. - * - * Z90STAT_PERDEV_REQCNT - * Return an 64 element array of unsigned integers for the number - * of successfully completed requests per device since the device - * was detected and made available. - * - * ICAZ90STATUS (deprecated) - * Return some device driver status in a ica_z90_status struct - * This takes an ica_z90_status struct as its arg. - * - * NOTE: this ioctl() is deprecated, and has been replaced with - * single ioctl()s for each type of status being requested - * - * Z90STAT_PCIXCCCOUNT (deprecated) - * Return an integer count of all PCIXCCs (MCL2 + MCL3). - * This is DEPRECATED now that MCL3 PCIXCCs are treated differently from - * MCL2 PCIXCCs. - * - * Z90QUIESCE (not recommended) - * Quiesce the driver. This is intended to stop all new - * requests from being processed. Its use is NOT recommended, - * except in circumstances where there is no other way to stop - * callers from accessing the driver. Its original use was to - * allow the driver to be "drained" of work in preparation for - * a system shutdown. - * - * NOTE: once issued, this ban on new work cannot be undone - * except by unloading and reloading the driver. - */ - -/** - * Supported ioctl calls - */ -#define ICARSAMODEXPO _IOC(_IOC_READ|_IOC_WRITE, Z90_IOCTL_MAGIC, 0x05, 0) -#define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, Z90_IOCTL_MAGIC, 0x06, 0) - -/* DEPRECATED status calls (bound for removal at some point) */ -#define ICAZ90STATUS _IOR(Z90_IOCTL_MAGIC, 0x10, struct ica_z90_status) -#define Z90STAT_PCIXCCCOUNT _IOR(Z90_IOCTL_MAGIC, 0x43, int) - -/* unrelated to ICA callers */ -#define Z90QUIESCE _IO(Z90_IOCTL_MAGIC, 0x11) - -/* New status calls */ -#define Z90STAT_TOTALCOUNT _IOR(Z90_IOCTL_MAGIC, 0x40, int) -#define Z90STAT_PCICACOUNT _IOR(Z90_IOCTL_MAGIC, 0x41, int) -#define Z90STAT_PCICCCOUNT _IOR(Z90_IOCTL_MAGIC, 0x42, int) -#define Z90STAT_PCIXCCMCL2COUNT _IOR(Z90_IOCTL_MAGIC, 0x4b, int) -#define Z90STAT_PCIXCCMCL3COUNT _IOR(Z90_IOCTL_MAGIC, 0x4c, int) -#define Z90STAT_CEX2CCOUNT _IOR(Z90_IOCTL_MAGIC, 0x4d, int) -#define Z90STAT_CEX2ACOUNT _IOR(Z90_IOCTL_MAGIC, 0x4e, int) -#define Z90STAT_REQUESTQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x44, int) -#define Z90STAT_PENDINGQ_COUNT _IOR(Z90_IOCTL_MAGIC, 0x45, int) -#define Z90STAT_TOTALOPEN_COUNT _IOR(Z90_IOCTL_MAGIC, 0x46, int) -#define Z90STAT_DOMAIN_INDEX _IOR(Z90_IOCTL_MAGIC, 0x47, int) -#define Z90STAT_STATUS_MASK _IOR(Z90_IOCTL_MAGIC, 0x48, char[64]) -#define Z90STAT_QDEPTH_MASK _IOR(Z90_IOCTL_MAGIC, 0x49, char[64]) -#define Z90STAT_PERDEV_REQCNT _IOR(Z90_IOCTL_MAGIC, 0x4a, int[64]) - -#endif /* __ASM_S390_Z90CRYPT_H */ diff --git a/include/asm-s390/zcrypt.h b/include/asm-s390/zcrypt.h new file mode 100644 index 000000000000..7244c68464f2 --- /dev/null +++ b/include/asm-s390/zcrypt.h @@ -0,0 +1,285 @@ +/* + * include/asm-s390/zcrypt.h + * + * zcrypt 2.1.0 (user-visible header) + * + * Copyright (C) 2001, 2006 IBM Corporation + * Author(s): Robert Burroughs + * Eric Rossman (edrossma@us.ibm.com) + * + 
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __ASM_S390_ZCRYPT_H +#define __ASM_S390_ZCRYPT_H + +#define ZCRYPT_VERSION 2 +#define ZCRYPT_RELEASE 1 +#define ZCRYPT_VARIANT 0 + +#include <linux/ioctl.h> +#include <linux/compiler.h> + +/** + * struct ica_rsa_modexpo + * + * Requirements: + * - outputdatalength is at least as large as inputdatalength. + * - All key parts are right justified in their fields, padded on + * the left with zeroes. + * - length(b_key) = inputdatalength + * - length(n_modulus) = inputdatalength + */ +struct ica_rsa_modexpo { + char __user * inputdata; + unsigned int inputdatalength; + char __user * outputdata; + unsigned int outputdatalength; + char __user * b_key; + char __user * n_modulus; +}; + +/** + * struct ica_rsa_modexpo_crt + * + * Requirements: + * - inputdatalength is even. + * - outputdatalength is at least as large as inputdatalength. + * - All key parts are right justified in their fields, padded on + * the left with zeroes. + * - length(bp_key) = inputdatalength/2 + 8 + * - length(bq_key) = inputdatalength/2 + * - length(np_key) = inputdatalength/2 + 8 + * - length(nq_key) = inputdatalength/2 + * - length(u_mult_inv) = inputdatalength/2 + 8 + */ +struct ica_rsa_modexpo_crt { + char __user * inputdata; + unsigned int inputdatalength; + char __user * outputdata; + unsigned int outputdatalength; + char __user * bp_key; + char __user * bq_key; + char __user * np_prime; + char __user * nq_prime; + char __user * u_mult_inv; +}; + +/** + * CPRBX + * Note that all shorts and ints are big-endian. + * All pointer fields are 16 bytes long, and mean nothing. + * + * A request CPRB is followed by a request_parameter_block. + * + * The request (or reply) parameter block is organized thus: + * function code + * VUD block + * key block + */ +struct ica_CPRBX { + unsigned short cprb_len; /* CPRB length 220 */ + unsigned char cprb_ver_id; /* CPRB version id. 
0x02 */ + unsigned char pad_000[3]; /* Alignment pad bytes */ + unsigned char func_id[2]; /* function id 0x5432 */ + unsigned char cprb_flags[4]; /* Flags */ + unsigned int req_parml; /* request parameter buffer len */ + unsigned int req_datal; /* request data buffer */ + unsigned int rpl_msgbl; /* reply message block length */ + unsigned int rpld_parml; /* replied parameter block len */ + unsigned int rpl_datal; /* reply data block len */ + unsigned int rpld_datal; /* replied data block len */ + unsigned int req_extbl; /* request extension block len */ + unsigned char pad_001[4]; /* reserved */ + unsigned int rpld_extbl; /* replied extension block len */ + unsigned char padx000[16 - sizeof (char *)]; + unsigned char * req_parmb; /* request parm block 'address' */ + unsigned char padx001[16 - sizeof (char *)]; + unsigned char * req_datab; /* request data block 'address' */ + unsigned char padx002[16 - sizeof (char *)]; + unsigned char * rpl_parmb; /* reply parm block 'address' */ + unsigned char padx003[16 - sizeof (char *)]; + unsigned char * rpl_datab; /* reply data block 'address' */ + unsigned char padx004[16 - sizeof (char *)]; + unsigned char * req_extb; /* request extension block 'addr'*/ + unsigned char padx005[16 - sizeof (char *)]; + unsigned char * rpl_extb; /* reply extension block 'addres'*/ + unsigned short ccp_rtcode; /* server return code */ + unsigned short ccp_rscode; /* server reason code */ + unsigned int mac_data_len; /* Mac Data Length */ + unsigned char logon_id[8]; /* Logon Identifier */ + unsigned char mac_value[8]; /* Mac Value */ + unsigned char mac_content_flgs;/* Mac content flag byte */ + unsigned char pad_002; /* Alignment */ + unsigned short domain; /* Domain */ + unsigned char usage_domain[4];/* Usage domain */ + unsigned char cntrl_domain[4];/* Control domain */ + unsigned char S390enf_mask[4];/* S/390 enforcement mask */ + unsigned char pad_004[36]; /* reserved */ +}; + +/** + * xcRB + */ +struct ica_xcRB { + unsigned short agent_ID; + unsigned int user_defined; + unsigned short request_ID; + unsigned int request_control_blk_length; + unsigned char padding1[16 - sizeof (char *)]; + char __user * request_control_blk_addr; + unsigned int request_data_length; + char padding2[16 - sizeof (char *)]; + char __user * request_data_address; + unsigned int reply_control_blk_length; + char padding3[16 - sizeof (char *)]; + char __user * reply_control_blk_addr; + unsigned int reply_data_length; + char padding4[16 - sizeof (char *)]; + char __user * reply_data_addr; + unsigned short priority_window; + unsigned int status; +} __attribute__((packed)); +#define AUTOSELECT ((unsigned int)0xFFFFFFFF) + +#define ZCRYPT_IOCTL_MAGIC 'z' + +/** + * Interface notes: + * + * The ioctl()s which are implemented (along with relevant details) + * are: + * + * ICARSAMODEXPO + * Perform an RSA operation using a Modulus-Exponent pair + * This takes an ica_rsa_modexpo struct as its arg. + * + * NOTE: please refer to the comments preceding this structure + * for the implementation details for the contents of the + * block + * + * ICARSACRT + * Perform an RSA operation using a Chinese-Remainder Theorem key + * This takes an ica_rsa_modexpo_crt struct as its arg. + * + * NOTE: please refer to the comments preceding this structure + * for the implementation details for the contents of the + * block + * + * Z90STAT_TOTALCOUNT + * Return an integer count of all device types together. + * + * Z90STAT_PCICACOUNT + * Return an integer count of all PCICAs. 
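As an illustration of the status calls listed here, a single-integer query from user space can look like the sketch below. The Z90STAT_* ioctl numbers are defined at the bottom of this header; the /dev/z90crypt node name and the exported header path are assumptions of the sketch, not guarantees of the interface.

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <asm/zcrypt.h>               /* exported header path assumed */

    /* Ask the driver how many crypto devices it currently knows about. */
    int zcrypt_device_count(void)
    {
            int fd, count;

            fd = open("/dev/z90crypt", O_RDONLY);   /* node name assumed */
            if (fd < 0)
                    return -1;
            if (ioctl(fd, Z90STAT_TOTALCOUNT, &count) < 0)
                    count = -1;
            close(fd);
            return count;
    }

The remaining status descriptions continue below.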
+ * + * Z90STAT_PCICCCOUNT + * Return an integer count of all PCICCs. + * + * Z90STAT_PCIXCCMCL2COUNT + * Return an integer count of all MCL2 PCIXCCs. + * + * Z90STAT_PCIXCCMCL3COUNT + * Return an integer count of all MCL3 PCIXCCs. + * + * Z90STAT_CEX2CCOUNT + * Return an integer count of all CEX2Cs. + * + * Z90STAT_CEX2ACOUNT + * Return an integer count of all CEX2As. + * + * Z90STAT_REQUESTQ_COUNT + * Return an integer count of the number of entries waiting to be + * sent to a device. + * + * Z90STAT_PENDINGQ_COUNT + * Return an integer count of the number of entries sent to a + * device awaiting the reply. + * + * Z90STAT_TOTALOPEN_COUNT + * Return an integer count of the number of open file handles. + * + * Z90STAT_DOMAIN_INDEX + * Return the integer value of the Cryptographic Domain. + * + * Z90STAT_STATUS_MASK + * Return an 64 element array of unsigned chars for the status of + * all devices. + * 0x01: PCICA + * 0x02: PCICC + * 0x03: PCIXCC_MCL2 + * 0x04: PCIXCC_MCL3 + * 0x05: CEX2C + * 0x06: CEX2A + * 0x0d: device is disabled via the proc filesystem + * + * Z90STAT_QDEPTH_MASK + * Return an 64 element array of unsigned chars for the queue + * depth of all devices. + * + * Z90STAT_PERDEV_REQCNT + * Return an 64 element array of unsigned integers for the number + * of successfully completed requests per device since the device + * was detected and made available. + * + * ICAZ90STATUS (deprecated) + * Return some device driver status in a ica_z90_status struct + * This takes an ica_z90_status struct as its arg. + * + * NOTE: this ioctl() is deprecated, and has been replaced with + * single ioctl()s for each type of status being requested + * + * Z90STAT_PCIXCCCOUNT (deprecated) + * Return an integer count of all PCIXCCs (MCL2 + MCL3). + * This is DEPRECATED now that MCL3 PCIXCCs are treated differently from + * MCL2 PCIXCCs. + * + * Z90QUIESCE (not recommended) + * Quiesce the driver. This is intended to stop all new + * requests from being processed. Its use is NOT recommended, + * except in circumstances where there is no other way to stop + * callers from accessing the driver. Its original use was to + * allow the driver to be "drained" of work in preparation for + * a system shutdown. + * + * NOTE: once issued, this ban on new work cannot be undone + * except by unloading and reloading the driver. 
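To make the calling convention concrete, here is a sketch of a modular exponentiation request issued through ICARSAMODEXPO. The buffer size (128 bytes, i.e. RSA-1024) and the /dev/z90crypt node name are illustrative assumptions; the mandatory length relationships are the ones documented above struct ica_rsa_modexpo.

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <asm/zcrypt.h>               /* exported header path assumed */

    /* Compute out = in ^ exp mod mod on a crypto adapter (RSA-1024 sized). */
    int zcrypt_modexpo(unsigned char *in, unsigned char *exp,
                       unsigned char *mod, unsigned char *out)
    {
            struct ica_rsa_modexpo mex;
            int fd, rc;

            fd = open("/dev/z90crypt", O_RDWR);     /* node name assumed */
            if (fd < 0)
                    return -1;

            memset(&mex, 0, sizeof(mex));
            mex.inputdata = (char *) in;
            mex.inputdatalength = 128;
            mex.outputdata = (char *) out;
            mex.outputdatalength = 128;   /* must be >= inputdatalength */
            mex.b_key = (char *) exp;     /* right justified, zero padded */
            mex.n_modulus = (char *) mod; /* length == inputdatalength */

            rc = ioctl(fd, ICARSAMODEXPO, &mex);
            close(fd);
            return rc;
    }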
+ */ + +/** + * Supported ioctl calls + */ +#define ICARSAMODEXPO _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x05, 0) +#define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0) +#define ZSECSENDCPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0) + +/* New status calls */ +#define Z90STAT_TOTALCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x40, int) +#define Z90STAT_PCICACOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x41, int) +#define Z90STAT_PCICCCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x42, int) +#define Z90STAT_PCIXCCMCL2COUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x4b, int) +#define Z90STAT_PCIXCCMCL3COUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x4c, int) +#define Z90STAT_CEX2CCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x4d, int) +#define Z90STAT_CEX2ACOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x4e, int) +#define Z90STAT_REQUESTQ_COUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x44, int) +#define Z90STAT_PENDINGQ_COUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x45, int) +#define Z90STAT_TOTALOPEN_COUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x46, int) +#define Z90STAT_DOMAIN_INDEX _IOR(ZCRYPT_IOCTL_MAGIC, 0x47, int) +#define Z90STAT_STATUS_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x48, char[64]) +#define Z90STAT_QDEPTH_MASK _IOR(ZCRYPT_IOCTL_MAGIC, 0x49, char[64]) +#define Z90STAT_PERDEV_REQCNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x4a, int[64]) + +#endif /* __ASM_S390_ZCRYPT_H */ diff --git a/include/asm-sparc/Kbuild b/include/asm-sparc/Kbuild index e2a57fd7abfa..b22b67a64ecc 100644 --- a/include/asm-sparc/Kbuild +++ b/include/asm-sparc/Kbuild @@ -1,6 +1,22 @@ include include/asm-generic/Kbuild.asm -unifdef-y += fbio.h perfctr.h psr.h -header-y += apc.h asi.h auxio.h bpp.h head.h ipc.h jsflash.h \ - openpromio.h pbm.h pconf.h pgtsun4.h reg.h traps.h \ - turbosparc.h vfc_ioctls.h winmacro.h +header-y += apc.h +header-y += asi.h +header-y += auxio.h +header-y += bpp.h +header-y += head.h +header-y += ipc.h +header-y += jsflash.h +header-y += openpromio.h +header-y += pbm.h +header-y += pconf.h +header-y += pgtsun4.h +header-y += reg.h +header-y += traps.h +header-y += turbosparc.h +header-y += vfc_ioctls.h +header-y += winmacro.h + +unifdef-y += fbio.h +unifdef-y += perfctr.h +unifdef-y += psr.h diff --git a/include/asm-sparc64/Kbuild b/include/asm-sparc64/Kbuild index 9284c3cb27ec..4b59ce46cc2d 100644 --- a/include/asm-sparc64/Kbuild +++ b/include/asm-sparc64/Kbuild @@ -4,7 +4,26 @@ ALTARCH := sparc ARCHDEF := defined __sparc__ && defined __arch64__ ALTARCHDEF := defined __sparc__ && !defined __arch64__ -unifdef-y += fbio.h perfctr.h -header-y += apb.h asi.h bbc.h bpp.h display7seg.h envctrl.h floppy.h \ - ipc.h kdebug.h mostek.h openprom.h openpromio.h parport.h \ - pconf.h psrcompat.h pstate.h reg.h uctx.h utrap.h watchdog.h +header-y += apb.h +header-y += asi.h +header-y += bbc.h +header-y += bpp.h +header-y += display7seg.h +header-y += envctrl.h +header-y += floppy.h +header-y += ipc.h +header-y += kdebug.h +header-y += mostek.h +header-y += openprom.h +header-y += openpromio.h +header-y += parport.h +header-y += pconf.h +header-y += psrcompat.h +header-y += pstate.h +header-y += reg.h +header-y += uctx.h +header-y += utrap.h +header-y += watchdog.h + +unifdef-y += fbio.h +unifdef-y += perfctr.h diff --git a/include/asm-x86_64/Kbuild b/include/asm-x86_64/Kbuild index dc4d101e8a16..40f2f13fe174 100644 --- a/include/asm-x86_64/Kbuild +++ b/include/asm-x86_64/Kbuild @@ -4,8 +4,18 @@ ALTARCH := i386 ARCHDEF := defined __x86_64__ ALTARCHDEF := defined __i386__ -header-y += boot.h bootsetup.h cpufeature.h debugreg.h ldt.h \ - msr.h prctl.h setup.h sigcontext32.h ucontext.h \ - vsyscall32.h +header-y += boot.h 
+header-y += bootsetup.h +header-y += cpufeature.h +header-y += debugreg.h +header-y += ldt.h +header-y += msr.h +header-y += prctl.h +header-y += setup.h +header-y += sigcontext32.h +header-y += ucontext.h +header-y += vsyscall32.h -unifdef-y += mce.h mtrr.h vsyscall.h +unifdef-y += mce.h +unifdef-y += mtrr.h +unifdef-y += vsyscall.h diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h new file mode 100644 index 000000000000..5748aecdb414 --- /dev/null +++ b/include/crypto/algapi.h @@ -0,0 +1,156 @@ +/* + * Cryptographic API for algorithms (i.e., low-level API). + * + * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + */ +#ifndef _CRYPTO_ALGAPI_H +#define _CRYPTO_ALGAPI_H + +#include <linux/crypto.h> + +struct module; +struct seq_file; + +struct crypto_type { + unsigned int (*ctxsize)(struct crypto_alg *alg); + int (*init)(struct crypto_tfm *tfm); + void (*exit)(struct crypto_tfm *tfm); + void (*show)(struct seq_file *m, struct crypto_alg *alg); +}; + +struct crypto_instance { + struct crypto_alg alg; + + struct crypto_template *tmpl; + struct hlist_node list; + + void *__ctx[] CRYPTO_MINALIGN_ATTR; +}; + +struct crypto_template { + struct list_head list; + struct hlist_head instances; + struct module *module; + + struct crypto_instance *(*alloc)(void *param, unsigned int len); + void (*free)(struct crypto_instance *inst); + + char name[CRYPTO_MAX_ALG_NAME]; +}; + +struct crypto_spawn { + struct list_head list; + struct crypto_alg *alg; + struct crypto_instance *inst; +}; + +struct scatter_walk { + struct scatterlist *sg; + unsigned int offset; +}; + +struct blkcipher_walk { + union { + struct { + struct page *page; + unsigned long offset; + } phys; + + struct { + u8 *page; + u8 *addr; + } virt; + } src, dst; + + struct scatter_walk in; + unsigned int nbytes; + + struct scatter_walk out; + unsigned int total; + + void *page; + u8 *buffer; + u8 *iv; + + int flags; +}; + +extern const struct crypto_type crypto_blkcipher_type; +extern const struct crypto_type crypto_hash_type; + +void crypto_mod_put(struct crypto_alg *alg); + +int crypto_register_template(struct crypto_template *tmpl); +void crypto_unregister_template(struct crypto_template *tmpl); +struct crypto_template *crypto_lookup_template(const char *name); + +int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, + struct crypto_instance *inst); +void crypto_drop_spawn(struct crypto_spawn *spawn); +struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn); + +struct crypto_alg *crypto_get_attr_alg(void *param, unsigned int len, + u32 type, u32 mask); +struct crypto_instance *crypto_alloc_instance(const char *name, + struct crypto_alg *alg); + +int blkcipher_walk_done(struct blkcipher_desc *desc, + struct blkcipher_walk *walk, int err); +int blkcipher_walk_virt(struct blkcipher_desc *desc, + struct blkcipher_walk *walk); +int blkcipher_walk_phys(struct blkcipher_desc *desc, + struct blkcipher_walk *walk); + +static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm) +{ + unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm); + unsigned long align = crypto_tfm_alg_alignmask(tfm); + + if (align <= crypto_tfm_ctx_alignment()) + align = 1; + return (void *)ALIGN(addr, align); +} + +static inline void 
*crypto_instance_ctx(struct crypto_instance *inst) +{ + return inst->__ctx; +} + +static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm) +{ + return crypto_tfm_ctx(&tfm->base); +} + +static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm) +{ + return crypto_tfm_ctx_aligned(&tfm->base); +} + +static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm) +{ + return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher; +} + +static inline void *crypto_hash_ctx_aligned(struct crypto_hash *tfm) +{ + return crypto_tfm_ctx_aligned(&tfm->base); +} + +static inline void blkcipher_walk_init(struct blkcipher_walk *walk, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + walk->in.sg = src; + walk->out.sg = dst; + walk->total = nbytes; +} + +#endif /* _CRYPTO_ALGAPI_H */ + diff --git a/include/crypto/twofish.h b/include/crypto/twofish.h new file mode 100644 index 000000000000..c408522595c6 --- /dev/null +++ b/include/crypto/twofish.h @@ -0,0 +1,22 @@ +#ifndef _CRYPTO_TWOFISH_H +#define _CRYPTO_TWOFISH_H + +#include <linux/types.h> + +#define TF_MIN_KEY_SIZE 16 +#define TF_MAX_KEY_SIZE 32 +#define TF_BLOCK_SIZE 16 + +struct crypto_tfm; + +/* Structure for an expanded Twofish key. s contains the key-dependent + * S-boxes composed with the MDS matrix; w contains the eight "whitening" + * subkeys, K[0] through K[7]. k holds the remaining, "round" subkeys. Note + * that k[i] corresponds to what the Twofish paper calls K[i+8]. */ +struct twofish_ctx { + u32 s[4][256], w[8], k[32]; +}; + +int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len); + +#endif diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 2b8a7d68fae3..7d076d97b2f7 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -1,63 +1,343 @@ -header-y := byteorder/ dvb/ hdlc/ isdn/ nfsd/ raid/ sunrpc/ tc_act/ \ - netfilter/ netfilter_arp/ netfilter_bridge/ netfilter_ipv4/ \ - netfilter_ipv6/ +header-y += byteorder/ +header-y += dvb/ +header-y += hdlc/ +header-y += isdn/ +header-y += nfsd/ +header-y += raid/ +header-y += sunrpc/ +header-y += tc_act/ +header-y += netfilter/ +header-y += netfilter_arp/ +header-y += netfilter_bridge/ +header-y += netfilter_ipv4/ +header-y += netfilter_ipv6/ -header-y += affs_fs.h affs_hardblocks.h aio_abi.h a.out.h arcfb.h \ - atmapi.h atmbr2684.h atmclip.h atm_eni.h atm_he.h \ - atm_idt77105.h atmioc.h atmlec.h atmmpc.h atm_nicstar.h \ - atmppp.h atmsap.h atmsvc.h atm_zatm.h auto_fs4.h auxvec.h \ - awe_voice.h ax25.h b1lli.h baycom.h bfs_fs.h blkpg.h \ - bpqether.h cdk.h chio.h coda_psdev.h coff.h comstats.h \ - consolemap.h cycx_cfm.h dm-ioctl.h dn.h dqblk_v1.h \ - dqblk_v2.h dqblk_xfs.h efs_fs_sb.h elf-fdpic.h elf.h elf-em.h \ - fadvise.h fd.h fdreg.h ftape-header-segment.h ftape-vendors.h \ - fuse.h futex.h genetlink.h gen_stats.h gigaset_dev.h hdsmart.h \ - hpfs_fs.h hysdn_if.h i2c-dev.h i8k.h icmp.h \ - if_arcnet.h if_arp.h if_bonding.h if_cablemodem.h if_fc.h \ - if_fddi.h if.h if_hippi.h if_infiniband.h if_packet.h \ - if_plip.h if_ppp.h if_slip.h if_strip.h if_tunnel.h in6.h \ - in_route.h ioctl.h ip.h ipmi_msgdefs.h ip_mp_alg.h ipsec.h \ - ipx.h irda.h isdn_divertif.h iso_fs.h ite_gpio.h ixjuser.h \ - jffs2.h keyctl.h limits.h major.h matroxfb.h meye.h minix_fs.h \ - mmtimer.h mqueue.h mtio.h ncp_no.h netfilter_arp.h netrom.h \ - nfs2.h nfs4_mount.h nfs_mount.h openprom_fs.h param.h \ - pci_ids.h pci_regs.h personality.h pfkeyv2.h pg.h pkt_cls.h \ - pkt_sched.h posix_types.h ppdev.h 
prctl.h ps2esdi.h qic117.h \ - qnxtypes.h quotaio_v1.h quotaio_v2.h radeonfb.h raw.h \ - resource.h rose.h sctp.h smbno.h snmp.h sockios.h som.h \ - sound.h stddef.h synclink.h telephony.h termios.h ticable.h \ - times.h tiocl.h tipc.h toshiba.h ultrasound.h un.h utime.h \ - utsname.h video_decoder.h video_encoder.h videotext.h vt.h \ - wavefront.h wireless.h xattr.h x25.h zorro_ids.h +header-y += affs_fs.h +header-y += affs_hardblocks.h +header-y += aio_abi.h +header-y += a.out.h +header-y += arcfb.h +header-y += atmapi.h +header-y += atmbr2684.h +header-y += atmclip.h +header-y += atm_eni.h +header-y += atm_he.h +header-y += atm_idt77105.h +header-y += atmioc.h +header-y += atmlec.h +header-y += atmmpc.h +header-y += atm_nicstar.h +header-y += atmppp.h +header-y += atmsap.h +header-y += atmsvc.h +header-y += atm_zatm.h +header-y += auto_fs4.h +header-y += auxvec.h +header-y += awe_voice.h +header-y += ax25.h +header-y += b1lli.h +header-y += baycom.h +header-y += bfs_fs.h +header-y += blkpg.h +header-y += bpqether.h +header-y += cdk.h +header-y += chio.h +header-y += coda_psdev.h +header-y += coff.h +header-y += comstats.h +header-y += consolemap.h +header-y += cycx_cfm.h +header-y += dm-ioctl.h +header-y += dn.h +header-y += dqblk_v1.h +header-y += dqblk_v2.h +header-y += dqblk_xfs.h +header-y += efs_fs_sb.h +header-y += elf-fdpic.h +header-y += elf.h +header-y += elf-em.h +header-y += fadvise.h +header-y += fd.h +header-y += fdreg.h +header-y += ftape-header-segment.h +header-y += ftape-vendors.h +header-y += fuse.h +header-y += futex.h +header-y += genetlink.h +header-y += gen_stats.h +header-y += gigaset_dev.h +header-y += hdsmart.h +header-y += hpfs_fs.h +header-y += hysdn_if.h +header-y += i2c-dev.h +header-y += i8k.h +header-y += icmp.h +header-y += if_arcnet.h +header-y += if_arp.h +header-y += if_bonding.h +header-y += if_cablemodem.h +header-y += if_fc.h +header-y += if_fddi.h +header-y += if.h +header-y += if_hippi.h +header-y += if_infiniband.h +header-y += if_packet.h +header-y += if_plip.h +header-y += if_ppp.h +header-y += if_slip.h +header-y += if_strip.h +header-y += if_tunnel.h +header-y += in6.h +header-y += in_route.h +header-y += ioctl.h +header-y += ip.h +header-y += ipmi_msgdefs.h +header-y += ip_mp_alg.h +header-y += ipsec.h +header-y += ipx.h +header-y += irda.h +header-y += isdn_divertif.h +header-y += iso_fs.h +header-y += ite_gpio.h +header-y += ixjuser.h +header-y += jffs2.h +header-y += keyctl.h +header-y += limits.h +header-y += major.h +header-y += matroxfb.h +header-y += meye.h +header-y += minix_fs.h +header-y += mmtimer.h +header-y += mqueue.h +header-y += mtio.h +header-y += ncp_no.h +header-y += netfilter_arp.h +header-y += netrom.h +header-y += nfs2.h +header-y += nfs4_mount.h +header-y += nfs_mount.h +header-y += openprom_fs.h +header-y += param.h +header-y += pci_ids.h +header-y += pci_regs.h +header-y += personality.h +header-y += pfkeyv2.h +header-y += pg.h +header-y += pkt_cls.h +header-y += pkt_sched.h +header-y += posix_types.h +header-y += ppdev.h +header-y += prctl.h +header-y += ps2esdi.h +header-y += qic117.h +header-y += qnxtypes.h +header-y += quotaio_v1.h +header-y += quotaio_v2.h +header-y += radeonfb.h +header-y += raw.h +header-y += resource.h +header-y += rose.h +header-y += sctp.h +header-y += smbno.h +header-y += snmp.h +header-y += sockios.h +header-y += som.h +header-y += sound.h +header-y += stddef.h +header-y += synclink.h +header-y += telephony.h +header-y += termios.h +header-y += ticable.h +header-y += times.h +header-y += 
tiocl.h +header-y += tipc.h +header-y += toshiba.h +header-y += ultrasound.h +header-y += un.h +header-y += utime.h +header-y += utsname.h +header-y += video_decoder.h +header-y += video_encoder.h +header-y += videotext.h +header-y += vt.h +header-y += wavefront.h +header-y += wireless.h +header-y += xattr.h +header-y += x25.h +header-y += zorro_ids.h -unifdef-y += acct.h adb.h adfs_fs.h agpgart.h apm_bios.h atalk.h \ - atmarp.h atmdev.h atm.h atm_tcp.h audit.h auto_fs.h binfmts.h \ - capability.h capi.h cciss_ioctl.h cdrom.h cm4000_cs.h \ - cn_proc.h coda.h connector.h cramfs_fs.h cuda.h cyclades.h \ - dccp.h dirent.h divert.h elfcore.h errno.h errqueue.h \ - ethtool.h eventpoll.h ext2_fs.h ext3_fs.h fb.h fcntl.h \ - filter.h flat.h fs.h ftape.h gameport.h generic_serial.h \ - genhd.h hayesesp.h hdlcdrv.h hdlc.h hdreg.h hiddev.h hpet.h \ - i2c.h i2o-dev.h icmpv6.h if_bridge.h if_ec.h \ - if_eql.h if_ether.h if_frad.h if_ltalk.h if_pppox.h \ - if_shaper.h if_tr.h if_tun.h if_vlan.h if_wanpipe.h igmp.h \ - inet_diag.h in.h inotify.h input.h ipc.h ipmi.h ipv6.h \ - ipv6_route.h isdn.h isdnif.h isdn_ppp.h isicom.h jbd.h \ - joystick.h kdev_t.h kd.h kernelcapi.h kernel.h keyboard.h \ - llc.h loop.h lp.h mempolicy.h mii.h mman.h mroute.h msdos_fs.h \ - msg.h nbd.h ncp_fs.h ncp.h ncp_mount.h netdevice.h \ - netfilter_bridge.h netfilter_decnet.h netfilter.h \ - netfilter_ipv4.h netfilter_ipv6.h netfilter_logging.h net.h \ - netlink.h nfs3.h nfs4.h nfsacl.h nfs_fs.h nfs.h nfs_idmap.h \ - n_r3964.h nubus.h nvram.h parport.h patchkey.h pci.h pktcdvd.h \ - pmu.h poll.h ppp_defs.h ppp-comp.h ptrace.h qnx4_fs.h quota.h \ - random.h reboot.h reiserfs_fs.h reiserfs_xattr.h romfs_fs.h \ - route.h rtc.h rtnetlink.h scc.h sched.h sdla.h \ - selinux_netlink.h sem.h serial_core.h serial.h serio.h shm.h \ - signal.h smb_fs.h smb.h smb_mount.h socket.h sonet.h sonypi.h \ - soundcard.h stat.h sysctl.h tcp.h time.h timex.h tty.h types.h \ - udf_fs_i.h udp.h uinput.h uio.h unistd.h usb_ch9.h \ - usbdevice_fs.h user.h videodev2.h videodev.h wait.h \ - wanrouter.h watchdog.h xfrm.h zftape.h +unifdef-y += acct.h +unifdef-y += adb.h +unifdef-y += adfs_fs.h +unifdef-y += agpgart.h +unifdef-y += apm_bios.h +unifdef-y += atalk.h +unifdef-y += atmarp.h +unifdef-y += atmdev.h +unifdef-y += atm.h +unifdef-y += atm_tcp.h +unifdef-y += audit.h +unifdef-y += auto_fs.h +unifdef-y += binfmts.h +unifdef-y += capability.h +unifdef-y += capi.h +unifdef-y += cciss_ioctl.h +unifdef-y += cdrom.h +unifdef-y += cm4000_cs.h +unifdef-y += cn_proc.h +unifdef-y += coda.h +unifdef-y += connector.h +unifdef-y += cramfs_fs.h +unifdef-y += cuda.h +unifdef-y += cyclades.h +unifdef-y += dccp.h +unifdef-y += dirent.h +unifdef-y += divert.h +unifdef-y += elfcore.h +unifdef-y += errno.h +unifdef-y += errqueue.h +unifdef-y += ethtool.h +unifdef-y += eventpoll.h +unifdef-y += ext2_fs.h +unifdef-y += ext3_fs.h +unifdef-y += fb.h +unifdef-y += fcntl.h +unifdef-y += filter.h +unifdef-y += flat.h +unifdef-y += fs.h +unifdef-y += ftape.h +unifdef-y += gameport.h +unifdef-y += generic_serial.h +unifdef-y += genhd.h +unifdef-y += hayesesp.h +unifdef-y += hdlcdrv.h +unifdef-y += hdlc.h +unifdef-y += hdreg.h +unifdef-y += hiddev.h +unifdef-y += hpet.h +unifdef-y += i2c.h +unifdef-y += i2o-dev.h +unifdef-y += icmpv6.h +unifdef-y += if_bridge.h +unifdef-y += if_ec.h +unifdef-y += if_eql.h +unifdef-y += if_ether.h +unifdef-y += if_frad.h +unifdef-y += if_ltalk.h +unifdef-y += if_pppox.h +unifdef-y += if_shaper.h +unifdef-y += if_tr.h +unifdef-y += if_tun.h 
+unifdef-y += if_vlan.h +unifdef-y += if_wanpipe.h +unifdef-y += igmp.h +unifdef-y += inet_diag.h +unifdef-y += in.h +unifdef-y += inotify.h +unifdef-y += input.h +unifdef-y += ipc.h +unifdef-y += ipmi.h +unifdef-y += ipv6.h +unifdef-y += ipv6_route.h +unifdef-y += isdn.h +unifdef-y += isdnif.h +unifdef-y += isdn_ppp.h +unifdef-y += isicom.h +unifdef-y += jbd.h +unifdef-y += joystick.h +unifdef-y += kdev_t.h +unifdef-y += kd.h +unifdef-y += kernelcapi.h +unifdef-y += kernel.h +unifdef-y += keyboard.h +unifdef-y += llc.h +unifdef-y += loop.h +unifdef-y += lp.h +unifdef-y += mempolicy.h +unifdef-y += mii.h +unifdef-y += mman.h +unifdef-y += mroute.h +unifdef-y += msdos_fs.h +unifdef-y += msg.h +unifdef-y += nbd.h +unifdef-y += ncp_fs.h +unifdef-y += ncp.h +unifdef-y += ncp_mount.h +unifdef-y += netdevice.h +unifdef-y += netfilter_bridge.h +unifdef-y += netfilter_decnet.h +unifdef-y += netfilter.h +unifdef-y += netfilter_ipv4.h +unifdef-y += netfilter_ipv6.h +unifdef-y += netfilter_logging.h +unifdef-y += net.h +unifdef-y += netlink.h +unifdef-y += nfs3.h +unifdef-y += nfs4.h +unifdef-y += nfsacl.h +unifdef-y += nfs_fs.h +unifdef-y += nfs.h +unifdef-y += nfs_idmap.h +unifdef-y += n_r3964.h +unifdef-y += nubus.h +unifdef-y += nvram.h +unifdef-y += parport.h +unifdef-y += patchkey.h +unifdef-y += pci.h +unifdef-y += pktcdvd.h +unifdef-y += pmu.h +unifdef-y += poll.h +unifdef-y += ppp_defs.h +unifdef-y += ppp-comp.h +unifdef-y += ptrace.h +unifdef-y += qnx4_fs.h +unifdef-y += quota.h +unifdef-y += random.h +unifdef-y += reboot.h +unifdef-y += reiserfs_fs.h +unifdef-y += reiserfs_xattr.h +unifdef-y += romfs_fs.h +unifdef-y += route.h +unifdef-y += rtc.h +unifdef-y += rtnetlink.h +unifdef-y += scc.h +unifdef-y += sched.h +unifdef-y += sdla.h +unifdef-y += selinux_netlink.h +unifdef-y += sem.h +unifdef-y += serial_core.h +unifdef-y += serial.h +unifdef-y += serio.h +unifdef-y += shm.h +unifdef-y += signal.h +unifdef-y += smb_fs.h +unifdef-y += smb.h +unifdef-y += smb_mount.h +unifdef-y += socket.h +unifdef-y += sonet.h +unifdef-y += sonypi.h +unifdef-y += soundcard.h +unifdef-y += stat.h +unifdef-y += sysctl.h +unifdef-y += tcp.h +unifdef-y += time.h +unifdef-y += timex.h +unifdef-y += tty.h +unifdef-y += types.h +unifdef-y += udf_fs_i.h +unifdef-y += udp.h +unifdef-y += uinput.h +unifdef-y += uio.h +unifdef-y += unistd.h +unifdef-y += usb_ch9.h +unifdef-y += usbdevice_fs.h +unifdef-y += user.h +unifdef-y += videodev2.h +unifdef-y += videodev.h +unifdef-y += wait.h +unifdef-y += wanrouter.h +unifdef-y += watchdog.h +unifdef-y += xfrm.h +unifdef-y += zftape.h -objhdr-y := version.h +objhdr-y += version.h diff --git a/include/linux/byteorder/Kbuild b/include/linux/byteorder/Kbuild index 84a57d4fb212..56499ab9e32e 100644 --- a/include/linux/byteorder/Kbuild +++ b/include/linux/byteorder/Kbuild @@ -1,2 +1,7 @@ -unifdef-y += generic.h swabb.h swab.h -header-y += big_endian.h little_endian.h pdp_endian.h +header-y += big_endian.h +header-y += little_endian.h +header-y += pdp_endian.h + +unifdef-y += generic.h +unifdef-y += swabb.h +unifdef-y += swab.h diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 7f946241b879..8f2ffa4caabf 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -17,20 +17,36 @@ #ifndef _LINUX_CRYPTO_H #define _LINUX_CRYPTO_H +#include <asm/atomic.h> #include <linux/module.h> #include <linux/kernel.h> -#include <linux/types.h> #include <linux/list.h> +#include <linux/slab.h> #include <linux/string.h> -#include <asm/page.h> +#include <linux/uaccess.h> /* * 
Algorithm masks and types. */ -#define CRYPTO_ALG_TYPE_MASK 0x000000ff +#define CRYPTO_ALG_TYPE_MASK 0x0000000f #define CRYPTO_ALG_TYPE_CIPHER 0x00000001 #define CRYPTO_ALG_TYPE_DIGEST 0x00000002 -#define CRYPTO_ALG_TYPE_COMPRESS 0x00000004 +#define CRYPTO_ALG_TYPE_HASH 0x00000003 +#define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004 +#define CRYPTO_ALG_TYPE_COMPRESS 0x00000005 + +#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e + +#define CRYPTO_ALG_LARVAL 0x00000010 +#define CRYPTO_ALG_DEAD 0x00000020 +#define CRYPTO_ALG_DYING 0x00000040 +#define CRYPTO_ALG_ASYNC 0x00000080 + +/* + * Set this bit if and only if the algorithm requires another algorithm of + * the same type to handle corner cases. + */ +#define CRYPTO_ALG_NEED_FALLBACK 0x00000100 /* * Transform masks and values (for crt_flags). @@ -61,8 +77,37 @@ #define CRYPTO_DIR_ENCRYPT 1 #define CRYPTO_DIR_DECRYPT 0 +/* + * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual + * declaration) is used to ensure that the crypto_tfm context structure is + * aligned correctly for the given architecture so that there are no alignment + * faults for C data types. In particular, this is required on platforms such + * as arm where pointers are 32-bit aligned but there are data types such as + * u64 which require 64-bit alignment. + */ +#if defined(ARCH_KMALLOC_MINALIGN) +#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN +#elif defined(ARCH_SLAB_MINALIGN) +#define CRYPTO_MINALIGN ARCH_SLAB_MINALIGN +#endif + +#ifdef CRYPTO_MINALIGN +#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN))) +#else +#define CRYPTO_MINALIGN_ATTR +#endif + struct scatterlist; +struct crypto_blkcipher; +struct crypto_hash; struct crypto_tfm; +struct crypto_type; + +struct blkcipher_desc { + struct crypto_blkcipher *tfm; + void *info; + u32 flags; +}; struct cipher_desc { struct crypto_tfm *tfm; @@ -72,30 +117,50 @@ struct cipher_desc { void *info; }; +struct hash_desc { + struct crypto_hash *tfm; + u32 flags; +}; + /* * Algorithms: modular crypto algorithm implementations, managed * via crypto_register_alg() and crypto_unregister_alg(). 
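For orientation, a registration under this scheme looks roughly like the sketch below. The "toy" algorithm, its XOR "cipher" and all toy_* names are invented purely for illustration (it is of course not a real cipher); note how the new setkey signature reports bad keys through tfm->crt_flags instead of the old u32 *flags argument.

    #include <linux/crypto.h>
    #include <linux/errno.h>
    #include <linux/module.h>
    #include <linux/string.h>

    #define TOY_BLOCK_SIZE 8

    struct toy_ctx {
            u8 key[TOY_BLOCK_SIZE];
    };

    static int toy_setkey(struct crypto_tfm *tfm, const u8 *key,
                          unsigned int keylen)
    {
            struct toy_ctx *ctx = crypto_tfm_ctx(tfm);

            if (keylen != TOY_BLOCK_SIZE) {
                    tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                    return -EINVAL;
            }
            memcpy(ctx->key, key, keylen);
            return 0;
    }

    /* XOR is its own inverse, so one routine serves both directions. */
    static void toy_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
    {
            struct toy_ctx *ctx = crypto_tfm_ctx(tfm);
            int i;

            for (i = 0; i < TOY_BLOCK_SIZE; i++)
                    dst[i] = src[i] ^ ctx->key[i];
    }

    static struct crypto_alg toy_alg = {
            .cra_name       = "toy",
            .cra_flags      = CRYPTO_ALG_TYPE_CIPHER,
            .cra_blocksize  = TOY_BLOCK_SIZE,
            .cra_ctxsize    = sizeof(struct toy_ctx),
            .cra_module     = THIS_MODULE,
            .cra_list       = LIST_HEAD_INIT(toy_alg.cra_list),
            .cra_u          = { .cipher = {
                    .cia_min_keysize = TOY_BLOCK_SIZE,
                    .cia_max_keysize = TOY_BLOCK_SIZE,
                    .cia_setkey      = toy_setkey,
                    .cia_encrypt     = toy_crypt,
                    .cia_decrypt     = toy_crypt } },
    };

    static int __init toy_init(void)
    {
            return crypto_register_alg(&toy_alg);
    }

    static void __exit toy_exit(void)
    {
            crypto_unregister_alg(&toy_alg);
    }

    module_init(toy_init);
    module_exit(toy_exit);
    MODULE_LICENSE("GPL");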
*/ +struct blkcipher_alg { + int (*setkey)(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes); + int (*decrypt)(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes); + + unsigned int min_keysize; + unsigned int max_keysize; + unsigned int ivsize; +}; + struct cipher_alg { unsigned int cia_min_keysize; unsigned int cia_max_keysize; int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen, u32 *flags); + unsigned int keylen); void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); unsigned int (*cia_encrypt_ecb)(const struct cipher_desc *desc, u8 *dst, const u8 *src, - unsigned int nbytes); + unsigned int nbytes) __deprecated; unsigned int (*cia_decrypt_ecb)(const struct cipher_desc *desc, u8 *dst, const u8 *src, - unsigned int nbytes); + unsigned int nbytes) __deprecated; unsigned int (*cia_encrypt_cbc)(const struct cipher_desc *desc, u8 *dst, const u8 *src, - unsigned int nbytes); + unsigned int nbytes) __deprecated; unsigned int (*cia_decrypt_cbc)(const struct cipher_desc *desc, u8 *dst, const u8 *src, - unsigned int nbytes); + unsigned int nbytes) __deprecated; }; struct digest_alg { @@ -105,7 +170,20 @@ struct digest_alg { unsigned int len); void (*dia_final)(struct crypto_tfm *tfm, u8 *out); int (*dia_setkey)(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen, u32 *flags); + unsigned int keylen); +}; + +struct hash_alg { + int (*init)(struct hash_desc *desc); + int (*update)(struct hash_desc *desc, struct scatterlist *sg, + unsigned int nbytes); + int (*final)(struct hash_desc *desc, u8 *out); + int (*digest)(struct hash_desc *desc, struct scatterlist *sg, + unsigned int nbytes, u8 *out); + int (*setkey)(struct crypto_hash *tfm, const u8 *key, + unsigned int keylen); + + unsigned int digestsize; }; struct compress_alg { @@ -115,30 +193,40 @@ struct compress_alg { unsigned int slen, u8 *dst, unsigned int *dlen); }; +#define cra_blkcipher cra_u.blkcipher #define cra_cipher cra_u.cipher #define cra_digest cra_u.digest +#define cra_hash cra_u.hash #define cra_compress cra_u.compress struct crypto_alg { struct list_head cra_list; + struct list_head cra_users; + u32 cra_flags; unsigned int cra_blocksize; unsigned int cra_ctxsize; unsigned int cra_alignmask; int cra_priority; + atomic_t cra_refcnt; char cra_name[CRYPTO_MAX_ALG_NAME]; char cra_driver_name[CRYPTO_MAX_ALG_NAME]; + const struct crypto_type *cra_type; + union { + struct blkcipher_alg blkcipher; struct cipher_alg cipher; struct digest_alg digest; + struct hash_alg hash; struct compress_alg compress; } cra_u; int (*cra_init)(struct crypto_tfm *tfm); void (*cra_exit)(struct crypto_tfm *tfm); + void (*cra_destroy)(struct crypto_alg *alg); struct module *cra_module; }; @@ -153,20 +241,39 @@ int crypto_unregister_alg(struct crypto_alg *alg); * Algorithm query interface. 
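Since crypto_alg_available() is marked deprecated below in favour of crypto_has_alg(), which matches on type and mask as well as name, a minimal probe might look like this sketch (the "aes" name is only an example):

    #include <linux/crypto.h>

    /* Returns nonzero if some cipher-type provider of "aes" is available. */
    static int have_aes_cipher(void)
    {
            return crypto_has_alg("aes", CRYPTO_ALG_TYPE_CIPHER,
                                  CRYPTO_ALG_TYPE_MASK);
    }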
*/ #ifdef CONFIG_CRYPTO -int crypto_alg_available(const char *name, u32 flags); +int crypto_alg_available(const char *name, u32 flags) + __deprecated_for_modules; +int crypto_has_alg(const char *name, u32 type, u32 mask); #else +static int crypto_alg_available(const char *name, u32 flags); + __deprecated_for_modules; static inline int crypto_alg_available(const char *name, u32 flags) { return 0; } + +static inline int crypto_has_alg(const char *name, u32 type, u32 mask) +{ + return 0; +} #endif /* * Transforms: user-instantiated objects which encapsulate algorithms - * and core processing logic. Managed via crypto_alloc_tfm() and - * crypto_free_tfm(), as well as the various helpers below. + * and core processing logic. Managed via crypto_alloc_*() and + * crypto_free_*(), as well as the various helpers below. */ +struct blkcipher_tfm { + void *iv; + int (*setkey)(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen); + int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes); + int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes); +}; + struct cipher_tfm { void *cit_iv; unsigned int cit_ivsize; @@ -190,20 +297,20 @@ struct cipher_tfm { struct scatterlist *src, unsigned int nbytes, u8 *iv); void (*cit_xor_block)(u8 *dst, const u8 *src); + void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); + void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); }; -struct digest_tfm { - void (*dit_init)(struct crypto_tfm *tfm); - void (*dit_update)(struct crypto_tfm *tfm, - struct scatterlist *sg, unsigned int nsg); - void (*dit_final)(struct crypto_tfm *tfm, u8 *out); - void (*dit_digest)(struct crypto_tfm *tfm, struct scatterlist *sg, - unsigned int nsg, u8 *out); - int (*dit_setkey)(struct crypto_tfm *tfm, - const u8 *key, unsigned int keylen); -#ifdef CONFIG_CRYPTO_HMAC - void *dit_hmac_block; -#endif +struct hash_tfm { + int (*init)(struct hash_desc *desc); + int (*update)(struct hash_desc *desc, + struct scatterlist *sg, unsigned int nsg); + int (*final)(struct hash_desc *desc, u8 *out); + int (*digest)(struct hash_desc *desc, struct scatterlist *sg, + unsigned int nsg, u8 *out); + int (*setkey)(struct crypto_hash *tfm, const u8 *key, + unsigned int keylen); + unsigned int digestsize; }; struct compress_tfm { @@ -215,8 +322,9 @@ struct compress_tfm { u8 *dst, unsigned int *dlen); }; +#define crt_blkcipher crt_u.blkcipher #define crt_cipher crt_u.cipher -#define crt_digest crt_u.digest +#define crt_hash crt_u.hash #define crt_compress crt_u.compress struct crypto_tfm { @@ -224,30 +332,43 @@ struct crypto_tfm { u32 crt_flags; union { + struct blkcipher_tfm blkcipher; struct cipher_tfm cipher; - struct digest_tfm digest; + struct hash_tfm hash; struct compress_tfm compress; } crt_u; struct crypto_alg *__crt_alg; - char __crt_ctx[] __attribute__ ((__aligned__)); + void *__crt_ctx[] CRYPTO_MINALIGN_ATTR; +}; + +#define crypto_cipher crypto_tfm +#define crypto_comp crypto_tfm + +struct crypto_blkcipher { + struct crypto_tfm base; +}; + +struct crypto_hash { + struct crypto_tfm base; +}; + +enum { + CRYPTOA_UNSPEC, + CRYPTOA_ALG, +}; + +struct crypto_attr_alg { + char name[CRYPTO_MAX_ALG_NAME]; }; /* * Transform user interface. */ -/* - * crypto_alloc_tfm() will first attempt to locate an already loaded algorithm. 
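One behavioural difference deserves an example: crypto_alloc_tfm() signals failure with NULL, while the new crypto_alloc_base() declared below returns an ERR_PTR() value, so callers must test with IS_ERR(). A minimal sketch (the "aes" lookup is illustrative):

    #include <linux/crypto.h>
    #include <linux/err.h>

    static struct crypto_tfm *grab_aes(void)
    {
            struct crypto_tfm *tfm;

            tfm = crypto_alloc_base("aes", CRYPTO_ALG_TYPE_CIPHER,
                                    CRYPTO_ALG_TYPE_MASK);
            if (IS_ERR(tfm))
                    return NULL;    /* or propagate PTR_ERR(tfm) */
            return tfm;
    }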
- * If that fails and the kernel supports dynamically loadable modules, it - * will then attempt to load a module of the same name or alias. A refcount - * is grabbed on the algorithm which is then associated with the new transform. - * - * crypto_free_tfm() frees up the transform and any associated resources, - * then drops the refcount on the associated algorithm. - */ struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, u32 tfm_flags); +struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask); void crypto_free_tfm(struct crypto_tfm *tfm); /* @@ -258,6 +379,16 @@ static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm) return tfm->__crt_alg->cra_name; } +static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm) +{ + return tfm->__crt_alg->cra_driver_name; +} + +static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm) +{ + return tfm->__crt_alg->cra_priority; +} + static inline const char *crypto_tfm_alg_modname(struct crypto_tfm *tfm) { return module_name(tfm->__crt_alg->cra_module); @@ -268,18 +399,23 @@ static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm) return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK; } +static unsigned int crypto_tfm_alg_min_keysize(struct crypto_tfm *tfm) + __deprecated; static inline unsigned int crypto_tfm_alg_min_keysize(struct crypto_tfm *tfm) { BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER); return tfm->__crt_alg->cra_cipher.cia_min_keysize; } +static unsigned int crypto_tfm_alg_max_keysize(struct crypto_tfm *tfm) + __deprecated; static inline unsigned int crypto_tfm_alg_max_keysize(struct crypto_tfm *tfm) { BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER); return tfm->__crt_alg->cra_cipher.cia_max_keysize; } +static unsigned int crypto_tfm_alg_ivsize(struct crypto_tfm *tfm) __deprecated; static inline unsigned int crypto_tfm_alg_ivsize(struct crypto_tfm *tfm) { BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER); @@ -302,6 +438,21 @@ static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm) return tfm->__crt_alg->cra_alignmask; } +static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm) +{ + return tfm->crt_flags; +} + +static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags) +{ + tfm->crt_flags |= flags; +} + +static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags) +{ + tfm->crt_flags &= ~flags; +} + static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm) { return tfm->__crt_ctx; @@ -316,50 +467,374 @@ static inline unsigned int crypto_tfm_ctx_alignment(void) /* * API wrappers. 
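Before the individual wrappers, a usage sketch of the new block-cipher interface may help tie them together. It assumes a "cbc(aes)" instance can be instantiated on the running kernel and works on a single 16-byte block in place:

    #include <linux/crypto.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    /* Encrypt one 16-byte block of buf in place with AES-128 in CBC mode. */
    static int encrypt_block(u8 *key, u8 *iv, u8 *buf)
    {
            struct crypto_blkcipher *tfm;
            struct blkcipher_desc desc;
            struct scatterlist sg;
            int err;

            tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_blkcipher_setkey(tfm, key, 16);
            if (!err) {
                    crypto_blkcipher_set_iv(tfm, iv,
                                            crypto_blkcipher_ivsize(tfm));
                    desc.tfm = tfm;
                    desc.flags = 0;
                    sg_init_one(&sg, buf, 16);
                    err = crypto_blkcipher_encrypt(&desc, &sg, &sg, 16);
            }
            crypto_free_blkcipher(tfm);
            return err;
    }

Passing CRYPTO_ALG_ASYNC in the mask while leaving the flag clear in the type restricts the lookup to synchronous implementations.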
*/ -static inline void crypto_digest_init(struct crypto_tfm *tfm) +static inline struct crypto_blkcipher *__crypto_blkcipher_cast( + struct crypto_tfm *tfm) { - BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST); - tfm->crt_digest.dit_init(tfm); + return (struct crypto_blkcipher *)tfm; } -static inline void crypto_digest_update(struct crypto_tfm *tfm, - struct scatterlist *sg, - unsigned int nsg) +static inline struct crypto_blkcipher *crypto_blkcipher_cast( + struct crypto_tfm *tfm) { - BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST); - tfm->crt_digest.dit_update(tfm, sg, nsg); + BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER); + return __crypto_blkcipher_cast(tfm); } -static inline void crypto_digest_final(struct crypto_tfm *tfm, u8 *out) +static inline struct crypto_blkcipher *crypto_alloc_blkcipher( + const char *alg_name, u32 type, u32 mask) { - BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST); - tfm->crt_digest.dit_final(tfm, out); + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_BLKCIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + + return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask)); } -static inline void crypto_digest_digest(struct crypto_tfm *tfm, - struct scatterlist *sg, - unsigned int nsg, u8 *out) +static inline struct crypto_tfm *crypto_blkcipher_tfm( + struct crypto_blkcipher *tfm) { - BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST); - tfm->crt_digest.dit_digest(tfm, sg, nsg, out); + return &tfm->base; } -static inline int crypto_digest_setkey(struct crypto_tfm *tfm, +static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm) +{ + crypto_free_tfm(crypto_blkcipher_tfm(tfm)); +} + +static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_BLKCIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + + return crypto_has_alg(alg_name, type, mask); +} + +static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm) +{ + return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm)); +} + +static inline struct blkcipher_tfm *crypto_blkcipher_crt( + struct crypto_blkcipher *tfm) +{ + return &crypto_blkcipher_tfm(tfm)->crt_blkcipher; +} + +static inline struct blkcipher_alg *crypto_blkcipher_alg( + struct crypto_blkcipher *tfm) +{ + return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher; +} + +static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm) +{ + return crypto_blkcipher_alg(tfm)->ivsize; +} + +static inline unsigned int crypto_blkcipher_blocksize( + struct crypto_blkcipher *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm)); +} + +static inline unsigned int crypto_blkcipher_alignmask( + struct crypto_blkcipher *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm)); +} + +static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm) +{ + return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm)); +} + +static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm, + u32 flags) +{ + crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags); +} + +static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm, + u32 flags) +{ + crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags); +} + +static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm, + const u8 *key, unsigned int keylen) +{ + return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm), + key, keylen); +} + +static inline int 
crypto_blkcipher_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + desc->info = crypto_blkcipher_crt(desc->tfm)->iv; + return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); +} + +static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); +} + +static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + desc->info = crypto_blkcipher_crt(desc->tfm)->iv; + return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); +} + +static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); +} + +static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm, + const u8 *src, unsigned int len) +{ + memcpy(crypto_blkcipher_crt(tfm)->iv, src, len); +} + +static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm, + u8 *dst, unsigned int len) +{ + memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len); +} + +static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm) +{ + return (struct crypto_cipher *)tfm; +} + +static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm) +{ + BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER); + return __crypto_cipher_cast(tfm); +} + +static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name, + u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_CIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + + return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask)); +} + +static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm) +{ + return tfm; +} + +static inline void crypto_free_cipher(struct crypto_cipher *tfm) +{ + crypto_free_tfm(crypto_cipher_tfm(tfm)); +} + +static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_CIPHER; + mask |= CRYPTO_ALG_TYPE_MASK; + + return crypto_has_alg(alg_name, type, mask); +} + +static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm) +{ + return &crypto_cipher_tfm(tfm)->crt_cipher; +} + +static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm)); +} + +static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm)); +} + +static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm) +{ + return crypto_tfm_get_flags(crypto_cipher_tfm(tfm)); +} + +static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm, + u32 flags) +{ + crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags); +} + +static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm, + u32 flags) +{ + crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags); +} + +static inline int crypto_cipher_setkey(struct crypto_cipher *tfm, const u8 *key, unsigned int keylen) { - BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST); - if (tfm->crt_digest.dit_setkey == NULL) - return -ENOSYS; - return tfm->crt_digest.dit_setkey(tfm, key, keylen); + return 
crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm), + key, keylen); +} + +static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, + u8 *dst, const u8 *src) +{ + crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm), + dst, src); +} + +static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, + u8 *dst, const u8 *src) +{ + crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm), + dst, src); +} + +void crypto_digest_init(struct crypto_tfm *tfm) __deprecated_for_modules; +void crypto_digest_update(struct crypto_tfm *tfm, + struct scatterlist *sg, unsigned int nsg) + __deprecated_for_modules; +void crypto_digest_final(struct crypto_tfm *tfm, u8 *out) + __deprecated_for_modules; +void crypto_digest_digest(struct crypto_tfm *tfm, + struct scatterlist *sg, unsigned int nsg, u8 *out) + __deprecated_for_modules; + +static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm) +{ + return (struct crypto_hash *)tfm; +} + +static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm) +{ + BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_HASH) & + CRYPTO_ALG_TYPE_HASH_MASK); + return __crypto_hash_cast(tfm); } -static inline int crypto_cipher_setkey(struct crypto_tfm *tfm, +static int crypto_digest_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen) __deprecated; +static inline int crypto_digest_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { - BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER); - return tfm->crt_cipher.cit_setkey(tfm, key, keylen); + return tfm->crt_hash.setkey(crypto_hash_cast(tfm), key, keylen); +} + +static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name, + u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_HASH; + mask |= CRYPTO_ALG_TYPE_HASH_MASK; + + return __crypto_hash_cast(crypto_alloc_base(alg_name, type, mask)); +} + +static inline struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm) +{ + return &tfm->base; +} + +static inline void crypto_free_hash(struct crypto_hash *tfm) +{ + crypto_free_tfm(crypto_hash_tfm(tfm)); +} + +static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_HASH; + mask |= CRYPTO_ALG_TYPE_HASH_MASK; + + return crypto_has_alg(alg_name, type, mask); +} + +static inline struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm) +{ + return &crypto_hash_tfm(tfm)->crt_hash; +} + +static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm) +{ + return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm)); +} + +static inline unsigned int crypto_hash_alignmask(struct crypto_hash *tfm) +{ + return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm)); +} + +static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm) +{ + return crypto_hash_crt(tfm)->digestsize; +} + +static inline u32 crypto_hash_get_flags(struct crypto_hash *tfm) +{ + return crypto_tfm_get_flags(crypto_hash_tfm(tfm)); +} + +static inline void crypto_hash_set_flags(struct crypto_hash *tfm, u32 flags) +{ + crypto_tfm_set_flags(crypto_hash_tfm(tfm), flags); } +static inline void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags) +{ + crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags); +} + +static inline int crypto_hash_init(struct hash_desc *desc) +{ + return crypto_hash_crt(desc->tfm)->init(desc); +} + +static inline int crypto_hash_update(struct hash_desc *desc, + struct scatterlist *sg, + unsigned int 
nbytes) +{ + return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes); +} + +static inline int crypto_hash_final(struct hash_desc *desc, u8 *out) +{ + return crypto_hash_crt(desc->tfm)->final(desc, out); +} + +static inline int crypto_hash_digest(struct hash_desc *desc, + struct scatterlist *sg, + unsigned int nbytes, u8 *out) +{ + return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out); +} + +static inline int crypto_hash_setkey(struct crypto_hash *hash, + const u8 *key, unsigned int keylen) +{ + return crypto_hash_crt(hash)->setkey(hash, key, keylen); +} + +static int crypto_cipher_encrypt(struct crypto_tfm *tfm, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) __deprecated; static inline int crypto_cipher_encrypt(struct crypto_tfm *tfm, struct scatterlist *dst, struct scatterlist *src, @@ -369,16 +844,23 @@ static inline int crypto_cipher_encrypt(struct crypto_tfm *tfm, return tfm->crt_cipher.cit_encrypt(tfm, dst, src, nbytes); } +static int crypto_cipher_encrypt_iv(struct crypto_tfm *tfm, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes, u8 *iv) __deprecated; static inline int crypto_cipher_encrypt_iv(struct crypto_tfm *tfm, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes, u8 *iv) { BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER); - BUG_ON(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_ECB); return tfm->crt_cipher.cit_encrypt_iv(tfm, dst, src, nbytes, iv); } +static int crypto_cipher_decrypt(struct crypto_tfm *tfm, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) __deprecated; static inline int crypto_cipher_decrypt(struct crypto_tfm *tfm, struct scatterlist *dst, struct scatterlist *src, @@ -388,16 +870,21 @@ static inline int crypto_cipher_decrypt(struct crypto_tfm *tfm, return tfm->crt_cipher.cit_decrypt(tfm, dst, src, nbytes); } +static int crypto_cipher_decrypt_iv(struct crypto_tfm *tfm, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes, u8 *iv) __deprecated; static inline int crypto_cipher_decrypt_iv(struct crypto_tfm *tfm, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes, u8 *iv) { BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER); - BUG_ON(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_ECB); return tfm->crt_cipher.cit_decrypt_iv(tfm, dst, src, nbytes, iv); } +static void crypto_cipher_set_iv(struct crypto_tfm *tfm, + const u8 *src, unsigned int len) __deprecated; static inline void crypto_cipher_set_iv(struct crypto_tfm *tfm, const u8 *src, unsigned int len) { @@ -405,6 +892,8 @@ static inline void crypto_cipher_set_iv(struct crypto_tfm *tfm, memcpy(tfm->crt_cipher.cit_iv, src, len); } +static void crypto_cipher_get_iv(struct crypto_tfm *tfm, + u8 *dst, unsigned int len) __deprecated; static inline void crypto_cipher_get_iv(struct crypto_tfm *tfm, u8 *dst, unsigned int len) { @@ -412,34 +901,70 @@ static inline void crypto_cipher_get_iv(struct crypto_tfm *tfm, memcpy(dst, tfm->crt_cipher.cit_iv, len); } -static inline int crypto_comp_compress(struct crypto_tfm *tfm, +static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm) +{ + return (struct crypto_comp *)tfm; +} + +static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm) +{ + BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) & + CRYPTO_ALG_TYPE_MASK); + return __crypto_comp_cast(tfm); +} + +static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name, + u32 type, u32 mask) +{ + type &= 
~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_COMPRESS; + mask |= CRYPTO_ALG_TYPE_MASK; + + return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask)); +} + +static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm) +{ + return tfm; +} + +static inline void crypto_free_comp(struct crypto_comp *tfm) +{ + crypto_free_tfm(crypto_comp_tfm(tfm)); +} + +static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask) +{ + type &= ~CRYPTO_ALG_TYPE_MASK; + type |= CRYPTO_ALG_TYPE_COMPRESS; + mask |= CRYPTO_ALG_TYPE_MASK; + + return crypto_has_alg(alg_name, type, mask); +} + +static inline const char *crypto_comp_name(struct crypto_comp *tfm) +{ + return crypto_tfm_alg_name(crypto_comp_tfm(tfm)); +} + +static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm) +{ + return &crypto_comp_tfm(tfm)->crt_compress; +} + +static inline int crypto_comp_compress(struct crypto_comp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { - BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_COMPRESS); - return tfm->crt_compress.cot_compress(tfm, src, slen, dst, dlen); + return crypto_comp_crt(tfm)->cot_compress(tfm, src, slen, dst, dlen); } -static inline int crypto_comp_decompress(struct crypto_tfm *tfm, +static inline int crypto_comp_decompress(struct crypto_comp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { - BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_COMPRESS); - return tfm->crt_compress.cot_decompress(tfm, src, slen, dst, dlen); + return crypto_comp_crt(tfm)->cot_decompress(tfm, src, slen, dst, dlen); } -/* - * HMAC support. - */ -#ifdef CONFIG_CRYPTO_HMAC -void crypto_hmac_init(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen); -void crypto_hmac_update(struct crypto_tfm *tfm, - struct scatterlist *sg, unsigned int nsg); -void crypto_hmac_final(struct crypto_tfm *tfm, u8 *key, - unsigned int *keylen, u8 *out); -void crypto_hmac(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen, - struct scatterlist *sg, unsigned int nsg, u8 *out); -#endif /* CONFIG_CRYPTO_HMAC */ - #endif /* _LINUX_CRYPTO_H */ diff --git a/include/linux/dvb/Kbuild b/include/linux/dvb/Kbuild index 63973af72fd5..d97b3a51e227 100644 --- a/include/linux/dvb/Kbuild +++ b/include/linux/dvb/Kbuild @@ -1,2 +1,9 @@ -header-y += ca.h frontend.h net.h osd.h version.h -unifdef-y := audio.h dmx.h video.h +header-y += ca.h +header-y += frontend.h +header-y += net.h +header-y += osd.h +header-y += version.h + +unifdef-y += audio.h +unifdef-y += dmx.h +unifdef-y += video.h diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index f6977708585c..f7ca0b09075d 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -148,6 +148,17 @@ struct ccw_device_id { #define CCW_DEVICE_ID_MATCH_DEVICE_TYPE 0x04 #define CCW_DEVICE_ID_MATCH_DEVICE_MODEL 0x08 +/* s390 AP bus devices */ +struct ap_device_id { + __u16 match_flags; /* which fields to match against */ + __u8 dev_type; /* device type */ + __u8 pad1; + __u32 pad2; + kernel_ulong_t driver_info; +}; + +#define AP_DEVICE_ID_MATCH_DEVICE_TYPE 0x01 + #define PNP_ID_LEN 8 #define PNP_MAX_DEVICES 8 diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild index 1d3a14e2da6e..9a285cecf249 100644 --- a/include/linux/netfilter/Kbuild +++ b/include/linux/netfilter/Kbuild @@ -1,11 +1,38 @@ -header-y := nf_conntrack_sctp.h nf_conntrack_tuple_common.h \ - nfnetlink_conntrack.h nfnetlink_log.h nfnetlink_queue.h \ - xt_CLASSIFY.h xt_comment.h 
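
The compression entry points above pair with the crypto_comp allocation helpers; a minimal sketch, assuming the "deflate" algorithm is registered and the buffer sizes are illustrative:

    #include <linux/crypto.h>
    #include <linux/err.h>

    struct crypto_comp *tfm;
    u8 src[256], dst[512];
    unsigned int dlen = sizeof(dst);

    tfm = crypto_alloc_comp("deflate", 0, 0);
    if (IS_ERR(tfm))
        fail();

    /* dlen is updated to the number of bytes actually produced */
    if (crypto_comp_compress(tfm, src, sizeof(src), dst, &dlen))
        fail();

    crypto_free_comp(tfm);
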
xt_connbytes.h xt_connmark.h \ - xt_CONNMARK.h xt_conntrack.h xt_dccp.h xt_esp.h \ - xt_helper.h xt_length.h xt_limit.h xt_mac.h xt_mark.h \ - xt_MARK.h xt_multiport.h xt_NFQUEUE.h xt_pkttype.h \ - xt_policy.h xt_realm.h xt_sctp.h xt_state.h xt_string.h \ - xt_tcpmss.h xt_tcpudp.h xt_SECMARK.h xt_CONNSECMARK.h +header-y += nf_conntrack_sctp.h +header-y += nf_conntrack_tuple_common.h +header-y += nfnetlink_conntrack.h +header-y += nfnetlink_log.h +header-y += nfnetlink_queue.h +header-y += xt_CLASSIFY.h +header-y += xt_comment.h +header-y += xt_connbytes.h +header-y += xt_connmark.h +header-y += xt_CONNMARK.h +header-y += xt_conntrack.h +header-y += xt_dccp.h +header-y += xt_esp.h +header-y += xt_helper.h +header-y += xt_length.h +header-y += xt_limit.h +header-y += xt_mac.h +header-y += xt_mark.h +header-y += xt_MARK.h +header-y += xt_multiport.h +header-y += xt_NFQUEUE.h +header-y += xt_pkttype.h +header-y += xt_policy.h +header-y += xt_realm.h +header-y += xt_sctp.h +header-y += xt_state.h +header-y += xt_string.h +header-y += xt_tcpmss.h +header-y += xt_tcpudp.h +header-y += xt_SECMARK.h +header-y += xt_CONNSECMARK.h -unifdef-y := nf_conntrack_common.h nf_conntrack_ftp.h \ - nf_conntrack_tcp.h nfnetlink.h x_tables.h xt_physdev.h +unifdef-y += nf_conntrack_common.h +unifdef-y += nf_conntrack_ftp.h +unifdef-y += nf_conntrack_tcp.h +unifdef-y += nfnetlink.h +unifdef-y += x_tables.h +unifdef-y += xt_physdev.h diff --git a/include/linux/netfilter_arp/Kbuild b/include/linux/netfilter_arp/Kbuild index 198ec5e7b17d..4f13dfcb92ea 100644 --- a/include/linux/netfilter_arp/Kbuild +++ b/include/linux/netfilter_arp/Kbuild @@ -1,2 +1,3 @@ -header-y := arpt_mangle.h -unifdef-y := arp_tables.h +header-y += arpt_mangle.h + +unifdef-y += arp_tables.h diff --git a/include/linux/netfilter_bridge/Kbuild b/include/linux/netfilter_bridge/Kbuild index 5b1aba6abbad..76ff4c47d8c4 100644 --- a/include/linux/netfilter_bridge/Kbuild +++ b/include/linux/netfilter_bridge/Kbuild @@ -1,4 +1,17 @@ -header-y += ebt_among.h ebt_arp.h ebt_arpreply.h ebt_ip.h ebt_limit.h \ - ebt_log.h ebt_mark_m.h ebt_mark_t.h ebt_nat.h ebt_pkttype.h \ - ebt_redirect.h ebt_stp.h ebt_ulog.h ebt_vlan.h -unifdef-y := ebtables.h ebt_802_3.h +header-y += ebt_among.h +header-y += ebt_arp.h +header-y += ebt_arpreply.h +header-y += ebt_ip.h +header-y += ebt_limit.h +header-y += ebt_log.h +header-y += ebt_mark_m.h +header-y += ebt_mark_t.h +header-y += ebt_nat.h +header-y += ebt_pkttype.h +header-y += ebt_redirect.h +header-y += ebt_stp.h +header-y += ebt_ulog.h +header-y += ebt_vlan.h + +unifdef-y += ebtables.h +unifdef-y += ebt_802_3.h diff --git a/include/linux/netfilter_ipv4/Kbuild b/include/linux/netfilter_ipv4/Kbuild index 04e4d2721689..591c1a809c00 100644 --- a/include/linux/netfilter_ipv4/Kbuild +++ b/include/linux/netfilter_ipv4/Kbuild @@ -1,21 +1,63 @@ +header-y += ip_conntrack_helper.h +header-y += ip_conntrack_helper_h323_asn1.h +header-y += ip_conntrack_helper_h323_types.h +header-y += ip_conntrack_protocol.h +header-y += ip_conntrack_sctp.h +header-y += ip_conntrack_tcp.h +header-y += ip_conntrack_tftp.h +header-y += ip_nat_pptp.h +header-y += ipt_addrtype.h +header-y += ipt_ah.h +header-y += ipt_CLASSIFY.h +header-y += ipt_CLUSTERIP.h +header-y += ipt_comment.h +header-y += ipt_connbytes.h +header-y += ipt_connmark.h +header-y += ipt_CONNMARK.h +header-y += ipt_conntrack.h +header-y += ipt_dccp.h +header-y += ipt_dscp.h +header-y += ipt_DSCP.h +header-y += ipt_ecn.h +header-y += ipt_ECN.h +header-y += ipt_esp.h +header-y += 
ipt_hashlimit.h +header-y += ipt_helper.h +header-y += ipt_iprange.h +header-y += ipt_length.h +header-y += ipt_limit.h +header-y += ipt_LOG.h +header-y += ipt_mac.h +header-y += ipt_mark.h +header-y += ipt_MARK.h +header-y += ipt_multiport.h +header-y += ipt_NFQUEUE.h +header-y += ipt_owner.h +header-y += ipt_physdev.h +header-y += ipt_pkttype.h +header-y += ipt_policy.h +header-y += ipt_realm.h +header-y += ipt_recent.h +header-y += ipt_REJECT.h +header-y += ipt_SAME.h +header-y += ipt_sctp.h +header-y += ipt_state.h +header-y += ipt_string.h +header-y += ipt_tcpmss.h +header-y += ipt_TCPMSS.h +header-y += ipt_tos.h +header-y += ipt_TOS.h +header-y += ipt_ttl.h +header-y += ipt_TTL.h +header-y += ipt_ULOG.h -header-y := ip_conntrack_helper.h ip_conntrack_helper_h323_asn1.h \ - ip_conntrack_helper_h323_types.h ip_conntrack_protocol.h \ - ip_conntrack_sctp.h ip_conntrack_tcp.h ip_conntrack_tftp.h \ - ip_nat_pptp.h ipt_addrtype.h ipt_ah.h \ - ipt_CLASSIFY.h ipt_CLUSTERIP.h ipt_comment.h \ - ipt_connbytes.h ipt_connmark.h ipt_CONNMARK.h \ - ipt_conntrack.h ipt_dccp.h ipt_dscp.h ipt_DSCP.h ipt_ecn.h \ - ipt_ECN.h ipt_esp.h ipt_hashlimit.h ipt_helper.h \ - ipt_iprange.h ipt_length.h ipt_limit.h ipt_LOG.h ipt_mac.h \ - ipt_mark.h ipt_MARK.h ipt_multiport.h ipt_NFQUEUE.h \ - ipt_owner.h ipt_physdev.h ipt_pkttype.h ipt_policy.h \ - ipt_realm.h ipt_recent.h ipt_REJECT.h ipt_SAME.h \ - ipt_sctp.h ipt_state.h ipt_string.h ipt_tcpmss.h \ - ipt_TCPMSS.h ipt_tos.h ipt_TOS.h ipt_ttl.h ipt_TTL.h \ - ipt_ULOG.h - -unifdef-y := ip_conntrack.h ip_conntrack_h323.h ip_conntrack_irc.h \ - ip_conntrack_pptp.h ip_conntrack_proto_gre.h \ - ip_conntrack_tuple.h ip_nat.h ip_nat_rule.h ip_queue.h \ - ip_tables.h +unifdef-y += ip_conntrack.h +unifdef-y += ip_conntrack_h323.h +unifdef-y += ip_conntrack_irc.h +unifdef-y += ip_conntrack_pptp.h +unifdef-y += ip_conntrack_proto_gre.h +unifdef-y += ip_conntrack_tuple.h +unifdef-y += ip_nat.h +unifdef-y += ip_nat_rule.h +unifdef-y += ip_queue.h +unifdef-y += ip_tables.h diff --git a/include/linux/netfilter_ipv6/Kbuild b/include/linux/netfilter_ipv6/Kbuild index 913ddbf55b4b..9dd978d149ff 100644 --- a/include/linux/netfilter_ipv6/Kbuild +++ b/include/linux/netfilter_ipv6/Kbuild @@ -1,6 +1,21 @@ -header-y += ip6t_HL.h ip6t_LOG.h ip6t_MARK.h ip6t_REJECT.h ip6t_ah.h \ - ip6t_esp.h ip6t_frag.h ip6t_hl.h ip6t_ipv6header.h \ - ip6t_length.h ip6t_limit.h ip6t_mac.h ip6t_mark.h \ - ip6t_multiport.h ip6t_opts.h ip6t_owner.h ip6t_policy.h \ - ip6t_physdev.h ip6t_rt.h -unifdef-y := ip6_tables.h +header-y += ip6t_HL.h +header-y += ip6t_LOG.h +header-y += ip6t_MARK.h +header-y += ip6t_REJECT.h +header-y += ip6t_ah.h +header-y += ip6t_esp.h +header-y += ip6t_frag.h +header-y += ip6t_hl.h +header-y += ip6t_ipv6header.h +header-y += ip6t_length.h +header-y += ip6t_limit.h +header-y += ip6t_mac.h +header-y += ip6t_mark.h +header-y += ip6t_multiport.h +header-y += ip6t_opts.h +header-y += ip6t_owner.h +header-y += ip6t_policy.h +header-y += ip6t_physdev.h +header-y += ip6t_rt.h + +unifdef-y += ip6_tables.h diff --git a/include/linux/nfsd/Kbuild b/include/linux/nfsd/Kbuild index c8c545665885..d9c5455808e5 100644 --- a/include/linux/nfsd/Kbuild +++ b/include/linux/nfsd/Kbuild @@ -1,2 +1,7 @@ -unifdef-y := const.h export.h stats.h syscall.h nfsfh.h debug.h auth.h - +unifdef-y += const.h +unifdef-y += export.h +unifdef-y += stats.h +unifdef-y += syscall.h +unifdef-y += nfsfh.h +unifdef-y += debug.h +unifdef-y += auth.h diff --git a/include/linux/raid/Kbuild b/include/linux/raid/Kbuild index 
73fa27a8d552..2415a64c5e51 100644 --- a/include/linux/raid/Kbuild +++ b/include/linux/raid/Kbuild @@ -1 +1,2 @@ -header-y += md_p.h md_u.h +header-y += md_p.h +header-y += md_u.h diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 66ff545552f7..4efbd9c445f5 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -5,7 +5,7 @@ #include <linux/mm.h> #include <linux/string.h> -static inline void sg_set_buf(struct scatterlist *sg, void *buf, +static inline void sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen) { sg->page = virt_to_page(buf); @@ -13,7 +13,7 @@ static inline void sg_set_buf(struct scatterlist *sg, void *buf, sg->length = buflen; } -static inline void sg_init_one(struct scatterlist *sg, void *buf, +static inline void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen) { memset(sg, 0, sizeof(*sg)); diff --git a/include/linux/sunrpc/Kbuild b/include/linux/sunrpc/Kbuild index 0d1d768a27bf..fb438f158eee 100644 --- a/include/linux/sunrpc/Kbuild +++ b/include/linux/sunrpc/Kbuild @@ -1 +1 @@ -unifdef-y := debug.h +unifdef-y += debug.h diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h index 1279280d7196..e30ba201910a 100644 --- a/include/linux/sunrpc/gss_krb5.h +++ b/include/linux/sunrpc/gss_krb5.h @@ -46,8 +46,8 @@ struct krb5_ctx { unsigned char seed[16]; int signalg; int sealalg; - struct crypto_tfm *enc; - struct crypto_tfm *seq; + struct crypto_blkcipher *enc; + struct crypto_blkcipher *seq; s32 endtime; u32 seq_send; struct xdr_netobj mech_used; @@ -136,26 +136,27 @@ gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, u32 -krb5_encrypt(struct crypto_tfm * key, +krb5_encrypt(struct crypto_blkcipher *key, void *iv, void *in, void *out, int length); u32 -krb5_decrypt(struct crypto_tfm * key, +krb5_decrypt(struct crypto_blkcipher *key, void *iv, void *in, void *out, int length); int -gss_encrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *outbuf, int offset, - struct page **pages); +gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *outbuf, + int offset, struct page **pages); int -gss_decrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *inbuf, int offset); +gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *inbuf, + int offset); s32 -krb5_make_seq_num(struct crypto_tfm * key, +krb5_make_seq_num(struct crypto_blkcipher *key, int direction, s32 seqnum, unsigned char *cksum, unsigned char *buf); s32 -krb5_get_seq_num(struct crypto_tfm * key, +krb5_get_seq_num(struct crypto_blkcipher *key, unsigned char *cksum, unsigned char *buf, int *direction, s32 * seqnum); diff --git a/include/linux/sunrpc/gss_spkm3.h b/include/linux/sunrpc/gss_spkm3.h index 336e218c2782..2cf3fbb40b4f 100644 --- a/include/linux/sunrpc/gss_spkm3.h +++ b/include/linux/sunrpc/gss_spkm3.h @@ -19,9 +19,9 @@ struct spkm3_ctx { unsigned int req_flags ; struct xdr_netobj share_key; int conf_alg; - struct crypto_tfm* derived_conf_key; + struct crypto_blkcipher *derived_conf_key; int intg_alg; - struct crypto_tfm* derived_integ_key; + struct crypto_blkcipher *derived_integ_key; int keyestb_alg; /* alg used to get share_key */ int owf_alg; /* one way function */ }; diff --git a/include/linux/tc_act/Kbuild b/include/linux/tc_act/Kbuild index 5251a505b2f1..78dfbac36375 100644 --- a/include/linux/tc_act/Kbuild +++ b/include/linux/tc_act/Kbuild @@ -1 +1,4 @@ -header-y += tc_gact.h tc_ipt.h tc_mirred.h tc_pedit.h +header-y += tc_gact.h +header-y += tc_ipt.h +header-y += 
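
The gss_krb5 prototypes above now take a crypto_blkcipher; a minimal sketch of the descriptor-based block cipher pattern they rely on, assuming a "cbc(des3_ede)" transform is available (key and IV bytes are illustrative):

    #include <linux/crypto.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    struct crypto_blkcipher *tfm;
    struct blkcipher_desc desc;
    struct scatterlist sg;
    u8 key[24] = { 0 };             /* illustrative key */
    u8 iv[8] = { 0 };               /* illustrative IV */
    u8 buf[64];

    tfm = crypto_alloc_blkcipher("cbc(des3_ede)", 0, CRYPTO_ALG_ASYNC);
    if (IS_ERR(tfm))
        fail();

    if (crypto_blkcipher_setkey(tfm, key, sizeof(key)))
        fail();
    crypto_blkcipher_set_iv(tfm, iv, sizeof(iv));

    desc.tfm = tfm;
    desc.flags = 0;
    sg_init_one(&sg, buf, sizeof(buf));

    if (crypto_blkcipher_encrypt(&desc, &sg, &sg, sizeof(buf)))
        fail();

    crypto_free_blkcipher(tfm);
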
tc_mirred.h +header-y += tc_pedit.h diff --git a/include/linux/tc_ematch/Kbuild b/include/linux/tc_ematch/Kbuild index 381e93018df6..4a58a1c32a00 100644 --- a/include/linux/tc_ematch/Kbuild +++ b/include/linux/tc_ematch/Kbuild @@ -1 +1,4 @@ -headers-y := tc_em_cmp.h tc_em_meta.h tc_em_nbyte.h tc_em_text.h +header-y += tc_em_cmp.h +header-y += tc_em_meta.h +header-y += tc_em_nbyte.h +header-y += tc_em_text.h diff --git a/include/mtd/Kbuild b/include/mtd/Kbuild index e1da2a5b2a57..13e7a3c6d794 100644 --- a/include/mtd/Kbuild +++ b/include/mtd/Kbuild @@ -1,2 +1,6 @@ -unifdef-y := mtd-abi.h -header-y := inftl-user.h jffs2-user.h mtd-user.h nftl-user.h +header-y += inftl-user.h +header-y += jffs2-user.h +header-y += mtd-user.h +header-y += nftl-user.h + +unifdef-y += mtd-abi.h diff --git a/include/mtd/mtd-abi.h b/include/mtd/mtd-abi.h index 1da3f7fa7993..b0a67b7ffdcd 100644 --- a/include/mtd/mtd-abi.h +++ b/include/mtd/mtd-abi.h @@ -34,6 +34,7 @@ struct mtd_oob_buf { #define MTD_WRITEABLE 0x400 /* Device is writeable */ #define MTD_BIT_WRITEABLE 0x800 /* Single bits can be flipped */ #define MTD_NO_ERASE 0x1000 /* No erase necessary */ +#define MTD_STUPID_LOCK 0x2000 /* Always locked after reset */ // Some common devices / combinations of capabilities #define MTD_CAP_ROM 0 diff --git a/include/net/ah.h b/include/net/ah.h index ceff00afae09..8f257c159902 100644 --- a/include/net/ah.h +++ b/include/net/ah.h @@ -1,6 +1,7 @@ #ifndef _NET_AH_H #define _NET_AH_H +#include <linux/crypto.h> #include <net/xfrm.h> /* This is the maximum truncated ICV length that we know of. */ @@ -14,22 +15,29 @@ struct ah_data int icv_full_len; int icv_trunc_len; - void (*icv)(struct ah_data*, - struct sk_buff *skb, u8 *icv); - - struct crypto_tfm *tfm; + struct crypto_hash *tfm; }; -static inline void -ah_hmac_digest(struct ah_data *ahp, struct sk_buff *skb, u8 *auth_data) +static inline int ah_mac_digest(struct ah_data *ahp, struct sk_buff *skb, + u8 *auth_data) { - struct crypto_tfm *tfm = ahp->tfm; + struct hash_desc desc; + int err; + + desc.tfm = ahp->tfm; + desc.flags = 0; memset(auth_data, 0, ahp->icv_trunc_len); - crypto_hmac_init(tfm, ahp->key, &ahp->key_len); - skb_icv_walk(skb, tfm, 0, skb->len, crypto_hmac_update); - crypto_hmac_final(tfm, ahp->key, &ahp->key_len, ahp->work_icv); - memcpy(auth_data, ahp->work_icv, ahp->icv_trunc_len); + err = crypto_hash_init(&desc); + if (unlikely(err)) + goto out; + err = skb_icv_walk(skb, &desc, 0, skb->len, crypto_hash_update); + if (unlikely(err)) + goto out; + err = crypto_hash_final(&desc, ahp->work_icv); + +out: + return err; } #endif diff --git a/include/net/esp.h b/include/net/esp.h index 90cd94fad7d9..064366d66eea 100644 --- a/include/net/esp.h +++ b/include/net/esp.h @@ -1,6 +1,7 @@ #ifndef _NET_ESP_H #define _NET_ESP_H +#include <linux/crypto.h> #include <net/xfrm.h> #include <asm/scatterlist.h> @@ -21,7 +22,7 @@ struct esp_data * >= crypto_tfm_alg_ivsize(tfm). */ int ivlen; int padlen; /* 0..255 */ - struct crypto_tfm *tfm; /* crypto handle */ + struct crypto_blkcipher *tfm; /* crypto handle */ } conf; /* Integrity. 
It is active when icv_full_len != 0 */ @@ -34,7 +35,7 @@ struct esp_data void (*icv)(struct esp_data*, struct sk_buff *skb, int offset, int len, u8 *icv); - struct crypto_tfm *tfm; + struct crypto_hash *tfm; } auth; }; @@ -42,18 +43,22 @@ extern int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, extern int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer); extern void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len); -static inline void -esp_hmac_digest(struct esp_data *esp, struct sk_buff *skb, int offset, - int len, u8 *auth_data) +static inline int esp_mac_digest(struct esp_data *esp, struct sk_buff *skb, + int offset, int len) { - struct crypto_tfm *tfm = esp->auth.tfm; - char *icv = esp->auth.work_icv; - - memset(auth_data, 0, esp->auth.icv_trunc_len); - crypto_hmac_init(tfm, esp->auth.key, &esp->auth.key_len); - skb_icv_walk(skb, tfm, offset, len, crypto_hmac_update); - crypto_hmac_final(tfm, esp->auth.key, &esp->auth.key_len, icv); - memcpy(auth_data, icv, esp->auth.icv_trunc_len); + struct hash_desc desc; + int err; + + desc.tfm = esp->auth.tfm; + desc.flags = 0; + + err = crypto_hash_init(&desc); + if (unlikely(err)) + return err; + err = skb_icv_walk(skb, &desc, offset, len, crypto_hash_update); + if (unlikely(err)) + return err; + return crypto_hash_final(&desc, esp->auth.work_icv); } #endif diff --git a/include/net/ipcomp.h b/include/net/ipcomp.h index e651a57ecdd5..87c1af3e5e82 100644 --- a/include/net/ipcomp.h +++ b/include/net/ipcomp.h @@ -1,11 +1,14 @@ #ifndef _NET_IPCOMP_H #define _NET_IPCOMP_H +#include <linux/crypto.h> +#include <linux/types.h> + #define IPCOMP_SCRATCH_SIZE 65400 struct ipcomp_data { u16 threshold; - struct crypto_tfm **tfms; + struct crypto_comp **tfms; }; #endif diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index c51541ee0247..57166bfdf8eb 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -312,9 +312,9 @@ enum { SCTP_MAX_GABS = 16 }; */ #if defined (CONFIG_SCTP_HMAC_MD5) -#define SCTP_COOKIE_HMAC_ALG "md5" +#define SCTP_COOKIE_HMAC_ALG "hmac(md5)" #elif defined (CONFIG_SCTP_HMAC_SHA1) -#define SCTP_COOKIE_HMAC_ALG "sha1" +#define SCTP_COOKIE_HMAC_ALG "hmac(sha1)" #else #define SCTP_COOKIE_HMAC_ALG NULL #endif diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 92eae0e0f3f1..1c1abce5f6b6 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -330,17 +330,6 @@ static inline void sctp_v6_exit(void) { return; } #endif /* #if defined(CONFIG_IPV6) */ -/* Some wrappers, in case crypto not available. */ -#if defined (CONFIG_CRYPTO_HMAC) -#define sctp_crypto_alloc_tfm crypto_alloc_tfm -#define sctp_crypto_free_tfm crypto_free_tfm -#define sctp_crypto_hmac crypto_hmac -#else -#define sctp_crypto_alloc_tfm(x...) NULL -#define sctp_crypto_free_tfm(x...) -#define sctp_crypto_hmac(x...) -#endif - /* Map an association to an assoc_id. */ static inline sctp_assoc_t sctp_assoc2id(const struct sctp_association *asoc) diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index e5aa7ff1f5b5..0412e730c765 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -87,6 +87,7 @@ struct sctp_bind_addr; struct sctp_ulpq; struct sctp_ep_common; struct sctp_ssnmap; +struct crypto_hash; #include <net/sctp/tsnmap.h> @@ -264,7 +265,7 @@ struct sctp_sock { struct sctp_pf *pf; /* Access to HMAC transform. */ - struct crypto_tfm *hmac; + struct crypto_hash *hmac; /* What is our base endpointer? 
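
The "hmac(md5)"/"hmac(sha1)" strings above work because HMAC is now a template instantiated around a digest rather than a special-cased wrapper; a minimal sketch of a keyed digest, with an illustrative key and buffer:

    #include <linux/crypto.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    struct crypto_hash *tfm;
    struct hash_desc desc;
    struct scatterlist sg;
    u8 key[20] = { 0 };             /* illustrative key */
    u8 data[64], mac[20];

    tfm = crypto_alloc_hash("hmac(sha1)", 0, CRYPTO_ALG_ASYNC);
    if (IS_ERR(tfm))
        fail();

    if (crypto_hash_setkey(tfm, key, sizeof(key)))
        fail();

    desc.tfm = tfm;
    desc.flags = 0;
    sg_init_one(&sg, data, sizeof(data));

    if (crypto_hash_digest(&desc, &sg, sizeof(data), mac))
        fail();

    crypto_free_hash(tfm);
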
*/ struct sctp_endpoint *ep; diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 9c5ee9f20b65..3ecd9fa1ed4b 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -8,7 +8,6 @@ #include <linux/list.h> #include <linux/skbuff.h> #include <linux/socket.h> -#include <linux/crypto.h> #include <linux/pfkeyv2.h> #include <linux/in6.h> #include <linux/mutex.h> @@ -855,6 +854,7 @@ struct xfrm_algo_comp_info { struct xfrm_algo_desc { char *name; + char *compat; u8 available:1; union { struct xfrm_algo_auth_info auth; @@ -984,11 +984,13 @@ extern struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe); extern struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe); extern struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe); -struct crypto_tfm; -typedef void (icv_update_fn_t)(struct crypto_tfm *, struct scatterlist *, unsigned int); +struct hash_desc; +struct scatterlist; +typedef int (icv_update_fn_t)(struct hash_desc *, struct scatterlist *, + unsigned int); -extern void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm, - int offset, int len, icv_update_fn_t icv_update); +extern int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *tfm, + int offset, int len, icv_update_fn_t icv_update); static inline int xfrm_addr_cmp(xfrm_address_t *a, xfrm_address_t *b, int family) diff --git a/include/rdma/Kbuild b/include/rdma/Kbuild index eb710ba9b1a0..e7c043216558 100644 --- a/include/rdma/Kbuild +++ b/include/rdma/Kbuild @@ -1 +1 @@ -header-y := ib_user_mad.h +header-y += ib_user_mad.h diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h index 0ff67398928d..81b62307621d 100644 --- a/include/rdma/ib_addr.h +++ b/include/rdma/ib_addr.h @@ -40,7 +40,7 @@ struct rdma_dev_addr { unsigned char src_dev_addr[MAX_ADDR_LEN]; unsigned char dst_dev_addr[MAX_ADDR_LEN]; unsigned char broadcast[MAX_ADDR_LEN]; - enum ib_node_type dev_type; + enum rdma_node_type dev_type; }; /** @@ -72,6 +72,9 @@ int rdma_resolve_ip(struct sockaddr *src_addr, struct sockaddr *dst_addr, void rdma_addr_cancel(struct rdma_dev_addr *addr); +int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev, + const unsigned char *dst_dev_addr); + static inline int ip_addr_size(struct sockaddr *addr) { return addr->sa_family == AF_INET6 ? @@ -113,4 +116,16 @@ static inline void ib_addr_set_dgid(struct rdma_dev_addr *dev_addr, memcpy(dev_addr->dst_dev_addr + 4, gid, sizeof *gid); } +static inline void iw_addr_get_sgid(struct rdma_dev_addr *dev_addr, + union ib_gid *gid) +{ + memcpy(gid, dev_addr->src_dev_addr, sizeof *gid); +} + +static inline void iw_addr_get_dgid(struct rdma_dev_addr *dev_addr, + union ib_gid *gid) +{ + memcpy(gid, dev_addr->dst_dev_addr, sizeof *gid); +} + #endif /* IB_ADDR_H */ diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h index c99e4420fd7e..97715b0c20b6 100644 --- a/include/rdma/ib_sa.h +++ b/include/rdma/ib_sa.h @@ -1,6 +1,7 @@ /* * Copyright (c) 2004 Topspin Communications. All rights reserved. * Copyright (c) 2005 Voltaire, Inc. All rights reserved. + * Copyright (c) 2006 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -36,8 +37,11 @@ #ifndef IB_SA_H #define IB_SA_H +#include <linux/completion.h> #include <linux/compiler.h> +#include <asm/atomic.h> + #include <rdma/ib_verbs.h> #include <rdma/ib_mad.h> @@ -79,8 +83,8 @@ enum { }; enum ib_sa_selector { - IB_SA_GTE = 0, - IB_SA_LTE = 1, + IB_SA_GT = 0, + IB_SA_LT = 1, IB_SA_EQ = 2, /* * The meaning of "best" depends on the attribute: for @@ -250,11 +254,28 @@ struct ib_sa_service_rec { u64 data64[2]; }; +struct ib_sa_client { + atomic_t users; + struct completion comp; +}; + +/** + * ib_sa_register_client - Register an SA client. + */ +void ib_sa_register_client(struct ib_sa_client *client); + +/** + * ib_sa_unregister_client - Deregister an SA client. + * @client: Client object to deregister. + */ +void ib_sa_unregister_client(struct ib_sa_client *client); + struct ib_sa_query; void ib_sa_cancel_query(int id, struct ib_sa_query *query); -int ib_sa_path_rec_get(struct ib_device *device, u8 port_num, +int ib_sa_path_rec_get(struct ib_sa_client *client, + struct ib_device *device, u8 port_num, struct ib_sa_path_rec *rec, ib_sa_comp_mask comp_mask, int timeout_ms, gfp_t gfp_mask, @@ -264,7 +285,8 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num, void *context, struct ib_sa_query **query); -int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num, +int ib_sa_mcmember_rec_query(struct ib_sa_client *client, + struct ib_device *device, u8 port_num, u8 method, struct ib_sa_mcmember_rec *rec, ib_sa_comp_mask comp_mask, @@ -275,7 +297,8 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num, void *context, struct ib_sa_query **query); -int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, +int ib_sa_service_rec_query(struct ib_sa_client *client, + struct ib_device *device, u8 port_num, u8 method, struct ib_sa_service_rec *rec, ib_sa_comp_mask comp_mask, @@ -288,6 +311,7 @@ int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, /** * ib_sa_mcmember_rec_set - Start an MCMember set query + * @client:SA client * @device:device to send query on * @port_num: port number to send query on * @rec:MCMember Record to send in query @@ -311,7 +335,8 @@ int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, * cancel the query. */ static inline int -ib_sa_mcmember_rec_set(struct ib_device *device, u8 port_num, +ib_sa_mcmember_rec_set(struct ib_sa_client *client, + struct ib_device *device, u8 port_num, struct ib_sa_mcmember_rec *rec, ib_sa_comp_mask comp_mask, int timeout_ms, gfp_t gfp_mask, @@ -321,7 +346,7 @@ ib_sa_mcmember_rec_set(struct ib_device *device, u8 port_num, void *context, struct ib_sa_query **query) { - return ib_sa_mcmember_rec_query(device, port_num, + return ib_sa_mcmember_rec_query(client, device, port_num, IB_MGMT_METHOD_SET, rec, comp_mask, timeout_ms, gfp_mask, callback, @@ -330,6 +355,7 @@ ib_sa_mcmember_rec_set(struct ib_device *device, u8 port_num, /** * ib_sa_mcmember_rec_delete - Start an MCMember delete query + * @client:SA client * @device:device to send query on * @port_num: port number to send query on * @rec:MCMember Record to send in query @@ -353,7 +379,8 @@ ib_sa_mcmember_rec_set(struct ib_device *device, u8 port_num, * cancel the query. 
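
A minimal sketch of the new SA client registration around the query helpers above; my_sa_client and the surrounding module hooks are illustrative:

    #include <rdma/ib_sa.h>

    static struct ib_sa_client my_sa_client;    /* illustrative */

    /* at module init, before issuing any queries */
    ib_sa_register_client(&my_sa_client);

    /*
     * Every query helper above now takes the registered client as its
     * first argument, e.g.:
     *
     *     ib_sa_path_rec_get(&my_sa_client, device, port_num, ...);
     */

    /* at module exit, after canceling any outstanding queries */
    ib_sa_unregister_client(&my_sa_client);
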
*/ static inline int -ib_sa_mcmember_rec_delete(struct ib_device *device, u8 port_num, +ib_sa_mcmember_rec_delete(struct ib_sa_client *client, + struct ib_device *device, u8 port_num, struct ib_sa_mcmember_rec *rec, ib_sa_comp_mask comp_mask, int timeout_ms, gfp_t gfp_mask, @@ -363,7 +390,7 @@ ib_sa_mcmember_rec_delete(struct ib_device *device, u8 port_num, void *context, struct ib_sa_query **query) { - return ib_sa_mcmember_rec_query(device, port_num, + return ib_sa_mcmember_rec_query(client, device, port_num, IB_SA_METHOD_DELETE, rec, comp_mask, timeout_ms, gfp_mask, callback, diff --git a/include/rdma/ib_user_verbs.h b/include/rdma/ib_user_verbs.h index 7b5372010f4b..db1b814b62cc 100644 --- a/include/rdma/ib_user_verbs.h +++ b/include/rdma/ib_user_verbs.h @@ -275,6 +275,8 @@ struct ib_uverbs_resize_cq { struct ib_uverbs_resize_cq_resp { __u32 cqe; + __u32 reserved; + __u64 driver_data[0]; }; struct ib_uverbs_poll_cq { diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index ee1f3a355666..8eacc3510993 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -56,12 +56,22 @@ union ib_gid { } global; }; -enum ib_node_type { - IB_NODE_CA = 1, - IB_NODE_SWITCH, - IB_NODE_ROUTER +enum rdma_node_type { + /* IB values map to NodeInfo:NodeType. */ + RDMA_NODE_IB_CA = 1, + RDMA_NODE_IB_SWITCH, + RDMA_NODE_IB_ROUTER, + RDMA_NODE_RNIC }; +enum rdma_transport_type { + RDMA_TRANSPORT_IB, + RDMA_TRANSPORT_IWARP +}; + +enum rdma_transport_type +rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__; + enum ib_device_cap_flags { IB_DEVICE_RESIZE_MAX_WR = 1, IB_DEVICE_BAD_PKEY_CNTR = (1<<1), @@ -78,6 +88,9 @@ enum ib_device_cap_flags { IB_DEVICE_RC_RNR_NAK_GEN = (1<<12), IB_DEVICE_SRQ_RESIZE = (1<<13), IB_DEVICE_N_NOTIFY_CQ = (1<<14), + IB_DEVICE_ZERO_STAG = (1<<15), + IB_DEVICE_SEND_W_INV = (1<<16), + IB_DEVICE_MEM_WINDOW = (1<<17) }; enum ib_atomic_cap { @@ -835,6 +848,8 @@ struct ib_cache { u8 *lmc_cache; }; +struct iw_cm_verbs; + struct ib_device { struct device *dma_device; @@ -851,6 +866,8 @@ struct ib_device { u32 flags; + struct iw_cm_verbs *iwcm; + int (*query_device)(struct ib_device *device, struct ib_device_attr *device_attr); int (*query_port)(struct ib_device *device, @@ -888,7 +905,8 @@ struct ib_device { struct ib_udata *udata); int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr, - enum ib_srq_attr_mask srq_attr_mask); + enum ib_srq_attr_mask srq_attr_mask, + struct ib_udata *udata); int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr); int (*destroy_srq)(struct ib_srq *srq); @@ -900,7 +918,8 @@ struct ib_device { struct ib_udata *udata); int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr, - int qp_attr_mask); + int qp_attr_mask, + struct ib_udata *udata); int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask, diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h new file mode 100644 index 000000000000..aeefa9b740dc --- /dev/null +++ b/include/rdma/iw_cm.h @@ -0,0 +1,258 @@ +/* + * Copyright (c) 2005 Network Appliance, Inc. All rights reserved. + * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
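
A minimal sketch of branching on the new transport enum, assuming device points to a struct ib_device:

    #include <rdma/ib_verbs.h>

    switch (rdma_node_get_transport(device->node_type)) {
    case RDMA_TRANSPORT_IB:
        /* InfiniBand: SA queries, path records, etc. apply */
        break;
    case RDMA_TRANSPORT_IWARP:
        /* iWARP RNIC: connection setup goes through the IW CM */
        break;
    }
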
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef IW_CM_H +#define IW_CM_H + +#include <linux/in.h> +#include <rdma/ib_cm.h> + +struct iw_cm_id; + +enum iw_cm_event_type { + IW_CM_EVENT_CONNECT_REQUEST = 1, /* connect request received */ + IW_CM_EVENT_CONNECT_REPLY, /* reply from active connect request */ + IW_CM_EVENT_ESTABLISHED, /* passive side accept successful */ + IW_CM_EVENT_DISCONNECT, /* orderly shutdown */ + IW_CM_EVENT_CLOSE /* close complete */ +}; + +enum iw_cm_event_status { + IW_CM_EVENT_STATUS_OK = 0, /* request successful */ + IW_CM_EVENT_STATUS_ACCEPTED = 0, /* connect request accepted */ + IW_CM_EVENT_STATUS_REJECTED, /* connect request rejected */ + IW_CM_EVENT_STATUS_TIMEOUT, /* the operation timed out */ + IW_CM_EVENT_STATUS_RESET, /* reset from remote peer */ + IW_CM_EVENT_STATUS_EINVAL, /* asynchronous failure for bad parm */ +}; + +struct iw_cm_event { + enum iw_cm_event_type event; + enum iw_cm_event_status status; + struct sockaddr_in local_addr; + struct sockaddr_in remote_addr; + void *private_data; + u8 private_data_len; + void* provider_data; +}; + +/** + * iw_cm_handler - Function to be called by the IW CM when delivering events + * to the client. + * + * @cm_id: The IW CM identifier associated with the event. + * @event: Pointer to the event structure. + */ +typedef int (*iw_cm_handler)(struct iw_cm_id *cm_id, + struct iw_cm_event *event); + +/** + * iw_event_handler - Function called by the provider when delivering provider + * events to the IW CM. Returns either 0 indicating the event was processed + * or -errno if the event could not be processed. + * + * @cm_id: The IW CM identifier associated with the event. + * @event: Pointer to the event structure. 
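
For illustration, a client's cm_handler (the first typedef above) might dispatch on the event type like this; the empty cases are placeholders:

    static int my_cm_handler(struct iw_cm_id *cm_id,
                             struct iw_cm_event *event)
    {
        switch (event->event) {
        case IW_CM_EVENT_CONNECT_REPLY:
            /* active side: event->status says whether connect worked */
            break;
        case IW_CM_EVENT_ESTABLISHED:
            /* passive side: accept completed */
            break;
        case IW_CM_EVENT_DISCONNECT:
        case IW_CM_EVENT_CLOSE:
            /* tear down QP state */
            break;
        default:
            break;
        }
        return 0;
    }
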
+ */ +typedef int (*iw_event_handler)(struct iw_cm_id *cm_id, + struct iw_cm_event *event); + +struct iw_cm_id { + iw_cm_handler cm_handler; /* client callback function */ + void *context; /* client cb context */ + struct ib_device *device; + struct sockaddr_in local_addr; + struct sockaddr_in remote_addr; + void *provider_data; /* provider private data */ + iw_event_handler event_handler; /* cb for provider + events */ + /* Used by provider to add and remove refs on IW cm_id */ + void (*add_ref)(struct iw_cm_id *); + void (*rem_ref)(struct iw_cm_id *); +}; + +struct iw_cm_conn_param { + const void *private_data; + u16 private_data_len; + u32 ord; + u32 ird; + u32 qpn; +}; + +struct iw_cm_verbs { + void (*add_ref)(struct ib_qp *qp); + + void (*rem_ref)(struct ib_qp *qp); + + struct ib_qp * (*get_qp)(struct ib_device *device, + int qpn); + + int (*connect)(struct iw_cm_id *cm_id, + struct iw_cm_conn_param *conn_param); + + int (*accept)(struct iw_cm_id *cm_id, + struct iw_cm_conn_param *conn_param); + + int (*reject)(struct iw_cm_id *cm_id, + const void *pdata, u8 pdata_len); + + int (*create_listen)(struct iw_cm_id *cm_id, + int backlog); + + int (*destroy_listen)(struct iw_cm_id *cm_id); +}; + +/** + * iw_create_cm_id - Create an IW CM identifier. + * + * @device: The IB device on which to create the IW CM identifier. + * @cm_handler: User callback invoked to report events associated with the + * returned IW CM identifier. + * @context: User specified context associated with the id. + */ +struct iw_cm_id *iw_create_cm_id(struct ib_device *device, + iw_cm_handler cm_handler, void *context); + +/** + * iw_destroy_cm_id - Destroy an IW CM identifier. + * + * @cm_id: The previously created IW CM identifier to destroy. + * + * The client can assume that no events will be delivered for the CM ID after + * this function returns. + */ +void iw_destroy_cm_id(struct iw_cm_id *cm_id); + +/** + * iw_cm_unbind_qp - Unbind the specified IW CM identifier and QP + * + * @cm_id: The IW CM identifier to unbind from the QP. + * @qp: The QP + * + * This is called by the provider when destroying the QP to ensure + * that any references held by the IWCM are released. It may also + * be called by the IWCM when destroying a CM_ID so that any + * references held by the provider are released. + */ +void iw_cm_unbind_qp(struct iw_cm_id *cm_id, struct ib_qp *qp); + +/** + * iw_cm_get_qp - Return the ib_qp associated with a QPN + * + * @device: The IB device + * @qpn: The queue pair number + */ +struct ib_qp *iw_cm_get_qp(struct ib_device *device, int qpn); + +/** + * iw_cm_listen - Listen for incoming connection requests on the + * specified IW CM id. + * + * @cm_id: The IW CM identifier. + * @backlog: The maximum number of outstanding un-accepted inbound listen + * requests to queue. + * + * The source address and port number are specified in the IW CM identifier + * structure. + */ +int iw_cm_listen(struct iw_cm_id *cm_id, int backlog); + +/** + * iw_cm_accept - Called to accept an incoming connect request. + * + * @cm_id: The IW CM identifier associated with the connection request. + * @iw_param: Pointer to a structure containing connection establishment + * parameters. + * + * The specified cm_id will have been provided in the event data for a + * CONNECT_REQUEST event. Subsequent events related to this connection will be + * delivered to the specified IW CM identifier, and may occur prior to + * the return of this function.
If this function returns a non-zero value, the + * client can assume that no events will be delivered to the specified IW CM + * identifier. + */ +int iw_cm_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param); + +/** + * iw_cm_reject - Reject an incoming connection request. + * + * @cm_id: Connection identifier associated with the request. + * @private_data: Pointer to data to deliver to the remote peer as part of the + * reject message. + * @private_data_len: The number of bytes in the private_data parameter. + * + * The client can assume that no events will be delivered to the specified IW + * CM identifier following the return of this function. The private_data + * buffer is available for reuse when this function returns. + */ +int iw_cm_reject(struct iw_cm_id *cm_id, const void *private_data, + u8 private_data_len); + +/** + * iw_cm_connect - Called to request a connection to a remote peer. + * + * @cm_id: The IW CM identifier for the connection. + * @iw_param: Pointer to a structure containing connection establishment + * parameters. + * + * Events may be delivered to the specified IW CM identifier prior to the + * return of this function. If this function returns a non-zero value, the + * client can assume that no events will be delivered to the specified IW CM + * identifier. + */ +int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param); + +/** + * iw_cm_disconnect - Close the specified connection. + * + * @cm_id: The IW CM identifier to close. + * @abrupt: If 0, the connection will be closed gracefully, otherwise, the + * connection will be reset. + * + * The IW CM identifier is still active until the IW_CM_EVENT_CLOSE event is + * delivered. + */ +int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt); + +/** + * iw_cm_init_qp_attr - Called to initialize the attributes of the QP + * associated with an IW CM identifier. + * + * @cm_id: The IW CM identifier associated with the QP + * @qp_attr: Pointer to the QP attributes structure. + * @qp_attr_mask: Pointer to a bit vector specifying which QP attributes are + * valid. + */ +int iw_cm_init_qp_attr(struct iw_cm_id *cm_id, struct ib_qp_attr *qp_attr, + int *qp_attr_mask); + +#endif /* IW_CM_H */ diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h index 402c63d7226b..deb5a0a4cee5 100644 --- a/include/rdma/rdma_cm.h +++ b/include/rdma/rdma_cm.h @@ -117,6 +117,14 @@ struct rdma_cm_id { struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler, void *context, enum rdma_port_space ps); +/** + * rdma_destroy_id - Destroys an RDMA identifier. + * + * @id: RDMA identifier. + * + * Note: calling this function has the effect of canceling in-flight + * asynchronous operations associated with the id. + */ void rdma_destroy_id(struct rdma_cm_id *id); /** @@ -237,6 +245,10 @@ int rdma_listen(struct rdma_cm_id *id, int backlog); * Typically, this routine is only called by the listener to accept a connection * request. It must also be called on the active side of a connection if the * user is performing their own QP transitions. + * + * In the case of error, a reject message is sent to the remote side and the + * state of the qp associated with the id is modified to error, such that any + * previously posted receive buffers would be flushed.
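
A minimal sketch of the active-side sequence using the calls above; device, my_cm_handler, my_ctx, laddr, raddr, and my_qp are assumed to be set up by the caller, and creation is assumed to return an ERR_PTR on failure:

    #include <linux/err.h>
    #include <rdma/iw_cm.h>

    struct iw_cm_id *cm_id;
    struct iw_cm_conn_param param;

    cm_id = iw_create_cm_id(device, my_cm_handler, my_ctx);
    if (IS_ERR(cm_id))
        fail();

    /* addresses are plain sockaddr_in, per struct iw_cm_id above */
    cm_id->local_addr = laddr;
    cm_id->remote_addr = raddr;

    param.private_data = NULL;
    param.private_data_len = 0;
    param.ord = 1;                  /* illustrative RDMA read depths */
    param.ird = 1;
    param.qpn = my_qp->qp_num;

    if (iw_cm_connect(cm_id, &param))
        fail();
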
*/ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param); diff --git a/include/scsi/Kbuild b/include/scsi/Kbuild index 14a033d73314..744f85011f1e 100644 --- a/include/scsi/Kbuild +++ b/include/scsi/Kbuild @@ -1,2 +1,4 @@ header-y += scsi.h -unifdef-y := scsi_ioctl.h sg.h + +unifdef-y += scsi_ioctl.h +unifdef-y += sg.h diff --git a/include/sound/Kbuild b/include/sound/Kbuild index 3a5a3df61496..fd054a344324 100644 --- a/include/sound/Kbuild +++ b/include/sound/Kbuild @@ -1,2 +1,10 @@ -header-y := asound_fm.h hdsp.h hdspm.h sfnt_info.h sscape_ioctl.h -unifdef-y := asequencer.h asound.h emu10k1.h sb16_csp.h +header-y += asound_fm.h +header-y += hdsp.h +header-y += hdspm.h +header-y += sfnt_info.h +header-y += sscape_ioctl.h + +unifdef-y += asequencer.h +unifdef-y += asound.h +unifdef-y += emu10k1.h +unifdef-y += sb16_csp.h diff --git a/include/video/Kbuild b/include/video/Kbuild index 76a60737cc15..a14f9c045b8c 100644 --- a/include/video/Kbuild +++ b/include/video/Kbuild @@ -1 +1 @@ -unifdef-y := sisfb.h +unifdef-y += sisfb.h diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c index 098c66846339..35aa3426c3fa 100644 --- a/net/ieee80211/ieee80211_crypt_ccmp.c +++ b/net/ieee80211/ieee80211_crypt_ccmp.c @@ -9,6 +9,7 @@ * more details. */ +#include <linux/err.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> @@ -48,7 +49,7 @@ struct ieee80211_ccmp_data { int key_idx; - struct crypto_tfm *tfm; + struct crypto_cipher *tfm; /* scratch buffers for virt_to_page() (crypto API) */ u8 tx_b0[AES_BLOCK_LEN], tx_b[AES_BLOCK_LEN], @@ -56,20 +57,10 @@ struct ieee80211_ccmp_data { u8 rx_b0[AES_BLOCK_LEN], rx_b[AES_BLOCK_LEN], rx_a[AES_BLOCK_LEN]; }; -static void ieee80211_ccmp_aes_encrypt(struct crypto_tfm *tfm, - const u8 pt[16], u8 ct[16]) +static inline void ieee80211_ccmp_aes_encrypt(struct crypto_cipher *tfm, + const u8 pt[16], u8 ct[16]) { - struct scatterlist src, dst; - - src.page = virt_to_page(pt); - src.offset = offset_in_page(pt); - src.length = AES_BLOCK_LEN; - - dst.page = virt_to_page(ct); - dst.offset = offset_in_page(ct); - dst.length = AES_BLOCK_LEN; - - crypto_cipher_encrypt(tfm, &dst, &src, AES_BLOCK_LEN); + crypto_cipher_encrypt_one(tfm, ct, pt); } static void *ieee80211_ccmp_init(int key_idx) @@ -81,10 +72,11 @@ static void *ieee80211_ccmp_init(int key_idx) goto fail; priv->key_idx = key_idx; - priv->tfm = crypto_alloc_tfm("aes", 0); - if (priv->tfm == NULL) { + priv->tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(priv->tfm)) { printk(KERN_DEBUG "ieee80211_crypt_ccmp: could not allocate " "crypto API aes\n"); + priv->tfm = NULL; goto fail; } @@ -93,7 +85,7 @@ static void *ieee80211_ccmp_init(int key_idx) fail: if (priv) { if (priv->tfm) - crypto_free_tfm(priv->tfm); + crypto_free_cipher(priv->tfm); kfree(priv); } @@ -104,7 +96,7 @@ static void ieee80211_ccmp_deinit(void *priv) { struct ieee80211_ccmp_data *_priv = priv; if (_priv && _priv->tfm) - crypto_free_tfm(_priv->tfm); + crypto_free_cipher(_priv->tfm); kfree(priv); } @@ -115,7 +107,7 @@ static inline void xor_block(u8 * b, u8 * a, size_t len) b[i] ^= a[i]; } -static void ccmp_init_blocks(struct crypto_tfm *tfm, +static void ccmp_init_blocks(struct crypto_cipher *tfm, struct ieee80211_hdr_4addr *hdr, u8 * pn, size_t dlen, u8 * b0, u8 * auth, u8 * s0) { @@ -398,7 +390,7 @@ static int ieee80211_ccmp_set_key(void *key, int len, u8 * seq, void *priv) { struct ieee80211_ccmp_data *data = priv; int keyidx; - struct crypto_tfm *tfm = 
data->tfm; + struct crypto_cipher *tfm = data->tfm; keyidx = data->key_idx; memset(data, 0, sizeof(*data)); diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c index f2df2f5b3e4c..259572dfd4f1 100644 --- a/net/ieee80211/ieee80211_crypt_tkip.c +++ b/net/ieee80211/ieee80211_crypt_tkip.c @@ -9,6 +9,7 @@ * more details. */ +#include <linux/err.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> @@ -52,10 +53,10 @@ struct ieee80211_tkip_data { int key_idx; - struct crypto_tfm *tx_tfm_arc4; - struct crypto_tfm *tx_tfm_michael; - struct crypto_tfm *rx_tfm_arc4; - struct crypto_tfm *rx_tfm_michael; + struct crypto_blkcipher *rx_tfm_arc4; + struct crypto_hash *rx_tfm_michael; + struct crypto_blkcipher *tx_tfm_arc4; + struct crypto_hash *tx_tfm_michael; /* scratch buffers for virt_to_page() (crypto API) */ u8 rx_hdr[16], tx_hdr[16]; @@ -87,31 +88,37 @@ static void *ieee80211_tkip_init(int key_idx) priv->key_idx = key_idx; - priv->tx_tfm_arc4 = crypto_alloc_tfm("arc4", 0); - if (priv->tx_tfm_arc4 == NULL) { + priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(priv->tx_tfm_arc4)) { printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate " "crypto API arc4\n"); + priv->tx_tfm_arc4 = NULL; goto fail; } - priv->tx_tfm_michael = crypto_alloc_tfm("michael_mic", 0); - if (priv->tx_tfm_michael == NULL) { + priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(priv->tx_tfm_michael)) { printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate " "crypto API michael_mic\n"); goto fail; } - priv->rx_tfm_arc4 = crypto_alloc_tfm("arc4", 0); - if (priv->rx_tfm_arc4 == NULL) { + priv->rx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(priv->rx_tfm_arc4)) { printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate " "crypto API arc4\n"); goto fail; } - priv->rx_tfm_michael = crypto_alloc_tfm("michael_mic", 0); - if (priv->rx_tfm_michael == NULL) { + priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(priv->rx_tfm_michael)) { printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate " "crypto API michael_mic\n"); + priv->rx_tfm_michael = NULL; goto fail; } @@ -120,13 +127,13 @@ static void *ieee80211_tkip_init(int key_idx) fail: if (priv) { if (priv->tx_tfm_michael) - crypto_free_tfm(priv->tx_tfm_michael); + crypto_free_hash(priv->tx_tfm_michael); if (priv->tx_tfm_arc4) - crypto_free_tfm(priv->tx_tfm_arc4); + crypto_free_blkcipher(priv->tx_tfm_arc4); if (priv->rx_tfm_michael) - crypto_free_tfm(priv->rx_tfm_michael); + crypto_free_hash(priv->rx_tfm_michael); if (priv->rx_tfm_arc4) - crypto_free_tfm(priv->rx_tfm_arc4); + crypto_free_blkcipher(priv->rx_tfm_arc4); kfree(priv); } @@ -138,13 +145,13 @@ static void ieee80211_tkip_deinit(void *priv) struct ieee80211_tkip_data *_priv = priv; if (_priv) { if (_priv->tx_tfm_michael) - crypto_free_tfm(_priv->tx_tfm_michael); + crypto_free_hash(_priv->tx_tfm_michael); if (_priv->tx_tfm_arc4) - crypto_free_tfm(_priv->tx_tfm_arc4); + crypto_free_blkcipher(_priv->tx_tfm_arc4); if (_priv->rx_tfm_michael) - crypto_free_tfm(_priv->rx_tfm_michael); + crypto_free_hash(_priv->rx_tfm_michael); if (_priv->rx_tfm_arc4) - crypto_free_tfm(_priv->rx_tfm_arc4); + crypto_free_blkcipher(_priv->rx_tfm_arc4); } kfree(priv); } @@ -344,6 +351,7 @@ static int ieee80211_tkip_hdr(struct sk_buff *skb, int hdr_len, static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void 
*priv) { struct ieee80211_tkip_data *tkey = priv; + struct blkcipher_desc desc = { .tfm = tkey->tx_tfm_arc4 }; int len; u8 rc4key[16], *pos, *icv; u32 crc; @@ -377,31 +385,17 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) icv[2] = crc >> 16; icv[3] = crc >> 24; - crypto_cipher_setkey(tkey->tx_tfm_arc4, rc4key, 16); + crypto_blkcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16); sg.page = virt_to_page(pos); sg.offset = offset_in_page(pos); sg.length = len + 4; - crypto_cipher_encrypt(tkey->tx_tfm_arc4, &sg, &sg, len + 4); - - return 0; -} - -/* - * deal with seq counter wrapping correctly. - * refer to timer_after() for jiffies wrapping handling - */ -static inline int tkip_replay_check(u32 iv32_n, u16 iv16_n, - u32 iv32_o, u16 iv16_o) -{ - if ((s32)iv32_n - (s32)iv32_o < 0 || - (iv32_n == iv32_o && iv16_n <= iv16_o)) - return 1; - return 0; + return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4); } static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct ieee80211_tkip_data *tkey = priv; + struct blkcipher_desc desc = { .tfm = tkey->rx_tfm_arc4 }; u8 rc4key[16]; u8 keyidx, *pos; u32 iv32; @@ -472,11 +466,18 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) plen = skb->len - hdr_len - 12; - crypto_cipher_setkey(tkey->rx_tfm_arc4, rc4key, 16); + crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16); sg.page = virt_to_page(pos); sg.offset = offset_in_page(pos); sg.length = plen + 4; - crypto_cipher_decrypt(tkey->rx_tfm_arc4, &sg, &sg, plen + 4); + if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) { + if (net_ratelimit()) { + printk(KERN_DEBUG ": TKIP: failed to decrypt " + "received packet from " MAC_FMT "\n", + MAC_ARG(hdr->addr2)); + } + return -7; + } crc = ~crc32_le(~0, pos, plen); icv[0] = crc; @@ -510,9 +511,10 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) return keyidx; } -static int michael_mic(struct crypto_tfm *tfm_michael, u8 * key, u8 * hdr, +static int michael_mic(struct crypto_hash *tfm_michael, u8 * key, u8 * hdr, u8 * data, size_t data_len, u8 * mic) { + struct hash_desc desc; struct scatterlist sg[2]; if (tfm_michael == NULL) { @@ -527,12 +529,12 @@ static int michael_mic(struct crypto_tfm *tfm_michael, u8 * key, u8 * hdr, sg[1].offset = offset_in_page(data); sg[1].length = data_len; - crypto_digest_init(tfm_michael); - crypto_digest_setkey(tfm_michael, key, 8); - crypto_digest_update(tfm_michael, sg, 2); - crypto_digest_final(tfm_michael, mic); + if (crypto_hash_setkey(tfm_michael, key, 8)) + return -1; - return 0; + desc.tfm = tfm_michael; + desc.flags = 0; + return crypto_hash_digest(&desc, sg, data_len + 16, mic); } static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr) @@ -656,10 +658,10 @@ static int ieee80211_tkip_set_key(void *key, int len, u8 * seq, void *priv) { struct ieee80211_tkip_data *tkey = priv; int keyidx; - struct crypto_tfm *tfm = tkey->tx_tfm_michael; - struct crypto_tfm *tfm2 = tkey->tx_tfm_arc4; - struct crypto_tfm *tfm3 = tkey->rx_tfm_michael; - struct crypto_tfm *tfm4 = tkey->rx_tfm_arc4; + struct crypto_hash *tfm = tkey->tx_tfm_michael; + struct crypto_blkcipher *tfm2 = tkey->tx_tfm_arc4; + struct crypto_hash *tfm3 = tkey->rx_tfm_michael; + struct crypto_blkcipher *tfm4 = tkey->rx_tfm_arc4; keyidx = tkey->key_idx; memset(tkey, 0, sizeof(*tkey)); diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c index b435b28857ed..9eeec13c28b0 100644 --- 
a/net/ieee80211/ieee80211_crypt_wep.c +++ b/net/ieee80211/ieee80211_crypt_wep.c @@ -9,6 +9,7 @@ * more details. */ +#include <linux/err.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> @@ -32,8 +33,8 @@ struct prism2_wep_data { u8 key[WEP_KEY_LEN + 1]; u8 key_len; u8 key_idx; - struct crypto_tfm *tx_tfm; - struct crypto_tfm *rx_tfm; + struct crypto_blkcipher *tx_tfm; + struct crypto_blkcipher *rx_tfm; }; static void *prism2_wep_init(int keyidx) @@ -45,15 +46,16 @@ static void *prism2_wep_init(int keyidx) goto fail; priv->key_idx = keyidx; - priv->tx_tfm = crypto_alloc_tfm("arc4", 0); - if (priv->tx_tfm == NULL) { + priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(priv->tx_tfm)) { printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate " "crypto API arc4\n"); + priv->tx_tfm = NULL; goto fail; } - priv->rx_tfm = crypto_alloc_tfm("arc4", 0); - if (priv->rx_tfm == NULL) { + priv->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(priv->rx_tfm)) { printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate " "crypto API arc4\n"); goto fail; @@ -66,9 +68,9 @@ static void *prism2_wep_init(int keyidx) fail: if (priv) { if (priv->tx_tfm) - crypto_free_tfm(priv->tx_tfm); + crypto_free_blkcipher(priv->tx_tfm); if (priv->rx_tfm) - crypto_free_tfm(priv->rx_tfm); + crypto_free_blkcipher(priv->rx_tfm); kfree(priv); } return NULL; @@ -79,9 +81,9 @@ static void prism2_wep_deinit(void *priv) struct prism2_wep_data *_priv = priv; if (_priv) { if (_priv->tx_tfm) - crypto_free_tfm(_priv->tx_tfm); + crypto_free_blkcipher(_priv->tx_tfm); if (_priv->rx_tfm) - crypto_free_tfm(_priv->rx_tfm); + crypto_free_blkcipher(_priv->rx_tfm); } kfree(priv); } @@ -133,6 +135,7 @@ static int prism2_wep_build_iv(struct sk_buff *skb, int hdr_len, static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct prism2_wep_data *wep = priv; + struct blkcipher_desc desc = { .tfm = wep->tx_tfm }; u32 crc, klen, len; u8 *pos, *icv; struct scatterlist sg; @@ -164,13 +167,11 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) icv[2] = crc >> 16; icv[3] = crc >> 24; - crypto_cipher_setkey(wep->tx_tfm, key, klen); + crypto_blkcipher_setkey(wep->tx_tfm, key, klen); sg.page = virt_to_page(pos); sg.offset = offset_in_page(pos); sg.length = len + 4; - crypto_cipher_encrypt(wep->tx_tfm, &sg, &sg, len + 4); - - return 0; + return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4); } /* Perform WEP decryption on given buffer. 
Buffer includes whole WEP part of @@ -183,6 +184,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct prism2_wep_data *wep = priv; + struct blkcipher_desc desc = { .tfm = wep->rx_tfm }; u32 crc, klen, plen; u8 key[WEP_KEY_LEN + 3]; u8 keyidx, *pos, icv[4]; @@ -207,11 +209,12 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv) /* Apply RC4 to data and compute CRC32 over decrypted data */ plen = skb->len - hdr_len - 8; - crypto_cipher_setkey(wep->rx_tfm, key, klen); + crypto_blkcipher_setkey(wep->rx_tfm, key, klen); sg.page = virt_to_page(pos); sg.offset = offset_in_page(pos); sg.length = plen + 4; - crypto_cipher_decrypt(wep->rx_tfm, &sg, &sg, plen + 4); + if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) + return -7; crc = ~crc32_le(~0, pos, plen); icv[0] = crc; diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 8514106761b0..3b5d504a74be 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -386,6 +386,7 @@ config INET_ESP select CRYPTO select CRYPTO_HMAC select CRYPTO_MD5 + select CRYPTO_CBC select CRYPTO_SHA1 select CRYPTO_DES ---help--- diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index 1366bc6ce6a5..2b98943e6b02 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c @@ -1,3 +1,4 @@ +#include <linux/err.h> #include <linux/module.h> #include <net/ip.h> #include <net/xfrm.h> @@ -97,7 +98,10 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb) ah->spi = x->id.spi; ah->seq_no = htonl(++x->replay.oseq); xfrm_aevent_doreplay(x); - ahp->icv(ahp, skb, ah->auth_data); + err = ah_mac_digest(ahp, skb, ah->auth_data); + if (err) + goto error; + memcpy(ah->auth_data, ahp->work_icv, ahp->icv_trunc_len); top_iph->tos = iph->tos; top_iph->ttl = iph->ttl; @@ -119,6 +123,7 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb) { int ah_hlen; int ihl; + int err = -EINVAL; struct iphdr *iph; struct ip_auth_hdr *ah; struct ah_data *ahp; @@ -166,8 +171,11 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb) memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len); skb_push(skb, ihl); - ahp->icv(ahp, skb, ah->auth_data); - if (memcmp(ah->auth_data, auth_data, ahp->icv_trunc_len)) { + err = ah_mac_digest(ahp, skb, ah->auth_data); + if (err) + goto out; + err = -EINVAL; + if (memcmp(ahp->work_icv, auth_data, ahp->icv_trunc_len)) { x->stats.integrity_failed++; goto out; } @@ -179,7 +187,7 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb) return 0; out: - return -EINVAL; + return err; } static void ah4_err(struct sk_buff *skb, u32 info) @@ -204,6 +212,7 @@ static int ah_init_state(struct xfrm_state *x) { struct ah_data *ahp = NULL; struct xfrm_algo_desc *aalg_desc; + struct crypto_hash *tfm; if (!x->aalg) goto error; @@ -221,24 +230,27 @@ static int ah_init_state(struct xfrm_state *x) ahp->key = x->aalg->alg_key; ahp->key_len = (x->aalg->alg_key_len+7)/8; - ahp->tfm = crypto_alloc_tfm(x->aalg->alg_name, 0); - if (!ahp->tfm) + tfm = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) + goto error; + + ahp->tfm = tfm; + if (crypto_hash_setkey(tfm, ahp->key, ahp->key_len)) goto error; - ahp->icv = ah_hmac_digest; /* * Lookup the algorithm description maintained by xfrm_algo, * verify crypto transform properties, and store information * we need for AH processing. This lookup cannot fail here - * after a successful crypto_alloc_tfm(). + * after a successful crypto_alloc_hash(). 
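
The added select CRYPTO_CBC above follows from the naming change: cipher modes are now templates built separately from the raw cipher, and the compat string added to xfrm_algo_desc earlier in this patch keeps the old names working from userspace. A minimal sketch of the renamed allocation, with the old-style call shown for contrast:

    #include <linux/crypto.h>
    #include <linux/err.h>

    struct crypto_blkcipher *tfm;

    /* was: crypto_alloc_tfm("des3_ede", CRYPTO_TFM_MODE_CBC) */
    tfm = crypto_alloc_blkcipher("cbc(des3_ede)", 0, CRYPTO_ALG_ASYNC);
    if (IS_ERR(tfm))
        fail();
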
*/ aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); BUG_ON(!aalg_desc); if (aalg_desc->uinfo.auth.icv_fullbits/8 != - crypto_tfm_alg_digestsize(ahp->tfm)) { + crypto_hash_digestsize(tfm)) { printk(KERN_INFO "AH: %s digestsize %u != %hu\n", - x->aalg->alg_name, crypto_tfm_alg_digestsize(ahp->tfm), + x->aalg->alg_name, crypto_hash_digestsize(tfm), aalg_desc->uinfo.auth.icv_fullbits/8); goto error; } @@ -262,7 +274,7 @@ static int ah_init_state(struct xfrm_state *x) error: if (ahp) { kfree(ahp->work_icv); - crypto_free_tfm(ahp->tfm); + crypto_free_hash(ahp->tfm); kfree(ahp); } return -EINVAL; @@ -277,7 +289,7 @@ static void ah_destroy(struct xfrm_state *x) kfree(ahp->work_icv); ahp->work_icv = NULL; - crypto_free_tfm(ahp->tfm); + crypto_free_hash(ahp->tfm); ahp->tfm = NULL; kfree(ahp); } diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index fc2f8ce441de..b428489f6ccd 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -1,3 +1,4 @@ +#include <linux/err.h> #include <linux/module.h> #include <net/ip.h> #include <net/xfrm.h> @@ -16,7 +17,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) int err; struct iphdr *top_iph; struct ip_esp_hdr *esph; - struct crypto_tfm *tfm; + struct crypto_blkcipher *tfm; + struct blkcipher_desc desc; struct esp_data *esp; struct sk_buff *trailer; int blksize; @@ -36,7 +38,9 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) esp = x->data; alen = esp->auth.icv_trunc_len; tfm = esp->conf.tfm; - blksize = ALIGN(crypto_tfm_alg_blocksize(tfm), 4); + desc.tfm = tfm; + desc.flags = 0; + blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4); clen = ALIGN(clen + 2, blksize); if (esp->conf.padlen) clen = ALIGN(clen, esp->conf.padlen); @@ -92,7 +96,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) xfrm_aevent_doreplay(x); if (esp->conf.ivlen) - crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); + crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen); do { struct scatterlist *sg = &esp->sgbuf[0]; @@ -103,26 +107,27 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) goto error; } skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen); - crypto_cipher_encrypt(tfm, sg, sg, clen); + err = crypto_blkcipher_encrypt(&desc, sg, sg, clen); if (unlikely(sg != &esp->sgbuf[0])) kfree(sg); } while (0); + if (unlikely(err)) + goto error; + if (esp->conf.ivlen) { - memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); - crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); + memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen); + crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen); } if (esp->auth.icv_full_len) { - esp->auth.icv(esp, skb, (u8*)esph-skb->data, - sizeof(struct ip_esp_hdr) + esp->conf.ivlen+clen, trailer->tail); - pskb_put(skb, trailer, alen); + err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data, + sizeof(*esph) + esp->conf.ivlen + clen); + memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen); } ip_send_check(top_iph); - err = 0; - error: return err; } @@ -137,8 +142,10 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) struct iphdr *iph; struct ip_esp_hdr *esph; struct esp_data *esp = x->data; + struct crypto_blkcipher *tfm = esp->conf.tfm; + struct blkcipher_desc desc = { .tfm = tfm }; struct sk_buff *trailer; - int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4); + int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4); int alen = esp->auth.icv_trunc_len; int elen = 
skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen; int nfrags; @@ -146,6 +153,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) u8 nexthdr[2]; struct scatterlist *sg; int padlen; + int err; if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr))) goto out; @@ -155,15 +163,16 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) /* If integrity check is required, do this. */ if (esp->auth.icv_full_len) { - u8 sum[esp->auth.icv_full_len]; - u8 sum1[alen]; - - esp->auth.icv(esp, skb, 0, skb->len-alen, sum); + u8 sum[alen]; - if (skb_copy_bits(skb, skb->len-alen, sum1, alen)) + err = esp_mac_digest(esp, skb, 0, skb->len - alen); + if (err) + goto out; + + if (skb_copy_bits(skb, skb->len - alen, sum, alen)) BUG(); - if (unlikely(memcmp(sum, sum1, alen))) { + if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) { x->stats.integrity_failed++; goto out; } @@ -178,7 +187,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) /* Get ivec. This can be wrong, check against another impls. */ if (esp->conf.ivlen) - crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm)); + crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen); sg = &esp->sgbuf[0]; @@ -188,9 +197,11 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) goto out; } skb_to_sgvec(skb, sg, sizeof(struct ip_esp_hdr) + esp->conf.ivlen, elen); - crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen); + err = crypto_blkcipher_decrypt(&desc, sg, sg, elen); if (unlikely(sg != &esp->sgbuf[0])) kfree(sg); + if (unlikely(err)) + return err; if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2)) BUG(); @@ -254,7 +265,7 @@ out: static u32 esp4_get_max_size(struct xfrm_state *x, int mtu) { struct esp_data *esp = x->data; - u32 blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4); + u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4); if (x->props.mode) { mtu = ALIGN(mtu + 2, blksize); @@ -293,11 +304,11 @@ static void esp_destroy(struct xfrm_state *x) if (!esp) return; - crypto_free_tfm(esp->conf.tfm); + crypto_free_blkcipher(esp->conf.tfm); esp->conf.tfm = NULL; kfree(esp->conf.ivec); esp->conf.ivec = NULL; - crypto_free_tfm(esp->auth.tfm); + crypto_free_hash(esp->auth.tfm); esp->auth.tfm = NULL; kfree(esp->auth.work_icv); esp->auth.work_icv = NULL; @@ -307,6 +318,7 @@ static void esp_destroy(struct xfrm_state *x) static int esp_init_state(struct xfrm_state *x) { struct esp_data *esp = NULL; + struct crypto_blkcipher *tfm; /* null auth and encryption can have zero length keys */ if (x->aalg) { @@ -322,22 +334,27 @@ static int esp_init_state(struct xfrm_state *x) if (x->aalg) { struct xfrm_algo_desc *aalg_desc; + struct crypto_hash *hash; esp->auth.key = x->aalg->alg_key; esp->auth.key_len = (x->aalg->alg_key_len+7)/8; - esp->auth.tfm = crypto_alloc_tfm(x->aalg->alg_name, 0); - if (esp->auth.tfm == NULL) + hash = crypto_alloc_hash(x->aalg->alg_name, 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(hash)) + goto error; + + esp->auth.tfm = hash; + if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len)) goto error; - esp->auth.icv = esp_hmac_digest; aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); BUG_ON(!aalg_desc); if (aalg_desc->uinfo.auth.icv_fullbits/8 != - crypto_tfm_alg_digestsize(esp->auth.tfm)) { + crypto_hash_digestsize(hash)) { NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n", x->aalg->alg_name, - crypto_tfm_alg_digestsize(esp->auth.tfm), + crypto_hash_digestsize(hash), aalg_desc->uinfo.auth.icv_fullbits/8); goto 
error; } @@ -351,13 +368,11 @@ static int esp_init_state(struct xfrm_state *x) } esp->conf.key = x->ealg->alg_key; esp->conf.key_len = (x->ealg->alg_key_len+7)/8; - if (x->props.ealgo == SADB_EALG_NULL) - esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_ECB); - else - esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_CBC); - if (esp->conf.tfm == NULL) + tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) goto error; - esp->conf.ivlen = crypto_tfm_alg_ivsize(esp->conf.tfm); + esp->conf.tfm = tfm; + esp->conf.ivlen = crypto_blkcipher_ivsize(tfm); esp->conf.padlen = 0; if (esp->conf.ivlen) { esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL); @@ -365,7 +380,7 @@ static int esp_init_state(struct xfrm_state *x) goto error; get_random_bytes(esp->conf.ivec, esp->conf.ivlen); } - if (crypto_cipher_setkey(esp->conf.tfm, esp->conf.key, esp->conf.key_len)) + if (crypto_blkcipher_setkey(tfm, esp->conf.key, esp->conf.key_len)) goto error; x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen; if (x->props.mode) diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c index a0c28b2b756e..5bb9c9f03fb6 100644 --- a/net/ipv4/ipcomp.c +++ b/net/ipv4/ipcomp.c @@ -32,7 +32,7 @@ struct ipcomp_tfms { struct list_head list; - struct crypto_tfm **tfms; + struct crypto_comp **tfms; int users; }; @@ -46,7 +46,7 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb) int err, plen, dlen; struct ipcomp_data *ipcd = x->data; u8 *start, *scratch; - struct crypto_tfm *tfm; + struct crypto_comp *tfm; int cpu; plen = skb->len; @@ -107,7 +107,7 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb) struct iphdr *iph = skb->nh.iph; struct ipcomp_data *ipcd = x->data; u8 *start, *scratch; - struct crypto_tfm *tfm; + struct crypto_comp *tfm; int cpu; ihlen = iph->ihl * 4; @@ -302,7 +302,7 @@ static void **ipcomp_alloc_scratches(void) return scratches; } -static void ipcomp_free_tfms(struct crypto_tfm **tfms) +static void ipcomp_free_tfms(struct crypto_comp **tfms) { struct ipcomp_tfms *pos; int cpu; @@ -324,28 +324,28 @@ static void ipcomp_free_tfms(struct crypto_tfm **tfms) return; for_each_possible_cpu(cpu) { - struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu); - crypto_free_tfm(tfm); + struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu); + crypto_free_comp(tfm); } free_percpu(tfms); } -static struct crypto_tfm **ipcomp_alloc_tfms(const char *alg_name) +static struct crypto_comp **ipcomp_alloc_tfms(const char *alg_name) { struct ipcomp_tfms *pos; - struct crypto_tfm **tfms; + struct crypto_comp **tfms; int cpu; /* This can be any valid CPU ID so we don't need locking. 
*/
 	cpu = raw_smp_processor_id();
 	list_for_each_entry(pos, &ipcomp_tfms_list, list) {
-		struct crypto_tfm *tfm;
+		struct crypto_comp *tfm;

 		tfms = pos->tfms;
 		tfm = *per_cpu_ptr(tfms, cpu);
-		if (!strcmp(crypto_tfm_alg_name(tfm), alg_name)) {
+		if (!strcmp(crypto_comp_name(tfm), alg_name)) {
 			pos->users++;
 			return tfms;
 		}
@@ -359,12 +359,13 @@ static struct crypto_tfm **ipcomp_alloc_tfms(const char *alg_name)
 	INIT_LIST_HEAD(&pos->list);
 	list_add(&pos->list, &ipcomp_tfms_list);

-	pos->tfms = tfms = alloc_percpu(struct crypto_tfm *);
+	pos->tfms = tfms = alloc_percpu(struct crypto_comp *);
 	if (!tfms)
 		goto error;

 	for_each_possible_cpu(cpu) {
-		struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0);
-		if (!tfm)
+		struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
+							    CRYPTO_ALG_ASYNC);
+		if (IS_ERR(tfm))
 			goto error;
 		*per_cpu_ptr(tfms, cpu) = tfm;
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index e923d4dea418..0ba06c0c5d39 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -77,6 +77,7 @@ config INET6_ESP
 	select CRYPTO
 	select CRYPTO_HMAC
 	select CRYPTO_MD5
+	select CRYPTO_CBC
 	select CRYPTO_SHA1
 	select CRYPTO_DES
 	---help---
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 9d4831bd4335..00ffa7bc6c9f 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -213,7 +213,10 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
 	ah->spi = x->id.spi;
 	ah->seq_no = htonl(++x->replay.oseq);
 	xfrm_aevent_doreplay(x);
-	ahp->icv(ahp, skb, ah->auth_data);
+	err = ah_mac_digest(ahp, skb, ah->auth_data);
+	if (err)
+		goto error_free_iph;
+	memcpy(ah->auth_data, ahp->work_icv, ahp->icv_trunc_len);

 	err = 0;

@@ -251,6 +254,7 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
 	u16 hdr_len;
 	u16 ah_hlen;
 	int nexthdr;
+	int err = -EINVAL;

 	if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr)))
 		goto out;
@@ -292,8 +296,11 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
 	memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
 	memset(ah->auth_data, 0, ahp->icv_trunc_len);
 	skb_push(skb, hdr_len);
-	ahp->icv(ahp, skb, ah->auth_data);
-	if (memcmp(ah->auth_data, auth_data, ahp->icv_trunc_len)) {
+	err = ah_mac_digest(ahp, skb, ah->auth_data);
+	if (err)
+		goto free_out;
+	err = -EINVAL;
+	if (memcmp(ahp->work_icv, auth_data, ahp->icv_trunc_len)) {
 		LIMIT_NETDEBUG(KERN_WARNING "ipsec ah authentication error\n");
 		x->stats.integrity_failed++;
 		goto free_out;
@@ -310,7 +317,7 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
 free_out:
 	kfree(tmp_hdr);
 out:
-	return -EINVAL;
+	return err;
 }

 static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
@@ -338,6 +345,7 @@ static int ah6_init_state(struct xfrm_state *x)
 {
 	struct ah_data *ahp = NULL;
 	struct xfrm_algo_desc *aalg_desc;
+	struct crypto_hash *tfm;

 	if (!x->aalg)
 		goto error;
@@ -355,24 +363,27 @@ static int ah6_init_state(struct xfrm_state *x)

 	ahp->key = x->aalg->alg_key;
 	ahp->key_len = (x->aalg->alg_key_len+7)/8;
-	ahp->tfm = crypto_alloc_tfm(x->aalg->alg_name, 0);
-	if (!ahp->tfm)
+	tfm = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm))
+		goto error;
+
+	ahp->tfm = tfm;
+	if (crypto_hash_setkey(tfm, ahp->key, ahp->key_len))
 		goto error;
-	ahp->icv = ah_hmac_digest;

 	/*
 	 * Lookup the algorithm description maintained by xfrm_algo,
 	 * verify crypto transform properties, and store information
 	 * we need for AH processing.  This lookup cannot fail here
-	 * after a successful crypto_alloc_tfm().
+	 * after a successful crypto_alloc_hash().
*/ aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); BUG_ON(!aalg_desc); if (aalg_desc->uinfo.auth.icv_fullbits/8 != - crypto_tfm_alg_digestsize(ahp->tfm)) { + crypto_hash_digestsize(tfm)) { printk(KERN_INFO "AH: %s digestsize %u != %hu\n", - x->aalg->alg_name, crypto_tfm_alg_digestsize(ahp->tfm), + x->aalg->alg_name, crypto_hash_digestsize(tfm), aalg_desc->uinfo.auth.icv_fullbits/8); goto error; } @@ -396,7 +407,7 @@ static int ah6_init_state(struct xfrm_state *x) error: if (ahp) { kfree(ahp->work_icv); - crypto_free_tfm(ahp->tfm); + crypto_free_hash(ahp->tfm); kfree(ahp); } return -EINVAL; @@ -411,7 +422,7 @@ static void ah6_destroy(struct xfrm_state *x) kfree(ahp->work_icv); ahp->work_icv = NULL; - crypto_free_tfm(ahp->tfm); + crypto_free_hash(ahp->tfm); ahp->tfm = NULL; kfree(ahp); } diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index a278d5e862fe..2ebfd281e721 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -24,6 +24,7 @@ * This file is derived from net/ipv4/esp.c */ +#include <linux/err.h> #include <linux/module.h> #include <net/ip.h> #include <net/xfrm.h> @@ -44,7 +45,8 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) int hdr_len; struct ipv6hdr *top_iph; struct ipv6_esp_hdr *esph; - struct crypto_tfm *tfm; + struct crypto_blkcipher *tfm; + struct blkcipher_desc desc; struct esp_data *esp; struct sk_buff *trailer; int blksize; @@ -67,7 +69,9 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) alen = esp->auth.icv_trunc_len; tfm = esp->conf.tfm; - blksize = ALIGN(crypto_tfm_alg_blocksize(tfm), 4); + desc.tfm = tfm; + desc.flags = 0; + blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4); clen = ALIGN(clen + 2, blksize); if (esp->conf.padlen) clen = ALIGN(clen, esp->conf.padlen); @@ -96,7 +100,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) xfrm_aevent_doreplay(x); if (esp->conf.ivlen) - crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); + crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen); do { struct scatterlist *sg = &esp->sgbuf[0]; @@ -107,24 +111,25 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) goto error; } skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen); - crypto_cipher_encrypt(tfm, sg, sg, clen); + err = crypto_blkcipher_encrypt(&desc, sg, sg, clen); if (unlikely(sg != &esp->sgbuf[0])) kfree(sg); } while (0); + if (unlikely(err)) + goto error; + if (esp->conf.ivlen) { - memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); - crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm)); + memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen); + crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen); } if (esp->auth.icv_full_len) { - esp->auth.icv(esp, skb, (u8*)esph-skb->data, - sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen+clen, trailer->tail); - pskb_put(skb, trailer, alen); + err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data, + sizeof(*esph) + esp->conf.ivlen + clen); + memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen); } - err = 0; - error: return err; } @@ -134,8 +139,10 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) struct ipv6hdr *iph; struct ipv6_esp_hdr *esph; struct esp_data *esp = x->data; + struct crypto_blkcipher *tfm = esp->conf.tfm; + struct blkcipher_desc desc = { .tfm = tfm }; struct sk_buff *trailer; - int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4); + int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4); int alen = 
esp->auth.icv_trunc_len; int elen = skb->len - sizeof(struct ipv6_esp_hdr) - esp->conf.ivlen - alen; @@ -155,15 +162,16 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) /* If integrity check is required, do this. */ if (esp->auth.icv_full_len) { - u8 sum[esp->auth.icv_full_len]; - u8 sum1[alen]; + u8 sum[alen]; - esp->auth.icv(esp, skb, 0, skb->len-alen, sum); + ret = esp_mac_digest(esp, skb, 0, skb->len - alen); + if (ret) + goto out; - if (skb_copy_bits(skb, skb->len-alen, sum1, alen)) + if (skb_copy_bits(skb, skb->len - alen, sum, alen)) BUG(); - if (unlikely(memcmp(sum, sum1, alen))) { + if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) { x->stats.integrity_failed++; ret = -EINVAL; goto out; @@ -182,7 +190,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) /* Get ivec. This can be wrong, check against another impls. */ if (esp->conf.ivlen) - crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm)); + crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen); { u8 nexthdr[2]; @@ -197,9 +205,11 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) } } skb_to_sgvec(skb, sg, sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen, elen); - crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen); + ret = crypto_blkcipher_decrypt(&desc, sg, sg, elen); if (unlikely(sg != &esp->sgbuf[0])) kfree(sg); + if (unlikely(ret)) + goto out; if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2)) BUG(); @@ -225,7 +235,7 @@ out: static u32 esp6_get_max_size(struct xfrm_state *x, int mtu) { struct esp_data *esp = x->data; - u32 blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4); + u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4); if (x->props.mode) { mtu = ALIGN(mtu + 2, blksize); @@ -266,11 +276,11 @@ static void esp6_destroy(struct xfrm_state *x) if (!esp) return; - crypto_free_tfm(esp->conf.tfm); + crypto_free_blkcipher(esp->conf.tfm); esp->conf.tfm = NULL; kfree(esp->conf.ivec); esp->conf.ivec = NULL; - crypto_free_tfm(esp->auth.tfm); + crypto_free_hash(esp->auth.tfm); esp->auth.tfm = NULL; kfree(esp->auth.work_icv); esp->auth.work_icv = NULL; @@ -280,6 +290,7 @@ static void esp6_destroy(struct xfrm_state *x) static int esp6_init_state(struct xfrm_state *x) { struct esp_data *esp = NULL; + struct crypto_blkcipher *tfm; /* null auth and encryption can have zero length keys */ if (x->aalg) { @@ -298,24 +309,29 @@ static int esp6_init_state(struct xfrm_state *x) if (x->aalg) { struct xfrm_algo_desc *aalg_desc; + struct crypto_hash *hash; esp->auth.key = x->aalg->alg_key; esp->auth.key_len = (x->aalg->alg_key_len+7)/8; - esp->auth.tfm = crypto_alloc_tfm(x->aalg->alg_name, 0); - if (esp->auth.tfm == NULL) + hash = crypto_alloc_hash(x->aalg->alg_name, 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(hash)) + goto error; + + esp->auth.tfm = hash; + if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len)) goto error; - esp->auth.icv = esp_hmac_digest; aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0); BUG_ON(!aalg_desc); if (aalg_desc->uinfo.auth.icv_fullbits/8 != - crypto_tfm_alg_digestsize(esp->auth.tfm)) { - printk(KERN_INFO "ESP: %s digestsize %u != %hu\n", - x->aalg->alg_name, - crypto_tfm_alg_digestsize(esp->auth.tfm), - aalg_desc->uinfo.auth.icv_fullbits/8); - goto error; + crypto_hash_digestsize(hash)) { + NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n", + x->aalg->alg_name, + crypto_hash_digestsize(hash), + aalg_desc->uinfo.auth.icv_fullbits/8); + goto error; } esp->auth.icv_full_len = 
aalg_desc->uinfo.auth.icv_fullbits/8; @@ -327,13 +343,11 @@ static int esp6_init_state(struct xfrm_state *x) } esp->conf.key = x->ealg->alg_key; esp->conf.key_len = (x->ealg->alg_key_len+7)/8; - if (x->props.ealgo == SADB_EALG_NULL) - esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_ECB); - else - esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_CBC); - if (esp->conf.tfm == NULL) + tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) goto error; - esp->conf.ivlen = crypto_tfm_alg_ivsize(esp->conf.tfm); + esp->conf.tfm = tfm; + esp->conf.ivlen = crypto_blkcipher_ivsize(tfm); esp->conf.padlen = 0; if (esp->conf.ivlen) { esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL); @@ -341,7 +355,7 @@ static int esp6_init_state(struct xfrm_state *x) goto error; get_random_bytes(esp->conf.ivec, esp->conf.ivlen); } - if (crypto_cipher_setkey(esp->conf.tfm, esp->conf.key, esp->conf.key_len)) + if (crypto_blkcipher_setkey(tfm, esp->conf.key, esp->conf.key_len)) goto error; x->props.header_len = sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen; if (x->props.mode) diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c index 7e4d1c17bfbc..a81e9e9d93bd 100644 --- a/net/ipv6/ipcomp6.c +++ b/net/ipv6/ipcomp6.c @@ -53,7 +53,7 @@ struct ipcomp6_tfms { struct list_head list; - struct crypto_tfm **tfms; + struct crypto_comp **tfms; int users; }; @@ -70,7 +70,7 @@ static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb) int plen, dlen; struct ipcomp_data *ipcd = x->data; u8 *start, *scratch; - struct crypto_tfm *tfm; + struct crypto_comp *tfm; int cpu; if (skb_linearize_cow(skb)) @@ -129,7 +129,7 @@ static int ipcomp6_output(struct xfrm_state *x, struct sk_buff *skb) struct ipcomp_data *ipcd = x->data; int plen, dlen; u8 *start, *scratch; - struct crypto_tfm *tfm; + struct crypto_comp *tfm; int cpu; hdr_len = skb->h.raw - skb->data; @@ -301,7 +301,7 @@ static void **ipcomp6_alloc_scratches(void) return scratches; } -static void ipcomp6_free_tfms(struct crypto_tfm **tfms) +static void ipcomp6_free_tfms(struct crypto_comp **tfms) { struct ipcomp6_tfms *pos; int cpu; @@ -323,28 +323,28 @@ static void ipcomp6_free_tfms(struct crypto_tfm **tfms) return; for_each_possible_cpu(cpu) { - struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu); - crypto_free_tfm(tfm); + struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu); + crypto_free_comp(tfm); } free_percpu(tfms); } -static struct crypto_tfm **ipcomp6_alloc_tfms(const char *alg_name) +static struct crypto_comp **ipcomp6_alloc_tfms(const char *alg_name) { struct ipcomp6_tfms *pos; - struct crypto_tfm **tfms; + struct crypto_comp **tfms; int cpu; /* This can be any valid CPU ID so we don't need locking. 
*/
 	cpu = raw_smp_processor_id();
 	list_for_each_entry(pos, &ipcomp6_tfms_list, list) {
-		struct crypto_tfm *tfm;
+		struct crypto_comp *tfm;

 		tfms = pos->tfms;
 		tfm = *per_cpu_ptr(tfms, cpu);
-		if (!strcmp(crypto_tfm_alg_name(tfm), alg_name)) {
+		if (!strcmp(crypto_comp_name(tfm), alg_name)) {
 			pos->users++;
 			return tfms;
 		}
@@ -358,12 +358,13 @@ static struct crypto_tfm **ipcomp6_alloc_tfms(const char *alg_name)
 	INIT_LIST_HEAD(&pos->list);
 	list_add(&pos->list, &ipcomp6_tfms_list);

-	pos->tfms = tfms = alloc_percpu(struct crypto_tfm *);
+	pos->tfms = tfms = alloc_percpu(struct crypto_comp *);
 	if (!tfms)
 		goto error;

 	for_each_possible_cpu(cpu) {
-		struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0);
-		if (!tfm)
+		struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
+							    CRYPTO_ALG_ASYNC);
+		if (IS_ERR(tfm))
 			goto error;
 		*per_cpu_ptr(tfms, cpu) = tfm;
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index ffda1d680529..35c49ff2d062 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -173,7 +173,7 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
 	SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);

 	/* Free up the HMAC transform. */
-	sctp_crypto_free_tfm(sctp_sk(ep->base.sk)->hmac);
+	crypto_free_hash(sctp_sk(ep->base.sk)->hmac);

 	/* Cleanup. */
 	sctp_inq_free(&ep->base.inqueue);
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 17b509282cf2..7745bdea7817 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1282,10 +1282,8 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,

 	retval = kmalloc(*cookie_len, GFP_ATOMIC);

-	if (!retval) {
-		*cookie_len = 0;
+	if (!retval)
 		goto nodata;
-	}

 	/* Clear this memory since we are sending this data structure
 	 * out on the network.
@@ -1321,19 +1319,29 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
 			  ntohs(init_chunk->chunk_hdr->length), raw_addrs, addrs_len);

 	if (sctp_sk(ep->base.sk)->hmac) {
+		struct hash_desc desc;
+
 		/* Sign the message. */
 		sg.page = virt_to_page(&cookie->c);
 		sg.offset = (unsigned long)(&cookie->c) % PAGE_SIZE;
 		sg.length = bodysize;
 		keylen = SCTP_SECRET_SIZE;
 		key = (char *)ep->secret_key[ep->current_key];
+		desc.tfm = sctp_sk(ep->base.sk)->hmac;
+		desc.flags = 0;

-		sctp_crypto_hmac(sctp_sk(ep->base.sk)->hmac, key, &keylen,
-				 &sg, 1, cookie->signature);
+		if (crypto_hash_setkey(desc.tfm, key, keylen) ||
+		    crypto_hash_digest(&desc, &sg, bodysize, cookie->signature))
+			goto free_cookie;
 	}

-nodata:
 	return retval;
+
+free_cookie:
+	kfree(retval);
+nodata:
+	*cookie_len = 0;
+	return NULL;
 }

 /* Unpack the cookie from COOKIE ECHO chunk, recreating the association. */
@@ -1354,6 +1362,7 @@ struct sctp_association *sctp_unpack_cookie(
 	sctp_scope_t scope;
 	struct sk_buff *skb = chunk->skb;
 	struct timeval tv;
+	struct hash_desc desc;

 	/* Header size is static data prior to the actual cookie, including
 	 * any padding.
@@ -1389,17 +1398,25 @@ struct sctp_association *sctp_unpack_cookie(
 	sg.offset = (unsigned long)(bear_cookie) % PAGE_SIZE;
 	sg.length = bodysize;
 	key = (char *)ep->secret_key[ep->current_key];
+	desc.tfm = sctp_sk(ep->base.sk)->hmac;
+	desc.flags = 0;

 	memset(digest, 0x00, SCTP_SIGNATURE_SIZE);
-	sctp_crypto_hmac(sctp_sk(ep->base.sk)->hmac, key, &keylen, &sg,
-			 1, digest);
+	if (crypto_hash_setkey(desc.tfm, key, keylen) ||
+	    crypto_hash_digest(&desc, &sg, bodysize, digest)) {
+		*error = -SCTP_IERROR_NOMEM;
+		goto fail;
+	}

 	if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
 		/* Try the previous key.
*/
 		key = (char *)ep->secret_key[ep->last_key];
 		memset(digest, 0x00, SCTP_SIGNATURE_SIZE);
-		sctp_crypto_hmac(sctp_sk(ep->base.sk)->hmac, key, &keylen,
-				 &sg, 1, digest);
+		if (crypto_hash_setkey(desc.tfm, key, keylen) ||
+		    crypto_hash_digest(&desc, &sg, bodysize, digest)) {
+			*error = -SCTP_IERROR_NOMEM;
+			goto fail;
+		}

 		if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
 			/* Yikes! Still bad signature! */
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index dab15949958e..85caf7963886 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4898,7 +4898,7 @@ SCTP_STATIC int sctp_stream_listen(struct sock *sk, int backlog)
 int sctp_inet_listen(struct socket *sock, int backlog)
 {
 	struct sock *sk = sock->sk;
-	struct crypto_tfm *tfm=NULL;
+	struct crypto_hash *tfm = NULL;
 	int err = -EINVAL;

 	if (unlikely(backlog < 0))
@@ -4911,7 +4911,7 @@ int sctp_inet_listen(struct socket *sock, int backlog)
 	/* Allocate HMAC for generating cookie. */
 	if (sctp_hmac_alg) {
-		tfm = sctp_crypto_alloc_tfm(sctp_hmac_alg, 0);
-		if (!tfm) {
+		tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC);
+		if (IS_ERR(tfm)) {
 			err = -ENOSYS;
 			goto out;
@@ -4937,7 +4937,7 @@ out:
 	sctp_release_sock(sk);
 	return err;
 cleanup:
-	sctp_crypto_free_tfm(tfm);
+	crypto_free_hash(tfm);
 	goto out;
 }
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 76b969e6904f..e11a40b25cce 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -34,6 +34,7 @@
  * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
  */

+#include <linux/err.h>
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
@@ -49,7 +50,7 @@

 u32
 krb5_encrypt(
-	struct crypto_tfm *tfm,
+	struct crypto_blkcipher *tfm,
 	void * iv,
 	void * in,
 	void * out,
@@ -58,26 +59,27 @@ krb5_encrypt(
 	u32 ret = -EINVAL;
 	struct scatterlist sg[1];
 	u8 local_iv[16] = {0};
+	struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };

 	dprintk("RPC: krb5_encrypt: input data:\n");
 	print_hexl((u32 *)in, length, 0);

-	if (length % crypto_tfm_alg_blocksize(tfm) != 0)
+	if (length % crypto_blkcipher_blocksize(tfm) != 0)
 		goto out;

-	if (crypto_tfm_alg_ivsize(tfm) > 16) {
+	if (crypto_blkcipher_ivsize(tfm) > 16) {
 		dprintk("RPC: gss_k5encrypt: tfm iv size to large %d\n",
-			crypto_tfm_alg_ivsize(tfm));
+			crypto_blkcipher_ivsize(tfm));
 		goto out;
 	}

 	if (iv)
-		memcpy(local_iv, iv, crypto_tfm_alg_ivsize(tfm));
+		memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));

 	memcpy(out, in, length);
 	sg_set_buf(sg, out, length);

-	ret = crypto_cipher_encrypt_iv(tfm, sg, sg, length, local_iv);
+	ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);

 	dprintk("RPC: krb5_encrypt: output data:\n");
 	print_hexl((u32 *)out, length, 0);
@@ -90,7 +92,7 @@ EXPORT_SYMBOL(krb5_encrypt);

 u32
 krb5_decrypt(
-	struct crypto_tfm *tfm,
+	struct crypto_blkcipher *tfm,
 	void * iv,
 	void * in,
 	void * out,
@@ -99,25 +101,26 @@ krb5_decrypt(
 	u32 ret = -EINVAL;
 	struct scatterlist sg[1];
 	u8 local_iv[16] = {0};
+	struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };

 	dprintk("RPC: krb5_decrypt: input data:\n");
 	print_hexl((u32 *)in, length, 0);

-	if (length % crypto_tfm_alg_blocksize(tfm) != 0)
+	if (length % crypto_blkcipher_blocksize(tfm) != 0)
 		goto out;

-	if (crypto_tfm_alg_ivsize(tfm) > 16) {
+	if (crypto_blkcipher_ivsize(tfm) > 16) {
 		dprintk("RPC: gss_k5decrypt: tfm iv size to large %d\n",
-			crypto_tfm_alg_ivsize(tfm));
+			crypto_blkcipher_ivsize(tfm));
 		goto out;
 	}

 	if (iv)
-		memcpy(local_iv,iv, crypto_tfm_alg_ivsize(tfm));
+
memcpy(local_iv,iv, crypto_blkcipher_ivsize(tfm)); memcpy(out, in, length); sg_set_buf(sg, out, length); - ret = crypto_cipher_decrypt_iv(tfm, sg, sg, length, local_iv); + ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length); dprintk("RPC: krb5_decrypt: output_data:\n"); print_hexl((u32 *)out, length, 0); @@ -197,11 +200,9 @@ out: static int checksummer(struct scatterlist *sg, void *data) { - struct crypto_tfm *tfm = (struct crypto_tfm *)data; + struct hash_desc *desc = data; - crypto_digest_update(tfm, sg, 1); - - return 0; + return crypto_hash_update(desc, sg, sg->length); } /* checksum the plaintext data and hdrlen bytes of the token header */ @@ -210,8 +211,9 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body, int body_offset, struct xdr_netobj *cksum) { char *cksumname; - struct crypto_tfm *tfm = NULL; /* XXX add to ctx? */ + struct hash_desc desc; /* XXX add to ctx? */ struct scatterlist sg[1]; + int err; switch (cksumtype) { case CKSUMTYPE_RSA_MD5: @@ -222,25 +224,35 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body, " unsupported checksum %d", cksumtype); return GSS_S_FAILURE; } - if (!(tfm = crypto_alloc_tfm(cksumname, CRYPTO_TFM_REQ_MAY_SLEEP))) + desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(desc.tfm)) return GSS_S_FAILURE; - cksum->len = crypto_tfm_alg_digestsize(tfm); + cksum->len = crypto_hash_digestsize(desc.tfm); + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; - crypto_digest_init(tfm); + err = crypto_hash_init(&desc); + if (err) + goto out; sg_set_buf(sg, header, hdrlen); - crypto_digest_update(tfm, sg, 1); - process_xdr_buf(body, body_offset, body->len - body_offset, - checksummer, tfm); - crypto_digest_final(tfm, cksum->data); - crypto_free_tfm(tfm); - return 0; + err = crypto_hash_update(&desc, sg, hdrlen); + if (err) + goto out; + err = process_xdr_buf(body, body_offset, body->len - body_offset, + checksummer, &desc); + if (err) + goto out; + err = crypto_hash_final(&desc, cksum->data); + +out: + crypto_free_hash(desc.tfm); + return err ? 
GSS_S_FAILURE : 0; } EXPORT_SYMBOL(make_checksum); struct encryptor_desc { u8 iv[8]; /* XXX hard-coded blocksize */ - struct crypto_tfm *tfm; + struct blkcipher_desc desc; int pos; struct xdr_buf *outbuf; struct page **pages; @@ -285,8 +297,8 @@ encryptor(struct scatterlist *sg, void *data) if (thislen == 0) return 0; - ret = crypto_cipher_encrypt_iv(desc->tfm, desc->outfrags, desc->infrags, - thislen, desc->iv); + ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags, + desc->infrags, thislen); if (ret) return ret; if (fraglen) { @@ -305,16 +317,18 @@ encryptor(struct scatterlist *sg, void *data) } int -gss_encrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset, - struct page **pages) +gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf, + int offset, struct page **pages) { int ret; struct encryptor_desc desc; - BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0); + BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0); memset(desc.iv, 0, sizeof(desc.iv)); - desc.tfm = tfm; + desc.desc.tfm = tfm; + desc.desc.info = desc.iv; + desc.desc.flags = 0; desc.pos = offset; desc.outbuf = buf; desc.pages = pages; @@ -329,7 +343,7 @@ EXPORT_SYMBOL(gss_encrypt_xdr_buf); struct decryptor_desc { u8 iv[8]; /* XXX hard-coded blocksize */ - struct crypto_tfm *tfm; + struct blkcipher_desc desc; struct scatterlist frags[4]; int fragno; int fraglen; @@ -355,8 +369,8 @@ decryptor(struct scatterlist *sg, void *data) if (thislen == 0) return 0; - ret = crypto_cipher_decrypt_iv(desc->tfm, desc->frags, desc->frags, - thislen, desc->iv); + ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags, + desc->frags, thislen); if (ret) return ret; if (fraglen) { @@ -373,15 +387,18 @@ decryptor(struct scatterlist *sg, void *data) } int -gss_decrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset) +gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf, + int offset) { struct decryptor_desc desc; /* XXXJBF: */ - BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0); + BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0); memset(desc.iv, 0, sizeof(desc.iv)); - desc.tfm = tfm; + desc.desc.tfm = tfm; + desc.desc.info = desc.iv; + desc.desc.flags = 0; desc.fragno = 0; desc.fraglen = 0; return process_xdr_buf(buf, offset, buf->len - offset, decryptor, &desc); diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 70e1e53a632b..325e72e4fd31 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -34,6 +34,7 @@ * */ +#include <linux/err.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> @@ -78,10 +79,10 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res) } static inline const void * -get_key(const void *p, const void *end, struct crypto_tfm **res) +get_key(const void *p, const void *end, struct crypto_blkcipher **res) { struct xdr_netobj key; - int alg, alg_mode; + int alg; char *alg_name; p = simple_get_bytes(p, end, &alg, sizeof(alg)); @@ -93,18 +94,19 @@ get_key(const void *p, const void *end, struct crypto_tfm **res) switch (alg) { case ENCTYPE_DES_CBC_RAW: - alg_name = "des"; - alg_mode = CRYPTO_TFM_MODE_CBC; + alg_name = "cbc(des)"; break; default: printk("gss_kerberos_mech: unsupported algorithm %d\n", alg); goto out_err_free_key; } - if (!(*res = crypto_alloc_tfm(alg_name, alg_mode))) { + *res = crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(*res)) { 
printk("gss_kerberos_mech: unable to initialize crypto algorithm %s\n", alg_name); + *res = NULL; goto out_err_free_key; } - if (crypto_cipher_setkey(*res, key.data, key.len)) { + if (crypto_blkcipher_setkey(*res, key.data, key.len)) { printk("gss_kerberos_mech: error setting key for crypto algorithm %s\n", alg_name); goto out_err_free_tfm; } @@ -113,7 +115,7 @@ get_key(const void *p, const void *end, struct crypto_tfm **res) return p; out_err_free_tfm: - crypto_free_tfm(*res); + crypto_free_blkcipher(*res); out_err_free_key: kfree(key.data); p = ERR_PTR(-EINVAL); @@ -172,9 +174,9 @@ gss_import_sec_context_kerberos(const void *p, return 0; out_err_free_key2: - crypto_free_tfm(ctx->seq); + crypto_free_blkcipher(ctx->seq); out_err_free_key1: - crypto_free_tfm(ctx->enc); + crypto_free_blkcipher(ctx->enc); out_err_free_mech: kfree(ctx->mech_used.data); out_err_free_ctx: @@ -187,8 +189,8 @@ static void gss_delete_sec_context_kerberos(void *internal_ctx) { struct krb5_ctx *kctx = internal_ctx; - crypto_free_tfm(kctx->seq); - crypto_free_tfm(kctx->enc); + crypto_free_blkcipher(kctx->seq); + crypto_free_blkcipher(kctx->enc); kfree(kctx->mech_used.data); kfree(kctx); } diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c index c53ead39118d..c604baf3a5f6 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c +++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c @@ -41,7 +41,7 @@ #endif s32 -krb5_make_seq_num(struct crypto_tfm *key, +krb5_make_seq_num(struct crypto_blkcipher *key, int direction, s32 seqnum, unsigned char *cksum, unsigned char *buf) @@ -62,7 +62,7 @@ krb5_make_seq_num(struct crypto_tfm *key, } s32 -krb5_get_seq_num(struct crypto_tfm *key, +krb5_get_seq_num(struct crypto_blkcipher *key, unsigned char *cksum, unsigned char *buf, int *direction, s32 * seqnum) diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index 89d1f3e14128..f179415d0c38 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c @@ -149,7 +149,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset, goto out_err; } - blocksize = crypto_tfm_alg_blocksize(kctx->enc); + blocksize = crypto_blkcipher_blocksize(kctx->enc); gss_krb5_add_padding(buf, offset, blocksize); BUG_ON((buf->len - offset) % blocksize); plainlen = blocksize + buf->len - offset; @@ -346,7 +346,7 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf) /* Copy the data back to the right position. XXX: Would probably be * better to copy and encrypt at the same time. 
*/ - blocksize = crypto_tfm_alg_blocksize(kctx->enc); + blocksize = crypto_blkcipher_blocksize(kctx->enc); data_start = ptr + 22 + blocksize; orig_start = buf->head[0].iov_base + offset; data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start; diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c index 88dcb52d171b..bdedf456bc17 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_mech.c +++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c @@ -34,6 +34,7 @@ * */ +#include <linux/err.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> @@ -83,10 +84,11 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res) } static inline const void * -get_key(const void *p, const void *end, struct crypto_tfm **res, int *resalg) +get_key(const void *p, const void *end, struct crypto_blkcipher **res, + int *resalg) { struct xdr_netobj key = { 0 }; - int alg_mode,setkey = 0; + int setkey = 0; char *alg_name; p = simple_get_bytes(p, end, resalg, sizeof(*resalg)); @@ -98,14 +100,12 @@ get_key(const void *p, const void *end, struct crypto_tfm **res, int *resalg) switch (*resalg) { case NID_des_cbc: - alg_name = "des"; - alg_mode = CRYPTO_TFM_MODE_CBC; + alg_name = "cbc(des)"; setkey = 1; break; case NID_cast5_cbc: /* XXXX here in name only, not used */ - alg_name = "cast5"; - alg_mode = CRYPTO_TFM_MODE_CBC; + alg_name = "cbc(cast5)"; setkey = 0; /* XXX will need to set to 1 */ break; case NID_md5: @@ -113,19 +113,20 @@ get_key(const void *p, const void *end, struct crypto_tfm **res, int *resalg) dprintk("RPC: SPKM3 get_key: NID_md5 zero Key length\n"); } alg_name = "md5"; - alg_mode = 0; setkey = 0; break; default: dprintk("gss_spkm3_mech: unsupported algorithm %d\n", *resalg); goto out_err_free_key; } - if (!(*res = crypto_alloc_tfm(alg_name, alg_mode))) { + *res = crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(*res)) { printk("gss_spkm3_mech: unable to initialize crypto algorthm %s\n", alg_name); + *res = NULL; goto out_err_free_key; } if (setkey) { - if (crypto_cipher_setkey(*res, key.data, key.len)) { + if (crypto_blkcipher_setkey(*res, key.data, key.len)) { printk("gss_spkm3_mech: error setting key for crypto algorthm %s\n", alg_name); goto out_err_free_tfm; } @@ -136,7 +137,7 @@ get_key(const void *p, const void *end, struct crypto_tfm **res, int *resalg) return p; out_err_free_tfm: - crypto_free_tfm(*res); + crypto_free_blkcipher(*res); out_err_free_key: if(key.len > 0) kfree(key.data); @@ -204,9 +205,9 @@ gss_import_sec_context_spkm3(const void *p, size_t len, return 0; out_err_free_key2: - crypto_free_tfm(ctx->derived_integ_key); + crypto_free_blkcipher(ctx->derived_integ_key); out_err_free_key1: - crypto_free_tfm(ctx->derived_conf_key); + crypto_free_blkcipher(ctx->derived_conf_key); out_err_free_s_key: kfree(ctx->share_key.data); out_err_free_mech: @@ -223,8 +224,8 @@ static void gss_delete_sec_context_spkm3(void *internal_ctx) { struct spkm3_ctx *sctx = internal_ctx; - crypto_free_tfm(sctx->derived_integ_key); - crypto_free_tfm(sctx->derived_conf_key); + crypto_free_blkcipher(sctx->derived_integ_key); + crypto_free_blkcipher(sctx->derived_conf_key); kfree(sctx->share_key.data); kfree(sctx->mech_used.data); kfree(sctx); diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c index 04e1aea58bc9..5a0dbeb6bbe8 100644 --- a/net/xfrm/xfrm_algo.c +++ b/net/xfrm/xfrm_algo.c @@ -30,7 +30,8 @@ */ static struct xfrm_algo_desc aalg_list[] = { { - .name = "digest_null", + .name = "hmac(digest_null)", + .compat = 
"digest_null", .uinfo = { .auth = { @@ -47,7 +48,8 @@ static struct xfrm_algo_desc aalg_list[] = { } }, { - .name = "md5", + .name = "hmac(md5)", + .compat = "md5", .uinfo = { .auth = { @@ -64,7 +66,8 @@ static struct xfrm_algo_desc aalg_list[] = { } }, { - .name = "sha1", + .name = "hmac(sha1)", + .compat = "sha1", .uinfo = { .auth = { @@ -81,7 +84,8 @@ static struct xfrm_algo_desc aalg_list[] = { } }, { - .name = "sha256", + .name = "hmac(sha256)", + .compat = "sha256", .uinfo = { .auth = { @@ -98,7 +102,8 @@ static struct xfrm_algo_desc aalg_list[] = { } }, { - .name = "ripemd160", + .name = "hmac(ripemd160)", + .compat = "ripemd160", .uinfo = { .auth = { @@ -118,7 +123,8 @@ static struct xfrm_algo_desc aalg_list[] = { static struct xfrm_algo_desc ealg_list[] = { { - .name = "cipher_null", + .name = "ecb(cipher_null)", + .compat = "cipher_null", .uinfo = { .encr = { @@ -135,7 +141,8 @@ static struct xfrm_algo_desc ealg_list[] = { } }, { - .name = "des", + .name = "cbc(des)", + .compat = "des", .uinfo = { .encr = { @@ -152,7 +159,8 @@ static struct xfrm_algo_desc ealg_list[] = { } }, { - .name = "des3_ede", + .name = "cbc(des3_ede)", + .compat = "des3_ede", .uinfo = { .encr = { @@ -169,7 +177,8 @@ static struct xfrm_algo_desc ealg_list[] = { } }, { - .name = "cast128", + .name = "cbc(cast128)", + .compat = "cast128", .uinfo = { .encr = { @@ -186,7 +195,8 @@ static struct xfrm_algo_desc ealg_list[] = { } }, { - .name = "blowfish", + .name = "cbc(blowfish)", + .compat = "blowfish", .uinfo = { .encr = { @@ -203,7 +213,8 @@ static struct xfrm_algo_desc ealg_list[] = { } }, { - .name = "aes", + .name = "cbc(aes)", + .compat = "aes", .uinfo = { .encr = { @@ -220,7 +231,8 @@ static struct xfrm_algo_desc ealg_list[] = { } }, { - .name = "serpent", + .name = "cbc(serpent)", + .compat = "serpent", .uinfo = { .encr = { @@ -237,7 +249,8 @@ static struct xfrm_algo_desc ealg_list[] = { } }, { - .name = "twofish", + .name = "cbc(twofish)", + .compat = "twofish", .uinfo = { .encr = { @@ -350,8 +363,8 @@ struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id) EXPORT_SYMBOL_GPL(xfrm_calg_get_byid); static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list, - int entries, char *name, - int probe) + int entries, u32 type, u32 mask, + char *name, int probe) { int i, status; @@ -359,7 +372,8 @@ static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list, return NULL; for (i = 0; i < entries; i++) { - if (strcmp(name, list[i].name)) + if (strcmp(name, list[i].name) && + (!list[i].compat || strcmp(name, list[i].compat))) continue; if (list[i].available) @@ -368,7 +382,7 @@ static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list, if (!probe) break; - status = crypto_alg_available(name, 0); + status = crypto_has_alg(name, type, mask | CRYPTO_ALG_ASYNC); if (!status) break; @@ -380,19 +394,25 @@ static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list, struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe) { - return xfrm_get_byname(aalg_list, aalg_entries(), name, probe); + return xfrm_get_byname(aalg_list, aalg_entries(), + CRYPTO_ALG_TYPE_HASH, CRYPTO_ALG_TYPE_HASH_MASK, + name, probe); } EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname); struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe) { - return xfrm_get_byname(ealg_list, ealg_entries(), name, probe); + return xfrm_get_byname(ealg_list, ealg_entries(), + CRYPTO_ALG_TYPE_BLKCIPHER, CRYPTO_ALG_TYPE_MASK, + name, probe); } EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname); struct 
xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe) { - return xfrm_get_byname(calg_list, calg_entries(), name, probe); + return xfrm_get_byname(calg_list, calg_entries(), + CRYPTO_ALG_TYPE_COMPRESS, CRYPTO_ALG_TYPE_MASK, + name, probe); } EXPORT_SYMBOL_GPL(xfrm_calg_get_byname); @@ -427,19 +447,22 @@ void xfrm_probe_algs(void) BUG_ON(in_softirq()); for (i = 0; i < aalg_entries(); i++) { - status = crypto_alg_available(aalg_list[i].name, 0); + status = crypto_has_hash(aalg_list[i].name, 0, + CRYPTO_ALG_ASYNC); if (aalg_list[i].available != status) aalg_list[i].available = status; } for (i = 0; i < ealg_entries(); i++) { - status = crypto_alg_available(ealg_list[i].name, 0); + status = crypto_has_blkcipher(ealg_list[i].name, 0, + CRYPTO_ALG_ASYNC); if (ealg_list[i].available != status) ealg_list[i].available = status; } for (i = 0; i < calg_entries(); i++) { - status = crypto_alg_available(calg_list[i].name, 0); + status = crypto_has_comp(calg_list[i].name, 0, + CRYPTO_ALG_ASYNC); if (calg_list[i].available != status) calg_list[i].available = status; } @@ -471,11 +494,12 @@ EXPORT_SYMBOL_GPL(xfrm_count_enc_supported); /* Move to common area: it is shared with AH. */ -void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm, - int offset, int len, icv_update_fn_t icv_update) +int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc, + int offset, int len, icv_update_fn_t icv_update) { int start = skb_headlen(skb); int i, copy = start - offset; + int err; struct scatterlist sg; /* Checksum header. */ @@ -487,10 +511,12 @@ void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm, sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE; sg.length = copy; - icv_update(tfm, &sg, 1); + err = icv_update(desc, &sg, copy); + if (unlikely(err)) + return err; if ((len -= copy) == 0) - return; + return 0; offset += copy; } @@ -510,10 +536,12 @@ void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm, sg.offset = frag->page_offset + offset-start; sg.length = copy; - icv_update(tfm, &sg, 1); + err = icv_update(desc, &sg, copy); + if (unlikely(err)) + return err; if (!(len -= copy)) - return; + return 0; offset += copy; } start = end; @@ -531,15 +559,19 @@ void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm, if ((copy = end - offset) > 0) { if (copy > len) copy = len; - skb_icv_walk(list, tfm, offset-start, copy, icv_update); + err = skb_icv_walk(list, desc, offset-start, + copy, icv_update); + if (unlikely(err)) + return err; if ((len -= copy) == 0) - return; + return 0; offset += copy; } start = end; } } BUG_ON(len); + return 0; } EXPORT_SYMBOL_GPL(skb_icv_walk); diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 3e6a722d072e..fa79ddc4239e 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -10,6 +10,7 @@ * */ +#include <linux/crypto.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> @@ -212,6 +213,7 @@ static int attach_one_algo(struct xfrm_algo **algpp, u8 *props, return -ENOMEM; memcpy(p, ualg, len); + strcpy(p->alg_name, algo->name); *algpp = p; return 0; } diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl index b34924663ac1..f7844f6aa487 100755 --- a/scripts/checkstack.pl +++ b/scripts/checkstack.pl @@ -62,6 +62,8 @@ my (@stack, $re, $x, $xs); } elsif ($arch eq 'ppc64') { #XXX $re = qr/.*stdu.*r1,-($x{1,8})\(r1\)/o; + } elsif ($arch eq 'powerpc') { + $re = qr/.*st[dw]u.*r1,-($x{1,8})\(r1\)/o; } elsif ($arch =~ /^s390x?$/) { # 11160: a7 fb ff 60 aghi %r15,-160 $re = 
qr/.*ag?hi.*\%r15,-(([0-9]{2}|[3-9])[0-9]{2})/o; diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index e2de650d3dbf..de76da80443f 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -265,6 +265,14 @@ static int do_ccw_entry(const char *filename, return 1; } +/* looks like: "ap:tN" */ +static int do_ap_entry(const char *filename, + struct ap_device_id *id, char *alias) +{ + sprintf(alias, "ap:t%02X", id->dev_type); + return 1; +} + /* Looks like: "serio:tyNprNidNexN" */ static int do_serio_entry(const char *filename, struct serio_device_id *id, char *alias) @@ -503,6 +511,10 @@ void handle_moddevtable(struct module *mod, struct elf_info *info, do_table(symval, sym->st_size, sizeof(struct ccw_device_id), "ccw", do_ccw_entry, mod); + else if (sym_is(symname, "__mod_ap_device_table")) + do_table(symval, sym->st_size, + sizeof(struct ap_device_id), "ap", + do_ap_entry, mod); else if (sym_is(symname, "__mod_serio_device_table")) do_table(symval, sym->st_size, sizeof(struct serio_device_id), "serio", diff --git a/security/seclvl.c b/security/seclvl.c index c26dd7de0471..8f6291991fbc 100644 --- a/security/seclvl.c +++ b/security/seclvl.c @@ -16,6 +16,7 @@ * (at your option) any later version. */ +#include <linux/err.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> @@ -197,26 +198,27 @@ static unsigned char hashedPassword[SHA1_DIGEST_SIZE]; static int plaintext_to_sha1(unsigned char *hash, const char *plaintext, unsigned int len) { - struct crypto_tfm *tfm; + struct hash_desc desc; struct scatterlist sg; + int err; + if (len > PAGE_SIZE) { seclvl_printk(0, KERN_ERR, "Plaintext password too large (%d " "characters). Largest possible is %lu " "bytes.\n", len, PAGE_SIZE); return -EINVAL; } - tfm = crypto_alloc_tfm("sha1", CRYPTO_TFM_REQ_MAY_SLEEP); - if (tfm == NULL) { + desc.tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(desc.tfm)) { seclvl_printk(0, KERN_ERR, "Failed to load transform for SHA1\n"); return -EINVAL; } sg_init_one(&sg, (u8 *)plaintext, len); - crypto_digest_init(tfm); - crypto_digest_update(tfm, &sg, 1); - crypto_digest_final(tfm, hash); - crypto_free_tfm(tfm); - return 0; + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; + err = crypto_hash_digest(&desc, &sg, len, hash); + crypto_free_hash(desc.tfm); + return err; } /** diff --git a/sound/aoa/core/snd-aoa-gpio-feature.c b/sound/aoa/core/snd-aoa-gpio-feature.c index f69d33357a28..7c26089527f6 100644 --- a/sound/aoa/core/snd-aoa-gpio-feature.c +++ b/sound/aoa/core/snd-aoa-gpio-feature.c @@ -56,7 +56,7 @@ static struct device_node *get_gpio(char *name, { struct device_node *np, *gpio; u32 *reg; - char *audio_gpio; + const char *audio_gpio; *gpioptr = -1; diff --git a/sound/oss/dmasound/dmasound_awacs.c b/sound/oss/dmasound/dmasound_awacs.c index 4359903f4376..9ae659f82430 100644 --- a/sound/oss/dmasound/dmasound_awacs.c +++ b/sound/oss/dmasound/dmasound_awacs.c @@ -347,8 +347,8 @@ int setup_audio_gpio(const char *name, const char* compatible, int *gpio_addr, int* gpio_pol) { struct device_node *np; - u32* pp; - + const u32* pp; + np = find_devices("gpio"); if (!np) return -ENODEV; @@ -356,7 +356,8 @@ setup_audio_gpio(const char *name, const char* compatible, int *gpio_addr, int* np = np->child; while(np != 0) { if (name) { - char *property = get_property(np,"audio-gpio",NULL); + const char *property = + get_property(np,"audio-gpio",NULL); if (property != 0 && strcmp(property,name) == 0) break; } else if (compatible && device_is_compatible(np, 
compatible))
@@ -365,11 +366,11 @@ setup_audio_gpio(const char *name, const char* compatible, int *gpio_addr, int*
 	}
 	if (!np)
 		return -ENODEV;
-	pp = (u32 *)get_property(np, "AAPL,address", NULL);
+	pp = get_property(np, "AAPL,address", NULL);
 	if (!pp)
 		return -ENODEV;
 	*gpio_addr = (*pp) & 0x0000ffff;
-	pp = (u32 *)get_property(np, "audio-gpio-active-state", NULL);
+	pp = get_property(np, "audio-gpio-active-state", NULL);
 	if (pp)
 		*gpio_pol = *pp;
 	else
diff --git a/sound/ppc/tumbler.c b/sound/ppc/tumbler.c
index 84f6b19c07ca..6ae2d5b9aa4a 100644
--- a/sound/ppc/tumbler.c
+++ b/sound/ppc/tumbler.c
@@ -1035,7 +1035,7 @@ static struct device_node *find_audio_device(const char *name)
 		return NULL;

 	for (np = np->child; np; np = np->sibling) {
-		char *property = get_property(np, "audio-gpio", NULL);
+		const char *property = get_property(np, "audio-gpio", NULL);
 		if (property && strcmp(property, name) == 0)
 			return np;
 	}
@@ -1062,7 +1062,8 @@ static long tumbler_find_device(const char *device, const char *platform,
 				struct pmac_gpio *gp, int is_compatible)
 {
 	struct device_node *node;
-	u32 *base, addr;
+	const u32 *base;
+	u32 addr;

 	if (is_compatible)
 		node = find_compatible_audio_device(device);
@@ -1074,9 +1075,9 @@ static long tumbler_find_device(const char *device, const char *platform,
 		return -ENODEV;
 	}

-	base = (u32 *)get_property(node, "AAPL,address", NULL);
+	base = get_property(node, "AAPL,address", NULL);
 	if (! base) {
-		base = (u32 *)get_property(node, "reg", NULL);
+		base = get_property(node, "reg", NULL);
 		if (!base) {
 			DBG("(E) cannot find address for device %s !\n", device);
 			snd_printd("cannot find address for device %s\n", device);
@@ -1090,13 +1091,13 @@ static long tumbler_find_device(const char *device, const char *platform,
 	gp->addr = addr & 0x0000ffff;
 	/* Try to find the active state, default to 0 ! */
-	base = (u32 *)get_property(node, "audio-gpio-active-state", NULL);
+	base = get_property(node, "audio-gpio-active-state", NULL);
 	if (base) {
 		gp->active_state = *base;
 		gp->active_val = (*base) ? 0x5 : 0x4;
 		gp->inactive_val = (*base) ? 0x4 : 0x5;
 	} else {
-		u32 *prop = NULL;
+		const u32 *prop = NULL;
 		gp->active_state = 0;
 		gp->active_val = 0x4;
 		gp->inactive_val = 0x5;
@@ -1105,7 +1106,7 @@ static long tumbler_find_device(const char *device, const char *platform,
 		 * as we don't yet have an interpreter for these things
 		 */
 		if (platform)
-			prop = (u32 *)get_property(node, platform, NULL);
+			prop = get_property(node, platform, NULL);
 		if (prop) {
 			if (prop[3] == 0x9 && prop[4] == 0x9) {
 				gp->active_val = 0xd;
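
For readers following the conversions above, here is a minimal, self-contained sketch of the new block-cipher calling convention (not part of the patch; the function name, the "cbc(aes)" algorithm choice, and the single-scatterlist in-place buffer handling are illustrative assumptions). The mode is now spelled out in the algorithm name instead of a CRYPTO_TFM_MODE_* allocation flag, allocation failures are reported with ERR_PTR() rather than NULL, and every operation takes a blkcipher_desc bundling the tfm with per-call state:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Encrypt 'len' bytes described by 'sg' in place. */
static int example_encrypt(const u8 *key, unsigned int keylen,
			   struct scatterlist *sg, unsigned int len)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	int err;

	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_blkcipher_setkey(tfm, key, keylen);
	if (err)
		goto out;

	desc.tfm = tfm;
	desc.flags = 0;
	/* encrypt/decrypt now return an error code instead of void */
	err = crypto_blkcipher_encrypt(&desc, sg, sg, len);
out:
	crypto_free_blkcipher(tfm);
	return err;
}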
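
The AH, ESP and SCTP hunks follow the analogous keyed-hash pattern, where the HMAC wrapping moves out of the callers and into the "hmac(...)" template named at allocation time. A sketch under the same caveats (function and buffer names are assumptions, "hmac(sha1)" is one of the names registered in xfrm_algo above):

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_hmac(const u8 *key, unsigned int keylen,
			struct scatterlist *sg, unsigned int nbytes, u8 *out)
{
	struct crypto_hash *tfm;
	struct hash_desc desc;
	int err;

	/* "hmac(sha1)" replaces allocating "sha1" and keying the HMAC
	 * by hand in every user, as ah_hmac_digest() used to. */
	tfm = crypto_alloc_hash("hmac(sha1)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_hash_setkey(tfm, key, keylen);
	if (!err) {
		desc.tfm = tfm;
		desc.flags = 0;
		err = crypto_hash_digest(&desc, sg, nbytes, out);
	}

	crypto_free_hash(tfm);
	return err;
}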
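
Finally, a sketch of the compressor interface that the ipcomp/ipcomp6 hunks switch to (illustrative only; "deflate" is the algorithm IPComp actually uses, but the function and buffer names here are assumptions). Note that crypto_alloc_comp(), like the other type-safe allocators, reports failure via ERR_PTR(), which is why the per-cpu allocation loops above check IS_ERR() rather than NULL:

#include <linux/crypto.h>
#include <linux/err.h>

static int example_compress(const u8 *src, unsigned int slen,
			    u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm;
	int err;

	tfm = crypto_alloc_comp("deflate", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* *dlen holds the capacity of 'dst' on entry and the
	 * compressed size on successful return. */
	err = crypto_comp_compress(tfm, src, slen, dst, dlen);

	crypto_free_comp(tfm);
	return err;
}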