Diffstat (limited to 'crypto/bn')
-rw-r--r--  crypto/bn/asm/ia64.S          2
-rw-r--r--  crypto/bn/asm/parisc-mont.pl  2
-rwxr-xr-x  crypto/bn/asm/rsaz-x86_64.pl  4
-rw-r--r--  crypto/bn/bn_gcd.c            2
-rw-r--r--  crypto/bn/bn_kron.c           2
-rw-r--r--  crypto/bn/bn_lib.c            2
-rw-r--r--  crypto/bn/bn_mul.c            2
-rw-r--r--  crypto/bn/bn_nist.c           2
-rw-r--r--  crypto/bn/bn_x931p.c          2
9 files changed, 10 insertions, 10 deletions
diff --git a/crypto/bn/asm/ia64.S b/crypto/bn/asm/ia64.S
index a9a42ab..9e090ab 100644
--- a/crypto/bn/asm/ia64.S
+++ b/crypto/bn/asm/ia64.S
@@ -495,7 +495,7 @@ bn_sqr_words:
// scalability. The decision will very likely be reconsidered after the
// benchmark program is profiled. I.e. if perfomance gain on Itanium
// will appear larger than loss on "wider" IA-64, then the loop should
-// be explicitely split and the epilogue compressed.
+// be explicitly split and the epilogue compressed.
.L_bn_sqr_words_ctop:
{ .mfi; (p16) ldf8 f32=[r33],8
(p25) xmpy.lu f42=f41,f41
diff --git a/crypto/bn/asm/parisc-mont.pl b/crypto/bn/asm/parisc-mont.pl
index c02ef6f..aa6c797 100644
--- a/crypto/bn/asm/parisc-mont.pl
+++ b/crypto/bn/asm/parisc-mont.pl
@@ -126,7 +126,7 @@ $fp="%r3";
$hi1="%r2";
$hi0="%r1";
-$xfer=$n0; # accomodates [-16..15] offset in fld[dw]s
+$xfer=$n0; # accommodates [-16..15] offset in fld[dw]s
$fm0="%fr4"; $fti=$fm0;
$fbi="%fr5L";
diff --git a/crypto/bn/asm/rsaz-x86_64.pl b/crypto/bn/asm/rsaz-x86_64.pl
index 091cdc2..bac6aee 100755
--- a/crypto/bn/asm/rsaz-x86_64.pl
+++ b/crypto/bn/asm/rsaz-x86_64.pl
@@ -1671,7 +1671,7 @@ ___
{ # __rsaz_512_mul
#
# input: %rsi - ap, %rbp - bp
- # ouput:
+ # output:
# clobbers: everything
my ($ap,$bp) = ("%rsi","%rbp");
$code.=<<___;
@@ -1823,7 +1823,7 @@ if ($addx) {
# __rsaz_512_mulx
#
# input: %rsi - ap, %rbp - bp
- # ouput:
+ # output:
# clobbers: everything
my ($ap,$bp,$zero) = ("%rsi","%rbp","%rdi");
$code.=<<___;
diff --git a/crypto/bn/bn_gcd.c b/crypto/bn/bn_gcd.c
index b6dd09e..a6e909d 100644
--- a/crypto/bn/bn_gcd.c
+++ b/crypto/bn/bn_gcd.c
@@ -294,7 +294,7 @@ BIGNUM *int_bn_mod_inverse(BIGNUM *in,
/*
* Binary inversion algorithm; requires odd modulus. This is faster
* than the general algorithm if the modulus is sufficiently small
- * (about 400 .. 500 bits on 32-bit sytems, but much more on 64-bit
+ * (about 400 .. 500 bits on 32-bit systems, but much more on 64-bit
* systems)
*/
int shift;
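The comment touched by this hunk describes the binary inversion path inside int_bn_mod_inverse(), which requires an odd modulus. For reference only, a minimal, hypothetical usage sketch of the public BN_mod_inverse() entry point (which selects between the general and the binary algorithm internally); the demo_mod_inverse() wrapper and the sample values are illustrative and not part of the patch:

#include <openssl/bn.h>

/*
 * Hypothetical usage sketch (not part of the patch): invert a modulo an
 * odd modulus n via the public BN_mod_inverse() entry point, which uses
 * the binary algorithm internally for suitable odd moduli.
 */
static int demo_mod_inverse(void)
{
    int ok = 0;
    BN_CTX *ctx = BN_CTX_new();
    BIGNUM *a = BN_new(), *n = BN_new(), *inv = BN_new();

    if (ctx == NULL || a == NULL || n == NULL || inv == NULL)
        goto err;
    if (!BN_set_word(a, 3) || !BN_set_word(n, 65537))   /* n is odd */
        goto err;
    if (BN_mod_inverse(inv, a, n, ctx) == NULL)         /* NULL if no inverse */
        goto err;
    ok = 1;                                             /* inv = a^-1 mod n */
 err:
    BN_free(inv);
    BN_free(n);
    BN_free(a);
    BN_CTX_free(ctx);
    return ok;
}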
diff --git a/crypto/bn/bn_kron.c b/crypto/bn/bn_kron.c
index 20a64a7..4477bec 100644
--- a/crypto/bn/bn_kron.c
+++ b/crypto/bn/bn_kron.c
@@ -91,7 +91,7 @@ int BN_kronecker(const BIGNUM *a, const BIGNUM *b, BN_CTX *ctx)
goto end;
/*
- * Kronecker symbol, imlemented according to Henri Cohen,
+ * Kronecker symbol, implemented according to Henri Cohen,
* "A Course in Computational Algebraic Number Theory"
* (algorithm 1.4.10).
*/
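The routine whose comment is fixed here is exposed as BN_kronecker(). A small illustrative sketch of calling it follows; the check_kronecker() wrapper and the sample operands are assumptions for the example, not code from the patch:

#include <openssl/bn.h>

/*
 * Illustrative sketch: compute the Kronecker symbol (a|b).
 * BN_kronecker() returns -1, 0 or 1 on success and -2 on error.
 */
static int check_kronecker(void)
{
    int sym = -2;
    BN_CTX *ctx = BN_CTX_new();
    BIGNUM *a = BN_new(), *b = BN_new();

    if (ctx != NULL && a != NULL && b != NULL
        && BN_set_word(a, 2) && BN_set_word(b, 7))
        sym = BN_kronecker(a, b, ctx);   /* (2|7) == 1, since 3^2 = 2 mod 7 */

    BN_free(b);
    BN_free(a);
    BN_CTX_free(ctx);
    return sym;
}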
diff --git a/crypto/bn/bn_lib.c b/crypto/bn/bn_lib.c
index dd07d19..09d3954 100644
--- a/crypto/bn/bn_lib.c
+++ b/crypto/bn/bn_lib.c
@@ -330,7 +330,7 @@ static BN_ULONG *bn_expand_internal(const BIGNUM *b, int words)
* The fact that the loop is unrolled
* 4-wise is a tribute to Intel. It's
* the one that doesn't have enough
- * registers to accomodate more data.
+ * registers to accommodate more data.
* I'd unroll it 8-wise otherwise:-)
*
* <appro@fy.chalmers.se>
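The comment corrected here refers to the word-copy loop in bn_expand_internal(). As a rough illustration of what a 4-wise unrolled word copy looks like (a generic sketch, not the actual OpenSSL loop, and copy_words_unrolled() is a hypothetical name):

#include <openssl/bn.h>

/*
 * Illustrative only: the shape of a 4-wise unrolled word copy of the kind
 * the comment above refers to; n is the number of BN_ULONG words.
 */
static void copy_words_unrolled(BN_ULONG *dst, const BN_ULONG *src, int n)
{
    int i;

    for (i = 0; i + 4 <= n; i += 4) {   /* main loop: four words per pass */
        BN_ULONG a0 = src[i], a1 = src[i + 1];
        BN_ULONG a2 = src[i + 2], a3 = src[i + 3];
        dst[i] = a0;
        dst[i + 1] = a1;
        dst[i + 2] = a2;
        dst[i + 3] = a3;
    }
    for (; i < n; i++)                  /* tail: remaining 0..3 words */
        dst[i] = src[i];
}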
diff --git a/crypto/bn/bn_mul.c b/crypto/bn/bn_mul.c
index f30855a..7d4cd31 100644
--- a/crypto/bn/bn_mul.c
+++ b/crypto/bn/bn_mul.c
@@ -69,7 +69,7 @@
* Here follows specialised variants of bn_add_words() and bn_sub_words().
* They have the property performing operations on arrays of different sizes.
* The sizes of those arrays is expressed through cl, which is the common
- * length ( basicall, min(len(a),len(b)) ), and dl, which is the delta
+ * length ( basically, min(len(a),len(b)) ), and dl, which is the delta
* between the two lengths, calculated as len(a)-len(b). All lengths are the
* number of BN_ULONGs... For the operations that require a result array as
* parameter, it must have the length cl+abs(dl). These functions should
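To make the cl/dl convention described in this comment concrete, a tiny hypothetical helper (purely illustrative, not part of the patch or of bn_mul.c) could derive them from the two operand lengths as follows:

/*
 * Illustrative only: how the cl/dl convention described above maps onto
 * two operand lengths given in BN_ULONGs. The helper name is hypothetical.
 */
static void cl_dl_from_lengths(int len_a, int len_b, int *cl, int *dl)
{
    *cl = len_a < len_b ? len_a : len_b;  /* common length: min(len(a), len(b)) */
    *dl = len_a - len_b;                  /* delta; may be negative             */
    /* A result array for these part-word routines needs cl + abs(dl) words. */
}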
diff --git a/crypto/bn/bn_nist.c b/crypto/bn/bn_nist.c
index db8f687..35d0eef 100644
--- a/crypto/bn/bn_nist.c
+++ b/crypto/bn/bn_nist.c
@@ -644,7 +644,7 @@ int BN_nist_mod_224(BIGNUM *r, const BIGNUM *a, const BIGNUM *field,
#endif
} else if (carry < 0) {
/*
- * it's a bit more comlicated logic in this case. if bn_add_words
+ * it's a bit more complicated logic in this case. if bn_add_words
* yields no carry, then result has to be adjusted by unconditionally
* *adding* the modulus. but if it does, then result has to be
* compared to the modulus and conditionally adjusted by
diff --git a/crypto/bn/bn_x931p.c b/crypto/bn/bn_x931p.c
index 3c8f34a..3c74fd57 100644
--- a/crypto/bn/bn_x931p.c
+++ b/crypto/bn/bn_x931p.c
@@ -78,7 +78,7 @@ static int bn_x931_derive_pi(BIGNUM *pi, const BIGNUM *Xpi, BN_CTX *ctx,
for (;;) {
i++;
BN_GENCB_call(cb, 0, i);
- /* NB 27 MR is specificed in X9.31 */
+ /* NB 27 MR is specified in X9.31 */
if (BN_is_prime_fasttest_ex(pi, 27, ctx, 1, cb))
break;
if (!BN_add_word(pi, 2))