author    Thomas Gleixner <tglx@linutronix.de>    2018-09-15 20:50:42 +0200
committer Thomas Gleixner <tglx@linutronix.de>    2018-09-15 20:50:42 +0200
commit    f4f1815d81195033eabb55651db2756b27cbaf0d (patch)
tree      e7667ecbd53d3985d294ea7ee64e33f9523cedff /crypto/testmgr.c
parent    ea2d7a962774232c5ec13f85a399e7f2a7b6746e (diff)
parent    a41bb691f04fcf6d3fa1f6e743d1520e305bc71d (diff)
Merge tag 'y2038' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/playground into timers/core
Pull more y2038 work from Arnd Bergmann:

 y2038: convert more syscalls

 Here is another set of system call changes to prepare the changeover to
 64-bit time_t. As before, the strategy is to change system calls that take
 a 'struct timespec' argument over to 'struct __kernel_timespec', which for
 now is defined to be the same but will be redefined to use a 64-bit time_t
 argument once we are ready to modify the system call tables.

 The major change from previous patches is that the plan is no longer to
 directly use the 'compat' system calls for providing compatibility with
 the existing 32-bit time_t based entry points. Instead, we rename the
 compat code to something that makes more sense on 32-bit architectures,
 e.g. compat_timespec becomes old_timespec32.

 With the renamed types in place, change over the 'stat' and 'utimes'
 families of system calls, sched_rr_get_interval, recvmmsg and
 rt_sigtimedwait. Another series for poll, select and io_pgetevents is
 currently being tested.
Diffstat (limited to 'crypto/testmgr.c')
-rw-r--r-- crypto/testmgr.c | 59 ++++++++++++++++++++++++++++++++++++++++++++++++-----------
1 file changed, 48 insertions(+), 11 deletions(-)
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 11e45352..a1d42245 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -259,9 +259,15 @@ out_nostate:
return ret;
}
+enum hash_test {
+ HASH_TEST_DIGEST,
+ HASH_TEST_FINAL,
+ HASH_TEST_FINUP
+};
+
static int __test_hash(struct crypto_ahash *tfm,
const struct hash_testvec *template, unsigned int tcount,
- bool use_digest, const int align_offset)
+ enum hash_test test_type, const int align_offset)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
size_t digest_size = crypto_ahash_digestsize(tfm);
@@ -332,14 +338,17 @@ static int __test_hash(struct crypto_ahash *tfm,
}
ahash_request_set_crypt(req, sg, result, template[i].psize);
- if (use_digest) {
+ switch (test_type) {
+ case HASH_TEST_DIGEST:
ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
if (ret) {
pr_err("alg: hash: digest failed on test %d "
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
- } else {
+ break;
+
+ case HASH_TEST_FINAL:
memset(result, 1, digest_size);
ret = crypto_wait_req(crypto_ahash_init(req), &wait);
if (ret) {
@@ -371,6 +380,29 @@ static int __test_hash(struct crypto_ahash *tfm,
"for %s: ret=%d\n", j, algo, -ret);
goto out;
}
+ break;
+
+ case HASH_TEST_FINUP:
+ memset(result, 1, digest_size);
+ ret = crypto_wait_req(crypto_ahash_init(req), &wait);
+ if (ret) {
+ pr_err("alg: hash: init failed on test %d "
+ "for %s: ret=%d\n", j, algo, -ret);
+ goto out;
+ }
+ ret = ahash_guard_result(result, 1, digest_size);
+ if (ret) {
+ pr_err("alg: hash: init failed on test %d "
+ "for %s: used req->result\n", j, algo);
+ goto out;
+ }
+ ret = crypto_wait_req(crypto_ahash_finup(req), &wait);
+ if (ret) {
+ pr_err("alg: hash: final failed on test %d "
+ "for %s: ret=%d\n", j, algo, -ret);
+ goto out;
+ }
+ break;
}
if (memcmp(result, template[i].digest,
@@ -383,6 +415,9 @@ static int __test_hash(struct crypto_ahash *tfm,
}
}
+ if (test_type)
+ goto out;
+
j = 0;
for (i = 0; i < tcount; i++) {
/* alignment tests are only done with continuous buffers */
@@ -540,24 +575,24 @@ out_nobuf:
static int test_hash(struct crypto_ahash *tfm,
const struct hash_testvec *template,
- unsigned int tcount, bool use_digest)
+ unsigned int tcount, enum hash_test test_type)
{
unsigned int alignmask;
int ret;
- ret = __test_hash(tfm, template, tcount, use_digest, 0);
+ ret = __test_hash(tfm, template, tcount, test_type, 0);
if (ret)
return ret;
/* test unaligned buffers, check with one byte offset */
- ret = __test_hash(tfm, template, tcount, use_digest, 1);
+ ret = __test_hash(tfm, template, tcount, test_type, 1);
if (ret)
return ret;
alignmask = crypto_tfm_alg_alignmask(&tfm->base);
if (alignmask) {
/* Check if alignment mask for tfm is correctly set. */
- ret = __test_hash(tfm, template, tcount, use_digest,
+ ret = __test_hash(tfm, template, tcount, test_type,
alignmask + 1);
if (ret)
return ret;
@@ -1803,9 +1838,11 @@ static int __alg_test_hash(const struct hash_testvec *template,
return PTR_ERR(tfm);
}
- err = test_hash(tfm, template, tcount, true);
+ err = test_hash(tfm, template, tcount, HASH_TEST_DIGEST);
+ if (!err)
+ err = test_hash(tfm, template, tcount, HASH_TEST_FINAL);
if (!err)
- err = test_hash(tfm, template, tcount, false);
+ err = test_hash(tfm, template, tcount, HASH_TEST_FINUP);
crypto_free_ahash(tfm);
return err;
}
@@ -3478,10 +3515,10 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(tgr192_tv_template)
}
}, {
- .alg = "vmac(aes)",
+ .alg = "vmac64(aes)",
.test = alg_test_hash,
.suite = {
- .hash = __VECS(aes_vmac128_tv_template)
+ .hash = __VECS(vmac64_aes_tv_template)
}
}, {
.alg = "wp256",