From 2cfc0e4622866b4cfbdb9c3f143ae120a89bb572 Mon Sep 17 00:00:00 2001 From: Brett Nicholas <7547222+bigbrett@users.noreply.github.com> Date: Wed, 31 Dec 2025 11:26:28 -0800 Subject: [PATCH 01/13] WOLFHSM_CFG_THREADSAFE: Adds framework for internal server thread safety, serializing access to shared global resources like NVM and global keycache --- .github/workflows/build-and-test-stress.yml | 35 + .github/workflows/build-and-test.yml | 12 + port/posix/posix_lock.c | 134 ++ port/posix/posix_lock.h | 70 + src/wh_lock.c | 103 + src/wh_nvm.c | 460 ++++- src/wh_server_keystore.c | 741 +++++-- test/Makefile | 31 +- test/tsan.supp | 36 + test/wh_test.c | 16 +- test/wh_test_cert.c | 2 +- test/wh_test_clientserver.c | 4 +- test/wh_test_crypto.c | 2 +- test/wh_test_lock.c | 213 ++ test/wh_test_lock.h | 53 + test/wh_test_log.c | 2 +- test/wh_test_server_img_mgr.c | 2 +- test/wh_test_threadsafe_stress.c | 2068 +++++++++++++++++++ test/wh_test_threadsafe_stress.h | 40 + wolfhsm/wh_lock.h | 149 ++ wolfhsm/wh_nvm.h | 7 + wolfhsm/wh_nvm_internal.h | 91 + wolfhsm/wh_settings.h | 9 + 23 files changed, 4027 insertions(+), 253 deletions(-) create mode 100644 .github/workflows/build-and-test-stress.yml create mode 100644 port/posix/posix_lock.c create mode 100644 port/posix/posix_lock.h create mode 100644 src/wh_lock.c create mode 100644 test/tsan.supp create mode 100644 test/wh_test_lock.c create mode 100644 test/wh_test_lock.h create mode 100644 test/wh_test_threadsafe_stress.c create mode 100644 test/wh_test_threadsafe_stress.h create mode 100644 wolfhsm/wh_lock.h create mode 100644 wolfhsm/wh_nvm_internal.h diff --git a/.github/workflows/build-and-test-stress.yml b/.github/workflows/build-and-test-stress.yml new file mode 100644 index 000000000..bf83ff78f --- /dev/null +++ b/.github/workflows/build-and-test-stress.yml @@ -0,0 +1,35 @@ +name: Build and Test Stress + +on: + push: + branches: [ 'master', 'main', 'release/**' ] + pull_request: + branches: [ '*' ] + +jobs: + build: + + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - uses: actions/checkout@v4 + + # List host CPU info + - name: Host CPU info + run: cat /proc/cpuinfo + + # List compiler version + - name: List compiler version + run: gcc --version + + # pull and build wolfssl + - name: Checkout wolfssl + uses: actions/checkout@v4 + with: + repository: wolfssl/wolfssl + path: wolfssl + + # Build and run stress tests with TSAN + - name: Build and run stress tests with TSAN + run: cd test && make clean && make -j THREADSAFE=1 TSAN=1 STRESS=1 DMA=1 SHE=1 WOLFSSL_DIR=../wolfssl && make TSAN=1 run diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index aa6d4f32a..9e77d6349 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -64,3 +64,15 @@ jobs: # Build and test with DEBUG_VERBOSE=1 (includes DEBUG) - name: Build and test with DEBUG_VERBOSE run: cd test && make clean && make -j DEBUG_VERBOSE=1 WOLFSSL_DIR=../wolfssl && make run + + # Build and test in multithreaded mode with everything enabled + - name: Build and test with THREADSAFE and everything + run: cd test && make clean && make -j THREADSAFE=1 DMA=1 SHE=1 ASAN=1 WOLFSSL_DIR=../wolfssl && make run + + # Build and test in multithreaded mode with everything enabled and wolfCrypt tests + - name: Build and test with THREADSAFE and TESTWOLFCRYPT and everything + run: cd test && make clean && make -j THREADSAFE=1 TESTWOLFCRYPT=1 DMA=1 SHE=1 ASAN=1 WOLFSSL_DIR=../wolfssl && make run + + # Build and test in 
multithreaded mode with everything enabled and wolfCrypt tests with dma + - name: Build and test with THREADSAFE and TESTWOLFCRYPT with DMA + run: cd test && make clean && make -j THREADSAFE=1 TESTWOLFCRYPT=1 TESTWOLFCRYPT_DMA=1 DMA=1 SHE=1 ASAN=1 WOLFSSL_DIR=../wolfssl && make run diff --git a/port/posix/posix_lock.c b/port/posix/posix_lock.c new file mode 100644 index 000000000..898c633f7 --- /dev/null +++ b/port/posix/posix_lock.c @@ -0,0 +1,134 @@ +/* + * Copyright (C) 2024 wolfSSL Inc. + * + * This file is part of wolfHSM. + * + * wolfHSM is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the License, or + * (at your option) any later version. + * + * wolfHSM is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with wolfHSM. If not, see . + */ +/* + * port/posix/posix_lock.c + * + * POSIX pthread_mutex-based implementation of the wolfHSM lock abstraction. + * Each lock context contains a single mutex for one shared resource. + */ + +#include "wolfhsm/wh_settings.h" + +#ifdef WOLFHSM_CFG_THREADSAFE + +#include + +#include "wolfhsm/wh_error.h" +#include "wolfhsm/wh_lock.h" + +#include "port/posix/posix_lock.h" + +int posixLock_Init(void* context, const void* config) +{ + posixLockContext* ctx = (posixLockContext*)context; + const posixLockConfig* cfg = (const posixLockConfig*)config; + const pthread_mutexattr_t* attr = NULL; + int rc; + + if (ctx == NULL) { + return WH_ERROR_BADARGS; + } + + /* Already initialized? */ + if (ctx->initialized) { + return WH_ERROR_OK; + } + + /* Use attributes from config if provided */ + if (cfg != NULL) { + attr = cfg->attr; + } + + rc = pthread_mutex_init(&ctx->mutex, attr); + if (rc != 0) { + return WH_ERROR_ABORTED; + } + + ctx->initialized = 1; + return WH_ERROR_OK; +} + +int posixLock_Cleanup(void* context) +{ + posixLockContext* ctx = (posixLockContext*)context; + int rc; + + if (ctx == NULL) { + return WH_ERROR_BADARGS; + } + + /* Not initialized? */ + if (!ctx->initialized) { + return WH_ERROR_OK; + } + + rc = pthread_mutex_destroy(&ctx->mutex); + if (rc != 0) { + return WH_ERROR_ABORTED; + } + + ctx->initialized = 0; + return WH_ERROR_OK; +} + +int posixLock_Acquire(void* context) +{ + posixLockContext* ctx = (posixLockContext*)context; + int rc; + + if (ctx == NULL) { + return WH_ERROR_BADARGS; + } + + /* Not initialized? */ + if (!ctx->initialized) { + return WH_ERROR_NOTREADY; + } + + rc = pthread_mutex_lock(&ctx->mutex); + if (rc != 0) { + return WH_ERROR_ABORTED; + } + + return WH_ERROR_OK; +} + +int posixLock_Release(void* context) +{ + posixLockContext* ctx = (posixLockContext*)context; + int rc; + + if (ctx == NULL) { + return WH_ERROR_BADARGS; + } + + /* Not initialized? */ + if (!ctx->initialized) { + return WH_ERROR_NOTREADY; + } + + rc = pthread_mutex_unlock(&ctx->mutex); + if (rc != 0) { + return WH_ERROR_ABORTED; + } + + return WH_ERROR_OK; +} + +#endif /* WOLFHSM_CFG_THREADSAFE */ diff --git a/port/posix/posix_lock.h b/port/posix/posix_lock.h new file mode 100644 index 000000000..815088d03 --- /dev/null +++ b/port/posix/posix_lock.h @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2024 wolfSSL Inc. + * + * This file is part of wolfHSM. 
+ * + * wolfHSM is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the License, or + * (at your option) any later version. + * + * wolfHSM is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with wolfHSM. If not, see . + */ +/* + * port/posix/posix_lock.h + * + * POSIX pthread_mutex-based implementation of the wolfHSM lock abstraction. + * Provides thread-safe synchronization for shared resources. + * + * Each posixLockContext instance contains a single mutex. Create one + * context per shared resource (e.g., one for NVM, one for crypto). + */ + +#ifndef PORT_POSIX_POSIX_LOCK_H_ +#define PORT_POSIX_POSIX_LOCK_H_ + +#include "wolfhsm/wh_settings.h" + +#ifdef WOLFHSM_CFG_THREADSAFE + +#include + +#include "wolfhsm/wh_lock.h" + +/* Configuration for POSIX lock backend */ +typedef struct posixLockConfig_t { + const pthread_mutexattr_t* attr; /* Mutex attributes, NULL for defaults */ +} posixLockConfig; + +/* Context structure containing a single mutex for one resource */ +typedef struct posixLockContext_t { + pthread_mutex_t mutex; + int initialized; +} posixLockContext; + +/* Callback functions matching whLockCb interface */ +int posixLock_Init(void* context, const void* config); +int posixLock_Cleanup(void* context); +int posixLock_Acquire(void* context); +int posixLock_Release(void* context); + +/* Convenience macro for callback table initialization */ +/* clang-format off */ +#define POSIX_LOCK_CB \ + { \ + .init = posixLock_Init, \ + .cleanup = posixLock_Cleanup, \ + .acquire = posixLock_Acquire, \ + .release = posixLock_Release, \ + } +/* clang-format on */ + +#endif /* WOLFHSM_CFG_THREADSAFE */ + +#endif /* !PORT_POSIX_POSIX_LOCK_H_ */ diff --git a/src/wh_lock.c b/src/wh_lock.c new file mode 100644 index 000000000..a42ccd15e --- /dev/null +++ b/src/wh_lock.c @@ -0,0 +1,103 @@ +/* + * Copyright (C) 2024 wolfSSL Inc. + * + * This file is part of wolfHSM. + * + * wolfHSM is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the License, or + * (at your option) any later version. + * + * wolfHSM is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with wolfHSM. If not, see . + */ +/* + * src/wh_lock.c + * + * Implementation of platform-agnostic lock abstraction for thread-safe + * access to shared resources. Each lock instance protects exactly one + * resource, allowing arbitrary sharing topologies. 
+ */ + +#include "wolfhsm/wh_settings.h" + +#include /* For NULL */ + +#include "wolfhsm/wh_lock.h" +#include "wolfhsm/wh_error.h" + +#ifdef WOLFHSM_CFG_THREADSAFE + +int wh_Lock_Init(whLock* lock, const whLockConfig* config) +{ + int ret = WH_ERROR_OK; + + if (lock == NULL) { + return WH_ERROR_BADARGS; + } + + /* Allow NULL config for single-threaded mode (no-op locking) */ + if ((config == NULL) || (config->cb == NULL)) { + lock->cb = NULL; + lock->context = NULL; + return WH_ERROR_OK; + } + + lock->cb = config->cb; + lock->context = config->context; + + /* Initialize the lock if callback provided */ + if (lock->cb->init != NULL) { + ret = lock->cb->init(lock->context, config->config); + if (ret != WH_ERROR_OK) { + lock->cb = NULL; + lock->context = NULL; + return ret; + } + } + + return WH_ERROR_OK; +} + +int wh_Lock_Cleanup(whLock* lock) +{ + if (lock == NULL) { + return WH_ERROR_BADARGS; + } + + if ((lock->cb != NULL) && (lock->cb->cleanup != NULL)) { + (void)lock->cb->cleanup(lock->context); + } + + lock->cb = NULL; + lock->context = NULL; + + return WH_ERROR_OK; +} + +int wh_Lock_Acquire(whLock* lock) +{ + /* No-op if lock is NULL or not configured */ + if ((lock == NULL) || (lock->cb == NULL) || (lock->cb->acquire == NULL)) { + return WH_ERROR_OK; + } + + return lock->cb->acquire(lock->context); +} + +int wh_Lock_Release(whLock* lock) +{ + /* No-op if lock is NULL or not configured */ + if ((lock == NULL) || (lock->cb == NULL) || (lock->cb->release == NULL)) { + return WH_ERROR_OK; + } + + return lock->cb->release(lock->context); +} + +#endif /* WOLFHSM_CFG_THREADSAFE */ diff --git a/src/wh_nvm.c b/src/wh_nvm.c index bc32b09e0..df2c1106a 100644 --- a/src/wh_nvm.c +++ b/src/wh_nvm.c @@ -33,16 +33,96 @@ #include "wolfhsm/wh_nvm.h" +#ifdef WOLFHSM_CFG_THREADSAFE +/* Helper functions for NVM locking */ +static int _LockNvm(whNvmContext* context) +{ + return wh_Lock_Acquire(&context->lock); +} + +static int _UnlockNvm(whNvmContext* context) +{ + return wh_Lock_Release(&context->lock); +} +#else +#define _LockNvm(context) (WH_ERROR_OK) +#define _UnlockNvm(context) (WH_ERROR_OK) +#endif /* WOLFHSM_CFG_THREADSAFE */ + +/* + * Internal unlocked callback helpers. + * These call the NVM callbacks directly without acquiring locks. + * Callers MUST hold the NVM lock when using these functions. 
+ */ +static int _GetAvailableUnlocked(whNvmContext* context, + uint32_t* out_avail_size, + whNvmId* out_avail_objects, + uint32_t* out_reclaim_size, + whNvmId* out_reclaim_objects) +{ + if (context->cb->GetAvailable == NULL) { + return WH_ERROR_ABORTED; + } + return context->cb->GetAvailable(context->context, out_avail_size, + out_avail_objects, out_reclaim_size, + out_reclaim_objects); +} + +static int _DestroyObjectsUnlocked(whNvmContext* context, whNvmId list_count, + const whNvmId* id_list) +{ + if (context->cb->DestroyObjects == NULL) { + return WH_ERROR_ABORTED; + } + return context->cb->DestroyObjects(context->context, list_count, id_list); +} + +static int _AddObjectUnlocked(whNvmContext* context, whNvmMetadata* meta, + whNvmSize data_len, const uint8_t* data) +{ + if (context->cb->AddObject == NULL) { + return WH_ERROR_ABORTED; + } + return context->cb->AddObject(context->context, meta, data_len, data); +} + +static int _ListUnlocked(whNvmContext* context, whNvmAccess access, + whNvmFlags flags, whNvmId start_id, whNvmId* out_count, + whNvmId* out_id) +{ + if (context->cb->List == NULL) { + return WH_ERROR_ABORTED; + } + return context->cb->List(context->context, access, flags, start_id, + out_count, out_id); +} + +static int _GetMetadataUnlocked(whNvmContext* context, whNvmId id, + whNvmMetadata* meta) +{ + if (context->cb->GetMetadata == NULL) { + return WH_ERROR_ABORTED; + } + return context->cb->GetMetadata(context->context, id, meta); +} + +static int _ReadUnlocked(whNvmContext* context, whNvmId id, whNvmSize offset, + whNvmSize data_len, uint8_t* data) +{ + if (context->cb->Read == NULL) { + return WH_ERROR_ABORTED; + } + return context->cb->Read(context->context, id, offset, data_len, data); +} + typedef enum { WH_NVM_OP_ADD = 0, WH_NVM_OP_READ, WH_NVM_OP_DESTROY, } whNvmOp; -/* Centralized policy check. - * existing_meta is optionally populated when the object is present. 
*/ -static int wh_Nvm_CheckPolicy(whNvmContext* context, whNvmOp op, whNvmId id, - whNvmMetadata* existing_meta) +static int wh_Nvm_CheckPolicyUnlocked(whNvmContext* context, whNvmOp op, + whNvmId id, whNvmMetadata* existing_meta) { whNvmMetadata meta; int ret; @@ -51,7 +131,7 @@ static int wh_Nvm_CheckPolicy(whNvmContext* context, whNvmOp op, whNvmId id, return WH_ERROR_BADARGS; } - ret = wh_Nvm_GetMetadata(context, id, &meta); + ret = _GetMetadataUnlocked(context, id, &meta); if (ret != WH_ERROR_OK) { return ret; } @@ -87,6 +167,56 @@ static int wh_Nvm_CheckPolicy(whNvmContext* context, whNvmOp op, whNvmId id, return WH_ERROR_OK; } +static int _Nvm_AddObjectCheckedUnlocked(whNvmContext* context, + whNvmMetadata* meta, + whNvmSize data_len, + const uint8_t* data) +{ + int ret; + + ret = wh_Nvm_CheckPolicyUnlocked(context, WH_NVM_OP_ADD, meta->id, NULL); + if (ret != WH_ERROR_OK && ret != WH_ERROR_NOTFOUND) { + return ret; + } + + return _AddObjectUnlocked(context, meta, data_len, data); +} + +static int _Nvm_DestroyObjectsCheckedUnlocked(whNvmContext* context, + whNvmId list_count, + const whNvmId* id_list) +{ + whNvmId i; + int ret; + + if (id_list == NULL && list_count != 0) { + return WH_ERROR_BADARGS; + } + + for (i = 0; i < list_count; i++) { + ret = wh_Nvm_CheckPolicyUnlocked(context, WH_NVM_OP_DESTROY, id_list[i], + NULL); + if (ret != WH_ERROR_OK) { + return ret; + } + } + + return _DestroyObjectsUnlocked(context, list_count, id_list); +} + +static int _Nvm_ReadCheckedUnlocked(whNvmContext* context, whNvmId id, + whNvmSize offset, whNvmSize data_len, + uint8_t* data) +{ + int ret; + + ret = wh_Nvm_CheckPolicyUnlocked(context, WH_NVM_OP_READ, id, NULL); + if (ret != WH_ERROR_OK) { + return ret; + } + + return _ReadUnlocked(context, id, offset, data_len, data); +} int wh_Nvm_Init(whNvmContext* context, const whNvmConfig* config) { @@ -96,6 +226,8 @@ int wh_Nvm_Init(whNvmContext* context, const whNvmConfig* config) return WH_ERROR_BADARGS; } + memset(context, 0, sizeof(*context)); + context->cb = config->cb; context->context = config->context; @@ -104,9 +236,22 @@ int wh_Nvm_Init(whNvmContext* context, const whNvmConfig* config) memset(&context->globalCache, 0, sizeof(context->globalCache)); #endif +#ifdef WOLFHSM_CFG_THREADSAFE + /* Initialize lock (NULL lockConfig = no-op locking) */ + rc = wh_Lock_Init(&context->lock, config->lockConfig); + if (rc != WH_ERROR_OK) { + context->cb = NULL; + context->context = NULL; + return rc; + } +#endif + if (context->cb != NULL && context->cb->Init != NULL) { rc = context->cb->Init(context->context, config->config); if (rc != 0) { +#ifdef WOLFHSM_CFG_THREADSAFE + wh_Lock_Cleanup(&context->lock); +#endif context->cb = NULL; context->context = NULL; } @@ -117,6 +262,8 @@ int wh_Nvm_Init(whNvmContext* context, const whNvmConfig* config) int wh_Nvm_Cleanup(whNvmContext* context) { + int rc; + if ( (context == NULL) || (context->cb == NULL) ) { return WH_ERROR_BADARGS; @@ -129,27 +276,43 @@ int wh_Nvm_Cleanup(whNvmContext* context) /* No callback? 
Return ABORTED */ if (context->cb->Cleanup == NULL) { - return WH_ERROR_ABORTED; + rc = WH_ERROR_ABORTED; + } + else { + rc = context->cb->Cleanup(context->context); } - return context->cb->Cleanup(context->context); + +#ifdef WOLFHSM_CFG_THREADSAFE + wh_Lock_Cleanup(&context->lock); +#endif + + context->cb = NULL; + context->context = NULL; + + return rc; } int wh_Nvm_GetAvailable(whNvmContext* context, uint32_t *out_avail_size, whNvmId *out_avail_objects, uint32_t *out_reclaim_size, whNvmId *out_reclaim_objects) { - if ( (context == NULL) || - (context->cb == NULL) ) { + int rc; + + if ((context == NULL) || (context->cb == NULL)) { return WH_ERROR_BADARGS; } - /* No callback? Return ABORTED */ - if (context->cb->GetAvailable == NULL) { - return WH_ERROR_ABORTED; + rc = _LockNvm(context); + if (rc != WH_ERROR_OK) { + return rc; } - return context->cb->GetAvailable(context->context, - out_avail_size, out_avail_objects, - out_reclaim_size, out_reclaim_objects); + + rc = _GetAvailableUnlocked(context, out_avail_size, out_avail_objects, + out_reclaim_size, out_reclaim_objects); + + (void)_UnlockNvm(context); + + return rc; } int wh_Nvm_AddObjectWithReclaim(whNvmContext* context, whNvmMetadata *meta, @@ -162,14 +325,18 @@ int wh_Nvm_AddObjectWithReclaim(whNvmContext* context, whNvmMetadata *meta, uint16_t reclaimObjects; /* Note that meta and data pointers are validated by AddObject later */ - if (context == NULL) { + if ((context == NULL) || (context->cb == NULL)) { return WH_ERROR_BADARGS; } + ret = _LockNvm(context); + if (ret != WH_ERROR_OK) { + return ret; + } + /* check if we have available object and data space */ - ret = wh_Nvm_GetAvailable(context, - &availableSize, &availableObjects, - &reclaimSize, &reclaimObjects); + ret = _GetAvailableUnlocked(context, &availableSize, &availableObjects, + &reclaimSize, &reclaimObjects); if (ret == 0) { if ( (availableSize < dataLen) || (availableObjects == 0) ) { @@ -177,32 +344,41 @@ int wh_Nvm_AddObjectWithReclaim(whNvmContext* context, whNvmMetadata *meta, if ( (availableSize + reclaimSize >= dataLen) && (availableObjects + reclaimObjects > 0) ) { /* Reclaim will make sufficient space available */ - ret = wh_Nvm_DestroyObjects(context, 0, NULL); + ret = _DestroyObjectsUnlocked(context, 0, NULL); } else { - /* Reclaim witl not help */ + /* Reclaim will not help */ ret = WH_ERROR_NOSPACE; } } } if (ret == 0) { - ret = wh_Nvm_AddObject(context, meta, dataLen, data); + ret = _AddObjectUnlocked(context, meta, dataLen, data); } + + (void)_UnlockNvm(context); + return ret; } int wh_Nvm_AddObject(whNvmContext* context, whNvmMetadata *meta, whNvmSize data_len, const uint8_t* data) { - if ( (context == NULL) || - (context->cb == NULL) ) { + int rc; + + if ((context == NULL) || (context->cb == NULL)) { return WH_ERROR_BADARGS; } - /* No callback? 
Return ABORTED */ - if (context->cb->AddObject == NULL) { - return WH_ERROR_ABORTED; + rc = _LockNvm(context); + if (rc != WH_ERROR_OK) { + return rc; } - return context->cb->AddObject(context->context, meta, data_len, data); + + rc = _AddObjectUnlocked(context, meta, data_len, data); + + (void)_UnlockNvm(context); + + return rc; } int wh_Nvm_AddObjectChecked(whNvmContext* context, whNvmMetadata* meta, @@ -210,96 +386,128 @@ int wh_Nvm_AddObjectChecked(whNvmContext* context, whNvmMetadata* meta, { int ret; - ret = wh_Nvm_CheckPolicy(context, WH_NVM_OP_ADD, meta->id, NULL); - if (ret != WH_ERROR_OK && ret != WH_ERROR_NOTFOUND) { + if ((context == NULL) || (context->cb == NULL)) { + return WH_ERROR_BADARGS; + } + + ret = _LockNvm(context); + if (ret != WH_ERROR_OK) { return ret; } - return wh_Nvm_AddObject(context, meta, data_len, data); + ret = _Nvm_AddObjectCheckedUnlocked(context, meta, data_len, data); + + (void)_UnlockNvm(context); + + return ret; } int wh_Nvm_List(whNvmContext* context, whNvmAccess access, whNvmFlags flags, whNvmId start_id, whNvmId *out_count, whNvmId *out_id) { - if ( (context == NULL) || - (context->cb == NULL) ) { + int rc; + + if ((context == NULL) || (context->cb == NULL)) { return WH_ERROR_BADARGS; } - /* No callback? Return ABORTED */ - if (context->cb->List == NULL) { - return WH_ERROR_ABORTED; + rc = _LockNvm(context); + if (rc != WH_ERROR_OK) { + return rc; } - return context->cb->List(context->context, access, flags, start_id, - out_count, out_id); + + rc = _ListUnlocked(context, access, flags, start_id, out_count, out_id); + + (void)_UnlockNvm(context); + + return rc; } int wh_Nvm_GetMetadata(whNvmContext* context, whNvmId id, whNvmMetadata* meta) { - if ( (context == NULL) || - (context->cb == NULL) ) { + int rc; + + if ((context == NULL) || (context->cb == NULL)) { return WH_ERROR_BADARGS; } - /* No callback? Return ABORTED */ - if (context->cb->GetMetadata == NULL) { - return WH_ERROR_ABORTED; + rc = _LockNvm(context); + if (rc != WH_ERROR_OK) { + return rc; } - return context->cb->GetMetadata(context->context, id, meta); + + rc = _GetMetadataUnlocked(context, id, meta); + + (void)_UnlockNvm(context); + + return rc; } int wh_Nvm_DestroyObjects(whNvmContext* context, whNvmId list_count, const whNvmId* id_list) { - if ( (context == NULL) || - (context->cb == NULL) ) { + int rc; + + if ((context == NULL) || (context->cb == NULL)) { return WH_ERROR_BADARGS; } - /* No callback? 
Return ABORTED */ - if (context->cb->DestroyObjects == NULL) { - return WH_ERROR_ABORTED; + rc = _LockNvm(context); + if (rc != WH_ERROR_OK) { + return rc; } - return context->cb->DestroyObjects(context->context, list_count, id_list); + + rc = _DestroyObjectsUnlocked(context, list_count, id_list); + + (void)_UnlockNvm(context); + + return rc; } int wh_Nvm_DestroyObjectsChecked(whNvmContext* context, whNvmId list_count, const whNvmId* id_list) { - whNvmId i; int ret; - if (id_list == NULL && list_count != 0) { + if ((context == NULL) || (context->cb == NULL)) { return WH_ERROR_BADARGS; } - for (i = 0; i < list_count; i++) { - ret = wh_Nvm_CheckPolicy(context, WH_NVM_OP_DESTROY, id_list[i], NULL); - if (ret != WH_ERROR_OK) { - return ret; - } + ret = _LockNvm(context); + if (ret != WH_ERROR_OK) { + return ret; } - return wh_Nvm_DestroyObjects(context, list_count, id_list); + ret = _Nvm_DestroyObjectsCheckedUnlocked(context, list_count, id_list); + + (void)_UnlockNvm(context); + + return ret; } int wh_Nvm_Read(whNvmContext* context, whNvmId id, whNvmSize offset, whNvmSize data_len, uint8_t* data) { - if ( (context == NULL) || - (context->cb == NULL) ) { + int rc; + + if ((context == NULL) || (context->cb == NULL)) { return WH_ERROR_BADARGS; } - /* No callback? Return ABORTED */ - if (context->cb->Read == NULL) { - return WH_ERROR_ABORTED; + rc = _LockNvm(context); + if (rc != WH_ERROR_OK) { + return rc; } - return context->cb->Read(context->context, id, offset, data_len, data); + + rc = _ReadUnlocked(context, id, offset, data_len, data); + + (void)_UnlockNvm(context); + + return rc; } int wh_Nvm_ReadChecked(whNvmContext* context, whNvmId id, whNvmSize offset, @@ -307,10 +515,130 @@ int wh_Nvm_ReadChecked(whNvmContext* context, whNvmId id, whNvmSize offset, { int ret; - ret = wh_Nvm_CheckPolicy(context, WH_NVM_OP_READ, id, NULL); + if ((context == NULL) || (context->cb == NULL)) { + return WH_ERROR_BADARGS; + } + + ret = _LockNvm(context); if (ret != WH_ERROR_OK) { return ret; } - return wh_Nvm_Read(context, id, offset, data_len, data); + ret = _Nvm_ReadCheckedUnlocked(context, id, offset, data_len, data); + + (void)_UnlockNvm(context); + + return ret; +} + + +#ifdef WOLFHSM_CFG_THREADSAFE + +/* + * Library internal unlocked NVM functions. Exposes unlocked versions of NVM + * functionality for consumption elsewhere in compound operations to prevent + * recursive locking. These assume the caller already holds context->lock. 
+ * + * If WOLFHSM_CFG_THREADSAFE is not defined, these functions are redirected + * to their regular public-facing (locking) counterparts in wh_nvm_internal.h + */ + +int wh_Nvm_GetMetadataUnlocked(whNvmContext* context, whNvmId id, + whNvmMetadata* meta) +{ + if ((context == NULL) || (context->cb == NULL)) { + return WH_ERROR_BADARGS; + } + + return _GetMetadataUnlocked(context, id, meta); +} + +int wh_Nvm_ReadUnlocked(whNvmContext* context, whNvmId id, whNvmSize offset, + whNvmSize data_len, uint8_t* data) +{ + if ((context == NULL) || (context->cb == NULL)) { + return WH_ERROR_BADARGS; + } + + return _ReadUnlocked(context, id, offset, data_len, data); +} + +int wh_Nvm_AddObjectWithReclaimUnlocked(whNvmContext* context, + whNvmMetadata* meta, whNvmSize dataLen, + const uint8_t* data) +{ + int ret; + uint32_t availableSize; + uint32_t reclaimSize; + uint16_t availableObjects; + uint16_t reclaimObjects; + + if ((context == NULL) || (context->cb == NULL)) { + return WH_ERROR_BADARGS; + } + + /* Check if we have available object and data space */ + ret = _GetAvailableUnlocked(context, &availableSize, &availableObjects, + &reclaimSize, &reclaimObjects); + if (ret == 0) { + if ((availableSize < dataLen) || (availableObjects == 0)) { + /* No available space, try to reclaim */ + if ((availableSize + reclaimSize >= dataLen) && + (availableObjects + reclaimObjects > 0)) { + ret = _DestroyObjectsUnlocked(context, 0, NULL); + } + else { + ret = WH_ERROR_NOSPACE; + } + } + } + if (ret == 0) { + ret = _AddObjectUnlocked(context, meta, dataLen, data); + } + + return ret; +} + +int wh_Nvm_DestroyObjectsUnlocked(whNvmContext* context, whNvmId list_count, + const whNvmId* id_list) +{ + if ((context == NULL) || (context->cb == NULL)) { + return WH_ERROR_BADARGS; + } + + return _DestroyObjectsUnlocked(context, list_count, id_list); +} + +int wh_Nvm_AddObjectCheckedUnlocked(whNvmContext* context, whNvmMetadata* meta, + whNvmSize data_len, const uint8_t* data) +{ + if ((context == NULL) || (context->cb == NULL)) { + return WH_ERROR_BADARGS; + } + + return _Nvm_AddObjectCheckedUnlocked(context, meta, data_len, data); +} + +int wh_Nvm_DestroyObjectsCheckedUnlocked(whNvmContext* context, + whNvmId list_count, + const whNvmId* id_list) +{ + if ((context == NULL) || (context->cb == NULL)) { + return WH_ERROR_BADARGS; + } + + return _Nvm_DestroyObjectsCheckedUnlocked(context, list_count, id_list); } + +int wh_Nvm_ReadCheckedUnlocked(whNvmContext* context, whNvmId id, + whNvmSize offset, whNvmSize data_len, + uint8_t* data) +{ + if ((context == NULL) || (context->cb == NULL)) { + return WH_ERROR_BADARGS; + } + + return _Nvm_ReadCheckedUnlocked(context, id, offset, data_len, data); +} + +#endif /* WOLFHSM_CFG_THREADSAFE */ diff --git a/src/wh_server_keystore.c b/src/wh_server_keystore.c index 625246c34..4d96cd278 100644 --- a/src/wh_server_keystore.c +++ b/src/wh_server_keystore.c @@ -42,6 +42,7 @@ #include "wolfhsm/wh_utils.h" #include "wolfhsm/wh_server.h" #include "wolfhsm/wh_log.h" +#include "wolfhsm/wh_nvm_internal.h" #ifdef WOLFHSM_CFG_SHE_EXTENSION #include "wolfhsm/wh_server_she.h" @@ -49,9 +50,10 @@ #include "wolfhsm/wh_server_keystore.h" -static int _FindInCache(whServerContext* server, whKeyId keyId, int* out_index, - int* out_big, uint8_t** out_buffer, - whNvmMetadata** out_meta); + +static int _FindInCacheUnlocked(whServerContext* server, whKeyId keyId, + int* out_index, int* out_big, + uint8_t** out_buffer, whNvmMetadata** out_meta); #ifdef WOLFHSM_CFG_GLOBAL_KEYS /* @@ -63,6 +65,32 @@ static int 
_IsGlobalKey(whKeyId keyId) } #endif /* WOLFHSM_CFG_GLOBAL_KEYS */ +/* + * Thread-safe locking helpers for keystore operations. + * Use the unified NVM lock to protect both NVM and cache access. + * These are no-ops when WOLFHSM_CFG_THREADSAFE is not defined. + */ +#ifdef WOLFHSM_CFG_THREADSAFE +static int _LockKeystore(whServerContext* server) +{ + if (server->nvm != NULL) { + return wh_Lock_Acquire(&server->nvm->lock); + } + return WH_ERROR_OK; +} + +static int _UnlockKeystore(whServerContext* server) +{ + if (server->nvm != NULL) { + return wh_Lock_Release(&server->nvm->lock); + } + return WH_ERROR_OK; +} +#else +#define _LockKeystore(server) (WH_ERROR_OK) +#define _UnlockKeystore(server) (WH_ERROR_OK) +#endif /* WOLFHSM_CFG_THREADSAFE */ + /* * @brief Get the appropriate cache context based on keyId * @@ -91,14 +119,14 @@ typedef enum { WH_KS_OP_REVOKE, } whKsOp; -static int _KeyIsCommitted(whServerContext* server, whKeyId keyId) +static int _KeyIsCommittedUnlocked(whServerContext* server, whKeyId keyId) { int ret; int big; int index; whKeyCacheContext* ctx = _GetCacheContext(server, keyId); - ret = _FindInCache(server, keyId, &index, &big, NULL, NULL); + ret = _FindInCacheUnlocked(server, keyId, &index, &big, NULL, NULL); if (ret != WH_ERROR_OK) { return 0; } @@ -110,10 +138,13 @@ static int _KeyIsCommitted(whServerContext* server, whKeyId keyId) return ctx->bigCache[index].committed; } } + /* Centralized cache/NVM policy: enforce NONMODIFIABLE/NONEXPORTABLE at the - * keystore layer. Usage enforcement remains separate. */ -static int _KeystoreCheckPolicy(whServerContext* server, whKsOp op, - whKeyId keyId) + * keystore layer. Usage enforcement remains separate. + * + * This is an unlocked function - caller must hold the keystore lock. */ +static int _KeystoreCheckPolicyUnlocked(whServerContext* server, whKsOp op, + whKeyId keyId) { whNvmMetadata* cacheMeta = NULL; whNvmMetadata nvmMeta; @@ -127,7 +158,7 @@ static int _KeystoreCheckPolicy(whServerContext* server, whKsOp op, } /* Check cache first */ - ret = _FindInCache(server, keyId, NULL, NULL, NULL, &cacheMeta); + ret = _FindInCacheUnlocked(server, keyId, NULL, NULL, NULL, &cacheMeta); if (ret == WH_ERROR_OK && cacheMeta != NULL) { foundInCache = 1; } @@ -137,7 +168,7 @@ static int _KeystoreCheckPolicy(whServerContext* server, whKsOp op, /* Check NVM if not in cache */ if (!foundInCache) { - ret = wh_Nvm_GetMetadata(server->nvm, keyId, &nvmMeta); + ret = wh_Nvm_GetMetadataUnlocked(server->nvm, keyId, &nvmMeta); if (ret == WH_ERROR_OK) { foundInNvm = 1; } @@ -162,7 +193,7 @@ static int _KeystoreCheckPolicy(whServerContext* server, whKsOp op, break; case WH_KS_OP_EVICT: - if (_KeyIsCommitted(server, keyId)) { + if (_KeyIsCommittedUnlocked(server, keyId)) { /* Committed keys can always be evicted */ break; } @@ -189,12 +220,14 @@ static int _KeystoreCheckPolicy(whServerContext* server, whKsOp op, return WH_ERROR_OK; } + /** * @brief Find a key in the specified cache context */ -static int _FindInKeyCache(whKeyCacheContext* ctx, whKeyId keyId, - int* out_index, int* out_big, uint8_t** out_buffer, - whNvmMetadata** out_meta) +static int _FindInKeyCacheUnlocked(whKeyCacheContext* ctx, whKeyId keyId, + int* out_index, int* out_big, + uint8_t** out_buffer, + whNvmMetadata** out_meta) { int ret = WH_ERROR_NOTFOUND; int i; @@ -253,8 +286,8 @@ static int _EvictSlot(uint8_t* buf, whNvmMetadata* meta) /** * @brief Get an available cache slot from the specified cache context */ -static int _GetKeyCacheSlot(whKeyCacheContext* ctx, uint16_t keySz, 
- uint8_t** outBuf, whNvmMetadata** outMeta) +static int _GetKeyCacheSlotUnlocked(whKeyCacheContext* ctx, uint16_t keySz, + uint8_t** outBuf, whNvmMetadata** outMeta) { int foundIndex = -1; int i; @@ -347,12 +380,13 @@ static int _GetKeyCacheSlot(whKeyCacheContext* ctx, uint16_t keySz, * @brief Evict a key from the specified cache context * zeroes the buffer */ -static int _EvictKeyFromCache(whKeyCacheContext* ctx, whKeyId keyId) +static int _EvictKeyFromCacheUnlocked(whKeyCacheContext* ctx, whKeyId keyId) { whNvmMetadata* meta = NULL; uint8_t* outBuffer = NULL; - int ret = _FindInKeyCache(ctx, keyId, NULL, NULL, &outBuffer, &meta); + int ret = + _FindInKeyCacheUnlocked(ctx, keyId, NULL, NULL, &outBuffer, &meta); if (ret == WH_ERROR_OK && meta != NULL) { return _EvictSlot(outBuffer, meta); @@ -364,12 +398,12 @@ static int _EvictKeyFromCache(whKeyCacheContext* ctx, whKeyId keyId) /** * @brief Mark a cached key as committed */ -static int _MarkKeyCommitted(whKeyCacheContext* ctx, whKeyId keyId, - int committed) +static int _MarkKeyCommittedUnlocked(whKeyCacheContext* ctx, whKeyId keyId, + int committed) { int index = -1; int big = -1; - int ret = _FindInKeyCache(ctx, keyId, &index, &big, NULL, NULL); + int ret = _FindInKeyCacheUnlocked(ctx, keyId, &index, &big, NULL, NULL); if (ret == WH_ERROR_OK) { if (big == 0) { @@ -392,7 +426,7 @@ int wh_Server_KeystoreGetUniqueId(whServerContext* server, whNvmId* inout_id) whKeyId key_id = *inout_id; int type = WH_KEYID_TYPE(key_id); int user = WH_KEYID_USER(key_id); - whNvmId buildId; + whNvmId buildId = WH_KEYID_ERASED; whKeyCacheContext* ctx = _GetCacheContext(server, key_id); @@ -401,6 +435,11 @@ int wh_Server_KeystoreGetUniqueId(whServerContext* server, whNvmId* inout_id) return WH_ERROR_BADARGS; } + ret = _LockKeystore(server); + if (ret != WH_ERROR_OK) { + return ret; + } + /* try every index until we find a unique one, don't worry about capacity */ for (id = WH_KEYID_IDMAX; id > WH_KEYID_ERASED; id--) { /* id loop var is not an input client ID so we don't need to handle the @@ -408,17 +447,17 @@ int wh_Server_KeystoreGetUniqueId(whServerContext* server, whNvmId* inout_id) buildId = WH_MAKE_KEYID(type, user, id); /* Check against cache keys using unified cache functions */ - ret = _FindInKeyCache(ctx, buildId, NULL, NULL, NULL, NULL); + ret = _FindInKeyCacheUnlocked(ctx, buildId, NULL, NULL, NULL, NULL); if (ret == WH_ERROR_OK) { /* Found in cache, try next ID */ continue; } else if (ret != WH_ERROR_NOTFOUND) { - return ret; + goto cleanup; } /* Check if keyId exists in NVM */ - ret = wh_Nvm_GetMetadata(server->nvm, buildId, NULL); + ret = wh_Nvm_GetMetadataUnlocked(server->nvm, buildId, NULL); if (ret == WH_ERROR_NOTFOUND) { /* key doesn't exist in NVM, we found a candidate ID */ found = 1; @@ -426,51 +465,53 @@ int wh_Server_KeystoreGetUniqueId(whServerContext* server, whNvmId* inout_id) } if (ret != WH_ERROR_OK) { - return ret; + goto cleanup; } } if (!found) { - return WH_ERROR_NOSPACE; + ret = WH_ERROR_NOSPACE; + goto cleanup; } /* Return found id */ *inout_id = buildId; - return WH_ERROR_OK; + ret = WH_ERROR_OK; + +cleanup: + (void)_UnlockKeystore(server); + return ret; } +/* Forward declarations for unlocked functions */ +static int _GetCacheSlotUnlocked(whServerContext* server, whKeyId keyId, + uint16_t keySz, uint8_t** outBuf, + whNvmMetadata** outMeta); +static int _GetCacheSlotCheckedUnlocked(whServerContext* server, whKeyId keyId, + uint16_t keySz, uint8_t** outBuf, + whNvmMetadata** outMeta); + /* find a slot to cache a key. 
If key is already there, is evicted first */ int wh_Server_KeystoreGetCacheSlot(whServerContext* server, whKeyId keyId, uint16_t keySz, uint8_t** outBuf, whNvmMetadata** outMeta) { - whKeyCacheContext* ctx; - int ret; - int idx = -1; - int isBig = -1; - uint8_t* buf = NULL; - whNvmMetadata* foundMeta = NULL; + int ret; if (server == NULL || (keySz > WOLFHSM_CFG_SERVER_KEYCACHE_BUFSIZE && keySz > WOLFHSM_CFG_SERVER_KEYCACHE_BIG_BUFSIZE)) { return WH_ERROR_BADARGS; } - - ret = _FindInCache(server, keyId, &idx, &isBig, &buf, &foundMeta); - if (ret == WH_ERROR_OK) { - /* Key is already cached; evict it first */ - ret = wh_Server_KeystoreEvictKey(server, keyId); - if (ret != WH_ERROR_OK) { - return ret; - } - } - else if (ret != WH_ERROR_NOTFOUND) { + ret = _LockKeystore(server); + if (ret != WH_ERROR_OK) { return ret; } - ctx = _GetCacheContext(server, keyId); - return _GetKeyCacheSlot(ctx, keySz, outBuf, outMeta); + ret = _GetCacheSlotUnlocked(server, keyId, keySz, outBuf, outMeta); + + (void)_UnlockKeystore(server); + return ret; } int wh_Server_KeystoreGetCacheSlotChecked(whServerContext* server, @@ -479,19 +520,26 @@ int wh_Server_KeystoreGetCacheSlotChecked(whServerContext* server, whNvmMetadata** outMeta) { int ret; - ret = _KeystoreCheckPolicy(server, WH_KS_OP_CACHE, keyId); - if (ret != WH_ERROR_OK && ret != WH_ERROR_NOTFOUND) { + + ret = _LockKeystore(server); + if (ret != WH_ERROR_OK) { return ret; } - return wh_Server_KeystoreGetCacheSlot(server, keyId, keySz, outBuf, - outMeta); + + ret = _KeystoreCheckPolicyUnlocked(server, WH_KS_OP_CACHE, keyId); + if (ret == WH_ERROR_OK || ret == WH_ERROR_NOTFOUND) { + ret = _GetCacheSlotUnlocked(server, keyId, keySz, outBuf, outMeta); + } + + (void)_UnlockKeystore(server); + return ret; } -static int _KeystoreCacheKey(whServerContext* server, whNvmMetadata* meta, - uint8_t* in, int checked) +static int _KeystoreCacheKeyLocked(whServerContext* server, whNvmMetadata* meta, + uint8_t* in, int checked) { - uint8_t* slotBuf; - whNvmMetadata* slotMeta; + uint8_t* slotBuf = NULL; + whNvmMetadata* slotMeta = NULL; int ret; /* make sure id is valid */ @@ -502,75 +550,131 @@ static int _KeystoreCacheKey(whServerContext* server, whNvmMetadata* meta, return WH_ERROR_BADARGS; } + ret = _LockKeystore(server); + if (ret != WH_ERROR_OK) { + return ret; + } + + /* Use unlocked variants since we already hold the lock */ if (checked) { - ret = wh_Server_KeystoreGetCacheSlotChecked(server, meta->id, meta->len, - &slotBuf, &slotMeta); + ret = _GetCacheSlotCheckedUnlocked(server, meta->id, meta->len, + &slotBuf, &slotMeta); } else { - ret = wh_Server_KeystoreGetCacheSlot(server, meta->id, meta->len, - &slotBuf, &slotMeta); - } - if (ret != WH_ERROR_OK) { - return ret; + ret = _GetCacheSlotUnlocked(server, meta->id, meta->len, &slotBuf, + &slotMeta); } + if (ret == WH_ERROR_OK) { + memcpy(slotBuf, in, meta->len); + memcpy((uint8_t*)slotMeta, (uint8_t*)meta, sizeof(whNvmMetadata)); - memcpy(slotBuf, in, meta->len); - memcpy((uint8_t*)slotMeta, (uint8_t*)meta, sizeof(whNvmMetadata)); - _MarkKeyCommitted(_GetCacheContext(server, meta->id), meta->id, 0); + _MarkKeyCommittedUnlocked(_GetCacheContext(server, meta->id), meta->id, + 0); + + WH_DEBUG_SERVER_VERBOSE("hsmCacheKey: cached keyid=0x%X, len=%u\n", + meta->id, meta->len); + WH_DEBUG_VERBOSE_HEXDUMP("[server] cacheKey: key=", in, meta->len); + } - WH_DEBUG_SERVER_VERBOSE("hsmCacheKey: cached keyid=0x%X, len=%u\n", - meta->id, meta->len); - WH_DEBUG_VERBOSE_HEXDUMP("[server] cacheKey: key=", in, meta->len); + 
(void)_UnlockKeystore(server); - return WH_ERROR_OK; + return ret; } int wh_Server_KeystoreCacheKey(whServerContext* server, whNvmMetadata* meta, uint8_t* in) { - return _KeystoreCacheKey(server, meta, in, 0); + return _KeystoreCacheKeyLocked(server, meta, in, 0); } int wh_Server_KeystoreCacheKeyChecked(whServerContext* server, whNvmMetadata* meta, uint8_t* in) { - return _KeystoreCacheKey(server, meta, in, 1); + return _KeystoreCacheKeyLocked(server, meta, in, 1); } -static int _FindInCache(whServerContext* server, whKeyId keyId, int* out_index, - int* out_big, uint8_t** out_buffer, - whNvmMetadata** out_meta) +static int _FindInCacheUnlocked(whServerContext* server, whKeyId keyId, + int* out_index, int* out_big, + uint8_t** out_buffer, whNvmMetadata** out_meta) { whKeyCacheContext* ctx = _GetCacheContext(server, keyId); - return _FindInKeyCache(ctx, keyId, out_index, out_big, out_buffer, - out_meta); + return _FindInKeyCacheUnlocked(ctx, keyId, out_index, out_big, out_buffer, + out_meta); } -#ifdef WOLFHSM_CFG_KEYWRAP -static int _ExistsInCache(whServerContext* server, whKeyId keyId) +/* + * Internal unlocked keystore functions. + * These assume the caller already holds server->nvm->lock for global keys. + * For use when performing atomic multi-step operations. + * When WOLFHSM_CFG_THREADSAFE is not defined, these use the regular NVM + * functions via macros defined in wh_nvm.h. + */ + +/* Unlocked version of wh_Server_KeystoreEvictKey. + * Uses _EvictKeyFromCacheUnlocked which doesn't need the NVM lock. */ +static int _EvictKeyUnlocked(whServerContext* server, whNvmId keyId) { - int ret = 0; - int foundIndex = -1; - int foundBigIndex = -1; - whNvmMetadata* tmpMeta; - uint8_t* tmpBuf; + whKeyCacheContext* ctx; - ret = _FindInCache(server, keyId, &foundIndex, &foundBigIndex, &tmpBuf, - &tmpMeta); + if ((server == NULL) || WH_KEYID_ISERASED(keyId)) { + return WH_ERROR_BADARGS; + } - if (ret != WH_ERROR_OK) { - /* Key doesn't exist in the cache */ - return 0; + ctx = _GetCacheContext(server, keyId); + return _EvictKeyFromCacheUnlocked(ctx, keyId); +} + +/* Unlocked version of wh_Server_KeystoreGetCacheSlot. + * Uses _EvictKeyUnlocked instead of the locked public version. */ +static int _GetCacheSlotUnlocked(whServerContext* server, whKeyId keyId, + uint16_t keySz, uint8_t** outBuf, + whNvmMetadata** outMeta) +{ + whKeyCacheContext* ctx; + int ret; + int idx = -1; + int isBig = -1; + uint8_t* buf = NULL; + whNvmMetadata* foundMeta = NULL; + + if (server == NULL || (keySz > WOLFHSM_CFG_SERVER_KEYCACHE_BUFSIZE && + keySz > WOLFHSM_CFG_SERVER_KEYCACHE_BIG_BUFSIZE)) { + return WH_ERROR_BADARGS; } - /* Key exists in the cache */ - return 1; + ret = _FindInCacheUnlocked(server, keyId, &idx, &isBig, &buf, &foundMeta); + if (ret == WH_ERROR_OK) { + /* Key is already cached; evict it first */ + ret = _EvictKeyUnlocked(server, keyId); + if (ret != WH_ERROR_OK) { + return ret; + } + } + else if (ret != WH_ERROR_NOTFOUND) { + return ret; + } + + ctx = _GetCacheContext(server, keyId); + return _GetKeyCacheSlotUnlocked(ctx, keySz, outBuf, outMeta); } -#endif /* WOLFHSM_CFG_KEYWRAP */ -/* try to put the specified key into cache if it isn't already, return pointers - * to meta and the cached data*/ -int wh_Server_KeystoreFreshenKey(whServerContext* server, whKeyId keyId, - uint8_t** outBuf, whNvmMetadata** outMeta) +/* Unlocked version of wh_Server_KeystoreGetCacheSlotChecked. + * Uses _KeystoreCheckPolicyUnlocked and _GetCacheSlotUnlocked. 
*/ +static int _GetCacheSlotCheckedUnlocked(whServerContext* server, whKeyId keyId, + uint16_t keySz, uint8_t** outBuf, + whNvmMetadata** outMeta) +{ + int ret; + ret = _KeystoreCheckPolicyUnlocked(server, WH_KS_OP_CACHE, keyId); + if (ret != WH_ERROR_OK && ret != WH_ERROR_NOTFOUND) { + return ret; + } + return _GetCacheSlotUnlocked(server, keyId, keySz, outBuf, outMeta); +} + +/* Unlocked version of wh_Server_KeystoreFreshenKey. + * Uses wh_Nvm_*Unlocked functions for NVM access. */ +static int _FreshenKeyUnlocked(whServerContext* server, whKeyId keyId, + uint8_t** outBuf, whNvmMetadata** outMeta) { int ret = 0; int foundIndex = -1; @@ -589,13 +693,13 @@ int wh_Server_KeystoreFreshenKey(whServerContext* server, whKeyId keyId, cacheBufOut = (outBuf != NULL) ? outBuf : (uint8_t**)&cacheBufLocal; cacheMetaOut = (outMeta != NULL) ? outMeta : &cacheMetaLocal; - ret = _FindInCache(server, keyId, &foundIndex, &foundBigIndex, cacheBufOut, - cacheMetaOut); + ret = _FindInCacheUnlocked(server, keyId, &foundIndex, &foundBigIndex, + cacheBufOut, cacheMetaOut); if (ret != WH_ERROR_NOTFOUND) { return ret; } - /* key not in the cache */ + /* Key not in the cache */ /* For wrapped keys, just probe the cache and error if not found. We * don't support automatically unwrapping and caching outside of the @@ -605,21 +709,22 @@ int wh_Server_KeystoreFreshenKey(whServerContext* server, whKeyId keyId, } /* Not in cache. Check if it is in NVM */ - ret = wh_Nvm_GetMetadata(server->nvm, keyId, tmpMeta); + ret = wh_Nvm_GetMetadataUnlocked(server->nvm, keyId, tmpMeta); if (ret == WH_ERROR_OK) { /* Key found in NVM, get a free cache slot */ - ret = wh_Server_KeystoreGetCacheSlot(server, keyId, tmpMeta->len, - cacheBufOut, cacheMetaOut); + ret = _GetCacheSlotUnlocked(server, keyId, tmpMeta->len, cacheBufOut, + cacheMetaOut); if (ret == WH_ERROR_OK) { /* Read the key from NVM into the cache slot */ - ret = - wh_Nvm_Read(server->nvm, keyId, 0, tmpMeta->len, *cacheBufOut); + ret = wh_Nvm_ReadUnlocked(server->nvm, keyId, 0, tmpMeta->len, + *cacheBufOut); if (ret == WH_ERROR_OK) { /* Copy the metadata to the cache slot if key read is - * successful*/ + * successful */ memcpy((uint8_t*)*cacheMetaOut, (uint8_t*)tmpMeta, sizeof(whNvmMetadata)); - _MarkKeyCommitted(_GetCacheContext(server, keyId), keyId, 1); + _MarkKeyCommittedUnlocked(_GetCacheContext(server, keyId), + keyId, 1); } } } @@ -627,11 +732,74 @@ int wh_Server_KeystoreFreshenKey(whServerContext* server, whKeyId keyId, return ret; } -/* Reads key from cache or NVM. If keyId is a wrapped key will attempt to read - * from cache but NOT from NVM */ -int wh_Server_KeystoreReadKey(whServerContext* server, whKeyId keyId, - whNvmMetadata* outMeta, uint8_t* out, - uint32_t* outSz) +/* Unlocked version of wh_Server_KeystoreCommitKey. + * Uses wh_Nvm_AddObjectWithReclaimUnlocked for NVM access. 
*/ +static int _CommitKeyUnlocked(whServerContext* server, whNvmId keyId) +{ + uint8_t* slotBuf; + whNvmMetadata* slotMeta; + whNvmSize size; + int ret; + whKeyCacheContext* ctx; + + if ((server == NULL) || WH_KEYID_ISERASED(keyId)) { + return WH_ERROR_BADARGS; + } + + if (WH_KEYID_TYPE(keyId) == WH_KEYTYPE_WRAPPED) { + return WH_ERROR_ABORTED; + } + + ctx = _GetCacheContext(server, keyId); + + /* Find the key in the appropriate cache context */ + ret = _FindInKeyCacheUnlocked(ctx, keyId, NULL, NULL, &slotBuf, &slotMeta); + if (ret == WH_ERROR_OK) { + size = slotMeta->len; + ret = wh_Nvm_AddObjectWithReclaimUnlocked(server->nvm, slotMeta, size, + slotBuf); + if (ret == 0) { + (void)_MarkKeyCommittedUnlocked(ctx, keyId, 1); + } + } + + return ret; +} + +/* Unlocked version of _KeystoreCacheKeyLocked. + * Uses _GetCacheSlotUnlocked to avoid re-acquiring the lock. */ +static int _CacheKeyUnlocked(whServerContext* server, whNvmMetadata* meta, + uint8_t* in) +{ + uint8_t* slotBuf; + whNvmMetadata* slotMeta; + int ret; + + if ((server == NULL) || (meta == NULL) || (in == NULL) || + WH_KEYID_ISERASED(meta->id) || + ((meta->len > WOLFHSM_CFG_SERVER_KEYCACHE_BUFSIZE) && + (meta->len > WOLFHSM_CFG_SERVER_KEYCACHE_BIG_BUFSIZE))) { + return WH_ERROR_BADARGS; + } + + ret = + _GetCacheSlotUnlocked(server, meta->id, meta->len, &slotBuf, &slotMeta); + if (ret != WH_ERROR_OK) { + return ret; + } + + memcpy(slotBuf, in, meta->len); + memcpy((uint8_t*)slotMeta, (uint8_t*)meta, sizeof(whNvmMetadata)); + _MarkKeyCommittedUnlocked(_GetCacheContext(server, meta->id), meta->id, 0); + + return WH_ERROR_OK; +} + +/* Unlocked version of wh_Server_KeystoreReadKey. + * Uses unlocked NVM functions and _CacheKeyUnlocked. */ +static int _ReadKeyUnlocked(whServerContext* server, whKeyId keyId, + whNvmMetadata* outMeta, uint8_t* out, + uint32_t* outSz) { int ret = 0; whNvmMetadata meta[1]; @@ -645,11 +813,13 @@ int wh_Server_KeystoreReadKey(whServerContext* server, whKeyId keyId, } /* Check the cache using unified function */ - ret = _FindInCache(server, keyId, NULL, NULL, &cacheBuffer, &cacheMeta); + ret = _FindInCacheUnlocked(server, keyId, NULL, NULL, &cacheBuffer, + &cacheMeta); if (ret == WH_ERROR_OK) { /* Found in cache */ - if (cacheMeta->len > *outSz) + if (cacheMeta->len > *outSz) { return WH_ERROR_NOSPACE; + } if (outMeta != NULL) { memcpy((uint8_t*)outMeta, (uint8_t*)cacheMeta, sizeof(whNvmMetadata)); @@ -669,7 +839,7 @@ int wh_Server_KeystoreReadKey(whServerContext* server, whKeyId keyId, } /* Not in cache, try to read the metadata from NVM */ - ret = wh_Nvm_GetMetadata(server->nvm, keyId, meta); + ret = wh_Nvm_GetMetadataUnlocked(server->nvm, keyId, meta); if (ret == 0) { /* set outSz */ *outSz = meta->len; @@ -678,11 +848,11 @@ int wh_Server_KeystoreReadKey(whServerContext* server, whKeyId keyId, memcpy((uint8_t*)outMeta, (uint8_t*)meta, sizeof(meta)); /* read the object */ if (out != NULL) - ret = wh_Nvm_Read(server->nvm, keyId, 0, *outSz, out); + ret = wh_Nvm_ReadUnlocked(server->nvm, keyId, 0, *outSz, out); } /* cache key if free slot, will only kick out other committed keys */ if (ret == 0 && out != NULL) { - (void)wh_Server_KeystoreCacheKey(server, meta, out); + (void)_CacheKeyUnlocked(server, meta, out); } #ifdef WOLFHSM_CFG_SHE_EXTENSION /* use empty key of zeros if we couldn't find the master ecu key */ @@ -703,17 +873,96 @@ int wh_Server_KeystoreReadKey(whServerContext* server, whKeyId keyId, return ret; } +#ifdef WOLFHSM_CFG_KEYWRAP +static int _ExistsInCacheUnlocked(whServerContext* server, whKeyId 
keyId) +{ + int ret = 0; + int foundIndex = -1; + int foundBigIndex = -1; + whNvmMetadata* tmpMeta; + uint8_t* tmpBuf; + + ret = _FindInCacheUnlocked(server, keyId, &foundIndex, &foundBigIndex, + &tmpBuf, &tmpMeta); + + if (ret != WH_ERROR_OK) { + /* Key doesn't exist in the cache */ + return 0; + } + + /* Key exists in the cache */ + return 1; +} +#endif /* WOLFHSM_CFG_KEYWRAP */ + +/* try to put the specified key into cache if it isn't already, return pointers + * to meta and the cached data*/ +int wh_Server_KeystoreFreshenKey(whServerContext* server, whKeyId keyId, + uint8_t** outBuf, whNvmMetadata** outMeta) +{ + int ret; + + if ((server == NULL) || WH_KEYID_ISERASED(keyId)) { + return WH_ERROR_BADARGS; + } + + ret = _LockKeystore(server); + if (ret != WH_ERROR_OK) { + return ret; + } + + ret = _FreshenKeyUnlocked(server, keyId, outBuf, outMeta); + + (void)_UnlockKeystore(server); + + return ret; +} + +/* Reads key from cache or NVM. If keyId is a wrapped key will attempt to read + * from cache but NOT from NVM */ +int wh_Server_KeystoreReadKey(whServerContext* server, whKeyId keyId, + whNvmMetadata* outMeta, uint8_t* out, + uint32_t* outSz) +{ + int ret; + + if ((server == NULL) || (outSz == NULL) || + (WH_KEYID_ISERASED(keyId) && + (WH_KEYID_TYPE(keyId) != WH_KEYTYPE_SHE))) { + return WH_ERROR_BADARGS; + } + + ret = _LockKeystore(server); + if (ret != WH_ERROR_OK) { + return ret; + } + + ret = _ReadKeyUnlocked(server, keyId, outMeta, out, outSz); + + (void)_UnlockKeystore(server); + + return ret; +} + int wh_Server_KeystoreReadKeyChecked(whServerContext* server, whKeyId keyId, whNvmMetadata* outMeta, uint8_t* out, uint32_t* outSz) { int ret; - ret = _KeystoreCheckPolicy(server, WH_KS_OP_EXPORT, keyId); + ret = _LockKeystore(server); if (ret != WH_ERROR_OK) { return ret; } - return wh_Server_KeystoreReadKey(server, keyId, outMeta, out, outSz); + + ret = _KeystoreCheckPolicyUnlocked(server, WH_KS_OP_EXPORT, keyId); + if (ret == WH_ERROR_OK) { + ret = _ReadKeyUnlocked(server, keyId, outMeta, out, outSz); + } + + (void)_UnlockKeystore(server); + + return ret; } int wh_Server_KeystoreEvictKey(whServerContext* server, whNvmId keyId) @@ -725,17 +974,24 @@ int wh_Server_KeystoreEvictKey(whServerContext* server, whNvmId keyId) return WH_ERROR_BADARGS; } + ret = _LockKeystore(server); + if (ret != WH_ERROR_OK) { + return ret; + } + /* Get the appropriate cache context for this key */ ctx = _GetCacheContext(server, keyId); /* Use the unified evict function */ - ret = _EvictKeyFromCache(ctx, keyId); + ret = _EvictKeyFromCacheUnlocked(ctx, keyId); if (ret == 0) { WH_DEBUG_SERVER_VERBOSE("wh_Server_KeystoreEvictKey: evicted keyid=0x%X\n", keyId); } + (void)_UnlockKeystore(server); + return ret; } @@ -743,20 +999,24 @@ int wh_Server_KeystoreEvictKeyChecked(whServerContext* server, whNvmId keyId) { int ret; - ret = _KeystoreCheckPolicy(server, WH_KS_OP_EVICT, keyId); + ret = _LockKeystore(server); if (ret != WH_ERROR_OK) { return ret; } - return wh_Server_KeystoreEvictKey(server, keyId); + + ret = _KeystoreCheckPolicyUnlocked(server, WH_KS_OP_EVICT, keyId); + if (ret == WH_ERROR_OK) { + ret = _EvictKeyUnlocked(server, keyId); + } + + (void)_UnlockKeystore(server); + + return ret; } int wh_Server_KeystoreCommitKey(whServerContext* server, whNvmId keyId) { - uint8_t* slotBuf; - whNvmMetadata* slotMeta; - whNvmSize size; - int ret; - whKeyCacheContext* ctx; + int ret; if ((server == NULL) || WH_KEYID_ISERASED(keyId)) { return WH_ERROR_BADARGS; @@ -766,19 +1026,15 @@ int 
wh_Server_KeystoreCommitKey(whServerContext* server, whNvmId keyId) return WH_ERROR_ABORTED; } - /* Get the appropriate cache context for this key */ - ctx = _GetCacheContext(server, keyId); - - /* Find the key in the appropriate cache context obtained above. */ - ret = _FindInKeyCache(ctx, keyId, NULL, NULL, &slotBuf, &slotMeta); - if (ret == WH_ERROR_OK) { - size = slotMeta->len; - ret = wh_Nvm_AddObjectWithReclaim(server->nvm, slotMeta, size, slotBuf); - if (ret == 0) { - /* Mark key as committed using unified function */ - (void)_MarkKeyCommitted(ctx, keyId, 1); - } + ret = _LockKeystore(server); + if (ret != WH_ERROR_OK) { + return ret; } + + ret = _CommitKeyUnlocked(server, keyId); + + (void)_UnlockKeystore(server); + return ret; } @@ -786,15 +1042,25 @@ int wh_Server_KeystoreCommitKeyChecked(whServerContext* server, whNvmId keyId) { int ret; - ret = _KeystoreCheckPolicy(server, WH_KS_OP_COMMIT, keyId); + ret = _LockKeystore(server); if (ret != WH_ERROR_OK) { return ret; } - return wh_Server_KeystoreCommitKey(server, keyId); + + ret = _KeystoreCheckPolicyUnlocked(server, WH_KS_OP_COMMIT, keyId); + if (ret == WH_ERROR_OK) { + ret = _CommitKeyUnlocked(server, keyId); + } + + (void)_UnlockKeystore(server); + + return ret; } int wh_Server_KeystoreEraseKey(whServerContext* server, whNvmId keyId) { + int ret; + if ((server == NULL) || (WH_KEYID_ISERASED(keyId))) { return WH_ERROR_BADARGS; } @@ -803,15 +1069,27 @@ int wh_Server_KeystoreEraseKey(whServerContext* server, whNvmId keyId) return WH_ERROR_ABORTED; } - /* remove the key from the cache if present */ - (void)wh_Server_KeystoreEvictKey(server, keyId); + ret = _LockKeystore(server); + if (ret != WH_ERROR_OK) { + return ret; + } + + /* Remove the key from the cache if present */ + (void)_EvictKeyUnlocked(server, keyId); - /* destroy the object */ - return wh_Nvm_DestroyObjects(server->nvm, 1, &keyId); + /* Destroy the object */ + ret = wh_Nvm_DestroyObjectsUnlocked(server->nvm, 1, &keyId); + + (void)_UnlockKeystore(server); + + return ret; } int wh_Server_KeystoreEraseKeyChecked(whServerContext* server, whNvmId keyId) { + int ret; + whNvmMetadata meta; + if ((server == NULL) || (WH_KEYID_ISERASED(keyId))) { return WH_ERROR_BADARGS; } @@ -820,11 +1098,39 @@ int wh_Server_KeystoreEraseKeyChecked(whServerContext* server, whNvmId keyId) return WH_ERROR_ABORTED; } - /* remove the key from the cache if present */ - (void)wh_Server_KeystoreEvictKeyChecked(server, keyId); + ret = _LockKeystore(server); + if (ret != WH_ERROR_OK) { + return ret; + } + + /* Check eviction policy first */ + ret = _KeystoreCheckPolicyUnlocked(server, WH_KS_OP_EVICT, keyId); + if (ret != WH_ERROR_OK && ret != WH_ERROR_NOTFOUND) { + goto cleanup; + } + + /* Check NVM destroy policy (NONMODIFIABLE/NONDESTROYABLE enforcement) */ + ret = wh_Nvm_GetMetadataUnlocked(server->nvm, keyId, &meta); + if (ret == WH_ERROR_OK) { + if (meta.flags & + (WH_NVM_FLAGS_NONMODIFIABLE | WH_NVM_FLAGS_NONDESTROYABLE)) { + ret = WH_ERROR_ACCESS; + goto cleanup; + } + } + else if (ret != WH_ERROR_NOTFOUND) { + goto cleanup; + } + + /* Remove the key from the cache if present */ + (void)_EvictKeyUnlocked(server, keyId); + + /* Destroy the object */ + ret = wh_Nvm_DestroyObjectsUnlocked(server->nvm, 1, &keyId); - /* destroy the object */ - return wh_Nvm_DestroyObjectsChecked(server->nvm, 1, &keyId); +cleanup: + (void)_UnlockKeystore(server); + return ret; } static void _revokeKey(whNvmMetadata* meta) @@ -856,41 +1162,52 @@ int wh_Server_KeystoreRevokeKey(whServerContext* server, whNvmId keyId) 
return WH_ERROR_BADARGS; } - ret = _KeystoreCheckPolicy(server, WH_KS_OP_REVOKE, keyId); + ret = _LockKeystore(server); if (ret != WH_ERROR_OK) { return ret; } - ret = wh_Nvm_GetMetadata(server->nvm, keyId, NULL); + ret = _KeystoreCheckPolicyUnlocked(server, WH_KS_OP_REVOKE, keyId); + if (ret != WH_ERROR_OK) { + goto cleanup; + } + + /* Check if key is in NVM */ + ret = wh_Nvm_GetMetadataUnlocked(server->nvm, keyId, NULL); if (ret == WH_ERROR_OK) { isInNvm = 1; } else if (ret != WH_ERROR_NOTFOUND) { - return ret; + goto cleanup; } - /* be sure to have the key in the cache */ - ret = wh_Server_KeystoreFreshenKey(server, keyId, &cacheBuf, &cacheMeta); + /* Freshen key into cache */ + ret = _FreshenKeyUnlocked(server, keyId, &cacheBuf, &cacheMeta); if (ret != WH_ERROR_OK) { - return ret; + goto cleanup; } - /* if already revoked and committed, nothing to do */ - if (_isKeyRevoked(cacheMeta) && _KeyIsCommitted(server, keyId)) { - return WH_ERROR_OK; + /* If already revoked and committed, nothing to do */ + if (_isKeyRevoked(cacheMeta) && _KeyIsCommittedUnlocked(server, keyId)) { + ret = WH_ERROR_OK; + goto cleanup; } /* Revoke the key by updating its metadata */ _revokeKey(cacheMeta); - /* commit the changes */ + + /* Commit the changes */ if (isInNvm) { - ret = wh_Nvm_AddObjectWithReclaim(server->nvm, cacheMeta, - cacheMeta->len, cacheBuf); + ret = wh_Nvm_AddObjectWithReclaimUnlocked(server->nvm, cacheMeta, + cacheMeta->len, cacheBuf); if (ret == WH_ERROR_OK) { - _MarkKeyCommitted(_GetCacheContext(server, keyId), keyId, 1); + _MarkKeyCommittedUnlocked(_GetCacheContext(server, keyId), keyId, + 1); } } +cleanup: + (void)_UnlockKeystore(server); return ret; } @@ -1469,7 +1786,7 @@ static int _HandleKeyUnwrapAndCacheRequest( #endif /* WOLFHSM_CFG_GLOBAL_KEYS */ /* Ensure a key with the unwrapped ID does not already exist in cache */ - if (_ExistsInCache(server, metadata.id)) { + if (_ExistsInCacheUnlocked(server, metadata.id)) { return WH_ERROR_ABORTED; } @@ -2092,20 +2409,29 @@ int _KeystoreCacheKeyDma(whServerContext* server, whNvmMetadata* meta, uint64_t keyAddr, int checked) { int ret; - uint8_t* buffer; - whNvmMetadata* slotMeta; + uint8_t* buffer = NULL; + whNvmMetadata* slotMeta = NULL; - /* Get a cache slot */ + if ((server == NULL) || (meta == NULL)) { + return WH_ERROR_BADARGS; + } + + ret = _LockKeystore(server); + if (ret != WH_ERROR_OK) { + return ret; + } + + /* Get a cache slot, optionally checking if necessary */ if (checked) { - ret = wh_Server_KeystoreGetCacheSlotChecked(server, meta->id, meta->len, - &buffer, &slotMeta); + ret = _GetCacheSlotCheckedUnlocked(server, meta->id, meta->len, &buffer, + &slotMeta); } else { - ret = wh_Server_KeystoreGetCacheSlot(server, meta->id, meta->len, - &buffer, &slotMeta); + ret = _GetCacheSlotUnlocked(server, meta->id, meta->len, &buffer, + &slotMeta); } if (ret != 0) { - return ret; + goto cleanup; } /* Copy metadata */ @@ -2120,9 +2446,12 @@ int _KeystoreCacheKeyDma(whServerContext* server, whNvmMetadata* meta, slotMeta->id = WH_KEYID_ERASED; } else { - _MarkKeyCommitted(_GetCacheContext(server, meta->id), meta->id, 0); + _MarkKeyCommittedUnlocked(_GetCacheContext(server, meta->id), meta->id, + 0); } +cleanup: + (void)_UnlockKeystore(server); return ret; } int wh_Server_KeystoreCacheKeyDma(whServerContext* server, whNvmMetadata* meta, @@ -2145,14 +2474,24 @@ int wh_Server_KeystoreExportKeyDma(whServerContext* server, whKeyId keyId, uint8_t* buffer; whNvmMetadata* cacheMeta; - /* bring key in cache */ - ret = 
wh_Server_KeystoreFreshenKey(server, keyId, &buffer, &cacheMeta); + if ((server == NULL) || (outMeta == NULL)) { + return WH_ERROR_BADARGS; + } + + ret = _LockKeystore(server); if (ret != WH_ERROR_OK) { return ret; } + /* bring key in cache */ + ret = _FreshenKeyUnlocked(server, keyId, &buffer, &cacheMeta); + if (ret != WH_ERROR_OK) { + goto cleanup; + } + if (keySz < cacheMeta->len) { - return WH_ERROR_NOSPACE; + ret = WH_ERROR_NOSPACE; + goto cleanup; } memcpy(outMeta, cacheMeta, sizeof(whNvmMetadata)); @@ -2161,6 +2500,8 @@ int wh_Server_KeystoreExportKeyDma(whServerContext* server, whKeyId keyId, ret = whServerDma_CopyToClient(server, keyAddr, buffer, outMeta->len, (whServerDmaFlags){0}); +cleanup: + (void)_UnlockKeystore(server); return ret; } int wh_Server_KeystoreExportKeyDmaChecked(whServerContext* server, @@ -2168,14 +2509,44 @@ int wh_Server_KeystoreExportKeyDmaChecked(whServerContext* server, uint64_t keySz, whNvmMetadata* outMeta) { - int ret; + int ret; + uint8_t* buffer; + whNvmMetadata* cacheMeta; - ret = _KeystoreCheckPolicy(server, WH_KS_OP_EXPORT, keyId); + if ((server == NULL) || (outMeta == NULL)) { + return WH_ERROR_BADARGS; + } + + ret = _LockKeystore(server); if (ret != WH_ERROR_OK) { return ret; } - return wh_Server_KeystoreExportKeyDma(server, keyId, keyAddr, keySz, - outMeta); + + ret = _KeystoreCheckPolicyUnlocked(server, WH_KS_OP_EXPORT, keyId); + if (ret != WH_ERROR_OK) { + goto cleanup; + } + + /* Freshen key into cache */ + ret = _FreshenKeyUnlocked(server, keyId, &buffer, &cacheMeta); + if (ret != WH_ERROR_OK) { + goto cleanup; + } + + if (keySz < cacheMeta->len) { + ret = WH_ERROR_NOSPACE; + goto cleanup; + } + + memcpy(outMeta, cacheMeta, sizeof(whNvmMetadata)); + + /* Copy key data using DMA */ + ret = whServerDma_CopyToClient(server, keyAddr, buffer, outMeta->len, + (whServerDmaFlags){0}); + +cleanup: + (void)_UnlockKeystore(server); + return ret; } #endif /* WOLFHSM_CFG_DMA */ @@ -2208,20 +2579,32 @@ int wh_Server_KeystoreFindEnforceKeyUsage(whServerContext* server, { int ret; whNvmMetadata* meta = NULL; + whNvmMetadata metaCopy; /* Validate input parameters */ if (server == NULL) { return WH_ERROR_BADARGS; } + ret = _LockKeystore(server); + if (ret != WH_ERROR_OK) { + return ret; + } + /* Freshen the key to obtain the metadata */ - ret = wh_Server_KeystoreFreshenKey(server, keyId, NULL, &meta); + ret = _FreshenKeyUnlocked(server, keyId, NULL, &meta); if (ret != WH_ERROR_OK) { + (void)_UnlockKeystore(server); return ret; } - /* Enforce the usage policy with the obtained metadata */ - return wh_Server_KeystoreEnforceKeyUsage(meta, requiredUsage); + /* Copy metadata while holding the lock to avoid stale reads */ + memcpy((uint8_t*)&metaCopy, (uint8_t*)meta, sizeof(metaCopy)); + + (void)_UnlockKeystore(server); + + /* Enforce the usage policy with the copied metadata */ + return wh_Server_KeystoreEnforceKeyUsage(&metaCopy, requiredUsage); } #endif /* !WOLFHSM_CFG_NO_CRYPTO && WOLFHSM_CFG_ENABLE_SERVER */ diff --git a/test/Makefile b/test/Makefile index f3dd69719..e2ac499cb 100644 --- a/test/Makefile +++ b/test/Makefile @@ -93,6 +93,25 @@ ifeq ($(COVERAGE),1) LDFLAGS += --coverage endif +# Enable threadsafe mode, adding lock protection to shared structures +ifeq ($(THREADSAFE),1) + DEF += -DWOLFHSM_CFG_THREADSAFE +endif + +# Add thread sanitizer option (mutually exclusive with ASAN) +ifeq ($(TSAN),1) + ifeq ($(ASAN),1) + $(error TSAN and ASAN cannot be used together) + endif + CFLAGS += -fsanitize=thread -fPIE + DEF += -DWOLFSSL_NO_FENCE + ifeq 
($(STRESS),1) + # Force TSAN annotations on in stress test + DEF += -DWOLFHSM_CFG_TEST_STRESS_TSAN + endif + LDFLAGS += -fsanitize=thread -pie +endif + ## wolfSSL defines ifeq ($(DEBUG_WOLFSSL),1) DEF += -DDEBUG_WOLFSSL @@ -103,6 +122,7 @@ ifeq ($(NOCRYPTO),1) DEF += -DWOLFHSM_CFG_NO_CRYPTO endif +# Enable scan-build ifeq ($(SCAN),1) SCAN_LOG = scan_test.log # Default target @@ -124,6 +144,11 @@ ifeq ($(TLS),1) DEF += -DWOLFHSM_CFG_TLS endif +# Support stress test mode (only runs thread safety stress test) +ifeq ($(STRESS),1) + DEF += -DWOLFHSM_CFG_TEST_STRESS +endif + ## Project defines # Option to build wolfcrypt tests ifeq ($(TESTWOLFCRYPT),1) @@ -193,7 +218,7 @@ SRC_C += $(wildcard $(WOLFHSM_DIR)/src/*.c) # wolfHSM port/HAL code SRC_C += $(wildcard $(WOLFHSM_PORT_DIR)/*.c) -# Project +# Test source files SRC_C += $(wildcard $(PROJECT_DIR)/*.c) @@ -274,9 +299,13 @@ clean: run: ifeq (,$(wildcard $(BUILD_DIR)/$(BIN).elf)) $(error $(BUILD_DIR)/$(BIN).elf not found. Try: make) +else +ifeq ($(TSAN),1) + TSAN_OPTIONS="suppressions=$(PROJECT_DIR)/tsan.supp" $(BUILD_DIR)/$(BIN).elf else $(BUILD_DIR)/$(BIN).elf endif +endif # Coverage target: build with coverage, run tests, and generate report coverage: diff --git a/test/tsan.supp b/test/tsan.supp new file mode 100644 index 000000000..3bf047d53 --- /dev/null +++ b/test/tsan.supp @@ -0,0 +1,36 @@ +# ThreadSanitizer Suppression File for wolfHSM +# +# This file suppresses known false positives and external library issues. +# Use with: TSAN_OPTIONS="suppressions=tsan.supp" ./wh_test_stress.elf + +# ============================================================================= +# Category 1: wolfSSL/wolfCrypt Global State (External Library) +# ============================================================================= +# These races occur in wolfSSL's global initialization and crypto callback +# registration. They are wolfSSL library issues, not wolfHSM bugs. + +# Race on initRefCount in wolfCrypt_Init/wolfCrypt_Cleanup +race:wolfCrypt_Init +race:wolfCrypt_Cleanup + +# Races on gCryptoDev array in crypto callback registration +race:wc_CryptoCb_RegisterDevice +race:wc_CryptoCb_UnRegisterDevice +race:wc_CryptoCb_GetDevice + +# ============================================================================= +# Category 2: Transport Memory Layer (Lock-free by Design) +# ============================================================================= +# The shared memory transport uses a lock-free protocol with volatile pointers, +# memory fences, and cache operations. TSAN doesn't understand this higher-level +# protocol and flags the raw memory accesses as races. 
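+#
+# (Rough guide to the entries that follow: a "race:<pattern>" suppression is
+# matched against the function names appearing in the stack traces of a
+# report, so a single entry such as "race:wh_TransportMem_SendRequest" below
+# hides every report whose stack involves that transport function. The stress
+# test takes the complementary approach for DMA buffers and annotates the CSR
+# handshake with __tsan_acquire()/__tsan_release() shims in
+# test/wh_test_threadsafe_stress.c, so those accesses are modeled for TSAN
+# rather than suppressed.)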
+ +# CSR (Control/Status Register) access functions +race:wh_TransportMem_SendRequest +race:wh_TransportMem_RecvRequest +race:wh_TransportMem_SendResponse +race:wh_TransportMem_RecvResponse + +# Shared buffer memcpy operations +race:wh_Utils_memcpy_flush +race:wh_Utils_memcpy_invalidate diff --git a/test/wh_test.c b/test/wh_test.c index 29ba86b8a..39e223342 100644 --- a/test/wh_test.c +++ b/test/wh_test.c @@ -40,6 +40,8 @@ #include "wh_test_keywrap.h" #include "wh_test_multiclient.h" #include "wh_test_log.h" +#include "wh_test_lock.h" +#include "wh_test_threadsafe_stress.h" #if defined(WOLFHSM_CFG_CERTIFICATE_MANAGER) #include "wh_test_cert.h" @@ -104,6 +106,10 @@ int whTest_Unit(void) /* Multi-Client Tests (includes Global Keys when enabled) */ WH_TEST_ASSERT(0 == whTest_MultiClient()); +#if defined(WOLFHSM_CFG_TEST_POSIX) && defined(WOLFHSM_CFG_THREADSAFE) + WH_TEST_ASSERT(0 == whTest_LockPosix()); +#endif + #if defined(WOLFHSM_CFG_SHE_EXTENSION) WH_TEST_ASSERT(0 == whTest_She()); #endif /* WOLFHSM_SHE_EXTENTION */ @@ -255,8 +261,13 @@ int main(void) { int ret = 0; -#if defined(WOLFHSM_CFG_TEST_CLIENT_ONLY) && \ +#if defined(WOLFHSM_CFG_THREADSAFE) && defined(WOLFHSM_CFG_TEST_STRESS) + /* Stress test mode: only run thread safety stress test */ + ret = whTest_ThreadSafeStress(); + +#elif defined(WOLFHSM_CFG_TEST_CLIENT_ONLY) && \ defined(WOLFHSM_CFG_ENABLE_CLIENT) && defined(WOLFHSM_CFG_TEST_POSIX) + /* Test driver should run client tests against the example server */ #if defined(WOLFHSM_CFG_TLS) /* Run TLS client tests */ @@ -265,9 +276,12 @@ int main(void) /* Run TCP client tests (default) */ ret = whTest_ClientTcp(); #endif + #elif defined(WOLFHSM_CFG_ENABLE_CLIENT) && defined(WOLFHSM_CFG_ENABLE_SERVER) + /* Default case: Test driver should run all the unit tests locally */ ret = whTest_Unit(); + #else #error "No client or server enabled in build, one or both must be enabled" #endif diff --git a/test/wh_test_cert.c b/test/wh_test_cert.c index b249609ad..0da3f385f 100644 --- a/test/wh_test_cert.c +++ b/test/wh_test_cert.c @@ -618,7 +618,7 @@ int whTest_CertRamSim(whTestNvmBackendType nvmType) const whFlashCb fcb[1] = {WH_FLASH_RAMSIM_CB}; whTestNvmBackendUnion nvm_setup; - whNvmConfig n_conf[1]; + whNvmConfig n_conf[1] = {0}; whNvmContext nvm[1] = {{0}}; WH_TEST_RETURN_ON_FAIL( diff --git a/test/wh_test_clientserver.c b/test/wh_test_clientserver.c index 820035976..15f4ea67a 100644 --- a/test/wh_test_clientserver.c +++ b/test/wh_test_clientserver.c @@ -619,7 +619,7 @@ int whTest_ClientServerSequential(whTestNvmBackendType nvmType) const whFlashCb fcb[1] = {WH_FLASH_RAMSIM_CB}; whTestNvmBackendUnion nvm_setup; - whNvmConfig n_conf[1]; + whNvmConfig n_conf[1] = {0}; whNvmContext nvm[1] = {{0}}; WH_TEST_RETURN_ON_FAIL( @@ -1739,7 +1739,7 @@ static int wh_ClientServer_PosixMemMapThreadTest(whTestNvmBackendType nvmType) const whFlashCb fcb[1] = {WH_FLASH_RAMSIM_CB}; whTestNvmBackendUnion nvm_setup; - whNvmConfig n_conf[1]; + whNvmConfig n_conf[1] = {0}; whNvmContext nvm[1] = {{0}}; WH_TEST_RETURN_ON_FAIL( diff --git a/test/wh_test_crypto.c b/test/wh_test_crypto.c index b925cfa60..bc1a3d2a1 100644 --- a/test/wh_test_crypto.c +++ b/test/wh_test_crypto.c @@ -5493,7 +5493,7 @@ static int wh_ClientServer_MemThreadTest(whTestNvmBackendType nvmType) const whFlashCb fcb[1] = {WH_FLASH_RAMSIM_CB}; whTestNvmBackendUnion nvm_setup; - whNvmConfig n_conf[1]; + whNvmConfig n_conf[1] = {0}; whNvmContext nvm[1] = {{0}}; WH_TEST_RETURN_ON_FAIL( diff --git a/test/wh_test_lock.c b/test/wh_test_lock.c new file 
mode 100644 index 000000000..8e78531bb --- /dev/null +++ b/test/wh_test_lock.c @@ -0,0 +1,213 @@ +/* + * Copyright (C) 2024 wolfSSL Inc. + * + * This file is part of wolfHSM. + * + * wolfHSM is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the License, or + * (at your option) any later version. + * + * wolfHSM is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with wolfHSM. If not, see . + */ +/* + * test/wh_test_lock.c + * + * Super simple smoke tests to ensure a lock implementation has basic + * functionality + */ + +#include +#include + +#include "wolfhsm/wh_settings.h" +#include "wolfhsm/wh_error.h" + +#include "wh_test_common.h" +#include "wh_test_lock.h" + +#ifdef WOLFHSM_CFG_THREADSAFE + +#include "wolfhsm/wh_lock.h" +#include "wolfhsm/wh_nvm.h" +#include "wolfhsm/wh_nvm_flash.h" +#include "wolfhsm/wh_flash_ramsim.h" + +#include "port/posix/posix_lock.h" + +#define FLASH_RAM_SIZE (1024 * 1024) +#define FLASH_SECTOR_SIZE (4096) +#define FLASH_PAGE_SIZE (8) + +/* Test: Lock init/cleanup lifecycle */ +static int testLockLifecycle(whLockConfig* lockConfig) +{ + whLock lock; + int rc; + + memset(&lock, 0, sizeof(lock)); + + WH_TEST_PRINT("Testing lock lifecycle...\n"); + + if (lockConfig == NULL) { + WH_TEST_PRINT(" Lock lifecycle: SKIPPED (no config provided)\n"); + return WH_ERROR_OK; + } + + /* Init should succeed */ + rc = wh_Lock_Init(&lock, lockConfig); + WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK); + + /* Acquire and release should work */ + rc = wh_Lock_Acquire(&lock); + WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK); + + rc = wh_Lock_Release(&lock); + WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK); + + /* Cleanup should succeed */ + rc = wh_Lock_Cleanup(&lock); + WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK); + + WH_TEST_PRINT(" Lock lifecycle: PASS\n"); + return WH_ERROR_OK; +} + +/* Test: NULL config results in no-op locking */ +static int testNullConfigNoOp(void) +{ + whLock lock; + int rc; + + memset(&lock, 0, sizeof(lock)); + + WH_TEST_PRINT("Testing NULL config no-op...\n"); + + /* Init with NULL config should succeed (no-op mode) */ + rc = wh_Lock_Init(&lock, NULL); + WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK); + + /* Acquire/release should be no-ops returning OK */ + rc = wh_Lock_Acquire(&lock); + WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK); + + rc = wh_Lock_Release(&lock); + WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK); + + /* Cleanup should succeed */ + rc = wh_Lock_Cleanup(&lock); + WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK); + + WH_TEST_PRINT(" NULL config no-op: PASS\n"); + return WH_ERROR_OK; +} + +/* Test: NVM with lock config */ +static int testNvmWithLock(whLockConfig* lockConfig) +{ + /* Flash simulator */ + uint8_t flashMemory[FLASH_RAM_SIZE]; + const whFlashCb flashCb[1] = {WH_FLASH_RAMSIM_CB}; + whFlashRamsimCtx flashCtx[1] = {0}; + whFlashRamsimCfg flashCfg[1] = {{ + .size = FLASH_RAM_SIZE, + .sectorSize = FLASH_SECTOR_SIZE, + .pageSize = FLASH_PAGE_SIZE, + .erasedByte = 0xFF, + .memory = flashMemory, + }}; + + /* NVM flash layer */ + whNvmCb nvmFlashCb[1] = {WH_NVM_FLASH_CB}; + whNvmFlashContext nvmFlashCtx[1] = {0}; + whNvmFlashConfig nvmFlashCfg = { + .cb = flashCb, + .context = flashCtx, + 
.config = flashCfg, + }; + + /* NVM context with lock */ + whNvmContext nvm = {0}; + whNvmConfig nvmCfg; + + whNvmMetadata meta; + uint8_t testData[] = "Hello, NVM with lock!"; + uint8_t readBuf[32]; + int rc; + + WH_TEST_PRINT("Testing NVM with lock...\n"); + + memset(flashMemory, 0xFF, sizeof(flashMemory)); + + /* Initialize NVM flash */ + rc = wh_NvmFlash_Init(nvmFlashCtx, &nvmFlashCfg); + WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK); + + /* Set up NVM config with user-supplied lock */ + nvmCfg.cb = nvmFlashCb; + nvmCfg.context = nvmFlashCtx; + nvmCfg.config = &nvmFlashCfg; + nvmCfg.lockConfig = lockConfig; + + rc = wh_Nvm_Init(&nvm, &nvmCfg); + WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK); + + /* Test basic NVM operations with locking */ + memset(&meta, 0, sizeof(meta)); + meta.id = 1; + meta.len = sizeof(testData); + + rc = wh_Nvm_AddObject(&nvm, &meta, sizeof(testData), testData); + WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK); + + memset(readBuf, 0, sizeof(readBuf)); + rc = wh_Nvm_Read(&nvm, 1, 0, sizeof(testData), readBuf); + WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK); + WH_TEST_ASSERT_RETURN(memcmp(testData, readBuf, sizeof(testData)) == 0); + + /* Cleanup */ + rc = wh_Nvm_Cleanup(&nvm); + WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK); + + rc = wh_NvmFlash_Cleanup(nvmFlashCtx); + WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK); + + WH_TEST_PRINT(" NVM with lock: PASS\n"); + return WH_ERROR_OK; +} + +int whTest_LockConfig(whLockConfig* lockConfig) +{ + WH_TEST_PRINT("Testing lock functionality...\n"); + + WH_TEST_RETURN_ON_FAIL(testLockLifecycle(lockConfig)); + WH_TEST_RETURN_ON_FAIL(testNullConfigNoOp()); + WH_TEST_RETURN_ON_FAIL(testNvmWithLock(lockConfig)); + + WH_TEST_PRINT("Lock tests PASSED\n"); + return WH_ERROR_OK; +} + + +#if defined(WOLFHSM_CFG_TEST_POSIX) +int whTest_LockPosix(void) +{ + posixLockContext lockCtx = {0}; + whLockCb lockCb = POSIX_LOCK_CB; + whLockConfig lockConfig; + + lockConfig.cb = &lockCb; + lockConfig.context = &lockCtx; + lockConfig.config = NULL; /* Use default mutex attributes */ + + return whTest_LockConfig(&lockConfig); +} +#endif /* WOLFHSM_CFG_TEST_POSIX */ + +#endif /* WOLFHSM_CFG_THREADSAFE */ diff --git a/test/wh_test_lock.h b/test/wh_test_lock.h new file mode 100644 index 000000000..4564c30e9 --- /dev/null +++ b/test/wh_test_lock.h @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2024 wolfSSL Inc. + * + * This file is part of wolfHSM. + * + * wolfHSM is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the License, or + * (at your option) any later version. + * + * wolfHSM is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with wolfHSM. If not, see . + */ +/* + * test/wh_test_lock.h + * + * Thread-safety lock tests + */ +#ifndef TEST_WH_TEST_LOCK_H_ +#define TEST_WH_TEST_LOCK_H_ + +#include "wolfhsm/wh_lock.h" /* For whLockConfig type */ + +/* + * Runs all lock tests using the supplied lock configuration. + * - Tests lock lifecycle (init/cleanup) if lockConfig is not NULL + * - Tests NULL config results in no-op locking + * - Tests NVM with lock config + * + * @param lockConfig Pointer to lock configuration. 
If NULL, the lock lifecycle + * test will be skipped but other tests will still run. + * Returns 0 on success, and a non-zero error code on failure + */ +int whTest_LockConfig(whLockConfig* lockConfig); + +#if defined(WOLFHSM_CFG_TEST_POSIX) + +/* + * Runs all lock tests using the POSIX lock backend on top of the POSIX simulator + * - Tests lock lifecycle (init/cleanup) + * - Tests NULL config results in no-op locking + * - Tests NVM with lock config + * Returns 0 on success, and a non-zero error code on failure + */ +int whTest_LockPosix(void); +#endif /* WOLFHSM_CFG_TEST_POSIX */ + +#endif /* TEST_WH_TEST_LOCK_H_ */ diff --git a/test/wh_test_log.c b/test/wh_test_log.c index f2da3ee3a..e58a4ea68 100644 --- a/test/wh_test_log.c +++ b/test/wh_test_log.c @@ -1480,7 +1480,7 @@ static int whTest_LogClientServerMemTransport(void) const whFlashCb fcb[1] = {WH_FLASH_RAMSIM_CB}; whTestNvmBackendUnion nvm_setup; - whNvmConfig n_conf[1]; + whNvmConfig n_conf[1] = {0}; whNvmContext nvm[1] = {{0}}; WH_TEST_RETURN_ON_FAIL(whTest_NvmCfgBackend( diff --git a/test/wh_test_server_img_mgr.c b/test/wh_test_server_img_mgr.c index cf5f9092e..dbd4e1484 100644 --- a/test/wh_test_server_img_mgr.c +++ b/test/wh_test_server_img_mgr.c @@ -1230,7 +1230,7 @@ int whTest_ServerImgMgr(whTestNvmBackendType nvmType) const whFlashCb fcb[1] = {WH_FLASH_RAMSIM_CB}; whTestNvmBackendUnion nvm_setup; - whNvmConfig n_conf[1]; + whNvmConfig n_conf[1] = {0}; whNvmContext nvm[1] = {{0}}; WH_TEST_RETURN_ON_FAIL( diff --git a/test/wh_test_threadsafe_stress.c b/test/wh_test_threadsafe_stress.c new file mode 100644 index 000000000..e7ed13de2 --- /dev/null +++ b/test/wh_test_threadsafe_stress.c @@ -0,0 +1,2068 @@ +/* + * Copyright (C) 2025 wolfSSL Inc. + * + * This file is part of wolfHSM. + * + * wolfHSM is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the License, or + * (at your option) any later version. + * + * wolfHSM is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with wolfHSM. If not, see . + */ +/* + * test/wh_test_threadsafe_stress.c + * + * Phased contention test for thread safety validation. + * + * Intended to drive a high volume of concurrent requests across multiple + * servers sharing a single NVM context, while running under TSAN to detect any + * data races.
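+ *
+ * Each phase follows the same barrier-driven pattern (sketched here for
+ * orientation; see contentionClientThread() below): all client threads meet
+ * at setupBarrier, client 0 runs doPhaseSetup() for the current phase, the
+ * threads meet again at setupCompleteBarrier and streamStartBarrier, then
+ * every client streams executePhaseOperation() in a tight loop until
+ * phaseRunning is cleared, and finally all threads rendezvous at
+ * streamEndBarrier before the next phase.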
+ * + * Architecture: + * - 1 shared NVM context with lock + * - 4 server contexts sharing the NVM + * - 4 client threads (all doing both NVM and keystore ops) + * - Different contention phases to stress test contention patterns across + * various different APIs + */ + +#include "wolfhsm/wh_settings.h" + +#if defined(WOLFHSM_CFG_THREADSAFE) && defined(WOLFHSM_CFG_TEST_POSIX) && \ + defined(WOLFHSM_CFG_GLOBAL_KEYS) && defined(WOLFHSM_CFG_ENABLE_CLIENT) && \ + defined(WOLFHSM_CFG_ENABLE_SERVER) && !defined(WOLFHSM_CFG_NO_CRYPTO) + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "wolfhsm/wh_error.h" +#include "wolfhsm/wh_comm.h" +#include "wolfhsm/wh_transport_mem.h" +#include "wolfhsm/wh_client.h" +#include "wolfhsm/wh_server.h" +#include "wolfhsm/wh_nvm.h" +#include "wolfhsm/wh_nvm_flash.h" +#include "wolfhsm/wh_flash_ramsim.h" +#include "wolfhsm/wh_lock.h" +#include "wolfhsm/wh_keyid.h" + +#include "port/posix/posix_lock.h" + +#include "wolfssl/wolfcrypt/types.h" +#include "wolfssl/wolfcrypt/random.h" + +#include "wh_test_common.h" +#include "wh_test_threadsafe_stress.h" + + +/* + * TSAN Transport Shims + * + * These wrap the transport layer functions with acquire/release annotations + * to tell TSAN about the happens-before relationship established by the + * CSR handshake. This eliminates false positives on DMA buffer accesses + * while still allowing TSAN to analyze keystore/NVM locking. + */ + +#ifdef WOLFHSM_CFG_TEST_STRESS_TSAN + +/* Provide a sane error message if we want to use TSAN but no TSAN detected. + * GCC uses __SANITIZE_THREAD__, Clang uses __has_feature + */ +#ifndef __has_feature +#define __has_feature(x) 0 +#endif +#if !defined(__SANITIZE_THREAD__) && !__has_feature(thread_sanitizer) +#error ThreadSanitizer not enabled for this build +#endif + +#include + +/* Client sends request - release all prior writes (including DMA buffers) + * IMPORTANT: Release MUST happen BEFORE the send, not after. The transport's + * notify counter synchronizes message delivery, but TSAN doesn't see it. + * If we release after send, the server could acquire before we release, + * breaking the happens-before chain for DMA buffer accesses. */ +static int tsan_SendRequest(void* c, uint16_t len, const void* data) +{ + whTransportMemContext* ctx = (whTransportMemContext*)c; + __tsan_release((void*)ctx->req); + return wh_TransportMem_SendRequest(c, len, data); +} + +/* Client receives response - acquire all server writes */ +static int tsan_RecvResponse(void* c, uint16_t* out_len, void* data) +{ + whTransportMemContext* ctx = (whTransportMemContext*)c; + int rc = wh_TransportMem_RecvResponse(c, out_len, data); + if (rc == WH_ERROR_OK) { + __tsan_acquire((void*)ctx->resp); + } + return rc; +} + +/* Server receives request - acquire all client writes (including DMA buffers) + */ +static int tsan_RecvRequest(void* c, uint16_t* out_len, void* data) +{ + whTransportMemContext* ctx = (whTransportMemContext*)c; + int rc = wh_TransportMem_RecvRequest(c, out_len, data); + if (rc == WH_ERROR_OK) { + __tsan_acquire((void*)ctx->req); + } + return rc; +} + +/* Server sends response - release all prior writes (including DMA write-backs) + * IMPORTANT: Release MUST happen BEFORE the send (see tsan_SendRequest). 
*/ +static int tsan_SendResponse(void* c, uint16_t len, const void* data) +{ + whTransportMemContext* ctx = (whTransportMemContext*)c; + __tsan_release((void*)ctx->resp); + return wh_TransportMem_SendResponse(c, len, data); +} + +/* TSAN-annotated transport callbacks */ +static const whTransportClientCb clientTransportCb = { + .Init = wh_TransportMem_InitClear, + .Send = tsan_SendRequest, + .Recv = tsan_RecvResponse, + .Cleanup = wh_TransportMem_Cleanup, +}; + +static const whTransportServerCb serverTransportCb = { + .Init = wh_TransportMem_Init, + .Recv = tsan_RecvRequest, + .Send = tsan_SendResponse, + .Cleanup = wh_TransportMem_Cleanup, +}; + +#else /* !WOLFHSM_CFG_TEST_STRESS_TSAN */ + +/* Non-TSAN: use standard transport callbacks */ +static const whTransportClientCb clientTransportCb = WH_TRANSPORT_MEM_CLIENT_CB; +static const whTransportServerCb serverTransportCb = WH_TRANSPORT_MEM_SERVER_CB; + +#endif /* WOLFHSM_CFG_TEST_STRESS_TSAN */ + +/* Atomic helpers for C99 compatibility (GCC/Clang built-ins) */ +#define ATOMIC_LOAD_INT(ptr) __atomic_load_n((ptr), __ATOMIC_ACQUIRE) +#define ATOMIC_STORE_INT(ptr, val) \ + __atomic_store_n((ptr), (val), __ATOMIC_RELEASE) +#define ATOMIC_ADD_INT(ptr, val) \ + __atomic_add_fetch((ptr), (val), __ATOMIC_ACQ_REL) + +/* Test configuration */ +#define NUM_CLIENTS 4 +/* Allow external configuration of phase iterations */ +#ifndef WOLFHSM_CFG_TEST_STRESS_PHASE_ITERATIONS +#define WOLFHSM_CFG_TEST_STRESS_PHASE_ITERATIONS 800 +#endif +#define PHASE_ITERATIONS WOLFHSM_CFG_TEST_STRESS_PHASE_ITERATIONS + +#define BUFFER_SIZE 4096 +#define FLASH_RAM_SIZE (1024 * 1024) +#define FLASH_SECTOR_SIZE (128 * 1024) +#define FLASH_PAGE_SIZE 8 +#define MAX_TEST_DURATION_SEC 300 + +/* NVM object parameters */ +#define NVM_OBJECT_DATA_SIZE 64 + +/* Key parameters */ +#define KEY_DATA_SIZE 32 + +/* Hot resources - all clients target these same IDs + * NOTE: Use client-facing keyId format (simple ID + flags), NOT server-internal + * format (WH_MAKE_KEYID). The server's wh_KeyId_TranslateFromClient() extracts + * only the lower 8 bits as ID and checks WH_KEYID_CLIENT_GLOBAL_FLAG for + * global. Using WH_MAKE_KEYID with user=1 sets bit 8, which is + * WH_KEYID_CLIENT_GLOBAL_FLAG! 
+ * + * We need separate key IDs for different test categories because: + * - Revoked keys have NONMODIFIABLE flag and can't be erased or re-cached + * - Each revoke-related phase needs its own key that won't be used elsewhere + */ +#define HOT_KEY_ID_GLOBAL WH_CLIENT_KEYID_MAKE_GLOBAL(1) /* ID=1, global */ +#define HOT_KEY_ID_LOCAL 2 /* ID=2, local */ + +/* Separate key IDs for revoke-related phases (can't be reused after revoke) */ +#define REVOKE_CACHE_KEY_GLOBAL \ + WH_CLIENT_KEYID_MAKE_GLOBAL(10) /* ID=10, global */ +#define REVOKE_CACHE_KEY_LOCAL 11 /* ID=11, local */ +#define REVOKE_EXPORT_KEY_GLOBAL \ + WH_CLIENT_KEYID_MAKE_GLOBAL(12) /* ID=12, global */ +#define REVOKE_EXPORT_KEY_LOCAL 13 /* ID=13, local */ +#define FRESHEN_KEY_GLOBAL WH_CLIENT_KEYID_MAKE_GLOBAL(14) /* ID=14, global */ +#define FRESHEN_KEY_LOCAL 15 /* ID=15, local */ +#define HOT_NVM_ID ((whNvmId)100) +#define HOT_NVM_ID_2 ((whNvmId)101) +#define HOT_NVM_ID_3 ((whNvmId)102) + +/* ============================================================================ + * PHASE DEFINITIONS + * ========================================================================== */ + +/* Contention phase enumeration */ +typedef enum { + /* Keystore phases */ + PHASE_KS_CONCURRENT_CACHE, + PHASE_KS_CACHE_VS_EVICT, + PHASE_KS_CACHE_VS_EXPORT, + PHASE_KS_EVICT_VS_EXPORT, + PHASE_KS_CACHE_VS_COMMIT, + PHASE_KS_COMMIT_VS_EVICT, + PHASE_KS_CONCURRENT_EXPORT, + PHASE_KS_CONCURRENT_EVICT, + + /* NVM phases */ + PHASE_NVM_CONCURRENT_ADD, + PHASE_NVM_ADD_VS_READ, + PHASE_NVM_ADD_VS_DESTROY, + PHASE_NVM_READ_VS_DESTROY, + PHASE_NVM_CONCURRENT_READ, + PHASE_NVM_LIST_DURING_MODIFY, + PHASE_NVM_CONCURRENT_DESTROY, + + /* Cross-subsystem phases */ + PHASE_CROSS_COMMIT_VS_ADD, + PHASE_CROSS_COMMIT_VS_DESTROY, + PHASE_CROSS_FRESHEN_VS_MODIFY, + + /* Keystore - Erase/Revoke */ + PHASE_KS_ERASE_VS_CACHE, /* 2 erase, 2 cache same key */ + PHASE_KS_ERASE_VS_EXPORT, /* 2 erase, 2 export same key */ + PHASE_KS_REVOKE_VS_CACHE, /* 2 revoke, 2 cache same key */ + PHASE_KS_REVOKE_VS_EXPORT, /* 2 revoke, 2 export same key */ + + /* Keystore - GetUniqueId/Freshen */ + PHASE_KS_CONCURRENT_GETUNIQUEID, /* 4 threads request unique IDs */ + PHASE_KS_EXPLICIT_FRESHEN, /* 4 threads freshen same uncached key */ + + /* NVM - GetAvailable/GetMetadata/Reclaim */ + PHASE_NVM_ADD_WITH_RECLAIM, /* Fill NVM, concurrent adds trigger reclaim */ + PHASE_NVM_GETAVAILABLE_VS_ADD, /* 2 query space, 2 add objects */ + PHASE_NVM_GETMETADATA_VS_DESTROY, /* 2 query metadata, 2 destroy */ + +#ifdef WOLFHSM_CFG_DMA + /* DMA Operations */ + PHASE_KS_CACHE_DMA_VS_EXPORT, /* 2 DMA cache, 2 regular export */ + PHASE_KS_EXPORT_DMA_VS_EVICT, /* 2 DMA export, 2 evict */ + PHASE_NVM_ADD_DMA_VS_READ, /* 2 DMA add, 2 regular read */ + PHASE_NVM_READ_DMA_VS_DESTROY, /* 2 DMA read, 2 destroy */ +#endif + + PHASE_COUNT +} ContentionPhase; + +/* Client role assignment per phase */ +typedef enum { + ROLE_OP_A, /* Perform operation A (e.g., Cache, Add) */ + ROLE_OP_B, /* Perform operation B (e.g., Evict, Read) */ +} ClientRole; + +/* Phase configuration */ +typedef struct { + ContentionPhase phase; + const char* name; + int iterations; + ClientRole roles[NUM_CLIENTS]; +} PhaseConfig; + +/* Phase configurations - all 18 phases */ +static const PhaseConfig phases[] = { + /* Keystore phases */ + {PHASE_KS_CONCURRENT_CACHE, + "KS: Concurrent Cache", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_A, ROLE_OP_A}}, + {PHASE_KS_CACHE_VS_EVICT, + "KS: Cache vs Evict", + PHASE_ITERATIONS, + {ROLE_OP_A, 
ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}}, + {PHASE_KS_CACHE_VS_EXPORT, + "KS: Cache vs Export", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}}, + {PHASE_KS_EVICT_VS_EXPORT, + "KS: Evict vs Export", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}}, + {PHASE_KS_CACHE_VS_COMMIT, + "KS: Cache vs Commit", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}}, + {PHASE_KS_COMMIT_VS_EVICT, + "KS: Commit vs Evict", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}}, + {PHASE_KS_CONCURRENT_EXPORT, + "KS: Concurrent Export", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_A, ROLE_OP_A}}, + {PHASE_KS_CONCURRENT_EVICT, + "KS: Concurrent Evict", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_A, ROLE_OP_A}}, + + /* NVM phases */ + {PHASE_NVM_CONCURRENT_ADD, + "NVM: Concurrent Add", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_A, ROLE_OP_A}}, + {PHASE_NVM_ADD_VS_READ, + "NVM: Add vs Read", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}}, + {PHASE_NVM_ADD_VS_DESTROY, + "NVM: Add vs Destroy", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}}, + {PHASE_NVM_READ_VS_DESTROY, + "NVM: Read vs Destroy", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}}, + {PHASE_NVM_CONCURRENT_READ, + "NVM: Concurrent Read", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_A, ROLE_OP_A}}, + {PHASE_NVM_LIST_DURING_MODIFY, + "NVM: List During Modify", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}}, + {PHASE_NVM_CONCURRENT_DESTROY, + "NVM: Concurrent Destroy", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_A, ROLE_OP_A}}, + + /* Cross-subsystem phases */ + {PHASE_CROSS_COMMIT_VS_ADD, + "Cross: Commit vs NVM Add", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}}, + {PHASE_CROSS_COMMIT_VS_DESTROY, + "Cross: Commit vs NVM Destroy", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}}, + {PHASE_CROSS_FRESHEN_VS_MODIFY, + "Cross: Export/Freshen vs NVM Modify", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}}, + + /* Keystore Erase/Revoke */ + {PHASE_KS_ERASE_VS_CACHE, + "KS: Erase vs Cache", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}}, + {PHASE_KS_ERASE_VS_EXPORT, + "KS: Erase vs Export", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}}, + {PHASE_KS_REVOKE_VS_CACHE, + "KS: Revoke vs Cache", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}}, + {PHASE_KS_REVOKE_VS_EXPORT, + "KS: Revoke vs Export", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}}, + + /* Keystore GetUniqueId/Freshen */ + {PHASE_KS_CONCURRENT_GETUNIQUEID, + "KS: Concurrent GetUniqueId", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_A, ROLE_OP_A}}, + {PHASE_KS_EXPLICIT_FRESHEN, + "KS: Explicit Freshen", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_A, ROLE_OP_A}}, + + /* NVM */ + {PHASE_NVM_ADD_WITH_RECLAIM, + "NVM: Add With Reclaim", + PHASE_ITERATIONS / 10, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_A, ROLE_OP_A}}, + {PHASE_NVM_GETAVAILABLE_VS_ADD, + "NVM: GetAvailable vs Add", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}}, + {PHASE_NVM_GETMETADATA_VS_DESTROY, + "NVM: GetMetadata vs Destroy", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}}, + +#ifdef WOLFHSM_CFG_DMA + /* DMA Operations */ + {PHASE_KS_CACHE_DMA_VS_EXPORT, + "KS: Cache DMA vs Export", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}}, + 
{PHASE_KS_EXPORT_DMA_VS_EVICT, + "KS: Export DMA vs Evict", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}}, + {PHASE_NVM_ADD_DMA_VS_READ, + "NVM: Add DMA vs Read", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}}, + {PHASE_NVM_READ_DMA_VS_DESTROY, + "NVM: Read DMA vs Destroy", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}}, +#endif +}; + +/* ============================================================================ + * DATA STRUCTURES + * ========================================================================== */ + +/* Forward declaration */ +struct StressTestContext; + +/* Per client-server pair configuration */ +typedef struct { + /* Transport buffers */ + uint8_t reqBuf[BUFFER_SIZE]; + uint8_t respBuf[BUFFER_SIZE]; + whTransportMemConfig tmConfig; + + /* Client side */ + whTransportMemClientContext clientTransportCtx; + whCommClientConfig clientCommConfig; + whClientContext client; + whClientConfig clientConfig; + + /* Server side */ + whTransportMemServerContext serverTransportCtx; + whCommServerConfig serverCommConfig; + whServerContext server; + whServerConfig serverConfig; + + /* Crypto context for this server */ + whServerCryptoContext cryptoCtx; + + /* Thread control */ + pthread_t clientThread; + pthread_t serverThread; + int clientId; + volatile int stopFlag; + volatile int errorCount; + volatile int iterationCount; + + /* Pointer back to shared context */ + struct StressTestContext* sharedCtx; + +#ifdef WOLFHSM_CFG_DMA + /* Per-client DMA buffers */ + uint8_t dmaKeyBuffer[KEY_DATA_SIZE]; + uint8_t dmaNvmBuffer[NVM_OBJECT_DATA_SIZE]; +#endif +} ClientServerPair; + +/* Shared test context */ +typedef struct StressTestContext { + /* Shared NVM */ + uint8_t flashMemory[FLASH_RAM_SIZE]; + whFlashRamsimCtx flashCtx; + whFlashRamsimCfg flashCfg; + whNvmFlashContext nvmFlashCtx; + whNvmFlashConfig nvmFlashCfg; + whNvmContext nvm; + whNvmConfig nvmCfg; + + /* Lock for NVM */ + posixLockContext nvmLockCtx; + pthread_mutexattr_t mutexAttr; + posixLockConfig posixLockCfg; + whLockConfig lockCfg; + + /* Client-server pairs */ + ClientServerPair pairs[NUM_CLIENTS]; + + /* Synchronization - initial start */ + pthread_barrier_t startBarrier; + volatile int globalStopFlag; + + /* Phase synchronization */ + pthread_barrier_t setupBarrier; + pthread_barrier_t setupCompleteBarrier; + pthread_barrier_t streamStartBarrier; + pthread_barrier_t streamEndBarrier; + + /* Phase control */ + volatile int phaseRunning; + volatile ContentionPhase currentPhase; + volatile whKeyId currentKeyId; + volatile ClientRole clientRoles[NUM_CLIENTS]; +} StressTestContext; + +/* Forward declarations */ +static void* serverThread(void* arg); +static void* contentionClientThread(void* arg); + +/* ============================================================================ + * INITIALIZATION HELPERS + * ========================================================================== */ + +static int initSharedNvm(StressTestContext* ctx) +{ + static whFlashCb flashCb = WH_FLASH_RAMSIM_CB; + static whNvmCb nvmCb = WH_NVM_FLASH_CB; + static whLockCb lockCb = POSIX_LOCK_CB; + int rc; + + /* Initialize flash memory */ + memset(ctx->flashMemory, 0xFF, sizeof(ctx->flashMemory)); + + /* Configure flash simulator */ + ctx->flashCfg.size = FLASH_RAM_SIZE; + ctx->flashCfg.sectorSize = FLASH_SECTOR_SIZE; + ctx->flashCfg.pageSize = FLASH_PAGE_SIZE; + ctx->flashCfg.erasedByte = 0xFF; + ctx->flashCfg.memory = ctx->flashMemory; + + /* Configure NVM flash layer */ + 
ctx->nvmFlashCfg.cb = &flashCb; + ctx->nvmFlashCfg.context = &ctx->flashCtx; + ctx->nvmFlashCfg.config = &ctx->flashCfg; + + /* Initialize NVM flash layer */ + rc = wh_NvmFlash_Init(&ctx->nvmFlashCtx, &ctx->nvmFlashCfg); + if (rc != WH_ERROR_OK) { + WH_ERROR_PRINT("Failed to initialize NVM flash: %d\n", rc); + return rc; + } + + /* Configure lock with error-checking mutex for better debugging */ + memset(&ctx->nvmLockCtx, 0, sizeof(ctx->nvmLockCtx)); + pthread_mutexattr_init(&ctx->mutexAttr); + pthread_mutexattr_settype(&ctx->mutexAttr, PTHREAD_MUTEX_ERRORCHECK); + ctx->posixLockCfg.attr = &ctx->mutexAttr; + ctx->lockCfg.cb = &lockCb; + ctx->lockCfg.context = &ctx->nvmLockCtx; + ctx->lockCfg.config = &ctx->posixLockCfg; + + /* Configure NVM with lock */ + ctx->nvmCfg.cb = &nvmCb; + ctx->nvmCfg.context = &ctx->nvmFlashCtx; + ctx->nvmCfg.config = &ctx->nvmFlashCfg; + ctx->nvmCfg.lockConfig = &ctx->lockCfg; + + /* Initialize NVM */ + rc = wh_Nvm_Init(&ctx->nvm, &ctx->nvmCfg); + if (rc != WH_ERROR_OK) { + WH_ERROR_PRINT("Failed to initialize NVM: %d\n", rc); + return rc; + } + + return WH_ERROR_OK; +} + +static int initClientServerPair(StressTestContext* ctx, int pairIndex) +{ + ClientServerPair* pair = &ctx->pairs[pairIndex]; + int rc; + + pair->clientId = pairIndex; + pair->sharedCtx = ctx; + pair->stopFlag = 0; + pair->errorCount = 0; + pair->iterationCount = 0; + + /* Configure transport memory */ + pair->tmConfig.req = (whTransportMemCsr*)pair->reqBuf; + pair->tmConfig.req_size = sizeof(pair->reqBuf); + pair->tmConfig.resp = (whTransportMemCsr*)pair->respBuf; + pair->tmConfig.resp_size = sizeof(pair->respBuf); + + /* Configure client transport */ + memset(&pair->clientTransportCtx, 0, sizeof(pair->clientTransportCtx)); + + /* Configure client comm */ + pair->clientCommConfig.transport_cb = &clientTransportCb; + pair->clientCommConfig.transport_context = &pair->clientTransportCtx; + pair->clientCommConfig.transport_config = &pair->tmConfig; + pair->clientCommConfig.client_id = (uint16_t)(100 + pairIndex); + + /* Configure client */ + pair->clientConfig.comm = &pair->clientCommConfig; + + /* Configure server transport */ + memset(&pair->serverTransportCtx, 0, sizeof(pair->serverTransportCtx)); + + /* Configure server comm */ + pair->serverCommConfig.transport_cb = &serverTransportCb; + pair->serverCommConfig.transport_context = &pair->serverTransportCtx; + pair->serverCommConfig.transport_config = &pair->tmConfig; + pair->serverCommConfig.server_id = (uint16_t)(200 + pairIndex); + + /* Configure crypto context */ + pair->cryptoCtx.devId = INVALID_DEVID; + + /* Initialize RNG for this server */ + rc = wc_InitRng_ex(pair->cryptoCtx.rng, NULL, pair->cryptoCtx.devId); + if (rc != 0) { + WH_ERROR_PRINT("Failed to init RNG for pair %d: %d\n", pairIndex, rc); + return rc; + } + + /* Configure server - all share the same NVM */ + pair->serverConfig.comm_config = &pair->serverCommConfig; + pair->serverConfig.nvm = &ctx->nvm; + pair->serverConfig.crypto = &pair->cryptoCtx; + pair->serverConfig.devId = INVALID_DEVID; + + return WH_ERROR_OK; +} + +static void cleanupClientServerPair(ClientServerPair* pair) +{ + wc_FreeRng(pair->cryptoCtx.rng); +} + +/* ============================================================================ + * SERVER THREAD + * ========================================================================== */ + +static void* serverThread(void* arg) +{ + ClientServerPair* pair = (ClientServerPair*)arg; + StressTestContext* ctx = pair->sharedCtx; + int rc; + + /* Initialize server */ + 
rc = wh_Server_Init(&pair->server, &pair->serverConfig); + if (rc != WH_ERROR_OK) { + WH_ERROR_PRINT("Server %d init failed: %d\n", pair->clientId, rc); + ATOMIC_ADD_INT(&pair->errorCount, 1); + return NULL; + } + + /* Set connected state */ + rc = wh_Server_SetConnected(&pair->server, WH_COMM_CONNECTED); + if (rc != WH_ERROR_OK) { + WH_ERROR_PRINT("Server %d SetConnected failed: %d\n", pair->clientId, + rc); + ATOMIC_ADD_INT(&pair->errorCount, 1); + wh_Server_Cleanup(&pair->server); + return NULL; + } + + /* Wait for all threads to start */ + pthread_barrier_wait(&ctx->startBarrier); + + /* Process requests until stopped */ + while (!ATOMIC_LOAD_INT(&pair->stopFlag) && + !ATOMIC_LOAD_INT(&ctx->globalStopFlag)) { + rc = wh_Server_HandleRequestMessage(&pair->server); + + if (rc == WH_ERROR_NOTREADY) { + /* No request pending, yield CPU */ + sched_yield(); + continue; + } + + /* Don't count server errors - they're expected during stress */ + } + + wh_Server_Cleanup(&pair->server); + return NULL; +} + +/* ============================================================================ + * CLIENT OPERATIONS + * ========================================================================== */ + +static int doNvmAddObject(whClientContext* client, whNvmId id, int iteration) +{ + uint8_t data[NVM_OBJECT_DATA_SIZE]; + int32_t out_rc; + int rc; + + /* Fill data with pattern */ + memset(data, (uint8_t)(iteration & 0xFF), sizeof(data)); + + /* Send request */ + rc = wh_Client_NvmAddObjectRequest(client, id, WH_NVM_ACCESS_ANY, + WH_NVM_FLAGS_USAGE_ANY, 0, NULL, + sizeof(data), data); + if (rc != WH_ERROR_OK) { + return rc; + } + + /* Wait for response from server thread */ + do { + rc = wh_Client_NvmAddObjectResponse(client, &out_rc); + if (rc == WH_ERROR_NOTREADY) { + sched_yield(); + } + } while (rc == WH_ERROR_NOTREADY); + + if (rc != WH_ERROR_OK) { + return rc; + } + + return out_rc; +} + +static int doNvmRead(whClientContext* client, whNvmId id) +{ + uint8_t data[NVM_OBJECT_DATA_SIZE]; + whNvmSize outSz = sizeof(data); + int32_t out_rc; + int rc; + + /* Send request */ + rc = wh_Client_NvmReadRequest(client, id, 0, sizeof(data)); + if (rc != WH_ERROR_OK) { + return rc; + } + + /* Wait for response from server thread */ + do { + rc = wh_Client_NvmReadResponse(client, &out_rc, &outSz, data); + if (rc == WH_ERROR_NOTREADY) { + sched_yield(); + } + } while (rc == WH_ERROR_NOTREADY); + + if (rc != WH_ERROR_OK) { + return rc; + } + + return out_rc; +} + +static int doNvmList(whClientContext* client) +{ + whNvmId outId; + whNvmSize outCount; + int32_t out_rc; + int rc; + + /* Send request */ + rc = wh_Client_NvmListRequest(client, WH_NVM_ACCESS_ANY, + WH_NVM_FLAGS_USAGE_ANY, 0); + if (rc != WH_ERROR_OK) { + return rc; + } + + /* Wait for response from server thread */ + do { + rc = wh_Client_NvmListResponse(client, &out_rc, &outCount, &outId); + if (rc == WH_ERROR_NOTREADY) { + sched_yield(); + } + } while (rc == WH_ERROR_NOTREADY); + + if (rc != WH_ERROR_OK) { + return rc; + } + + return out_rc; +} + +static int doNvmDestroy(whClientContext* client, whNvmId id) +{ + int32_t out_rc; + int rc; + + /* Send request */ + rc = wh_Client_NvmDestroyObjectsRequest(client, 1, &id); + if (rc != WH_ERROR_OK) { + return rc; + } + + /* Wait for response from server thread */ + do { + rc = wh_Client_NvmDestroyObjectsResponse(client, &out_rc); + if (rc == WH_ERROR_NOTREADY) { + sched_yield(); + } + } while (rc == WH_ERROR_NOTREADY); + + if (rc != WH_ERROR_OK) { + return rc; + } + + return out_rc; +} + +static int 
doNvmGetAvailable(whClientContext* client) +{ + uint32_t availSize; + whNvmId availObjects; + uint32_t reclaimSize; + whNvmId reclaimObjects; + int32_t out_rc; + int rc; + + /* Send request */ + rc = wh_Client_NvmGetAvailableRequest(client); + if (rc != WH_ERROR_OK) { + return rc; + } + + /* Wait for response from server thread */ + do { + rc = wh_Client_NvmGetAvailableResponse(client, &out_rc, &availSize, + &availObjects, &reclaimSize, + &reclaimObjects); + if (rc == WH_ERROR_NOTREADY) { + sched_yield(); + } + } while (rc == WH_ERROR_NOTREADY); + + if (rc != WH_ERROR_OK) { + return rc; + } + + return out_rc; +} + +static int doNvmGetMetadata(whClientContext* client, whNvmId id) +{ + whNvmId outId; + whNvmAccess outAccess; + whNvmFlags outFlags; + whNvmSize outLen; + uint8_t label[WH_NVM_LABEL_LEN]; + int32_t out_rc; + int rc; + + /* Send request */ + rc = wh_Client_NvmGetMetadataRequest(client, id); + if (rc != WH_ERROR_OK) { + return rc; + } + + /* Wait for response from server thread */ + do { + rc = wh_Client_NvmGetMetadataResponse(client, &out_rc, &outId, + &outAccess, &outFlags, &outLen, + sizeof(label), label); + if (rc == WH_ERROR_NOTREADY) { + sched_yield(); + } + } while (rc == WH_ERROR_NOTREADY); + + if (rc != WH_ERROR_OK) { + return rc; + } + + return out_rc; +} + +static int doKeyCache(whClientContext* client, whKeyId keyId, int iteration) +{ + uint8_t keyData[KEY_DATA_SIZE]; + uint8_t label[WH_NVM_LABEL_LEN]; + whKeyId outKeyId; + int rc; + + /* Fill key data with pattern */ + memset(keyData, (uint8_t)(iteration & 0xFF), sizeof(keyData)); + snprintf((char*)label, sizeof(label), "Key%04X", keyId); + + /* Send request */ + rc = wh_Client_KeyCacheRequest_ex(client, WH_NVM_FLAGS_USAGE_ANY, label, + sizeof(label), keyData, sizeof(keyData), + keyId); + if (rc != WH_ERROR_OK) { + return rc; + } + + /* Wait for response from server thread */ + do { + rc = wh_Client_KeyCacheResponse(client, &outKeyId); + if (rc == WH_ERROR_NOTREADY) { + sched_yield(); + } + } while (rc == WH_ERROR_NOTREADY); + + return rc; +} + +static int doKeyExport(whClientContext* client, whKeyId keyId) +{ + uint8_t keyData[KEY_DATA_SIZE]; + uint8_t label[WH_NVM_LABEL_LEN]; + uint16_t labelSz = sizeof(label); + uint16_t keySz = sizeof(keyData); + int rc; + + /* Send request */ + rc = wh_Client_KeyExportRequest(client, keyId); + if (rc != WH_ERROR_OK) { + return rc; + } + + /* Wait for response from server thread */ + do { + rc = wh_Client_KeyExportResponse(client, label, labelSz, keyData, + &keySz); + if (rc == WH_ERROR_NOTREADY) { + sched_yield(); + } + } while (rc == WH_ERROR_NOTREADY); + + return rc; +} + +static int doKeyEvict(whClientContext* client, whKeyId keyId) +{ + int rc; + + /* Send request */ + rc = wh_Client_KeyEvictRequest(client, keyId); + if (rc != WH_ERROR_OK) { + return rc; + } + + /* Wait for response from server thread */ + do { + rc = wh_Client_KeyEvictResponse(client); + if (rc == WH_ERROR_NOTREADY) { + sched_yield(); + } + } while (rc == WH_ERROR_NOTREADY); + + return rc; +} + +static int doKeyCommit(whClientContext* client, whKeyId keyId) +{ + int rc; + + /* Send request */ + rc = wh_Client_KeyCommitRequest(client, keyId); + if (rc != WH_ERROR_OK) { + return rc; + } + + /* Wait for response from server thread */ + do { + rc = wh_Client_KeyCommitResponse(client); + if (rc == WH_ERROR_NOTREADY) { + sched_yield(); + } + } while (rc == WH_ERROR_NOTREADY); + + return rc; +} + +static int doKeyErase(whClientContext* client, whKeyId keyId) +{ + int rc; + + /* Send request */ + rc = 
wh_Client_KeyEraseRequest(client, keyId); + if (rc != WH_ERROR_OK) { + return rc; + } + + /* Wait for response from server thread */ + do { + rc = wh_Client_KeyEraseResponse(client); + if (rc == WH_ERROR_NOTREADY) { + sched_yield(); + } + } while (rc == WH_ERROR_NOTREADY); + + return rc; +} + +static int doKeyRevoke(whClientContext* client, whKeyId keyId) +{ + int rc; + + /* Send request */ + rc = wh_Client_KeyRevokeRequest(client, keyId); + if (rc != WH_ERROR_OK) { + return rc; + } + + /* Wait for response from server thread */ + do { + rc = wh_Client_KeyRevokeResponse(client); + if (rc == WH_ERROR_NOTREADY) { + sched_yield(); + } + } while (rc == WH_ERROR_NOTREADY); + + return rc; +} + +#ifdef WOLFHSM_CFG_DMA +static int doKeyCacheDma(ClientServerPair* pair, whKeyId keyId, int iteration) +{ + uint8_t label[WH_NVM_LABEL_LEN]; + whKeyId outKeyId; + int rc; + + /* Fill key data with pattern */ + memset(pair->dmaKeyBuffer, (uint8_t)(iteration & 0xFF), + sizeof(pair->dmaKeyBuffer)); + snprintf((char*)label, sizeof(label), "DmaKey%04X", keyId); + + /* Send DMA request */ + rc = wh_Client_KeyCacheDmaRequest(&pair->client, 0, label, sizeof(label), + pair->dmaKeyBuffer, + sizeof(pair->dmaKeyBuffer), keyId); + if (rc != WH_ERROR_OK) { + return rc; + } + + /* Wait for response from server thread */ + do { + rc = wh_Client_KeyCacheDmaResponse(&pair->client, &outKeyId); + if (rc == WH_ERROR_NOTREADY) { + sched_yield(); + } + } while (rc == WH_ERROR_NOTREADY); + + return rc; +} + +static int doKeyExportDma(ClientServerPair* pair, whKeyId keyId) +{ + uint8_t label[WH_NVM_LABEL_LEN]; + uint16_t outSz; + int rc; + + /* Send DMA request - provide buffer for DMA export */ + rc = wh_Client_KeyExportDmaRequest(&pair->client, keyId, pair->dmaKeyBuffer, + sizeof(pair->dmaKeyBuffer)); + if (rc != WH_ERROR_OK) { + return rc; + } + + /* Wait for response from server thread */ + do { + rc = wh_Client_KeyExportDmaResponse(&pair->client, label, sizeof(label), + &outSz); + if (rc == WH_ERROR_NOTREADY) { + sched_yield(); + } + } while (rc == WH_ERROR_NOTREADY); + + return rc; +} + +static int doNvmAddObjectDma(ClientServerPair* pair, whNvmId id, int iteration) +{ + whNvmMetadata meta; + int32_t out_rc; + int rc; + + /* Fill DMA buffer with pattern */ + memset(pair->dmaNvmBuffer, (uint8_t)(iteration & 0xFF), + sizeof(pair->dmaNvmBuffer)); + + /* Set up metadata */ + memset(&meta, 0, sizeof(meta)); + meta.id = id; + meta.access = WH_NVM_ACCESS_ANY; + meta.flags = WH_NVM_FLAGS_USAGE_ANY; + meta.len = sizeof(pair->dmaNvmBuffer); + + /* Send DMA request */ + rc = wh_Client_NvmAddObjectDmaRequest( + &pair->client, &meta, sizeof(pair->dmaNvmBuffer), pair->dmaNvmBuffer); + if (rc != WH_ERROR_OK) { + return rc; + } + + /* Wait for response from server thread */ + do { + rc = wh_Client_NvmAddObjectDmaResponse(&pair->client, &out_rc); + if (rc == WH_ERROR_NOTREADY) { + sched_yield(); + } + } while (rc == WH_ERROR_NOTREADY); + + if (rc != WH_ERROR_OK) { + return rc; + } + + return out_rc; +} + +static int doNvmReadDma(ClientServerPair* pair, whNvmId id) +{ + int32_t out_rc; + int rc; + + /* Send DMA request */ + rc = wh_Client_NvmReadDmaRequest( + &pair->client, id, 0, sizeof(pair->dmaNvmBuffer), pair->dmaNvmBuffer); + if (rc != WH_ERROR_OK) { + return rc; + } + + /* Wait for response from server thread */ + do { + rc = wh_Client_NvmReadDmaResponse(&pair->client, &out_rc); + if (rc == WH_ERROR_NOTREADY) { + sched_yield(); + } + } while (rc == WH_ERROR_NOTREADY); + + if (rc != WH_ERROR_OK) { + return rc; + } + + return out_rc; +} 
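+
+/* Note on the DMA helpers above: each one uses the per-client dmaKeyBuffer /
+ * dmaNvmBuffer owned by its ClientServerPair, so the only cross-thread access
+ * to those buffers is the server-side DMA copy, which is ordered by the
+ * transport handshake (TSAN-annotated when WOLFHSM_CFG_TEST_STRESS_TSAN is
+ * set). */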
+#endif /* WOLFHSM_CFG_DMA */ + +/* ============================================================================ + * PHASE SETUP + * ========================================================================== */ + +static int doPhaseSetup(ClientServerPair* pair, ContentionPhase phase, + whKeyId keyId) +{ + whClientContext* client = &pair->client; + int rc; + + switch (phase) { + /* Keystore phases that need clean state (evict first) */ + case PHASE_KS_CONCURRENT_CACHE: + rc = doKeyEvict(client, keyId); + /* Ignore NOTFOUND - key might not exist */ + if (rc == WH_ERROR_NOTFOUND) + rc = WH_ERROR_OK; + return rc; + + /* Keystore phases that need key to exist (cache first) */ + case PHASE_KS_CACHE_VS_EVICT: + case PHASE_KS_CACHE_VS_EXPORT: + case PHASE_KS_EVICT_VS_EXPORT: + case PHASE_KS_CACHE_VS_COMMIT: + case PHASE_KS_COMMIT_VS_EVICT: + case PHASE_KS_CONCURRENT_EXPORT: + case PHASE_KS_CONCURRENT_EVICT: + return doKeyCache(client, keyId, 0); + + /* NVM phases that need clean state (destroy first) */ + case PHASE_NVM_CONCURRENT_ADD: + rc = doNvmDestroy(client, HOT_NVM_ID); + if (rc == WH_ERROR_NOTFOUND) + rc = WH_ERROR_OK; + return rc; + + /* NVM phases that need object to exist (add first) */ + case PHASE_NVM_ADD_VS_READ: + case PHASE_NVM_ADD_VS_DESTROY: + case PHASE_NVM_READ_VS_DESTROY: + case PHASE_NVM_CONCURRENT_READ: + case PHASE_NVM_CONCURRENT_DESTROY: + /* First destroy any existing object to make space */ + (void)doNvmDestroy(client, HOT_NVM_ID); + return doNvmAddObject(client, HOT_NVM_ID, 0); + + /* List during modify needs multiple objects */ + case PHASE_NVM_LIST_DURING_MODIFY: + rc = doNvmAddObject(client, HOT_NVM_ID, 0); + if (rc != WH_ERROR_OK && rc != WH_ERROR_NOSPACE) + return rc; + rc = doNvmAddObject(client, HOT_NVM_ID_2, 0); + if (rc != WH_ERROR_OK && rc != WH_ERROR_NOSPACE) + return rc; + rc = doNvmAddObject(client, HOT_NVM_ID_3, 0); + if (rc == WH_ERROR_NOSPACE) + rc = WH_ERROR_OK; + return rc; + + /* Cross-subsystem: cache key AND add NVM object */ + case PHASE_CROSS_COMMIT_VS_ADD: + case PHASE_CROSS_COMMIT_VS_DESTROY: + rc = doKeyCache(client, keyId, 0); + if (rc != WH_ERROR_OK) + return rc; + rc = doNvmAddObject(client, HOT_NVM_ID, 0); + if (rc == WH_ERROR_NOSPACE) + rc = WH_ERROR_OK; + return rc; + + /* Freshen test: commit key to NVM, then evict from cache */ + case PHASE_CROSS_FRESHEN_VS_MODIFY: + rc = doKeyCache(client, keyId, 0); + if (rc != WH_ERROR_OK) + return rc; + rc = doKeyCommit(client, keyId); + if (rc != WH_ERROR_OK && rc != WH_ERROR_NOSPACE) + return rc; + rc = doKeyEvict(client, keyId); + if (rc != WH_ERROR_OK && rc != WH_ERROR_NOTFOUND) + return rc; + /* Add NVM object for the NVM modify operations */ + rc = doNvmAddObject(client, HOT_NVM_ID, 0); + if (rc == WH_ERROR_NOSPACE) + rc = WH_ERROR_OK; + return rc; + + /* Erase phases: cache key, commit to NVM (erase needs both) */ + case PHASE_KS_ERASE_VS_CACHE: + case PHASE_KS_ERASE_VS_EXPORT: + rc = doKeyCache(client, keyId, 0); + if (rc != WH_ERROR_OK) + return rc; + rc = doKeyCommit(client, keyId); + if (rc == WH_ERROR_NOSPACE) + rc = WH_ERROR_OK; + return rc; + + /* Revoke phases: cache key, commit to NVM + * Each revoke phase uses a unique key ID (see selectKeyIdForPhase) + * because revoked keys have NONMODIFIABLE flag and can't be erased */ + case PHASE_KS_REVOKE_VS_CACHE: + case PHASE_KS_REVOKE_VS_EXPORT: + rc = doKeyCache(client, keyId, 0); + if (rc != WH_ERROR_OK) + return rc; + rc = doKeyCommit(client, keyId); + if (rc == WH_ERROR_NOSPACE) + rc = WH_ERROR_OK; + return rc; + + /* GetUniqueId: no 
setup needed */ + case PHASE_KS_CONCURRENT_GETUNIQUEID: + return WH_ERROR_OK; + + /* Explicit freshen: evict any existing key, cache, commit, then evict + * The initial evict makes room in cache. NOSPACE is tolerated because + * NVM may be full from revoke phases - test still runs testing NOTFOUND + */ + case PHASE_KS_EXPLICIT_FRESHEN: + /* Evict first to make room if key already cached */ + (void)doKeyEvict(client, keyId); + rc = doKeyCache(client, keyId, 0); + /* NOSPACE is OK - NVM may be full from revoke phases */ + if (rc != WH_ERROR_OK && rc != WH_ERROR_NOSPACE) + return rc; + if (rc == WH_ERROR_OK) { + rc = doKeyCommit(client, keyId); + if (rc != WH_ERROR_OK && rc != WH_ERROR_NOSPACE) + return rc; + rc = doKeyEvict(client, keyId); + if (rc == WH_ERROR_NOTFOUND) + rc = WH_ERROR_OK; + } + else { + rc = WH_ERROR_OK; /* Tolerate NOSPACE */ + } + return rc; + + /* Add with reclaim: fill NVM with objects to trigger reclaim */ + case PHASE_NVM_ADD_WITH_RECLAIM: { + int i; + /* Add 10 objects to fill up NVM and trigger reclaim during test */ + for (i = 0; i < 10; i++) { + rc = doNvmAddObject(client, (whNvmId)(HOT_NVM_ID + i), 0); + if (rc != WH_ERROR_OK && rc != WH_ERROR_NOSPACE) + return rc; + } + return WH_ERROR_OK; + } + + /* GetAvailable vs Add: add one object */ + case PHASE_NVM_GETAVAILABLE_VS_ADD: + (void)doNvmDestroy(client, HOT_NVM_ID); + rc = doNvmAddObject(client, HOT_NVM_ID, 0); + if (rc == WH_ERROR_NOSPACE) + rc = WH_ERROR_OK; + return rc; + + /* GetMetadata vs Destroy: destroy then add object */ + case PHASE_NVM_GETMETADATA_VS_DESTROY: + (void)doNvmDestroy(client, HOT_NVM_ID); + rc = doNvmAddObject(client, HOT_NVM_ID, 0); + if (rc == WH_ERROR_NOSPACE) + rc = WH_ERROR_OK; + return rc; + +#ifdef WOLFHSM_CFG_DMA + /* DMA phases: evict first to make room in cache, tolerate NOSPACE + * (cache may be full from earlier phases like GetUniqueId) */ + case PHASE_KS_CACHE_DMA_VS_EXPORT: + case PHASE_KS_EXPORT_DMA_VS_EVICT: + /* Evict first to make room if key already cached */ + (void)doKeyEvict(client, keyId); + rc = doKeyCache(client, keyId, 0); + /* NOSPACE is OK - cache may be full from earlier phases */ + if (rc == WH_ERROR_NOSPACE) + rc = WH_ERROR_OK; + return rc; + + case PHASE_NVM_ADD_DMA_VS_READ: + (void)doNvmDestroy(client, HOT_NVM_ID); + return doNvmAddObject(client, HOT_NVM_ID, 0); + + case PHASE_NVM_READ_DMA_VS_DESTROY: + (void)doNvmDestroy(client, HOT_NVM_ID); + return doNvmAddObject(client, HOT_NVM_ID, 0); +#endif + + default: + return WH_ERROR_OK; + } +} + +/* ============================================================================ + * PHASE OPERATION DISPATCH + * ========================================================================== */ + +static int executePhaseOperation(ClientServerPair* pair, ContentionPhase phase, + ClientRole role, int iteration, whKeyId keyId) +{ + whClientContext* client = &pair->client; + + switch (phase) { + /* Keystore phases */ + case PHASE_KS_CONCURRENT_CACHE: + return doKeyCache(client, keyId, iteration); + + case PHASE_KS_CACHE_VS_EVICT: + if (role == ROLE_OP_A) + return doKeyCache(client, keyId, iteration); + else + return doKeyEvict(client, keyId); + + case PHASE_KS_CACHE_VS_EXPORT: + if (role == ROLE_OP_A) + return doKeyCache(client, keyId, iteration); + else + return doKeyExport(client, keyId); + + case PHASE_KS_EVICT_VS_EXPORT: + if (role == ROLE_OP_A) + return doKeyEvict(client, keyId); + else + return doKeyExport(client, keyId); + + case PHASE_KS_CACHE_VS_COMMIT: + if (role == ROLE_OP_A) + return doKeyCache(client, keyId, 
iteration); + else + return doKeyCommit(client, keyId); + + case PHASE_KS_COMMIT_VS_EVICT: + if (role == ROLE_OP_A) + return doKeyCommit(client, keyId); + else + return doKeyEvict(client, keyId); + + case PHASE_KS_CONCURRENT_EXPORT: + return doKeyExport(client, keyId); + + case PHASE_KS_CONCURRENT_EVICT: + return doKeyEvict(client, keyId); + + /* NVM phases */ + case PHASE_NVM_CONCURRENT_ADD: + return doNvmAddObject(client, HOT_NVM_ID, iteration); + + case PHASE_NVM_ADD_VS_READ: + if (role == ROLE_OP_A) + return doNvmAddObject(client, HOT_NVM_ID, iteration); + else + return doNvmRead(client, HOT_NVM_ID); + + case PHASE_NVM_ADD_VS_DESTROY: + if (role == ROLE_OP_A) + return doNvmAddObject(client, HOT_NVM_ID, iteration); + else + return doNvmDestroy(client, HOT_NVM_ID); + + case PHASE_NVM_READ_VS_DESTROY: + if (role == ROLE_OP_A) + return doNvmRead(client, HOT_NVM_ID); + else + return doNvmDestroy(client, HOT_NVM_ID); + + case PHASE_NVM_CONCURRENT_READ: + return doNvmRead(client, HOT_NVM_ID); + + case PHASE_NVM_LIST_DURING_MODIFY: + if (role == ROLE_OP_A) { + /* Alternate between add and destroy */ + if (iteration % 2 == 0) + return doNvmAddObject(client, HOT_NVM_ID, iteration); + else + return doNvmDestroy(client, HOT_NVM_ID); + } + else { + return doNvmList(client); + } + + case PHASE_NVM_CONCURRENT_DESTROY: + return doNvmDestroy(client, HOT_NVM_ID); + + /* Cross-subsystem phases */ + case PHASE_CROSS_COMMIT_VS_ADD: + if (role == ROLE_OP_A) + return doKeyCommit(client, keyId); + else + return doNvmAddObject(client, HOT_NVM_ID, iteration); + + case PHASE_CROSS_COMMIT_VS_DESTROY: + if (role == ROLE_OP_A) + return doKeyCommit(client, keyId); + else + return doNvmDestroy(client, HOT_NVM_ID); + + case PHASE_CROSS_FRESHEN_VS_MODIFY: + if (role == ROLE_OP_A) { + /* Export triggers freshen from NVM if not in cache */ + return doKeyExport(client, keyId); + } + else { + /* Modify NVM while freshen might be happening */ + if (iteration % 2 == 0) + return doNvmAddObject(client, HOT_NVM_ID, iteration); + else + return doNvmDestroy(client, HOT_NVM_ID); + } + + /* Erase vs Cache */ + case PHASE_KS_ERASE_VS_CACHE: + if (role == ROLE_OP_A) + return doKeyErase(client, keyId); + else + return doKeyCache(client, keyId, iteration); + + /* Erase vs Export */ + case PHASE_KS_ERASE_VS_EXPORT: + if (role == ROLE_OP_A) + return doKeyErase(client, keyId); + else + return doKeyExport(client, keyId); + + /* Revoke vs Cache */ + case PHASE_KS_REVOKE_VS_CACHE: + if (role == ROLE_OP_A) + return doKeyRevoke(client, keyId); + else + return doKeyCache(client, keyId, iteration); + + /* Revoke vs Export */ + case PHASE_KS_REVOKE_VS_EXPORT: + if (role == ROLE_OP_A) + return doKeyRevoke(client, keyId); + else + return doKeyExport(client, keyId); + + /* Concurrent GetUniqueId: all threads request fresh keys via cache with + * ERASED */ + case PHASE_KS_CONCURRENT_GETUNIQUEID: + return doKeyCache(client, WH_KEYID_ERASED, iteration); + + /* Explicit Freshen: all threads export (triggers freshen from NVM) */ + case PHASE_KS_EXPLICIT_FRESHEN: + return doKeyExport(client, keyId); + + /* Add With Reclaim: all threads add unique objects to trigger reclaim + */ + case PHASE_NVM_ADD_WITH_RECLAIM: + /* Use client ID and iteration to create unique object IDs */ + return doNvmAddObject(client, + (whNvmId)(HOT_NVM_ID + 10 + + (pair->clientId * 1000) + + iteration), + iteration); + + /* GetAvailable vs Add */ + case PHASE_NVM_GETAVAILABLE_VS_ADD: + if (role == ROLE_OP_A) + return doNvmGetAvailable(client); + else + return doNvmAddObject(client, 
HOT_NVM_ID, iteration); + + /* GetMetadata vs Destroy */ + case PHASE_NVM_GETMETADATA_VS_DESTROY: + if (role == ROLE_OP_A) + return doNvmGetMetadata(client, HOT_NVM_ID); + else + return doNvmDestroy(client, HOT_NVM_ID); + +#ifdef WOLFHSM_CFG_DMA + /* DMA: Cache DMA vs Export */ + case PHASE_KS_CACHE_DMA_VS_EXPORT: + if (role == ROLE_OP_A) + return doKeyCacheDma(pair, keyId, iteration); + else + return doKeyExport(client, keyId); + + /* DMA: Export DMA vs Evict */ + case PHASE_KS_EXPORT_DMA_VS_EVICT: + if (role == ROLE_OP_A) + return doKeyExportDma(pair, keyId); + else + return doKeyEvict(client, keyId); + + /* DMA: Add DMA vs Read */ + case PHASE_NVM_ADD_DMA_VS_READ: + if (role == ROLE_OP_A) + return doNvmAddObjectDma(pair, HOT_NVM_ID, iteration); + else + return doNvmRead(client, HOT_NVM_ID); + + /* DMA: Read DMA vs Destroy */ + case PHASE_NVM_READ_DMA_VS_DESTROY: + if (role == ROLE_OP_A) + return doNvmReadDma(pair, HOT_NVM_ID); + else + return doNvmDestroy(client, HOT_NVM_ID); +#endif + + default: + return WH_ERROR_OK; + } +} + +/* ============================================================================ + * RESULT VALIDATION + * ========================================================================== */ + +static int isAcceptableResult(ContentionPhase phase, int rc) +{ + /* Always acceptable */ + if (rc == WH_ERROR_OK) + return 1; + + switch (phase) { + /* Cache operations: NOSPACE acceptable (cache full) */ + case PHASE_KS_CONCURRENT_CACHE: + case PHASE_KS_CACHE_VS_EVICT: + case PHASE_KS_CACHE_VS_EXPORT: + case PHASE_KS_CACHE_VS_COMMIT: + return (rc == WH_ERROR_NOSPACE || rc == WH_ERROR_NOTFOUND); + + /* Evict/Export: NOTFOUND acceptable (already evicted) */ + case PHASE_KS_EVICT_VS_EXPORT: + case PHASE_KS_COMMIT_VS_EVICT: + case PHASE_KS_CONCURRENT_EVICT: + case PHASE_KS_CONCURRENT_EXPORT: + return (rc == WH_ERROR_NOTFOUND || rc == WH_ERROR_NOSPACE); + + /* NVM Add: NOSPACE acceptable */ + case PHASE_NVM_CONCURRENT_ADD: + case PHASE_NVM_ADD_VS_READ: + case PHASE_NVM_ADD_VS_DESTROY: + case PHASE_NVM_LIST_DURING_MODIFY: + return (rc == WH_ERROR_NOSPACE || rc == WH_ERROR_NOTFOUND); + + /* NVM Read/Destroy: NOTFOUND acceptable */ + case PHASE_NVM_READ_VS_DESTROY: + case PHASE_NVM_CONCURRENT_READ: + case PHASE_NVM_CONCURRENT_DESTROY: + return (rc == WH_ERROR_NOTFOUND); + + /* Cross-subsystem: multiple acceptable */ + case PHASE_CROSS_COMMIT_VS_ADD: + case PHASE_CROSS_COMMIT_VS_DESTROY: + case PHASE_CROSS_FRESHEN_VS_MODIFY: + return (rc == WH_ERROR_NOSPACE || rc == WH_ERROR_NOTFOUND); + + /* Erase phases: NOTFOUND, NOSPACE acceptable */ + case PHASE_KS_ERASE_VS_CACHE: + case PHASE_KS_ERASE_VS_EXPORT: + return (rc == WH_ERROR_NOTFOUND || rc == WH_ERROR_NOSPACE); + + /* Revoke phases: NOTFOUND, ACCESS, USAGE, NOSPACE acceptable */ + case PHASE_KS_REVOKE_VS_CACHE: + case PHASE_KS_REVOKE_VS_EXPORT: + return (rc == WH_ERROR_NOTFOUND || rc == WH_ERROR_ACCESS || + rc == WH_ERROR_USAGE || rc == WH_ERROR_NOSPACE); + + /* GetUniqueId: NOSPACE acceptable */ + case PHASE_KS_CONCURRENT_GETUNIQUEID: + return (rc == WH_ERROR_NOSPACE); + + /* Explicit Freshen: NOTFOUND, NOSPACE acceptable */ + case PHASE_KS_EXPLICIT_FRESHEN: + return (rc == WH_ERROR_NOTFOUND || rc == WH_ERROR_NOSPACE); + + /* Add With Reclaim: NOSPACE, ACCESS acceptable + * ACCESS can occur if reclaim is modifying object metadata + * concurrently with add operations */ + case PHASE_NVM_ADD_WITH_RECLAIM: + return (rc == WH_ERROR_NOSPACE || rc == WH_ERROR_ACCESS); + + /* GetAvailable vs Add: NOSPACE, NOTFOUND acceptable */ + case 
PHASE_NVM_GETAVAILABLE_VS_ADD: + return (rc == WH_ERROR_NOSPACE || rc == WH_ERROR_NOTFOUND); + + /* GetMetadata vs Destroy: NOTFOUND acceptable */ + case PHASE_NVM_GETMETADATA_VS_DESTROY: + return (rc == WH_ERROR_NOTFOUND); + +#ifdef WOLFHSM_CFG_DMA + /* DMA phases: same as non-DMA equivalents */ + case PHASE_KS_CACHE_DMA_VS_EXPORT: + return (rc == WH_ERROR_NOSPACE || rc == WH_ERROR_NOTFOUND); + + case PHASE_KS_EXPORT_DMA_VS_EVICT: + return (rc == WH_ERROR_NOTFOUND || rc == WH_ERROR_NOSPACE); + + case PHASE_NVM_ADD_DMA_VS_READ: + return (rc == WH_ERROR_NOSPACE || rc == WH_ERROR_NOTFOUND); + + case PHASE_NVM_READ_DMA_VS_DESTROY: + return (rc == WH_ERROR_NOTFOUND); +#endif + + default: + return 0; + } +} + +/* ============================================================================ + * CLIENT THREAD (CONTINUOUS STREAMING) + * ========================================================================== */ + +static void* contentionClientThread(void* arg) +{ + ClientServerPair* pair = (ClientServerPair*)arg; + StressTestContext* ctx = pair->sharedCtx; + int rc; + int localIteration; + + /* Initialize client */ + rc = wh_Client_Init(&pair->client, &pair->clientConfig); + if (rc != WH_ERROR_OK) { + WH_ERROR_PRINT("Client %d init failed: %d\n", pair->clientId, rc); + ATOMIC_ADD_INT(&pair->errorCount, 1); + return NULL; + } + + /* Wait for all threads to start */ + pthread_barrier_wait(&ctx->startBarrier); + + /* Always call barrier first, then check exit flag - prevents deadlock */ + while (1) { + /* ===== SETUP PHASE (once per phase) ===== */ + pthread_barrier_wait(&ctx->setupBarrier); + if (ATOMIC_LOAD_INT(&ctx->globalStopFlag)) { + /* Call remaining barriers before exiting to prevent deadlock */ + pthread_barrier_wait(&ctx->setupCompleteBarrier); + pthread_barrier_wait(&ctx->streamStartBarrier); + pthread_barrier_wait(&ctx->streamEndBarrier); + break; + } + + /* Only client 0 does setup */ + if (pair->clientId == 0) { + rc = doPhaseSetup(pair, ctx->currentPhase, ctx->currentKeyId); + if (rc != WH_ERROR_OK) { + WH_ERROR_PRINT("Setup failed for phase %d: %d\n", + ctx->currentPhase, rc); + } + } + + pthread_barrier_wait(&ctx->setupCompleteBarrier); + + /* ===== STREAMING PHASE (tight loop, no barriers) ===== */ + pthread_barrier_wait(&ctx->streamStartBarrier); + + ContentionPhase phase = ctx->currentPhase; + whKeyId keyId = ctx->currentKeyId; + ClientRole role = ctx->clientRoles[pair->clientId]; + localIteration = 0; + + /* Stream requests until phaseRunning becomes 0 */ + while (ATOMIC_LOAD_INT(&ctx->phaseRunning)) { + rc = + executePhaseOperation(pair, phase, role, localIteration, keyId); + + /* Count iteration */ + localIteration++; + ATOMIC_ADD_INT(&pair->iterationCount, 1); + + /* Track unexpected errors */ + if (!isAcceptableResult(phase, rc)) { + ATOMIC_ADD_INT(&pair->errorCount, 1); + } + + /* NO BARRIER HERE - continuous streaming */ + } + + /* Wait for all clients to finish streaming */ + pthread_barrier_wait(&ctx->streamEndBarrier); + } + + wh_Client_Cleanup(&pair->client); + return NULL; +} + +/* ============================================================================ + * PHASE EXECUTION + * ========================================================================== */ + +static int allClientsReachedIterations(StressTestContext* ctx, int target) +{ + int i; + for (i = 0; i < NUM_CLIENTS; i++) { + if (ATOMIC_LOAD_INT(&ctx->pairs[i].iterationCount) < target) { + return 0; + } + } + return 1; +} + +/* Select the appropriate keyId for a phase. 
+ * Revoke-related phases need unique key IDs because revoked keys can't be + * erased or re-cached. Each phase type that might leave a key revoked needs + * its own key ID to avoid conflicts. + */ +static whKeyId selectKeyIdForPhase(ContentionPhase phase, int isGlobal) +{ + switch (phase) { + case PHASE_KS_REVOKE_VS_CACHE: + return isGlobal ? REVOKE_CACHE_KEY_GLOBAL : REVOKE_CACHE_KEY_LOCAL; + case PHASE_KS_REVOKE_VS_EXPORT: + return isGlobal ? REVOKE_EXPORT_KEY_GLOBAL + : REVOKE_EXPORT_KEY_LOCAL; + /* Freshen uses HOT_KEY_ID (not unique IDs) so it can reuse keys that + * are already committed by earlier phases - avoids NVM space issues */ + default: + return isGlobal ? HOT_KEY_ID_GLOBAL : HOT_KEY_ID_LOCAL; + } +} + +static int runPhase(StressTestContext* ctx, const PhaseConfig* config, + whKeyId keyId) +{ + int i; +#ifdef WOLFHSM_CFG_TEST_STRESS_PHASE_TIMEOUT_SEC + time_t phaseStart; +#endif + int totalIterations = 0; + int totalErrors = 0; + int timedOut = 0; + const char* keyScope = + (keyId & WH_KEYID_CLIENT_GLOBAL_FLAG) ? "global" : "local"; + + WH_TEST_PRINT(" Phase: %s (%s key)\n", config->name, keyScope); + + /* 1. Set phase info for all clients */ + ctx->currentPhase = config->phase; + ctx->currentKeyId = keyId; + for (i = 0; i < NUM_CLIENTS; i++) { + ctx->clientRoles[i] = config->roles[i]; + } + + /* 2. Signal clients to run setup */ + pthread_barrier_wait(&ctx->setupBarrier); + + /* 3. Wait for setup to complete */ + pthread_barrier_wait(&ctx->setupCompleteBarrier); + + /* 4. Reset iteration counts */ + for (i = 0; i < NUM_CLIENTS; i++) { + ATOMIC_STORE_INT(&ctx->pairs[i].iterationCount, 0); + ATOMIC_STORE_INT(&ctx->pairs[i].errorCount, 0); + } + + /* 5. Signal clients to start streaming */ + ATOMIC_STORE_INT(&ctx->phaseRunning, 1); + pthread_barrier_wait(&ctx->streamStartBarrier); + + /* 6. Let clients stream for configured iterations OR duration */ +#ifdef WOLFHSM_CFG_TEST_STRESS_PHASE_TIMEOUT_SEC + phaseStart = time(NULL); +#endif + while (!allClientsReachedIterations(ctx, config->iterations)) { +#ifdef WOLFHSM_CFG_TEST_STRESS_PHASE_TIMEOUT_SEC + if (time(NULL) - phaseStart > + WOLFHSM_CFG_TEST_STRESS_PHASE_TIMEOUT_SEC) { + WH_ERROR_PRINT(" Phase timeout after %d seconds\n", + WOLFHSM_CFG_TEST_STRESS_PHASE_TIMEOUT_SEC); + timedOut = 1; + break; + } +#endif + { + struct timespec ts = {0, 10000000}; /* 10ms */ + nanosleep(&ts, NULL); + } + } + + /* 7. Signal clients to stop streaming */ + ATOMIC_STORE_INT(&ctx->phaseRunning, 0); + + /* 8. Wait for all clients to finish current operation */ + pthread_barrier_wait(&ctx->streamEndBarrier); + + /* 9. 
Collect and print results */ + for (i = 0; i < NUM_CLIENTS; i++) { + int iters = ATOMIC_LOAD_INT(&ctx->pairs[i].iterationCount); + int errors = ATOMIC_LOAD_INT(&ctx->pairs[i].errorCount); + totalIterations += iters; + totalErrors += errors; + } + WH_TEST_PRINT(" Total: %d iterations, %d errors\n", totalIterations, + totalErrors); + + /* Return error if phase failed */ + if (timedOut || totalErrors > 0) { + return WH_ERROR_ABORTED; + } + return WH_ERROR_OK; +} + +/* ============================================================================ + * MAIN TEST FUNCTION + * ========================================================================== */ + +static void printFinalStats(StressTestContext* ctx) +{ + int i; + int totalErrors = 0; + + WH_TEST_PRINT("\n=== Final Statistics ===\n"); + for (i = 0; i < NUM_CLIENTS; i++) { + WH_TEST_PRINT("Client %d: total errors=%d\n", i, + ctx->pairs[i].errorCount); + totalErrors += ctx->pairs[i].errorCount; + } + WH_TEST_PRINT("Total errors across all phases: %d\n", totalErrors); +} + +int whTest_ThreadSafeStress(void) +{ + StressTestContext ctx; + int i; + int rc; + int testResult = 0; + int phasesFailed = 0; + size_t phaseIdx; + + memset(&ctx, 0, sizeof(ctx)); + + WH_TEST_PRINT("=== Deterministic Contention Stress Test ===\n"); + WH_TEST_PRINT("Clients: %d, Phases: %zu, Iterations per phase: %d\n", + NUM_CLIENTS, sizeof(phases) / sizeof(phases[0]), + PHASE_ITERATIONS); + WH_TEST_PRINT("Key scopes: global, local\n"); + + /* Initialize wolfCrypt */ + rc = wolfCrypt_Init(); + if (rc != 0) { + WH_ERROR_PRINT("wolfCrypt_Init failed: %d\n", rc); + return rc; + } + + /* Initialize shared NVM with lock */ + rc = initSharedNvm(&ctx); + if (rc != WH_ERROR_OK) { + WH_ERROR_PRINT("Failed to initialize shared NVM\n"); + wolfCrypt_Cleanup(); + return rc; + } + + /* Initialize client-server pairs */ + for (i = 0; i < NUM_CLIENTS; i++) { + rc = initClientServerPair(&ctx, i); + if (rc != WH_ERROR_OK) { + WH_ERROR_PRINT("Failed to initialize pair %d\n", i); + testResult = rc; + goto cleanup; + } + } + + /* Initialize barriers + * startBarrier: main + servers + clients = NUM_CLIENTS * 2 + 1 + * phase barriers: main + clients = NUM_CLIENTS + 1 + */ + rc = pthread_barrier_init(&ctx.startBarrier, NULL, NUM_CLIENTS * 2 + 1); + if (rc != 0) { + WH_ERROR_PRINT("Failed to init start barrier: %d\n", rc); + testResult = rc; + goto cleanup; + } + + rc = pthread_barrier_init(&ctx.setupBarrier, NULL, NUM_CLIENTS + 1); + if (rc != 0) { + WH_ERROR_PRINT("Failed to init setup barrier: %d\n", rc); + testResult = rc; + goto cleanup; + } + + rc = pthread_barrier_init(&ctx.setupCompleteBarrier, NULL, NUM_CLIENTS + 1); + if (rc != 0) { + WH_ERROR_PRINT("Failed to init setupComplete barrier: %d\n", rc); + testResult = rc; + goto cleanup; + } + + rc = pthread_barrier_init(&ctx.streamStartBarrier, NULL, NUM_CLIENTS + 1); + if (rc != 0) { + WH_ERROR_PRINT("Failed to init streamStart barrier: %d\n", rc); + testResult = rc; + goto cleanup; + } + + rc = pthread_barrier_init(&ctx.streamEndBarrier, NULL, NUM_CLIENTS + 1); + if (rc != 0) { + WH_ERROR_PRINT("Failed to init streamEnd barrier: %d\n", rc); + testResult = rc; + goto cleanup; + } + + WH_TEST_PRINT("Starting %d server threads and %d client threads...\n", + NUM_CLIENTS, NUM_CLIENTS); + + /* Start all server threads */ + for (i = 0; i < NUM_CLIENTS; i++) { + rc = pthread_create(&ctx.pairs[i].serverThread, NULL, serverThread, + &ctx.pairs[i]); + if (rc != 0) { + WH_ERROR_PRINT("Failed to create server thread %d: %d\n", i, rc); + 
ATOMIC_STORE_INT(&ctx.globalStopFlag, 1); + testResult = rc; + goto join_threads; + } + } + + /* Start all client threads */ + for (i = 0; i < NUM_CLIENTS; i++) { + rc = pthread_create(&ctx.pairs[i].clientThread, NULL, + contentionClientThread, &ctx.pairs[i]); + if (rc != 0) { + WH_ERROR_PRINT("Failed to create client thread %d: %d\n", i, rc); + ATOMIC_STORE_INT(&ctx.globalStopFlag, 1); + testResult = rc; + goto join_threads; + } + } + + /* Synchronize start - wait for all threads to be ready */ + pthread_barrier_wait(&ctx.startBarrier); + WH_TEST_PRINT("All threads started, running phases...\n\n"); + + /* Run all phases */ + for (phaseIdx = 0; phaseIdx < sizeof(phases) / sizeof(phases[0]); + phaseIdx++) { + whKeyId globalKey = selectKeyIdForPhase(phases[phaseIdx].phase, 1); + whKeyId localKey = selectKeyIdForPhase(phases[phaseIdx].phase, 0); + + rc = runPhase(&ctx, &phases[phaseIdx], globalKey); + if (rc != WH_ERROR_OK) { + WH_ERROR_PRINT("Phase %zu (global) failed: %d\n", phaseIdx, rc); + phasesFailed++; + if (testResult == 0) { + testResult = rc; /* Record first error */ + } + /* Continue to next phase - don't break */ + } + + rc = runPhase(&ctx, &phases[phaseIdx], localKey); + if (rc != WH_ERROR_OK) { + WH_ERROR_PRINT("Phase %zu (local) failed: %d\n", phaseIdx, rc); + phasesFailed++; + if (testResult == 0) { + testResult = rc; /* Record first error */ + } + /* Continue to next phase - don't break */ + } + } + + /* Signal global stop */ + ATOMIC_STORE_INT(&ctx.globalStopFlag, 1); + + /* Need to release clients from barrier waits - run one more "dummy" phase + */ + pthread_barrier_wait(&ctx.setupBarrier); + pthread_barrier_wait(&ctx.setupCompleteBarrier); + ATOMIC_STORE_INT(&ctx.phaseRunning, 0); + pthread_barrier_wait(&ctx.streamStartBarrier); + pthread_barrier_wait(&ctx.streamEndBarrier); + +join_threads: + /* Signal stop for servers */ + for (i = 0; i < NUM_CLIENTS; i++) { + ATOMIC_STORE_INT(&ctx.pairs[i].stopFlag, 1); + } + + /* Join all client threads */ + for (i = 0; i < NUM_CLIENTS; i++) { + if (ctx.pairs[i].clientThread != 0) { + pthread_join(ctx.pairs[i].clientThread, NULL); + } + } + + /* Join all server threads */ + for (i = 0; i < NUM_CLIENTS; i++) { + if (ctx.pairs[i].serverThread != 0) { + pthread_join(ctx.pairs[i].serverThread, NULL); + } + } + + /* Print statistics */ + printFinalStats(&ctx); + + pthread_barrier_destroy(&ctx.startBarrier); + pthread_barrier_destroy(&ctx.setupBarrier); + pthread_barrier_destroy(&ctx.setupCompleteBarrier); + pthread_barrier_destroy(&ctx.streamStartBarrier); + pthread_barrier_destroy(&ctx.streamEndBarrier); + +cleanup: + /* Cleanup client-server pairs */ + for (i = 0; i < NUM_CLIENTS; i++) { + cleanupClientServerPair(&ctx.pairs[i]); + } + + /* Cleanup NVM */ + wh_Nvm_Cleanup(&ctx.nvm); + + /* Cleanup mutex attributes */ + pthread_mutexattr_destroy(&ctx.mutexAttr); + + /* Cleanup wolfCrypt */ + wolfCrypt_Cleanup(); + + if (testResult == 0) { + WH_TEST_PRINT( + "\n=== Deterministic Contention Stress Test PASSED ===\n"); + } + else { + WH_ERROR_PRINT("\nPhases failed: %d\n", phasesFailed); + WH_ERROR_PRINT("=== Deterministic Contention Stress Test FAILED ===\n"); + } + + return testResult; +} + +#else /* !WOLFHSM_CFG_THREADSAFE || !WOLFHSM_CFG_TEST_POSIX || \ + !WOLFHSM_CFG_GLOBAL_KEYS || !WOLFHSM_CFG_ENABLE_CLIENT || \ + !WOLFHSM_CFG_ENABLE_SERVER || WOLFHSM_CFG_NO_CRYPTO */ + +#include "wh_test_threadsafe_stress.h" +#include "wh_test_common.h" + +int whTest_ThreadSafeStress(void) +{ + WH_TEST_PRINT("Thread safety stress test skipped "); + 
WH_TEST_PRINT("(requires THREADSAFE + TEST_POSIX + GLOBAL_KEYS + "); + WH_TEST_PRINT("ENABLE_CLIENT + ENABLE_SERVER + crypto)\n"); + return 0; +} + +#endif /* WOLFHSM_CFG_THREADSAFE && WOLFHSM_CFG_TEST_POSIX && ... */ diff --git a/test/wh_test_threadsafe_stress.h b/test/wh_test_threadsafe_stress.h new file mode 100644 index 000000000..fb6b7a17a --- /dev/null +++ b/test/wh_test_threadsafe_stress.h @@ -0,0 +1,40 @@ +/* + * Copyright (C) 2025 wolfSSL Inc. + * + * This file is part of wolfHSM. + * + * wolfHSM is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the License, or + * (at your option) any later version. + * + * wolfHSM is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with wolfHSM. If not, see . + */ +/* + * test/wh_test_threadsafe_stress.h + * + * Multithreaded stress test for thread safety validation. + */ + +#ifndef TEST_WH_TEST_THREADSAFE_STRESS_H_ +#define TEST_WH_TEST_THREADSAFE_STRESS_H_ + +/* + * Runs multithreaded stress tests for thread safety validation. + * Tests concurrent access to shared NVM and global key cache from + * multiple client threads via separate server contexts. + * + * Requires: WOLFHSM_CFG_THREADSAFE, WOLFHSM_CFG_TEST_POSIX, + * WOLFHSM_CFG_GLOBAL_KEYS + * + * Returns 0 on success, non-zero on failure. + */ +int whTest_ThreadSafeStress(void); + +#endif /* TEST_WH_TEST_THREADSAFE_STRESS_H_ */ diff --git a/wolfhsm/wh_lock.h b/wolfhsm/wh_lock.h new file mode 100644 index 000000000..46e39c0bc --- /dev/null +++ b/wolfhsm/wh_lock.h @@ -0,0 +1,149 @@ +/* + * Copyright (C) 2024 wolfSSL Inc. + * + * This file is part of wolfHSM. + * + * wolfHSM is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the License, or + * (at your option) any later version. + * + * wolfHSM is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with wolfHSM. If not, see . + */ +/* + * wolfhsm/wh_lock.h + * + * Platform-agnostic lock abstraction for thread-safe access to shared + * resources. This module provides a callback-based locking mechanism + * that allows platform-specific implementations (pthreads, FreeRTOS, + * bare-metal spinlocks, etc.) without introducing OS dependencies in the + * core wolfHSM code. + * + * Each shared resource (NVM, crypto) embeds its own whLock instance, + * allowing independent locking and arbitrary sharing topologies. + * + * When WOLFHSM_CFG_THREADSAFE is defined: + * - Lock operations use platform callbacks for actual synchronization + * - NULL lockConfig results in no-op locking (single-threaded mode) + * + * When WOLFHSM_CFG_THREADSAFE is not defined: + * - All lock operations are no-ops with zero overhead + */ + +#ifndef WOLFHSM_WH_LOCK_H_ +#define WOLFHSM_WH_LOCK_H_ + + +/** + * Lock callback function signatures. 
+ * + * All callbacks receive a user-provided context pointer (from whLockConfig). + * Each lock instance protects exactly one resource. + * + * Return: WH_ERROR_OK on success, negative error code on failure + */ + +/** Initialize a lock - called once during setup */ +typedef int (*whLockInitCb)(void* context, const void* config); + +/** Cleanup a lock - called once during teardown */ +typedef int (*whLockCleanupCb)(void* context); + +/** Acquire exclusive lock (blocking) */ +typedef int (*whLockAcquireCb)(void* context); + +/** Release exclusive lock */ +typedef int (*whLockReleaseCb)(void* context); + +/** + * Lock callback table + * + * Platforms provide implementations of these callbacks. If the entire + * callback table is NULL, all locking operations become no-ops + * (single-threaded mode). Individual callbacks may also be NULL to + * skip specific operations. + */ +typedef struct whLockCb_t { + whLockInitCb init; /* Initialize lock resources */ + whLockCleanupCb cleanup; /* Free lock resources */ + whLockAcquireCb acquire; /* Acquire exclusive lock */ + whLockReleaseCb release; /* Release exclusive lock */ +} whLockCb; + +/** + * Lock instance structure + * + * Holds callback table and platform-specific context. + * The context pointer is passed to all callbacks. + */ +typedef struct whLock_t { + const whLockCb* cb; /* Platform callbacks (may be NULL) */ + void* context; /* Platform context (e.g., mutex pointer) */ +} whLock; + +/** + * Lock configuration for initialization + */ +typedef struct whLockConfig_t { + const whLockCb* cb; /* Callback table */ + void* context; /* Platform context */ + const void* config; /* Backend-specific config (passed to init cb) */ +} whLockConfig; + +/** + * @brief Initializes a lock instance. + * + * This function initializes the lock by calling the init callback. + * If config is NULL or config->cb is NULL, locking is disabled + * (single-threaded mode) and all lock operations become no-ops. + * + * @param[in] lock Pointer to the lock structure. + * @param[in] config Pointer to the lock configuration (may be NULL). + * @return int Returns WH_ERROR_OK on success, or a negative error code on + * failure. + */ +int wh_Lock_Init(whLock* lock, const whLockConfig* config); + +/** + * @brief Cleans up a lock instance. + * + * This function cleans up the lock by calling the cleanup callback. + * + * @param[in] lock Pointer to the lock structure. + * @return int Returns WH_ERROR_OK on success, or WH_ERROR_BADARGS if lock + * is NULL. + */ +int wh_Lock_Cleanup(whLock* lock); + +/** + * @brief Acquires exclusive access to a lock. + * + * This function blocks until the lock is acquired. If no callbacks are + * configured, this is a no-op that returns WH_ERROR_OK. + * + * @param[in] lock Pointer to the lock structure. + * @return int Returns WH_ERROR_OK on success, or a negative error code on + * failure (e.g., WH_ERROR_LOCKED if acquisition failed). + */ +int wh_Lock_Acquire(whLock* lock); + +/** + * @brief Releases exclusive access to a lock. + * + * This function releases a previously acquired lock. If no callbacks are + * configured, this is a no-op that returns WH_ERROR_OK. + * + * @param[in] lock Pointer to the lock structure. + * @return int Returns WH_ERROR_OK on success, or a negative error code on + * failure. 
+ */ +int wh_Lock_Release(whLock* lock); + + +#endif /* WOLFHSM_WH_LOCK_H_ */ diff --git a/wolfhsm/wh_nvm.h b/wolfhsm/wh_nvm.h index 374180d07..6c3df8f77 100644 --- a/wolfhsm/wh_nvm.h +++ b/wolfhsm/wh_nvm.h @@ -44,6 +44,7 @@ #include "wolfhsm/wh_common.h" /* For whNvm types */ #include "wolfhsm/wh_keycache.h" /* For whKeyCacheContext */ +#include "wolfhsm/wh_lock.h" typedef struct { int (*Init)(void* context, const void *config); @@ -96,6 +97,9 @@ typedef struct whNvmContext_t { #if !defined(WOLFHSM_CFG_NO_CRYPTO) && defined(WOLFHSM_CFG_GLOBAL_KEYS) whKeyCacheContext globalCache; /* Global key cache */ #endif +#ifdef WOLFHSM_CFG_THREADSAFE + whLock lock; /* Lock for NVM and global cache operations */ +#endif } whNvmContext; /* Simple helper configuration structure associated with an NVM instance */ @@ -103,6 +107,9 @@ typedef struct whNvmConfig_t { whNvmCb *cb; void* context; void* config; +#ifdef WOLFHSM_CFG_THREADSAFE + whLockConfig* lockConfig; +#endif } whNvmConfig; diff --git a/wolfhsm/wh_nvm_internal.h b/wolfhsm/wh_nvm_internal.h new file mode 100644 index 000000000..f757d9eb6 --- /dev/null +++ b/wolfhsm/wh_nvm_internal.h @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2024 wolfSSL Inc. + * + * This file is part of wolfHSM. + * + * wolfHSM is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3 of the License, or + * (at your option) any later version. + * + * wolfHSM is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with wolfHSM. If not, see . + */ +/* + * wolfhsm/wh_nvm_internal.h + * + * Additional NVM helper API for internal library use only + * + */ + +#ifndef WOLFHSM_WH_NVM_INTERNAL_H_ +#define WOLFHSM_WH_NVM_INTERNAL_H_ + +/* Pick up compile-time configuration */ +#include "wolfhsm/wh_settings.h" +#include "wolfhsm/wh_nvm.h" + +#include  + +#ifdef WOLFHSM_CFG_THREADSAFE +/* + * Internal unlocked NVM functions. Same functionality as their public + * counterparts but assume the caller already holds context->lock and do NOT + * attempt to acquire the lock. For use by server keystore internals when + * performing atomic multi-step operations. + */ + +int wh_Nvm_GetMetadataUnlocked(whNvmContext* context, whNvmId id, + whNvmMetadata* meta); + +int wh_Nvm_ReadUnlocked(whNvmContext* context, whNvmId id, whNvmSize offset, + whNvmSize data_len, uint8_t* data); + +int wh_Nvm_AddObjectWithReclaimUnlocked(whNvmContext* context, + whNvmMetadata* meta, whNvmSize dataLen, + const uint8_t* data); + +int wh_Nvm_DestroyObjectsUnlocked(whNvmContext* context, whNvmId list_count, + const whNvmId* id_list); + +int wh_Nvm_AddObjectCheckedUnlocked(whNvmContext* context, whNvmMetadata* meta, + whNvmSize data_len, const uint8_t* data); + +int wh_Nvm_DestroyObjectsCheckedUnlocked(whNvmContext* context, + whNvmId list_count, + const whNvmId* id_list); + +int wh_Nvm_ReadCheckedUnlocked(whNvmContext* context, whNvmId id, + whNvmSize offset, whNvmSize data_len, + uint8_t* data); +#else + +/* + * When THREADSAFE is not defined, unlocked functions map to regular ones. + * This allows keystore code to always use unlocked variants without + * conditional compilation. 
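 *
 * For illustration only (a sketch; ctx, id, meta, and buf are hypothetical
 * names, and WOLFHSM_CFG_THREADSAFE is assumed to be defined): a compound
 * metadata-check-plus-read holds the NVM lock across both steps and calls
 * only the unlocked variants inside the critical section:
 *
 *     int rc = wh_Lock_Acquire(&ctx->lock);
 *     if (rc == WH_ERROR_OK) {
 *         rc = wh_Nvm_GetMetadataUnlocked(ctx, id, &meta);
 *         if (rc == WH_ERROR_OK) {
 *             rc = wh_Nvm_ReadUnlocked(ctx, id, 0, meta.len, buf);
 *         }
 *         (void)wh_Lock_Release(&ctx->lock);
 *     }
 *
 * When WOLFHSM_CFG_THREADSAFE is not defined, the same unlocked calls resolve
 * to the regular public functions via the macros below.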
+ */ +#define wh_Nvm_GetMetadataUnlocked(ctx, id, meta) \ + wh_Nvm_GetMetadata((ctx), (id), (meta)) +#define wh_Nvm_ReadUnlocked(ctx, id, off, len, data) \ + wh_Nvm_Read((ctx), (id), (off), (len), (data)) +#define wh_Nvm_AddObjectWithReclaimUnlocked(ctx, meta, len, data) \ + wh_Nvm_AddObjectWithReclaim((ctx), (meta), (len), (data)) +#define wh_Nvm_DestroyObjectsUnlocked(ctx, cnt, list) \ + wh_Nvm_DestroyObjects((ctx), (cnt), (list)) +#define wh_Nvm_AddObjectCheckedUnlocked(ctx, meta, len, data) \ + wh_Nvm_AddObjectChecked((ctx), (meta), (len), (data)) +#define wh_Nvm_DestroyObjectsCheckedUnlocked(ctx, cnt, list) \ + wh_Nvm_DestroyObjectsChecked((ctx), (cnt), (list)) +#define wh_Nvm_ReadCheckedUnlocked(ctx, id, off, len, data) \ + wh_Nvm_ReadChecked((ctx), (id), (off), (len), (data)) + +#endif + + +#endif /* !WOLFHSM_WH_NVM_INTERNAL_H_ */ diff --git a/wolfhsm/wh_settings.h b/wolfhsm/wh_settings.h index b33ecc9e9..1c828f6f8 100644 --- a/wolfhsm/wh_settings.h +++ b/wolfhsm/wh_settings.h @@ -102,6 +102,13 @@ * WOLFHSM_CFG_DEBUG_VERBOSE - If defined, enable verbose debug output * Default: Not defined * + * WOLFHSM_CFG_THREADSAFE - If defined, enable thread-safe access to shared + * server resources. Requires platform to provide lock callbacks via + * whLockConfig. When enabled, protects global key cache, NVM operations, + * and hardware crypto (if shared). When not defined, all lock operations + * are no-ops with zero overhead. + * Default: Not defined + * * WOLFHSM_CFG_PRINTF - Function or macro for printf redirection. Must have * signature: int func(const char* fmt, ...) * Default: stdlib printf @@ -165,6 +172,8 @@ #include #endif /* WOLFSSL_USER_SETTINGS */ +#include "wolfssl/wolfcrypt/types.h" + #if defined(WOLFHSM_CFG_DEBUG) || defined(WOLFHSM_CFG_DEBUG_VERBOSE) #define WOLFHSM_CFG_HEXDUMP #endif From 554d4e63eb8d60f7ffd68c72580fa8528fdad8a7 Mon Sep 17 00:00:00 2001 From: Brett Nicholas <7547222+bigbrett@users.noreply.github.com> Date: Tue, 27 Jan 2026 10:57:59 -0700 Subject: [PATCH 02/13] Address code review feedback. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit API/Error handling: - Add initialized flag to whLock structure to distinguish init states - Enhance error handling: acquire/release check initialized flag - Make wh_Lock_Cleanup zero structure for clear post-cleanup state - Document init/cleanup must be single-threaded (no atomics) - Document cleanup preconditions (no active contention required) - Update all API docs with precise return codes and error conditions - Change blocking acquire failure from ERROR_LOCKED to ERROR_ABORTED - Add comment explaining why non-blocking acquire is not provided POSIX port improvements: - Enhanced errno mapping in posix_lock.c (EINVAL→BADARGS, etc) - Trap PTHREAD_MUTEX_ERRORCHECK errors (EDEADLK, EPERM) Test coverage: - Add testUninitializedLock to validate error handling - Enhance testLockLifecycle with post-cleanup validation tests Misc: - Apply consistent critical section style pattern in wh_nvm.c - Update copyright years to 2026 - Rename stress test files to wh_test_posix_threadsafe_stress.* --- port/posix/posix_lock.c | 25 ++- src/wh_lock.c | 38 ++++- src/wh_nvm.c | 146 +++++++----------- test/wh_test.c | 2 +- test/wh_test_lock.c | 42 ++++- ...ss.c => wh_test_posix_threadsafe_stress.c} | 16 +- ...ss.h => wh_test_posix_threadsafe_stress.h} | 13 +- wolfhsm/wh_lock.h | 67 ++++++-- 8 files changed, 223 insertions(+), 126 deletions(-) rename test/{wh_test_threadsafe_stress.c => wh_test_posix_threadsafe_stress.c} (99%) rename test/{wh_test_threadsafe_stress.h => wh_test_posix_threadsafe_stress.h} (75%) diff --git a/port/posix/posix_lock.c b/port/posix/posix_lock.c index 898c633f7..f8767281e 100644 --- a/port/posix/posix_lock.c +++ b/port/posix/posix_lock.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2024 wolfSSL Inc. + * Copyright (C) 2026 wolfSSL Inc. * * This file is part of wolfHSM. * @@ -28,6 +28,7 @@ #ifdef WOLFHSM_CFG_THREADSAFE #include +#include #include "wolfhsm/wh_error.h" #include "wolfhsm/wh_lock.h" @@ -57,6 +58,9 @@ int posixLock_Init(void* context, const void* config) rc = pthread_mutex_init(&ctx->mutex, attr); if (rc != 0) { + if (rc == EINVAL) { + return WH_ERROR_BADARGS; + } return WH_ERROR_ABORTED; } @@ -80,6 +84,9 @@ int posixLock_Cleanup(void* context) rc = pthread_mutex_destroy(&ctx->mutex); if (rc != 0) { + if (rc == EINVAL) { + return WH_ERROR_BADARGS; + } return WH_ERROR_ABORTED; } @@ -103,6 +110,14 @@ int posixLock_Acquire(void* context) rc = pthread_mutex_lock(&ctx->mutex); if (rc != 0) { + /* Trap error-checking mutex errors that indicate bugs */ + if (rc == EDEADLK) { + /* Deadlock would occur - owner trying to re-acquire */ + return WH_ERROR_ABORTED; + } + if (rc == EINVAL) { + return WH_ERROR_BADARGS; + } return WH_ERROR_ABORTED; } @@ -125,6 +140,14 @@ int posixLock_Release(void* context) rc = pthread_mutex_unlock(&ctx->mutex); if (rc != 0) { + /* Trap error-checking mutex errors that indicate bugs */ + if (rc == EPERM) { + /* Non-owner attempting to unlock */ + return WH_ERROR_LOCKED; + } + if (rc == EINVAL) { + return WH_ERROR_BADARGS; + } return WH_ERROR_ABORTED; } diff --git a/src/wh_lock.c b/src/wh_lock.c index a42ccd15e..47a850e45 100644 --- a/src/wh_lock.c +++ b/src/wh_lock.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2024 wolfSSL Inc. + * Copyright (C) 2026 wolfSSL Inc. * * This file is part of wolfHSM. 
* @@ -27,6 +27,7 @@ #include "wolfhsm/wh_settings.h" #include /* For NULL */ +#include /* For memset */ #include "wolfhsm/wh_lock.h" #include "wolfhsm/wh_error.h" @@ -45,6 +46,7 @@ int wh_Lock_Init(whLock* lock, const whLockConfig* config) if ((config == NULL) || (config->cb == NULL)) { lock->cb = NULL; lock->context = NULL; + lock->initialized = 1; /* Mark as initialized even in no-op mode */ return WH_ERROR_OK; } @@ -57,10 +59,12 @@ int wh_Lock_Init(whLock* lock, const whLockConfig* config) if (ret != WH_ERROR_OK) { lock->cb = NULL; lock->context = NULL; + /* Do not set initialized on failure */ return ret; } } + lock->initialized = 1; /* Mark as initialized after successful init */ return WH_ERROR_OK; } @@ -74,16 +78,26 @@ int wh_Lock_Cleanup(whLock* lock) (void)lock->cb->cleanup(lock->context); } - lock->cb = NULL; - lock->context = NULL; + /* Zero the entire structure to make post-cleanup state distinguishable */ + memset(lock, 0, sizeof(*lock)); return WH_ERROR_OK; } int wh_Lock_Acquire(whLock* lock) { - /* No-op if lock is NULL or not configured */ - if ((lock == NULL) || (lock->cb == NULL) || (lock->cb->acquire == NULL)) { + /* Return error if lock is NULL */ + if (lock == NULL) { + return WH_ERROR_BADARGS; + } + + /* Return error if lock is not initialized */ + if (lock->initialized == 0) { + return WH_ERROR_BADARGS; + } + + /* No-op if not configured (no callbacks) */ + if ((lock->cb == NULL) || (lock->cb->acquire == NULL)) { return WH_ERROR_OK; } @@ -92,8 +106,18 @@ int wh_Lock_Acquire(whLock* lock) int wh_Lock_Release(whLock* lock) { - /* No-op if lock is NULL or not configured */ - if ((lock == NULL) || (lock->cb == NULL) || (lock->cb->release == NULL)) { + /* Return error if lock is NULL */ + if (lock == NULL) { + return WH_ERROR_BADARGS; + } + + /* Return error if lock is not initialized */ + if (lock->initialized == 0) { + return WH_ERROR_BADARGS; + } + + /* No-op if not configured (no callbacks) */ + if ((lock->cb == NULL) || (lock->cb->release == NULL)) { return WH_ERROR_OK; } diff --git a/src/wh_nvm.c b/src/wh_nvm.c index df2c1106a..12a746eb9 100644 --- a/src/wh_nvm.c +++ b/src/wh_nvm.c @@ -303,15 +303,12 @@ int wh_Nvm_GetAvailable(whNvmContext* context, } rc = _LockNvm(context); - if (rc != WH_ERROR_OK) { - return rc; - } - - rc = _GetAvailableUnlocked(context, out_avail_size, out_avail_objects, - out_reclaim_size, out_reclaim_objects); - - (void)_UnlockNvm(context); + if (rc == WH_ERROR_OK) { + rc = _GetAvailableUnlocked(context, out_avail_size, out_avail_objects, + out_reclaim_size, out_reclaim_objects); + (void)_UnlockNvm(context); + } return rc; } @@ -330,33 +327,30 @@ int wh_Nvm_AddObjectWithReclaim(whNvmContext* context, whNvmMetadata *meta, } ret = _LockNvm(context); - if (ret != WH_ERROR_OK) { - return ret; - } - - /* check if we have available object and data space */ - ret = _GetAvailableUnlocked(context, &availableSize, &availableObjects, - &reclaimSize, &reclaimObjects); - if (ret == 0) { - if ( (availableSize < dataLen) || - (availableObjects == 0) ) { - /* There's no available space, so try to reclaim space, */ - if ( (availableSize + reclaimSize >= dataLen) && - (availableObjects + reclaimObjects > 0) ) { - /* Reclaim will make sufficient space available */ - ret = _DestroyObjectsUnlocked(context, 0, NULL); - } else { - /* Reclaim will not help */ - ret = WH_ERROR_NOSPACE; + if (ret == WH_ERROR_OK) { + /* check if we have available object and data space */ + ret = _GetAvailableUnlocked(context, &availableSize, &availableObjects, + &reclaimSize, 
&reclaimObjects); + if (ret == 0) { + if ((availableSize < dataLen) || (availableObjects == 0)) { + /* There's no available space, so try to reclaim space, */ + if ((availableSize + reclaimSize >= dataLen) && + (availableObjects + reclaimObjects > 0)) { + /* Reclaim will make sufficient space available */ + ret = _DestroyObjectsUnlocked(context, 0, NULL); + } + else { + /* Reclaim will not help */ + ret = WH_ERROR_NOSPACE; + } } } - } - if (ret == 0) { - ret = _AddObjectUnlocked(context, meta, dataLen, data); - } - - (void)_UnlockNvm(context); + if (ret == 0) { + ret = _AddObjectUnlocked(context, meta, dataLen, data); + } + (void)_UnlockNvm(context); + } return ret; } @@ -370,14 +364,11 @@ int wh_Nvm_AddObject(whNvmContext* context, whNvmMetadata *meta, } rc = _LockNvm(context); - if (rc != WH_ERROR_OK) { - return rc; - } - - rc = _AddObjectUnlocked(context, meta, data_len, data); - - (void)_UnlockNvm(context); + if (rc == WH_ERROR_OK) { + rc = _AddObjectUnlocked(context, meta, data_len, data); + (void)_UnlockNvm(context); + } return rc; } @@ -391,14 +382,11 @@ int wh_Nvm_AddObjectChecked(whNvmContext* context, whNvmMetadata* meta, } ret = _LockNvm(context); - if (ret != WH_ERROR_OK) { - return ret; - } - - ret = _Nvm_AddObjectCheckedUnlocked(context, meta, data_len, data); - - (void)_UnlockNvm(context); + if (ret == WH_ERROR_OK) { + ret = _Nvm_AddObjectCheckedUnlocked(context, meta, data_len, data); + (void)_UnlockNvm(context); + } return ret; } @@ -413,14 +401,11 @@ int wh_Nvm_List(whNvmContext* context, } rc = _LockNvm(context); - if (rc != WH_ERROR_OK) { - return rc; - } - - rc = _ListUnlocked(context, access, flags, start_id, out_count, out_id); - - (void)_UnlockNvm(context); + if (rc == WH_ERROR_OK) { + rc = _ListUnlocked(context, access, flags, start_id, out_count, out_id); + (void)_UnlockNvm(context); + } return rc; } @@ -434,14 +419,11 @@ int wh_Nvm_GetMetadata(whNvmContext* context, whNvmId id, } rc = _LockNvm(context); - if (rc != WH_ERROR_OK) { - return rc; - } - - rc = _GetMetadataUnlocked(context, id, meta); - - (void)_UnlockNvm(context); + if (rc == WH_ERROR_OK) { + rc = _GetMetadataUnlocked(context, id, meta); + (void)_UnlockNvm(context); + } return rc; } @@ -456,14 +438,11 @@ int wh_Nvm_DestroyObjects(whNvmContext* context, whNvmId list_count, } rc = _LockNvm(context); - if (rc != WH_ERROR_OK) { - return rc; - } - - rc = _DestroyObjectsUnlocked(context, list_count, id_list); - - (void)_UnlockNvm(context); + if (rc == WH_ERROR_OK) { + rc = _DestroyObjectsUnlocked(context, list_count, id_list); + (void)_UnlockNvm(context); + } return rc; } @@ -477,14 +456,11 @@ int wh_Nvm_DestroyObjectsChecked(whNvmContext* context, whNvmId list_count, } ret = _LockNvm(context); - if (ret != WH_ERROR_OK) { - return ret; - } - - ret = _Nvm_DestroyObjectsCheckedUnlocked(context, list_count, id_list); - - (void)_UnlockNvm(context); + if (ret == WH_ERROR_OK) { + ret = _Nvm_DestroyObjectsCheckedUnlocked(context, list_count, id_list); + (void)_UnlockNvm(context); + } return ret; } @@ -499,14 +475,11 @@ int wh_Nvm_Read(whNvmContext* context, whNvmId id, whNvmSize offset, } rc = _LockNvm(context); - if (rc != WH_ERROR_OK) { - return rc; - } - - rc = _ReadUnlocked(context, id, offset, data_len, data); - - (void)_UnlockNvm(context); + if (rc == WH_ERROR_OK) { + rc = _ReadUnlocked(context, id, offset, data_len, data); + (void)_UnlockNvm(context); + } return rc; } @@ -520,14 +493,11 @@ int wh_Nvm_ReadChecked(whNvmContext* context, whNvmId id, whNvmSize offset, } ret = _LockNvm(context); - if (ret != 
WH_ERROR_OK) { - return ret; - } - - ret = _Nvm_ReadCheckedUnlocked(context, id, offset, data_len, data); - - (void)_UnlockNvm(context); + if (ret == WH_ERROR_OK) { + ret = _Nvm_ReadCheckedUnlocked(context, id, offset, data_len, data); + (void)_UnlockNvm(context); + } return ret; } diff --git a/test/wh_test.c b/test/wh_test.c index 39e223342..2d6d23ba5 100644 --- a/test/wh_test.c +++ b/test/wh_test.c @@ -41,7 +41,7 @@ #include "wh_test_multiclient.h" #include "wh_test_log.h" #include "wh_test_lock.h" -#include "wh_test_threadsafe_stress.h" +#include "wh_test_posix_threadsafe_stress.h" #if defined(WOLFHSM_CFG_CERTIFICATE_MANAGER) #include "wh_test_cert.h" diff --git a/test/wh_test_lock.c b/test/wh_test_lock.c index 8e78531bb..59ebf3410 100644 --- a/test/wh_test_lock.c +++ b/test/wh_test_lock.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2024 wolfSSL Inc. + * Copyright (C) 2026 wolfSSL Inc. * * This file is part of wolfHSM. * @@ -75,6 +75,18 @@ static int testLockLifecycle(whLockConfig* lockConfig) rc = wh_Lock_Cleanup(&lock); WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK); + /* After cleanup, acquire should fail */ + rc = wh_Lock_Acquire(&lock); + WH_TEST_ASSERT_RETURN(rc == WH_ERROR_BADARGS); + + /* After cleanup, release should fail */ + rc = wh_Lock_Release(&lock); + WH_TEST_ASSERT_RETURN(rc == WH_ERROR_BADARGS); + + /* Second cleanup should succeed (idempotent) */ + rc = wh_Lock_Cleanup(&lock); + WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK); + WH_TEST_PRINT(" Lock lifecycle: PASS\n"); return WH_ERROR_OK; } @@ -108,6 +120,33 @@ static int testNullConfigNoOp(void) return WH_ERROR_OK; } +/* Test: Operations on uninitialized lock should fail appropriately */ +static int testUninitializedLock(void) +{ + whLock lock; + int rc; + + WH_TEST_PRINT("Testing uninitialized lock...\n"); + + /* Create zeroed lock structure (no init call) */ + memset(&lock, 0, sizeof(lock)); + + /* Acquire on uninitialized lock should fail */ + rc = wh_Lock_Acquire(&lock); + WH_TEST_ASSERT_RETURN(rc == WH_ERROR_BADARGS); + + /* Release on uninitialized lock should fail */ + rc = wh_Lock_Release(&lock); + WH_TEST_ASSERT_RETURN(rc == WH_ERROR_BADARGS); + + /* Cleanup on uninitialized lock should succeed (idempotent) */ + rc = wh_Lock_Cleanup(&lock); + WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK); + + WH_TEST_PRINT(" Uninitialized lock: PASS\n"); + return WH_ERROR_OK; +} + /* Test: NVM with lock config */ static int testNvmWithLock(whLockConfig* lockConfig) { @@ -188,6 +227,7 @@ int whTest_LockConfig(whLockConfig* lockConfig) WH_TEST_RETURN_ON_FAIL(testLockLifecycle(lockConfig)); WH_TEST_RETURN_ON_FAIL(testNullConfigNoOp()); + WH_TEST_RETURN_ON_FAIL(testUninitializedLock()); WH_TEST_RETURN_ON_FAIL(testNvmWithLock(lockConfig)); WH_TEST_PRINT("Lock tests PASSED\n"); diff --git a/test/wh_test_threadsafe_stress.c b/test/wh_test_posix_threadsafe_stress.c similarity index 99% rename from test/wh_test_threadsafe_stress.c rename to test/wh_test_posix_threadsafe_stress.c index e7ed13de2..7fe1db3d5 100644 --- a/test/wh_test_threadsafe_stress.c +++ b/test/wh_test_posix_threadsafe_stress.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2025 wolfSSL Inc. + * Copyright (C) 2026 wolfSSL Inc. * * This file is part of wolfHSM. * @@ -17,13 +17,13 @@ * along with wolfHSM. If not, see . */ /* - * test/wh_test_threadsafe_stress.c + * test/wh_test_posix_threadsafe_stress.c * - * Phased contention test for thread safety validation. + * POSIX-based phased contention test for thread safety validation. 
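 *
 * For context (a sketch only; the attribute object must outlive the lock and
 * the names here are hypothetical), the error-checking mutex mentioned in the
 * NOTE below is configured for the POSIX lock port roughly as:
 *
 *     pthread_mutexattr_t attr;
 *     pthread_mutexattr_init(&attr);
 *     pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
 *
 *     posixLockConfig lockCfg = {0};
 *     lockCfg.attr = &attr;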
* * Intended to drive a high volume of concurrent requests across multiple * servers sharing a single NVM context, while running under TSAN to detect any - * data races. + * data races. Uses POSIX threading primitives (pthreads). * * Architecture: * - 1 shared NVM context with lock @@ -31,6 +31,10 @@ * - 4 client threads (all doing both NVM and keystore ops) * - Different contention phases to stress test contention patterns across * various different APIs + * + * NOTE: Uses PTHREAD_MUTEX_ERRORCHECK attribute to trap undefined behavior + * errors (EDEADLK for deadlock, EPERM for non-owner unlock) which indicate + * bugs in the locking implementation. */ #include "wolfhsm/wh_settings.h" @@ -65,7 +69,7 @@ #include "wolfssl/wolfcrypt/random.h" #include "wh_test_common.h" -#include "wh_test_threadsafe_stress.h" +#include "wh_test_posix_threadsafe_stress.h" /* @@ -2054,7 +2058,7 @@ int whTest_ThreadSafeStress(void) !WOLFHSM_CFG_GLOBAL_KEYS || !WOLFHSM_CFG_ENABLE_CLIENT || \ !WOLFHSM_CFG_ENABLE_SERVER || WOLFHSM_CFG_NO_CRYPTO */ -#include "wh_test_threadsafe_stress.h" +#include "wh_test_posix_threadsafe_stress.h" #include "wh_test_common.h" int whTest_ThreadSafeStress(void) diff --git a/test/wh_test_threadsafe_stress.h b/test/wh_test_posix_threadsafe_stress.h similarity index 75% rename from test/wh_test_threadsafe_stress.h rename to test/wh_test_posix_threadsafe_stress.h index fb6b7a17a..3b0891b49 100644 --- a/test/wh_test_threadsafe_stress.h +++ b/test/wh_test_posix_threadsafe_stress.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2025 wolfSSL Inc. + * Copyright (C) 2026 wolfSSL Inc. * * This file is part of wolfHSM. * @@ -17,13 +17,14 @@ * along with wolfHSM. If not, see . */ /* - * test/wh_test_threadsafe_stress.h + * test/wh_test_posix_threadsafe_stress.h * - * Multithreaded stress test for thread safety validation. + * POSIX multithreaded stress test for thread safety validation. + * This test uses POSIX threading primitives (pthreads). */ -#ifndef TEST_WH_TEST_THREADSAFE_STRESS_H_ -#define TEST_WH_TEST_THREADSAFE_STRESS_H_ +#ifndef TEST_WH_TEST_POSIX_THREADSAFE_STRESS_H_ +#define TEST_WH_TEST_POSIX_THREADSAFE_STRESS_H_ /* * Runs multithreaded stress tests for thread safety validation. @@ -37,4 +38,4 @@ */ int whTest_ThreadSafeStress(void); -#endif /* TEST_WH_TEST_THREADSAFE_STRESS_H_ */ +#endif /* TEST_WH_TEST_POSIX_THREADSAFE_STRESS_H_ */ diff --git a/wolfhsm/wh_lock.h b/wolfhsm/wh_lock.h index 46e39c0bc..62e0b35a0 100644 --- a/wolfhsm/wh_lock.h +++ b/wolfhsm/wh_lock.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2024 wolfSSL Inc. + * Copyright (C) 2026 wolfSSL Inc. * * This file is part of wolfHSM. * @@ -81,10 +81,17 @@ typedef struct whLockCb_t { * * Holds callback table and platform-specific context. * The context pointer is passed to all callbacks. + * + * IMPORTANT: Lock initialization and cleanup must be performed in a + * single-threaded context before any threads attempt to use the lock. The + * initialized flag is not atomic and is only safe to access during + * initialization/cleanup phases. */ typedef struct whLock_t { const whLockCb* cb; /* Platform callbacks (may be NULL) */ void* context; /* Platform context (e.g., mutex pointer) */ + int initialized; /* 1 if initialized, 0 otherwise (not atomic) */ + uint8_t WH_PAD[4]; } whLock; /** @@ -103,21 +110,42 @@ typedef struct whLockConfig_t { * If config is NULL or config->cb is NULL, locking is disabled * (single-threaded mode) and all lock operations become no-ops. * - * @param[in] lock Pointer to the lock structure. 
- * @param[in] config Pointer to the lock configuration (may be NULL). - * @return int Returns WH_ERROR_OK on success, or a negative error code on - * failure. + * IMPORTANT: Initialization must be performed in a single-threaded context + * before any threads attempt to use the lock. The initialized flag is not + * atomic and is only safe to access during initialization/cleanup phases. + * + * On error, no state is modified. + * + * @param[in] lock Pointer to the lock structure. Must not be NULL. + * @param[in] config Pointer to the lock configuration (may be NULL for no-op + * mode). + * @return int Returns WH_ERROR_OK on success. + * Returns WH_ERROR_BADARGS if lock is NULL. + * Returns negative error code on callback initialization failure. */ int wh_Lock_Init(whLock* lock, const whLockConfig* config); /** * @brief Cleans up a lock instance. * - * This function cleans up the lock by calling the cleanup callback. + * This function cleans up the lock by calling the cleanup callback and then + * zeros the entire structure to make post-cleanup state distinguishable. + * + * IMPORTANT: Cleanup must only be called when no other threads are accessing + * the lock. Calling cleanup while the lock is held or while other threads are + * waiting results in undefined behavior. In typical usage, cleanup occurs + * during controlled shutdown sequences after all worker threads have been + * stopped. + * + * Cleanup must be performed in a single-threaded context, similar to + * initialization. + * + * This function is idempotent - calling cleanup on an already cleaned up or + * uninitialized lock returns WH_ERROR_OK. * * @param[in] lock Pointer to the lock structure. - * @return int Returns WH_ERROR_OK on success, or WH_ERROR_BADARGS if lock - * is NULL. + * @return int Returns WH_ERROR_OK on success. + * Returns WH_ERROR_BADARGS if lock is NULL. */ int wh_Lock_Cleanup(whLock* lock); @@ -125,11 +153,13 @@ int wh_Lock_Cleanup(whLock* lock); * @brief Acquires exclusive access to a lock. * * This function blocks until the lock is acquired. If no callbacks are - * configured, this is a no-op that returns WH_ERROR_OK. + * configured (no-op mode), this returns WH_ERROR_OK immediately. * - * @param[in] lock Pointer to the lock structure. - * @return int Returns WH_ERROR_OK on success, or a negative error code on - * failure (e.g., WH_ERROR_LOCKED if acquisition failed). + * @param[in] lock Pointer to the lock structure. Must not be NULL. + * @return int Returns WH_ERROR_OK on success (lock acquired or no-op mode). + * Returns WH_ERROR_BADARGS if lock is NULL or not initialized. + * Returns WH_ERROR_ABORTED if the blocking operation itself failed + * (indicates callback failure, not contention). */ int wh_Lock_Acquire(whLock* lock); @@ -137,11 +167,16 @@ int wh_Lock_Acquire(whLock* lock); * @brief Releases exclusive access to a lock. * * This function releases a previously acquired lock. If no callbacks are - * configured, this is a no-op that returns WH_ERROR_OK. + * configured (no-op mode), this returns WH_ERROR_OK immediately. * - * @param[in] lock Pointer to the lock structure. - * @return int Returns WH_ERROR_OK on success, or a negative error code on - * failure. + * Releasing a lock that is initialized but not currently acquired returns + * WH_ERROR_OK (idempotent behavior). + * + * @param[in] lock Pointer to the lock structure. Must not be NULL. + * @return int Returns WH_ERROR_OK on success (lock released or no-op mode). + * Returns WH_ERROR_BADARGS if lock is NULL or not initialized. 
+ * Returns WH_ERROR_LOCKED (optional) if non-owner attempts to release + * or owner attempts double-acquire (platform-specific). */ int wh_Lock_Release(whLock* lock); From 31de356617b8e74c7cc635d2ddb9f07496112f0a Mon Sep 17 00:00:00 2001 From: Brett Nicholas <7547222+bigbrett@users.noreply.github.com> Date: Wed, 28 Jan 2026 08:10:06 -0700 Subject: [PATCH 03/13] Address review feedback from rizlik. Extends NVM locking to cert, counter, img_mgr, and nvm modules Adds proper thread-safety locking discipline to additional server modules that perform compound NVM operations. This prevents TOCTOU (Time-Of-Check-Time-Of-Use) issues where metadata could become stale between check and use/writeback. Changes: - wh_server_cert.c: Add NVM locking for atomic GetMetadata + Read operations in certificate read and export paths - wh_server_counter.c: Add NVM locking for atomic read-modify-write counter increment operations - wh_server_img_mgr.c: Add NVM locking for atomic signature load operations - wh_server_keystore.c: Refactor to use unlocked internal variants for compound operations (GetUniqueId + CacheKey, policy check + erase, freshen + export). Add locking discipline documentation. - wh_server_nvm.c: Add NVM locking for DMA read operations to ensure metadata remains valid throughout transfer. Add locking discipline documentation. - wh_test_posix_threadsafe_stress.c: Add new stress test phases for counter concurrent increment, counter increment vs read, NVM read vs resize, NVM concurrent resize, and NVM read DMA vs resize. Add counter atomicity validation. All compound operations now follow the pattern: 1. Acquire server->nvm->lock 2. Use only *Unlocked() variants internally 3. Keep lock held for entire operation including DMA 4. Release lock after all metadata-dependent operations complete --- src/wh_server_cert.c | 105 +++++-- src/wh_server_counter.c | 75 +++-- src/wh_server_img_mgr.c | 52 +++- src/wh_server_keystore.c | 368 ++++++++++++++----------- src/wh_server_nvm.c | 138 +++++++--- test/wh_test_posix_threadsafe_stress.c | 366 ++++++++++++++++++++++++ 6 files changed, 856 insertions(+), 248 deletions(-) diff --git a/src/wh_server_cert.c b/src/wh_server_cert.c index 0d44e778c..b5333956a 100644 --- a/src/wh_server_cert.c +++ b/src/wh_server_cert.c @@ -35,6 +35,7 @@ #include "wolfhsm/wh_server_cert.h" #include "wolfhsm/wh_server_nvm.h" #include "wolfhsm/wh_server_keystore.h" +#include "wolfhsm/wh_nvm_internal.h" #include "wolfhsm/wh_message.h" #include "wolfhsm/wh_message_cert.h" @@ -42,6 +43,27 @@ #include "wolfssl/ssl.h" #include "wolfssl/wolfcrypt/asn.h" +/* Helper functions for NVM locking */ +#ifdef WOLFHSM_CFG_THREADSAFE +static int _LockNvm(whServerContext* server) +{ + if (server->nvm != NULL) { + return wh_Lock_Acquire(&server->nvm->lock); + } + return WH_ERROR_OK; +} + +static int _UnlockNvm(whServerContext* server) +{ + if (server->nvm != NULL) { + return wh_Lock_Release(&server->nvm->lock); + } + return WH_ERROR_OK; +} +#else +#define _LockNvm(server) (WH_ERROR_OK) +#define _UnlockNvm(server) (WH_ERROR_OK) +#endif /* WOLFHSM_CFG_THREADSAFE */ static int _verifyChainAgainstCmStore(whServerContext* server, WOLFSSL_CERT_MANAGER* cm, @@ -226,9 +248,10 @@ int wh_Server_CertEraseTrusted(whServerContext* server, whNvmId id) return rc; } -/* Get a trusted certificate from NVM storage */ -int wh_Server_CertReadTrusted(whServerContext* server, whNvmId id, - uint8_t* cert, uint32_t* inout_cert_len) +/* Get a trusted certificate from NVM storage (unlocked variant) + * Assumes caller holds 
server->nvm->lock */ +static int _CertReadTrustedUnlocked(whServerContext* server, whNvmId id, + uint8_t* cert, uint32_t* inout_cert_len) { int rc; whNvmMetadata meta; @@ -238,9 +261,8 @@ int wh_Server_CertReadTrusted(whServerContext* server, whNvmId id, return WH_ERROR_BADARGS; } - /* Get metadata to check the certificate size */ - rc = wh_Nvm_GetMetadata(server->nvm, id, &meta); + rc = wh_Nvm_GetMetadataUnlocked(server->nvm, id, &meta); if (rc != 0) { return rc; } @@ -254,7 +276,22 @@ int wh_Server_CertReadTrusted(whServerContext* server, whNvmId id, * be reflected back to the user on length mismatch failure */ *inout_cert_len = meta.len; - return wh_Nvm_Read(server->nvm, id, 0, meta.len, cert); + return wh_Nvm_ReadUnlocked(server->nvm, id, 0, meta.len, cert); +} + +/* Get a trusted certificate from NVM storage */ +int wh_Server_CertReadTrusted(whServerContext* server, whNvmId id, + uint8_t* cert, uint32_t* inout_cert_len) +{ + int rc; + + /* Acquire lock for atomic GetMetadata + Read */ + rc = _LockNvm(server); + if (rc == WH_ERROR_OK) { + rc = _CertReadTrustedUnlocked(server, id, cert, inout_cert_len); + (void)_UnlockNvm(server); + } + return rc; } /* Verify a certificate against trusted certificates */ @@ -463,21 +500,30 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic, ? max_transport_cert_len : WOLFHSM_CFG_MAX_CERT_SIZE; - /* Check metadata to check if the certificate is non-exportable. - * This is unfortunately redundant since metadata is checked in - * wh_Server_CertReadTrusted(). */ - rc = wh_Nvm_GetMetadata(server->nvm, req.id, &meta); + /* Acquire lock for atomic flag check + read */ + rc = _LockNvm(server); if (rc == WH_ERROR_OK) { - /* Check if the certificate is non-exportable */ - if (meta.flags & WH_NVM_FLAGS_NONEXPORTABLE) { - resp.rc = WH_ERROR_ACCESS; + /* Check metadata to check if the certificate is + * non-exportable */ + rc = wh_Nvm_GetMetadataUnlocked(server->nvm, req.id, &meta); + if (rc == WH_ERROR_OK) { + /* Check if the certificate is non-exportable */ + if (meta.flags & WH_NVM_FLAGS_NONEXPORTABLE) { + resp.rc = WH_ERROR_ACCESS; + resp.cert_len = 0; + } + else { + rc = _CertReadTrustedUnlocked(server, req.id, cert_data, + &cert_len); + resp.rc = rc; + resp.cert_len = cert_len; + } } else { - rc = wh_Server_CertReadTrusted(server, req.id, cert_data, - &cert_len); resp.rc = rc; - resp.cert_len = cert_len; + resp.cert_len = 0; } + (void)_UnlockNvm(server); } else { resp.rc = rc; @@ -590,18 +636,25 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic, WH_DMA_OPER_CLIENT_WRITE_PRE, (whServerDmaFlags){0}); } if (resp.rc == WH_ERROR_OK) { - /* Check metadata to see if the certificate is non-exportable */ - resp.rc = wh_Nvm_GetMetadata(server->nvm, req.id, &meta); + /* Acquire lock for atomic flag check + read */ + resp.rc = _LockNvm(server); if (resp.rc == WH_ERROR_OK) { - if ((meta.flags & WH_NVM_FLAGS_NONEXPORTABLE) != 0) { - resp.rc = WH_ERROR_ACCESS; - } - else { - /* Clamp cert_len to actual stored length */ - cert_len = req.cert_len; - resp.rc = wh_Server_CertReadTrusted( - server, req.id, cert_data, &cert_len); + /* Check metadata to see if the certificate is + * non-exportable */ + resp.rc = + wh_Nvm_GetMetadataUnlocked(server->nvm, req.id, &meta); + if (resp.rc == WH_ERROR_OK) { + if ((meta.flags & WH_NVM_FLAGS_NONEXPORTABLE) != 0) { + resp.rc = WH_ERROR_ACCESS; + } + else { + /* Clamp cert_len to actual stored length */ + cert_len = req.cert_len; + resp.rc = _CertReadTrustedUnlocked( + server, req.id, 
cert_data, &cert_len); + } } + (void)_UnlockNvm(server); } } if (resp.rc == WH_ERROR_OK) { diff --git a/src/wh_server_counter.c b/src/wh_server_counter.c index 10c672ce5..e48bd7f9c 100644 --- a/src/wh_server_counter.c +++ b/src/wh_server_counter.c @@ -34,9 +34,32 @@ #include "wolfhsm/wh_message.h" #include "wolfhsm/wh_message_counter.h" #include "wolfhsm/wh_server.h" +#include "wolfhsm/wh_nvm_internal.h" #include "wolfhsm/wh_server_counter.h" +/* Helper functions for NVM locking */ +#ifdef WOLFHSM_CFG_THREADSAFE +static int _LockNvm(whServerContext* server) +{ + if (server->nvm != NULL) { + return wh_Lock_Acquire(&server->nvm->lock); + } + return WH_ERROR_OK; +} + +static int _UnlockNvm(whServerContext* server) +{ + if (server->nvm != NULL) { + return wh_Lock_Release(&server->nvm->lock); + } + return WH_ERROR_OK; +} +#else +#define _LockNvm(server) (WH_ERROR_OK) +#define _UnlockNvm(server) (WH_ERROR_OK) +#endif /* WOLFHSM_CFG_THREADSAFE */ + int wh_Server_HandleCounter(whServerContext* server, uint16_t magic, uint16_t action, uint16_t req_size, const void* req_packet, uint16_t* out_resp_size, @@ -104,33 +127,41 @@ int wh_Server_HandleCounter(whServerContext* server, uint16_t magic, (void)wh_MessageCounter_TranslateIncrementRequest( magic, (whMessageCounter_IncrementRequest*)req_packet, &req); - /* read the counter, stored in the metadata label */ - ret = wh_Nvm_GetMetadata( - server->nvm, - WH_MAKE_KEYID(WH_KEYTYPE_COUNTER, - (uint16_t)server->comm->client_id, - (uint16_t)req.counterId), - meta); - resp.rc = ret; + counterId = WH_MAKE_KEYID(WH_KEYTYPE_COUNTER, + (uint16_t)server->comm->client_id, + (uint16_t)req.counterId); - /* increment and write the counter back */ + /* Acquire lock for atomic read-modify-write */ + ret = _LockNvm(server); if (ret == WH_ERROR_OK) { - *counter = *counter + 1; - /* set counter to uint32_t max if it rolled over */ - if (*counter == 0) { - *counter = UINT32_MAX; + /* read the counter, stored in the metadata label */ + ret = wh_Nvm_GetMetadataUnlocked(server->nvm, counterId, meta); + resp.rc = ret; + + /* increment and write the counter back */ + if (ret == WH_ERROR_OK) { + *counter = *counter + 1; + /* set counter to uint32_t max if it rolled over */ + if (*counter == 0) { + *counter = UINT32_MAX; + } + /* only update if we didn't saturate */ + else { + ret = wh_Nvm_AddObjectWithReclaimUnlocked( + server->nvm, meta, 0, NULL); + resp.rc = ret; + } } - /* only update if we didn't saturate */ - else { - ret = - wh_Nvm_AddObjectWithReclaim(server->nvm, meta, 0, NULL); - resp.rc = ret; + + /* return counter to the caller */ + if (ret == WH_ERROR_OK) { + resp.counter = *counter; } + /* Release lock */ + (void)_UnlockNvm(server); } - - /* return counter to the caller */ - if (ret == WH_ERROR_OK) { - resp.counter = *counter; + else { + resp.rc = ret; } (void)wh_MessageCounter_TranslateIncrementResponse( diff --git a/src/wh_server_img_mgr.c b/src/wh_server_img_mgr.c index 9a32c1093..9c9c1519a 100644 --- a/src/wh_server_img_mgr.c +++ b/src/wh_server_img_mgr.c @@ -37,6 +37,7 @@ #include "wolfhsm/wh_server_img_mgr.h" #include "wolfhsm/wh_server_keystore.h" #include "wolfhsm/wh_nvm.h" +#include "wolfhsm/wh_nvm_internal.h" #ifndef WOLFHSM_CFG_NO_CRYPTO #include "wolfssl/wolfcrypt/settings.h" @@ -50,6 +51,28 @@ #include "wolfssl/wolfcrypt/rsa.h" #endif +/* Helper functions for NVM locking */ +#ifdef WOLFHSM_CFG_THREADSAFE +static int _LockNvm(whServerContext* server) +{ + if (server->nvm != NULL) { + return wh_Lock_Acquire(&server->nvm->lock); + } + return WH_ERROR_OK; +} 
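For illustration, the four-step pattern described in this commit's message (acquire, use only *Unlocked() variants, hold the lock for the whole operation, release) collapsed into one function, using the counter increment as the example. This is a sketch only, not part of the patch: it assumes the static _LockNvm()/_UnlockNvm() helpers and the *Unlocked() NVM variants introduced above, and assumes (as wh_server_counter.c does) that the 32-bit counter value lives at the start of the object's metadata label.

static int exampleCounterIncrementLocked(whServerContext* server,
                                         whNvmId counterId,
                                         uint32_t* out_counter)
{
    whNvmMetadata meta    = {0};
    uint32_t*     counter = (uint32_t*)meta.label; /* assumption: counter
                                                    * stored in the label */
    int           ret;

    /* 1. Acquire server->nvm->lock */
    ret = _LockNvm(server);
    if (ret != WH_ERROR_OK) {
        return ret;
    }

    /* 2. Use only *Unlocked() variants while the lock is held */
    ret = wh_Nvm_GetMetadataUnlocked(server->nvm, counterId, &meta);
    if (ret == WH_ERROR_OK) {
        *counter = *counter + 1;
        if (*counter == 0) {
            /* Saturate instead of wrapping around */
            *counter = UINT32_MAX;
        }
        else {
            /* 3. Write-back happens under the same lock that performed the
             *    read, so no other client can interleave */
            ret = wh_Nvm_AddObjectWithReclaimUnlocked(server->nvm, &meta, 0,
                                                      NULL);
        }
        if (ret == WH_ERROR_OK) {
            *out_counter = *counter;
        }
    }

    /* 4. Release only after the whole read-modify-write completes */
    (void)_UnlockNvm(server);
    return ret;
}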
+ +static int _UnlockNvm(whServerContext* server) +{ + if (server->nvm != NULL) { + return wh_Lock_Release(&server->nvm->lock); + } + return WH_ERROR_OK; +} +#else +#define _LockNvm(server) (WH_ERROR_OK) +#define _UnlockNvm(server) (WH_ERROR_OK) +#endif /* WOLFHSM_CFG_THREADSAFE */ + int wh_Server_ImgMgrInit(whServerImgMgrContext* context, const whServerImgMgrConfig* config) { @@ -113,21 +136,28 @@ int wh_Server_ImgMgrVerifyImg(whServerImgMgrContext* context, } /* Load the signature from NVM */ - ret = wh_Nvm_GetMetadata(server->nvm, img->sigNvmId, &sigMeta); - if (ret != WH_ERROR_OK) { - return ret; - } - - /* Ensure signature fits in buffer */ - if (sigMeta.len > sigSize) { - return WH_ERROR_BADARGS; + /* Acquire lock for atomic GetMetadata + Read */ + ret = _LockNvm(server); + if (ret == WH_ERROR_OK) { + ret = wh_Nvm_GetMetadataUnlocked(server->nvm, img->sigNvmId, &sigMeta); + if (ret == WH_ERROR_OK) { + /* Ensure signature fits in buffer */ + if (sigMeta.len > sigSize) { + ret = WH_ERROR_BADARGS; + } + else { + ret = wh_Nvm_ReadUnlocked(server->nvm, img->sigNvmId, 0, + sigMeta.len, sigBuf); + if (ret == WH_ERROR_OK) { + actualSigSize = sigMeta.len; + } + } + } + (void)_UnlockNvm(server); } - - ret = wh_Nvm_Read(server->nvm, img->sigNvmId, 0, sigMeta.len, sigBuf); if (ret != WH_ERROR_OK) { return ret; } - actualSigSize = sigMeta.len; /* Invoke verify method callback */ if (img->verifyMethod != NULL) { diff --git a/src/wh_server_keystore.c b/src/wh_server_keystore.c index 4d96cd278..9c6cec7a4 100644 --- a/src/wh_server_keystore.c +++ b/src/wh_server_keystore.c @@ -19,6 +19,23 @@ /* * src/wh_server_keystore.c * + * LOCKING DISCIPLINE FOR COMPOUND OPERATIONS: + * + * All compound operations (those involving multiple cache/NVM accesses) MUST: + * 1. Acquire server->nvm->lock at the start using _LockKeystore() + * 2. Use only *Unlocked() variants of NVM/cache functions internally + * 3. Keep lock held for entire operation, including DMA transfers + * 4. Release lock only after all metadata-dependent operations complete + * + * Rationale: Releasing the lock mid-operation creates TOCTOU (Time-Of-Check- + * Time-Of-Use) vulnerabilities where metadata can become stale or objects can + * be destroyed/replaced between checks. DMA operations that depend on cached + * metadata MUST occur under the same lock that validated that metadata. 
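As a concrete illustration of the TOCTOU window described above (a sketch only; rc, server, id, meta, and buf are assumed to be in scope as in the surrounding handlers):

/* BROKEN: each call locks and unlocks internally, so the flags and length
 * checked here can be stale by the time the read runs -- another client may
 * destroy or replace the object between the two calls. */
rc = wh_Nvm_GetMetadata(server->nvm, id, &meta);
if ((rc == WH_ERROR_OK) &&
    ((meta.flags & WH_NVM_FLAGS_NONEXPORTABLE) == 0)) {
    rc = wh_Nvm_Read(server->nvm, id, 0, meta.len, buf);
}

/* CORRECT: one critical section covers both the check and the use. */
rc = _LockKeystore(server);
if (rc == WH_ERROR_OK) {
    rc = wh_Nvm_GetMetadataUnlocked(server->nvm, id, &meta);
    if ((rc == WH_ERROR_OK) &&
        ((meta.flags & WH_NVM_FLAGS_NONEXPORTABLE) == 0)) {
        rc = wh_Nvm_ReadUnlocked(server->nvm, id, 0, meta.len, buf);
    }
    (void)_UnlockKeystore(server);
}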
+ * + * Examples of correct implementations in this file: + * - wh_Server_KeystoreCacheKeyDmaChecked() (lines 2518-2542) + * - wh_Server_KeystoreExportKeyDmaChecked() (lines 2579-2616) + * - wh_Server_KeystoreRevokeKey() (lines 1179-1231) */ /* Pick up compile-time configuration */ @@ -417,7 +434,8 @@ static int _MarkKeyCommittedUnlocked(whKeyCacheContext* ctx, whKeyId keyId, return ret; } -int wh_Server_KeystoreGetUniqueId(whServerContext* server, whNvmId* inout_id) +/* Unlocked variant - assumes caller holds server->nvm->lock */ +static int _GetUniqueIdUnlocked(whServerContext* server, whNvmId* inout_id) { int ret = WH_ERROR_OK; int found = 0; @@ -435,11 +453,6 @@ int wh_Server_KeystoreGetUniqueId(whServerContext* server, whNvmId* inout_id) return WH_ERROR_BADARGS; } - ret = _LockKeystore(server); - if (ret != WH_ERROR_OK) { - return ret; - } - /* try every index until we find a unique one, don't worry about capacity */ for (id = WH_KEYID_IDMAX; id > WH_KEYID_ERASED; id--) { /* id loop var is not an input client ID so we don't need to handle the @@ -453,7 +466,7 @@ int wh_Server_KeystoreGetUniqueId(whServerContext* server, whNvmId* inout_id) continue; } else if (ret != WH_ERROR_NOTFOUND) { - goto cleanup; + return ret; } /* Check if keyId exists in NVM */ @@ -465,20 +478,30 @@ int wh_Server_KeystoreGetUniqueId(whServerContext* server, whNvmId* inout_id) } if (ret != WH_ERROR_OK) { - goto cleanup; + return ret; } } if (!found) { - ret = WH_ERROR_NOSPACE; - goto cleanup; + return WH_ERROR_NOSPACE; } /* Return found id */ *inout_id = buildId; - ret = WH_ERROR_OK; + return WH_ERROR_OK; +} + +int wh_Server_KeystoreGetUniqueId(whServerContext* server, whNvmId* inout_id) +{ + int ret; + + ret = _LockKeystore(server); + if (ret != WH_ERROR_OK) { + return ret; + } + + ret = _GetUniqueIdUnlocked(server, inout_id); -cleanup: (void)_UnlockKeystore(server); return ret; } @@ -535,8 +558,17 @@ int wh_Server_KeystoreGetCacheSlotChecked(whServerContext* server, return ret; } -static int _KeystoreCacheKeyLocked(whServerContext* server, whNvmMetadata* meta, - uint8_t* in, int checked) +/* Forward declarations for unlocked variants */ +#ifdef WOLFHSM_CFG_DMA +static int _KeystoreCacheKeyDmaUnlocked(whServerContext* server, + whNvmMetadata* meta, uint64_t keyAddr, + int checked); +#endif + +/* Unlocked variant - assumes caller holds server->nvm->lock */ +static int _KeystoreCacheKeyUnlocked(whServerContext* server, + whNvmMetadata* meta, uint8_t* in, + int checked) { uint8_t* slotBuf = NULL; whNvmMetadata* slotMeta = NULL; @@ -550,12 +582,7 @@ static int _KeystoreCacheKeyLocked(whServerContext* server, whNvmMetadata* meta, return WH_ERROR_BADARGS; } - ret = _LockKeystore(server); - if (ret != WH_ERROR_OK) { - return ret; - } - - /* Use unlocked variants since we already hold the lock */ + /* Use unlocked variants - caller already holds lock */ if (checked) { ret = _GetCacheSlotCheckedUnlocked(server, meta->id, meta->len, &slotBuf, &slotMeta); @@ -576,6 +603,21 @@ static int _KeystoreCacheKeyLocked(whServerContext* server, whNvmMetadata* meta, WH_DEBUG_VERBOSE_HEXDUMP("[server] cacheKey: key=", in, meta->len); } + return ret; +} + +static int _KeystoreCacheKeyLocked(whServerContext* server, whNvmMetadata* meta, + uint8_t* in, int checked) +{ + int ret; + + ret = _LockKeystore(server); + if (ret != WH_ERROR_OK) { + return ret; + } + + ret = _KeystoreCacheKeyUnlocked(server, meta, in, checked); + (void)_UnlockKeystore(server); return ret; @@ -1099,37 +1141,39 @@ int 
wh_Server_KeystoreEraseKeyChecked(whServerContext* server, whNvmId keyId) } ret = _LockKeystore(server); - if (ret != WH_ERROR_OK) { - return ret; - } - - /* Check eviction policy first */ - ret = _KeystoreCheckPolicyUnlocked(server, WH_KS_OP_EVICT, keyId); - if (ret != WH_ERROR_OK && ret != WH_ERROR_NOTFOUND) { - goto cleanup; - } - - /* Check NVM destroy policy (NONMODIFIABLE/NONDESTROYABLE enforcement) */ - ret = wh_Nvm_GetMetadataUnlocked(server->nvm, keyId, &meta); if (ret == WH_ERROR_OK) { - if (meta.flags & - (WH_NVM_FLAGS_NONMODIFIABLE | WH_NVM_FLAGS_NONDESTROYABLE)) { - ret = WH_ERROR_ACCESS; - goto cleanup; + /* Check eviction policy first */ + ret = _KeystoreCheckPolicyUnlocked(server, WH_KS_OP_EVICT, keyId); + if (ret == WH_ERROR_NOTFOUND) { + ret = WH_ERROR_OK; /* NOTFOUND is acceptable for eviction policy */ } - } - else if (ret != WH_ERROR_NOTFOUND) { - goto cleanup; - } - /* Remove the key from the cache if present */ - (void)_EvictKeyUnlocked(server, keyId); + if (ret == WH_ERROR_OK) { + /* Check NVM destroy policy (NONMODIFIABLE/NONDESTROYABLE + * enforcement) */ + ret = wh_Nvm_GetMetadataUnlocked(server->nvm, keyId, &meta); + if (ret == WH_ERROR_OK) { + if (meta.flags & (WH_NVM_FLAGS_NONMODIFIABLE | + WH_NVM_FLAGS_NONDESTROYABLE)) { + ret = WH_ERROR_ACCESS; + } + } + else if (ret == WH_ERROR_NOTFOUND) { + ret = + WH_ERROR_OK; /* NOTFOUND is acceptable for metadata check */ + } - /* Destroy the object */ - ret = wh_Nvm_DestroyObjectsUnlocked(server->nvm, 1, &keyId); + if (ret == WH_ERROR_OK) { + /* Remove the key from the cache if present */ + (void)_EvictKeyUnlocked(server, keyId); -cleanup: - (void)_UnlockKeystore(server); + /* Destroy the object */ + ret = wh_Nvm_DestroyObjectsUnlocked(server->nvm, 1, &keyId); + } + } + + (void)_UnlockKeystore(server); + } return ret; } @@ -1163,51 +1207,45 @@ int wh_Server_KeystoreRevokeKey(whServerContext* server, whNvmId keyId) } ret = _LockKeystore(server); - if (ret != WH_ERROR_OK) { - return ret; - } - - ret = _KeystoreCheckPolicyUnlocked(server, WH_KS_OP_REVOKE, keyId); - if (ret != WH_ERROR_OK) { - goto cleanup; - } - - /* Check if key is in NVM */ - ret = wh_Nvm_GetMetadataUnlocked(server->nvm, keyId, NULL); if (ret == WH_ERROR_OK) { - isInNvm = 1; - } - else if (ret != WH_ERROR_NOTFOUND) { - goto cleanup; - } - - /* Freshen key into cache */ - ret = _FreshenKeyUnlocked(server, keyId, &cacheBuf, &cacheMeta); - if (ret != WH_ERROR_OK) { - goto cleanup; - } - - /* If already revoked and committed, nothing to do */ - if (_isKeyRevoked(cacheMeta) && _KeyIsCommittedUnlocked(server, keyId)) { - ret = WH_ERROR_OK; - goto cleanup; - } - - /* Revoke the key by updating its metadata */ - _revokeKey(cacheMeta); - - /* Commit the changes */ - if (isInNvm) { - ret = wh_Nvm_AddObjectWithReclaimUnlocked(server->nvm, cacheMeta, - cacheMeta->len, cacheBuf); + ret = _KeystoreCheckPolicyUnlocked(server, WH_KS_OP_REVOKE, keyId); if (ret == WH_ERROR_OK) { - _MarkKeyCommittedUnlocked(_GetCacheContext(server, keyId), keyId, - 1); + /* Check if key is in NVM */ + ret = wh_Nvm_GetMetadataUnlocked(server->nvm, keyId, NULL); + if (ret == WH_ERROR_OK) { + isInNvm = 1; + } + else if (ret == WH_ERROR_NOTFOUND) { + ret = WH_ERROR_OK; /* NOTFOUND is acceptable */ + } + + if (ret == WH_ERROR_OK) { + /* Freshen key into cache */ + ret = _FreshenKeyUnlocked(server, keyId, &cacheBuf, &cacheMeta); + if (ret == WH_ERROR_OK) { + /* If already revoked and committed, nothing to do */ + if (!(_isKeyRevoked(cacheMeta) && + _KeyIsCommittedUnlocked(server, keyId))) { + 
/* Revoke the key by updating its metadata */ + _revokeKey(cacheMeta); + + /* Commit the changes */ + if (isInNvm) { + ret = wh_Nvm_AddObjectWithReclaimUnlocked( + server->nvm, cacheMeta, cacheMeta->len, + cacheBuf); + if (ret == WH_ERROR_OK) { + _MarkKeyCommittedUnlocked( + _GetCacheContext(server, keyId), keyId, 1); + } + } + } + } + } } - } -cleanup: - (void)_UnlockKeystore(server); + (void)_UnlockKeystore(server); + } return ret; } @@ -1986,11 +2024,19 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, /* get a new id if one wasn't provided */ if (WH_KEYID_ISERASED(meta->id)) { - ret = wh_Server_KeystoreGetUniqueId(server, &meta->id); + /* Atomic GetUniqueId + CacheKey */ + ret = _LockKeystore(server); + if (ret == WH_ERROR_OK) { + ret = _GetUniqueIdUnlocked(server, &meta->id); + if (ret == WH_ERROR_OK) { + ret = _KeystoreCacheKeyUnlocked(server, meta, in, 1); + } + (void)_UnlockKeystore(server); + } resp.rc = ret; } - /* write the key */ - if (ret == WH_ERROR_OK) { + else { + /* ID provided, use normal locking */ ret = wh_Server_KeystoreCacheKeyChecked(server, meta, in); resp.rc = ret; } @@ -2029,12 +2075,25 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, /* get a new id if one wasn't provided */ if (WH_KEYID_ISERASED(meta->id)) { - ret = wh_Server_KeystoreGetUniqueId(server, &meta->id); + /* Atomic GetUniqueId + CacheKey DMA */ + ret = _LockKeystore(server); + if (ret == WH_ERROR_OK) { + ret = _GetUniqueIdUnlocked(server, &meta->id); + if (ret == WH_ERROR_OK) { + ret = _KeystoreCacheKeyDmaUnlocked(server, meta, + req.key.addr, 1); + } + (void)_UnlockKeystore(server); + } resp.rc = ret; + /* propagate bad address to client if DMA operation failed */ + if (ret != WH_ERROR_OK) { + resp.dmaAddrStatus.badAddr.addr = req.key.addr; + resp.dmaAddrStatus.badAddr.sz = req.key.sz; + } } - - /* write the key using DMA */ - if (ret == WH_ERROR_OK) { + else { + /* ID provided, use normal locking */ ret = wh_Server_KeystoreCacheKeyDmaChecked(server, meta, req.key.addr); resp.rc = ret; @@ -2405,22 +2464,19 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, #ifdef WOLFHSM_CFG_DMA -int _KeystoreCacheKeyDma(whServerContext* server, whNvmMetadata* meta, - uint64_t keyAddr, int checked) +/* Unlocked variant - assumes caller holds server->nvm->lock */ +static int _KeystoreCacheKeyDmaUnlocked(whServerContext* server, + whNvmMetadata* meta, uint64_t keyAddr, + int checked) { - int ret; - uint8_t* buffer = NULL; - whNvmMetadata* slotMeta = NULL; + int ret; + uint8_t* buffer = NULL; + whNvmMetadata* slotMeta = NULL; if ((server == NULL) || (meta == NULL)) { return WH_ERROR_BADARGS; } - ret = _LockKeystore(server); - if (ret != WH_ERROR_OK) { - return ret; - } - /* Get a cache slot, optionally checking if necessary */ if (checked) { ret = _GetCacheSlotCheckedUnlocked(server, meta->id, meta->len, &buffer, @@ -2431,7 +2487,7 @@ int _KeystoreCacheKeyDma(whServerContext* server, whNvmMetadata* meta, &slotMeta); } if (ret != 0) { - goto cleanup; + return ret; } /* Copy metadata */ @@ -2450,8 +2506,21 @@ int _KeystoreCacheKeyDma(whServerContext* server, whNvmMetadata* meta, 0); } -cleanup: - (void)_UnlockKeystore(server); + return ret; +} + +int _KeystoreCacheKeyDma(whServerContext* server, whNvmMetadata* meta, + uint64_t keyAddr, int checked) +{ + int ret; + + ret = _LockKeystore(server); + if (ret == WH_ERROR_OK) { + + ret = _KeystoreCacheKeyDmaUnlocked(server, meta, keyAddr, checked); + + (void)_UnlockKeystore(server); + } return ret; } 
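The function above shows the wrapper/worker split used throughout this refactor: the worker assumes the caller already holds the lock, and a thin public wrapper supplies it for stand-alone callers. Compound operations then call several workers inside a single _LockKeystore()/_UnlockKeystore() pair rather than nesting the public wrappers, which would self-deadlock on a non-recursive lock. A generic sketch (the names here are placeholders, not APIs from this patch):

/* Worker: assumes caller holds server->nvm->lock; touches cache/NVM state
 * using only *Unlocked() variants. */
static int _ExampleOpUnlocked(whServerContext* server, whKeyId keyId)
{
    (void)server;
    (void)keyId;
    /* ... compound cache/NVM work goes here ... */
    return WH_ERROR_OK;
}

/* Public wrapper: provides the lock for callers that are not already inside
 * a compound operation. */
int wh_Server_ExampleOp(whServerContext* server, whKeyId keyId)
{
    int ret = _LockKeystore(server);
    if (ret == WH_ERROR_OK) {
        ret = _ExampleOpUnlocked(server, keyId);
        (void)_UnlockKeystore(server);
    }
    return ret;
}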
int wh_Server_KeystoreCacheKeyDma(whServerContext* server, whNvmMetadata* meta, @@ -2479,31 +2548,28 @@ int wh_Server_KeystoreExportKeyDma(whServerContext* server, whKeyId keyId, } ret = _LockKeystore(server); - if (ret != WH_ERROR_OK) { - return ret; - } - - /* bring key in cache */ - ret = _FreshenKeyUnlocked(server, keyId, &buffer, &cacheMeta); - if (ret != WH_ERROR_OK) { - goto cleanup; - } + if (ret == WH_ERROR_OK) { + ret = _FreshenKeyUnlocked(server, keyId, &buffer, &cacheMeta); + if (ret == WH_ERROR_OK) { + if (keySz < cacheMeta->len) { + ret = WH_ERROR_NOSPACE; + } + else { + memcpy(outMeta, cacheMeta, sizeof(whNvmMetadata)); + } - if (keySz < cacheMeta->len) { - ret = WH_ERROR_NOSPACE; - goto cleanup; + if (ret == WH_ERROR_OK) { + /* Copy key data using DMA */ + ret = whServerDma_CopyToClient(server, keyAddr, buffer, + outMeta->len, + (whServerDmaFlags){0}); + } + } + (void)_UnlockKeystore(server); } - - memcpy(outMeta, cacheMeta, sizeof(whNvmMetadata)); - - /* Copy key data using DMA */ - ret = whServerDma_CopyToClient(server, keyAddr, buffer, outMeta->len, - (whServerDmaFlags){0}); - -cleanup: - (void)_UnlockKeystore(server); return ret; } + int wh_Server_KeystoreExportKeyDmaChecked(whServerContext* server, whKeyId keyId, uint64_t keyAddr, uint64_t keySz, @@ -2518,34 +2584,28 @@ int wh_Server_KeystoreExportKeyDmaChecked(whServerContext* server, } ret = _LockKeystore(server); - if (ret != WH_ERROR_OK) { - return ret; - } - - ret = _KeystoreCheckPolicyUnlocked(server, WH_KS_OP_EXPORT, keyId); - if (ret != WH_ERROR_OK) { - goto cleanup; - } - - /* Freshen key into cache */ - ret = _FreshenKeyUnlocked(server, keyId, &buffer, &cacheMeta); - if (ret != WH_ERROR_OK) { - goto cleanup; - } + if (ret == WH_ERROR_OK) { + ret = _KeystoreCheckPolicyUnlocked(server, WH_KS_OP_EXPORT, keyId); + if (ret == WH_ERROR_OK) { + ret = _FreshenKeyUnlocked(server, keyId, &buffer, &cacheMeta); + if (ret == WH_ERROR_OK) { + if (keySz < cacheMeta->len) { + ret = WH_ERROR_NOSPACE; + } + else { + memcpy(outMeta, cacheMeta, sizeof(whNvmMetadata)); + } - if (keySz < cacheMeta->len) { - ret = WH_ERROR_NOSPACE; - goto cleanup; + if (ret == WH_ERROR_OK) { + /* Copy key data using DMA */ + ret = whServerDma_CopyToClient(server, keyAddr, buffer, + outMeta->len, + (whServerDmaFlags){0}); + } + } + } + (void)_UnlockKeystore(server); } - - memcpy(outMeta, cacheMeta, sizeof(whNvmMetadata)); - - /* Copy key data using DMA */ - ret = whServerDma_CopyToClient(server, keyAddr, buffer, outMeta->len, - (whServerDmaFlags){0}); - -cleanup: - (void)_UnlockKeystore(server); return ret; } #endif /* WOLFHSM_CFG_DMA */ diff --git a/src/wh_server_nvm.c b/src/wh_server_nvm.c index 6425dd1ef..246a071a9 100644 --- a/src/wh_server_nvm.c +++ b/src/wh_server_nvm.c @@ -19,6 +19,21 @@ /* * src/wh_server_nvm.c * + * LOCKING DISCIPLINE FOR COMPOUND OPERATIONS: + * + * All compound operations (those involving multiple cache/NVM accesses) MUST: + * 1. Acquire server->nvm->lock at the start using _LockNvm() + * 2. Use only *Unlocked() variants of NVM/cache functions internally + * 3. Keep lock held for entire operation, including DMA transfers + * 4. Release lock only after all metadata-dependent operations complete + * + * Rationale: Releasing the lock mid-operation creates TOCTOU (Time-Of-Check- + * Time-Of-Use) vulnerabilities where metadata can become stale or objects can + * be destroyed/replaced between checks. DMA operations that depend on cached + * metadata MUST occur under the same lock that validated that metadata. 
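Condensed, the DMA-read ordering this comment requires looks like the following (sketch only; rc, id, meta, offset, read_len, hostaddr, data, and len are assumed to be in scope as in the handler below):

rc = _LockNvm(server);
if (rc == WH_ERROR_OK) {
    rc = wh_Nvm_GetMetadataUnlocked(server->nvm, id, &meta);
    /* ... bounds-check offset and clamp read_len against meta.len ... */
    if (rc == WH_ERROR_OK) {
        /* PRE address processing stays under the lock */
        rc = wh_Server_DmaProcessClientAddress(
            server, hostaddr, &data, len, WH_DMA_OPER_CLIENT_WRITE_PRE,
            (whServerDmaFlags){0});
    }
    if (rc == WH_ERROR_OK) {
        /* The read that depends on the validated metadata */
        rc = wh_Nvm_ReadCheckedUnlocked(server->nvm, id, offset, read_len,
                                        (uint8_t*)data);
    }
    (void)_UnlockNvm(server);
}
if (rc == WH_ERROR_OK) {
    /* POST address processing no longer depends on the metadata, so it can
     * run outside the lock */
    rc = wh_Server_DmaProcessClientAddress(
        server, hostaddr, &data, len, WH_DMA_OPER_CLIENT_WRITE_POST,
        (whServerDmaFlags){0});
}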
+ * + * This pattern matches keystore DMA operations (KeystoreCacheKeyDmaChecked, + * KeystoreExportKeyDmaChecked) which hold locks throughout their execution. */ /* Pick up compile-time configuration */ @@ -36,6 +51,7 @@ #include "wolfhsm/wh_comm.h" #include "wolfhsm/wh_nvm.h" +#include "wolfhsm/wh_nvm_internal.h" #include "wolfhsm/wh_message.h" #include "wolfhsm/wh_message_nvm.h" @@ -43,10 +59,33 @@ #include "wolfhsm/wh_server.h" #include "wolfhsm/wh_server_nvm.h" -/* Handle NVM read, do access checking and clamping */ -static int _HandleNvmRead(whServerContext* server, uint8_t* out_data, - whNvmSize offset, whNvmSize len, whNvmSize* out_len, - whNvmId id) +/* Helper functions for NVM locking */ +#ifdef WOLFHSM_CFG_THREADSAFE +static int _LockNvm(whServerContext* server) +{ + if (server->nvm != NULL) { + return wh_Lock_Acquire(&server->nvm->lock); + } + return WH_ERROR_OK; +} + +static int _UnlockNvm(whServerContext* server) +{ + if (server->nvm != NULL) { + return wh_Lock_Release(&server->nvm->lock); + } + return WH_ERROR_OK; +} +#else +#define _LockNvm(server) (WH_ERROR_OK) +#define _UnlockNvm(server) (WH_ERROR_OK) +#endif /* WOLFHSM_CFG_THREADSAFE */ + +/* Handle NVM read (unlocked variant), do access checking and clamping + * Assumes caller holds server->nvm->lock */ +static int _HandleNvmReadUnlocked(whServerContext* server, uint8_t* out_data, + whNvmSize offset, whNvmSize len, + whNvmSize* out_len, whNvmId id) { whNvmMetadata meta; int32_t rc; @@ -59,7 +98,7 @@ static int _HandleNvmRead(whServerContext* server, uint8_t* out_data, return WH_ERROR_ABORTED; } - rc = wh_Nvm_GetMetadata(server->nvm, id, &meta); + rc = wh_Nvm_GetMetadataUnlocked(server->nvm, id, &meta); if (rc != WH_ERROR_OK) { return rc; } @@ -72,13 +111,29 @@ static int _HandleNvmRead(whServerContext* server, uint8_t* out_data, len = meta.len - offset; } - rc = wh_Nvm_ReadChecked(server->nvm, id, offset, len, out_data); + rc = wh_Nvm_ReadCheckedUnlocked(server->nvm, id, offset, len, out_data); if (rc != WH_ERROR_OK) return rc; *out_len = len; return WH_ERROR_OK; } +/* Handle NVM read, do access checking and clamping */ +static int _HandleNvmRead(whServerContext* server, uint8_t* out_data, + whNvmSize offset, whNvmSize len, whNvmSize* out_len, + whNvmId id) +{ + int32_t rc; + + /* Acquire lock for atomic GetMetadata + ReadChecked */ + rc = _LockNvm(server); + if (rc == WH_ERROR_OK) { + rc = _HandleNvmReadUnlocked(server, out_data, offset, len, out_len, id); + (void)_UnlockNvm(server); + } + return rc; +} + int wh_Server_HandleNvmRequest(whServerContext* server, uint16_t magic, uint16_t action, uint16_t seq, uint16_t req_size, const void* req_packet, @@ -373,38 +428,51 @@ int wh_Server_HandleNvmRequest(whServerContext* server, wh_MessageNvm_TranslateReadDmaRequest(magic, (whMessageNvm_ReadDmaRequest*)req_packet, &req); - resp.rc = wh_Nvm_GetMetadata(server->nvm, req.id, &meta); - } - - if (resp.rc == 0) { - if (req.offset >= meta.len) { - resp.rc = WH_ERROR_BADARGS; - } - } - - if (resp.rc == 0) { - read_len = req.data_len; - /* Clamp length to object size */ - if ((req.offset + read_len) > meta.len) { - read_len = meta.len - req.offset; + /* Acquire lock for entire operation to prevent TOCTOU */ + resp.rc = _LockNvm(server); + if (resp.rc == WH_ERROR_OK) { + resp.rc = + wh_Nvm_GetMetadataUnlocked(server->nvm, req.id, &meta); + + if (resp.rc == 0) { + if (req.offset >= meta.len) { + resp.rc = WH_ERROR_BADARGS; + } + } + + if (resp.rc == 0) { + read_len = req.data_len; + /* Clamp length to object size */ + if ((req.offset 
+ read_len) > meta.len) { + read_len = meta.len - req.offset; + } + } + + /* use unclamped length for DMA address processing in case DMA + * callbacks are sensible to alignment and/or size. + * Keep lock held during DMA to ensure metadata remains valid. + */ + if (resp.rc == 0) { + /* perform platform-specific host address processing */ + resp.rc = wh_Server_DmaProcessClientAddress( + server, req.data_hostaddr, &data, req.data_len, + WH_DMA_OPER_CLIENT_WRITE_PRE, (whServerDmaFlags){0}); + } + + if (resp.rc == WH_ERROR_OK) { + /* Process the Read action */ + resp.rc = wh_Nvm_ReadCheckedUnlocked(server->nvm, req.id, + req.offset, read_len, + (uint8_t*)data); + } + /* Release lock after all metadata-dependent operations */ + (void)_UnlockNvm(server); } } - - /* use unclamped length for DMA address processing in case DMA callbacks - * are sensible to alignment and/or size */ if (resp.rc == 0) { - /* perform platform-specific host address processing */ - resp.rc = wh_Server_DmaProcessClientAddress( - server, req.data_hostaddr, &data, req.data_len, - WH_DMA_OPER_CLIENT_WRITE_PRE, (whServerDmaFlags){0}); - } - if (resp.rc == 0) { - /* Process the Read action */ - resp.rc = wh_Nvm_ReadChecked(server->nvm, req.id, req.offset, - read_len, (uint8_t*)data); - } - if (resp.rc == 0) { - /* perform platform-specific host address processing */ + /* perform platform-specific host address processing. + * POST processing can be outside lock as it doesn't depend on + * metadata validity. */ resp.rc = wh_Server_DmaProcessClientAddress( server, req.data_hostaddr, &data, req.data_len, WH_DMA_OPER_CLIENT_WRITE_POST, (whServerDmaFlags){0}); diff --git a/test/wh_test_posix_threadsafe_stress.c b/test/wh_test_posix_threadsafe_stress.c index 7fe1db3d5..4e009b97e 100644 --- a/test/wh_test_posix_threadsafe_stress.c +++ b/test/wh_test_posix_threadsafe_stress.c @@ -215,6 +215,7 @@ static const whTransportServerCb serverTransportCb = WH_TRANSPORT_MEM_SERVER_CB; #define HOT_NVM_ID ((whNvmId)100) #define HOT_NVM_ID_2 ((whNvmId)101) #define HOT_NVM_ID_3 ((whNvmId)102) +#define HOT_COUNTER_ID ((whNvmId)200) /* ============================================================================ * PHASE DEFINITIONS @@ -240,6 +241,8 @@ typedef enum { PHASE_NVM_CONCURRENT_READ, PHASE_NVM_LIST_DURING_MODIFY, PHASE_NVM_CONCURRENT_DESTROY, + PHASE_NVM_READ_VS_RESIZE, /* 2 read, 2 resize same object */ + PHASE_NVM_CONCURRENT_RESIZE, /* 4 threads resize same object */ /* Cross-subsystem phases */ PHASE_CROSS_COMMIT_VS_ADD, @@ -261,12 +264,17 @@ typedef enum { PHASE_NVM_GETAVAILABLE_VS_ADD, /* 2 query space, 2 add objects */ PHASE_NVM_GETMETADATA_VS_DESTROY, /* 2 query metadata, 2 destroy */ + /* Counter */ + PHASE_COUNTER_CONCURRENT_INCREMENT, /* 4 threads increment same counter */ + PHASE_COUNTER_INCREMENT_VS_READ, /* 2 increment, 2 read same counter */ + #ifdef WOLFHSM_CFG_DMA /* DMA Operations */ PHASE_KS_CACHE_DMA_VS_EXPORT, /* 2 DMA cache, 2 regular export */ PHASE_KS_EXPORT_DMA_VS_EVICT, /* 2 DMA export, 2 evict */ PHASE_NVM_ADD_DMA_VS_READ, /* 2 DMA add, 2 regular read */ PHASE_NVM_READ_DMA_VS_DESTROY, /* 2 DMA read, 2 destroy */ + PHASE_NVM_READ_DMA_VS_RESIZE, /* 2 DMA read, 2 resize same object */ #endif PHASE_COUNT @@ -407,6 +415,25 @@ static const PhaseConfig phases[] = { "NVM: GetMetadata vs Destroy", PHASE_ITERATIONS, {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}}, + {PHASE_NVM_READ_VS_RESIZE, + "NVM Read vs Resize", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}}, + + {PHASE_NVM_CONCURRENT_RESIZE, + "NVM 
Concurrent Resize", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_A, ROLE_OP_A}}, + + /* Counter */ + {PHASE_COUNTER_CONCURRENT_INCREMENT, + "Counter Concurrent Increment", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_A, ROLE_OP_A}}, + {PHASE_COUNTER_INCREMENT_VS_READ, + "Counter Increment vs Read", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}}, #ifdef WOLFHSM_CFG_DMA /* DMA Operations */ @@ -426,6 +453,10 @@ static const PhaseConfig phases[] = { "NVM: Read DMA vs Destroy", PHASE_ITERATIONS, {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}}, + {PHASE_NVM_READ_DMA_VS_RESIZE, + "NVM Read DMA vs Resize", + PHASE_ITERATIONS, + {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}}, #endif }; @@ -1010,6 +1041,94 @@ static int doKeyRevoke(whClientContext* client, whKeyId keyId) return rc; } +static int doCounterInit(whClientContext* client, whNvmId counterId, + uint32_t initialValue) +{ + uint32_t counter = 0; + int rc; + + /* Send request */ + rc = wh_Client_CounterInitRequest(client, counterId, initialValue); + if (rc != WH_ERROR_OK) { + return rc; + } + + /* Wait for response from server thread */ + do { + rc = wh_Client_CounterInitResponse(client, &counter); + if (rc == WH_ERROR_NOTREADY) { + sched_yield(); + } + } while (rc == WH_ERROR_NOTREADY); + + return rc; +} + +static int doCounterIncrement(whClientContext* client, whNvmId counterId, + uint32_t* out_counter) +{ + int rc; + + /* Send request */ + rc = wh_Client_CounterIncrementRequest(client, counterId); + if (rc != WH_ERROR_OK) { + return rc; + } + + /* Wait for response from server thread */ + do { + rc = wh_Client_CounterIncrementResponse(client, out_counter); + if (rc == WH_ERROR_NOTREADY) { + sched_yield(); + } + } while (rc == WH_ERROR_NOTREADY); + + return rc; +} + +static int doCounterRead(whClientContext* client, whNvmId counterId, + uint32_t* out_counter) +{ + int rc; + + /* Send request */ + rc = wh_Client_CounterReadRequest(client, counterId); + if (rc != WH_ERROR_OK) { + return rc; + } + + /* Wait for response from server thread */ + do { + rc = wh_Client_CounterReadResponse(client, out_counter); + if (rc == WH_ERROR_NOTREADY) { + sched_yield(); + } + } while (rc == WH_ERROR_NOTREADY); + + return rc; +} + +static int doCounterDestroy(whClientContext* client, whNvmId counterId) +{ + int rc; + + /* Send request */ + rc = wh_Client_CounterDestroyRequest(client, counterId); + if (rc != WH_ERROR_OK) { + return rc; + } + + /* Wait for response from server thread */ + do { + rc = wh_Client_CounterDestroyResponse(client); + if (rc == WH_ERROR_NOTREADY) { + sched_yield(); + } + } while (rc == WH_ERROR_NOTREADY); + + return rc; +} + #ifdef WOLFHSM_CFG_DMA static int doKeyCacheDma(ClientServerPair* pair, whKeyId keyId, int iteration) { @@ -1300,6 +1419,28 @@ static int doPhaseSetup(ClientServerPair* pair, ContentionPhase phase, rc = WH_ERROR_OK; return rc; + /* Counter phases - create counter with initial value 0 */ + case PHASE_COUNTER_CONCURRENT_INCREMENT: + case PHASE_COUNTER_INCREMENT_VS_READ: + /* Destroy any existing counter first */ + (void)doCounterDestroy(client, HOT_COUNTER_ID); + /* Initialize counter with value 0 */ + rc = doCounterInit(client, HOT_COUNTER_ID, 0); + if (rc == WH_ERROR_NOSPACE) + rc = WH_ERROR_OK; + return rc; + + /* NVM Read vs Resize - create object with initial size */ + case PHASE_NVM_READ_VS_RESIZE: + case PHASE_NVM_CONCURRENT_RESIZE: + /* Destroy any existing object first */ + (void)doNvmDestroy(client, HOT_NVM_ID); + /* Create object with initial size (64 bytes) */ + rc = 
doNvmAddObject(client, HOT_NVM_ID, 0); + if (rc == WH_ERROR_NOSPACE) + rc = WH_ERROR_OK; + return rc; + #ifdef WOLFHSM_CFG_DMA /* DMA phases: evict first to make room in cache, tolerate NOSPACE * (cache may be full from earlier phases like GetUniqueId) */ @@ -1320,6 +1461,14 @@ static int doPhaseSetup(ClientServerPair* pair, ContentionPhase phase, case PHASE_NVM_READ_DMA_VS_DESTROY: (void)doNvmDestroy(client, HOT_NVM_ID); return doNvmAddObject(client, HOT_NVM_ID, 0); + + /* NVM Read DMA vs Resize */ + case PHASE_NVM_READ_DMA_VS_RESIZE: + (void)doNvmDestroy(client, HOT_NVM_ID); + rc = doNvmAddObject(client, HOT_NVM_ID, 0); + if (rc == WH_ERROR_NOSPACE) + rc = WH_ERROR_OK; + return rc; #endif default: @@ -1504,6 +1653,91 @@ static int executePhaseOperation(ClientServerPair* pair, ContentionPhase phase, else return doNvmDestroy(client, HOT_NVM_ID); + /* Counter Concurrent Increment */ + case PHASE_COUNTER_CONCURRENT_INCREMENT: { + uint32_t counter = 0; + return doCounterIncrement(client, HOT_COUNTER_ID, &counter); + } + + /* Counter Increment vs Read */ + case PHASE_COUNTER_INCREMENT_VS_READ: { + uint32_t counter = 0; + if (role == ROLE_OP_A) + return doCounterIncrement(client, HOT_COUNTER_ID, &counter); + else + return doCounterRead(client, HOT_COUNTER_ID, &counter); + } + + /* NVM Read vs Resize - alternating object sizes */ + case PHASE_NVM_READ_VS_RESIZE: + if (role == ROLE_OP_A) { + /* Read operation */ + return doNvmRead(client, HOT_NVM_ID); + } + else { + /* Resize operation: destroy and re-add with different size + * Alternate between 64 bytes (full) and 32 bytes (half) */ + int rc; + uint8_t data[NVM_OBJECT_DATA_SIZE]; + int32_t out_rc; + whNvmSize newSize = (iteration % 2 == 0) + ? NVM_OBJECT_DATA_SIZE + : (NVM_OBJECT_DATA_SIZE / 2); + + /* Destroy existing object */ + (void)doNvmDestroy(client, HOT_NVM_ID); + + /* Re-add with new size */ + memset(data, (uint8_t)(iteration & 0xFF), newSize); + rc = wh_Client_NvmAddObjectRequest( + client, HOT_NVM_ID, WH_NVM_ACCESS_ANY, + WH_NVM_FLAGS_USAGE_ANY, 0, NULL, newSize, data); + if (rc != WH_ERROR_OK) { + return rc; + } + + do { + rc = wh_Client_NvmAddObjectResponse(client, &out_rc); + if (rc == WH_ERROR_NOTREADY) { + sched_yield(); + } + } while (rc == WH_ERROR_NOTREADY); + + return (rc == WH_ERROR_OK) ? out_rc : rc; + } + + /* NVM Concurrent Resize */ + case PHASE_NVM_CONCURRENT_RESIZE: { + /* All threads resize: destroy and re-add with different size */ + int rc; + uint8_t data[NVM_OBJECT_DATA_SIZE]; + int32_t out_rc; + whNvmSize newSize = (iteration % 2 == 0) + ? NVM_OBJECT_DATA_SIZE + : (NVM_OBJECT_DATA_SIZE / 2); + + /* Destroy existing object */ + (void)doNvmDestroy(client, HOT_NVM_ID); + + /* Re-add with new size */ + memset(data, (uint8_t)(iteration & 0xFF), newSize); + rc = wh_Client_NvmAddObjectRequest( + client, HOT_NVM_ID, WH_NVM_ACCESS_ANY, WH_NVM_FLAGS_USAGE_ANY, + 0, NULL, newSize, data); + if (rc != WH_ERROR_OK) { + return rc; + } + + do { + rc = wh_Client_NvmAddObjectResponse(client, &out_rc); + if (rc == WH_ERROR_NOTREADY) { + sched_yield(); + } + } while (rc == WH_ERROR_NOTREADY); + + return (rc == WH_ERROR_OK) ? 
out_rc : rc; + } + #ifdef WOLFHSM_CFG_DMA /* DMA: Cache DMA vs Export */ case PHASE_KS_CACHE_DMA_VS_EXPORT: @@ -1532,6 +1766,50 @@ static int executePhaseOperation(ClientServerPair* pair, ContentionPhase phase, return doNvmReadDma(pair, HOT_NVM_ID); else return doNvmDestroy(client, HOT_NVM_ID); + + /* NVM Read DMA vs Resize */ + case PHASE_NVM_READ_DMA_VS_RESIZE: + if (role == ROLE_OP_A) { + /* DMA Read operation */ + return doNvmReadDma(pair, HOT_NVM_ID); + } + else { + /* Resize operation: destroy and re-add with different size */ + int rc; + uint8_t data[NVM_OBJECT_DATA_SIZE]; + int32_t out_rc; + whNvmSize newSize = (iteration % 2 == 0) + ? NVM_OBJECT_DATA_SIZE + : (NVM_OBJECT_DATA_SIZE / 2); + whNvmMetadata meta; + + /* Destroy existing object */ + (void)doNvmDestroy(client, HOT_NVM_ID); + + /* Re-add with new size */ + memset(data, (uint8_t)(iteration & 0xFF), newSize); + memset(&meta, 0, sizeof(meta)); + meta.id = HOT_NVM_ID; + meta.access = WH_NVM_ACCESS_ANY; + meta.flags = WH_NVM_FLAGS_USAGE_ANY; + meta.len = newSize; + + rc = wh_Client_NvmAddObjectRequest( + client, HOT_NVM_ID, WH_NVM_ACCESS_ANY, + WH_NVM_FLAGS_USAGE_ANY, 0, NULL, newSize, data); + if (rc != WH_ERROR_OK) { + return rc; + } + + do { + rc = wh_Client_NvmAddObjectResponse(client, &out_rc); + if (rc == WH_ERROR_NOTREADY) { + sched_yield(); + } + } while (rc == WH_ERROR_NOTREADY); + + return (rc == WH_ERROR_OK) ? out_rc : rc; + } #endif default: @@ -1616,6 +1894,19 @@ static int isAcceptableResult(ContentionPhase phase, int rc) case PHASE_NVM_GETMETADATA_VS_DESTROY: return (rc == WH_ERROR_NOTFOUND); + /* Counter phases - NOTFOUND marked acceptable to prevent test abort, + * but validation will catch it as a bug (counter < expectedMin). + * NOTFOUND shouldn't occur in CONCURRENT_INCREMENT (no destroys). 
*/ + case PHASE_COUNTER_CONCURRENT_INCREMENT: + case PHASE_COUNTER_INCREMENT_VS_READ: + return (rc == WH_ERROR_NOTFOUND); + + /* NVM Read vs Resize - NOTFOUND acceptable (object destroyed + * during resize), NOSPACE acceptable (NVM full) */ + case PHASE_NVM_READ_VS_RESIZE: + case PHASE_NVM_CONCURRENT_RESIZE: + return (rc == WH_ERROR_NOTFOUND || rc == WH_ERROR_NOSPACE); + #ifdef WOLFHSM_CFG_DMA /* DMA phases: same as non-DMA equivalents */ case PHASE_KS_CACHE_DMA_VS_EXPORT: @@ -1629,6 +1920,10 @@ static int isAcceptableResult(ContentionPhase phase, int rc) case PHASE_NVM_READ_DMA_VS_DESTROY: return (rc == WH_ERROR_NOTFOUND); + + /* NVM Read DMA vs Resize */ + case PHASE_NVM_READ_DMA_VS_RESIZE: + return (rc == WH_ERROR_NOTFOUND || rc == WH_ERROR_NOSPACE); #endif default: @@ -1729,6 +2024,70 @@ static int allClientsReachedIterations(StressTestContext* ctx, int target) return 1; } +/* Post-phase validation for applicable tests + * Returns WH_ERROR_OK if validation passes, error code otherwise */ +static int validatePhaseResult(StressTestContext* ctx, ContentionPhase phase, + int totalIterations, int totalErrors) +{ + int rc; + + switch (phase) { + case PHASE_COUNTER_CONCURRENT_INCREMENT: { + /* Validate counter value matches expected increments + * Expected: number of successful increments + * Count ROLE_OP_A threads (all 4 in this phase) */ + uint32_t counter = 0; + int opACount = 0; + int i; + + /* Count how many threads were doing increments (ROLE_OP_A) */ + for (i = 0; i < NUM_CLIENTS; i++) { + if (ctx->clientRoles[i] == ROLE_OP_A) { + opACount++; + } + } + + /* Read final counter value using client 0 */ + rc = doCounterRead(&ctx->pairs[0].client, HOT_COUNTER_ID, &counter); + if (rc != WH_ERROR_OK) { + WH_ERROR_PRINT( + " VALIDATION FAILED: Counter read failed: %d\n", rc); + return WH_ERROR_ABORTED; + } + + /* Calculate expected value: iterations per client × number of + * incrementing clients Each incrementing client did + * config->iterations increments + * Account for errors: totalIterations counts all attempts, + * but totalErrors counts unacceptable failures that didn't + * increment */ + uint32_t expectedMin = totalIterations - totalErrors; + + WH_TEST_PRINT(" Counter validation: value=%u, expected_min=%u " + "(iters=%d, errors=%d)\n", + counter, expectedMin, totalIterations, totalErrors); + + /* Counter must equal expectedMin. If counter < expectedMin, this + * indicates either: + * 1. Lost increments due to locking bug (race condition) + * 2. NOTFOUND occurred (shouldn't happen - no concurrent destroys) + */ + if (counter < expectedMin) { + WH_ERROR_PRINT(" VALIDATION FAILED: Counter value %u < " + "expected min %u\n", + counter, expectedMin); + return WH_ERROR_ABORTED; + } + + return WH_ERROR_OK; + } + + /* Other phases don't need special validation yet */ + default: + return WH_ERROR_OK; + } +} + /* Select the appropriate keyId for a phase. * Revoke-related phases need unique key IDs because revoked keys can't be * erased or re-cached. Each phase type that might leave a key revoked needs @@ -1753,6 +2112,7 @@ static int runPhase(StressTestContext* ctx, const PhaseConfig* config, whKeyId keyId) { int i; + int rc; #ifdef WOLFHSM_CFG_TEST_STRESS_PHASE_TIMEOUT_SEC time_t phaseStart; #endif @@ -1823,6 +2183,12 @@ static int runPhase(StressTestContext* ctx, const PhaseConfig* config, WH_TEST_PRINT(" Total: %d iterations, %d errors\n", totalIterations, totalErrors); + /* 10. 
Run phase-specific validation */ + rc = validatePhaseResult(ctx, config->phase, totalIterations, totalErrors); + if (rc != WH_ERROR_OK) { + return rc; + } + /* Return error if phase failed */ if (timedOut || totalErrors > 0) { return WH_ERROR_ABORTED; From a58ca2bbd1797add395c1381235d2c80cfbc490f Mon Sep 17 00:00:00 2001 From: Brett Nicholas <7547222+bigbrett@users.noreply.github.com> Date: Wed, 28 Jan 2026 12:23:39 -0700 Subject: [PATCH 04/13] Massive refactor to locking integration. Pull locking out of lower level server module APIs (keystore, NVM, counter, etc.) and aquire lock in request handling functions (e.g. wh_Server_HandleXXXRequest()) --- src/wh_nvm.c | 434 +++----------- src/wh_server.c | 18 + src/wh_server_cert.c | 189 +++--- src/wh_server_counter.c | 101 ++-- src/wh_server_img_mgr.c | 52 +- src/wh_server_keystore.c | 1167 ++++++++++++++----------------------- src/wh_server_nvm.c | 223 ++++--- src/wh_server_she.c | 104 +++- wolfhsm/wh_nvm.h | 11 + wolfhsm/wh_nvm_internal.h | 91 --- wolfhsm/wh_server.h | 11 + 11 files changed, 889 insertions(+), 1512 deletions(-) delete mode 100644 wolfhsm/wh_nvm_internal.h diff --git a/src/wh_nvm.c b/src/wh_nvm.c index 12a746eb9..12af0ec4c 100644 --- a/src/wh_nvm.c +++ b/src/wh_nvm.c @@ -30,99 +30,19 @@ #include "wolfhsm/wh_common.h" #include "wolfhsm/wh_error.h" - +#include "wolfhsm/wh_lock.h" #include "wolfhsm/wh_nvm.h" -#ifdef WOLFHSM_CFG_THREADSAFE -/* Helper functions for NVM locking */ -static int _LockNvm(whNvmContext* context) -{ - return wh_Lock_Acquire(&context->lock); -} - -static int _UnlockNvm(whNvmContext* context) -{ - return wh_Lock_Release(&context->lock); -} -#else -#define _LockNvm(context) (WH_ERROR_OK) -#define _UnlockNvm(context) (WH_ERROR_OK) -#endif /* WOLFHSM_CFG_THREADSAFE */ - -/* - * Internal unlocked callback helpers. - * These call the NVM callbacks directly without acquiring locks. - * Callers MUST hold the NVM lock when using these functions. 
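To make the new division of responsibility concrete: after this refactor the request handlers own the critical section and the module APIs stay lock-free. An abridged sketch of the shape used by the handlers later in this patch; WH_SERVER_NVM_LOCK()/WH_SERVER_NVM_UNLOCK() are the locking wrappers the handlers call (wh_server.c adds wh_Server_NvmLock()/wh_Server_NvmUnlock(), which acquire and release server->nvm->lock), and req, resp, cert_data, and cert_len are handler locals:

/* Inside a wh_Server_HandleXXXRequest() action, after decoding the request */
rc = WH_SERVER_NVM_LOCK(server);
if (rc == WH_ERROR_OK) {
    /* Everything that touches NVM or key-cache state for this action runs
     * in one critical section; the module call itself no longer locks. */
    rc = wh_Server_CertReadTrusted(server, req.id, cert_data, &cert_len);
    (void)WH_SERVER_NVM_UNLOCK(server);
}
resp.rc = rc;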
- */ -static int _GetAvailableUnlocked(whNvmContext* context, - uint32_t* out_avail_size, - whNvmId* out_avail_objects, - uint32_t* out_reclaim_size, - whNvmId* out_reclaim_objects) -{ - if (context->cb->GetAvailable == NULL) { - return WH_ERROR_ABORTED; - } - return context->cb->GetAvailable(context->context, out_avail_size, - out_avail_objects, out_reclaim_size, - out_reclaim_objects); -} - -static int _DestroyObjectsUnlocked(whNvmContext* context, whNvmId list_count, - const whNvmId* id_list) -{ - if (context->cb->DestroyObjects == NULL) { - return WH_ERROR_ABORTED; - } - return context->cb->DestroyObjects(context->context, list_count, id_list); -} - -static int _AddObjectUnlocked(whNvmContext* context, whNvmMetadata* meta, - whNvmSize data_len, const uint8_t* data) -{ - if (context->cb->AddObject == NULL) { - return WH_ERROR_ABORTED; - } - return context->cb->AddObject(context->context, meta, data_len, data); -} - -static int _ListUnlocked(whNvmContext* context, whNvmAccess access, - whNvmFlags flags, whNvmId start_id, whNvmId* out_count, - whNvmId* out_id) -{ - if (context->cb->List == NULL) { - return WH_ERROR_ABORTED; - } - return context->cb->List(context->context, access, flags, start_id, - out_count, out_id); -} - -static int _GetMetadataUnlocked(whNvmContext* context, whNvmId id, - whNvmMetadata* meta) -{ - if (context->cb->GetMetadata == NULL) { - return WH_ERROR_ABORTED; - } - return context->cb->GetMetadata(context->context, id, meta); -} - -static int _ReadUnlocked(whNvmContext* context, whNvmId id, whNvmSize offset, - whNvmSize data_len, uint8_t* data) -{ - if (context->cb->Read == NULL) { - return WH_ERROR_ABORTED; - } - return context->cb->Read(context->context, id, offset, data_len, data); -} - typedef enum { WH_NVM_OP_ADD = 0, WH_NVM_OP_READ, WH_NVM_OP_DESTROY, } whNvmOp; -static int wh_Nvm_CheckPolicyUnlocked(whNvmContext* context, whNvmOp op, - whNvmId id, whNvmMetadata* existing_meta) +/* Centralized policy check. + * existing_meta is optionally populated when the object is present. 
*/ +static int wh_Nvm_CheckPolicy(whNvmContext* context, whNvmOp op, whNvmId id, + whNvmMetadata* existing_meta) { whNvmMetadata meta; int ret; @@ -131,7 +51,7 @@ static int wh_Nvm_CheckPolicyUnlocked(whNvmContext* context, whNvmOp op, return WH_ERROR_BADARGS; } - ret = _GetMetadataUnlocked(context, id, &meta); + ret = wh_Nvm_GetMetadata(context, id, &meta); if (ret != WH_ERROR_OK) { return ret; } @@ -167,56 +87,6 @@ static int wh_Nvm_CheckPolicyUnlocked(whNvmContext* context, whNvmOp op, return WH_ERROR_OK; } -static int _Nvm_AddObjectCheckedUnlocked(whNvmContext* context, - whNvmMetadata* meta, - whNvmSize data_len, - const uint8_t* data) -{ - int ret; - - ret = wh_Nvm_CheckPolicyUnlocked(context, WH_NVM_OP_ADD, meta->id, NULL); - if (ret != WH_ERROR_OK && ret != WH_ERROR_NOTFOUND) { - return ret; - } - - return _AddObjectUnlocked(context, meta, data_len, data); -} - -static int _Nvm_DestroyObjectsCheckedUnlocked(whNvmContext* context, - whNvmId list_count, - const whNvmId* id_list) -{ - whNvmId i; - int ret; - - if (id_list == NULL && list_count != 0) { - return WH_ERROR_BADARGS; - } - - for (i = 0; i < list_count; i++) { - ret = wh_Nvm_CheckPolicyUnlocked(context, WH_NVM_OP_DESTROY, id_list[i], - NULL); - if (ret != WH_ERROR_OK) { - return ret; - } - } - - return _DestroyObjectsUnlocked(context, list_count, id_list); -} - -static int _Nvm_ReadCheckedUnlocked(whNvmContext* context, whNvmId id, - whNvmSize offset, whNvmSize data_len, - uint8_t* data) -{ - int ret; - - ret = wh_Nvm_CheckPolicyUnlocked(context, WH_NVM_OP_READ, id, NULL); - if (ret != WH_ERROR_OK) { - return ret; - } - - return _ReadUnlocked(context, id, offset, data_len, data); -} int wh_Nvm_Init(whNvmContext* context, const whNvmConfig* config) { @@ -226,8 +96,6 @@ int wh_Nvm_Init(whNvmContext* context, const whNvmConfig* config) return WH_ERROR_BADARGS; } - memset(context, 0, sizeof(*context)); - context->cb = config->cb; context->context = config->context; @@ -249,11 +117,11 @@ int wh_Nvm_Init(whNvmContext* context, const whNvmConfig* config) if (context->cb != NULL && context->cb->Init != NULL) { rc = context->cb->Init(context->context, config->config); if (rc != 0) { -#ifdef WOLFHSM_CFG_THREADSAFE - wh_Lock_Cleanup(&context->lock); -#endif context->cb = NULL; context->context = NULL; +#ifdef WOLFHSM_CFG_THREADSAFE + (void)wh_Lock_Cleanup(&context->lock); +#endif } } @@ -283,7 +151,7 @@ int wh_Nvm_Cleanup(whNvmContext* context) } #ifdef WOLFHSM_CFG_THREADSAFE - wh_Lock_Cleanup(&context->lock); + (void)wh_Lock_Cleanup(&context->lock); #endif context->cb = NULL; @@ -296,20 +164,18 @@ int wh_Nvm_GetAvailable(whNvmContext* context, uint32_t *out_avail_size, whNvmId *out_avail_objects, uint32_t *out_reclaim_size, whNvmId *out_reclaim_objects) { - int rc; - - if ((context == NULL) || (context->cb == NULL)) { + if ( (context == NULL) || + (context->cb == NULL) ) { return WH_ERROR_BADARGS; } - rc = _LockNvm(context); - if (rc == WH_ERROR_OK) { - rc = _GetAvailableUnlocked(context, out_avail_size, out_avail_objects, - out_reclaim_size, out_reclaim_objects); - - (void)_UnlockNvm(context); + /* No callback? 
Return ABORTED */ + if (context->cb->GetAvailable == NULL) { + return WH_ERROR_ABORTED; } - return rc; + return context->cb->GetAvailable(context->context, + out_avail_size, out_avail_objects, + out_reclaim_size, out_reclaim_objects); } int wh_Nvm_AddObjectWithReclaim(whNvmContext* context, whNvmMetadata *meta, @@ -322,34 +188,30 @@ int wh_Nvm_AddObjectWithReclaim(whNvmContext* context, whNvmMetadata *meta, uint16_t reclaimObjects; /* Note that meta and data pointers are validated by AddObject later */ - if ((context == NULL) || (context->cb == NULL)) { + if (context == NULL) { return WH_ERROR_BADARGS; } - ret = _LockNvm(context); - if (ret == WH_ERROR_OK) { - /* check if we have available object and data space */ - ret = _GetAvailableUnlocked(context, &availableSize, &availableObjects, - &reclaimSize, &reclaimObjects); - if (ret == 0) { - if ((availableSize < dataLen) || (availableObjects == 0)) { - /* There's no available space, so try to reclaim space, */ - if ((availableSize + reclaimSize >= dataLen) && - (availableObjects + reclaimObjects > 0)) { - /* Reclaim will make sufficient space available */ - ret = _DestroyObjectsUnlocked(context, 0, NULL); - } - else { - /* Reclaim will not help */ - ret = WH_ERROR_NOSPACE; - } + /* check if we have available object and data space */ + ret = wh_Nvm_GetAvailable(context, + &availableSize, &availableObjects, + &reclaimSize, &reclaimObjects); + if (ret == 0) { + if ( (availableSize < dataLen) || + (availableObjects == 0) ) { + /* There's no available space, so try to reclaim space, */ + if ( (availableSize + reclaimSize >= dataLen) && + (availableObjects + reclaimObjects > 0) ) { + /* Reclaim will make sufficient space available */ + ret = wh_Nvm_DestroyObjects(context, 0, NULL); + } else { + /* Reclaim witl not help */ + ret = WH_ERROR_NOSPACE; } } - if (ret == 0) { - ret = _AddObjectUnlocked(context, meta, dataLen, data); - } - - (void)_UnlockNvm(context); + } + if (ret == 0) { + ret = wh_Nvm_AddObject(context, meta, dataLen, data); } return ret; } @@ -357,19 +219,16 @@ int wh_Nvm_AddObjectWithReclaim(whNvmContext* context, whNvmMetadata *meta, int wh_Nvm_AddObject(whNvmContext* context, whNvmMetadata *meta, whNvmSize data_len, const uint8_t* data) { - int rc; - - if ((context == NULL) || (context->cb == NULL)) { + if ( (context == NULL) || + (context->cb == NULL) ) { return WH_ERROR_BADARGS; } - rc = _LockNvm(context); - if (rc == WH_ERROR_OK) { - rc = _AddObjectUnlocked(context, meta, data_len, data); - - (void)_UnlockNvm(context); + /* No callback? 
Return ABORTED */ + if (context->cb->AddObject == NULL) { + return WH_ERROR_ABORTED; } - return rc; + return context->cb->AddObject(context->context, meta, data_len, data); } int wh_Nvm_AddObjectChecked(whNvmContext* context, whNvmMetadata* meta, @@ -377,110 +236,96 @@ int wh_Nvm_AddObjectChecked(whNvmContext* context, whNvmMetadata* meta, { int ret; - if ((context == NULL) || (context->cb == NULL)) { - return WH_ERROR_BADARGS; + ret = wh_Nvm_CheckPolicy(context, WH_NVM_OP_ADD, meta->id, NULL); + if (ret != WH_ERROR_OK && ret != WH_ERROR_NOTFOUND) { + return ret; } - ret = _LockNvm(context); - if (ret == WH_ERROR_OK) { - ret = _Nvm_AddObjectCheckedUnlocked(context, meta, data_len, data); - - (void)_UnlockNvm(context); - } - return ret; + return wh_Nvm_AddObject(context, meta, data_len, data); } int wh_Nvm_List(whNvmContext* context, whNvmAccess access, whNvmFlags flags, whNvmId start_id, whNvmId *out_count, whNvmId *out_id) { - int rc; - - if ((context == NULL) || (context->cb == NULL)) { + if ( (context == NULL) || + (context->cb == NULL) ) { return WH_ERROR_BADARGS; } - rc = _LockNvm(context); - if (rc == WH_ERROR_OK) { - rc = _ListUnlocked(context, access, flags, start_id, out_count, out_id); - - (void)_UnlockNvm(context); + /* No callback? Return ABORTED */ + if (context->cb->List == NULL) { + return WH_ERROR_ABORTED; } - return rc; + return context->cb->List(context->context, access, flags, start_id, + out_count, out_id); } int wh_Nvm_GetMetadata(whNvmContext* context, whNvmId id, whNvmMetadata* meta) { - int rc; - - if ((context == NULL) || (context->cb == NULL)) { + if ( (context == NULL) || + (context->cb == NULL) ) { return WH_ERROR_BADARGS; } - rc = _LockNvm(context); - if (rc == WH_ERROR_OK) { - rc = _GetMetadataUnlocked(context, id, meta); - - (void)_UnlockNvm(context); + /* No callback? Return ABORTED */ + if (context->cb->GetMetadata == NULL) { + return WH_ERROR_ABORTED; } - return rc; + return context->cb->GetMetadata(context->context, id, meta); } int wh_Nvm_DestroyObjects(whNvmContext* context, whNvmId list_count, const whNvmId* id_list) { - int rc; - - if ((context == NULL) || (context->cb == NULL)) { + if ( (context == NULL) || + (context->cb == NULL) ) { return WH_ERROR_BADARGS; } - rc = _LockNvm(context); - if (rc == WH_ERROR_OK) { - rc = _DestroyObjectsUnlocked(context, list_count, id_list); - - (void)_UnlockNvm(context); + /* No callback? 
Return ABORTED */ + if (context->cb->DestroyObjects == NULL) { + return WH_ERROR_ABORTED; } - return rc; + return context->cb->DestroyObjects(context->context, list_count, id_list); } int wh_Nvm_DestroyObjectsChecked(whNvmContext* context, whNvmId list_count, const whNvmId* id_list) { + whNvmId i; int ret; - if ((context == NULL) || (context->cb == NULL)) { + if (id_list == NULL && list_count != 0) { return WH_ERROR_BADARGS; } - ret = _LockNvm(context); - if (ret == WH_ERROR_OK) { - ret = _Nvm_DestroyObjectsCheckedUnlocked(context, list_count, id_list); - - (void)_UnlockNvm(context); + for (i = 0; i < list_count; i++) { + ret = wh_Nvm_CheckPolicy(context, WH_NVM_OP_DESTROY, id_list[i], NULL); + if (ret != WH_ERROR_OK) { + return ret; + } } - return ret; + + return wh_Nvm_DestroyObjects(context, list_count, id_list); } int wh_Nvm_Read(whNvmContext* context, whNvmId id, whNvmSize offset, whNvmSize data_len, uint8_t* data) { - int rc; - - if ((context == NULL) || (context->cb == NULL)) { + if ( (context == NULL) || + (context->cb == NULL) ) { return WH_ERROR_BADARGS; } - rc = _LockNvm(context); - if (rc == WH_ERROR_OK) { - rc = _ReadUnlocked(context, id, offset, data_len, data); - - (void)_UnlockNvm(context); + /* No callback? Return ABORTED */ + if (context->cb->Read == NULL) { + return WH_ERROR_ABORTED; } - return rc; + return context->cb->Read(context->context, id, offset, data_len, data); } int wh_Nvm_ReadChecked(whNvmContext* context, whNvmId id, whNvmSize offset, @@ -488,127 +333,30 @@ int wh_Nvm_ReadChecked(whNvmContext* context, whNvmId id, whNvmSize offset, { int ret; - if ((context == NULL) || (context->cb == NULL)) { - return WH_ERROR_BADARGS; + ret = wh_Nvm_CheckPolicy(context, WH_NVM_OP_READ, id, NULL); + if (ret != WH_ERROR_OK) { + return ret; } - ret = _LockNvm(context); - if (ret == WH_ERROR_OK) { - ret = _Nvm_ReadCheckedUnlocked(context, id, offset, data_len, data); - - (void)_UnlockNvm(context); - } - return ret; + return wh_Nvm_Read(context, id, offset, data_len, data); } - #ifdef WOLFHSM_CFG_THREADSAFE -/* - * Library internal unlocked NVM functions. Exposes unlocked versions of NVM - * functionality for consumption elsewhere in compound operations to prevent - * recursive locking. These assume the caller already holds context->lock. 
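The "recursive locking" these now-removed helpers were guarding against is worth spelling out: under the previous design every public wh_Nvm_*() call acquired context->lock internally, so a compound operation that already held the lock could not call back into the public API. A contrived sketch, assuming the POSIX port's default (non-recursive) mutex:

rc = wh_Lock_Acquire(&server->nvm->lock);   /* compound op takes the lock */
if (rc == WH_ERROR_OK) {
    /* In the previous design wh_Nvm_GetMetadata() re-acquires the same lock
     * internally; on a default pthread mutex this deadlocks (or is undefined
     * behavior), which is why the *Unlocked() variants existed. */
    rc = wh_Nvm_GetMetadata(server->nvm, id, &meta);
    (void)wh_Lock_Release(&server->nvm->lock);
}

This refactor removes the hazard differently: the module APIs no longer lock at all, and the caller that holds server->nvm->lock simply calls them directly.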
- * - * If WOLFHSM_CFG_THREADSAFE is not defined, these functions are redirected - * to their regular public-facing (locking) counterparts in wh_nvm_internal.h - */ - -int wh_Nvm_GetMetadataUnlocked(whNvmContext* context, whNvmId id, - whNvmMetadata* meta) +int wh_Nvm_Lock(whNvmContext* nvm) { - if ((context == NULL) || (context->cb == NULL)) { + if (nvm == NULL) { return WH_ERROR_BADARGS; } - - return _GetMetadataUnlocked(context, id, meta); + return wh_Lock_Acquire(&nvm->lock); } -int wh_Nvm_ReadUnlocked(whNvmContext* context, whNvmId id, whNvmSize offset, - whNvmSize data_len, uint8_t* data) +int wh_Nvm_Unlock(whNvmContext* nvm) { - if ((context == NULL) || (context->cb == NULL)) { + if (nvm == NULL) { return WH_ERROR_BADARGS; } - - return _ReadUnlocked(context, id, offset, data_len, data); -} - -int wh_Nvm_AddObjectWithReclaimUnlocked(whNvmContext* context, - whNvmMetadata* meta, whNvmSize dataLen, - const uint8_t* data) -{ - int ret; - uint32_t availableSize; - uint32_t reclaimSize; - uint16_t availableObjects; - uint16_t reclaimObjects; - - if ((context == NULL) || (context->cb == NULL)) { - return WH_ERROR_BADARGS; - } - - /* Check if we have available object and data space */ - ret = _GetAvailableUnlocked(context, &availableSize, &availableObjects, - &reclaimSize, &reclaimObjects); - if (ret == 0) { - if ((availableSize < dataLen) || (availableObjects == 0)) { - /* No available space, try to reclaim */ - if ((availableSize + reclaimSize >= dataLen) && - (availableObjects + reclaimObjects > 0)) { - ret = _DestroyObjectsUnlocked(context, 0, NULL); - } - else { - ret = WH_ERROR_NOSPACE; - } - } - } - if (ret == 0) { - ret = _AddObjectUnlocked(context, meta, dataLen, data); - } - - return ret; -} - -int wh_Nvm_DestroyObjectsUnlocked(whNvmContext* context, whNvmId list_count, - const whNvmId* id_list) -{ - if ((context == NULL) || (context->cb == NULL)) { - return WH_ERROR_BADARGS; - } - - return _DestroyObjectsUnlocked(context, list_count, id_list); -} - -int wh_Nvm_AddObjectCheckedUnlocked(whNvmContext* context, whNvmMetadata* meta, - whNvmSize data_len, const uint8_t* data) -{ - if ((context == NULL) || (context->cb == NULL)) { - return WH_ERROR_BADARGS; - } - - return _Nvm_AddObjectCheckedUnlocked(context, meta, data_len, data); -} - -int wh_Nvm_DestroyObjectsCheckedUnlocked(whNvmContext* context, - whNvmId list_count, - const whNvmId* id_list) -{ - if ((context == NULL) || (context->cb == NULL)) { - return WH_ERROR_BADARGS; - } - - return _Nvm_DestroyObjectsCheckedUnlocked(context, list_count, id_list); -} - -int wh_Nvm_ReadCheckedUnlocked(whNvmContext* context, whNvmId id, - whNvmSize offset, whNvmSize data_len, - uint8_t* data) -{ - if ((context == NULL) || (context->cb == NULL)) { - return WH_ERROR_BADARGS; - } - - return _Nvm_ReadCheckedUnlocked(context, id, offset, data_len, data); + return wh_Lock_Release(&nvm->lock); } #endif /* WOLFHSM_CFG_THREADSAFE */ diff --git a/src/wh_server.c b/src/wh_server.c index 83df8acbf..8426fb653 100644 --- a/src/wh_server.c +++ b/src/wh_server.c @@ -463,4 +463,22 @@ int wh_Server_HandleRequestMessage(whServerContext* server) return rc; } +#ifdef WOLFHSM_CFG_THREADSAFE +int wh_Server_NvmLock(whServerContext* server) +{ + if (server == NULL || server->nvm == NULL) { + return WH_ERROR_BADARGS; + } + return wh_Lock_Acquire(&server->nvm->lock); +} + +int wh_Server_NvmUnlock(whServerContext* server) +{ + if (server == NULL || server->nvm == NULL) { + return WH_ERROR_BADARGS; + } + return wh_Lock_Release(&server->nvm->lock); +} +#endif /* 
WOLFHSM_CFG_THREADSAFE */ + #endif /* WOLFHSM_CFG_ENABLE_SERVER */ diff --git a/src/wh_server_cert.c b/src/wh_server_cert.c index b5333956a..5b00e18fa 100644 --- a/src/wh_server_cert.c +++ b/src/wh_server_cert.c @@ -35,7 +35,6 @@ #include "wolfhsm/wh_server_cert.h" #include "wolfhsm/wh_server_nvm.h" #include "wolfhsm/wh_server_keystore.h" -#include "wolfhsm/wh_nvm_internal.h" #include "wolfhsm/wh_message.h" #include "wolfhsm/wh_message_cert.h" @@ -43,27 +42,6 @@ #include "wolfssl/ssl.h" #include "wolfssl/wolfcrypt/asn.h" -/* Helper functions for NVM locking */ -#ifdef WOLFHSM_CFG_THREADSAFE -static int _LockNvm(whServerContext* server) -{ - if (server->nvm != NULL) { - return wh_Lock_Acquire(&server->nvm->lock); - } - return WH_ERROR_OK; -} - -static int _UnlockNvm(whServerContext* server) -{ - if (server->nvm != NULL) { - return wh_Lock_Release(&server->nvm->lock); - } - return WH_ERROR_OK; -} -#else -#define _LockNvm(server) (WH_ERROR_OK) -#define _UnlockNvm(server) (WH_ERROR_OK) -#endif /* WOLFHSM_CFG_THREADSAFE */ static int _verifyChainAgainstCmStore(whServerContext* server, WOLFSSL_CERT_MANAGER* cm, @@ -248,10 +226,9 @@ int wh_Server_CertEraseTrusted(whServerContext* server, whNvmId id) return rc; } -/* Get a trusted certificate from NVM storage (unlocked variant) - * Assumes caller holds server->nvm->lock */ -static int _CertReadTrustedUnlocked(whServerContext* server, whNvmId id, - uint8_t* cert, uint32_t* inout_cert_len) +/* Get a trusted certificate from NVM storage */ +int wh_Server_CertReadTrusted(whServerContext* server, whNvmId id, + uint8_t* cert, uint32_t* inout_cert_len) { int rc; whNvmMetadata meta; @@ -261,8 +238,9 @@ static int _CertReadTrustedUnlocked(whServerContext* server, whNvmId id, return WH_ERROR_BADARGS; } + /* Get metadata to check the certificate size */ - rc = wh_Nvm_GetMetadataUnlocked(server->nvm, id, &meta); + rc = wh_Nvm_GetMetadata(server->nvm, id, &meta); if (rc != 0) { return rc; } @@ -276,22 +254,7 @@ static int _CertReadTrustedUnlocked(whServerContext* server, whNvmId id, * be reflected back to the user on length mismatch failure */ *inout_cert_len = meta.len; - return wh_Nvm_ReadUnlocked(server->nvm, id, 0, meta.len, cert); -} - -/* Get a trusted certificate from NVM storage */ -int wh_Server_CertReadTrusted(whServerContext* server, whNvmId id, - uint8_t* cert, uint32_t* inout_cert_len) -{ - int rc; - - /* Acquire lock for atomic GetMetadata + Read */ - rc = _LockNvm(server); - if (rc == WH_ERROR_OK) { - rc = _CertReadTrustedUnlocked(server, id, cert, inout_cert_len); - (void)_UnlockNvm(server); - } - return rc; + return wh_Nvm_Read(server->nvm, id, 0, meta.len, cert); } /* Verify a certificate against trusted certificates */ @@ -451,9 +414,13 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic, cert_data = (const uint8_t*)req_packet + sizeof(req); /* Process the add trusted action */ - rc = wh_Server_CertAddTrusted(server, req.id, req.access, req.flags, - req.label, WH_NVM_LABEL_LEN, - cert_data, req.cert_len); + rc = WH_SERVER_NVM_LOCK(server); + if (rc == WH_ERROR_OK) { + rc = wh_Server_CertAddTrusted( + server, req.id, req.access, req.flags, req.label, + WH_NVM_LABEL_LEN, cert_data, req.cert_len); + (void)WH_SERVER_NVM_UNLOCK(server); + } resp.rc = rc; /* Convert the response struct */ @@ -471,7 +438,11 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic, magic, (whMessageCert_EraseTrustedRequest*)req_packet, &req); /* Process the delete trusted action */ - rc = wh_Server_CertEraseTrusted(server, 
req.id); + rc = WH_SERVER_NVM_LOCK(server); + if (rc == WH_ERROR_OK) { + rc = wh_Server_CertEraseTrusted(server, req.id); + (void)WH_SERVER_NVM_UNLOCK(server); + } resp.rc = rc; /* Convert the response struct */ @@ -500,35 +471,26 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic, ? max_transport_cert_len : WOLFHSM_CFG_MAX_CERT_SIZE; - /* Acquire lock for atomic flag check + read */ - rc = _LockNvm(server); + /* Check metadata to check if the certificate is non-exportable. + * This is unfortunately redundant since metadata is checked in + * wh_Server_CertReadTrusted(). */ + rc = WH_SERVER_NVM_LOCK(server); if (rc == WH_ERROR_OK) { - /* Check metadata to check if the certificate is - * non-exportable */ - rc = wh_Nvm_GetMetadataUnlocked(server->nvm, req.id, &meta); + rc = wh_Nvm_GetMetadata(server->nvm, req.id, &meta); if (rc == WH_ERROR_OK) { /* Check if the certificate is non-exportable */ if (meta.flags & WH_NVM_FLAGS_NONEXPORTABLE) { - resp.rc = WH_ERROR_ACCESS; - resp.cert_len = 0; + rc = WH_ERROR_ACCESS; } else { - rc = _CertReadTrustedUnlocked(server, req.id, cert_data, - &cert_len); - resp.rc = rc; + rc = wh_Server_CertReadTrusted(server, req.id, + cert_data, &cert_len); resp.cert_len = cert_len; } } - else { - resp.rc = rc; - resp.cert_len = 0; - } - (void)_UnlockNvm(server); - } - else { - resp.rc = rc; - resp.cert_len = 0; + (void)WH_SERVER_NVM_UNLOCK(server); } + resp.rc = rc; /* Convert the response struct */ wh_MessageCert_TranslateReadTrustedResponse( @@ -557,14 +519,19 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic, whKeyId keyId = wh_KeyId_TranslateFromClient( WH_KEYTYPE_CRYPTO, server->comm->client_id, req.keyId); - /* Process the verify action */ - resp.rc = wh_Server_CertVerify(server, cert_data, req.cert_len, - req.trustedRootNvmId, req.flags, - req.cachedKeyFlags, &keyId); - /* Propagate the keyId back to the client with flags preserved - */ - resp.keyId = wh_KeyId_TranslateToClient(keyId); + rc = WH_SERVER_NVM_LOCK(server); + if (rc == WH_ERROR_OK) { + /* Process the verify action */ + rc = wh_Server_CertVerify(server, cert_data, req.cert_len, + req.trustedRootNvmId, req.flags, + req.cachedKeyFlags, &keyId); + /* Propagate the keyId back to the client with flags + * preserved */ + resp.keyId = wh_KeyId_TranslateToClient(keyId); + (void)WH_SERVER_NVM_UNLOCK(server); + } + resp.rc = rc; } /* Convert the response struct */ @@ -596,9 +563,14 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic, } if (resp.rc == WH_ERROR_OK) { /* Process the add trusted action */ - resp.rc = wh_Server_CertAddTrusted( - server, req.id, req.access, req.flags, req.label, - WH_NVM_LABEL_LEN, cert_data, req.cert_len); + resp.rc = WH_SERVER_NVM_LOCK(server); + if (resp.rc == WH_ERROR_OK) { + resp.rc = wh_Server_CertAddTrusted( + server, req.id, req.access, req.flags, req.label, + WH_NVM_LABEL_LEN, cert_data, req.cert_len); + + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ } if (resp.rc == WH_ERROR_OK) { /* Post-process client address */ @@ -636,13 +608,10 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic, WH_DMA_OPER_CLIENT_WRITE_PRE, (whServerDmaFlags){0}); } if (resp.rc == WH_ERROR_OK) { - /* Acquire lock for atomic flag check + read */ - resp.rc = _LockNvm(server); + /* Check metadata to see if the certificate is non-exportable */ + resp.rc = WH_SERVER_NVM_LOCK(server); if (resp.rc == WH_ERROR_OK) { - /* Check metadata to see if the certificate is - * 
non-exportable */ - resp.rc = - wh_Nvm_GetMetadataUnlocked(server->nvm, req.id, &meta); + resp.rc = wh_Nvm_GetMetadata(server->nvm, req.id, &meta); if (resp.rc == WH_ERROR_OK) { if ((meta.flags & WH_NVM_FLAGS_NONEXPORTABLE) != 0) { resp.rc = WH_ERROR_ACCESS; @@ -650,12 +619,13 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic, else { /* Clamp cert_len to actual stored length */ cert_len = req.cert_len; - resp.rc = _CertReadTrustedUnlocked( + resp.rc = wh_Server_CertReadTrusted( server, req.id, cert_data, &cert_len); } } - (void)_UnlockNvm(server); - } + + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ } if (resp.rc == WH_ERROR_OK) { /* Post-process client address */ @@ -671,9 +641,10 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic, }; break; case WH_MESSAGE_CERT_ACTION_VERIFY_DMA: { - whMessageCert_VerifyDmaRequest req = {0}; + whMessageCert_VerifyDmaRequest req = {0}; whMessageCert_VerifyDmaResponse resp = {0}; - void* cert_data = NULL; + void* cert_data = NULL; + whKeyId keyId = WH_KEYID_ERASED; if (req_size != sizeof(req)) { /* Request is malformed */ @@ -684,24 +655,29 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic, wh_MessageCert_TranslateVerifyDmaRequest( magic, (whMessageCert_VerifyDmaRequest*)req_packet, &req); + /* Map client keyId to server keyId space */ + keyId = wh_KeyId_TranslateFromClient( + WH_KEYTYPE_CRYPTO, server->comm->client_id, req.keyId); + /* Process client address */ resp.rc = wh_Server_DmaProcessClientAddress( server, req.cert_addr, &cert_data, req.cert_len, WH_DMA_OPER_CLIENT_READ_PRE, (whServerDmaFlags){0}); } if (resp.rc == WH_ERROR_OK) { - /* Map client keyId to server keyId space */ - whKeyId keyId = wh_KeyId_TranslateFromClient( - WH_KEYTYPE_CRYPTO, server->comm->client_id, req.keyId); + resp.rc = WH_SERVER_NVM_LOCK(server); + if (resp.rc == WH_ERROR_OK) { + /* Process the verify action */ + resp.rc = wh_Server_CertVerify( + server, cert_data, req.cert_len, req.trustedRootNvmId, + req.flags, req.cachedKeyFlags, &keyId); - /* Process the verify action */ - resp.rc = wh_Server_CertVerify(server, cert_data, req.cert_len, - req.trustedRootNvmId, req.flags, - req.cachedKeyFlags, &keyId); + /* Propagate the keyId back to the client with flags + * preserved */ + resp.keyId = wh_KeyId_TranslateToClient(keyId); - /* Propagate the keyId back to the client with flags preserved - */ - resp.keyId = wh_KeyId_TranslateToClient(keyId); + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ } if (resp.rc == WH_ERROR_OK) { /* Post-process client address */ @@ -730,8 +706,13 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic, cert_data = (const uint8_t*)req_packet + sizeof(req); /* Process the verify action */ - rc = wh_Server_CertVerifyAcert(server, cert_data, req.cert_len, - req.trustedRootNvmId); + rc = WH_SERVER_NVM_LOCK(server); + if (rc == WH_ERROR_OK) { + rc = wh_Server_CertVerifyAcert(server, cert_data, req.cert_len, + req.trustedRootNvmId); + + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ /* Signature confirmation error is not an error for the server, so * propagate this error to the client in the response, otherwise @@ -773,8 +754,14 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic, } if (rc == WH_ERROR_OK) { /* Process the verify action */ - rc = wh_Server_CertVerifyAcert(server, cert_data, req.cert_len, - req.trustedRootNvmId); + rc = 
WH_SERVER_NVM_LOCK(server); + if (rc == WH_ERROR_OK) { + rc = wh_Server_CertVerifyAcert( + server, cert_data, req.cert_len, req.trustedRootNvmId); + + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + /* Signature confirmation error is not an error for the server, * so propagate this error to the client in the response, * otherwise return the error code from the verify action */ diff --git a/src/wh_server_counter.c b/src/wh_server_counter.c index e48bd7f9c..f883dbdfb 100644 --- a/src/wh_server_counter.c +++ b/src/wh_server_counter.c @@ -34,32 +34,9 @@ #include "wolfhsm/wh_message.h" #include "wolfhsm/wh_message_counter.h" #include "wolfhsm/wh_server.h" -#include "wolfhsm/wh_nvm_internal.h" #include "wolfhsm/wh_server_counter.h" -/* Helper functions for NVM locking */ -#ifdef WOLFHSM_CFG_THREADSAFE -static int _LockNvm(whServerContext* server) -{ - if (server->nvm != NULL) { - return wh_Lock_Acquire(&server->nvm->lock); - } - return WH_ERROR_OK; -} - -static int _UnlockNvm(whServerContext* server) -{ - if (server->nvm != NULL) { - return wh_Lock_Release(&server->nvm->lock); - } - return WH_ERROR_OK; -} -#else -#define _LockNvm(server) (WH_ERROR_OK) -#define _UnlockNvm(server) (WH_ERROR_OK) -#endif /* WOLFHSM_CFG_THREADSAFE */ - int wh_Server_HandleCounter(whServerContext* server, uint16_t magic, uint16_t action, uint16_t req_size, const void* req_packet, uint16_t* out_resp_size, @@ -98,10 +75,16 @@ int wh_Server_HandleCounter(whServerContext* server, uint16_t magic, (uint16_t)req.counterId); /* use the label buffer to hold the counter value */ *counter = req.counter; - ret = wh_Nvm_AddObjectWithReclaim(server->nvm, meta, 0, NULL); + + ret = WH_SERVER_NVM_LOCK(server); if (ret == WH_ERROR_OK) { - resp.counter = *counter; - } + ret = wh_Nvm_AddObjectWithReclaim(server->nvm, meta, 0, NULL); + if (ret == WH_ERROR_OK) { + resp.counter = *counter; + } + + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ resp.rc = ret; (void)wh_MessageCounter_TranslateInitResponse( @@ -127,16 +110,15 @@ int wh_Server_HandleCounter(whServerContext* server, uint16_t magic, (void)wh_MessageCounter_TranslateIncrementRequest( magic, (whMessageCounter_IncrementRequest*)req_packet, &req); - counterId = WH_MAKE_KEYID(WH_KEYTYPE_COUNTER, - (uint16_t)server->comm->client_id, - (uint16_t)req.counterId); - - /* Acquire lock for atomic read-modify-write */ - ret = _LockNvm(server); + ret = WH_SERVER_NVM_LOCK(server); if (ret == WH_ERROR_OK) { /* read the counter, stored in the metadata label */ - ret = wh_Nvm_GetMetadataUnlocked(server->nvm, counterId, meta); - resp.rc = ret; + ret = wh_Nvm_GetMetadata( + server->nvm, + WH_MAKE_KEYID(WH_KEYTYPE_COUNTER, + (uint16_t)server->comm->client_id, + (uint16_t)req.counterId), + meta); /* increment and write the counter back */ if (ret == WH_ERROR_OK) { @@ -147,9 +129,8 @@ int wh_Server_HandleCounter(whServerContext* server, uint16_t magic, } /* only update if we didn't saturate */ else { - ret = wh_Nvm_AddObjectWithReclaimUnlocked( - server->nvm, meta, 0, NULL); - resp.rc = ret; + ret = wh_Nvm_AddObjectWithReclaim(server->nvm, meta, 0, + NULL); } } @@ -157,12 +138,10 @@ int wh_Server_HandleCounter(whServerContext* server, uint16_t magic, if (ret == WH_ERROR_OK) { resp.counter = *counter; } - /* Release lock */ - (void)_UnlockNvm(server); - } - else { - resp.rc = ret; - } + + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + resp.rc = ret; 
(void)wh_MessageCounter_TranslateIncrementResponse( magic, &resp, (whMessageCounter_IncrementResponse*)resp_packet); @@ -186,19 +165,24 @@ int wh_Server_HandleCounter(whServerContext* server, uint16_t magic, (void)wh_MessageCounter_TranslateReadRequest( magic, (whMessageCounter_ReadRequest*)req_packet, &req); - /* read the counter, stored in the metadata label */ - ret = wh_Nvm_GetMetadata( - server->nvm, - WH_MAKE_KEYID(WH_KEYTYPE_COUNTER, - (uint16_t)server->comm->client_id, - (uint16_t)req.counterId), - meta); - resp.rc = ret; - - /* return counter to the caller */ + ret = WH_SERVER_NVM_LOCK(server); if (ret == WH_ERROR_OK) { - resp.counter = *counter; - } + /* read the counter, stored in the metadata label */ + ret = wh_Nvm_GetMetadata( + server->nvm, + WH_MAKE_KEYID(WH_KEYTYPE_COUNTER, + (uint16_t)server->comm->client_id, + (uint16_t)req.counterId), + meta); + + /* return counter to the caller */ + if (ret == WH_ERROR_OK) { + resp.counter = *counter; + } + + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + resp.rc = ret; (void)wh_MessageCounter_TranslateReadResponse( magic, &resp, (whMessageCounter_ReadResponse*)resp_packet); @@ -227,7 +211,12 @@ int wh_Server_HandleCounter(whServerContext* server, uint16_t magic, (uint16_t)server->comm->client_id, (uint16_t)req.counterId); - ret = wh_Nvm_DestroyObjects(server->nvm, 1, &counterId); + ret = WH_SERVER_NVM_LOCK(server); + if (ret == WH_ERROR_OK) { + ret = wh_Nvm_DestroyObjects(server->nvm, 1, &counterId); + + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ resp.rc = ret; (void)wh_MessageCounter_TranslateDestroyResponse( diff --git a/src/wh_server_img_mgr.c b/src/wh_server_img_mgr.c index 9c9c1519a..9a32c1093 100644 --- a/src/wh_server_img_mgr.c +++ b/src/wh_server_img_mgr.c @@ -37,7 +37,6 @@ #include "wolfhsm/wh_server_img_mgr.h" #include "wolfhsm/wh_server_keystore.h" #include "wolfhsm/wh_nvm.h" -#include "wolfhsm/wh_nvm_internal.h" #ifndef WOLFHSM_CFG_NO_CRYPTO #include "wolfssl/wolfcrypt/settings.h" @@ -51,28 +50,6 @@ #include "wolfssl/wolfcrypt/rsa.h" #endif -/* Helper functions for NVM locking */ -#ifdef WOLFHSM_CFG_THREADSAFE -static int _LockNvm(whServerContext* server) -{ - if (server->nvm != NULL) { - return wh_Lock_Acquire(&server->nvm->lock); - } - return WH_ERROR_OK; -} - -static int _UnlockNvm(whServerContext* server) -{ - if (server->nvm != NULL) { - return wh_Lock_Release(&server->nvm->lock); - } - return WH_ERROR_OK; -} -#else -#define _LockNvm(server) (WH_ERROR_OK) -#define _UnlockNvm(server) (WH_ERROR_OK) -#endif /* WOLFHSM_CFG_THREADSAFE */ - int wh_Server_ImgMgrInit(whServerImgMgrContext* context, const whServerImgMgrConfig* config) { @@ -136,28 +113,21 @@ int wh_Server_ImgMgrVerifyImg(whServerImgMgrContext* context, } /* Load the signature from NVM */ - /* Acquire lock for atomic GetMetadata + Read */ - ret = _LockNvm(server); - if (ret == WH_ERROR_OK) { - ret = wh_Nvm_GetMetadataUnlocked(server->nvm, img->sigNvmId, &sigMeta); - if (ret == WH_ERROR_OK) { - /* Ensure signature fits in buffer */ - if (sigMeta.len > sigSize) { - ret = WH_ERROR_BADARGS; - } - else { - ret = wh_Nvm_ReadUnlocked(server->nvm, img->sigNvmId, 0, - sigMeta.len, sigBuf); - if (ret == WH_ERROR_OK) { - actualSigSize = sigMeta.len; - } - } - } - (void)_UnlockNvm(server); + ret = wh_Nvm_GetMetadata(server->nvm, img->sigNvmId, &sigMeta); + if (ret != WH_ERROR_OK) { + return ret; } + + /* Ensure signature fits in buffer */ + if (sigMeta.len > sigSize) { + return WH_ERROR_BADARGS; + 
} + + ret = wh_Nvm_Read(server->nvm, img->sigNvmId, 0, sigMeta.len, sigBuf); if (ret != WH_ERROR_OK) { return ret; } + actualSigSize = sigMeta.len; /* Invoke verify method callback */ if (img->verifyMethod != NULL) { diff --git a/src/wh_server_keystore.c b/src/wh_server_keystore.c index 9c6cec7a4..dbf62c33b 100644 --- a/src/wh_server_keystore.c +++ b/src/wh_server_keystore.c @@ -19,23 +19,6 @@ /* * src/wh_server_keystore.c * - * LOCKING DISCIPLINE FOR COMPOUND OPERATIONS: - * - * All compound operations (those involving multiple cache/NVM accesses) MUST: - * 1. Acquire server->nvm->lock at the start using _LockKeystore() - * 2. Use only *Unlocked() variants of NVM/cache functions internally - * 3. Keep lock held for entire operation, including DMA transfers - * 4. Release lock only after all metadata-dependent operations complete - * - * Rationale: Releasing the lock mid-operation creates TOCTOU (Time-Of-Check- - * Time-Of-Use) vulnerabilities where metadata can become stale or objects can - * be destroyed/replaced between checks. DMA operations that depend on cached - * metadata MUST occur under the same lock that validated that metadata. - * - * Examples of correct implementations in this file: - * - wh_Server_KeystoreCacheKeyDmaChecked() (lines 2518-2542) - * - wh_Server_KeystoreExportKeyDmaChecked() (lines 2579-2616) - * - wh_Server_KeystoreRevokeKey() (lines 1179-1231) */ /* Pick up compile-time configuration */ @@ -59,7 +42,6 @@ #include "wolfhsm/wh_utils.h" #include "wolfhsm/wh_server.h" #include "wolfhsm/wh_log.h" -#include "wolfhsm/wh_nvm_internal.h" #ifdef WOLFHSM_CFG_SHE_EXTENSION #include "wolfhsm/wh_server_she.h" @@ -67,10 +49,9 @@ #include "wolfhsm/wh_server_keystore.h" - -static int _FindInCacheUnlocked(whServerContext* server, whKeyId keyId, - int* out_index, int* out_big, - uint8_t** out_buffer, whNvmMetadata** out_meta); +static int _FindInCache(whServerContext* server, whKeyId keyId, int* out_index, + int* out_big, uint8_t** out_buffer, + whNvmMetadata** out_meta); #ifdef WOLFHSM_CFG_GLOBAL_KEYS /* @@ -82,32 +63,6 @@ static int _IsGlobalKey(whKeyId keyId) } #endif /* WOLFHSM_CFG_GLOBAL_KEYS */ -/* - * Thread-safe locking helpers for keystore operations. - * Use the unified NVM lock to protect both NVM and cache access. - * These are no-ops when WOLFHSM_CFG_THREADSAFE is not defined. 
- */ -#ifdef WOLFHSM_CFG_THREADSAFE -static int _LockKeystore(whServerContext* server) -{ - if (server->nvm != NULL) { - return wh_Lock_Acquire(&server->nvm->lock); - } - return WH_ERROR_OK; -} - -static int _UnlockKeystore(whServerContext* server) -{ - if (server->nvm != NULL) { - return wh_Lock_Release(&server->nvm->lock); - } - return WH_ERROR_OK; -} -#else -#define _LockKeystore(server) (WH_ERROR_OK) -#define _UnlockKeystore(server) (WH_ERROR_OK) -#endif /* WOLFHSM_CFG_THREADSAFE */ - /* * @brief Get the appropriate cache context based on keyId * @@ -136,14 +91,14 @@ typedef enum { WH_KS_OP_REVOKE, } whKsOp; -static int _KeyIsCommittedUnlocked(whServerContext* server, whKeyId keyId) +static int _KeyIsCommitted(whServerContext* server, whKeyId keyId) { int ret; int big; int index; whKeyCacheContext* ctx = _GetCacheContext(server, keyId); - ret = _FindInCacheUnlocked(server, keyId, &index, &big, NULL, NULL); + ret = _FindInCache(server, keyId, &index, &big, NULL, NULL); if (ret != WH_ERROR_OK) { return 0; } @@ -155,13 +110,10 @@ static int _KeyIsCommittedUnlocked(whServerContext* server, whKeyId keyId) return ctx->bigCache[index].committed; } } - /* Centralized cache/NVM policy: enforce NONMODIFIABLE/NONEXPORTABLE at the - * keystore layer. Usage enforcement remains separate. - * - * This is an unlocked function - caller must hold the keystore lock. */ -static int _KeystoreCheckPolicyUnlocked(whServerContext* server, whKsOp op, - whKeyId keyId) + * keystore layer. Usage enforcement remains separate. */ +static int _KeystoreCheckPolicy(whServerContext* server, whKsOp op, + whKeyId keyId) { whNvmMetadata* cacheMeta = NULL; whNvmMetadata nvmMeta; @@ -175,7 +127,7 @@ static int _KeystoreCheckPolicyUnlocked(whServerContext* server, whKsOp op, } /* Check cache first */ - ret = _FindInCacheUnlocked(server, keyId, NULL, NULL, NULL, &cacheMeta); + ret = _FindInCache(server, keyId, NULL, NULL, NULL, &cacheMeta); if (ret == WH_ERROR_OK && cacheMeta != NULL) { foundInCache = 1; } @@ -185,7 +137,7 @@ static int _KeystoreCheckPolicyUnlocked(whServerContext* server, whKsOp op, /* Check NVM if not in cache */ if (!foundInCache) { - ret = wh_Nvm_GetMetadataUnlocked(server->nvm, keyId, &nvmMeta); + ret = wh_Nvm_GetMetadata(server->nvm, keyId, &nvmMeta); if (ret == WH_ERROR_OK) { foundInNvm = 1; } @@ -210,7 +162,7 @@ static int _KeystoreCheckPolicyUnlocked(whServerContext* server, whKsOp op, break; case WH_KS_OP_EVICT: - if (_KeyIsCommittedUnlocked(server, keyId)) { + if (_KeyIsCommitted(server, keyId)) { /* Committed keys can always be evicted */ break; } @@ -237,14 +189,12 @@ static int _KeystoreCheckPolicyUnlocked(whServerContext* server, whKsOp op, return WH_ERROR_OK; } - /** * @brief Find a key in the specified cache context */ -static int _FindInKeyCacheUnlocked(whKeyCacheContext* ctx, whKeyId keyId, - int* out_index, int* out_big, - uint8_t** out_buffer, - whNvmMetadata** out_meta) +static int _FindInKeyCache(whKeyCacheContext* ctx, whKeyId keyId, + int* out_index, int* out_big, uint8_t** out_buffer, + whNvmMetadata** out_meta) { int ret = WH_ERROR_NOTFOUND; int i; @@ -303,8 +253,8 @@ static int _EvictSlot(uint8_t* buf, whNvmMetadata* meta) /** * @brief Get an available cache slot from the specified cache context */ -static int _GetKeyCacheSlotUnlocked(whKeyCacheContext* ctx, uint16_t keySz, - uint8_t** outBuf, whNvmMetadata** outMeta) +static int _GetKeyCacheSlot(whKeyCacheContext* ctx, uint16_t keySz, + uint8_t** outBuf, whNvmMetadata** outMeta) { int foundIndex = -1; int i; @@ -397,13 +347,12 @@ 
static int _GetKeyCacheSlotUnlocked(whKeyCacheContext* ctx, uint16_t keySz, * @brief Evict a key from the specified cache context * zeroes the buffer */ -static int _EvictKeyFromCacheUnlocked(whKeyCacheContext* ctx, whKeyId keyId) +static int _EvictKeyFromCache(whKeyCacheContext* ctx, whKeyId keyId) { whNvmMetadata* meta = NULL; uint8_t* outBuffer = NULL; - int ret = - _FindInKeyCacheUnlocked(ctx, keyId, NULL, NULL, &outBuffer, &meta); + int ret = _FindInKeyCache(ctx, keyId, NULL, NULL, &outBuffer, &meta); if (ret == WH_ERROR_OK && meta != NULL) { return _EvictSlot(outBuffer, meta); @@ -415,12 +364,12 @@ static int _EvictKeyFromCacheUnlocked(whKeyCacheContext* ctx, whKeyId keyId) /** * @brief Mark a cached key as committed */ -static int _MarkKeyCommittedUnlocked(whKeyCacheContext* ctx, whKeyId keyId, - int committed) +static int _MarkKeyCommitted(whKeyCacheContext* ctx, whKeyId keyId, + int committed) { int index = -1; int big = -1; - int ret = _FindInKeyCacheUnlocked(ctx, keyId, &index, &big, NULL, NULL); + int ret = _FindInKeyCache(ctx, keyId, &index, &big, NULL, NULL); if (ret == WH_ERROR_OK) { if (big == 0) { @@ -434,8 +383,7 @@ static int _MarkKeyCommittedUnlocked(whKeyCacheContext* ctx, whKeyId keyId, return ret; } -/* Unlocked variant - assumes caller holds server->nvm->lock */ -static int _GetUniqueIdUnlocked(whServerContext* server, whNvmId* inout_id) +int wh_Server_KeystoreGetUniqueId(whServerContext* server, whNvmId* inout_id) { int ret = WH_ERROR_OK; int found = 0; @@ -444,7 +392,7 @@ static int _GetUniqueIdUnlocked(whServerContext* server, whNvmId* inout_id) whKeyId key_id = *inout_id; int type = WH_KEYID_TYPE(key_id); int user = WH_KEYID_USER(key_id); - whNvmId buildId = WH_KEYID_ERASED; + whNvmId buildId; whKeyCacheContext* ctx = _GetCacheContext(server, key_id); @@ -460,7 +408,7 @@ static int _GetUniqueIdUnlocked(whServerContext* server, whNvmId* inout_id) buildId = WH_MAKE_KEYID(type, user, id); /* Check against cache keys using unified cache functions */ - ret = _FindInKeyCacheUnlocked(ctx, buildId, NULL, NULL, NULL, NULL); + ret = _FindInKeyCache(ctx, buildId, NULL, NULL, NULL, NULL); if (ret == WH_ERROR_OK) { /* Found in cache, try next ID */ continue; @@ -470,7 +418,7 @@ static int _GetUniqueIdUnlocked(whServerContext* server, whNvmId* inout_id) } /* Check if keyId exists in NVM */ - ret = wh_Nvm_GetMetadataUnlocked(server->nvm, buildId, NULL); + ret = wh_Nvm_GetMetadata(server->nvm, buildId, NULL); if (ret == WH_ERROR_NOTFOUND) { /* key doesn't exist in NVM, we found a candidate ID */ found = 1; @@ -491,50 +439,38 @@ static int _GetUniqueIdUnlocked(whServerContext* server, whNvmId* inout_id) return WH_ERROR_OK; } -int wh_Server_KeystoreGetUniqueId(whServerContext* server, whNvmId* inout_id) -{ - int ret; - - ret = _LockKeystore(server); - if (ret != WH_ERROR_OK) { - return ret; - } - - ret = _GetUniqueIdUnlocked(server, inout_id); - - (void)_UnlockKeystore(server); - return ret; -} - -/* Forward declarations for unlocked functions */ -static int _GetCacheSlotUnlocked(whServerContext* server, whKeyId keyId, - uint16_t keySz, uint8_t** outBuf, - whNvmMetadata** outMeta); -static int _GetCacheSlotCheckedUnlocked(whServerContext* server, whKeyId keyId, - uint16_t keySz, uint8_t** outBuf, - whNvmMetadata** outMeta); - /* find a slot to cache a key. 
If key is already there, is evicted first */ int wh_Server_KeystoreGetCacheSlot(whServerContext* server, whKeyId keyId, uint16_t keySz, uint8_t** outBuf, whNvmMetadata** outMeta) { - int ret; + whKeyCacheContext* ctx; + int ret; + int idx = -1; + int isBig = -1; + uint8_t* buf = NULL; + whNvmMetadata* foundMeta = NULL; if (server == NULL || (keySz > WOLFHSM_CFG_SERVER_KEYCACHE_BUFSIZE && keySz > WOLFHSM_CFG_SERVER_KEYCACHE_BIG_BUFSIZE)) { return WH_ERROR_BADARGS; } - ret = _LockKeystore(server); - if (ret != WH_ERROR_OK) { + + ret = _FindInCache(server, keyId, &idx, &isBig, &buf, &foundMeta); + if (ret == WH_ERROR_OK) { + /* Key is already cached; evict it first */ + ret = wh_Server_KeystoreEvictKey(server, keyId); + if (ret != WH_ERROR_OK) { + return ret; + } + } + else if (ret != WH_ERROR_NOTFOUND) { return ret; } - ret = _GetCacheSlotUnlocked(server, keyId, keySz, outBuf, outMeta); - - (void)_UnlockKeystore(server); - return ret; + ctx = _GetCacheContext(server, keyId); + return _GetKeyCacheSlot(ctx, keySz, outBuf, outMeta); } int wh_Server_KeystoreGetCacheSlotChecked(whServerContext* server, @@ -543,35 +479,19 @@ int wh_Server_KeystoreGetCacheSlotChecked(whServerContext* server, whNvmMetadata** outMeta) { int ret; - - ret = _LockKeystore(server); - if (ret != WH_ERROR_OK) { + ret = _KeystoreCheckPolicy(server, WH_KS_OP_CACHE, keyId); + if (ret != WH_ERROR_OK && ret != WH_ERROR_NOTFOUND) { return ret; } - - ret = _KeystoreCheckPolicyUnlocked(server, WH_KS_OP_CACHE, keyId); - if (ret == WH_ERROR_OK || ret == WH_ERROR_NOTFOUND) { - ret = _GetCacheSlotUnlocked(server, keyId, keySz, outBuf, outMeta); - } - - (void)_UnlockKeystore(server); - return ret; + return wh_Server_KeystoreGetCacheSlot(server, keyId, keySz, outBuf, + outMeta); } -/* Forward declarations for unlocked variants */ -#ifdef WOLFHSM_CFG_DMA -static int _KeystoreCacheKeyDmaUnlocked(whServerContext* server, - whNvmMetadata* meta, uint64_t keyAddr, - int checked); -#endif - -/* Unlocked variant - assumes caller holds server->nvm->lock */ -static int _KeystoreCacheKeyUnlocked(whServerContext* server, - whNvmMetadata* meta, uint8_t* in, - int checked) +static int _KeystoreCacheKey(whServerContext* server, whNvmMetadata* meta, + uint8_t* in, int checked) { - uint8_t* slotBuf = NULL; - whNvmMetadata* slotMeta = NULL; + uint8_t* slotBuf; + whNvmMetadata* slotMeta; int ret; /* make sure id is valid */ @@ -582,141 +502,75 @@ static int _KeystoreCacheKeyUnlocked(whServerContext* server, return WH_ERROR_BADARGS; } - /* Use unlocked variants - caller already holds lock */ if (checked) { - ret = _GetCacheSlotCheckedUnlocked(server, meta->id, meta->len, - &slotBuf, &slotMeta); + ret = wh_Server_KeystoreGetCacheSlotChecked(server, meta->id, meta->len, + &slotBuf, &slotMeta); } else { - ret = _GetCacheSlotUnlocked(server, meta->id, meta->len, &slotBuf, - &slotMeta); + ret = wh_Server_KeystoreGetCacheSlot(server, meta->id, meta->len, + &slotBuf, &slotMeta); } - if (ret == WH_ERROR_OK) { - memcpy(slotBuf, in, meta->len); - memcpy((uint8_t*)slotMeta, (uint8_t*)meta, sizeof(whNvmMetadata)); - - _MarkKeyCommittedUnlocked(_GetCacheContext(server, meta->id), meta->id, - 0); - - WH_DEBUG_SERVER_VERBOSE("hsmCacheKey: cached keyid=0x%X, len=%u\n", - meta->id, meta->len); - WH_DEBUG_VERBOSE_HEXDUMP("[server] cacheKey: key=", in, meta->len); - } - - return ret; -} - -static int _KeystoreCacheKeyLocked(whServerContext* server, whNvmMetadata* meta, - uint8_t* in, int checked) -{ - int ret; - - ret = _LockKeystore(server); if (ret != WH_ERROR_OK) { return 
ret; } - ret = _KeystoreCacheKeyUnlocked(server, meta, in, checked); + memcpy(slotBuf, in, meta->len); + memcpy((uint8_t*)slotMeta, (uint8_t*)meta, sizeof(whNvmMetadata)); + _MarkKeyCommitted(_GetCacheContext(server, meta->id), meta->id, 0); - (void)_UnlockKeystore(server); + WH_DEBUG_SERVER_VERBOSE("hsmCacheKey: cached keyid=0x%X, len=%u\n", + meta->id, meta->len); + WH_DEBUG_VERBOSE_HEXDUMP("[server] cacheKey: key=", in, meta->len); - return ret; + return WH_ERROR_OK; } int wh_Server_KeystoreCacheKey(whServerContext* server, whNvmMetadata* meta, uint8_t* in) { - return _KeystoreCacheKeyLocked(server, meta, in, 0); + return _KeystoreCacheKey(server, meta, in, 0); } int wh_Server_KeystoreCacheKeyChecked(whServerContext* server, whNvmMetadata* meta, uint8_t* in) { - return _KeystoreCacheKeyLocked(server, meta, in, 1); + return _KeystoreCacheKey(server, meta, in, 1); } -static int _FindInCacheUnlocked(whServerContext* server, whKeyId keyId, - int* out_index, int* out_big, - uint8_t** out_buffer, whNvmMetadata** out_meta) +static int _FindInCache(whServerContext* server, whKeyId keyId, int* out_index, + int* out_big, uint8_t** out_buffer, + whNvmMetadata** out_meta) { whKeyCacheContext* ctx = _GetCacheContext(server, keyId); - return _FindInKeyCacheUnlocked(ctx, keyId, out_index, out_big, out_buffer, - out_meta); -} - -/* - * Internal unlocked keystore functions. - * These assume the caller already holds server->nvm->lock for global keys. - * For use when performing atomic multi-step operations. - * When WOLFHSM_CFG_THREADSAFE is not defined, these use the regular NVM - * functions via macros defined in wh_nvm.h. - */ - -/* Unlocked version of wh_Server_KeystoreEvictKey. - * Uses _EvictKeyFromCacheUnlocked which doesn't need the NVM lock. */ -static int _EvictKeyUnlocked(whServerContext* server, whNvmId keyId) -{ - whKeyCacheContext* ctx; - - if ((server == NULL) || WH_KEYID_ISERASED(keyId)) { - return WH_ERROR_BADARGS; - } - - ctx = _GetCacheContext(server, keyId); - return _EvictKeyFromCacheUnlocked(ctx, keyId); + return _FindInKeyCache(ctx, keyId, out_index, out_big, out_buffer, + out_meta); } -/* Unlocked version of wh_Server_KeystoreGetCacheSlot. - * Uses _EvictKeyUnlocked instead of the locked public version. */ -static int _GetCacheSlotUnlocked(whServerContext* server, whKeyId keyId, - uint16_t keySz, uint8_t** outBuf, - whNvmMetadata** outMeta) +#ifdef WOLFHSM_CFG_KEYWRAP +static int _ExistsInCache(whServerContext* server, whKeyId keyId) { - whKeyCacheContext* ctx; - int ret; - int idx = -1; - int isBig = -1; - uint8_t* buf = NULL; - whNvmMetadata* foundMeta = NULL; + int ret = 0; + int foundIndex = -1; + int foundBigIndex = -1; + whNvmMetadata* tmpMeta; + uint8_t* tmpBuf; - if (server == NULL || (keySz > WOLFHSM_CFG_SERVER_KEYCACHE_BUFSIZE && - keySz > WOLFHSM_CFG_SERVER_KEYCACHE_BIG_BUFSIZE)) { - return WH_ERROR_BADARGS; - } + ret = _FindInCache(server, keyId, &foundIndex, &foundBigIndex, &tmpBuf, + &tmpMeta); - ret = _FindInCacheUnlocked(server, keyId, &idx, &isBig, &buf, &foundMeta); - if (ret == WH_ERROR_OK) { - /* Key is already cached; evict it first */ - ret = _EvictKeyUnlocked(server, keyId); - if (ret != WH_ERROR_OK) { - return ret; - } - } - else if (ret != WH_ERROR_NOTFOUND) { - return ret; + if (ret != WH_ERROR_OK) { + /* Key doesn't exist in the cache */ + return 0; } - ctx = _GetCacheContext(server, keyId); - return _GetKeyCacheSlotUnlocked(ctx, keySz, outBuf, outMeta); -} - -/* Unlocked version of wh_Server_KeystoreGetCacheSlotChecked. 
- * Uses _KeystoreCheckPolicyUnlocked and _GetCacheSlotUnlocked. */ -static int _GetCacheSlotCheckedUnlocked(whServerContext* server, whKeyId keyId, - uint16_t keySz, uint8_t** outBuf, - whNvmMetadata** outMeta) -{ - int ret; - ret = _KeystoreCheckPolicyUnlocked(server, WH_KS_OP_CACHE, keyId); - if (ret != WH_ERROR_OK && ret != WH_ERROR_NOTFOUND) { - return ret; - } - return _GetCacheSlotUnlocked(server, keyId, keySz, outBuf, outMeta); + /* Key exists in the cache */ + return 1; } +#endif /* WOLFHSM_CFG_KEYWRAP */ -/* Unlocked version of wh_Server_KeystoreFreshenKey. - * Uses wh_Nvm_*Unlocked functions for NVM access. */ -static int _FreshenKeyUnlocked(whServerContext* server, whKeyId keyId, - uint8_t** outBuf, whNvmMetadata** outMeta) +/* try to put the specified key into cache if it isn't already, return pointers + * to meta and the cached data*/ +int wh_Server_KeystoreFreshenKey(whServerContext* server, whKeyId keyId, + uint8_t** outBuf, whNvmMetadata** outMeta) { int ret = 0; int foundIndex = -1; @@ -735,13 +589,13 @@ static int _FreshenKeyUnlocked(whServerContext* server, whKeyId keyId, cacheBufOut = (outBuf != NULL) ? outBuf : (uint8_t**)&cacheBufLocal; cacheMetaOut = (outMeta != NULL) ? outMeta : &cacheMetaLocal; - ret = _FindInCacheUnlocked(server, keyId, &foundIndex, &foundBigIndex, - cacheBufOut, cacheMetaOut); + ret = _FindInCache(server, keyId, &foundIndex, &foundBigIndex, cacheBufOut, + cacheMetaOut); if (ret != WH_ERROR_NOTFOUND) { return ret; } - /* Key not in the cache */ + /* key not in the cache */ /* For wrapped keys, just probe the cache and error if not found. We * don't support automatically unwrapping and caching outside of the @@ -751,22 +605,21 @@ static int _FreshenKeyUnlocked(whServerContext* server, whKeyId keyId, } /* Not in cache. Check if it is in NVM */ - ret = wh_Nvm_GetMetadataUnlocked(server->nvm, keyId, tmpMeta); + ret = wh_Nvm_GetMetadata(server->nvm, keyId, tmpMeta); if (ret == WH_ERROR_OK) { /* Key found in NVM, get a free cache slot */ - ret = _GetCacheSlotUnlocked(server, keyId, tmpMeta->len, cacheBufOut, - cacheMetaOut); + ret = wh_Server_KeystoreGetCacheSlot(server, keyId, tmpMeta->len, + cacheBufOut, cacheMetaOut); if (ret == WH_ERROR_OK) { /* Read the key from NVM into the cache slot */ - ret = wh_Nvm_ReadUnlocked(server->nvm, keyId, 0, tmpMeta->len, - *cacheBufOut); + ret = + wh_Nvm_Read(server->nvm, keyId, 0, tmpMeta->len, *cacheBufOut); if (ret == WH_ERROR_OK) { /* Copy the metadata to the cache slot if key read is - * successful */ + * successful*/ memcpy((uint8_t*)*cacheMetaOut, (uint8_t*)tmpMeta, sizeof(whNvmMetadata)); - _MarkKeyCommittedUnlocked(_GetCacheContext(server, keyId), - keyId, 1); + _MarkKeyCommitted(_GetCacheContext(server, keyId), keyId, 1); } } } @@ -774,74 +627,11 @@ static int _FreshenKeyUnlocked(whServerContext* server, whKeyId keyId, return ret; } -/* Unlocked version of wh_Server_KeystoreCommitKey. - * Uses wh_Nvm_AddObjectWithReclaimUnlocked for NVM access. 
*/ -static int _CommitKeyUnlocked(whServerContext* server, whNvmId keyId) -{ - uint8_t* slotBuf; - whNvmMetadata* slotMeta; - whNvmSize size; - int ret; - whKeyCacheContext* ctx; - - if ((server == NULL) || WH_KEYID_ISERASED(keyId)) { - return WH_ERROR_BADARGS; - } - - if (WH_KEYID_TYPE(keyId) == WH_KEYTYPE_WRAPPED) { - return WH_ERROR_ABORTED; - } - - ctx = _GetCacheContext(server, keyId); - - /* Find the key in the appropriate cache context */ - ret = _FindInKeyCacheUnlocked(ctx, keyId, NULL, NULL, &slotBuf, &slotMeta); - if (ret == WH_ERROR_OK) { - size = slotMeta->len; - ret = wh_Nvm_AddObjectWithReclaimUnlocked(server->nvm, slotMeta, size, - slotBuf); - if (ret == 0) { - (void)_MarkKeyCommittedUnlocked(ctx, keyId, 1); - } - } - - return ret; -} - -/* Unlocked version of _KeystoreCacheKeyLocked. - * Uses _GetCacheSlotUnlocked to avoid re-acquiring the lock. */ -static int _CacheKeyUnlocked(whServerContext* server, whNvmMetadata* meta, - uint8_t* in) -{ - uint8_t* slotBuf; - whNvmMetadata* slotMeta; - int ret; - - if ((server == NULL) || (meta == NULL) || (in == NULL) || - WH_KEYID_ISERASED(meta->id) || - ((meta->len > WOLFHSM_CFG_SERVER_KEYCACHE_BUFSIZE) && - (meta->len > WOLFHSM_CFG_SERVER_KEYCACHE_BIG_BUFSIZE))) { - return WH_ERROR_BADARGS; - } - - ret = - _GetCacheSlotUnlocked(server, meta->id, meta->len, &slotBuf, &slotMeta); - if (ret != WH_ERROR_OK) { - return ret; - } - - memcpy(slotBuf, in, meta->len); - memcpy((uint8_t*)slotMeta, (uint8_t*)meta, sizeof(whNvmMetadata)); - _MarkKeyCommittedUnlocked(_GetCacheContext(server, meta->id), meta->id, 0); - - return WH_ERROR_OK; -} - -/* Unlocked version of wh_Server_KeystoreReadKey. - * Uses unlocked NVM functions and _CacheKeyUnlocked. */ -static int _ReadKeyUnlocked(whServerContext* server, whKeyId keyId, - whNvmMetadata* outMeta, uint8_t* out, - uint32_t* outSz) +/* Reads key from cache or NVM. 
If keyId is a wrapped key will attempt to read + * from cache but NOT from NVM */ +int wh_Server_KeystoreReadKey(whServerContext* server, whKeyId keyId, + whNvmMetadata* outMeta, uint8_t* out, + uint32_t* outSz) { int ret = 0; whNvmMetadata meta[1]; @@ -855,13 +645,11 @@ static int _ReadKeyUnlocked(whServerContext* server, whKeyId keyId, } /* Check the cache using unified function */ - ret = _FindInCacheUnlocked(server, keyId, NULL, NULL, &cacheBuffer, - &cacheMeta); + ret = _FindInCache(server, keyId, NULL, NULL, &cacheBuffer, &cacheMeta); if (ret == WH_ERROR_OK) { /* Found in cache */ - if (cacheMeta->len > *outSz) { + if (cacheMeta->len > *outSz) return WH_ERROR_NOSPACE; - } if (outMeta != NULL) { memcpy((uint8_t*)outMeta, (uint8_t*)cacheMeta, sizeof(whNvmMetadata)); @@ -881,7 +669,7 @@ static int _ReadKeyUnlocked(whServerContext* server, whKeyId keyId, } /* Not in cache, try to read the metadata from NVM */ - ret = wh_Nvm_GetMetadataUnlocked(server->nvm, keyId, meta); + ret = wh_Nvm_GetMetadata(server->nvm, keyId, meta); if (ret == 0) { /* set outSz */ *outSz = meta->len; @@ -890,11 +678,11 @@ static int _ReadKeyUnlocked(whServerContext* server, whKeyId keyId, memcpy((uint8_t*)outMeta, (uint8_t*)meta, sizeof(meta)); /* read the object */ if (out != NULL) - ret = wh_Nvm_ReadUnlocked(server->nvm, keyId, 0, *outSz, out); + ret = wh_Nvm_Read(server->nvm, keyId, 0, *outSz, out); } /* cache key if free slot, will only kick out other committed keys */ if (ret == 0 && out != NULL) { - (void)_CacheKeyUnlocked(server, meta, out); + (void)wh_Server_KeystoreCacheKey(server, meta, out); } #ifdef WOLFHSM_CFG_SHE_EXTENSION /* use empty key of zeros if we couldn't find the master ecu key */ @@ -915,125 +703,39 @@ static int _ReadKeyUnlocked(whServerContext* server, whKeyId keyId, return ret; } -#ifdef WOLFHSM_CFG_KEYWRAP -static int _ExistsInCacheUnlocked(whServerContext* server, whKeyId keyId) -{ - int ret = 0; - int foundIndex = -1; - int foundBigIndex = -1; - whNvmMetadata* tmpMeta; - uint8_t* tmpBuf; - - ret = _FindInCacheUnlocked(server, keyId, &foundIndex, &foundBigIndex, - &tmpBuf, &tmpMeta); - - if (ret != WH_ERROR_OK) { - /* Key doesn't exist in the cache */ - return 0; - } - - /* Key exists in the cache */ - return 1; -} -#endif /* WOLFHSM_CFG_KEYWRAP */ - -/* try to put the specified key into cache if it isn't already, return pointers - * to meta and the cached data*/ -int wh_Server_KeystoreFreshenKey(whServerContext* server, whKeyId keyId, - uint8_t** outBuf, whNvmMetadata** outMeta) -{ - int ret; - - if ((server == NULL) || WH_KEYID_ISERASED(keyId)) { - return WH_ERROR_BADARGS; - } - - ret = _LockKeystore(server); - if (ret != WH_ERROR_OK) { - return ret; - } - - ret = _FreshenKeyUnlocked(server, keyId, outBuf, outMeta); - - (void)_UnlockKeystore(server); - - return ret; -} - -/* Reads key from cache or NVM. 
If keyId is a wrapped key will attempt to read - * from cache but NOT from NVM */ -int wh_Server_KeystoreReadKey(whServerContext* server, whKeyId keyId, - whNvmMetadata* outMeta, uint8_t* out, - uint32_t* outSz) -{ - int ret; - - if ((server == NULL) || (outSz == NULL) || - (WH_KEYID_ISERASED(keyId) && - (WH_KEYID_TYPE(keyId) != WH_KEYTYPE_SHE))) { - return WH_ERROR_BADARGS; - } - - ret = _LockKeystore(server); - if (ret != WH_ERROR_OK) { - return ret; - } - - ret = _ReadKeyUnlocked(server, keyId, outMeta, out, outSz); - - (void)_UnlockKeystore(server); - - return ret; -} - int wh_Server_KeystoreReadKeyChecked(whServerContext* server, whKeyId keyId, whNvmMetadata* outMeta, uint8_t* out, uint32_t* outSz) { int ret; - ret = _LockKeystore(server); + ret = _KeystoreCheckPolicy(server, WH_KS_OP_EXPORT, keyId); if (ret != WH_ERROR_OK) { return ret; } - - ret = _KeystoreCheckPolicyUnlocked(server, WH_KS_OP_EXPORT, keyId); - if (ret == WH_ERROR_OK) { - ret = _ReadKeyUnlocked(server, keyId, outMeta, out, outSz); - } - - (void)_UnlockKeystore(server); - - return ret; + return wh_Server_KeystoreReadKey(server, keyId, outMeta, out, outSz); } int wh_Server_KeystoreEvictKey(whServerContext* server, whNvmId keyId) { int ret = 0; whKeyCacheContext* ctx; - - if ((server == NULL) || WH_KEYID_ISERASED(keyId)) { - return WH_ERROR_BADARGS; - } - - ret = _LockKeystore(server); - if (ret != WH_ERROR_OK) { - return ret; + + if ((server == NULL) || WH_KEYID_ISERASED(keyId)) { + return WH_ERROR_BADARGS; } /* Get the appropriate cache context for this key */ ctx = _GetCacheContext(server, keyId); /* Use the unified evict function */ - ret = _EvictKeyFromCacheUnlocked(ctx, keyId); + ret = _EvictKeyFromCache(ctx, keyId); if (ret == 0) { WH_DEBUG_SERVER_VERBOSE("wh_Server_KeystoreEvictKey: evicted keyid=0x%X\n", keyId); } - (void)_UnlockKeystore(server); - return ret; } @@ -1041,24 +743,20 @@ int wh_Server_KeystoreEvictKeyChecked(whServerContext* server, whNvmId keyId) { int ret; - ret = _LockKeystore(server); + ret = _KeystoreCheckPolicy(server, WH_KS_OP_EVICT, keyId); if (ret != WH_ERROR_OK) { return ret; } - - ret = _KeystoreCheckPolicyUnlocked(server, WH_KS_OP_EVICT, keyId); - if (ret == WH_ERROR_OK) { - ret = _EvictKeyUnlocked(server, keyId); - } - - (void)_UnlockKeystore(server); - - return ret; + return wh_Server_KeystoreEvictKey(server, keyId); } int wh_Server_KeystoreCommitKey(whServerContext* server, whNvmId keyId) { - int ret; + uint8_t* slotBuf; + whNvmMetadata* slotMeta; + whNvmSize size; + int ret; + whKeyCacheContext* ctx; if ((server == NULL) || WH_KEYID_ISERASED(keyId)) { return WH_ERROR_BADARGS; @@ -1068,15 +766,19 @@ int wh_Server_KeystoreCommitKey(whServerContext* server, whNvmId keyId) return WH_ERROR_ABORTED; } - ret = _LockKeystore(server); - if (ret != WH_ERROR_OK) { - return ret; - } - - ret = _CommitKeyUnlocked(server, keyId); - - (void)_UnlockKeystore(server); + /* Get the appropriate cache context for this key */ + ctx = _GetCacheContext(server, keyId); + /* Find the key in the appropriate cache context obtained above. 
*/ + ret = _FindInKeyCache(ctx, keyId, NULL, NULL, &slotBuf, &slotMeta); + if (ret == WH_ERROR_OK) { + size = slotMeta->len; + ret = wh_Nvm_AddObjectWithReclaim(server->nvm, slotMeta, size, slotBuf); + if (ret == 0) { + /* Mark key as committed using unified function */ + (void)_MarkKeyCommitted(ctx, keyId, 1); + } + } return ret; } @@ -1084,25 +786,15 @@ int wh_Server_KeystoreCommitKeyChecked(whServerContext* server, whNvmId keyId) { int ret; - ret = _LockKeystore(server); + ret = _KeystoreCheckPolicy(server, WH_KS_OP_COMMIT, keyId); if (ret != WH_ERROR_OK) { return ret; } - - ret = _KeystoreCheckPolicyUnlocked(server, WH_KS_OP_COMMIT, keyId); - if (ret == WH_ERROR_OK) { - ret = _CommitKeyUnlocked(server, keyId); - } - - (void)_UnlockKeystore(server); - - return ret; + return wh_Server_KeystoreCommitKey(server, keyId); } int wh_Server_KeystoreEraseKey(whServerContext* server, whNvmId keyId) { - int ret; - if ((server == NULL) || (WH_KEYID_ISERASED(keyId))) { return WH_ERROR_BADARGS; } @@ -1111,27 +803,15 @@ int wh_Server_KeystoreEraseKey(whServerContext* server, whNvmId keyId) return WH_ERROR_ABORTED; } - ret = _LockKeystore(server); - if (ret != WH_ERROR_OK) { - return ret; - } - - /* Remove the key from the cache if present */ - (void)_EvictKeyUnlocked(server, keyId); + /* remove the key from the cache if present */ + (void)wh_Server_KeystoreEvictKey(server, keyId); - /* Destroy the object */ - ret = wh_Nvm_DestroyObjectsUnlocked(server->nvm, 1, &keyId); - - (void)_UnlockKeystore(server); - - return ret; + /* destroy the object */ + return wh_Nvm_DestroyObjects(server->nvm, 1, &keyId); } int wh_Server_KeystoreEraseKeyChecked(whServerContext* server, whNvmId keyId) { - int ret; - whNvmMetadata meta; - if ((server == NULL) || (WH_KEYID_ISERASED(keyId))) { return WH_ERROR_BADARGS; } @@ -1140,41 +820,11 @@ int wh_Server_KeystoreEraseKeyChecked(whServerContext* server, whNvmId keyId) return WH_ERROR_ABORTED; } - ret = _LockKeystore(server); - if (ret == WH_ERROR_OK) { - /* Check eviction policy first */ - ret = _KeystoreCheckPolicyUnlocked(server, WH_KS_OP_EVICT, keyId); - if (ret == WH_ERROR_NOTFOUND) { - ret = WH_ERROR_OK; /* NOTFOUND is acceptable for eviction policy */ - } - - if (ret == WH_ERROR_OK) { - /* Check NVM destroy policy (NONMODIFIABLE/NONDESTROYABLE - * enforcement) */ - ret = wh_Nvm_GetMetadataUnlocked(server->nvm, keyId, &meta); - if (ret == WH_ERROR_OK) { - if (meta.flags & (WH_NVM_FLAGS_NONMODIFIABLE | - WH_NVM_FLAGS_NONDESTROYABLE)) { - ret = WH_ERROR_ACCESS; - } - } - else if (ret == WH_ERROR_NOTFOUND) { - ret = - WH_ERROR_OK; /* NOTFOUND is acceptable for metadata check */ - } - - if (ret == WH_ERROR_OK) { - /* Remove the key from the cache if present */ - (void)_EvictKeyUnlocked(server, keyId); - - /* Destroy the object */ - ret = wh_Nvm_DestroyObjectsUnlocked(server->nvm, 1, &keyId); - } - } + /* remove the key from the cache if present */ + (void)wh_Server_KeystoreEvictKeyChecked(server, keyId); - (void)_UnlockKeystore(server); - } - return ret; + /* destroy the object */ + return wh_Nvm_DestroyObjectsChecked(server->nvm, 1, &keyId); } static void _revokeKey(whNvmMetadata* meta) @@ -1206,46 +856,41 @@ int wh_Server_KeystoreRevokeKey(whServerContext* server, whNvmId keyId) return WH_ERROR_BADARGS; } - ret = _LockKeystore(server); + ret = _KeystoreCheckPolicy(server, WH_KS_OP_REVOKE, keyId); + if (ret != WH_ERROR_OK) { + return ret; + } + + ret = wh_Nvm_GetMetadata(server->nvm, keyId, NULL); if (ret == WH_ERROR_OK) { - ret = _KeystoreCheckPolicyUnlocked(server, 
WH_KS_OP_REVOKE, keyId); - if (ret == WH_ERROR_OK) { - /* Check if key is in NVM */ - ret = wh_Nvm_GetMetadataUnlocked(server->nvm, keyId, NULL); - if (ret == WH_ERROR_OK) { - isInNvm = 1; - } - else if (ret == WH_ERROR_NOTFOUND) { - ret = WH_ERROR_OK; /* NOTFOUND is acceptable */ - } + isInNvm = 1; + } + else if (ret != WH_ERROR_NOTFOUND) { + return ret; + } - if (ret == WH_ERROR_OK) { - /* Freshen key into cache */ - ret = _FreshenKeyUnlocked(server, keyId, &cacheBuf, &cacheMeta); - if (ret == WH_ERROR_OK) { - /* If already revoked and committed, nothing to do */ - if (!(_isKeyRevoked(cacheMeta) && - _KeyIsCommittedUnlocked(server, keyId))) { - /* Revoke the key by updating its metadata */ - _revokeKey(cacheMeta); - - /* Commit the changes */ - if (isInNvm) { - ret = wh_Nvm_AddObjectWithReclaimUnlocked( - server->nvm, cacheMeta, cacheMeta->len, - cacheBuf); - if (ret == WH_ERROR_OK) { - _MarkKeyCommittedUnlocked( - _GetCacheContext(server, keyId), keyId, 1); - } - } - } - } - } - } + /* be sure to have the key in the cache */ + ret = wh_Server_KeystoreFreshenKey(server, keyId, &cacheBuf, &cacheMeta); + if (ret != WH_ERROR_OK) { + return ret; + } + + /* if already revoked and committed, nothing to do */ + if (_isKeyRevoked(cacheMeta) && _KeyIsCommitted(server, keyId)) { + return WH_ERROR_OK; + } - (void)_UnlockKeystore(server); + /* Revoke the key by updating its metadata */ + _revokeKey(cacheMeta); + /* commit the changes */ + if (isInNvm) { + ret = wh_Nvm_AddObjectWithReclaim(server->nvm, cacheMeta, + cacheMeta->len, cacheBuf); + if (ret == WH_ERROR_OK) { + _MarkKeyCommitted(_GetCacheContext(server, keyId), keyId, 1); + } } + return ret; } @@ -1824,7 +1469,7 @@ static int _HandleKeyUnwrapAndCacheRequest( #endif /* WOLFHSM_CFG_GLOBAL_KEYS */ /* Ensure a key with the unwrapped ID does not already exist in cache */ - if (_ExistsInCacheUnlocked(server, metadata.id)) { + if (_ExistsInCache(server, metadata.id)) { return WH_ERROR_ABORTED; } @@ -2022,28 +1667,29 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, } memcpy(meta->label, req.label, req.labelSz); - /* get a new id if one wasn't provided */ - if (WH_KEYID_ISERASED(meta->id)) { - /* Atomic GetUniqueId + CacheKey */ - ret = _LockKeystore(server); + ret = WH_SERVER_NVM_LOCK(server); + if (ret == WH_ERROR_OK) { + /* get a new id if one wasn't provided */ + if (WH_KEYID_ISERASED(meta->id)) { + ret = wh_Server_KeystoreGetUniqueId(server, &meta->id); + resp.rc = ret; + } + /* write the key */ if (ret == WH_ERROR_OK) { - ret = _GetUniqueIdUnlocked(server, &meta->id); - if (ret == WH_ERROR_OK) { - ret = _KeystoreCacheKeyUnlocked(server, meta, in, 1); - } - (void)_UnlockKeystore(server); + ret = wh_Server_KeystoreCacheKeyChecked(server, meta, in); + resp.rc = ret; } - resp.rc = ret; - } + if (ret == WH_ERROR_OK) { + /* Translate server keyId back to client format with flags + */ + resp.id = wh_KeyId_TranslateToClient(meta->id); + } + + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ else { - /* ID provided, use normal locking */ - ret = wh_Server_KeystoreCacheKeyChecked(server, meta, in); resp.rc = ret; } - if (ret == WH_ERROR_OK) { - /* Translate server keyId back to client format with flags */ - resp.id = wh_KeyId_TranslateToClient(meta->id); - } (void)wh_MessageKeystore_TranslateCacheResponse( magic, &resp, (whMessageKeystore_CacheResponse*)resp_packet); @@ -2073,40 +1719,39 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, } memcpy(meta->label, req.label, 
req.labelSz); - /* get a new id if one wasn't provided */ - if (WH_KEYID_ISERASED(meta->id)) { - /* Atomic GetUniqueId + CacheKey DMA */ - ret = _LockKeystore(server); + ret = WH_SERVER_NVM_LOCK(server); + if (ret == WH_ERROR_OK) { + /* get a new id if one wasn't provided */ + if (WH_KEYID_ISERASED(meta->id)) { + ret = wh_Server_KeystoreGetUniqueId(server, &meta->id); + resp.rc = ret; + } + + /* write the key using DMA */ if (ret == WH_ERROR_OK) { - ret = _GetUniqueIdUnlocked(server, &meta->id); - if (ret == WH_ERROR_OK) { - ret = _KeystoreCacheKeyDmaUnlocked(server, meta, - req.key.addr, 1); + ret = wh_Server_KeystoreCacheKeyDmaChecked(server, meta, + req.key.addr); + resp.rc = ret; + /* propagate bad address to client if DMA operation failed + */ + if (ret != WH_ERROR_OK) { + resp.dmaAddrStatus.badAddr.addr = req.key.addr; + resp.dmaAddrStatus.badAddr.sz = req.key.sz; } - (void)_UnlockKeystore(server); } - resp.rc = ret; - /* propagate bad address to client if DMA operation failed */ - if (ret != WH_ERROR_OK) { - resp.dmaAddrStatus.badAddr.addr = req.key.addr; - resp.dmaAddrStatus.badAddr.sz = req.key.sz; + + if (ret == WH_ERROR_OK) { + /* Translate server keyId back to client format with flags + */ + resp.id = wh_KeyId_TranslateToClient(meta->id); } - } + + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ else { - /* ID provided, use normal locking */ - ret = wh_Server_KeystoreCacheKeyDmaChecked(server, meta, - req.key.addr); resp.rc = ret; - /* propagate bad address to client if DMA operation failed */ - if (ret != WH_ERROR_OK) { - resp.dmaAddrStatus.badAddr.addr = req.key.addr; - resp.dmaAddrStatus.badAddr.sz = req.key.sz; - } } - /* Translate server keyId back to client format with flags */ - resp.id = wh_KeyId_TranslateToClient(meta->id); - (void)wh_MessageKeystore_TranslateCacheDmaResponse( magic, &resp, (whMessageKeystore_CacheDmaResponse*)resp_packet); @@ -2121,22 +1766,30 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, (void)wh_MessageKeystore_TranslateExportDmaRequest( magic, (whMessageKeystore_ExportDmaRequest*)req_packet, &req); - ret = wh_Server_KeystoreExportKeyDmaChecked( - server, - wh_KeyId_TranslateFromClient(WH_KEYTYPE_CRYPTO, - server->comm->client_id, req.id), - req.key.addr, req.key.sz, meta); - resp.rc = ret; + ret = WH_SERVER_NVM_LOCK(server); + if (ret == WH_ERROR_OK) { + ret = wh_Server_KeystoreExportKeyDmaChecked( + server, + wh_KeyId_TranslateFromClient( + WH_KEYTYPE_CRYPTO, server->comm->client_id, req.id), + req.key.addr, req.key.sz, meta); + resp.rc = ret; - /* propagate bad address to client if DMA operation failed */ - if (ret != WH_ERROR_OK) { - resp.dmaAddrStatus.badAddr.addr = req.key.addr; - resp.dmaAddrStatus.badAddr.sz = req.key.sz; - } + /* propagate bad address to client if DMA operation failed */ + if (ret != WH_ERROR_OK) { + resp.dmaAddrStatus.badAddr.addr = req.key.addr; + resp.dmaAddrStatus.badAddr.sz = req.key.sz; + } - if (ret == WH_ERROR_OK) { - resp.len = req.key.sz; - memcpy(resp.label, meta->label, sizeof(meta->label)); + if (ret == WH_ERROR_OK) { + resp.len = req.key.sz; + memcpy(resp.label, meta->label, sizeof(meta->label)); + } + + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + else { + resp.rc = ret; } (void)wh_MessageKeystore_TranslateExportDmaResponse( @@ -2154,12 +1807,20 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, (void)wh_MessageKeystore_TranslateEvictRequest( magic, 
(whMessageKeystore_EvictRequest*)req_packet, &req); - ret = wh_Server_KeystoreEvictKeyChecked( - server, - wh_KeyId_TranslateFromClient(WH_KEYTYPE_CRYPTO, - server->comm->client_id, req.id)); - resp.rc = ret; - resp.ok = 0; /* unused */ + ret = WH_SERVER_NVM_LOCK(server); + if (ret == WH_ERROR_OK) { + ret = wh_Server_KeystoreEvictKeyChecked( + server, + wh_KeyId_TranslateFromClient( + WH_KEYTYPE_CRYPTO, server->comm->client_id, req.id)); + resp.rc = ret; + resp.ok = 0; /* unused */ + + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + else { + resp.rc = ret; + } (void)wh_MessageKeystore_TranslateEvictResponse( magic, &resp, (whMessageKeystore_EvictResponse*)resp_packet); @@ -2179,23 +1840,32 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, out = (uint8_t*)resp_packet + sizeof(resp); keySz = WOLFHSM_CFG_COMM_DATA_LEN - sizeof(resp); - /* read the key */ - ret = wh_Server_KeystoreReadKeyChecked( - server, - wh_KeyId_TranslateFromClient(WH_KEYTYPE_CRYPTO, - server->comm->client_id, req.id), - meta, out, &keySz); + ret = WH_SERVER_NVM_LOCK(server); + if (ret == WH_ERROR_OK) { + /* read the key */ + ret = wh_Server_KeystoreReadKeyChecked( + server, + wh_KeyId_TranslateFromClient( + WH_KEYTYPE_CRYPTO, server->comm->client_id, req.id), + meta, out, &keySz); - resp.rc = ret; + resp.rc = ret; - /* Only provide key output if no error */ - if (ret == WH_ERROR_OK) { - resp.len = keySz; - } + /* Only provide key output if no error */ + if (ret == WH_ERROR_OK) { + resp.len = keySz; + } + else { + resp.len = 0; + } + memcpy(resp.label, meta->label, sizeof(meta->label)); + + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ else { + resp.rc = ret; resp.len = 0; } - memcpy(resp.label, meta->label, sizeof(meta->label)); (void)wh_MessageKeystore_TranslateExportResponse( magic, &resp, (whMessageKeystore_ExportResponse*)resp_packet); @@ -2211,12 +1881,20 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, (void)wh_MessageKeystore_TranslateCommitRequest( magic, (whMessageKeystore_CommitRequest*)req_packet, &req); - ret = wh_Server_KeystoreCommitKeyChecked( - server, - wh_KeyId_TranslateFromClient(WH_KEYTYPE_CRYPTO, - server->comm->client_id, req.id)); - resp.rc = ret; - resp.ok = 0; /* unused */ + ret = WH_SERVER_NVM_LOCK(server); + if (ret == WH_ERROR_OK) { + ret = wh_Server_KeystoreCommitKeyChecked( + server, + wh_KeyId_TranslateFromClient( + WH_KEYTYPE_CRYPTO, server->comm->client_id, req.id)); + resp.rc = ret; + resp.ok = 0; /* unused */ + + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + else { + resp.rc = ret; + } (void)wh_MessageKeystore_TranslateCommitResponse( magic, &resp, (whMessageKeystore_CommitResponse*)resp_packet); @@ -2232,12 +1910,20 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, (void)wh_MessageKeystore_TranslateEraseRequest( magic, (whMessageKeystore_EraseRequest*)req_packet, &req); - ret = wh_Server_KeystoreEraseKeyChecked( - server, - wh_KeyId_TranslateFromClient(WH_KEYTYPE_CRYPTO, - server->comm->client_id, req.id)); - resp.rc = ret; - resp.ok = 0; /* unused */ + ret = WH_SERVER_NVM_LOCK(server); + if (ret == WH_ERROR_OK) { + ret = wh_Server_KeystoreEraseKeyChecked( + server, + wh_KeyId_TranslateFromClient( + WH_KEYTYPE_CRYPTO, server->comm->client_id, req.id)); + resp.rc = ret; + resp.ok = 0; /* unused */ + + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + else { + resp.rc = 
ret; + } (void)wh_MessageKeystore_TranslateEraseResponse( magic, &resp, (whMessageKeystore_EraseResponse*)resp_packet); @@ -2252,12 +1938,20 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, (void)wh_MessageKeystore_TranslateRevokeRequest( magic, (whMessageKeystore_RevokeRequest*)req_packet, &req); - ret = wh_Server_KeystoreRevokeKey( - server, - wh_KeyId_TranslateFromClient(WH_KEYTYPE_CRYPTO, - server->comm->client_id, req.id)); - resp.rc = ret; - ret = WH_ERROR_OK; + ret = WH_SERVER_NVM_LOCK(server); + if (ret == WH_ERROR_OK) { + ret = wh_Server_KeystoreRevokeKey( + server, + wh_KeyId_TranslateFromClient( + WH_KEYTYPE_CRYPTO, server->comm->client_id, req.id)); + resp.rc = ret; + ret = WH_ERROR_OK; + + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + else { + resp.rc = ret; + } (void)wh_MessageKeystore_TranslateRevokeResponse( magic, &resp, (whMessageKeystore_RevokeResponse*)resp_packet); @@ -2292,9 +1986,22 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, respData = (uint8_t*)resp_packet + sizeof(whMessageKeystore_KeyWrapResponse); - ret = _HandleKeyWrapRequest(server, &wrapReq, reqData, reqDataSz, - &wrapResp, respData, respDataSz); - wrapResp.rc = ret; + /* Note: Locking here is mega-overkill, as there is only one small + * section inside this request pipeline that needs to be locked - + * freshening the server key and checking usage. Consider relocating + * locking to this section */ + ret = WH_SERVER_NVM_LOCK(server); + if (ret == WH_ERROR_OK) { + ret = + _HandleKeyWrapRequest(server, &wrapReq, reqData, reqDataSz, + &wrapResp, respData, respDataSz); + wrapResp.rc = ret; + + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + else { + wrapResp.rc = ret; + } (void)wh_MessageKeystore_TranslateKeyWrapResponse(magic, &wrapResp, resp_packet); @@ -2328,10 +2035,22 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, respData = (uint8_t*)resp_packet + sizeof(whMessageKeystore_KeyUnwrapAndExportResponse); - ret = _HandleKeyUnwrapAndExportRequest(server, &unwrapReq, reqData, - reqDataSz, &unwrapResp, - respData, respDataSz); - unwrapResp.rc = ret; + /* Note: Locking here is mega-overkill, as there is only one small + * section inside this request pipeline that needs to be locked - + * freshening the server key and checking usage. 
Consider relocating + * locking to this section */ + ret = WH_SERVER_NVM_LOCK(server); + if (ret == WH_ERROR_OK) { + ret = _HandleKeyUnwrapAndExportRequest( + server, &unwrapReq, reqData, reqDataSz, &unwrapResp, + respData, respDataSz); + unwrapResp.rc = ret; + + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + else { + unwrapResp.rc = ret; + } (void)wh_MessageKeystore_TranslateKeyUnwrapAndExportResponse( magic, &unwrapResp, resp_packet); @@ -2366,10 +2085,18 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, respData = (uint8_t*)resp_packet + sizeof(whMessageKeystore_KeyUnwrapAndCacheResponse); - ret = _HandleKeyUnwrapAndCacheRequest(server, &cacheReq, reqData, - reqDataSz, &cacheResp, - respData, respDataSz); - cacheResp.rc = ret; + ret = WH_SERVER_NVM_LOCK(server); + if (ret == WH_ERROR_OK) { + ret = _HandleKeyUnwrapAndCacheRequest( + server, &cacheReq, reqData, reqDataSz, &cacheResp, respData, + respDataSz); + cacheResp.rc = ret; + + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + else { + cacheResp.rc = ret; + } (void)wh_MessageKeystore_TranslateKeyUnwrapAndCacheResponse( magic, &cacheResp, resp_packet); @@ -2404,9 +2131,22 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, respData = (uint8_t*)resp_packet + sizeof(whMessageKeystore_DataWrapResponse); - ret = _HandleDataWrapRequest(server, &wrapReq, reqData, reqDataSz, - &wrapResp, respData, respDataSz); - wrapResp.rc = ret; + /* Note: Locking here is mega-overkill, as there is only one small + * section inside this request pipeline that needs to be locked - + * freshening the server key and checking usage. Consider relocating + * locking to this section */ + ret = WH_SERVER_NVM_LOCK(server); + if (ret == WH_ERROR_OK) { + ret = + _HandleDataWrapRequest(server, &wrapReq, reqData, reqDataSz, + &wrapResp, respData, respDataSz); + wrapResp.rc = ret; + + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + else { + wrapResp.rc = ret; + } (void)wh_MessageKeystore_TranslateDataWrapResponse(magic, &wrapResp, resp_packet); @@ -2441,10 +2181,22 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, respData = (uint8_t*)resp_packet + sizeof(whMessageKeystore_DataUnwrapResponse); - ret = - _HandleDataUnwrapRequest(server, &unwrapReq, reqData, reqDataSz, - &unwrapResp, respData, respDataSz); - unwrapResp.rc = ret; + /* Note: Locking here is mega-overkill, as there is only one small + * section inside this request pipeline that needs to be locked - + * freshening the server key and checking usage. 
Consider relocating + * locking to this section */ + ret = WH_SERVER_NVM_LOCK(server); + if (ret == WH_ERROR_OK) { + ret = _HandleDataUnwrapRequest(server, &unwrapReq, reqData, + reqDataSz, &unwrapResp, respData, + respDataSz); + unwrapResp.rc = ret; + + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + else { + unwrapResp.rc = ret; + } (void)wh_MessageKeystore_TranslateDataUnwrapResponse( magic, &unwrapResp, resp_packet); @@ -2464,27 +2216,21 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, #ifdef WOLFHSM_CFG_DMA -/* Unlocked variant - assumes caller holds server->nvm->lock */ -static int _KeystoreCacheKeyDmaUnlocked(whServerContext* server, - whNvmMetadata* meta, uint64_t keyAddr, - int checked) +int _KeystoreCacheKeyDma(whServerContext* server, whNvmMetadata* meta, + uint64_t keyAddr, int checked) { - int ret; - uint8_t* buffer = NULL; - whNvmMetadata* slotMeta = NULL; - - if ((server == NULL) || (meta == NULL)) { - return WH_ERROR_BADARGS; - } + int ret; + uint8_t* buffer; + whNvmMetadata* slotMeta; - /* Get a cache slot, optionally checking if necessary */ + /* Get a cache slot */ if (checked) { - ret = _GetCacheSlotCheckedUnlocked(server, meta->id, meta->len, &buffer, - &slotMeta); + ret = wh_Server_KeystoreGetCacheSlotChecked(server, meta->id, meta->len, + &buffer, &slotMeta); } else { - ret = _GetCacheSlotUnlocked(server, meta->id, meta->len, &buffer, - &slotMeta); + ret = wh_Server_KeystoreGetCacheSlot(server, meta->id, meta->len, + &buffer, &slotMeta); } if (ret != 0) { return ret; @@ -2502,27 +2248,11 @@ static int _KeystoreCacheKeyDmaUnlocked(whServerContext* server, slotMeta->id = WH_KEYID_ERASED; } else { - _MarkKeyCommittedUnlocked(_GetCacheContext(server, meta->id), meta->id, - 0); + _MarkKeyCommitted(_GetCacheContext(server, meta->id), meta->id, 0); } return ret; } - -int _KeystoreCacheKeyDma(whServerContext* server, whNvmMetadata* meta, - uint64_t keyAddr, int checked) -{ - int ret; - - ret = _LockKeystore(server); - if (ret == WH_ERROR_OK) { - - ret = _KeystoreCacheKeyDmaUnlocked(server, meta, keyAddr, checked); - - (void)_UnlockKeystore(server); - } - return ret; -} int wh_Server_KeystoreCacheKeyDma(whServerContext* server, whNvmMetadata* meta, uint64_t keyAddr) { @@ -2543,70 +2273,37 @@ int wh_Server_KeystoreExportKeyDma(whServerContext* server, whKeyId keyId, uint8_t* buffer; whNvmMetadata* cacheMeta; - if ((server == NULL) || (outMeta == NULL)) { - return WH_ERROR_BADARGS; + /* bring key in cache */ + ret = wh_Server_KeystoreFreshenKey(server, keyId, &buffer, &cacheMeta); + if (ret != WH_ERROR_OK) { + return ret; } - ret = _LockKeystore(server); - if (ret == WH_ERROR_OK) { - ret = _FreshenKeyUnlocked(server, keyId, &buffer, &cacheMeta); - if (ret == WH_ERROR_OK) { - if (keySz < cacheMeta->len) { - ret = WH_ERROR_NOSPACE; - } - else { - memcpy(outMeta, cacheMeta, sizeof(whNvmMetadata)); - } - - if (ret == WH_ERROR_OK) { - /* Copy key data using DMA */ - ret = whServerDma_CopyToClient(server, keyAddr, buffer, - outMeta->len, - (whServerDmaFlags){0}); - } - } - (void)_UnlockKeystore(server); + if (keySz < cacheMeta->len) { + return WH_ERROR_NOSPACE; } + + memcpy(outMeta, cacheMeta, sizeof(whNvmMetadata)); + + /* Copy key data using DMA */ + ret = whServerDma_CopyToClient(server, keyAddr, buffer, outMeta->len, + (whServerDmaFlags){0}); + return ret; } - int wh_Server_KeystoreExportKeyDmaChecked(whServerContext* server, whKeyId keyId, uint64_t keyAddr, uint64_t keySz, whNvmMetadata* outMeta) { - int ret; - 
uint8_t* buffer; - whNvmMetadata* cacheMeta; - - if ((server == NULL) || (outMeta == NULL)) { - return WH_ERROR_BADARGS; - } - - ret = _LockKeystore(server); - if (ret == WH_ERROR_OK) { - ret = _KeystoreCheckPolicyUnlocked(server, WH_KS_OP_EXPORT, keyId); - if (ret == WH_ERROR_OK) { - ret = _FreshenKeyUnlocked(server, keyId, &buffer, &cacheMeta); - if (ret == WH_ERROR_OK) { - if (keySz < cacheMeta->len) { - ret = WH_ERROR_NOSPACE; - } - else { - memcpy(outMeta, cacheMeta, sizeof(whNvmMetadata)); - } + int ret; - if (ret == WH_ERROR_OK) { - /* Copy key data using DMA */ - ret = whServerDma_CopyToClient(server, keyAddr, buffer, - outMeta->len, - (whServerDmaFlags){0}); - } - } - } - (void)_UnlockKeystore(server); + ret = _KeystoreCheckPolicy(server, WH_KS_OP_EXPORT, keyId); + if (ret != WH_ERROR_OK) { + return ret; } - return ret; + return wh_Server_KeystoreExportKeyDma(server, keyId, keyAddr, keySz, + outMeta); } #endif /* WOLFHSM_CFG_DMA */ @@ -2639,32 +2336,20 @@ int wh_Server_KeystoreFindEnforceKeyUsage(whServerContext* server, { int ret; whNvmMetadata* meta = NULL; - whNvmMetadata metaCopy; /* Validate input parameters */ if (server == NULL) { return WH_ERROR_BADARGS; } - ret = _LockKeystore(server); - if (ret != WH_ERROR_OK) { - return ret; - } - /* Freshen the key to obtain the metadata */ - ret = _FreshenKeyUnlocked(server, keyId, NULL, &meta); + ret = wh_Server_KeystoreFreshenKey(server, keyId, NULL, &meta); if (ret != WH_ERROR_OK) { - (void)_UnlockKeystore(server); return ret; } - /* Copy metadata while holding the lock to avoid stale reads */ - memcpy((uint8_t*)&metaCopy, (uint8_t*)meta, sizeof(metaCopy)); - - (void)_UnlockKeystore(server); - - /* Enforce the usage policy with the copied metadata */ - return wh_Server_KeystoreEnforceKeyUsage(&metaCopy, requiredUsage); + /* Enforce the usage policy with the obtained metadata */ + return wh_Server_KeystoreEnforceKeyUsage(meta, requiredUsage); } #endif /* !WOLFHSM_CFG_NO_CRYPTO && WOLFHSM_CFG_ENABLE_SERVER */ diff --git a/src/wh_server_nvm.c b/src/wh_server_nvm.c index 246a071a9..5edb77b9f 100644 --- a/src/wh_server_nvm.c +++ b/src/wh_server_nvm.c @@ -19,21 +19,6 @@ /* * src/wh_server_nvm.c * - * LOCKING DISCIPLINE FOR COMPOUND OPERATIONS: - * - * All compound operations (those involving multiple cache/NVM accesses) MUST: - * 1. Acquire server->nvm->lock at the start using _LockNvm() - * 2. Use only *Unlocked() variants of NVM/cache functions internally - * 3. Keep lock held for entire operation, including DMA transfers - * 4. Release lock only after all metadata-dependent operations complete - * - * Rationale: Releasing the lock mid-operation creates TOCTOU (Time-Of-Check- - * Time-Of-Use) vulnerabilities where metadata can become stale or objects can - * be destroyed/replaced between checks. DMA operations that depend on cached - * metadata MUST occur under the same lock that validated that metadata. - * - * This pattern matches keystore DMA operations (KeystoreCacheKeyDmaChecked, - * KeystoreExportKeyDmaChecked) which hold locks throughout their execution. 
*/ /* Pick up compile-time configuration */ @@ -51,7 +36,6 @@ #include "wolfhsm/wh_comm.h" #include "wolfhsm/wh_nvm.h" -#include "wolfhsm/wh_nvm_internal.h" #include "wolfhsm/wh_message.h" #include "wolfhsm/wh_message_nvm.h" @@ -59,33 +43,10 @@ #include "wolfhsm/wh_server.h" #include "wolfhsm/wh_server_nvm.h" -/* Helper functions for NVM locking */ -#ifdef WOLFHSM_CFG_THREADSAFE -static int _LockNvm(whServerContext* server) -{ - if (server->nvm != NULL) { - return wh_Lock_Acquire(&server->nvm->lock); - } - return WH_ERROR_OK; -} - -static int _UnlockNvm(whServerContext* server) -{ - if (server->nvm != NULL) { - return wh_Lock_Release(&server->nvm->lock); - } - return WH_ERROR_OK; -} -#else -#define _LockNvm(server) (WH_ERROR_OK) -#define _UnlockNvm(server) (WH_ERROR_OK) -#endif /* WOLFHSM_CFG_THREADSAFE */ - -/* Handle NVM read (unlocked variant), do access checking and clamping - * Assumes caller holds server->nvm->lock */ -static int _HandleNvmReadUnlocked(whServerContext* server, uint8_t* out_data, - whNvmSize offset, whNvmSize len, - whNvmSize* out_len, whNvmId id) +/* Handle NVM read, do access checking and clamping */ +static int _HandleNvmRead(whServerContext* server, uint8_t* out_data, + whNvmSize offset, whNvmSize len, whNvmSize* out_len, + whNvmId id) { whNvmMetadata meta; int32_t rc; @@ -98,7 +59,7 @@ static int _HandleNvmReadUnlocked(whServerContext* server, uint8_t* out_data, return WH_ERROR_ABORTED; } - rc = wh_Nvm_GetMetadataUnlocked(server->nvm, id, &meta); + rc = wh_Nvm_GetMetadata(server->nvm, id, &meta); if (rc != WH_ERROR_OK) { return rc; } @@ -111,29 +72,13 @@ static int _HandleNvmReadUnlocked(whServerContext* server, uint8_t* out_data, len = meta.len - offset; } - rc = wh_Nvm_ReadCheckedUnlocked(server->nvm, id, offset, len, out_data); + rc = wh_Nvm_ReadChecked(server->nvm, id, offset, len, out_data); if (rc != WH_ERROR_OK) return rc; *out_len = len; return WH_ERROR_OK; } -/* Handle NVM read, do access checking and clamping */ -static int _HandleNvmRead(whServerContext* server, uint8_t* out_data, - whNvmSize offset, whNvmSize len, whNvmSize* out_len, - whNvmId id) -{ - int32_t rc; - - /* Acquire lock for atomic GetMetadata + ReadChecked */ - rc = _LockNvm(server); - if (rc == WH_ERROR_OK) { - rc = _HandleNvmReadUnlocked(server, out_data, offset, len, out_len, id); - (void)_UnlockNvm(server); - } - return rc; -} - int wh_Server_HandleNvmRequest(whServerContext* server, uint16_t magic, uint16_t action, uint16_t seq, uint16_t req_size, const void* req_packet, @@ -208,10 +153,17 @@ int wh_Server_HandleNvmRequest(whServerContext* server, wh_MessageNvm_TranslateListRequest(magic, (whMessageNvm_ListRequest*)req_packet, &req); - /* Process the list action */ - resp.rc = wh_Nvm_List(server->nvm, - req.access, req.flags, req.startId, - &resp.count, &resp.id); + rc = WH_SERVER_NVM_LOCK(server); + if (rc == WH_ERROR_OK) { + /* Process the list action */ + resp.rc = wh_Nvm_List(server->nvm, req.access, req.flags, + req.startId, &resp.count, &resp.id); + + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + else { + resp.rc = rc; + } } /* Convert the response struct */ wh_MessageNvm_TranslateListResponse(magic, @@ -228,10 +180,18 @@ int wh_Server_HandleNvmRequest(whServerContext* server, /* Request is malformed */ resp.rc = WH_ERROR_ABORTED; } else { - /* Process the available action */ - resp.rc = wh_Nvm_GetAvailable(server->nvm, - &resp.avail_size, &resp.avail_objects, + rc = WH_SERVER_NVM_LOCK(server); + if (rc == WH_ERROR_OK) { + /* Process the 
available action */ + resp.rc = wh_Nvm_GetAvailable( + server->nvm, &resp.avail_size, &resp.avail_objects, &resp.reclaim_size, &resp.reclaim_objects); + + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + else { + resp.rc = rc; + } } /* Convert the response struct */ wh_MessageNvm_TranslateGetAvailableResponse(magic, @@ -253,15 +213,23 @@ int wh_Server_HandleNvmRequest(whServerContext* server, wh_MessageNvm_TranslateGetMetadataRequest(magic, (whMessageNvm_GetMetadataRequest*)req_packet, &req); - /* Process the getmetadata action */ - resp.rc = wh_Nvm_GetMetadata(server->nvm, req.id, &meta); + rc = WH_SERVER_NVM_LOCK(server); + if (rc == WH_ERROR_OK) { + /* Process the getmetadata action */ + resp.rc = wh_Nvm_GetMetadata(server->nvm, req.id, &meta); + + if (resp.rc == 0) { + resp.id = meta.id; + resp.access = meta.access; + resp.flags = meta.flags; + resp.len = meta.len; + memcpy(resp.label, meta.label, sizeof(resp.label)); + } - if (resp.rc == 0) { - resp.id = meta.id; - resp.access = meta.access; - resp.flags = meta.flags; - resp.len = meta.len; - memcpy(resp.label, meta.label, sizeof(resp.label)); + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + else { + resp.rc = rc; } } /* Convert the response struct */ @@ -292,8 +260,17 @@ int wh_Server_HandleNvmRequest(whServerContext* server, meta.flags = req.flags; meta.len = req.len; memcpy(meta.label, req.label, sizeof(meta.label)); - resp.rc = - wh_Nvm_AddObjectChecked(server->nvm, &meta, req.len, data); + + rc = WH_SERVER_NVM_LOCK(server); + if (rc == WH_ERROR_OK) { + resp.rc = wh_Nvm_AddObjectChecked(server->nvm, &meta, + req.len, data); + + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + else { + resp.rc = rc; + } } } /* Convert the response struct */ @@ -316,9 +293,17 @@ int wh_Server_HandleNvmRequest(whServerContext* server, (whMessageNvm_DestroyObjectsRequest*)req_packet, &req); if (req.list_count <= WH_MESSAGE_NVM_MAX_DESTROY_OBJECTS_COUNT) { - /* Process the DestroyObjects action */ - resp.rc = wh_Nvm_DestroyObjectsChecked( - server->nvm, req.list_count, req.list); + rc = WH_SERVER_NVM_LOCK(server); + if (rc == WH_ERROR_OK) { + /* Process the DestroyObjects action */ + resp.rc = wh_Nvm_DestroyObjectsChecked( + server->nvm, req.list_count, req.list); + + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + else { + resp.rc = rc; + } } else { /* Problem in transport or request */ @@ -348,10 +333,18 @@ int wh_Server_HandleNvmRequest(whServerContext* server, wh_MessageNvm_TranslateReadRequest( magic, (whMessageNvm_ReadRequest*)req_packet, &req); - resp.rc = _HandleNvmRead(server, data, req.offset, req.data_len, - &req.data_len, req.id); - if (resp.rc == WH_ERROR_OK) { - data_len = req.data_len; + rc = WH_SERVER_NVM_LOCK(server); + if (rc == WH_ERROR_OK) { + resp.rc = _HandleNvmRead(server, data, req.offset, req.data_len, + &req.data_len, req.id); + if (resp.rc == WH_ERROR_OK) { + data_len = req.data_len; + } + + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + else { + resp.rc = rc; } } /* Convert the response struct */ @@ -389,10 +382,18 @@ int wh_Server_HandleNvmRequest(whServerContext* server, WH_DMA_OPER_CLIENT_READ_PRE, (whServerDmaFlags){0}); } if (resp.rc == 0) { - /* Process the AddObject action */ - resp.rc = - wh_Nvm_AddObjectChecked(server->nvm, (whNvmMetadata*)metadata, - req.data_len, (const uint8_t*)data); + rc = WH_SERVER_NVM_LOCK(server); + if (rc == 
WH_ERROR_OK) { + /* Process the AddObject action */ + resp.rc = wh_Nvm_AddObjectChecked( + server->nvm, (whNvmMetadata*)metadata, req.data_len, + (const uint8_t*)data); + + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + else { + resp.rc = rc; + } } if (resp.rc == 0) { /* perform platform-specific host address processing */ @@ -416,7 +417,7 @@ int wh_Server_HandleNvmRequest(whServerContext* server, whMessageNvm_ReadDmaRequest req = {0}; whMessageNvm_SimpleResponse resp = {0}; whNvmMetadata meta = {0}; - whNvmSize read_len; + whNvmSize read_len = 0; void* data = NULL; if (req_size != sizeof(req)) { @@ -428,11 +429,9 @@ int wh_Server_HandleNvmRequest(whServerContext* server, wh_MessageNvm_TranslateReadDmaRequest(magic, (whMessageNvm_ReadDmaRequest*)req_packet, &req); - /* Acquire lock for entire operation to prevent TOCTOU */ - resp.rc = _LockNvm(server); - if (resp.rc == WH_ERROR_OK) { - resp.rc = - wh_Nvm_GetMetadataUnlocked(server->nvm, req.id, &meta); + rc = WH_SERVER_NVM_LOCK(server); + if (rc == WH_ERROR_OK) { + resp.rc = wh_Nvm_GetMetadata(server->nvm, req.id, &meta); if (resp.rc == 0) { if (req.offset >= meta.len) { @@ -449,34 +448,32 @@ int wh_Server_HandleNvmRequest(whServerContext* server, } /* use unclamped length for DMA address processing in case DMA - * callbacks are sensible to alignment and/or size. - * Keep lock held during DMA to ensure metadata remains valid. - */ + * callbacks are sensible to alignment and/or size */ if (resp.rc == 0) { /* perform platform-specific host address processing */ resp.rc = wh_Server_DmaProcessClientAddress( server, req.data_hostaddr, &data, req.data_len, WH_DMA_OPER_CLIENT_WRITE_PRE, (whServerDmaFlags){0}); } - - if (resp.rc == WH_ERROR_OK) { + if (resp.rc == 0) { /* Process the Read action */ - resp.rc = wh_Nvm_ReadCheckedUnlocked(server->nvm, req.id, - req.offset, read_len, - (uint8_t*)data); + resp.rc = + wh_Nvm_ReadChecked(server->nvm, req.id, req.offset, + read_len, (uint8_t*)data); } - /* Release lock after all metadata-dependent operations */ - (void)_UnlockNvm(server); + if (resp.rc == 0) { + /* perform platform-specific host address processing */ + resp.rc = wh_Server_DmaProcessClientAddress( + server, req.data_hostaddr, &data, req.data_len, + WH_DMA_OPER_CLIENT_WRITE_POST, (whServerDmaFlags){0}); + } + + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + else { + resp.rc = rc; } } - if (resp.rc == 0) { - /* perform platform-specific host address processing. - * POST processing can be outside lock as it doesn't depend on - * metadata validity. 
*/ - resp.rc = wh_Server_DmaProcessClientAddress( - server, req.data_hostaddr, &data, req.data_len, - WH_DMA_OPER_CLIENT_WRITE_POST, (whServerDmaFlags){0}); - } /* Convert the response struct */ wh_MessageNvm_TranslateSimpleResponse(magic, &resp, (whMessageNvm_SimpleResponse*)resp_packet); diff --git a/src/wh_server_she.c b/src/wh_server_she.c index 18a39745a..a3382170d 100644 --- a/src/wh_server_she.c +++ b/src/wh_server_she.c @@ -1608,68 +1608,120 @@ int wh_Server_HandleSheRequest(whServerContext* server, uint16_t magic, resp_packet); break; case WH_SHE_SECURE_BOOT_INIT: - ret = _SecureBootInit(server, magic, req_size, req_packet, - out_resp_size, resp_packet); + ret = WH_SERVER_NVM_LOCK(server); + if (ret == WH_ERROR_OK) { + ret = _SecureBootInit(server, magic, req_size, req_packet, + out_resp_size, resp_packet); + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ break; case WH_SHE_SECURE_BOOT_UPDATE: ret = _SecureBootUpdate(server, magic, req_size, req_packet, out_resp_size, resp_packet); break; case WH_SHE_SECURE_BOOT_FINISH: - ret = _SecureBootFinish(server, magic, req_size, req_packet, - out_resp_size, resp_packet); + ret = WH_SERVER_NVM_LOCK(server); + if (ret == WH_ERROR_OK) { + ret = _SecureBootFinish(server, magic, req_size, req_packet, + out_resp_size, resp_packet); + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ break; case WH_SHE_GET_STATUS: ret = _GetStatus(server, magic, req_size, req_packet, out_resp_size, resp_packet); break; case WH_SHE_LOAD_KEY: - ret = _LoadKey(server, magic, req_size, req_packet, out_resp_size, - resp_packet); + ret = WH_SERVER_NVM_LOCK(server); + if (ret == WH_ERROR_OK) { + ret = _LoadKey(server, magic, req_size, req_packet, + out_resp_size, resp_packet); + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ break; case WH_SHE_LOAD_PLAIN_KEY: - ret = _LoadPlainKey(server, magic, req_size, req_packet, - out_resp_size, resp_packet); + ret = WH_SERVER_NVM_LOCK(server); + if (ret == WH_ERROR_OK) { + ret = _LoadPlainKey(server, magic, req_size, req_packet, + out_resp_size, resp_packet); + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ break; case WH_SHE_EXPORT_RAM_KEY: - ret = _ExportRamKey(server, magic, req_size, req_packet, - out_resp_size, resp_packet); + ret = WH_SERVER_NVM_LOCK(server); + if (ret == WH_ERROR_OK) { + ret = _ExportRamKey(server, magic, req_size, req_packet, + out_resp_size, resp_packet); + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ break; case WH_SHE_INIT_RND: - ret = _InitRnd(server, magic, req_size, req_packet, out_resp_size, - resp_packet); + ret = WH_SERVER_NVM_LOCK(server); + if (ret == WH_ERROR_OK) { + ret = _InitRnd(server, magic, req_size, req_packet, + out_resp_size, resp_packet); + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ break; case WH_SHE_RND: ret = _Rnd(server, magic, req_size, req_packet, out_resp_size, resp_packet); break; case WH_SHE_EXTEND_SEED: - ret = _ExtendSeed(server, magic, req_size, req_packet, - out_resp_size, resp_packet); + ret = WH_SERVER_NVM_LOCK(server); + if (ret == WH_ERROR_OK) { + ret = _ExtendSeed(server, magic, req_size, req_packet, + out_resp_size, resp_packet); + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ break; case WH_SHE_ENC_ECB: - ret = _EncEcb(server, magic, req_size, req_packet, out_resp_size, - resp_packet); + ret = WH_SERVER_NVM_LOCK(server); 
+ if (ret == WH_ERROR_OK) { + ret = _EncEcb(server, magic, req_size, req_packet, + out_resp_size, resp_packet); + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ break; case WH_SHE_ENC_CBC: - ret = _EncCbc(server, magic, req_size, req_packet, out_resp_size, - resp_packet); + ret = WH_SERVER_NVM_LOCK(server); + if (ret == WH_ERROR_OK) { + ret = _EncCbc(server, magic, req_size, req_packet, + out_resp_size, resp_packet); + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ break; case WH_SHE_DEC_ECB: - ret = _DecEcb(server, magic, req_size, req_packet, out_resp_size, - resp_packet); + ret = WH_SERVER_NVM_LOCK(server); + if (ret == WH_ERROR_OK) { + ret = _DecEcb(server, magic, req_size, req_packet, + out_resp_size, resp_packet); + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ break; case WH_SHE_DEC_CBC: - ret = _DecCbc(server, magic, req_size, req_packet, out_resp_size, - resp_packet); + ret = WH_SERVER_NVM_LOCK(server); + if (ret == WH_ERROR_OK) { + ret = _DecCbc(server, magic, req_size, req_packet, + out_resp_size, resp_packet); + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ break; case WH_SHE_GEN_MAC: - ret = _GenerateMac(server, magic, req_size, req_packet, - out_resp_size, resp_packet); + ret = WH_SERVER_NVM_LOCK(server); + if (ret == WH_ERROR_OK) { + ret = _GenerateMac(server, magic, req_size, req_packet, + out_resp_size, resp_packet); + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ break; case WH_SHE_VERIFY_MAC: - ret = _VerifyMac(server, magic, req_size, req_packet, out_resp_size, - resp_packet); + ret = WH_SERVER_NVM_LOCK(server); + if (ret == WH_ERROR_OK) { + ret = _VerifyMac(server, magic, req_size, req_packet, + out_resp_size, resp_packet); + (void)WH_SERVER_NVM_UNLOCK(server); + } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ break; default: ret = WH_ERROR_BADARGS; diff --git a/wolfhsm/wh_nvm.h b/wolfhsm/wh_nvm.h index 6c3df8f77..0d6945229 100644 --- a/wolfhsm/wh_nvm.h +++ b/wolfhsm/wh_nvm.h @@ -145,4 +145,15 @@ int wh_Nvm_Read(whNvmContext* context, whNvmId id, whNvmSize offset, int wh_Nvm_ReadChecked(whNvmContext* context, whNvmId id, whNvmSize offset, whNvmSize data_len, uint8_t* data); +/** NVM Locking API for handler-level thread safety */ +#ifdef WOLFHSM_CFG_THREADSAFE +int wh_Nvm_Lock(whNvmContext* nvm); +int wh_Nvm_Unlock(whNvmContext* nvm); +#define WH_NVM_LOCK(nvm) wh_Nvm_Lock(nvm) +#define WH_NVM_UNLOCK(nvm) wh_Nvm_Unlock(nvm) +#else +#define WH_NVM_LOCK(nvm) (WH_ERROR_OK) +#define WH_NVM_UNLOCK(nvm) (WH_ERROR_OK) +#endif + #endif /* !WOLFHSM_WH_NVM_H_ */ diff --git a/wolfhsm/wh_nvm_internal.h b/wolfhsm/wh_nvm_internal.h deleted file mode 100644 index f757d9eb6..000000000 --- a/wolfhsm/wh_nvm_internal.h +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright (C) 2024 wolfSSL Inc. - * - * This file is part of wolfHSM. - * - * wolfHSM is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 3 of the License, or - * (at your option) any later version. - * - * wolfHSM is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with wolfHSM. 
If not, see . - */ -/* - * wolfhsm/wh_nvm_internal.h - * - * Additional NVM helper API for internal library use only - * - */ - -#ifndef WOLFHSM_WH_NVM_INTERNAL_H_ -#define WOLFHSM_WH_NVM_INTERNAL_H_ - -/* Pick up compile-time configuration */ -#include "wolfhsm/wh_settings.h" -#include "wolfhsm/wh_nvm.h" - -#include - -#ifdef WOLFHSM_CFG_THREADSAFE -/* - * Internal unlocked NVM functions. Same functionality as their public - * counterparts but assume the caller already holds context->lock and do NOT - * attempt to aquire the lock. For use by server keystore internals when - * performing atomic multi-step operations. - */ - -int wh_Nvm_GetMetadataUnlocked(whNvmContext* context, whNvmId id, - whNvmMetadata* meta); - -int wh_Nvm_ReadUnlocked(whNvmContext* context, whNvmId id, whNvmSize offset, - whNvmSize data_len, uint8_t* data); - -int wh_Nvm_AddObjectWithReclaimUnlocked(whNvmContext* context, - whNvmMetadata* meta, whNvmSize dataLen, - const uint8_t* data); - -int wh_Nvm_DestroyObjectsUnlocked(whNvmContext* context, whNvmId list_count, - const whNvmId* id_list); - -int wh_Nvm_AddObjectCheckedUnlocked(whNvmContext* context, whNvmMetadata* meta, - whNvmSize data_len, const uint8_t* data); - -int wh_Nvm_DestroyObjectsCheckedUnlocked(whNvmContext* context, - whNvmId list_count, - const whNvmId* id_list); - -int wh_Nvm_ReadCheckedUnlocked(whNvmContext* context, whNvmId id, - whNvmSize offset, whNvmSize data_len, - uint8_t* data); -#else - -/* - * When THREADSAFE is not defined, unlocked functions map to regular ones. - * This allows keystore code to always use unlocked variants without - * conditional compilation. - */ -#define wh_Nvm_GetMetadataUnlocked(ctx, id, meta) \ - wh_Nvm_GetMetadata((ctx), (id), (meta)) -#define wh_Nvm_ReadUnlocked(ctx, id, off, len, data) \ - wh_Nvm_Read((ctx), (id), (off), (len), (data)) -#define wh_Nvm_AddObjectWithReclaimUnlocked(ctx, meta, len, data) \ - wh_Nvm_AddObjectWithReclaim((ctx), (meta), (len), (data)) -#define wh_Nvm_DestroyObjectsUnlocked(ctx, cnt, list) \ - wh_Nvm_DestroyObjects((ctx), (cnt), (list)) -#define wh_Nvm_AddObjectCheckedUnlocked(ctx, meta, len, data) \ - wh_Nvm_AddObjectChecked((ctx), (meta), (len), (data)) -#define wh_Nvm_DestroyObjectsCheckedUnlocked(ctx, cnt, list) \ - wh_Nvm_DestroyObjectsChecked((ctx), (cnt), (list)) -#define wh_Nvm_ReadCheckedUnlocked(ctx, id, off, len, data) \ - wh_Nvm_ReadChecked((ctx), (id), (off), (len), (data)) - -#endif - - -#endif /* !WOLFHSM_WH_NVM_INTERNAL_H_ */ diff --git a/wolfhsm/wh_server.h b/wolfhsm/wh_server.h index 4e9226583..6d7f63114 100644 --- a/wolfhsm/wh_server.h +++ b/wolfhsm/wh_server.h @@ -497,4 +497,15 @@ int whServerDma_CopyToClient(struct whServerContext_t* server, whServerDmaFlags flags); #endif /* WOLFHSM_CFG_DMA */ +/** Server NVM Locking API for handler-level thread safety */ +#ifdef WOLFHSM_CFG_THREADSAFE +int wh_Server_NvmLock(whServerContext* server); +int wh_Server_NvmUnlock(whServerContext* server); +#define WH_SERVER_NVM_LOCK(server) wh_Server_NvmLock(server) +#define WH_SERVER_NVM_UNLOCK(server) wh_Server_NvmUnlock(server) +#else +#define WH_SERVER_NVM_LOCK(server) (WH_ERROR_OK) +#define WH_SERVER_NVM_UNLOCK(server) (WH_ERROR_OK) +#endif + #endif /* !WOLFHSM_WH_SERVER_H_ */ From de16a6a9b30197bb82c0ced37cf0d86aef350ba5 Mon Sep 17 00:00:00 2001 From: Brett Nicholas <7547222+bigbrett@users.noreply.github.com> Date: Wed, 28 Jan 2026 14:54:50 -0700 Subject: [PATCH 05/13] - cleanups, formatting, test housekeeping fixes surrounding macro protection - TSAN options to fail-fast in CI on 
error --- .github/workflows/build-and-test-stress.yml | 2 +- src/wh_lock.c | 4 +- src/wh_server_cert.c | 24 +++-- src/wh_server_counter.c | 8 +- src/wh_server_keystore.c | 114 ++++++-------------- src/wh_server_nvm.c | 99 +++++++---------- src/wh_server_she.c | 26 ++--- test/Makefile | 9 +- test/wh_test.c | 3 +- test/wh_test_lock.c | 14 ++- test/wh_test_posix_threadsafe_stress.c | 19 +--- 11 files changed, 132 insertions(+), 190 deletions(-) diff --git a/.github/workflows/build-and-test-stress.yml b/.github/workflows/build-and-test-stress.yml index bf83ff78f..7a0aee18a 100644 --- a/.github/workflows/build-and-test-stress.yml +++ b/.github/workflows/build-and-test-stress.yml @@ -1,4 +1,4 @@ -name: Build and Test Stress +name: Multi-threaded Stress Test on: push: diff --git a/src/wh_lock.c b/src/wh_lock.c index 47a850e45..da2a29e76 100644 --- a/src/wh_lock.c +++ b/src/wh_lock.c @@ -44,8 +44,8 @@ int wh_Lock_Init(whLock* lock, const whLockConfig* config) /* Allow NULL config for single-threaded mode (no-op locking) */ if ((config == NULL) || (config->cb == NULL)) { - lock->cb = NULL; - lock->context = NULL; + lock->cb = NULL; + lock->context = NULL; lock->initialized = 1; /* Mark as initialized even in no-op mode */ return WH_ERROR_OK; } diff --git a/src/wh_server_cert.c b/src/wh_server_cert.c index 5b00e18fa..5cc9b77e3 100644 --- a/src/wh_server_cert.c +++ b/src/wh_server_cert.c @@ -419,8 +419,9 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic, rc = wh_Server_CertAddTrusted( server, req.id, req.access, req.flags, req.label, WH_NVM_LABEL_LEN, cert_data, req.cert_len); + (void)WH_SERVER_NVM_UNLOCK(server); - } + } /* WH_SERVER_NVM_LOCK() */ resp.rc = rc; /* Convert the response struct */ @@ -441,8 +442,9 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic, rc = WH_SERVER_NVM_LOCK(server); if (rc == WH_ERROR_OK) { rc = wh_Server_CertEraseTrusted(server, req.id); + (void)WH_SERVER_NVM_UNLOCK(server); - } + } /* WH_SERVER_NVM_LOCK() */ resp.rc = rc; /* Convert the response struct */ @@ -488,8 +490,9 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic, resp.cert_len = cert_len; } } + (void)WH_SERVER_NVM_UNLOCK(server); - } + } /* WH_SERVER_NVM_LOCK() */ resp.rc = rc; /* Convert the response struct */ @@ -529,8 +532,9 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic, /* Propagate the keyId back to the client with flags * preserved */ resp.keyId = wh_KeyId_TranslateToClient(keyId); + (void)WH_SERVER_NVM_UNLOCK(server); - } + } /* WH_SERVER_NVM_LOCK() */ resp.rc = rc; } @@ -570,7 +574,7 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic, WH_NVM_LABEL_LEN, cert_data, req.cert_len); (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + } /* WH_SERVER_NVM_LOCK() */ } if (resp.rc == WH_ERROR_OK) { /* Post-process client address */ @@ -620,12 +624,12 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic, /* Clamp cert_len to actual stored length */ cert_len = req.cert_len; resp.rc = wh_Server_CertReadTrusted( - server, req.id, cert_data, &cert_len); + server, req.id, cert_data, &cert_len); } } (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + } /* WH_SERVER_NVM_LOCK() */ } if (resp.rc == WH_ERROR_OK) { /* Post-process client address */ @@ -677,7 +681,7 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic, resp.keyId = wh_KeyId_TranslateToClient(keyId); 
(void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + } /* WH_SERVER_NVM_LOCK() */ } if (resp.rc == WH_ERROR_OK) { /* Post-process client address */ @@ -712,7 +716,7 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic, req.trustedRootNvmId); (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + } /* WH_SERVER_NVM_LOCK() */ /* Signature confirmation error is not an error for the server, so * propagate this error to the client in the response, otherwise @@ -760,7 +764,7 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic, server, cert_data, req.cert_len, req.trustedRootNvmId); (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + } /* WH_SERVER_NVM_LOCK() */ /* Signature confirmation error is not an error for the server, * so propagate this error to the client in the response, diff --git a/src/wh_server_counter.c b/src/wh_server_counter.c index f883dbdfb..7bd1d1af2 100644 --- a/src/wh_server_counter.c +++ b/src/wh_server_counter.c @@ -84,7 +84,7 @@ int wh_Server_HandleCounter(whServerContext* server, uint16_t magic, } (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + } /* WH_SERVER_NVM_LOCK() */ resp.rc = ret; (void)wh_MessageCounter_TranslateInitResponse( @@ -140,7 +140,7 @@ int wh_Server_HandleCounter(whServerContext* server, uint16_t magic, } (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + } /* WH_SERVER_NVM_LOCK() */ resp.rc = ret; (void)wh_MessageCounter_TranslateIncrementResponse( @@ -181,7 +181,7 @@ int wh_Server_HandleCounter(whServerContext* server, uint16_t magic, } (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + } /* WH_SERVER_NVM_LOCK() */ resp.rc = ret; (void)wh_MessageCounter_TranslateReadResponse( @@ -216,7 +216,7 @@ int wh_Server_HandleCounter(whServerContext* server, uint16_t magic, ret = wh_Nvm_DestroyObjects(server->nvm, 1, &counterId); (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + } /* WH_SERVER_NVM_LOCK() */ resp.rc = ret; (void)wh_MessageCounter_TranslateDestroyResponse( diff --git a/src/wh_server_keystore.c b/src/wh_server_keystore.c index dbf62c33b..72e45fedf 100644 --- a/src/wh_server_keystore.c +++ b/src/wh_server_keystore.c @@ -1671,13 +1671,11 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, if (ret == WH_ERROR_OK) { /* get a new id if one wasn't provided */ if (WH_KEYID_ISERASED(meta->id)) { - ret = wh_Server_KeystoreGetUniqueId(server, &meta->id); - resp.rc = ret; + ret = wh_Server_KeystoreGetUniqueId(server, &meta->id); } /* write the key */ if (ret == WH_ERROR_OK) { ret = wh_Server_KeystoreCacheKeyChecked(server, meta, in); - resp.rc = ret; } if (ret == WH_ERROR_OK) { /* Translate server keyId back to client format with flags @@ -1686,10 +1684,8 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, } (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ - else { - resp.rc = ret; - } + } /* WH_SERVER_NVM_LOCK() */ + resp.rc = ret; (void)wh_MessageKeystore_TranslateCacheResponse( magic, &resp, (whMessageKeystore_CacheResponse*)resp_packet); @@ -1723,15 +1719,13 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, if (ret == WH_ERROR_OK) { /* get a new id if one wasn't provided */ if (WH_KEYID_ISERASED(meta->id)) { - ret = wh_Server_KeystoreGetUniqueId(server, &meta->id); - resp.rc = ret; + 
ret = wh_Server_KeystoreGetUniqueId(server, &meta->id); } /* write the key using DMA */ if (ret == WH_ERROR_OK) { - ret = wh_Server_KeystoreCacheKeyDmaChecked(server, meta, - req.key.addr); - resp.rc = ret; + ret = wh_Server_KeystoreCacheKeyDmaChecked(server, meta, + req.key.addr); /* propagate bad address to client if DMA operation failed */ if (ret != WH_ERROR_OK) { @@ -1747,10 +1741,8 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, } (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ - else { - resp.rc = ret; - } + } /* WH_SERVER_NVM_LOCK() */ + resp.rc = ret; (void)wh_MessageKeystore_TranslateCacheDmaResponse( magic, &resp, (whMessageKeystore_CacheDmaResponse*)resp_packet); @@ -1773,7 +1765,6 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, wh_KeyId_TranslateFromClient( WH_KEYTYPE_CRYPTO, server->comm->client_id, req.id), req.key.addr, req.key.sz, meta); - resp.rc = ret; /* propagate bad address to client if DMA operation failed */ if (ret != WH_ERROR_OK) { @@ -1787,10 +1778,8 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, } (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ - else { - resp.rc = ret; - } + } /* WH_SERVER_NVM_LOCK() */ + resp.rc = ret; (void)wh_MessageKeystore_TranslateExportDmaResponse( magic, &resp, @@ -1813,14 +1802,11 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, server, wh_KeyId_TranslateFromClient( WH_KEYTYPE_CRYPTO, server->comm->client_id, req.id)); - resp.rc = ret; resp.ok = 0; /* unused */ (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ - else { - resp.rc = ret; - } + } /* WH_SERVER_NVM_LOCK() */ + resp.rc = ret; (void)wh_MessageKeystore_TranslateEvictResponse( magic, &resp, (whMessageKeystore_EvictResponse*)resp_packet); @@ -1840,7 +1826,8 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, out = (uint8_t*)resp_packet + sizeof(resp); keySz = WOLFHSM_CFG_COMM_DATA_LEN - sizeof(resp); - ret = WH_SERVER_NVM_LOCK(server); + resp.len = 0; + ret = WH_SERVER_NVM_LOCK(server); if (ret == WH_ERROR_OK) { /* read the key */ ret = wh_Server_KeystoreReadKeyChecked( @@ -1849,23 +1836,15 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, WH_KEYTYPE_CRYPTO, server->comm->client_id, req.id), meta, out, &keySz); - resp.rc = ret; - /* Only provide key output if no error */ if (ret == WH_ERROR_OK) { resp.len = keySz; } - else { - resp.len = 0; - } memcpy(resp.label, meta->label, sizeof(meta->label)); (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ - else { - resp.rc = ret; - resp.len = 0; - } + } /* WH_SERVER_NVM_LOCK() */ + resp.rc = ret; (void)wh_MessageKeystore_TranslateExportResponse( magic, &resp, (whMessageKeystore_ExportResponse*)resp_packet); @@ -1887,14 +1866,11 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, server, wh_KeyId_TranslateFromClient( WH_KEYTYPE_CRYPTO, server->comm->client_id, req.id)); - resp.rc = ret; resp.ok = 0; /* unused */ (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ - else { - resp.rc = ret; - } + } /* WH_SERVER_NVM_LOCK() */ + resp.rc = ret; (void)wh_MessageKeystore_TranslateCommitResponse( magic, &resp, (whMessageKeystore_CommitResponse*)resp_packet); @@ -1916,14 +1892,11 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, server, wh_KeyId_TranslateFromClient( WH_KEYTYPE_CRYPTO, 
server->comm->client_id, req.id)); - resp.rc = ret; resp.ok = 0; /* unused */ (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ - else { - resp.rc = ret; - } + } /* WH_SERVER_NVM_LOCK() */ + resp.rc = ret; (void)wh_MessageKeystore_TranslateEraseResponse( magic, &resp, (whMessageKeystore_EraseResponse*)resp_packet); @@ -1938,20 +1911,16 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, (void)wh_MessageKeystore_TranslateRevokeRequest( magic, (whMessageKeystore_RevokeRequest*)req_packet, &req); - ret = WH_SERVER_NVM_LOCK(server); + ret = WH_SERVER_NVM_LOCK(server); + resp.rc = ret; if (ret == WH_ERROR_OK) { - ret = wh_Server_KeystoreRevokeKey( + resp.rc = wh_Server_KeystoreRevokeKey( server, wh_KeyId_TranslateFromClient( WH_KEYTYPE_CRYPTO, server->comm->client_id, req.id)); - resp.rc = ret; - ret = WH_ERROR_OK; (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ - else { - resp.rc = ret; - } + } /* WH_SERVER_NVM_LOCK() */ (void)wh_MessageKeystore_TranslateRevokeResponse( magic, &resp, (whMessageKeystore_RevokeResponse*)resp_packet); @@ -1995,13 +1964,10 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, ret = _HandleKeyWrapRequest(server, &wrapReq, reqData, reqDataSz, &wrapResp, respData, respDataSz); - wrapResp.rc = ret; (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ - else { - wrapResp.rc = ret; - } + } /* WH_SERVER_NVM_LOCK() */ + wrapResp.rc = ret; (void)wh_MessageKeystore_TranslateKeyWrapResponse(magic, &wrapResp, resp_packet); @@ -2044,13 +2010,10 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, ret = _HandleKeyUnwrapAndExportRequest( server, &unwrapReq, reqData, reqDataSz, &unwrapResp, respData, respDataSz); - unwrapResp.rc = ret; (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ - else { - unwrapResp.rc = ret; - } + } /* WH_SERVER_NVM_LOCK() */ + unwrapResp.rc = ret; (void)wh_MessageKeystore_TranslateKeyUnwrapAndExportResponse( magic, &unwrapResp, resp_packet); @@ -2090,13 +2053,10 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, ret = _HandleKeyUnwrapAndCacheRequest( server, &cacheReq, reqData, reqDataSz, &cacheResp, respData, respDataSz); - cacheResp.rc = ret; (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ - else { - cacheResp.rc = ret; - } + } /* WH_SERVER_NVM_LOCK() */ + cacheResp.rc = ret; (void)wh_MessageKeystore_TranslateKeyUnwrapAndCacheResponse( magic, &cacheResp, resp_packet); @@ -2140,13 +2100,10 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, ret = _HandleDataWrapRequest(server, &wrapReq, reqData, reqDataSz, &wrapResp, respData, respDataSz); - wrapResp.rc = ret; (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ - else { - wrapResp.rc = ret; - } + } /* WH_SERVER_NVM_LOCK() */ + wrapResp.rc = ret; (void)wh_MessageKeystore_TranslateDataWrapResponse(magic, &wrapResp, resp_packet); @@ -2190,13 +2147,10 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, ret = _HandleDataUnwrapRequest(server, &unwrapReq, reqData, reqDataSz, &unwrapResp, respData, respDataSz); - unwrapResp.rc = ret; (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ - else { - unwrapResp.rc = ret; - } + } /* WH_SERVER_NVM_LOCK() */ + unwrapResp.rc = ret; (void)wh_MessageKeystore_TranslateDataUnwrapResponse( magic, &unwrapResp, resp_packet); 
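All of the keystore cases above now share one locking shape: take the server NVM lock, run the existing *Checked operation, release the lock inside the success branch, and record a single result code afterwards. A minimal sketch of that shape follows; _ExampleLockedHandler, _DoCheckedOperation, and the out_rc parameter are illustrative placeholders rather than symbols from this patch, and the real handlers additionally translate and size their response packets.

#include <stdint.h>
#include "wolfhsm/wh_error.h"
#include "wolfhsm/wh_server.h" /* WH_SERVER_NVM_LOCK/UNLOCK, whServerContext */

/* Stand-in for any wh_Server_Keystore*Checked() call wrapped by the
 * handlers above; the name and trivial body are illustrative only. */
static int _DoCheckedOperation(whServerContext* server)
{
    (void)server;
    return WH_ERROR_OK;
}

static int _ExampleLockedHandler(whServerContext* server, int32_t* out_rc)
{
    int ret;

    /* Serialize access to NVM and the global key cache for the whole
     * request. With WOLFHSM_CFG_THREADSAFE disabled the macros expand to
     * WH_ERROR_OK and no lock is taken. */
    ret = WH_SERVER_NVM_LOCK(server);
    if (ret == WH_ERROR_OK) {
        ret = _DoCheckedOperation(server);

        /* Always release before building the response */
        (void)WH_SERVER_NVM_UNLOCK(server);
    } /* WH_SERVER_NVM_LOCK() */

    /* One assignment covers both the lock-failure and operation paths;
     * this stands in for the resp.rc field the real handlers populate. */
    *out_rc = ret;
    return WH_ERROR_OK;
}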
diff --git a/src/wh_server_nvm.c b/src/wh_server_nvm.c index 5edb77b9f..eb6280abf 100644 --- a/src/wh_server_nvm.c +++ b/src/wh_server_nvm.c @@ -156,14 +156,12 @@ int wh_Server_HandleNvmRequest(whServerContext* server, rc = WH_SERVER_NVM_LOCK(server); if (rc == WH_ERROR_OK) { /* Process the list action */ - resp.rc = wh_Nvm_List(server->nvm, req.access, req.flags, - req.startId, &resp.count, &resp.id); + rc = wh_Nvm_List(server->nvm, req.access, req.flags, + req.startId, &resp.count, &resp.id); (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ - else { - resp.rc = rc; - } + } /* WH_SERVER_NVM_LOCK() */ + resp.rc = rc; } /* Convert the response struct */ wh_MessageNvm_TranslateListResponse(magic, @@ -183,15 +181,13 @@ int wh_Server_HandleNvmRequest(whServerContext* server, rc = WH_SERVER_NVM_LOCK(server); if (rc == WH_ERROR_OK) { /* Process the available action */ - resp.rc = wh_Nvm_GetAvailable( + rc = wh_Nvm_GetAvailable( server->nvm, &resp.avail_size, &resp.avail_objects, &resp.reclaim_size, &resp.reclaim_objects); (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ - else { - resp.rc = rc; - } + } /* WH_SERVER_NVM_LOCK() */ + resp.rc = rc; } /* Convert the response struct */ wh_MessageNvm_TranslateGetAvailableResponse(magic, @@ -216,9 +212,9 @@ int wh_Server_HandleNvmRequest(whServerContext* server, rc = WH_SERVER_NVM_LOCK(server); if (rc == WH_ERROR_OK) { /* Process the getmetadata action */ - resp.rc = wh_Nvm_GetMetadata(server->nvm, req.id, &meta); + rc = wh_Nvm_GetMetadata(server->nvm, req.id, &meta); - if (resp.rc == 0) { + if (rc == 0) { resp.id = meta.id; resp.access = meta.access; resp.flags = meta.flags; @@ -227,10 +223,8 @@ int wh_Server_HandleNvmRequest(whServerContext* server, } (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ - else { - resp.rc = rc; - } + } /* WH_SERVER_NVM_LOCK() */ + resp.rc = rc; } /* Convert the response struct */ wh_MessageNvm_TranslateGetMetadataResponse(magic, @@ -263,21 +257,19 @@ int wh_Server_HandleNvmRequest(whServerContext* server, rc = WH_SERVER_NVM_LOCK(server); if (rc == WH_ERROR_OK) { - resp.rc = wh_Nvm_AddObjectChecked(server->nvm, &meta, - req.len, data); + rc = wh_Nvm_AddObjectChecked(server->nvm, &meta, req.len, + data); (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ - else { - resp.rc = rc; - } + } /* WH_SERVER_NVM_LOCK() */ + resp.rc = rc; } } /* Convert the response struct */ wh_MessageNvm_TranslateSimpleResponse(magic, &resp, (whMessageNvm_SimpleResponse*)resp_packet); *out_resp_size = sizeof(resp); - }; break; + }; break; case WH_MESSAGE_NVM_ACTION_DESTROYOBJECTS: { @@ -296,14 +288,12 @@ int wh_Server_HandleNvmRequest(whServerContext* server, rc = WH_SERVER_NVM_LOCK(server); if (rc == WH_ERROR_OK) { /* Process the DestroyObjects action */ - resp.rc = wh_Nvm_DestroyObjectsChecked( - server->nvm, req.list_count, req.list); + rc = wh_Nvm_DestroyObjectsChecked(server->nvm, + req.list_count, req.list); (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ - else { - resp.rc = rc; - } + } /* WH_SERVER_NVM_LOCK() */ + resp.rc = rc; } else { /* Problem in transport or request */ @@ -335,17 +325,15 @@ int wh_Server_HandleNvmRequest(whServerContext* server, rc = WH_SERVER_NVM_LOCK(server); if (rc == WH_ERROR_OK) { - resp.rc = _HandleNvmRead(server, data, req.offset, req.data_len, - &req.data_len, req.id); - if (resp.rc == WH_ERROR_OK) { + rc = _HandleNvmRead(server, data, req.offset, 
req.data_len, + &req.data_len, req.id); + if (rc == WH_ERROR_OK) { data_len = req.data_len; } (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ - else { - resp.rc = rc; - } + } /* WH_SERVER_NVM_LOCK() */ + resp.rc = rc; } /* Convert the response struct */ wh_MessageNvm_TranslateReadResponse( @@ -385,15 +373,13 @@ int wh_Server_HandleNvmRequest(whServerContext* server, rc = WH_SERVER_NVM_LOCK(server); if (rc == WH_ERROR_OK) { /* Process the AddObject action */ - resp.rc = wh_Nvm_AddObjectChecked( + rc = wh_Nvm_AddObjectChecked( server->nvm, (whNvmMetadata*)metadata, req.data_len, (const uint8_t*)data); (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ - else { - resp.rc = rc; - } + } /* WH_SERVER_NVM_LOCK() */ + resp.rc = rc; } if (resp.rc == 0) { /* perform platform-specific host address processing */ @@ -431,15 +417,15 @@ int wh_Server_HandleNvmRequest(whServerContext* server, rc = WH_SERVER_NVM_LOCK(server); if (rc == WH_ERROR_OK) { - resp.rc = wh_Nvm_GetMetadata(server->nvm, req.id, &meta); + rc = wh_Nvm_GetMetadata(server->nvm, req.id, &meta); - if (resp.rc == 0) { + if (rc == 0) { if (req.offset >= meta.len) { - resp.rc = WH_ERROR_BADARGS; + rc = WH_ERROR_BADARGS; } } - if (resp.rc == 0) { + if (rc == 0) { read_len = req.data_len; /* Clamp length to object size */ if ((req.offset + read_len) > meta.len) { @@ -449,30 +435,27 @@ int wh_Server_HandleNvmRequest(whServerContext* server, /* use unclamped length for DMA address processing in case DMA * callbacks are sensible to alignment and/or size */ - if (resp.rc == 0) { + if (rc == 0) { /* perform platform-specific host address processing */ - resp.rc = wh_Server_DmaProcessClientAddress( + rc = wh_Server_DmaProcessClientAddress( server, req.data_hostaddr, &data, req.data_len, WH_DMA_OPER_CLIENT_WRITE_PRE, (whServerDmaFlags){0}); } - if (resp.rc == 0) { + if (rc == 0) { /* Process the Read action */ - resp.rc = - wh_Nvm_ReadChecked(server->nvm, req.id, req.offset, - read_len, (uint8_t*)data); + rc = wh_Nvm_ReadChecked(server->nvm, req.id, req.offset, + read_len, (uint8_t*)data); } - if (resp.rc == 0) { + if (rc == 0) { /* perform platform-specific host address processing */ - resp.rc = wh_Server_DmaProcessClientAddress( + rc = wh_Server_DmaProcessClientAddress( server, req.data_hostaddr, &data, req.data_len, WH_DMA_OPER_CLIENT_WRITE_POST, (whServerDmaFlags){0}); } (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ - else { - resp.rc = rc; - } + } /* WH_SERVER_NVM_LOCK() */ + resp.rc = rc; } /* Convert the response struct */ wh_MessageNvm_TranslateSimpleResponse(magic, diff --git a/src/wh_server_she.c b/src/wh_server_she.c index a3382170d..633dfc4b2 100644 --- a/src/wh_server_she.c +++ b/src/wh_server_she.c @@ -1613,7 +1613,7 @@ int wh_Server_HandleSheRequest(whServerContext* server, uint16_t magic, ret = _SecureBootInit(server, magic, req_size, req_packet, out_resp_size, resp_packet); (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + } /* WH_SERVER_NVM_LOCK() */ break; case WH_SHE_SECURE_BOOT_UPDATE: ret = _SecureBootUpdate(server, magic, req_size, req_packet, @@ -1625,7 +1625,7 @@ int wh_Server_HandleSheRequest(whServerContext* server, uint16_t magic, ret = _SecureBootFinish(server, magic, req_size, req_packet, out_resp_size, resp_packet); (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + } /* WH_SERVER_NVM_LOCK() */ break; case WH_SHE_GET_STATUS: ret = _GetStatus(server, 
magic, req_size, req_packet, out_resp_size, @@ -1637,7 +1637,7 @@ int wh_Server_HandleSheRequest(whServerContext* server, uint16_t magic, ret = _LoadKey(server, magic, req_size, req_packet, out_resp_size, resp_packet); (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + } /* WH_SERVER_NVM_LOCK() */ break; case WH_SHE_LOAD_PLAIN_KEY: ret = WH_SERVER_NVM_LOCK(server); @@ -1645,7 +1645,7 @@ int wh_Server_HandleSheRequest(whServerContext* server, uint16_t magic, ret = _LoadPlainKey(server, magic, req_size, req_packet, out_resp_size, resp_packet); (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + } /* WH_SERVER_NVM_LOCK() */ break; case WH_SHE_EXPORT_RAM_KEY: ret = WH_SERVER_NVM_LOCK(server); @@ -1653,7 +1653,7 @@ int wh_Server_HandleSheRequest(whServerContext* server, uint16_t magic, ret = _ExportRamKey(server, magic, req_size, req_packet, out_resp_size, resp_packet); (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + } /* WH_SERVER_NVM_LOCK() */ break; case WH_SHE_INIT_RND: ret = WH_SERVER_NVM_LOCK(server); @@ -1661,7 +1661,7 @@ int wh_Server_HandleSheRequest(whServerContext* server, uint16_t magic, ret = _InitRnd(server, magic, req_size, req_packet, out_resp_size, resp_packet); (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + } /* WH_SERVER_NVM_LOCK() */ break; case WH_SHE_RND: ret = _Rnd(server, magic, req_size, req_packet, out_resp_size, @@ -1673,7 +1673,7 @@ int wh_Server_HandleSheRequest(whServerContext* server, uint16_t magic, ret = _ExtendSeed(server, magic, req_size, req_packet, out_resp_size, resp_packet); (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + } /* WH_SERVER_NVM_LOCK() */ break; case WH_SHE_ENC_ECB: ret = WH_SERVER_NVM_LOCK(server); @@ -1681,7 +1681,7 @@ int wh_Server_HandleSheRequest(whServerContext* server, uint16_t magic, ret = _EncEcb(server, magic, req_size, req_packet, out_resp_size, resp_packet); (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + } /* WH_SERVER_NVM_LOCK() */ break; case WH_SHE_ENC_CBC: ret = WH_SERVER_NVM_LOCK(server); @@ -1689,7 +1689,7 @@ int wh_Server_HandleSheRequest(whServerContext* server, uint16_t magic, ret = _EncCbc(server, magic, req_size, req_packet, out_resp_size, resp_packet); (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + } /* WH_SERVER_NVM_LOCK() */ break; case WH_SHE_DEC_ECB: ret = WH_SERVER_NVM_LOCK(server); @@ -1697,7 +1697,7 @@ int wh_Server_HandleSheRequest(whServerContext* server, uint16_t magic, ret = _DecEcb(server, magic, req_size, req_packet, out_resp_size, resp_packet); (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + } /* WH_SERVER_NVM_LOCK() */ break; case WH_SHE_DEC_CBC: ret = WH_SERVER_NVM_LOCK(server); @@ -1705,7 +1705,7 @@ int wh_Server_HandleSheRequest(whServerContext* server, uint16_t magic, ret = _DecCbc(server, magic, req_size, req_packet, out_resp_size, resp_packet); (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + } /* WH_SERVER_NVM_LOCK() */ break; case WH_SHE_GEN_MAC: ret = WH_SERVER_NVM_LOCK(server); @@ -1713,7 +1713,7 @@ int wh_Server_HandleSheRequest(whServerContext* server, uint16_t magic, ret = _GenerateMac(server, magic, req_size, req_packet, out_resp_size, resp_packet); (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + } /* WH_SERVER_NVM_LOCK() */ break; case 
WH_SHE_VERIFY_MAC: ret = WH_SERVER_NVM_LOCK(server); @@ -1721,7 +1721,7 @@ int wh_Server_HandleSheRequest(whServerContext* server, uint16_t magic, ret = _VerifyMac(server, magic, req_size, req_packet, out_resp_size, resp_packet); (void)WH_SERVER_NVM_UNLOCK(server); - } /* WH_SERVER_NVM_LOCK() == WH_ERROR_OK */ + } /* WH_SERVER_NVM_LOCK() */ break; default: ret = WH_ERROR_BADARGS; diff --git a/test/Makefile b/test/Makefile index e2ac499cb..259218045 100644 --- a/test/Makefile +++ b/test/Makefile @@ -146,6 +146,9 @@ endif # Support stress test mode (only runs thread safety stress test) ifeq ($(STRESS),1) + ifneq ($(THREADSAFE),1) + $(error Stress test requires building with THREADSAFE=1) + endif DEF += -DWOLFHSM_CFG_TEST_STRESS endif @@ -301,7 +304,11 @@ ifeq (,$(wildcard $(BUILD_DIR)/$(BIN).elf)) $(error $(BUILD_DIR)/$(BIN).elf not found. Try: make) else ifeq ($(TSAN),1) - TSAN_OPTIONS="suppressions=$(PROJECT_DIR)/tsan.supp" $(BUILD_DIR)/$(BIN).elf + # TSAN options: + # - fail fast on first data race detected + # - non-zero exit code if races are detected to ensure CI fails + # - use our custom suppressions file to ignore wolfCrypt and the shared memory transport + TSAN_OPTIONS="halt_on_error=1:exitcode=66:suppressions=$(PROJECT_DIR)/tsan.supp" $(BUILD_DIR)/$(BIN).elf else $(BUILD_DIR)/$(BIN).elf endif diff --git a/test/wh_test.c b/test/wh_test.c index 2d6d23ba5..3fd9a1bb5 100644 --- a/test/wh_test.c +++ b/test/wh_test.c @@ -261,7 +261,8 @@ int main(void) { int ret = 0; -#if defined(WOLFHSM_CFG_THREADSAFE) && defined(WOLFHSM_CFG_TEST_STRESS) +#if defined(WOLFHSM_CFG_THREADSAFE) && defined(WOLFHSM_CFG_TEST_STRESS) && \ + defined(WOLFHSM_CFG_TEST_POSIX) /* Stress test mode: only run thread safety stress test */ ret = whTest_ThreadSafeStress(); diff --git a/test/wh_test_lock.c b/test/wh_test_lock.c index 59ebf3410..6ba42c187 100644 --- a/test/wh_test_lock.c +++ b/test/wh_test_lock.c @@ -39,11 +39,12 @@ #include "wolfhsm/wh_nvm_flash.h" #include "wolfhsm/wh_flash_ramsim.h" +#ifdef WOLFHSM_CFG_TEST_POSIX #include "port/posix/posix_lock.h" - #define FLASH_RAM_SIZE (1024 * 1024) #define FLASH_SECTOR_SIZE (4096) #define FLASH_PAGE_SIZE (8) +#endif /* Test: Lock init/cleanup lifecycle */ static int testLockLifecycle(whLockConfig* lockConfig) @@ -147,8 +148,10 @@ static int testUninitializedLock(void) return WH_ERROR_OK; } -/* Test: NVM with lock config */ -static int testNvmWithLock(whLockConfig* lockConfig) +#ifdef WOLFHSM_CFG_TEST_POSIX +/* Test: NVM simulator with lock config.
Restrict to POSIX port test due to + * resource utilization */ +static int testNvmRamSimWithLock(whLockConfig* lockConfig) { /* Flash simulator */ uint8_t flashMemory[FLASH_RAM_SIZE]; @@ -220,6 +223,7 @@ static int testNvmWithLock(whLockConfig* lockConfig) WH_TEST_PRINT(" NVM with lock: PASS\n"); return WH_ERROR_OK; } +#endif /* WOLFHSM_CFG_TEST_POSIX */ int whTest_LockConfig(whLockConfig* lockConfig) { @@ -228,7 +232,9 @@ int whTest_LockConfig(whLockConfig* lockConfig) WH_TEST_RETURN_ON_FAIL(testLockLifecycle(lockConfig)); WH_TEST_RETURN_ON_FAIL(testNullConfigNoOp()); WH_TEST_RETURN_ON_FAIL(testUninitializedLock()); - WH_TEST_RETURN_ON_FAIL(testNvmWithLock(lockConfig)); +#ifdef WOLFHSM_CFG_TEST_POSIX + WH_TEST_RETURN_ON_FAIL(testNvmRamSimWithLock(lockConfig)); +#endif WH_TEST_PRINT("Lock tests PASSED\n"); return WH_ERROR_OK; diff --git a/test/wh_test_posix_threadsafe_stress.c b/test/wh_test_posix_threadsafe_stress.c index 4e009b97e..ad0c6de00 100644 --- a/test/wh_test_posix_threadsafe_stress.c +++ b/test/wh_test_posix_threadsafe_stress.c @@ -2420,19 +2420,6 @@ int whTest_ThreadSafeStress(void) return testResult; } -#else /* !WOLFHSM_CFG_THREADSAFE || !WOLFHSM_CFG_TEST_POSIX || \ - !WOLFHSM_CFG_GLOBAL_KEYS || !WOLFHSM_CFG_ENABLE_CLIENT || \ - !WOLFHSM_CFG_ENABLE_SERVER || WOLFHSM_CFG_NO_CRYPTO */ - -#include "wh_test_posix_threadsafe_stress.h" -#include "wh_test_common.h" - -int whTest_ThreadSafeStress(void) -{ - WH_TEST_PRINT("Thread safety stress test skipped "); - WH_TEST_PRINT("(requires THREADSAFE + TEST_POSIX + GLOBAL_KEYS + "); - WH_TEST_PRINT("ENABLE_CLIENT + ENABLE_SERVER + crypto)\n"); - return 0; -} - -#endif /* WOLFHSM_CFG_THREADSAFE && WOLFHSM_CFG_TEST_POSIX && ... */ +#endif /* !WOLFHSM_CFG_THREADSAFE || !WOLFHSM_CFG_TEST_POSIX || \ + !WOLFHSM_CFG_GLOBAL_KEYS || !WOLFHSM_CFG_ENABLE_CLIENT || \ + !WOLFHSM_CFG_ENABLE_SERVER || WOLFHSM_CFG_NO_CRYPTO */ From d7a4566cae2ee2020a4c85e6bb859f1e824a4870 Mon Sep 17 00:00:00 2001 From: Brett Nicholas <7547222+bigbrett@users.noreply.github.com> Date: Wed, 28 Jan 2026 16:53:02 -0700 Subject: [PATCH 06/13] update wh_nvm.h doxygen --- wolfhsm/wh_nvm.h | 463 +++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 391 insertions(+), 72 deletions(-) diff --git a/wolfhsm/wh_nvm.h b/wolfhsm/wh_nvm.h index 0d6945229..b06c16018 100644 --- a/wolfhsm/wh_nvm.h +++ b/wolfhsm/wh_nvm.h @@ -16,21 +16,36 @@ * You should have received a copy of the GNU General Public License * along with wolfHSM. If not, see . */ -/* - * wolfhsm/wh_nvm.h +/** + * @file wolfhsm/wh_nvm.h + * + * @brief Non-Volatile Memory (NVM) object management interface. + * + * This library provides management of NVM objects with basic metadata + * association to blocks of data. The backend storage is expected to have + * flash-style semantics with Read, Erase, and Program hooks. + * + * This library provides reliable, atomic operations (recoverable) to ensure + * transactions are fully committed prior to returning success. Initial + * indexing and handling of incomplete transactions are allowed to take longer + * than ordinary runtime function calls. + * + * NVM objects are added with a fixed length and data. Removal of objects + * causes the backend to replicate the entire partition without the listed + * objects present, which also maximizes the contiguous free space. * - * Abstract library to provide management of NVM objects providing basic - * metadata association with blocks of data. 
The backend storage is expected - * to have flash-style semantics with Read, Erase, and Program hooks. + * ## Thread Safety * - * This library is expected to provide reliable, atomic operations (recoverable) - * to ensure transactions are fully committed prior to returning success. - * Initial indexing and handling of incomplete transactions are allowed to take - * longer than ordinary runtime function calls. + * When built with `WOLFHSM_CFG_THREADSAFE`, the NVM context contains an + * embedded lock. The lock lifecycle is managed automatically: + * - wh_Nvm_Init() initializes the lock + * - wh_Nvm_Cleanup() cleans up the lock * - * NVM objects are added with a fixed length and data. Removal of objects - * causes the backend to replicate the entire partition without the - * listed objects present, which also maximizes the contiguous free space. + * However, callers are responsible for acquiring and releasing the lock + * around NVM operations using the WH_NVM_LOCK/WH_NVM_UNLOCK helper macros, + * which transparently account for build-time locking support. The NVM API + * functions themselves do NOT acquire the lock internally, allowing callers to + * group multiple operations under a single lock acquisition. * */ @@ -46,114 +61,418 @@ #include "wolfhsm/wh_keycache.h" /* For whKeyCacheContext */ #include "wolfhsm/wh_lock.h" +/** + * @brief NVM backend callback table. + * + * Platforms provide implementations of these callbacks to interface with + * the underlying non-volatile storage hardware. All callbacks receive a + * platform-specific context pointer. + */ typedef struct { - int (*Init)(void* context, const void *config); + /** Initialize the NVM backend */ + int (*Init)(void* context, const void* config); + + /** Cleanup the NVM backend */ int (*Cleanup)(void* context); - /* Retrieve the current free space, or the maximum data object length that can - * be successfully created and the number of free entries in the directory. - * Also get the sizes that could be reclaimed if the partition was regenerated: - * wh_Nvm_DestroyObjects(c, 0, NULL); - * Any out_ parameters may be NULL without error. */ - int (*GetAvailable)(void* context, - uint32_t *out_avail_size, whNvmId *out_avail_objects, - uint32_t *out_reclaim_size, whNvmId *out_reclaim_objects); - - /* Add a new object. Duplicate ids are allowed, but only the most recent - * version will be accessible. */ - int (*AddObject)(void* context, whNvmMetadata *meta, - whNvmSize data_len, const uint8_t* data); - - /* Retrieve the next matching id starting at start_id. Sets out_count to the - * total number of id's that match access and flags. */ + /** + * Retrieve the current free space, or the maximum data object length that + * can be successfully created and the number of free entries in the + * directory. Also get the sizes that could be reclaimed if the partition + * was regenerated via wh_Nvm_DestroyObjects(c, 0, NULL). Any out_ + * parameters may be NULL without error. + */ + int (*GetAvailable)(void* context, uint32_t* out_avail_size, + whNvmId* out_avail_objects, uint32_t* out_reclaim_size, + whNvmId* out_reclaim_objects); + + /** + * Add a new object. Duplicate IDs are allowed, but only the most recent + * version will be accessible. + */ + int (*AddObject)(void* context, whNvmMetadata* meta, whNvmSize data_len, + const uint8_t* data); + + /** + * Retrieve the next matching ID starting at start_id. Sets out_count to + * the total number of IDs that match access and flags. 
+ */ int (*List)(void* context, whNvmAccess access, whNvmFlags flags, - whNvmId start_id, whNvmId *out_count, whNvmId *out_id); - - /* Retrieve object metadata using the id */ - int (*GetMetadata)(void* context, whNvmId id, - whNvmMetadata* meta); - - /* Destroy a list of objects by replicating the current state without the id's - * in the provided list. Id's in the list that are not present do not cause an - * error. Atomically: erase the inactive partition, add all remaining objects, - * switch the active partition, and erase the old active (now inactive) - * partition. Interruption prior completing the write of the new partition will - * recover as before the replication. Interruption after the new partition is - * fully populated will recover as after, including restarting erasure. */ + whNvmId start_id, whNvmId* out_count, whNvmId* out_id); + + /** Retrieve object metadata using the ID */ + int (*GetMetadata)(void* context, whNvmId id, whNvmMetadata* meta); + + /** + * Destroy a list of objects by replicating the current state without the + * IDs in the provided list. IDs in the list that are not present do not + * cause an error. Atomically: erase the inactive partition, add all + * remaining objects, switch the active partition, and erase the old active + * (now inactive) partition. Interruption prior to completing the write of + * the new partition will recover as before the replication. Interruption + * after the new partition is fully populated will recover as after, + * including restarting erasure. + */ int (*DestroyObjects)(void* context, whNvmId list_count, - const whNvmId* id_list); + const whNvmId* id_list); - /* Read the data of the object starting at the byte offset */ + /** Read the data of the object starting at the byte offset */ int (*Read)(void* context, whNvmId id, whNvmSize offset, whNvmSize data_len, uint8_t* data); } whNvmCb; -/** NVM Context helper structs and functions */ -/* Simple helper context structure associated with an NVM instance */ +/** + * @brief NVM context structure. + * + * Holds the state for an NVM instance including the backend callbacks, + * platform-specific context, and optional thread synchronization lock. + * + * When `WOLFHSM_CFG_GLOBAL_KEYS` is enabled, also contains the global key + * cache for keys shared across all clients. + */ typedef struct whNvmContext_t { - whNvmCb *cb; - void* context; + whNvmCb* cb; /**< Backend callback table */ + void* context; /**< Platform-specific backend context */ #if !defined(WOLFHSM_CFG_NO_CRYPTO) && defined(WOLFHSM_CFG_GLOBAL_KEYS) - whKeyCacheContext globalCache; /* Global key cache */ + whKeyCacheContext globalCache; /**< Global key cache (shared keys) */ #endif #ifdef WOLFHSM_CFG_THREADSAFE - whLock lock; /* Lock for NVM and global cache operations */ + whLock lock; /**< Lock for serializing NVM and global cache operations */ #endif } whNvmContext; -/* Simple helper configuration structure associated with an NVM instance */ +/** + * @brief NVM configuration structure. + * + * Used to initialize an NVM context with wh_Nvm_Init(). 
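+ *
+ * A minimal initialization sketch. The backend callback table, backend
+ * context/config, and lock config names below (myNvmBackendCb, etc.) are
+ * illustrative placeholders for a particular port, not wolfHSM-provided
+ * symbols:
+ * @code
+ * whNvmContext nvm = {0};
+ * whNvmConfig  cfg = {
+ *     .cb      = &myNvmBackendCb,   /* backend callback table */
+ *     .context = &myNvmBackendCtx,  /* backend-specific context */
+ *     .config  = &myNvmBackendCfg,  /* backend-specific configuration */
+ * #ifdef WOLFHSM_CFG_THREADSAFE
+ *     .lockConfig = &myLockConfig,  /* NULL selects no-op locking */
+ * #endif
+ * };
+ * (void)wh_Nvm_Init(&nvm, &cfg);
+ * @endcode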
+ */ typedef struct whNvmConfig_t { - whNvmCb *cb; - void* context; - void* config; + whNvmCb* cb; /**< Backend callback table */ + void* context; /**< Platform-specific backend context */ + void* config; /**< Backend-specific configuration */ #ifdef WOLFHSM_CFG_THREADSAFE - whLockConfig* lockConfig; + whLockConfig* + lockConfig; /**< Lock configuration (NULL for no-op locking) */ #endif } whNvmConfig; -int wh_Nvm_Init(whNvmContext* context, const whNvmConfig *config); +/** + * @brief Initializes an NVM context. + * + * This function initializes the NVM context with the provided configuration, + * including setting up the backend callbacks and platform-specific context. + * If crypto is enabled with global keys, the global key cache is also + * initialized. + * + * When built with `WOLFHSM_CFG_THREADSAFE`, this function initializes the + * embedded lock using the provided lock configuration. If lockConfig is NULL, + * locking is disabled (single-threaded mode). Note that while init/cleanup + * manage the lock lifecycle, the caller is responsible for explicitly + * acquiring and releasing the lock around NVM operations using wh_Nvm_Lock() + * and wh_Nvm_Unlock(). + * + * @param[in,out] context Pointer to the NVM context to initialize. + * Must not be NULL. + * @param[in] config Pointer to the NVM configuration. Must not be NULL. + * @return int WH_ERROR_OK on success. + * WH_ERROR_BADARGS if context or config is NULL. + * Other negative error codes on backend or lock initialization + * failure. + */ +int wh_Nvm_Init(whNvmContext* context, const whNvmConfig* config); + +/** + * @brief Cleans up an NVM context. + * + * This function cleans up the NVM context by calling the backend cleanup + * callback, clearing any global key cache if applicable, and zeroing the + * context structure. + * + * When built with `WOLFHSM_CFG_THREADSAFE`, this function also cleans up the + * embedded lock. Cleanup must only be called when no other threads are + * accessing the NVM context. Note that while init/cleanup manage the lock + * lifecycle, the caller is responsible for ensuring no threads hold the lock + * when cleanup is called. + * + * @param[in,out] context Pointer to the NVM context to cleanup. + * Must not be NULL and must have been initialized. + * @return int WH_ERROR_OK on success. + * WH_ERROR_BADARGS if context is NULL or not initialized. + * WH_ERROR_ABORTED if the backend cleanup callback is NULL. + * Other negative error codes on backend cleanup failure. + */ int wh_Nvm_Cleanup(whNvmContext* context); -int wh_Nvm_GetAvailable(whNvmContext* context, - uint32_t *out_avail_size, whNvmId *out_avail_objects, - uint32_t *out_reclaim_size, whNvmId *out_reclaim_objects); +/** + * @brief Retrieves available NVM space and object capacity. + * + * Gets the current free space (maximum data object length that can be created) + * and the number of free directory entries. Also retrieves the sizes that + * could be reclaimed if the partition was regenerated via + * wh_Nvm_DestroyObjects(context, 0, NULL). + * + * All output parameters may be NULL without error if that information is not + * needed. + * + * @param[in] context Pointer to the NVM context. Must not be NULL. + * @param[out] out_avail_size Current available data size in bytes (optional). + * @param[out] out_avail_objects Current available object slots (optional). + * @param[out] out_reclaim_size Reclaimable data size in bytes (optional). + * @param[out] out_reclaim_objects Reclaimable object slots (optional). 
+ * @return int WH_ERROR_OK on success. + * WH_ERROR_BADARGS if context is NULL or not initialized. + * WH_ERROR_ABORTED if the backend callback is NULL. + */ +int wh_Nvm_GetAvailable(whNvmContext* context, uint32_t* out_avail_size, + whNvmId* out_avail_objects, uint32_t* out_reclaim_size, + whNvmId* out_reclaim_objects); + +/** + * @brief Adds an object to NVM, reclaiming space if necessary. + * + * Attempts to add an object to NVM. If there is insufficient space, this + * function will attempt to reclaim space by calling wh_Nvm_DestroyObjects() + * with an empty list (which compacts the partition) before retrying. + * + * @param[in] context Pointer to the NVM context. Must not be NULL. + * @param[in,out] meta Pointer to the object metadata. Must not be NULL. + * @param[in] dataLen Length of the object data in bytes. + * @param[in] data Pointer to the object data. + * @return int WH_ERROR_OK on success. + * WH_ERROR_BADARGS if context is NULL. + * WH_ERROR_NOSPACE if insufficient space even after reclaim. + * Other negative error codes on backend failure. + */ +int wh_Nvm_AddObjectWithReclaim(whNvmContext* context, whNvmMetadata* meta, + whNvmSize dataLen, const uint8_t* data); -int wh_Nvm_AddObjectWithReclaim(whNvmContext* context, whNvmMetadata *meta, - whNvmSize dataLen, const uint8_t* data); +/** + * @brief Adds an object to NVM. + * + * Adds a new object with the specified metadata and data. Duplicate IDs are + * allowed, but only the most recent version will be accessible. + * + * @param[in] context Pointer to the NVM context. Must not be NULL. + * @param[in,out] meta Pointer to the object metadata. Must not be NULL. + * @param[in] data_len Length of the object data in bytes. + * @param[in] data Pointer to the object data. + * @return int WH_ERROR_OK on success. + * WH_ERROR_BADARGS if context is NULL or not initialized. + * WH_ERROR_ABORTED if the backend callback is NULL. + * Other negative error codes on backend failure. + */ +int wh_Nvm_AddObject(whNvmContext* context, whNvmMetadata* meta, + whNvmSize data_len, const uint8_t* data); -int wh_Nvm_AddObject(whNvmContext* context, whNvmMetadata *meta, - whNvmSize data_len, const uint8_t* data); +/** + * @brief Adds an object to NVM with policy checking. + * + * Same as wh_Nvm_AddObject(), but first checks if an existing object with the + * same ID has the WH_NVM_FLAGS_NONMODIFIABLE flag set. If so, returns an + * access error. + * + * @param[in] context Pointer to the NVM context. Must not be NULL. + * @param[in,out] meta Pointer to the object metadata. Must not be NULL. + * @param[in] data_len Length of the object data in bytes. + * @param[in] data Pointer to the object data. + * @return int WH_ERROR_OK on success. + * WH_ERROR_ACCESS if existing object is non-modifiable. + * WH_ERROR_BADARGS if context is NULL or not initialized. + * Other negative error codes on backend failure. + */ int wh_Nvm_AddObjectChecked(whNvmContext* context, whNvmMetadata* meta, whNvmSize data_len, const uint8_t* data); -int wh_Nvm_List(whNvmContext* context, - whNvmAccess access, whNvmFlags flags, whNvmId start_id, - whNvmId *out_count, whNvmId *out_id); +/** + * @brief Lists objects in NVM matching specified criteria. + * + * Retrieves the next matching object ID starting at start_id. Also sets + * out_count to the total number of IDs that match the access and flags + * criteria. + * + * @param[in] context Pointer to the NVM context. Must not be NULL. + * @param[in] access Access level filter for matching objects. 
+ * @param[in] flags Flags filter for matching objects. + * @param[in] start_id ID to start searching from. + * @param[out] out_count Total count of matching objects (optional). + * @param[out] out_id Next matching object ID (optional). + * @return int WH_ERROR_OK on success. + * WH_ERROR_BADARGS if context is NULL or not initialized. + * WH_ERROR_ABORTED if the backend callback is NULL. + * WH_ERROR_NOTFOUND if no matching objects found. + */ +int wh_Nvm_List(whNvmContext* context, whNvmAccess access, whNvmFlags flags, + whNvmId start_id, whNvmId* out_count, whNvmId* out_id); -int wh_Nvm_GetMetadata(whNvmContext* context, whNvmId id, - whNvmMetadata* meta); +/** + * @brief Retrieves metadata for an NVM object. + * + * Gets the metadata associated with the specified object ID. + * + * @param[in] context Pointer to the NVM context. Must not be NULL. + * @param[in] id ID of the object to retrieve metadata for. + * @param[out] meta Pointer to store the retrieved metadata. + * @return int WH_ERROR_OK on success. + * WH_ERROR_BADARGS if context is NULL or not initialized. + * WH_ERROR_ABORTED if the backend callback is NULL. + * WH_ERROR_NOTFOUND if the object does not exist. + */ +int wh_Nvm_GetMetadata(whNvmContext* context, whNvmId id, whNvmMetadata* meta); +/** + * @brief Destroys a list of objects from NVM. + * + * Destroys objects by replicating the current state without the IDs in the + * provided list. IDs in the list that are not present do not cause an error. + * + * The operation is atomic: erase the inactive partition, add all remaining + * objects, switch the active partition, and erase the old active (now + * inactive) partition. Interruption prior to completing the write of the new + * partition will recover as before the replication. Interruption after the + * new partition is fully populated will recover as after, including + * restarting erasure. + * + * If list_count is 0 and id_list is NULL, this function compacts the + * partition by replicating without removing any objects, which reclaims + * space from previously deleted objects. + * + * @param[in] context Pointer to the NVM context. Must not be NULL. + * @param[in] list_count Number of IDs in the list (0 for compaction only). + * @param[in] id_list Array of object IDs to destroy (NULL if list_count is 0). + * @return int WH_ERROR_OK on success. + * WH_ERROR_BADARGS if context is NULL or not initialized. + * WH_ERROR_ABORTED if the backend callback is NULL. + * Other negative error codes on backend failure. + */ int wh_Nvm_DestroyObjects(whNvmContext* context, whNvmId list_count, - const whNvmId* id_list); + const whNvmId* id_list); + +/** + * @brief Destroys a list of objects from NVM with policy checking. + * + * Same as wh_Nvm_DestroyObjects(), but first checks if any object in the list + * has the WH_NVM_FLAGS_NONMODIFIABLE or WH_NVM_FLAGS_NONDESTROYABLE flags set. + * If so, returns an access error without destroying any objects. + * + * @param[in] context Pointer to the NVM context. Must not be NULL. + * @param[in] list_count Number of IDs in the list. + * @param[in] id_list Array of object IDs to destroy. + * @return int WH_ERROR_OK on success. + * WH_ERROR_ACCESS if any object is non-modifiable or + * non-destroyable. + * WH_ERROR_BADARGS if context is NULL, not initialized, or + * id_list is NULL with non-zero list_count. + * Other negative error codes on backend failure. 
+ */ int wh_Nvm_DestroyObjectsChecked(whNvmContext* context, whNvmId list_count, const whNvmId* id_list); +/** + * @brief Reads data from an NVM object. + * + * Reads data from the specified object starting at the given byte offset. + * + * @param[in] context Pointer to the NVM context. Must not be NULL. + * @param[in] id ID of the object to read from. + * @param[in] offset Byte offset within the object to start reading. + * @param[in] data_len Number of bytes to read. + * @param[out] data Buffer to store the read data. + * @return int WH_ERROR_OK on success. + * WH_ERROR_BADARGS if context is NULL or not initialized. + * WH_ERROR_ABORTED if the backend callback is NULL. + * WH_ERROR_NOTFOUND if the object does not exist. + * Other negative error codes on backend failure. + */ int wh_Nvm_Read(whNvmContext* context, whNvmId id, whNvmSize offset, whNvmSize data_len, uint8_t* data); + +/** + * @brief Reads data from an NVM object with policy checking. + * + * Same as wh_Nvm_Read(), but first checks if the object has the + * WH_NVM_FLAGS_NONEXPORTABLE flag set. If so, returns an access error. + * + * @param[in] context Pointer to the NVM context. Must not be NULL. + * @param[in] id ID of the object to read from. + * @param[in] offset Byte offset within the object to start reading. + * @param[in] data_len Number of bytes to read. + * @param[out] data Buffer to store the read data. + * @return int WH_ERROR_OK on success. + * WH_ERROR_ACCESS if object is non-exportable. + * WH_ERROR_BADARGS if context is NULL or not initialized. + * WH_ERROR_NOTFOUND if the object does not exist. + * Other negative error codes on backend failure. + */ int wh_Nvm_ReadChecked(whNvmContext* context, whNvmId id, whNvmSize offset, whNvmSize data_len, uint8_t* data); -/** NVM Locking API for handler-level thread safety */ +/** + * @brief Thread-safe access to NVM resources. + * + * When built with `WOLFHSM_CFG_THREADSAFE`, callers must explicitly acquire + * and release the NVM lock around operations that access shared NVM state. + * The NVM API functions do NOT acquire locks internally, allowing callers + * to group multiple operations under a single lock acquisition for atomicity. + * + * Use the WH_NVM_LOCK() and WH_NVM_UNLOCK() macros for portable code that + * compiles correctly regardless of whether thread safety is enabled. + * + * Example usage: + * @code + * int ret; + * ret = WH_NVM_LOCK(nvm); + * if (ret == WH_ERROR_OK) { + * ret = wh_Nvm_AddObject(nvm, &meta, dataLen, data); + * (void)WH_NVM_UNLOCK(nvm); + * } + * @endcode + * @{ + */ + #ifdef WOLFHSM_CFG_THREADSAFE + +/** + * @brief Acquires the NVM lock. Should not be used directly, callers should + * instead use the WH_NVM_LOCK() macro, as it compiles to a no-op when + * WOLFHSM_CFG_THREADSAFE is not defined. + * + * Blocks until exclusive access to the NVM context is acquired. Must be + * paired with a corresponding call to wh_Nvm_Unlock(). + * + * @param[in] nvm Pointer to the NVM context. Must not be NULL. + * @return int WH_ERROR_OK on success. + * WH_ERROR_BADARGS if nvm is NULL. + * Other negative error codes on lock acquisition failure. + */ int wh_Nvm_Lock(whNvmContext* nvm); + +/** + * @brief Releases the NVM lock. Should not be used directly, callers should + * instead use the WH_NVM_UNLOCK() macro, as it compiles to a no-op when + * WOLFHSM_CFG_THREADSAFE is not defined. + * + * Releases exclusive access to the NVM context previously acquired via + * wh_Nvm_Lock(). + * + * @param[in] nvm Pointer to the NVM context. Must not be NULL. 
+ * @return int WH_ERROR_OK on success. + * WH_ERROR_BADARGS if nvm is NULL. + * Other negative error codes on lock release failure. + */ int wh_Nvm_Unlock(whNvmContext* nvm); + +/* Helper macro for NVM locking */ #define WH_NVM_LOCK(nvm) wh_Nvm_Lock(nvm) +/* Helper macro for NVM unlocking */ #define WH_NVM_UNLOCK(nvm) wh_Nvm_Unlock(nvm) -#else + +#else /* !WOLFHSM_CFG_THREADSAFE */ + #define WH_NVM_LOCK(nvm) (WH_ERROR_OK) #define WH_NVM_UNLOCK(nvm) (WH_ERROR_OK) -#endif + +#endif /* WOLFHSM_CFG_THREADSAFE */ #endif /* !WOLFHSM_WH_NVM_H_ */ From c5b5fea59b2075c9e0e3c1eff2594aecd601b0c0 Mon Sep 17 00:00:00 2001 From: Brett Nicholas <7547222+bigbrett@users.noreply.github.com> Date: Thu, 29 Jan 2026 07:49:59 -0700 Subject: [PATCH 07/13] add thread safe docs --- docs/draft/README.md | 8 ++ docs/draft/THREADSAFE.md | 218 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 226 insertions(+) create mode 100644 docs/draft/README.md create mode 100644 docs/draft/THREADSAFE.md diff --git a/docs/draft/README.md b/docs/draft/README.md new file mode 100644 index 000000000..5db146b1b --- /dev/null +++ b/docs/draft/README.md @@ -0,0 +1,8 @@ +# Draft Documentation + +This directory holds documentation for individual features added to wolfHSM. + +Eventually these documents should be refined and incorporated into the "V2" +wolfHSM documentation and the online manual. For now we leave them here so they +can inform code review and still be helpful to users, despite being in "draft" +state. diff --git a/docs/draft/THREADSAFE.md b/docs/draft/THREADSAFE.md new file mode 100644 index 000000000..4b1c939e5 --- /dev/null +++ b/docs/draft/THREADSAFE.md @@ -0,0 +1,218 @@ +# Thread Safety in wolfHSM + +## Overview + +wolfHSM supports server-side thread safety via the `WOLFHSM_CFG_THREADSAFE` build flag. When enabled, the server can safely process requests from multiple clients concurrently, with each request processing loop running on a separate thread and communicating through its own server context. + +The thread safety model serializes access to shared resources like NVM storage and the global key cache using a single lock embedded in the NVM context. Multiple server contexts can safely share a single NVM context, with the lock ensuring atomic access to shared state. + +The thread safety feature does NOT mean that a single server context can be shared across threads: it only serializes access to the resources that multiple server contexts share internally as a consequence of using the server API. Concurrent access to a single server context is not currently supported. + +## Build Configuration + +Thread safety is enabled when building with `WOLFHSM_CFG_THREADSAFE` defined. + +To enable thread safety in the POSIX test harness, you can build with: + +```bash +make -C test THREADSAFE=1 +``` + +When `WOLFHSM_CFG_THREADSAFE` is not defined, all locking operations compile to no-ops with zero overhead.
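+
+The zero-overhead behavior comes from the lock helper macros described later in
+this document: call sites are written once and compile in both configurations,
+with the macros expanding to `(WH_ERROR_OK)` when thread safety is disabled.
+A minimal sketch (the `nvm`, `id`, and `meta` variables are illustrative):
+
+```c
+int ret = WH_NVM_LOCK(nvm);        /* acquires the lock, or (WH_ERROR_OK) */
+if (ret == WH_ERROR_OK) {
+    ret = wh_Nvm_GetMetadata(nvm, id, &meta);
+    (void)WH_NVM_UNLOCK(nvm);      /* no-op when THREADSAFE is not defined */
+}
+```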
+ +## What Is Protected + +Currently only the global NVM context is protected by a lock, guaranteeing thread safe access to: + +- **NVM operations**: All operations on non-volatile storage +- **Global key cache**: Keys marked as global (shared across clients) via `WOLFHSM_CFG_GLOBAL_KEYS` +- **Local key cache**: Per-server key cache operations that must synchronize with NVM + +The lock does **not** protect: + +- Transport layer operations (each server has its own comm context) +- Per-request scratch memory (allocated per-handler) +- Server context fields that are not shared + +## Architecture + +``` +┌─────────────┐ ┌─────────────┐ ┌─────────────┐ +│ Server 1 │ │ Server 2 │ │ Server 3 │ +│ (localCache)│ │ (localCache)│ │ (localCache)│ +└──────┬──────┘ └──────┬──────┘ └──────┬──────┘ + │ │ │ + └────────────────┼────────────────┘ + │ + ▼ + ┌───────────────────┐ + │ NVM Context │ + │ ┌─────────────┐ │ + │ │ Lock │ │ + │ ├─────────────┤ │ + │ │ Global Keys │ │ + │ ├─────────────┤ │ + │ │ NVM Backend│ │ + │ └─────────────┘ │ + └───────────────────┘ +``` + +## Lock Lifecycle + +The lock is initialized and cleaned up with the NVM context: + +```c +whNvmConfig nvmConfig = { + .cb = &nvmFlashCb, + .context = &flashContext, + .config = &flashConfig, +#ifdef WOLFHSM_CFG_THREADSAFE + .lockConfig = &lockConfig, /* Platform-specific lock config */ +#endif +}; + +wh_Nvm_Init(&nvmContext, &nvmConfig); /* Initializes lock */ +/* ... use NVM ... */ +wh_Nvm_Cleanup(&nvmContext); /* Cleans up lock */ +``` + +## Request Handler Locking + +Request handlers in the server acquire the lock around compound operations. The pattern is: + +```c +case SOME_ACTION: { + /* Translate request (no lock needed) */ + wh_MessageFoo_TranslateRequest(magic, req_packet, &req); + + /* Acquire lock for atomic compound operation */ + ret = WH_SERVER_NVM_LOCK(server); + if (ret == WH_ERROR_OK) { + /* Perform work while holding lock */ + ret = wh_Server_KeystoreXxx(server, ...); + if (ret == WH_ERROR_OK) { + ret = wh_Nvm_Xxx(server->nvm, ...); + } + + /* Release lock */ + (void)WH_SERVER_NVM_UNLOCK(server); + } + resp.rc = ret; + + /* Translate response (no lock needed) */ + wh_MessageFoo_TranslateResponse(magic, &resp, resp_packet); +} +``` + +Key points: +- Lock acquired **after** request translation +- Lock released **before** response translation +- All NVM and keystore operations within the lock are atomic +- The lock ensures multi-step operations (e.g., check-then-modify) are not interleaved + +## Server-Side Development + +When developing server-side code that accesses shared resources outside the request handling pipeline, you must manually acquire the lock. 
### Using the Server Lock Macros + +```c +int my_server_function(whServerContext* server) +{ + int ret; + + ret = WH_SERVER_NVM_LOCK(server); + if (ret != WH_ERROR_OK) { + return ret; + } + + /* Access NVM or global keystore while holding lock */ + ret = wh_Nvm_Read(server->nvm, id, offset, len, buffer); + if (ret == WH_ERROR_OK) { + ret = wh_Server_KeystoreCacheKey(server, &meta, keyData); + } + + (void)WH_SERVER_NVM_UNLOCK(server); + return ret; +} +``` + +### Using the NVM Lock Macros Directly + +If you only have access to the NVM context: + +```c +int my_nvm_function(whNvmContext* nvm) +{ + int ret; + + ret = WH_NVM_LOCK(nvm); + if (ret != WH_ERROR_OK) { + return ret; + } + + /* Access NVM while holding lock */ + ret = wh_Nvm_GetMetadata(nvm, id, &meta); + if (ret == WH_ERROR_OK) { + ret = wh_Nvm_Read(nvm, id, 0, meta.len, buffer); + } + + (void)WH_NVM_UNLOCK(nvm); + return ret; +} +``` + +### Lock Macro Behavior + +| Macro | THREADSAFE defined | THREADSAFE not defined | +|-------|-------------------|------------------------| +| `WH_SERVER_NVM_LOCK(server)` | Calls `wh_Server_NvmLock()` | Returns `WH_ERROR_OK` | +| `WH_SERVER_NVM_UNLOCK(server)` | Calls `wh_Server_NvmUnlock()` | Returns `WH_ERROR_OK` | +| `WH_NVM_LOCK(nvm)` | Calls `wh_Nvm_Lock()` | Returns `WH_ERROR_OK` | +| `WH_NVM_UNLOCK(nvm)` | Calls `wh_Nvm_Unlock()` | Returns `WH_ERROR_OK` | +Using these macros ensures code compiles and runs correctly regardless of whether thread safety is enabled. + +## Platform-Specific Lock Implementation + +The lock abstraction is a generic interface that delegates the actual locking to platform-specific callbacks. wolfHSM provides a reference POSIX implementation using pthreads for use in the POSIX port: + +```c +#include "port/posix/posix_lock.h" + +static posixLockContext lockCtx; +static const whLockCb lockCb = POSIX_LOCK_CB; + +whLockConfig lockConfig = { + .cb = &lockCb, + .context = &lockCtx, + .config = NULL, /* Use default mutex attributes */ +}; +``` + +To support another platform, implement your own callbacks matching the `whLockCb` interface (a minimal skeleton is sketched at the end of this document): + +```c +typedef struct whLockCb_t { + whLockInitCb init; /* Initialize lock resources */ + whLockCleanupCb cleanup; /* Free lock resources */ + whLockAcquireCb acquire; /* Acquire exclusive lock (blocking) */ + whLockReleaseCb release; /* Release exclusive lock */ +} whLockCb; +``` + +## Testing Thread Safety on the POSIX Port + +Run the standard test suite with DMA, SHE, and thread safety enabled: + +```bash +make -C test clean && make -j -C test DMA=1 SHE=1 THREADSAFE=1 && make -C test run +``` + +Run the multithreaded stress test with the same functionality under ThreadSanitizer to detect data races: + +```bash +make -C test clean && make -j -C test STRESS=1 TSAN=1 DMA=1 SHE=1 THREADSAFE=1 && make -C test run TSAN=1 +``` + +The stress test runs multiple client threads against multiple server contexts sharing a single NVM context, exercising contention patterns across keystore, NVM, counter, and certificate APIs.
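+
+## Appendix: Skeleton Lock Callbacks for a Custom Port
+
+The following is a minimal sketch of custom lock callbacks, assuming the
+callback signatures mirror the POSIX reference implementation (init/cleanup
+take a context and an optional config pointer; acquire/release take only the
+context). It uses a C11 atomic spinlock purely for illustration; a real port
+would typically wrap an RTOS mutex or semaphore instead:
+
+```c
+#include <stdatomic.h>
+
+#include "wolfhsm/wh_error.h"
+#include "wolfhsm/wh_lock.h"
+
+typedef struct {
+    atomic_flag flag;        /* spinlock state for this example only */
+    int         initialized;
+} myLockContext;
+
+static int myLock_Init(void* context, const void* config)
+{
+    myLockContext* ctx = (myLockContext*)context;
+    (void)config; /* no configuration needed for this example */
+    if (ctx == NULL) {
+        return WH_ERROR_BADARGS;
+    }
+    atomic_flag_clear(&ctx->flag);
+    ctx->initialized = 1;
+    return WH_ERROR_OK;
+}
+
+static int myLock_Cleanup(void* context)
+{
+    myLockContext* ctx = (myLockContext*)context;
+    if (ctx == NULL) {
+        return WH_ERROR_BADARGS;
+    }
+    ctx->initialized = 0;
+    return WH_ERROR_OK;
+}
+
+static int myLock_Acquire(void* context)
+{
+    myLockContext* ctx = (myLockContext*)context;
+    if (ctx == NULL) {
+        return WH_ERROR_BADARGS;
+    }
+    if (!ctx->initialized) {
+        return WH_ERROR_NOTREADY;
+    }
+    while (atomic_flag_test_and_set(&ctx->flag)) {
+        /* busy-wait; a real port would block on its mutex primitive */
+    }
+    return WH_ERROR_OK;
+}
+
+static int myLock_Release(void* context)
+{
+    myLockContext* ctx = (myLockContext*)context;
+    if (ctx == NULL) {
+        return WH_ERROR_BADARGS;
+    }
+    if (!ctx->initialized) {
+        return WH_ERROR_NOTREADY;
+    }
+    atomic_flag_clear(&ctx->flag);
+    return WH_ERROR_OK;
+}
+
+static const whLockCb myLockCb = {
+    .init    = myLock_Init,
+    .cleanup = myLock_Cleanup,
+    .acquire = myLock_Acquire,
+    .release = myLock_Release,
+};
+```
+
+The resulting `myLockCb` and a `myLockContext` instance are then referenced
+from a `whLockConfig` in the same way as the POSIX example above.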
From ee5412a4a8d22cdbf904d72a719e90d3ed9cf556 Mon Sep 17 00:00:00 2001 From: Brett Nicholas <7547222+bigbrett@users.noreply.github.com> Date: Thu, 29 Jan 2026 13:18:56 -0700 Subject: [PATCH 08/13] housekeeping - pull some logic out of locked critical sections --- src/wh_server_keystore.c | 21 ++++++++++----------- src/wh_server_nvm.c | 16 ++++++++-------- 2 files changed, 18 insertions(+), 19 deletions(-) diff --git a/src/wh_server_keystore.c b/src/wh_server_keystore.c index 72e45fedf..bbf0eca0c 100644 --- a/src/wh_server_keystore.c +++ b/src/wh_server_keystore.c @@ -1677,14 +1677,14 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, if (ret == WH_ERROR_OK) { ret = wh_Server_KeystoreCacheKeyChecked(server, meta, in); } - if (ret == WH_ERROR_OK) { - /* Translate server keyId back to client format with flags - */ - resp.id = wh_KeyId_TranslateToClient(meta->id); - } (void)WH_SERVER_NVM_UNLOCK(server); } /* WH_SERVER_NVM_LOCK() */ + + if (ret == WH_ERROR_OK) { + /* Translate server keyId back to client format with flags */ + resp.id = wh_KeyId_TranslateToClient(meta->id); + } resp.rc = ret; (void)wh_MessageKeystore_TranslateCacheResponse( @@ -1734,14 +1734,13 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic, } } - if (ret == WH_ERROR_OK) { - /* Translate server keyId back to client format with flags - */ - resp.id = wh_KeyId_TranslateToClient(meta->id); - } - (void)WH_SERVER_NVM_UNLOCK(server); } /* WH_SERVER_NVM_LOCK() */ + + if (ret == WH_ERROR_OK) { + /* Translate server keyId back to client format with flags */ + resp.id = wh_KeyId_TranslateToClient(meta->id); + } resp.rc = ret; (void)wh_MessageKeystore_TranslateCacheDmaResponse( diff --git a/src/wh_server_nvm.c b/src/wh_server_nvm.c index eb6280abf..9c675b62a 100644 --- a/src/wh_server_nvm.c +++ b/src/wh_server_nvm.c @@ -214,16 +214,16 @@ int wh_Server_HandleNvmRequest(whServerContext* server, /* Process the getmetadata action */ rc = wh_Nvm_GetMetadata(server->nvm, req.id, &meta); - if (rc == 0) { - resp.id = meta.id; - resp.access = meta.access; - resp.flags = meta.flags; - resp.len = meta.len; - memcpy(resp.label, meta.label, sizeof(resp.label)); - } - (void)WH_SERVER_NVM_UNLOCK(server); } /* WH_SERVER_NVM_LOCK() */ + + if (rc == WH_ERROR_OK) { + resp.id = meta.id; + resp.access = meta.access; + resp.flags = meta.flags; + resp.len = meta.len; + memcpy(resp.label, meta.label, sizeof(resp.label)); + } resp.rc = rc; } /* Convert the response struct */ From e3f3708be299360a4854519430d5b468913380a7 Mon Sep 17 00:00:00 2001 From: Brett Nicholas <7547222+bigbrett@users.noreply.github.com> Date: Thu, 29 Jan 2026 13:19:05 -0700 Subject: [PATCH 09/13] simplify comment --- test/wh_test_posix_threadsafe_stress.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/test/wh_test_posix_threadsafe_stress.c b/test/wh_test_posix_threadsafe_stress.c index ad0c6de00..0f300ecd2 100644 --- a/test/wh_test_posix_threadsafe_stress.c +++ b/test/wh_test_posix_threadsafe_stress.c @@ -191,10 +191,7 @@ static const whTransportServerCb serverTransportCb = WH_TRANSPORT_MEM_SERVER_CB; /* Hot resources - all clients target these same IDs * NOTE: Use client-facing keyId format (simple ID + flags), NOT server-internal - * format (WH_MAKE_KEYID). The server's wh_KeyId_TranslateFromClient() extracts - * only the lower 8 bits as ID and checks WH_KEYID_CLIENT_GLOBAL_FLAG for - * global. Using WH_MAKE_KEYID with user=1 sets bit 8, which is - * WH_KEYID_CLIENT_GLOBAL_FLAG! + * format (WH_MAKE_KEYID). 
* * We need separate key IDs for different test categories because: * - Revoked keys have NONMODIFIABLE flag and can't be erased or re-cached From 5b88468c137a96bd4219d29d0966cb762267b846 Mon Sep 17 00:00:00 2001 From: Brett Nicholas <7547222+bigbrett@users.noreply.github.com> Date: Thu, 29 Jan 2026 13:28:44 -0700 Subject: [PATCH 10/13] fix return code value --- src/wh_nvm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/wh_nvm.c b/src/wh_nvm.c index 12af0ec4c..4c225dc7c 100644 --- a/src/wh_nvm.c +++ b/src/wh_nvm.c @@ -116,7 +116,7 @@ int wh_Nvm_Init(whNvmContext* context, const whNvmConfig* config) if (context->cb != NULL && context->cb->Init != NULL) { rc = context->cb->Init(context->context, config->config); - if (rc != 0) { + if (rc != WH_ERROR_OK) { context->cb = NULL; context->context = NULL; #ifdef WOLFHSM_CFG_THREADSAFE From 052cd4849c669e82918f2ddfda2ec33033c23d39 Mon Sep 17 00:00:00 2001 From: Brett Nicholas <7547222+bigbrett@users.noreply.github.com> Date: Thu, 29 Jan 2026 13:30:39 -0700 Subject: [PATCH 11/13] document NVM list concurrency issues --- wolfhsm/wh_client.h | 4 ++++ wolfhsm/wh_nvm.h | 5 +++++ 2 files changed, 9 insertions(+) diff --git a/wolfhsm/wh_client.h b/wolfhsm/wh_client.h index f1b1b70a5..3b695d8ab 100644 --- a/wolfhsm/wh_client.h +++ b/wolfhsm/wh_client.h @@ -1517,6 +1517,10 @@ int wh_Client_NvmListResponse(whClientContext* c, int32_t* out_rc, * the criteria. * @param[out] out_id Pointer to store the ID of the first matching NVM object. * @return int Returns 0 on success, or a negative error code on failure. + * + * @note Enumerating all objects requires calling this function in a loop + * with successive start_id values. In thread-safe builds, concurrent + * NVM modifications between calls may result in inconsistent results. */ int wh_Client_NvmList(whClientContext* c, whNvmAccess access, whNvmFlags flags, whNvmId start_id, int32_t* out_rc, whNvmId* out_count, diff --git a/wolfhsm/wh_nvm.h b/wolfhsm/wh_nvm.h index b06c16018..e41f7f7c0 100644 --- a/wolfhsm/wh_nvm.h +++ b/wolfhsm/wh_nvm.h @@ -302,6 +302,11 @@ int wh_Nvm_AddObjectChecked(whNvmContext* context, whNvmMetadata* meta, * WH_ERROR_BADARGS if context is NULL or not initialized. * WH_ERROR_ABORTED if the backend callback is NULL. * WH_ERROR_NOTFOUND if no matching objects found. + * + * @note This function returns one matching ID per call. Enumerating all + * objects requires multiple calls with successive start_id values. + * In thread-safe builds, if the NVM lock is not held across calls, + * concurrent modifications may result in skipped or repeated entries. 
*/ int wh_Nvm_List(whNvmContext* context, whNvmAccess access, whNvmFlags flags, whNvmId start_id, whNvmId* out_count, whNvmId* out_id); From 01945147038db8b02d31376d6796bb60984176a0 Mon Sep 17 00:00:00 2001 From: Brett Nicholas <7547222+bigbrett@users.noreply.github.com> Date: Thu, 29 Jan 2026 13:42:08 -0700 Subject: [PATCH 12/13] move client intialization before threads are spawned to prevent race on wolfCrypt init and cryptoCb registration --- test/tsan.supp | 17 +---------------- test/wh_test_posix_threadsafe_stress.c | 19 ++++++++++--------- 2 files changed, 11 insertions(+), 25 deletions(-) diff --git a/test/tsan.supp b/test/tsan.supp index 3bf047d53..293cb645f 100644 --- a/test/tsan.supp +++ b/test/tsan.supp @@ -4,22 +4,7 @@ # Use with: TSAN_OPTIONS="suppressions=tsan.supp" ./wh_test_stress.elf # ============================================================================= -# Category 1: wolfSSL/wolfCrypt Global State (External Library) -# ============================================================================= -# These races occur in wolfSSL's global initialization and crypto callback -# registration. They are wolfSSL library issues, not wolfHSM bugs. - -# Race on initRefCount in wolfCrypt_Init/wolfCrypt_Cleanup -race:wolfCrypt_Init -race:wolfCrypt_Cleanup - -# Races on gCryptoDev array in crypto callback registration -race:wc_CryptoCb_RegisterDevice -race:wc_CryptoCb_UnRegisterDevice -race:wc_CryptoCb_GetDevice - -# ============================================================================= -# Category 2: Transport Memory Layer (Lock-free by Design) +# Category: Transport Memory Layer (Lock-free by Design) # ============================================================================= # The shared memory transport uses a lock-free protocol with volatile pointers, # memory fences, and cache operations. 
TSAN doesn't understand this higher-level diff --git a/test/wh_test_posix_threadsafe_stress.c b/test/wh_test_posix_threadsafe_stress.c index 0f300ecd2..fb4c6a0fc 100644 --- a/test/wh_test_posix_threadsafe_stress.c +++ b/test/wh_test_posix_threadsafe_stress.c @@ -657,11 +657,21 @@ static int initClientServerPair(StressTestContext* ctx, int pairIndex) pair->serverConfig.crypto = &pair->cryptoCtx; pair->serverConfig.devId = INVALID_DEVID; + /* Initialize client in the main thread to avoid concurrent calls to + * wolfCrypt_Init() and wc_CryptoCb_RegisterDevice() */ + rc = wh_Client_Init(&pair->client, &pair->clientConfig); + if (rc != WH_ERROR_OK) { + WH_ERROR_PRINT("Client %d init failed: %d\n", pairIndex, rc); + wc_FreeRng(pair->cryptoCtx.rng); + return rc; + } + return WH_ERROR_OK; } static void cleanupClientServerPair(ClientServerPair* pair) { + wh_Client_Cleanup(&pair->client); wc_FreeRng(pair->cryptoCtx.rng); } @@ -1939,14 +1949,6 @@ static void* contentionClientThread(void* arg) int rc; int localIteration; - /* Initialize client */ - rc = wh_Client_Init(&pair->client, &pair->clientConfig); - if (rc != WH_ERROR_OK) { - WH_ERROR_PRINT("Client %d init failed: %d\n", pair->clientId, rc); - ATOMIC_ADD_INT(&pair->errorCount, 1); - return NULL; - } - /* Wait for all threads to start */ pthread_barrier_wait(&ctx->startBarrier); @@ -2002,7 +2004,6 @@ static void* contentionClientThread(void* arg) pthread_barrier_wait(&ctx->streamEndBarrier); } - wh_Client_Cleanup(&pair->client); return NULL; } From a5db2eeac37bcec8443ee2aaf5416f661aad653f Mon Sep 17 00:00:00 2001 From: Brett Nicholas <7547222+bigbrett@users.noreply.github.com> Date: Thu, 29 Jan 2026 13:48:07 -0700 Subject: [PATCH 13/13] simplify --- src/wh_nvm.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/wh_nvm.c b/src/wh_nvm.c index 4c225dc7c..371e4e79c 100644 --- a/src/wh_nvm.c +++ b/src/wh_nvm.c @@ -108,8 +108,6 @@ int wh_Nvm_Init(whNvmContext* context, const whNvmConfig* config) /* Initialize lock (NULL lockConfig = no-op locking) */ rc = wh_Lock_Init(&context->lock, config->lockConfig); if (rc != WH_ERROR_OK) { - context->cb = NULL; - context->context = NULL; return rc; } #endif