diff --git a/.github/workflows/build-and-test-stress.yml b/.github/workflows/build-and-test-stress.yml
new file mode 100644
index 00000000..7a0aee18
--- /dev/null
+++ b/.github/workflows/build-and-test-stress.yml
@@ -0,0 +1,35 @@
+name: Multi-threaded Stress Test
+
+on:
+ push:
+ branches: [ 'master', 'main', 'release/**' ]
+ pull_request:
+ branches: [ '*' ]
+
+jobs:
+ build:
+
+ runs-on: ubuntu-latest
+ timeout-minutes: 10
+
+ steps:
+ - uses: actions/checkout@v4
+
+ # List host CPU info
+ - name: Host CPU info
+ run: cat /proc/cpuinfo
+
+ # List compiler version
+ - name: List compiler version
+ run: gcc --version
+
+ # pull and build wolfssl
+ - name: Checkout wolfssl
+ uses: actions/checkout@v4
+ with:
+ repository: wolfssl/wolfssl
+ path: wolfssl
+
+ # Build and run stress tests with TSAN
+ - name: Build and run stress tests with TSAN
+ run: cd test && make clean && make -j THREADSAFE=1 TSAN=1 STRESS=1 DMA=1 SHE=1 WOLFSSL_DIR=../wolfssl && make TSAN=1 run
diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml
index aa6d4f32..9e77d634 100644
--- a/.github/workflows/build-and-test.yml
+++ b/.github/workflows/build-and-test.yml
@@ -64,3 +64,15 @@ jobs:
# Build and test with DEBUG_VERBOSE=1 (includes DEBUG)
- name: Build and test with DEBUG_VERBOSE
run: cd test && make clean && make -j DEBUG_VERBOSE=1 WOLFSSL_DIR=../wolfssl && make run
+
+ # Build and test in multithreaded mode with everything enabled
+ - name: Build and test with THREADSAFE and everything
+ run: cd test && make clean && make -j THREADSAFE=1 DMA=1 SHE=1 ASAN=1 WOLFSSL_DIR=../wolfssl && make run
+
+ # Build and test in multithreaded mode with everything enabled and wolfCrypt tests
+ - name: Build and test with THREADSAFE and TESTWOLFCRYPT and everything
+ run: cd test && make clean && make -j THREADSAFE=1 TESTWOLFCRYPT=1 DMA=1 SHE=1 ASAN=1 WOLFSSL_DIR=../wolfssl && make run
+
+ # Build and test in multithreaded mode with everything enabled and wolfCrypt tests with dma
+ - name: Build and test with THREADSAFE and TESTWOLFCRYPT with DMA
+ run: cd test && make clean && make -j THREADSAFE=1 TESTWOLFCRYPT=1 TESTWOLFCRYPT_DMA=1 DMA=1 SHE=1 ASAN=1 WOLFSSL_DIR=../wolfssl && make run
diff --git a/docs/draft/README.md b/docs/draft/README.md
new file mode 100644
index 00000000..5db146b1
--- /dev/null
+++ b/docs/draft/README.md
@@ -0,0 +1,8 @@
+# Draft Documentation
+
+This directory holds documentation for individual features added to wolfHSM.
+
+Eventually these documents should be refined and incorporated into the "V2"
+wolfHSM documentation and the online manual. For now we leave them here so they
+can inform code review and still be helpful to users, despite being in "draft"
+state.
diff --git a/docs/draft/THREADSAFE.md b/docs/draft/THREADSAFE.md
new file mode 100644
index 00000000..4b1c939e
--- /dev/null
+++ b/docs/draft/THREADSAFE.md
@@ -0,0 +1,218 @@
+# Thread Safety in wolfHSM
+
+## Overview
+
+wolfHSM supports server-side thread safety via the `WOLFHSM_CFG_THREADSAFE` build flag. When enabled, the server can safely process requests from multiple clients concurrently, with each request processing loop running on a separate thread and communicating through its own server context.
+
+The thread safety model serializes access to shared resources like NVM storage and the global key cache using a single lock embedded in the NVM context. Multiple server contexts can safely share a single NVM context, with the lock ensuring atomic access to shared state.
+
+The thread safety feature does NOT imply that a single server context can be shared across threads, as it only serializes internal access to shared resources between multiple server contexts that would occur internally as a consequence of using the server API. Concurrent access to a single server context is not currently supported.
+
+## Build Configuration
+
+Thread safety is enabled when building with `WOLFHSM_CFG_THREADSAFE` defined.
+
+To enable thread safety in the posix test harness, you can build with:
+
+```bash
+make -C test THREADSAFE=1
+```
+
+When `WOLFHSM_CFG_THREADSAFE` is not defined, all locking operations compile to no-ops with zero overhead.
+
+## What Is Protected
+
+Currently only the global NVM context is protected by a lock, guaranteeing thread safe access to:
+
+- **NVM operations**: All operations on non-volatile storage
+- **Global key cache**: Keys marked as global (shared across clients) via `WOLFHSM_CFG_GLOBAL_KEYS`
+- **Local key cache**: Per-server key cache operations that must synchronize with NVM
+
+The lock does **not** protect:
+
+- Transport layer operations (each server has its own comm context)
+- Per-request scratch memory (allocated per-handler)
+- Server context fields that are not shared
+
+## Architecture
+
+```
+┌─────────────┐ ┌─────────────┐ ┌─────────────┐
+│ Server 1 │ │ Server 2 │ │ Server 3 │
+│ (localCache)│ │ (localCache)│ │ (localCache)│
+└──────┬──────┘ └──────┬──────┘ └──────┬──────┘
+ │ │ │
+ └────────────────┼────────────────┘
+ │
+ ▼
+ ┌───────────────────┐
+ │ NVM Context │
+ │ ┌─────────────┐ │
+ │ │ Lock │ │
+ │ ├─────────────┤ │
+ │ │ Global Keys │ │
+ │ ├─────────────┤ │
+ │ │ NVM Backend│ │
+ │ └─────────────┘ │
+ └───────────────────┘
+```
+
+## Lock Lifecycle
+
+The lock is initialized and cleaned up with the NVM context:
+
+```c
+whNvmConfig nvmConfig = {
+ .cb = &nvmFlashCb,
+ .context = &flashContext,
+ .config = &flashConfig,
+#ifdef WOLFHSM_CFG_THREADSAFE
+ .lockConfig = &lockConfig, /* Platform-specific lock config */
+#endif
+};
+
+wh_Nvm_Init(&nvmContext, &nvmConfig); /* Initializes lock */
+/* ... use NVM ... */
+wh_Nvm_Cleanup(&nvmContext); /* Cleans up lock */
+```
+
+## Request Handler Locking
+
+Request handlers in the server acquire the lock around compound operations. The pattern is:
+
+```c
+case SOME_ACTION: {
+ /* Translate request (no lock needed) */
+ wh_MessageFoo_TranslateRequest(magic, req_packet, &req);
+
+ /* Acquire lock for atomic compound operation */
+ ret = WH_SERVER_NVM_LOCK(server);
+ if (ret == WH_ERROR_OK) {
+ /* Perform work while holding lock */
+ ret = wh_Server_KeystoreXxx(server, ...);
+ if (ret == WH_ERROR_OK) {
+ ret = wh_Nvm_Xxx(server->nvm, ...);
+ }
+
+ /* Release lock */
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ }
+ resp.rc = ret;
+
+ /* Translate response (no lock needed) */
+ wh_MessageFoo_TranslateResponse(magic, &resp, resp_packet);
+}
+```
+
+Key points:
+- Lock acquired **after** request translation
+- Lock released **before** response translation
+- All NVM and keystore operations within the lock are atomic
+- The lock ensures multi-step operations (e.g., check-then-modify) are not interleaved
+
+## Server-Side Development
+
+When developing server-side code that accesses shared resources outside the request handling pipeline, you must manually acquire the lock.
+
+### Using the Server Lock Macros
+
+```c
+int my_server_function(whServerContext* server)
+{
+ int ret;
+
+ ret = WH_SERVER_NVM_LOCK(server);
+ if (ret != WH_ERROR_OK) {
+ return ret;
+ }
+
+ /* Access NVM or global keystore while holding lock */
+ ret = wh_Nvm_Read(server->nvm, id, offset, len, buffer);
+ if (ret == WH_ERROR_OK) {
+ ret = wh_Server_KeystoreCacheKey(server, &meta, keyData);
+ }
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ return ret;
+}
+```
+
+### Using the NVM Lock Macros Directly
+
+If you only have access to the NVM context:
+
+```c
+int my_nvm_function(whNvmContext* nvm)
+{
+ int ret;
+
+ ret = WH_NVM_LOCK(nvm);
+ if (ret != WH_ERROR_OK) {
+ return ret;
+ }
+
+ /* Access NVM while holding lock */
+ ret = wh_Nvm_GetMetadata(nvm, id, &meta);
+ if (ret == WH_ERROR_OK) {
+ ret = wh_Nvm_Read(nvm, id, 0, meta.len, buffer);
+ }
+
+ (void)WH_NVM_UNLOCK(nvm);
+ return ret;
+}
+```
+
+### Lock Macro Behavior
+
+| Macro | THREADSAFE defined | THREADSAFE not defined |
+|-------|-------------------|------------------------|
+| `WH_SERVER_NVM_LOCK(server)` | Calls `wh_Server_NvmLock()` | Returns `WH_ERROR_OK` |
+| `WH_SERVER_NVM_UNLOCK(server)` | Calls `wh_Server_NvmUnlock()` | Returns `WH_ERROR_OK` |
+| `WH_NVM_LOCK(nvm)` | Calls `wh_Nvm_Lock()` | Returns `WH_ERROR_OK` |
+| `WH_NVM_UNLOCK(nvm)` | Calls `wh_Nvm_Unlock()` | Returns `WH_ERROR_OK` |
+
+Using these macros ensures code compiles and runs correctly regardless of whether thread safety is enabled.
+
+## Platform-Specific Lock Implementation
+
+The lock abstraction is a generic interface that relies on callbacks for the actual implementation, allowing platform-specific implementations. wolfHSM provides a reference POSIX implementation using pthreads for use in the POSIX port:
+
+```c
+#include "port/posix/posix_lock.h"
+
+static posixLockContext lockCtx;
+static const whLockCb lockCb = POSIX_LOCK_CB;
+
+whLockConfig lockConfig = {
+ .cb = &lockCb,
+ .context = &lockCtx,
+ .config = NULL, /* Use default mutex attributes */
+};
+```
+
+To implement for another platform, you can implement your own callbacks matching the `whLockCb` interface:
+
+```c
+typedef struct whLockCb_t {
+ whLockInitCb init; /* Initialize lock resources */
+ whLockCleanupCb cleanup; /* Free lock resources */
+ whLockAcquireCb acquire; /* Acquire exclusive lock (blocking) */
+ whLockReleaseCb release; /* Release exclusive lock */
+} whLockCb;
+```
+
+## Testing Thread Safety on the POSIX port
+
+Run the standard test suite with DMA, SHE, and thread safety enabled
+
+```bash
+make -C test clean && make -j -C test DMA=1 THREADSAFE=1 && make -C test run
+```
+
+Run the multithreaded stress test with the same functionality under ThreadSanitizer to detect data races:
+
+```bash
+make -C test clean && make -j -C test STRESS=1 TSAN=1 DMA=1 SHE=1 THREADSAFE=1 && make -C test run TSAN=1
+```
+
+The stress test runs multiple client threads against multiple server contexts sharing a single NVM context, exercising contention patterns across keystore, NVM, counter, and certificate APIs.
diff --git a/port/posix/posix_lock.c b/port/posix/posix_lock.c
new file mode 100644
index 00000000..f8767281
--- /dev/null
+++ b/port/posix/posix_lock.c
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2026 wolfSSL Inc.
+ *
+ * This file is part of wolfHSM.
+ *
+ * wolfHSM is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * wolfHSM is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with wolfHSM. If not, see <https://www.gnu.org/licenses/>.
+ */
+/*
+ * port/posix/posix_lock.c
+ *
+ * POSIX pthread_mutex-based implementation of the wolfHSM lock abstraction.
+ * Each lock context contains a single mutex for one shared resource.
+ */
+
+#include "wolfhsm/wh_settings.h"
+
+#ifdef WOLFHSM_CFG_THREADSAFE
+
+#include <errno.h>
+#include <pthread.h>
+
+#include "wolfhsm/wh_error.h"
+#include "wolfhsm/wh_lock.h"
+
+#include "port/posix/posix_lock.h"
+
+int posixLock_Init(void* context, const void* config)
+{
+ posixLockContext* ctx = (posixLockContext*)context;
+ const posixLockConfig* cfg = (const posixLockConfig*)config;
+ const pthread_mutexattr_t* attr = NULL;
+ int rc;
+
+ if (ctx == NULL) {
+ return WH_ERROR_BADARGS;
+ }
+
+ /* Already initialized? */
+ if (ctx->initialized) {
+ return WH_ERROR_OK;
+ }
+
+ /* Use attributes from config if provided */
+ if (cfg != NULL) {
+ attr = cfg->attr;
+ }
+
+ rc = pthread_mutex_init(&ctx->mutex, attr);
+ if (rc != 0) {
+ if (rc == EINVAL) {
+ return WH_ERROR_BADARGS;
+ }
+ return WH_ERROR_ABORTED;
+ }
+
+ ctx->initialized = 1;
+ return WH_ERROR_OK;
+}
+
+int posixLock_Cleanup(void* context)
+{
+ posixLockContext* ctx = (posixLockContext*)context;
+ int rc;
+
+ if (ctx == NULL) {
+ return WH_ERROR_BADARGS;
+ }
+
+ /* Not initialized? */
+ if (!ctx->initialized) {
+ return WH_ERROR_OK;
+ }
+
+ rc = pthread_mutex_destroy(&ctx->mutex);
+ if (rc != 0) {
+ if (rc == EINVAL) {
+ return WH_ERROR_BADARGS;
+ }
+ return WH_ERROR_ABORTED;
+ }
+
+ ctx->initialized = 0;
+ return WH_ERROR_OK;
+}
+
+int posixLock_Acquire(void* context)
+{
+ posixLockContext* ctx = (posixLockContext*)context;
+ int rc;
+
+ if (ctx == NULL) {
+ return WH_ERROR_BADARGS;
+ }
+
+ /* Not initialized? */
+ if (!ctx->initialized) {
+ return WH_ERROR_NOTREADY;
+ }
+
+ rc = pthread_mutex_lock(&ctx->mutex);
+ if (rc != 0) {
+ /* Trap error-checking mutex errors that indicate bugs */
+ if (rc == EDEADLK) {
+ /* Deadlock would occur - owner trying to re-acquire */
+ return WH_ERROR_ABORTED;
+ }
+ if (rc == EINVAL) {
+ return WH_ERROR_BADARGS;
+ }
+ return WH_ERROR_ABORTED;
+ }
+
+ return WH_ERROR_OK;
+}
+
+int posixLock_Release(void* context)
+{
+ posixLockContext* ctx = (posixLockContext*)context;
+ int rc;
+
+ if (ctx == NULL) {
+ return WH_ERROR_BADARGS;
+ }
+
+ /* Not initialized? */
+ if (!ctx->initialized) {
+ return WH_ERROR_NOTREADY;
+ }
+
+ rc = pthread_mutex_unlock(&ctx->mutex);
+ if (rc != 0) {
+ /* Trap error-checking mutex errors that indicate bugs */
+ if (rc == EPERM) {
+ /* Non-owner attempting to unlock */
+ return WH_ERROR_LOCKED;
+ }
+ if (rc == EINVAL) {
+ return WH_ERROR_BADARGS;
+ }
+ return WH_ERROR_ABORTED;
+ }
+
+ return WH_ERROR_OK;
+}
+
+#endif /* WOLFHSM_CFG_THREADSAFE */
diff --git a/port/posix/posix_lock.h b/port/posix/posix_lock.h
new file mode 100644
index 00000000..815088d0
--- /dev/null
+++ b/port/posix/posix_lock.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2024 wolfSSL Inc.
+ *
+ * This file is part of wolfHSM.
+ *
+ * wolfHSM is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * wolfHSM is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with wolfHSM. If not, see <https://www.gnu.org/licenses/>.
+ */
+/*
+ * port/posix/posix_lock.h
+ *
+ * POSIX pthread_mutex-based implementation of the wolfHSM lock abstraction.
+ * Provides thread-safe synchronization for shared resources.
+ *
+ * Each posixLockContext instance contains a single mutex. Create one
+ * context per shared resource (e.g., one for NVM, one for crypto).
+ */
+
+#ifndef PORT_POSIX_POSIX_LOCK_H_
+#define PORT_POSIX_POSIX_LOCK_H_
+
+#include "wolfhsm/wh_settings.h"
+
+#ifdef WOLFHSM_CFG_THREADSAFE
+
+#include <pthread.h>
+
+#include "wolfhsm/wh_lock.h"
+
+/* Configuration for POSIX lock backend */
+typedef struct posixLockConfig_t {
+ const pthread_mutexattr_t* attr; /* Mutex attributes, NULL for defaults */
+} posixLockConfig;
+
+/* Context structure containing a single mutex for one resource */
+typedef struct posixLockContext_t {
+ pthread_mutex_t mutex;
+ int initialized;
+} posixLockContext;
+
+/* Callback functions matching whLockCb interface */
+int posixLock_Init(void* context, const void* config);
+int posixLock_Cleanup(void* context);
+int posixLock_Acquire(void* context);
+int posixLock_Release(void* context);
+
+/* Convenience macro for callback table initialization */
+/* clang-format off */
+#define POSIX_LOCK_CB \
+ { \
+ .init = posixLock_Init, \
+ .cleanup = posixLock_Cleanup, \
+ .acquire = posixLock_Acquire, \
+ .release = posixLock_Release, \
+ }
+/* clang-format on */
+
+#endif /* WOLFHSM_CFG_THREADSAFE */
+
+#endif /* !PORT_POSIX_POSIX_LOCK_H_ */
diff --git a/src/wh_lock.c b/src/wh_lock.c
new file mode 100644
index 00000000..da2a29e7
--- /dev/null
+++ b/src/wh_lock.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2026 wolfSSL Inc.
+ *
+ * This file is part of wolfHSM.
+ *
+ * wolfHSM is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * wolfHSM is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with wolfHSM. If not, see <https://www.gnu.org/licenses/>.
+ */
+/*
+ * src/wh_lock.c
+ *
+ * Implementation of platform-agnostic lock abstraction for thread-safe
+ * access to shared resources. Each lock instance protects exactly one
+ * resource, allowing arbitrary sharing topologies.
+ */
+
+#include "wolfhsm/wh_settings.h"
+
+#include <stddef.h> /* For NULL */
+#include <string.h> /* For memset */
+
+#include "wolfhsm/wh_lock.h"
+#include "wolfhsm/wh_error.h"
+
+#ifdef WOLFHSM_CFG_THREADSAFE
+
+int wh_Lock_Init(whLock* lock, const whLockConfig* config)
+{
+ int ret = WH_ERROR_OK;
+
+ if (lock == NULL) {
+ return WH_ERROR_BADARGS;
+ }
+
+ /* Allow NULL config for single-threaded mode (no-op locking) */
+ if ((config == NULL) || (config->cb == NULL)) {
+ lock->cb = NULL;
+ lock->context = NULL;
+ lock->initialized = 1; /* Mark as initialized even in no-op mode */
+ return WH_ERROR_OK;
+ }
+
+ lock->cb = config->cb;
+ lock->context = config->context;
+
+ /* Initialize the lock if callback provided */
+ if (lock->cb->init != NULL) {
+ ret = lock->cb->init(lock->context, config->config);
+ if (ret != WH_ERROR_OK) {
+ lock->cb = NULL;
+ lock->context = NULL;
+ /* Do not set initialized on failure */
+ return ret;
+ }
+ }
+
+ lock->initialized = 1; /* Mark as initialized after successful init */
+ return WH_ERROR_OK;
+}
+
+int wh_Lock_Cleanup(whLock* lock)
+{
+ if (lock == NULL) {
+ return WH_ERROR_BADARGS;
+ }
+
+ if ((lock->cb != NULL) && (lock->cb->cleanup != NULL)) {
+ (void)lock->cb->cleanup(lock->context);
+ }
+
+ /* Zero the entire structure to make post-cleanup state distinguishable */
+ memset(lock, 0, sizeof(*lock));
+
+ return WH_ERROR_OK;
+}
+
+int wh_Lock_Acquire(whLock* lock)
+{
+ /* Return error if lock is NULL */
+ if (lock == NULL) {
+ return WH_ERROR_BADARGS;
+ }
+
+ /* Return error if lock is not initialized */
+ if (lock->initialized == 0) {
+ return WH_ERROR_BADARGS;
+ }
+
+ /* No-op if not configured (no callbacks) */
+ if ((lock->cb == NULL) || (lock->cb->acquire == NULL)) {
+ return WH_ERROR_OK;
+ }
+
+ return lock->cb->acquire(lock->context);
+}
+
+int wh_Lock_Release(whLock* lock)
+{
+ /* Return error if lock is NULL */
+ if (lock == NULL) {
+ return WH_ERROR_BADARGS;
+ }
+
+ /* Return error if lock is not initialized */
+ if (lock->initialized == 0) {
+ return WH_ERROR_BADARGS;
+ }
+
+ /* No-op if not configured (no callbacks) */
+ if ((lock->cb == NULL) || (lock->cb->release == NULL)) {
+ return WH_ERROR_OK;
+ }
+
+ return lock->cb->release(lock->context);
+}
+
+#endif /* WOLFHSM_CFG_THREADSAFE */
diff --git a/src/wh_nvm.c b/src/wh_nvm.c
index bc32b09e..371e4e79 100644
--- a/src/wh_nvm.c
+++ b/src/wh_nvm.c
@@ -30,7 +30,7 @@
#include "wolfhsm/wh_common.h"
#include "wolfhsm/wh_error.h"
-
+#include "wolfhsm/wh_lock.h"
#include "wolfhsm/wh_nvm.h"
typedef enum {
@@ -104,11 +104,22 @@ int wh_Nvm_Init(whNvmContext* context, const whNvmConfig* config)
memset(&context->globalCache, 0, sizeof(context->globalCache));
#endif
+#ifdef WOLFHSM_CFG_THREADSAFE
+ /* Initialize lock (NULL lockConfig = no-op locking) */
+ rc = wh_Lock_Init(&context->lock, config->lockConfig);
+ if (rc != WH_ERROR_OK) {
+ return rc;
+ }
+#endif
+
if (context->cb != NULL && context->cb->Init != NULL) {
rc = context->cb->Init(context->context, config->config);
- if (rc != 0) {
+ if (rc != WH_ERROR_OK) {
context->cb = NULL;
context->context = NULL;
+#ifdef WOLFHSM_CFG_THREADSAFE
+ (void)wh_Lock_Cleanup(&context->lock);
+#endif
}
}
@@ -117,6 +128,8 @@ int wh_Nvm_Init(whNvmContext* context, const whNvmConfig* config)
int wh_Nvm_Cleanup(whNvmContext* context)
{
+ int rc;
+
if ( (context == NULL) ||
(context->cb == NULL) ) {
return WH_ERROR_BADARGS;
@@ -129,9 +142,20 @@ int wh_Nvm_Cleanup(whNvmContext* context)
/* No callback? Return ABORTED */
if (context->cb->Cleanup == NULL) {
- return WH_ERROR_ABORTED;
+ rc = WH_ERROR_ABORTED;
}
- return context->cb->Cleanup(context->context);
+ else {
+ rc = context->cb->Cleanup(context->context);
+ }
+
+#ifdef WOLFHSM_CFG_THREADSAFE
+ (void)wh_Lock_Cleanup(&context->lock);
+#endif
+
+ context->cb = NULL;
+ context->context = NULL;
+
+ return rc;
}
int wh_Nvm_GetAvailable(whNvmContext* context,
@@ -314,3 +338,23 @@ int wh_Nvm_ReadChecked(whNvmContext* context, whNvmId id, whNvmSize offset,
return wh_Nvm_Read(context, id, offset, data_len, data);
}
+
+#ifdef WOLFHSM_CFG_THREADSAFE
+
+int wh_Nvm_Lock(whNvmContext* nvm)
+{
+ if (nvm == NULL) {
+ return WH_ERROR_BADARGS;
+ }
+ return wh_Lock_Acquire(&nvm->lock);
+}
+
+int wh_Nvm_Unlock(whNvmContext* nvm)
+{
+ if (nvm == NULL) {
+ return WH_ERROR_BADARGS;
+ }
+ return wh_Lock_Release(&nvm->lock);
+}
+
+#endif /* WOLFHSM_CFG_THREADSAFE */
diff --git a/src/wh_server.c b/src/wh_server.c
index 83df8acb..8426fb65 100644
--- a/src/wh_server.c
+++ b/src/wh_server.c
@@ -463,4 +463,22 @@ int wh_Server_HandleRequestMessage(whServerContext* server)
return rc;
}
+#ifdef WOLFHSM_CFG_THREADSAFE
+int wh_Server_NvmLock(whServerContext* server)
+{
+ if (server == NULL || server->nvm == NULL) {
+ return WH_ERROR_BADARGS;
+ }
+ return wh_Lock_Acquire(&server->nvm->lock);
+}
+
+int wh_Server_NvmUnlock(whServerContext* server)
+{
+ if (server == NULL || server->nvm == NULL) {
+ return WH_ERROR_BADARGS;
+ }
+ return wh_Lock_Release(&server->nvm->lock);
+}
+#endif /* WOLFHSM_CFG_THREADSAFE */
+
#endif /* WOLFHSM_CFG_ENABLE_SERVER */
diff --git a/src/wh_server_cert.c b/src/wh_server_cert.c
index 0d44e778..5cc9b77e 100644
--- a/src/wh_server_cert.c
+++ b/src/wh_server_cert.c
@@ -414,9 +414,14 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic,
cert_data = (const uint8_t*)req_packet + sizeof(req);
/* Process the add trusted action */
- rc = wh_Server_CertAddTrusted(server, req.id, req.access, req.flags,
- req.label, WH_NVM_LABEL_LEN,
- cert_data, req.cert_len);
+ rc = WH_SERVER_NVM_LOCK(server);
+ if (rc == WH_ERROR_OK) {
+ rc = wh_Server_CertAddTrusted(
+ server, req.id, req.access, req.flags, req.label,
+ WH_NVM_LABEL_LEN, cert_data, req.cert_len);
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
resp.rc = rc;
/* Convert the response struct */
@@ -434,7 +439,12 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic,
magic, (whMessageCert_EraseTrustedRequest*)req_packet, &req);
/* Process the delete trusted action */
- rc = wh_Server_CertEraseTrusted(server, req.id);
+ rc = WH_SERVER_NVM_LOCK(server);
+ if (rc == WH_ERROR_OK) {
+ rc = wh_Server_CertEraseTrusted(server, req.id);
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
resp.rc = rc;
/* Convert the response struct */
@@ -466,23 +476,24 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic,
/* Check metadata to check if the certificate is non-exportable.
* This is unfortunately redundant since metadata is checked in
* wh_Server_CertReadTrusted(). */
- rc = wh_Nvm_GetMetadata(server->nvm, req.id, &meta);
+ rc = WH_SERVER_NVM_LOCK(server);
if (rc == WH_ERROR_OK) {
- /* Check if the certificate is non-exportable */
- if (meta.flags & WH_NVM_FLAGS_NONEXPORTABLE) {
- resp.rc = WH_ERROR_ACCESS;
- }
- else {
- rc = wh_Server_CertReadTrusted(server, req.id, cert_data,
- &cert_len);
- resp.rc = rc;
- resp.cert_len = cert_len;
+ rc = wh_Nvm_GetMetadata(server->nvm, req.id, &meta);
+ if (rc == WH_ERROR_OK) {
+ /* Check if the certificate is non-exportable */
+ if (meta.flags & WH_NVM_FLAGS_NONEXPORTABLE) {
+ rc = WH_ERROR_ACCESS;
+ }
+ else {
+ rc = wh_Server_CertReadTrusted(server, req.id,
+ cert_data, &cert_len);
+ resp.cert_len = cert_len;
+ }
}
- }
- else {
- resp.rc = rc;
- resp.cert_len = 0;
- }
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
+ resp.rc = rc;
/* Convert the response struct */
wh_MessageCert_TranslateReadTrustedResponse(
@@ -511,14 +522,20 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic,
whKeyId keyId = wh_KeyId_TranslateFromClient(
WH_KEYTYPE_CRYPTO, server->comm->client_id, req.keyId);
- /* Process the verify action */
- resp.rc = wh_Server_CertVerify(server, cert_data, req.cert_len,
- req.trustedRootNvmId, req.flags,
- req.cachedKeyFlags, &keyId);
- /* Propagate the keyId back to the client with flags preserved
- */
- resp.keyId = wh_KeyId_TranslateToClient(keyId);
+ rc = WH_SERVER_NVM_LOCK(server);
+ if (rc == WH_ERROR_OK) {
+ /* Process the verify action */
+ rc = wh_Server_CertVerify(server, cert_data, req.cert_len,
+ req.trustedRootNvmId, req.flags,
+ req.cachedKeyFlags, &keyId);
+ /* Propagate the keyId back to the client with flags
+ * preserved */
+ resp.keyId = wh_KeyId_TranslateToClient(keyId);
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
+ resp.rc = rc;
}
/* Convert the response struct */
@@ -550,9 +567,14 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic,
}
if (resp.rc == WH_ERROR_OK) {
/* Process the add trusted action */
- resp.rc = wh_Server_CertAddTrusted(
- server, req.id, req.access, req.flags, req.label,
- WH_NVM_LABEL_LEN, cert_data, req.cert_len);
+ resp.rc = WH_SERVER_NVM_LOCK(server);
+ if (resp.rc == WH_ERROR_OK) {
+ resp.rc = wh_Server_CertAddTrusted(
+ server, req.id, req.access, req.flags, req.label,
+ WH_NVM_LABEL_LEN, cert_data, req.cert_len);
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
}
if (resp.rc == WH_ERROR_OK) {
/* Post-process client address */
@@ -591,18 +613,23 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic,
}
if (resp.rc == WH_ERROR_OK) {
/* Check metadata to see if the certificate is non-exportable */
- resp.rc = wh_Nvm_GetMetadata(server->nvm, req.id, &meta);
+ resp.rc = WH_SERVER_NVM_LOCK(server);
if (resp.rc == WH_ERROR_OK) {
- if ((meta.flags & WH_NVM_FLAGS_NONEXPORTABLE) != 0) {
- resp.rc = WH_ERROR_ACCESS;
- }
- else {
- /* Clamp cert_len to actual stored length */
- cert_len = req.cert_len;
- resp.rc = wh_Server_CertReadTrusted(
- server, req.id, cert_data, &cert_len);
+ resp.rc = wh_Nvm_GetMetadata(server->nvm, req.id, &meta);
+ if (resp.rc == WH_ERROR_OK) {
+ if ((meta.flags & WH_NVM_FLAGS_NONEXPORTABLE) != 0) {
+ resp.rc = WH_ERROR_ACCESS;
+ }
+ else {
+ /* Clamp cert_len to actual stored length */
+ cert_len = req.cert_len;
+ resp.rc = wh_Server_CertReadTrusted(
+ server, req.id, cert_data, &cert_len);
+ }
}
- }
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
}
if (resp.rc == WH_ERROR_OK) {
/* Post-process client address */
@@ -618,9 +645,10 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic,
}; break;
case WH_MESSAGE_CERT_ACTION_VERIFY_DMA: {
- whMessageCert_VerifyDmaRequest req = {0};
+ whMessageCert_VerifyDmaRequest req = {0};
whMessageCert_VerifyDmaResponse resp = {0};
- void* cert_data = NULL;
+ void* cert_data = NULL;
+ whKeyId keyId = WH_KEYID_ERASED;
if (req_size != sizeof(req)) {
/* Request is malformed */
@@ -631,24 +659,29 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic,
wh_MessageCert_TranslateVerifyDmaRequest(
magic, (whMessageCert_VerifyDmaRequest*)req_packet, &req);
+ /* Map client keyId to server keyId space */
+ keyId = wh_KeyId_TranslateFromClient(
+ WH_KEYTYPE_CRYPTO, server->comm->client_id, req.keyId);
+
/* Process client address */
resp.rc = wh_Server_DmaProcessClientAddress(
server, req.cert_addr, &cert_data, req.cert_len,
WH_DMA_OPER_CLIENT_READ_PRE, (whServerDmaFlags){0});
}
if (resp.rc == WH_ERROR_OK) {
- /* Map client keyId to server keyId space */
- whKeyId keyId = wh_KeyId_TranslateFromClient(
- WH_KEYTYPE_CRYPTO, server->comm->client_id, req.keyId);
+ resp.rc = WH_SERVER_NVM_LOCK(server);
+ if (resp.rc == WH_ERROR_OK) {
+ /* Process the verify action */
+ resp.rc = wh_Server_CertVerify(
+ server, cert_data, req.cert_len, req.trustedRootNvmId,
+ req.flags, req.cachedKeyFlags, &keyId);
- /* Process the verify action */
- resp.rc = wh_Server_CertVerify(server, cert_data, req.cert_len,
- req.trustedRootNvmId, req.flags,
- req.cachedKeyFlags, &keyId);
+ /* Propagate the keyId back to the client with flags
+ * preserved */
+ resp.keyId = wh_KeyId_TranslateToClient(keyId);
- /* Propagate the keyId back to the client with flags preserved
- */
- resp.keyId = wh_KeyId_TranslateToClient(keyId);
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
}
if (resp.rc == WH_ERROR_OK) {
/* Post-process client address */
@@ -677,8 +710,13 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic,
cert_data = (const uint8_t*)req_packet + sizeof(req);
/* Process the verify action */
- rc = wh_Server_CertVerifyAcert(server, cert_data, req.cert_len,
- req.trustedRootNvmId);
+ rc = WH_SERVER_NVM_LOCK(server);
+ if (rc == WH_ERROR_OK) {
+ rc = wh_Server_CertVerifyAcert(server, cert_data, req.cert_len,
+ req.trustedRootNvmId);
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
/* Signature confirmation error is not an error for the server, so
* propagate this error to the client in the response, otherwise
@@ -720,8 +758,14 @@ int wh_Server_HandleCertRequest(whServerContext* server, uint16_t magic,
}
if (rc == WH_ERROR_OK) {
/* Process the verify action */
- rc = wh_Server_CertVerifyAcert(server, cert_data, req.cert_len,
- req.trustedRootNvmId);
+ rc = WH_SERVER_NVM_LOCK(server);
+ if (rc == WH_ERROR_OK) {
+ rc = wh_Server_CertVerifyAcert(
+ server, cert_data, req.cert_len, req.trustedRootNvmId);
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
+
/* Signature confirmation error is not an error for the server,
* so propagate this error to the client in the response,
* otherwise return the error code from the verify action */
diff --git a/src/wh_server_counter.c b/src/wh_server_counter.c
index 10c672ce..7bd1d1af 100644
--- a/src/wh_server_counter.c
+++ b/src/wh_server_counter.c
@@ -75,10 +75,16 @@ int wh_Server_HandleCounter(whServerContext* server, uint16_t magic,
(uint16_t)req.counterId);
/* use the label buffer to hold the counter value */
*counter = req.counter;
- ret = wh_Nvm_AddObjectWithReclaim(server->nvm, meta, 0, NULL);
+
+ ret = WH_SERVER_NVM_LOCK(server);
if (ret == WH_ERROR_OK) {
- resp.counter = *counter;
- }
+ ret = wh_Nvm_AddObjectWithReclaim(server->nvm, meta, 0, NULL);
+ if (ret == WH_ERROR_OK) {
+ resp.counter = *counter;
+ }
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
resp.rc = ret;
(void)wh_MessageCounter_TranslateInitResponse(
@@ -104,34 +110,38 @@ int wh_Server_HandleCounter(whServerContext* server, uint16_t magic,
(void)wh_MessageCounter_TranslateIncrementRequest(
magic, (whMessageCounter_IncrementRequest*)req_packet, &req);
- /* read the counter, stored in the metadata label */
- ret = wh_Nvm_GetMetadata(
- server->nvm,
- WH_MAKE_KEYID(WH_KEYTYPE_COUNTER,
- (uint16_t)server->comm->client_id,
- (uint16_t)req.counterId),
- meta);
- resp.rc = ret;
-
- /* increment and write the counter back */
+ ret = WH_SERVER_NVM_LOCK(server);
if (ret == WH_ERROR_OK) {
- *counter = *counter + 1;
- /* set counter to uint32_t max if it rolled over */
- if (*counter == 0) {
- *counter = UINT32_MAX;
+ /* read the counter, stored in the metadata label */
+ ret = wh_Nvm_GetMetadata(
+ server->nvm,
+ WH_MAKE_KEYID(WH_KEYTYPE_COUNTER,
+ (uint16_t)server->comm->client_id,
+ (uint16_t)req.counterId),
+ meta);
+
+ /* increment and write the counter back */
+ if (ret == WH_ERROR_OK) {
+ *counter = *counter + 1;
+ /* set counter to uint32_t max if it rolled over */
+ if (*counter == 0) {
+ *counter = UINT32_MAX;
+ }
+ /* only update if we didn't saturate */
+ else {
+ ret = wh_Nvm_AddObjectWithReclaim(server->nvm, meta, 0,
+ NULL);
+ }
}
- /* only update if we didn't saturate */
- else {
- ret =
- wh_Nvm_AddObjectWithReclaim(server->nvm, meta, 0, NULL);
- resp.rc = ret;
+
+ /* return counter to the caller */
+ if (ret == WH_ERROR_OK) {
+ resp.counter = *counter;
}
- }
- /* return counter to the caller */
- if (ret == WH_ERROR_OK) {
- resp.counter = *counter;
- }
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
+ resp.rc = ret;
(void)wh_MessageCounter_TranslateIncrementResponse(
magic, &resp, (whMessageCounter_IncrementResponse*)resp_packet);
@@ -155,19 +165,24 @@ int wh_Server_HandleCounter(whServerContext* server, uint16_t magic,
(void)wh_MessageCounter_TranslateReadRequest(
magic, (whMessageCounter_ReadRequest*)req_packet, &req);
- /* read the counter, stored in the metadata label */
- ret = wh_Nvm_GetMetadata(
- server->nvm,
- WH_MAKE_KEYID(WH_KEYTYPE_COUNTER,
- (uint16_t)server->comm->client_id,
- (uint16_t)req.counterId),
- meta);
- resp.rc = ret;
-
- /* return counter to the caller */
+ ret = WH_SERVER_NVM_LOCK(server);
if (ret == WH_ERROR_OK) {
- resp.counter = *counter;
- }
+ /* read the counter, stored in the metadata label */
+ ret = wh_Nvm_GetMetadata(
+ server->nvm,
+ WH_MAKE_KEYID(WH_KEYTYPE_COUNTER,
+ (uint16_t)server->comm->client_id,
+ (uint16_t)req.counterId),
+ meta);
+
+ /* return counter to the caller */
+ if (ret == WH_ERROR_OK) {
+ resp.counter = *counter;
+ }
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
+ resp.rc = ret;
(void)wh_MessageCounter_TranslateReadResponse(
magic, &resp, (whMessageCounter_ReadResponse*)resp_packet);
@@ -196,7 +211,12 @@ int wh_Server_HandleCounter(whServerContext* server, uint16_t magic,
(uint16_t)server->comm->client_id,
(uint16_t)req.counterId);
- ret = wh_Nvm_DestroyObjects(server->nvm, 1, &counterId);
+ ret = WH_SERVER_NVM_LOCK(server);
+ if (ret == WH_ERROR_OK) {
+ ret = wh_Nvm_DestroyObjects(server->nvm, 1, &counterId);
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
resp.rc = ret;
(void)wh_MessageCounter_TranslateDestroyResponse(
diff --git a/src/wh_server_keystore.c b/src/wh_server_keystore.c
index 625246c3..bbf0eca0 100644
--- a/src/wh_server_keystore.c
+++ b/src/wh_server_keystore.c
@@ -1667,20 +1667,25 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic,
}
memcpy(meta->label, req.label, req.labelSz);
- /* get a new id if one wasn't provided */
- if (WH_KEYID_ISERASED(meta->id)) {
- ret = wh_Server_KeystoreGetUniqueId(server, &meta->id);
- resp.rc = ret;
- }
- /* write the key */
+ ret = WH_SERVER_NVM_LOCK(server);
if (ret == WH_ERROR_OK) {
- ret = wh_Server_KeystoreCacheKeyChecked(server, meta, in);
- resp.rc = ret;
- }
+ /* get a new id if one wasn't provided */
+ if (WH_KEYID_ISERASED(meta->id)) {
+ ret = wh_Server_KeystoreGetUniqueId(server, &meta->id);
+ }
+ /* write the key */
+ if (ret == WH_ERROR_OK) {
+ ret = wh_Server_KeystoreCacheKeyChecked(server, meta, in);
+ }
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
+
if (ret == WH_ERROR_OK) {
/* Translate server keyId back to client format with flags */
resp.id = wh_KeyId_TranslateToClient(meta->id);
}
+ resp.rc = ret;
(void)wh_MessageKeystore_TranslateCacheResponse(
magic, &resp, (whMessageKeystore_CacheResponse*)resp_packet);
@@ -1710,26 +1715,33 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic,
}
memcpy(meta->label, req.label, req.labelSz);
- /* get a new id if one wasn't provided */
- if (WH_KEYID_ISERASED(meta->id)) {
- ret = wh_Server_KeystoreGetUniqueId(server, &meta->id);
- resp.rc = ret;
- }
-
- /* write the key using DMA */
+ ret = WH_SERVER_NVM_LOCK(server);
if (ret == WH_ERROR_OK) {
- ret = wh_Server_KeystoreCacheKeyDmaChecked(server, meta,
+ /* get a new id if one wasn't provided */
+ if (WH_KEYID_ISERASED(meta->id)) {
+ ret = wh_Server_KeystoreGetUniqueId(server, &meta->id);
+ }
+
+ /* write the key using DMA */
+ if (ret == WH_ERROR_OK) {
+ ret = wh_Server_KeystoreCacheKeyDmaChecked(server, meta,
req.key.addr);
- resp.rc = ret;
- /* propagate bad address to client if DMA operation failed */
- if (ret != WH_ERROR_OK) {
- resp.dmaAddrStatus.badAddr.addr = req.key.addr;
- resp.dmaAddrStatus.badAddr.sz = req.key.sz;
+ /* propagate bad address to client if DMA operation failed
+ */
+ if (ret != WH_ERROR_OK) {
+ resp.dmaAddrStatus.badAddr.addr = req.key.addr;
+ resp.dmaAddrStatus.badAddr.sz = req.key.sz;
+ }
}
- }
- /* Translate server keyId back to client format with flags */
- resp.id = wh_KeyId_TranslateToClient(meta->id);
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
+
+ if (ret == WH_ERROR_OK) {
+ /* Translate server keyId back to client format with flags */
+ resp.id = wh_KeyId_TranslateToClient(meta->id);
+ }
+ resp.rc = ret;
(void)wh_MessageKeystore_TranslateCacheDmaResponse(
magic, &resp, (whMessageKeystore_CacheDmaResponse*)resp_packet);
@@ -1745,23 +1757,28 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic,
(void)wh_MessageKeystore_TranslateExportDmaRequest(
magic, (whMessageKeystore_ExportDmaRequest*)req_packet, &req);
- ret = wh_Server_KeystoreExportKeyDmaChecked(
- server,
- wh_KeyId_TranslateFromClient(WH_KEYTYPE_CRYPTO,
- server->comm->client_id, req.id),
- req.key.addr, req.key.sz, meta);
- resp.rc = ret;
+ ret = WH_SERVER_NVM_LOCK(server);
+ if (ret == WH_ERROR_OK) {
+ ret = wh_Server_KeystoreExportKeyDmaChecked(
+ server,
+ wh_KeyId_TranslateFromClient(
+ WH_KEYTYPE_CRYPTO, server->comm->client_id, req.id),
+ req.key.addr, req.key.sz, meta);
- /* propagate bad address to client if DMA operation failed */
- if (ret != WH_ERROR_OK) {
- resp.dmaAddrStatus.badAddr.addr = req.key.addr;
- resp.dmaAddrStatus.badAddr.sz = req.key.sz;
- }
+ /* propagate bad address to client if DMA operation failed */
+ if (ret != WH_ERROR_OK) {
+ resp.dmaAddrStatus.badAddr.addr = req.key.addr;
+ resp.dmaAddrStatus.badAddr.sz = req.key.sz;
+ }
- if (ret == WH_ERROR_OK) {
- resp.len = req.key.sz;
- memcpy(resp.label, meta->label, sizeof(meta->label));
- }
+ if (ret == WH_ERROR_OK) {
+ resp.len = req.key.sz;
+ memcpy(resp.label, meta->label, sizeof(meta->label));
+ }
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
+ resp.rc = ret;
(void)wh_MessageKeystore_TranslateExportDmaResponse(
magic, &resp,
@@ -1778,12 +1795,17 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic,
(void)wh_MessageKeystore_TranslateEvictRequest(
magic, (whMessageKeystore_EvictRequest*)req_packet, &req);
- ret = wh_Server_KeystoreEvictKeyChecked(
- server,
- wh_KeyId_TranslateFromClient(WH_KEYTYPE_CRYPTO,
- server->comm->client_id, req.id));
+ ret = WH_SERVER_NVM_LOCK(server);
+ if (ret == WH_ERROR_OK) {
+ ret = wh_Server_KeystoreEvictKeyChecked(
+ server,
+ wh_KeyId_TranslateFromClient(
+ WH_KEYTYPE_CRYPTO, server->comm->client_id, req.id));
+ resp.ok = 0; /* unused */
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
resp.rc = ret;
- resp.ok = 0; /* unused */
(void)wh_MessageKeystore_TranslateEvictResponse(
magic, &resp, (whMessageKeystore_EvictResponse*)resp_packet);
@@ -1803,24 +1825,26 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic,
out = (uint8_t*)resp_packet + sizeof(resp);
keySz = WOLFHSM_CFG_COMM_DATA_LEN - sizeof(resp);
- /* read the key */
- ret = wh_Server_KeystoreReadKeyChecked(
- server,
- wh_KeyId_TranslateFromClient(WH_KEYTYPE_CRYPTO,
- server->comm->client_id, req.id),
- meta, out, &keySz);
+ resp.len = 0;
+ ret = WH_SERVER_NVM_LOCK(server);
+ if (ret == WH_ERROR_OK) {
+ /* read the key */
+ ret = wh_Server_KeystoreReadKeyChecked(
+ server,
+ wh_KeyId_TranslateFromClient(
+ WH_KEYTYPE_CRYPTO, server->comm->client_id, req.id),
+ meta, out, &keySz);
+
+ /* Only provide key output if no error */
+ if (ret == WH_ERROR_OK) {
+ resp.len = keySz;
+ }
+ memcpy(resp.label, meta->label, sizeof(meta->label));
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
resp.rc = ret;
- /* Only provide key output if no error */
- if (ret == WH_ERROR_OK) {
- resp.len = keySz;
- }
- else {
- resp.len = 0;
- }
- memcpy(resp.label, meta->label, sizeof(meta->label));
-
(void)wh_MessageKeystore_TranslateExportResponse(
magic, &resp, (whMessageKeystore_ExportResponse*)resp_packet);
@@ -1835,12 +1859,17 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic,
(void)wh_MessageKeystore_TranslateCommitRequest(
magic, (whMessageKeystore_CommitRequest*)req_packet, &req);
- ret = wh_Server_KeystoreCommitKeyChecked(
- server,
- wh_KeyId_TranslateFromClient(WH_KEYTYPE_CRYPTO,
- server->comm->client_id, req.id));
+ ret = WH_SERVER_NVM_LOCK(server);
+ if (ret == WH_ERROR_OK) {
+ ret = wh_Server_KeystoreCommitKeyChecked(
+ server,
+ wh_KeyId_TranslateFromClient(
+ WH_KEYTYPE_CRYPTO, server->comm->client_id, req.id));
+ resp.ok = 0; /* unused */
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
resp.rc = ret;
- resp.ok = 0; /* unused */
(void)wh_MessageKeystore_TranslateCommitResponse(
magic, &resp, (whMessageKeystore_CommitResponse*)resp_packet);
@@ -1856,12 +1885,17 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic,
(void)wh_MessageKeystore_TranslateEraseRequest(
magic, (whMessageKeystore_EraseRequest*)req_packet, &req);
- ret = wh_Server_KeystoreEraseKeyChecked(
- server,
- wh_KeyId_TranslateFromClient(WH_KEYTYPE_CRYPTO,
- server->comm->client_id, req.id));
+ ret = WH_SERVER_NVM_LOCK(server);
+ if (ret == WH_ERROR_OK) {
+ ret = wh_Server_KeystoreEraseKeyChecked(
+ server,
+ wh_KeyId_TranslateFromClient(
+ WH_KEYTYPE_CRYPTO, server->comm->client_id, req.id));
+ resp.ok = 0; /* unused */
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
resp.rc = ret;
- resp.ok = 0; /* unused */
(void)wh_MessageKeystore_TranslateEraseResponse(
magic, &resp, (whMessageKeystore_EraseResponse*)resp_packet);
@@ -1876,12 +1910,16 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic,
(void)wh_MessageKeystore_TranslateRevokeRequest(
magic, (whMessageKeystore_RevokeRequest*)req_packet, &req);
- ret = wh_Server_KeystoreRevokeKey(
- server,
- wh_KeyId_TranslateFromClient(WH_KEYTYPE_CRYPTO,
- server->comm->client_id, req.id));
+ ret = WH_SERVER_NVM_LOCK(server);
resp.rc = ret;
- ret = WH_ERROR_OK;
+ if (ret == WH_ERROR_OK) {
+ resp.rc = wh_Server_KeystoreRevokeKey(
+ server,
+ wh_KeyId_TranslateFromClient(
+ WH_KEYTYPE_CRYPTO, server->comm->client_id, req.id));
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
(void)wh_MessageKeystore_TranslateRevokeResponse(
magic, &resp, (whMessageKeystore_RevokeResponse*)resp_packet);
@@ -1916,8 +1954,18 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic,
respData = (uint8_t*)resp_packet +
sizeof(whMessageKeystore_KeyWrapResponse);
- ret = _HandleKeyWrapRequest(server, &wrapReq, reqData, reqDataSz,
- &wrapResp, respData, respDataSz);
+ /* Note: Locking here is mega-overkill, as there is only one small
+ * section inside this request pipeline that needs to be locked -
+ * freshening the server key and checking usage. Consider relocating
+ * locking to this section */
+ ret = WH_SERVER_NVM_LOCK(server);
+ if (ret == WH_ERROR_OK) {
+ ret =
+ _HandleKeyWrapRequest(server, &wrapReq, reqData, reqDataSz,
+ &wrapResp, respData, respDataSz);
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
wrapResp.rc = ret;
(void)wh_MessageKeystore_TranslateKeyWrapResponse(magic, &wrapResp,
@@ -1952,9 +2000,18 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic,
respData = (uint8_t*)resp_packet +
sizeof(whMessageKeystore_KeyUnwrapAndExportResponse);
- ret = _HandleKeyUnwrapAndExportRequest(server, &unwrapReq, reqData,
- reqDataSz, &unwrapResp,
- respData, respDataSz);
+ /* Note: Locking here is mega-overkill, as there is only one small
+ * section inside this request pipeline that needs to be locked -
+ * freshening the server key and checking usage. Consider relocating
+ * locking to this section */
+ ret = WH_SERVER_NVM_LOCK(server);
+ if (ret == WH_ERROR_OK) {
+ ret = _HandleKeyUnwrapAndExportRequest(
+ server, &unwrapReq, reqData, reqDataSz, &unwrapResp,
+ respData, respDataSz);
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
unwrapResp.rc = ret;
(void)wh_MessageKeystore_TranslateKeyUnwrapAndExportResponse(
@@ -1990,9 +2047,14 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic,
respData = (uint8_t*)resp_packet +
sizeof(whMessageKeystore_KeyUnwrapAndCacheResponse);
- ret = _HandleKeyUnwrapAndCacheRequest(server, &cacheReq, reqData,
- reqDataSz, &cacheResp,
- respData, respDataSz);
+ ret = WH_SERVER_NVM_LOCK(server);
+ if (ret == WH_ERROR_OK) {
+ ret = _HandleKeyUnwrapAndCacheRequest(
+ server, &cacheReq, reqData, reqDataSz, &cacheResp, respData,
+ respDataSz);
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
cacheResp.rc = ret;
(void)wh_MessageKeystore_TranslateKeyUnwrapAndCacheResponse(
@@ -2028,8 +2090,18 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic,
respData = (uint8_t*)resp_packet +
sizeof(whMessageKeystore_DataWrapResponse);
- ret = _HandleDataWrapRequest(server, &wrapReq, reqData, reqDataSz,
- &wrapResp, respData, respDataSz);
+ /* Note: Locking here is mega-overkill, as there is only one small
+ * section inside this request pipeline that needs to be locked -
+ * freshening the server key and checking usage. Consider relocating
+ * locking to this section */
+ ret = WH_SERVER_NVM_LOCK(server);
+ if (ret == WH_ERROR_OK) {
+ ret =
+ _HandleDataWrapRequest(server, &wrapReq, reqData, reqDataSz,
+ &wrapResp, respData, respDataSz);
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
wrapResp.rc = ret;
(void)wh_MessageKeystore_TranslateDataWrapResponse(magic, &wrapResp,
@@ -2065,9 +2137,18 @@ int wh_Server_HandleKeyRequest(whServerContext* server, uint16_t magic,
respData = (uint8_t*)resp_packet +
sizeof(whMessageKeystore_DataUnwrapResponse);
- ret =
- _HandleDataUnwrapRequest(server, &unwrapReq, reqData, reqDataSz,
- &unwrapResp, respData, respDataSz);
+ /* Note: Locking here is mega-overkill, as there is only one small
+ * section inside this request pipeline that needs to be locked -
+ * freshening the server key and checking usage. Consider relocating
+ * locking to this section */
+ ret = WH_SERVER_NVM_LOCK(server);
+ if (ret == WH_ERROR_OK) {
+ ret = _HandleDataUnwrapRequest(server, &unwrapReq, reqData,
+ reqDataSz, &unwrapResp, respData,
+ respDataSz);
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
unwrapResp.rc = ret;
(void)wh_MessageKeystore_TranslateDataUnwrapResponse(
diff --git a/src/wh_server_nvm.c b/src/wh_server_nvm.c
index 6425dd1e..9c675b62 100644
--- a/src/wh_server_nvm.c
+++ b/src/wh_server_nvm.c
@@ -153,10 +153,15 @@ int wh_Server_HandleNvmRequest(whServerContext* server,
wh_MessageNvm_TranslateListRequest(magic,
(whMessageNvm_ListRequest*)req_packet, &req);
- /* Process the list action */
- resp.rc = wh_Nvm_List(server->nvm,
- req.access, req.flags, req.startId,
- &resp.count, &resp.id);
+ rc = WH_SERVER_NVM_LOCK(server);
+ if (rc == WH_ERROR_OK) {
+ /* Process the list action */
+ rc = wh_Nvm_List(server->nvm, req.access, req.flags,
+ req.startId, &resp.count, &resp.id);
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
+ resp.rc = rc;
}
/* Convert the response struct */
wh_MessageNvm_TranslateListResponse(magic,
@@ -173,10 +178,16 @@ int wh_Server_HandleNvmRequest(whServerContext* server,
/* Request is malformed */
resp.rc = WH_ERROR_ABORTED;
} else {
- /* Process the available action */
- resp.rc = wh_Nvm_GetAvailable(server->nvm,
- &resp.avail_size, &resp.avail_objects,
+ rc = WH_SERVER_NVM_LOCK(server);
+ if (rc == WH_ERROR_OK) {
+ /* Process the available action */
+ rc = wh_Nvm_GetAvailable(
+ server->nvm, &resp.avail_size, &resp.avail_objects,
&resp.reclaim_size, &resp.reclaim_objects);
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
+ resp.rc = rc;
}
/* Convert the response struct */
wh_MessageNvm_TranslateGetAvailableResponse(magic,
@@ -198,16 +209,22 @@ int wh_Server_HandleNvmRequest(whServerContext* server,
wh_MessageNvm_TranslateGetMetadataRequest(magic,
(whMessageNvm_GetMetadataRequest*)req_packet, &req);
- /* Process the getmetadata action */
- resp.rc = wh_Nvm_GetMetadata(server->nvm, req.id, &meta);
+ rc = WH_SERVER_NVM_LOCK(server);
+ if (rc == WH_ERROR_OK) {
+ /* Process the getmetadata action */
+ rc = wh_Nvm_GetMetadata(server->nvm, req.id, &meta);
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
- if (resp.rc == 0) {
- resp.id = meta.id;
+ if (rc == WH_ERROR_OK) {
+ resp.id = meta.id;
resp.access = meta.access;
- resp.flags = meta.flags;
- resp.len = meta.len;
+ resp.flags = meta.flags;
+ resp.len = meta.len;
memcpy(resp.label, meta.label, sizeof(resp.label));
}
+ resp.rc = rc;
}
/* Convert the response struct */
wh_MessageNvm_TranslateGetMetadataResponse(magic,
@@ -237,15 +254,22 @@ int wh_Server_HandleNvmRequest(whServerContext* server,
meta.flags = req.flags;
meta.len = req.len;
memcpy(meta.label, req.label, sizeof(meta.label));
- resp.rc =
- wh_Nvm_AddObjectChecked(server->nvm, &meta, req.len, data);
+
+ rc = WH_SERVER_NVM_LOCK(server);
+ if (rc == WH_ERROR_OK) {
+ rc = wh_Nvm_AddObjectChecked(server->nvm, &meta, req.len,
+ data);
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
+ resp.rc = rc;
}
}
/* Convert the response struct */
wh_MessageNvm_TranslateSimpleResponse(magic,
&resp, (whMessageNvm_SimpleResponse*)resp_packet);
*out_resp_size = sizeof(resp);
- }; break;
+ }; break;
case WH_MESSAGE_NVM_ACTION_DESTROYOBJECTS:
{
@@ -261,9 +285,15 @@ int wh_Server_HandleNvmRequest(whServerContext* server,
(whMessageNvm_DestroyObjectsRequest*)req_packet, &req);
if (req.list_count <= WH_MESSAGE_NVM_MAX_DESTROY_OBJECTS_COUNT) {
- /* Process the DestroyObjects action */
- resp.rc = wh_Nvm_DestroyObjectsChecked(
- server->nvm, req.list_count, req.list);
+ rc = WH_SERVER_NVM_LOCK(server);
+ if (rc == WH_ERROR_OK) {
+ /* Process the DestroyObjects action */
+ rc = wh_Nvm_DestroyObjectsChecked(server->nvm,
+ req.list_count, req.list);
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
+ resp.rc = rc;
}
else {
/* Problem in transport or request */
@@ -293,11 +323,17 @@ int wh_Server_HandleNvmRequest(whServerContext* server,
wh_MessageNvm_TranslateReadRequest(
magic, (whMessageNvm_ReadRequest*)req_packet, &req);
- resp.rc = _HandleNvmRead(server, data, req.offset, req.data_len,
- &req.data_len, req.id);
- if (resp.rc == WH_ERROR_OK) {
- data_len = req.data_len;
- }
+ rc = WH_SERVER_NVM_LOCK(server);
+ if (rc == WH_ERROR_OK) {
+ rc = _HandleNvmRead(server, data, req.offset, req.data_len,
+ &req.data_len, req.id);
+ if (rc == WH_ERROR_OK) {
+ data_len = req.data_len;
+ }
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
+ resp.rc = rc;
}
/* Convert the response struct */
wh_MessageNvm_TranslateReadResponse(
@@ -334,10 +370,16 @@ int wh_Server_HandleNvmRequest(whServerContext* server,
WH_DMA_OPER_CLIENT_READ_PRE, (whServerDmaFlags){0});
}
if (resp.rc == 0) {
- /* Process the AddObject action */
- resp.rc =
- wh_Nvm_AddObjectChecked(server->nvm, (whNvmMetadata*)metadata,
- req.data_len, (const uint8_t*)data);
+ rc = WH_SERVER_NVM_LOCK(server);
+ if (rc == WH_ERROR_OK) {
+ /* Process the AddObject action */
+ rc = wh_Nvm_AddObjectChecked(
+ server->nvm, (whNvmMetadata*)metadata, req.data_len,
+ (const uint8_t*)data);
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
+ resp.rc = rc;
}
if (resp.rc == 0) {
/* perform platform-specific host address processing */
@@ -361,7 +403,7 @@ int wh_Server_HandleNvmRequest(whServerContext* server,
whMessageNvm_ReadDmaRequest req = {0};
whMessageNvm_SimpleResponse resp = {0};
whNvmMetadata meta = {0};
- whNvmSize read_len;
+ whNvmSize read_len = 0;
void* data = NULL;
if (req_size != sizeof(req)) {
@@ -373,41 +415,47 @@ int wh_Server_HandleNvmRequest(whServerContext* server,
wh_MessageNvm_TranslateReadDmaRequest(magic,
(whMessageNvm_ReadDmaRequest*)req_packet, &req);
- resp.rc = wh_Nvm_GetMetadata(server->nvm, req.id, &meta);
- }
-
- if (resp.rc == 0) {
- if (req.offset >= meta.len) {
- resp.rc = WH_ERROR_BADARGS;
- }
- }
-
- if (resp.rc == 0) {
- read_len = req.data_len;
- /* Clamp length to object size */
- if ((req.offset + read_len) > meta.len) {
- read_len = meta.len - req.offset;
- }
- }
-
- /* use unclamped length for DMA address processing in case DMA callbacks
- * are sensible to alignment and/or size */
- if (resp.rc == 0) {
- /* perform platform-specific host address processing */
- resp.rc = wh_Server_DmaProcessClientAddress(
- server, req.data_hostaddr, &data, req.data_len,
- WH_DMA_OPER_CLIENT_WRITE_PRE, (whServerDmaFlags){0});
- }
- if (resp.rc == 0) {
- /* Process the Read action */
- resp.rc = wh_Nvm_ReadChecked(server->nvm, req.id, req.offset,
- read_len, (uint8_t*)data);
- }
- if (resp.rc == 0) {
- /* perform platform-specific host address processing */
- resp.rc = wh_Server_DmaProcessClientAddress(
- server, req.data_hostaddr, &data, req.data_len,
- WH_DMA_OPER_CLIENT_WRITE_POST, (whServerDmaFlags){0});
+ rc = WH_SERVER_NVM_LOCK(server);
+ if (rc == WH_ERROR_OK) {
+ rc = wh_Nvm_GetMetadata(server->nvm, req.id, &meta);
+
+ if (rc == 0) {
+ if (req.offset >= meta.len) {
+ rc = WH_ERROR_BADARGS;
+ }
+ }
+
+ if (rc == 0) {
+ read_len = req.data_len;
+ /* Clamp length to object size */
+ if ((req.offset + read_len) > meta.len) {
+ read_len = meta.len - req.offset;
+ }
+ }
+
+ /* use unclamped length for DMA address processing in case DMA
+ * callbacks are sensitive to alignment and/or size */
+ if (rc == 0) {
+ /* perform platform-specific host address processing */
+ rc = wh_Server_DmaProcessClientAddress(
+ server, req.data_hostaddr, &data, req.data_len,
+ WH_DMA_OPER_CLIENT_WRITE_PRE, (whServerDmaFlags){0});
+ }
+ if (rc == 0) {
+ /* Process the Read action */
+ rc = wh_Nvm_ReadChecked(server->nvm, req.id, req.offset,
+ read_len, (uint8_t*)data);
+ }
+ if (rc == 0) {
+ /* perform platform-specific host address processing */
+ rc = wh_Server_DmaProcessClientAddress(
+ server, req.data_hostaddr, &data, req.data_len,
+ WH_DMA_OPER_CLIENT_WRITE_POST, (whServerDmaFlags){0});
+ }
+
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
+ resp.rc = rc;
}
/* Convert the response struct */
wh_MessageNvm_TranslateSimpleResponse(magic,
diff --git a/src/wh_server_she.c b/src/wh_server_she.c
index 18a39745..633dfc4b 100644
--- a/src/wh_server_she.c
+++ b/src/wh_server_she.c
@@ -1608,68 +1608,120 @@ int wh_Server_HandleSheRequest(whServerContext* server, uint16_t magic,
resp_packet);
break;
case WH_SHE_SECURE_BOOT_INIT:
- ret = _SecureBootInit(server, magic, req_size, req_packet,
- out_resp_size, resp_packet);
+ ret = WH_SERVER_NVM_LOCK(server);
+ if (ret == WH_ERROR_OK) {
+ ret = _SecureBootInit(server, magic, req_size, req_packet,
+ out_resp_size, resp_packet);
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
break;
case WH_SHE_SECURE_BOOT_UPDATE:
ret = _SecureBootUpdate(server, magic, req_size, req_packet,
out_resp_size, resp_packet);
break;
case WH_SHE_SECURE_BOOT_FINISH:
- ret = _SecureBootFinish(server, magic, req_size, req_packet,
- out_resp_size, resp_packet);
+ ret = WH_SERVER_NVM_LOCK(server);
+ if (ret == WH_ERROR_OK) {
+ ret = _SecureBootFinish(server, magic, req_size, req_packet,
+ out_resp_size, resp_packet);
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
break;
case WH_SHE_GET_STATUS:
ret = _GetStatus(server, magic, req_size, req_packet, out_resp_size,
resp_packet);
break;
case WH_SHE_LOAD_KEY:
- ret = _LoadKey(server, magic, req_size, req_packet, out_resp_size,
- resp_packet);
+ ret = WH_SERVER_NVM_LOCK(server);
+ if (ret == WH_ERROR_OK) {
+ ret = _LoadKey(server, magic, req_size, req_packet,
+ out_resp_size, resp_packet);
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
break;
case WH_SHE_LOAD_PLAIN_KEY:
- ret = _LoadPlainKey(server, magic, req_size, req_packet,
- out_resp_size, resp_packet);
+ ret = WH_SERVER_NVM_LOCK(server);
+ if (ret == WH_ERROR_OK) {
+ ret = _LoadPlainKey(server, magic, req_size, req_packet,
+ out_resp_size, resp_packet);
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
break;
case WH_SHE_EXPORT_RAM_KEY:
- ret = _ExportRamKey(server, magic, req_size, req_packet,
- out_resp_size, resp_packet);
+ ret = WH_SERVER_NVM_LOCK(server);
+ if (ret == WH_ERROR_OK) {
+ ret = _ExportRamKey(server, magic, req_size, req_packet,
+ out_resp_size, resp_packet);
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
break;
case WH_SHE_INIT_RND:
- ret = _InitRnd(server, magic, req_size, req_packet, out_resp_size,
- resp_packet);
+ ret = WH_SERVER_NVM_LOCK(server);
+ if (ret == WH_ERROR_OK) {
+ ret = _InitRnd(server, magic, req_size, req_packet,
+ out_resp_size, resp_packet);
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
break;
case WH_SHE_RND:
ret = _Rnd(server, magic, req_size, req_packet, out_resp_size,
resp_packet);
break;
case WH_SHE_EXTEND_SEED:
- ret = _ExtendSeed(server, magic, req_size, req_packet,
- out_resp_size, resp_packet);
+ ret = WH_SERVER_NVM_LOCK(server);
+ if (ret == WH_ERROR_OK) {
+ ret = _ExtendSeed(server, magic, req_size, req_packet,
+ out_resp_size, resp_packet);
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
break;
case WH_SHE_ENC_ECB:
- ret = _EncEcb(server, magic, req_size, req_packet, out_resp_size,
- resp_packet);
+ ret = WH_SERVER_NVM_LOCK(server);
+ if (ret == WH_ERROR_OK) {
+ ret = _EncEcb(server, magic, req_size, req_packet,
+ out_resp_size, resp_packet);
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
break;
case WH_SHE_ENC_CBC:
- ret = _EncCbc(server, magic, req_size, req_packet, out_resp_size,
- resp_packet);
+ ret = WH_SERVER_NVM_LOCK(server);
+ if (ret == WH_ERROR_OK) {
+ ret = _EncCbc(server, magic, req_size, req_packet,
+ out_resp_size, resp_packet);
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
break;
case WH_SHE_DEC_ECB:
- ret = _DecEcb(server, magic, req_size, req_packet, out_resp_size,
- resp_packet);
+ ret = WH_SERVER_NVM_LOCK(server);
+ if (ret == WH_ERROR_OK) {
+ ret = _DecEcb(server, magic, req_size, req_packet,
+ out_resp_size, resp_packet);
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
break;
case WH_SHE_DEC_CBC:
- ret = _DecCbc(server, magic, req_size, req_packet, out_resp_size,
- resp_packet);
+ ret = WH_SERVER_NVM_LOCK(server);
+ if (ret == WH_ERROR_OK) {
+ ret = _DecCbc(server, magic, req_size, req_packet,
+ out_resp_size, resp_packet);
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
break;
case WH_SHE_GEN_MAC:
- ret = _GenerateMac(server, magic, req_size, req_packet,
- out_resp_size, resp_packet);
+ ret = WH_SERVER_NVM_LOCK(server);
+ if (ret == WH_ERROR_OK) {
+ ret = _GenerateMac(server, magic, req_size, req_packet,
+ out_resp_size, resp_packet);
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
break;
case WH_SHE_VERIFY_MAC:
- ret = _VerifyMac(server, magic, req_size, req_packet, out_resp_size,
- resp_packet);
+ ret = WH_SERVER_NVM_LOCK(server);
+ if (ret == WH_ERROR_OK) {
+ ret = _VerifyMac(server, magic, req_size, req_packet,
+ out_resp_size, resp_packet);
+ (void)WH_SERVER_NVM_UNLOCK(server);
+ } /* WH_SERVER_NVM_LOCK() */
break;
default:
ret = WH_ERROR_BADARGS;
diff --git a/test/Makefile b/test/Makefile
index f3dd6971..25921804 100644
--- a/test/Makefile
+++ b/test/Makefile
@@ -93,6 +93,25 @@ ifeq ($(COVERAGE),1)
LDFLAGS += --coverage
endif
+# Enable threadsafe mode, adding lock protection to shared structures
+ifeq ($(THREADSAFE),1)
+ DEF += -DWOLFHSM_CFG_THREADSAFE
+endif
+
+# Add thread sanitizer option (mutually exclusive with ASAN)
+ifeq ($(TSAN),1)
+ ifeq ($(ASAN),1)
+ $(error TSAN and ASAN cannot be used together)
+ endif
+ CFLAGS += -fsanitize=thread -fPIE
+ DEF += -DWOLFSSL_NO_FENCE
+ ifeq ($(STRESS),1)
+ # Force TSAN annotations on in stress test
+ DEF += -DWOLFHSM_CFG_TEST_STRESS_TSAN
+ endif
+ LDFLAGS += -fsanitize=thread -pie
+endif
+
## wolfSSL defines
ifeq ($(DEBUG_WOLFSSL),1)
DEF += -DDEBUG_WOLFSSL
@@ -103,6 +122,7 @@ ifeq ($(NOCRYPTO),1)
DEF += -DWOLFHSM_CFG_NO_CRYPTO
endif
+# Enable scan-build
ifeq ($(SCAN),1)
SCAN_LOG = scan_test.log
# Default target
@@ -124,6 +144,14 @@ ifeq ($(TLS),1)
DEF += -DWOLFHSM_CFG_TLS
endif
+# Support stress test mode (only runs thread safety stress test)
+ifeq ($(STRESS),1)
+ ifneq ($(THREADSAFE),1)
+ $(error Stress test requires building with THREADSAFE=1)
+ endif
+ DEF += -DWOLFHSM_CFG_TEST_STRESS
+endif
+
## Project defines
# Option to build wolfcrypt tests
ifeq ($(TESTWOLFCRYPT),1)
@@ -193,7 +221,7 @@ SRC_C += $(wildcard $(WOLFHSM_DIR)/src/*.c)
# wolfHSM port/HAL code
SRC_C += $(wildcard $(WOLFHSM_PORT_DIR)/*.c)
-# Project
+# Test source files
SRC_C += $(wildcard $(PROJECT_DIR)/*.c)
@@ -274,9 +302,17 @@ clean:
run:
ifeq (,$(wildcard $(BUILD_DIR)/$(BIN).elf))
$(error $(BUILD_DIR)/$(BIN).elf not found. Try: make)
+else
+ifeq ($(TSAN),1)
+ # TSAN options:
+ # - fail fast on first data race detected
+ # - non-zero exit code if races are detected to ensure CI fails
+ # - use our custom suppressions file to ignore wolfCrypt and SHM transport
+ TSAN_OPTIONS="halt_on_error=1:exitcode=66:suppressions=$(PROJECT_DIR)/tsan.supp" $(BUILD_DIR)/$(BIN).elf
else
$(BUILD_DIR)/$(BIN).elf
endif
+endif
# Coverage target: build with coverage, run tests, and generate report
coverage:
diff --git a/test/tsan.supp b/test/tsan.supp
new file mode 100644
index 00000000..293cb645
--- /dev/null
+++ b/test/tsan.supp
@@ -0,0 +1,21 @@
+# ThreadSanitizer Suppression File for wolfHSM
+#
+# This file suppresses known false positives and external library issues.
+# Use with: TSAN_OPTIONS="suppressions=tsan.supp" ./wh_test_stress.elf
+
+# =============================================================================
+# Category: Transport Memory Layer (Lock-free by Design)
+# =============================================================================
+# The shared memory transport uses a lock-free protocol with volatile pointers,
+# memory fences, and cache operations. TSAN doesn't understand this higher-level
+# protocol and flags the raw memory accesses as races.
+
+# CSR (Control/Status Register) access functions
+race:wh_TransportMem_SendRequest
+race:wh_TransportMem_RecvRequest
+race:wh_TransportMem_SendResponse
+race:wh_TransportMem_RecvResponse
+
+# Shared buffer memcpy operations
+race:wh_Utils_memcpy_flush
+race:wh_Utils_memcpy_invalidate
diff --git a/test/wh_test.c b/test/wh_test.c
index 29ba86b8..3fd9a1bb 100644
--- a/test/wh_test.c
+++ b/test/wh_test.c
@@ -40,6 +40,8 @@
#include "wh_test_keywrap.h"
#include "wh_test_multiclient.h"
#include "wh_test_log.h"
+#include "wh_test_lock.h"
+#include "wh_test_posix_threadsafe_stress.h"
#if defined(WOLFHSM_CFG_CERTIFICATE_MANAGER)
#include "wh_test_cert.h"
@@ -104,6 +106,10 @@ int whTest_Unit(void)
/* Multi-Client Tests (includes Global Keys when enabled) */
WH_TEST_ASSERT(0 == whTest_MultiClient());
+#if defined(WOLFHSM_CFG_TEST_POSIX) && defined(WOLFHSM_CFG_THREADSAFE)
+ WH_TEST_ASSERT(0 == whTest_LockPosix());
+#endif
+
#if defined(WOLFHSM_CFG_SHE_EXTENSION)
WH_TEST_ASSERT(0 == whTest_She());
#endif /* WOLFHSM_SHE_EXTENTION */
@@ -255,8 +261,14 @@ int main(void)
{
int ret = 0;
-#if defined(WOLFHSM_CFG_TEST_CLIENT_ONLY) && \
+#if defined(WOLFHSM_CFG_THREADSAFE) && defined(WOLFHSM_CFG_TEST_STRESS) && \
+ defined(WOLFHSM_CFG_TEST_POSIX)
+ /* Stress test mode: only run thread safety stress test */
+ ret = whTest_ThreadSafeStress();
+
+#elif defined(WOLFHSM_CFG_TEST_CLIENT_ONLY) && \
defined(WOLFHSM_CFG_ENABLE_CLIENT) && defined(WOLFHSM_CFG_TEST_POSIX)
+
/* Test driver should run client tests against the example server */
#if defined(WOLFHSM_CFG_TLS)
/* Run TLS client tests */
@@ -265,9 +277,12 @@ int main(void)
/* Run TCP client tests (default) */
ret = whTest_ClientTcp();
#endif
+
#elif defined(WOLFHSM_CFG_ENABLE_CLIENT) && defined(WOLFHSM_CFG_ENABLE_SERVER)
+
/* Default case: Test driver should run all the unit tests locally */
ret = whTest_Unit();
+
#else
#error "No client or server enabled in build, one or both must be enabled"
#endif
diff --git a/test/wh_test_cert.c b/test/wh_test_cert.c
index b249609a..0da3f385 100644
--- a/test/wh_test_cert.c
+++ b/test/wh_test_cert.c
@@ -618,7 +618,7 @@ int whTest_CertRamSim(whTestNvmBackendType nvmType)
const whFlashCb fcb[1] = {WH_FLASH_RAMSIM_CB};
whTestNvmBackendUnion nvm_setup;
- whNvmConfig n_conf[1];
+ whNvmConfig n_conf[1] = {0};
whNvmContext nvm[1] = {{0}};
WH_TEST_RETURN_ON_FAIL(
diff --git a/test/wh_test_clientserver.c b/test/wh_test_clientserver.c
index 82003597..15f4ea67 100644
--- a/test/wh_test_clientserver.c
+++ b/test/wh_test_clientserver.c
@@ -619,7 +619,7 @@ int whTest_ClientServerSequential(whTestNvmBackendType nvmType)
const whFlashCb fcb[1] = {WH_FLASH_RAMSIM_CB};
whTestNvmBackendUnion nvm_setup;
- whNvmConfig n_conf[1];
+ whNvmConfig n_conf[1] = {0};
whNvmContext nvm[1] = {{0}};
WH_TEST_RETURN_ON_FAIL(
@@ -1739,7 +1739,7 @@ static int wh_ClientServer_PosixMemMapThreadTest(whTestNvmBackendType nvmType)
const whFlashCb fcb[1] = {WH_FLASH_RAMSIM_CB};
whTestNvmBackendUnion nvm_setup;
- whNvmConfig n_conf[1];
+ whNvmConfig n_conf[1] = {0};
whNvmContext nvm[1] = {{0}};
WH_TEST_RETURN_ON_FAIL(
diff --git a/test/wh_test_crypto.c b/test/wh_test_crypto.c
index b925cfa6..bc1a3d2a 100644
--- a/test/wh_test_crypto.c
+++ b/test/wh_test_crypto.c
@@ -5493,7 +5493,7 @@ static int wh_ClientServer_MemThreadTest(whTestNvmBackendType nvmType)
const whFlashCb fcb[1] = {WH_FLASH_RAMSIM_CB};
whTestNvmBackendUnion nvm_setup;
- whNvmConfig n_conf[1];
+ whNvmConfig n_conf[1] = {0};
whNvmContext nvm[1] = {{0}};
WH_TEST_RETURN_ON_FAIL(
diff --git a/test/wh_test_lock.c b/test/wh_test_lock.c
new file mode 100644
index 00000000..6ba42c18
--- /dev/null
+++ b/test/wh_test_lock.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2026 wolfSSL Inc.
+ *
+ * This file is part of wolfHSM.
+ *
+ * wolfHSM is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * wolfHSM is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with wolfHSM. If not, see <http://www.gnu.org/licenses/>.
+ */
+/*
+ * test/wh_test_lock.c
+ *
+ * Super simple smoke tests to ensure a lock implementation has basic
+ * functionality
+ */
+
+#include <stdint.h>
+#include <string.h>
+
+#include "wolfhsm/wh_settings.h"
+#include "wolfhsm/wh_error.h"
+
+#include "wh_test_common.h"
+#include "wh_test_lock.h"
+
+#ifdef WOLFHSM_CFG_THREADSAFE
+
+#include "wolfhsm/wh_lock.h"
+#include "wolfhsm/wh_nvm.h"
+#include "wolfhsm/wh_nvm_flash.h"
+#include "wolfhsm/wh_flash_ramsim.h"
+
+#ifdef WOLFHSM_CFG_TEST_POSIX
+#include "port/posix/posix_lock.h"
+#define FLASH_RAM_SIZE (1024 * 1024)
+#define FLASH_SECTOR_SIZE (4096)
+#define FLASH_PAGE_SIZE (8)
+#endif
+
+/* Test: Lock init/cleanup lifecycle */
+static int testLockLifecycle(whLockConfig* lockConfig)
+{
+ whLock lock;
+ int rc;
+
+ memset(&lock, 0, sizeof(lock));
+
+ WH_TEST_PRINT("Testing lock lifecycle...\n");
+
+ if (lockConfig == NULL) {
+ WH_TEST_PRINT(" Lock lifecycle: SKIPPED (no config provided)\n");
+ return WH_ERROR_OK;
+ }
+
+ /* Init should succeed */
+ rc = wh_Lock_Init(&lock, lockConfig);
+ WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK);
+
+ /* Acquire and release should work */
+ rc = wh_Lock_Acquire(&lock);
+ WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK);
+
+ rc = wh_Lock_Release(&lock);
+ WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK);
+
+ /* Cleanup should succeed */
+ rc = wh_Lock_Cleanup(&lock);
+ WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK);
+
+ /* After cleanup, acquire should fail */
+ rc = wh_Lock_Acquire(&lock);
+ WH_TEST_ASSERT_RETURN(rc == WH_ERROR_BADARGS);
+
+ /* After cleanup, release should fail */
+ rc = wh_Lock_Release(&lock);
+ WH_TEST_ASSERT_RETURN(rc == WH_ERROR_BADARGS);
+
+ /* Second cleanup should succeed (idempotent) */
+ rc = wh_Lock_Cleanup(&lock);
+ WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK);
+
+ WH_TEST_PRINT(" Lock lifecycle: PASS\n");
+ return WH_ERROR_OK;
+}
+
+/* Test: NULL config results in no-op locking */
+static int testNullConfigNoOp(void)
+{
+ whLock lock;
+ int rc;
+
+ memset(&lock, 0, sizeof(lock));
+
+ WH_TEST_PRINT("Testing NULL config no-op...\n");
+
+ /* Init with NULL config should succeed (no-op mode) */
+ rc = wh_Lock_Init(&lock, NULL);
+ WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK);
+
+ /* Acquire/release should be no-ops returning OK */
+ rc = wh_Lock_Acquire(&lock);
+ WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK);
+
+ rc = wh_Lock_Release(&lock);
+ WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK);
+
+ /* Cleanup should succeed */
+ rc = wh_Lock_Cleanup(&lock);
+ WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK);
+
+ WH_TEST_PRINT(" NULL config no-op: PASS\n");
+ return WH_ERROR_OK;
+}
+
+/* Test: Operations on uninitialized lock should fail appropriately */
+static int testUninitializedLock(void)
+{
+ whLock lock;
+ int rc;
+
+ WH_TEST_PRINT("Testing uninitialized lock...\n");
+
+ /* Create zeroed lock structure (no init call) */
+ memset(&lock, 0, sizeof(lock));
+
+ /* Acquire on uninitialized lock should fail */
+ rc = wh_Lock_Acquire(&lock);
+ WH_TEST_ASSERT_RETURN(rc == WH_ERROR_BADARGS);
+
+ /* Release on uninitialized lock should fail */
+ rc = wh_Lock_Release(&lock);
+ WH_TEST_ASSERT_RETURN(rc == WH_ERROR_BADARGS);
+
+ /* Cleanup on uninitialized lock should succeed (idempotent) */
+ rc = wh_Lock_Cleanup(&lock);
+ WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK);
+
+ WH_TEST_PRINT(" Uninitialized lock: PASS\n");
+ return WH_ERROR_OK;
+}
+
+#ifdef WOLFHSM_CFG_TEST_POSIX
+/* Test: NVM simulator with lock config. Restrict to POSIX port test due to
+ * resource utilization */
+static int testNvmRamSimWithLock(whLockConfig* lockConfig)
+{
+ /* Flash simulator */
+ uint8_t flashMemory[FLASH_RAM_SIZE];
+ const whFlashCb flashCb[1] = {WH_FLASH_RAMSIM_CB};
+ whFlashRamsimCtx flashCtx[1] = {0};
+ whFlashRamsimCfg flashCfg[1] = {{
+ .size = FLASH_RAM_SIZE,
+ .sectorSize = FLASH_SECTOR_SIZE,
+ .pageSize = FLASH_PAGE_SIZE,
+ .erasedByte = 0xFF,
+ .memory = flashMemory,
+ }};
+
+ /* NVM flash layer */
+ whNvmCb nvmFlashCb[1] = {WH_NVM_FLASH_CB};
+ whNvmFlashContext nvmFlashCtx[1] = {0};
+ whNvmFlashConfig nvmFlashCfg = {
+ .cb = flashCb,
+ .context = flashCtx,
+ .config = flashCfg,
+ };
+
+ /* NVM context with lock */
+ whNvmContext nvm = {0};
+ whNvmConfig nvmCfg;
+
+ whNvmMetadata meta;
+ uint8_t testData[] = "Hello, NVM with lock!";
+ uint8_t readBuf[32];
+ int rc;
+
+ WH_TEST_PRINT("Testing NVM with lock...\n");
+
+ memset(flashMemory, 0xFF, sizeof(flashMemory));
+
+ /* Initialize NVM flash */
+ rc = wh_NvmFlash_Init(nvmFlashCtx, &nvmFlashCfg);
+ WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK);
+
+ /* Set up NVM config with user-supplied lock */
+ nvmCfg.cb = nvmFlashCb;
+ nvmCfg.context = nvmFlashCtx;
+ nvmCfg.config = &nvmFlashCfg;
+ nvmCfg.lockConfig = lockConfig;
+
+ rc = wh_Nvm_Init(&nvm, &nvmCfg);
+ WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK);
+
+ /* Test basic NVM operations with locking */
+ memset(&meta, 0, sizeof(meta));
+ meta.id = 1;
+ meta.len = sizeof(testData);
+
+ rc = wh_Nvm_AddObject(&nvm, &meta, sizeof(testData), testData);
+ WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK);
+
+ memset(readBuf, 0, sizeof(readBuf));
+ rc = wh_Nvm_Read(&nvm, 1, 0, sizeof(testData), readBuf);
+ WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK);
+ WH_TEST_ASSERT_RETURN(memcmp(testData, readBuf, sizeof(testData)) == 0);
+
+ /* Cleanup */
+ rc = wh_Nvm_Cleanup(&nvm);
+ WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK);
+
+ rc = wh_NvmFlash_Cleanup(nvmFlashCtx);
+ WH_TEST_ASSERT_RETURN(rc == WH_ERROR_OK);
+
+ WH_TEST_PRINT(" NVM with lock: PASS\n");
+ return WH_ERROR_OK;
+}
+#endif /* WOLFHSM_CFG_TEST_POSIX */
+
+int whTest_LockConfig(whLockConfig* lockConfig)
+{
+ WH_TEST_PRINT("Testing lock functionality...\n");
+
+ WH_TEST_RETURN_ON_FAIL(testLockLifecycle(lockConfig));
+ WH_TEST_RETURN_ON_FAIL(testNullConfigNoOp());
+ WH_TEST_RETURN_ON_FAIL(testUninitializedLock());
+#ifdef WOLFHSM_CFG_TEST_POSIX
+ WH_TEST_RETURN_ON_FAIL(testNvmRamSimWithLock(lockConfig));
+#endif
+
+ WH_TEST_PRINT("Lock tests PASSED\n");
+ return WH_ERROR_OK;
+}
+
+
+#if defined(WOLFHSM_CFG_TEST_POSIX)
+int whTest_LockPosix(void)
+{
+ posixLockContext lockCtx = {0};
+ whLockCb lockCb = POSIX_LOCK_CB;
+ whLockConfig lockConfig;
+
+ lockConfig.cb = &lockCb;
+ lockConfig.context = &lockCtx;
+ lockConfig.config = NULL; /* Use default mutex attributes */
+
+ return whTest_LockConfig(&lockConfig);
+}
+#endif /* WOLFHSM_CFG_TEST_POSIX */
+
+#endif /* WOLFHSM_CFG_THREADSAFE */
diff --git a/test/wh_test_lock.h b/test/wh_test_lock.h
new file mode 100644
index 00000000..4564c30e
--- /dev/null
+++ b/test/wh_test_lock.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2024 wolfSSL Inc.
+ *
+ * This file is part of wolfHSM.
+ *
+ * wolfHSM is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * wolfHSM is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with wolfHSM. If not, see <http://www.gnu.org/licenses/>.
+ */
+/*
+ * test/wh_test_lock.h
+ *
+ * Thread-safety lock tests
+ */
+#ifndef TEST_WH_TEST_LOCK_H_
+#define TEST_WH_TEST_LOCK_H_
+
+#include "wolfhsm/wh_lock.h" /* For whLockConfig type */
+
+/*
+ * Runs all lock tests using the supplied lock configuration.
+ * - Tests lock lifecycle (init/cleanup) if lockConfig is not NULL
+ * - Tests NULL config results in no-op locking
+ * - Tests NVM with lock config
+ *
+ * @param lockConfig Pointer to lock configuration. If NULL, the lock lifecycle
+ * test will be skipped but other tests will still run.
+ * Returns 0 on success, and a non-zero error code on failure
+ */
+int whTest_LockConfig(whLockConfig* lockConfig);
+
+#if defined(WOLFHSM_CFG_TEST_POSIX)
+
+/*
+ * Runs all lock tests using the POSIX lock backend on top of POSIX sim
+ * - Tests lock lifecycle (init/cleanup)
+ * - Tests NULL config results in no-op locking
+ * - Tests NVM with lock config
+ * Returns 0 on success, and a non-zero error code on failure
+ */
+int whTest_LockPosix(void);
+#endif /* WOLFHSM_CFG_TEST_POSIX */
+
+#endif /* TEST_WH_TEST_LOCK_H_ */
diff --git a/test/wh_test_log.c b/test/wh_test_log.c
index f2da3ee3..e58a4ea6 100644
--- a/test/wh_test_log.c
+++ b/test/wh_test_log.c
@@ -1480,7 +1480,7 @@ static int whTest_LogClientServerMemTransport(void)
const whFlashCb fcb[1] = {WH_FLASH_RAMSIM_CB};
whTestNvmBackendUnion nvm_setup;
- whNvmConfig n_conf[1];
+ whNvmConfig n_conf[1] = {0};
whNvmContext nvm[1] = {{0}};
WH_TEST_RETURN_ON_FAIL(whTest_NvmCfgBackend(
diff --git a/test/wh_test_posix_threadsafe_stress.c b/test/wh_test_posix_threadsafe_stress.c
new file mode 100644
index 00000000..fb4c6a0f
--- /dev/null
+++ b/test/wh_test_posix_threadsafe_stress.c
@@ -0,0 +1,2423 @@
+/*
+ * Copyright (C) 2026 wolfSSL Inc.
+ *
+ * This file is part of wolfHSM.
+ *
+ * wolfHSM is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * wolfHSM is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with wolfHSM. If not, see <http://www.gnu.org/licenses/>.
+ */
+/*
+ * test/wh_test_posix_threadsafe_stress.c
+ *
+ * POSIX-based phased contention test for thread safety validation.
+ *
+ * Intended to drive a high volume of concurrent requests across multiple
+ * servers sharing a single NVM context, while running under TSAN to detect any
+ * data races. Uses POSIX threading primitives (pthreads).
+ *
+ * Architecture:
+ * - 1 shared NVM context with lock
+ * - 4 server contexts sharing the NVM
+ * - 4 client threads (all doing both NVM and keystore ops)
+ * - Different contention phases to stress test contention patterns across
+ * various different APIs
+ *
+ * NOTE: Uses PTHREAD_MUTEX_ERRORCHECK attribute to trap undefined behavior
+ * errors (EDEADLK for deadlock, EPERM for non-owner unlock) which indicate
+ * bugs in the locking implementation.
+ */
+
+#include "wolfhsm/wh_settings.h"
+
+#if defined(WOLFHSM_CFG_THREADSAFE) && defined(WOLFHSM_CFG_TEST_POSIX) && \
+ defined(WOLFHSM_CFG_GLOBAL_KEYS) && defined(WOLFHSM_CFG_ENABLE_CLIENT) && \
+ defined(WOLFHSM_CFG_ENABLE_SERVER) && !defined(WOLFHSM_CFG_NO_CRYPTO)
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdbool.h>
+#include <pthread.h>
+#include <time.h>
+#include <unistd.h>
+
+#include "wolfhsm/wh_error.h"
+#include "wolfhsm/wh_comm.h"
+#include "wolfhsm/wh_transport_mem.h"
+#include "wolfhsm/wh_client.h"
+#include "wolfhsm/wh_server.h"
+#include "wolfhsm/wh_nvm.h"
+#include "wolfhsm/wh_nvm_flash.h"
+#include "wolfhsm/wh_flash_ramsim.h"
+#include "wolfhsm/wh_lock.h"
+#include "wolfhsm/wh_keyid.h"
+
+#include "port/posix/posix_lock.h"
+
+#include "wolfssl/wolfcrypt/types.h"
+#include "wolfssl/wolfcrypt/random.h"
+
+#include "wh_test_common.h"
+#include "wh_test_posix_threadsafe_stress.h"
+
+
+/*
+ * TSAN Transport Shims
+ *
+ * These wrap the transport layer functions with acquire/release annotations
+ * to tell TSAN about the happens-before relationship established by the
+ * CSR handshake. This eliminates false positives on DMA buffer accesses
+ * while still allowing TSAN to analyze keystore/NVM locking.
+ */
+
+#ifdef WOLFHSM_CFG_TEST_STRESS_TSAN
+
+/* Provide a sane error message if we want to use TSAN but no TSAN detected.
+ * GCC uses __SANITIZE_THREAD__, Clang uses __has_feature
+ */
+#ifndef __has_feature
+#define __has_feature(x) 0
+#endif
+#if !defined(__SANITIZE_THREAD__) && !__has_feature(thread_sanitizer)
+#error ThreadSanitizer not enabled for this build
+#endif
+
+#include <sanitizer/tsan_interface.h>
+
+/* Client sends request - release all prior writes (including DMA buffers)
+ * IMPORTANT: Release MUST happen BEFORE the send, not after. The transport's
+ * notify counter synchronizes message delivery, but TSAN doesn't see it.
+ * If we release after send, the server could acquire before we release,
+ * breaking the happens-before chain for DMA buffer accesses. */
+static int tsan_SendRequest(void* c, uint16_t len, const void* data)
+{
+ whTransportMemContext* ctx = (whTransportMemContext*)c;
+ __tsan_release((void*)ctx->req);
+ return wh_TransportMem_SendRequest(c, len, data);
+}
+
+/* Client receives response - acquire all server writes */
+static int tsan_RecvResponse(void* c, uint16_t* out_len, void* data)
+{
+ whTransportMemContext* ctx = (whTransportMemContext*)c;
+ int rc = wh_TransportMem_RecvResponse(c, out_len, data);
+ if (rc == WH_ERROR_OK) {
+ __tsan_acquire((void*)ctx->resp);
+ }
+ return rc;
+}
+
+/* Server receives request - acquire all client writes (including DMA buffers)
+ */
+static int tsan_RecvRequest(void* c, uint16_t* out_len, void* data)
+{
+ whTransportMemContext* ctx = (whTransportMemContext*)c;
+ int rc = wh_TransportMem_RecvRequest(c, out_len, data);
+ if (rc == WH_ERROR_OK) {
+ __tsan_acquire((void*)ctx->req);
+ }
+ return rc;
+}
+
+/* Server sends response - release all prior writes (including DMA write-backs)
+ * IMPORTANT: Release MUST happen BEFORE the send (see tsan_SendRequest). */
+static int tsan_SendResponse(void* c, uint16_t len, const void* data)
+{
+ whTransportMemContext* ctx = (whTransportMemContext*)c;
+ __tsan_release((void*)ctx->resp);
+ return wh_TransportMem_SendResponse(c, len, data);
+}
+
+/* TSAN-annotated transport callbacks */
+static const whTransportClientCb clientTransportCb = {
+ .Init = wh_TransportMem_InitClear,
+ .Send = tsan_SendRequest,
+ .Recv = tsan_RecvResponse,
+ .Cleanup = wh_TransportMem_Cleanup,
+};
+
+static const whTransportServerCb serverTransportCb = {
+ .Init = wh_TransportMem_Init,
+ .Recv = tsan_RecvRequest,
+ .Send = tsan_SendResponse,
+ .Cleanup = wh_TransportMem_Cleanup,
+};
+
+#else /* !WOLFHSM_CFG_TEST_STRESS_TSAN */
+
+/* Non-TSAN: use standard transport callbacks */
+static const whTransportClientCb clientTransportCb = WH_TRANSPORT_MEM_CLIENT_CB;
+static const whTransportServerCb serverTransportCb = WH_TRANSPORT_MEM_SERVER_CB;
+
+#endif /* WOLFHSM_CFG_TEST_STRESS_TSAN */
+
+/* Atomic helpers for C99 compatibility (GCC/Clang built-ins) */
+#define ATOMIC_LOAD_INT(ptr) __atomic_load_n((ptr), __ATOMIC_ACQUIRE)
+#define ATOMIC_STORE_INT(ptr, val) \
+ __atomic_store_n((ptr), (val), __ATOMIC_RELEASE)
+#define ATOMIC_ADD_INT(ptr, val) \
+ __atomic_add_fetch((ptr), (val), __ATOMIC_ACQ_REL)
+
+/* Test configuration */
+#define NUM_CLIENTS 4
+/* Allow external configuration of phase iterations */
+#ifndef WOLFHSM_CFG_TEST_STRESS_PHASE_ITERATIONS
+#define WOLFHSM_CFG_TEST_STRESS_PHASE_ITERATIONS 800
+#endif
+#define PHASE_ITERATIONS WOLFHSM_CFG_TEST_STRESS_PHASE_ITERATIONS
+
+#define BUFFER_SIZE 4096
+#define FLASH_RAM_SIZE (1024 * 1024)
+#define FLASH_SECTOR_SIZE (128 * 1024)
+#define FLASH_PAGE_SIZE 8
+#define MAX_TEST_DURATION_SEC 300
+
+/* NVM object parameters */
+#define NVM_OBJECT_DATA_SIZE 64
+
+/* Key parameters */
+#define KEY_DATA_SIZE 32
+
+/* Hot resources - all clients target these same IDs
+ * NOTE: Use client-facing keyId format (simple ID + flags), NOT server-internal
+ * format (WH_MAKE_KEYID).
+ *
+ * We need separate key IDs for different test categories because:
+ * - Revoked keys have NONMODIFIABLE flag and can't be erased or re-cached
+ * - Each revoke-related phase needs its own key that won't be used elsewhere
+ */
+#define HOT_KEY_ID_GLOBAL WH_CLIENT_KEYID_MAKE_GLOBAL(1) /* ID=1, global */
+#define HOT_KEY_ID_LOCAL 2 /* ID=2, local */
+
+/* Separate key IDs for revoke-related phases (can't be reused after revoke) */
+#define REVOKE_CACHE_KEY_GLOBAL \
+ WH_CLIENT_KEYID_MAKE_GLOBAL(10) /* ID=10, global */
+#define REVOKE_CACHE_KEY_LOCAL 11 /* ID=11, local */
+#define REVOKE_EXPORT_KEY_GLOBAL \
+ WH_CLIENT_KEYID_MAKE_GLOBAL(12) /* ID=12, global */
+#define REVOKE_EXPORT_KEY_LOCAL 13 /* ID=13, local */
+#define FRESHEN_KEY_GLOBAL WH_CLIENT_KEYID_MAKE_GLOBAL(14) /* ID=14, global */
+#define FRESHEN_KEY_LOCAL 15 /* ID=15, local */
+#define HOT_NVM_ID ((whNvmId)100)
+#define HOT_NVM_ID_2 ((whNvmId)101)
+#define HOT_NVM_ID_3 ((whNvmId)102)
+#define HOT_COUNTER_ID ((whNvmId)200)
+
+/* ============================================================================
+ * PHASE DEFINITIONS
+ * ========================================================================== */
+
+/* Contention phase enumeration */
+typedef enum {
+ /* Keystore phases */
+ PHASE_KS_CONCURRENT_CACHE,
+ PHASE_KS_CACHE_VS_EVICT,
+ PHASE_KS_CACHE_VS_EXPORT,
+ PHASE_KS_EVICT_VS_EXPORT,
+ PHASE_KS_CACHE_VS_COMMIT,
+ PHASE_KS_COMMIT_VS_EVICT,
+ PHASE_KS_CONCURRENT_EXPORT,
+ PHASE_KS_CONCURRENT_EVICT,
+
+ /* NVM phases */
+ PHASE_NVM_CONCURRENT_ADD,
+ PHASE_NVM_ADD_VS_READ,
+ PHASE_NVM_ADD_VS_DESTROY,
+ PHASE_NVM_READ_VS_DESTROY,
+ PHASE_NVM_CONCURRENT_READ,
+ PHASE_NVM_LIST_DURING_MODIFY,
+ PHASE_NVM_CONCURRENT_DESTROY,
+ PHASE_NVM_READ_VS_RESIZE, /* 2 read, 2 resize same object */
+ PHASE_NVM_CONCURRENT_RESIZE, /* 4 threads resize same object */
+
+ /* Cross-subsystem phases */
+ PHASE_CROSS_COMMIT_VS_ADD,
+ PHASE_CROSS_COMMIT_VS_DESTROY,
+ PHASE_CROSS_FRESHEN_VS_MODIFY,
+
+ /* Keystore - Erase/Revoke */
+ PHASE_KS_ERASE_VS_CACHE, /* 2 erase, 2 cache same key */
+ PHASE_KS_ERASE_VS_EXPORT, /* 2 erase, 2 export same key */
+ PHASE_KS_REVOKE_VS_CACHE, /* 2 revoke, 2 cache same key */
+ PHASE_KS_REVOKE_VS_EXPORT, /* 2 revoke, 2 export same key */
+
+ /* Keystore - GetUniqueId/Freshen */
+ PHASE_KS_CONCURRENT_GETUNIQUEID, /* 4 threads request unique IDs */
+ PHASE_KS_EXPLICIT_FRESHEN, /* 4 threads freshen same uncached key */
+
+ /* NVM - GetAvailable/GetMetadata/Reclaim */
+ PHASE_NVM_ADD_WITH_RECLAIM, /* Fill NVM, concurrent adds trigger reclaim */
+ PHASE_NVM_GETAVAILABLE_VS_ADD, /* 2 query space, 2 add objects */
+ PHASE_NVM_GETMETADATA_VS_DESTROY, /* 2 query metadata, 2 destroy */
+
+ /* Counter */
+ PHASE_COUNTER_CONCURRENT_INCREMENT, /* 4 threads increment same counter */
+ PHASE_COUNTER_INCREMENT_VS_READ, /* 2 increment, 2 read same counter */
+
+#ifdef WOLFHSM_CFG_DMA
+ /* DMA Operations */
+ PHASE_KS_CACHE_DMA_VS_EXPORT, /* 2 DMA cache, 2 regular export */
+ PHASE_KS_EXPORT_DMA_VS_EVICT, /* 2 DMA export, 2 evict */
+ PHASE_NVM_ADD_DMA_VS_READ, /* 2 DMA add, 2 regular read */
+ PHASE_NVM_READ_DMA_VS_DESTROY, /* 2 DMA read, 2 destroy */
+ PHASE_NVM_READ_DMA_VS_RESIZE, /* 2 DMA read, 2 resize same object */
+#endif
+
+ PHASE_COUNT
+} ContentionPhase;
+
+/* Client role assignment per phase */
+typedef enum {
+ ROLE_OP_A, /* Perform operation A (e.g., Cache, Add) */
+ ROLE_OP_B, /* Perform operation B (e.g., Evict, Read) */
+} ClientRole;
+
+/* Phase configuration */
+typedef struct {
+ ContentionPhase phase;
+ const char* name;
+ int iterations;
+ ClientRole roles[NUM_CLIENTS];
+} PhaseConfig;
+
+/* Phase configurations - all 18 phases */
+static const PhaseConfig phases[] = {
+ /* Keystore phases */
+ {PHASE_KS_CONCURRENT_CACHE,
+ "KS: Concurrent Cache",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_A, ROLE_OP_A}},
+ {PHASE_KS_CACHE_VS_EVICT,
+ "KS: Cache vs Evict",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}},
+ {PHASE_KS_CACHE_VS_EXPORT,
+ "KS: Cache vs Export",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}},
+ {PHASE_KS_EVICT_VS_EXPORT,
+ "KS: Evict vs Export",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}},
+ {PHASE_KS_CACHE_VS_COMMIT,
+ "KS: Cache vs Commit",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}},
+ {PHASE_KS_COMMIT_VS_EVICT,
+ "KS: Commit vs Evict",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}},
+ {PHASE_KS_CONCURRENT_EXPORT,
+ "KS: Concurrent Export",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_A, ROLE_OP_A}},
+ {PHASE_KS_CONCURRENT_EVICT,
+ "KS: Concurrent Evict",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_A, ROLE_OP_A}},
+
+ /* NVM phases */
+ {PHASE_NVM_CONCURRENT_ADD,
+ "NVM: Concurrent Add",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_A, ROLE_OP_A}},
+ {PHASE_NVM_ADD_VS_READ,
+ "NVM: Add vs Read",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}},
+ {PHASE_NVM_ADD_VS_DESTROY,
+ "NVM: Add vs Destroy",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}},
+ {PHASE_NVM_READ_VS_DESTROY,
+ "NVM: Read vs Destroy",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}},
+ {PHASE_NVM_CONCURRENT_READ,
+ "NVM: Concurrent Read",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_A, ROLE_OP_A}},
+ {PHASE_NVM_LIST_DURING_MODIFY,
+ "NVM: List During Modify",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}},
+ {PHASE_NVM_CONCURRENT_DESTROY,
+ "NVM: Concurrent Destroy",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_A, ROLE_OP_A}},
+
+ /* Cross-subsystem phases */
+ {PHASE_CROSS_COMMIT_VS_ADD,
+ "Cross: Commit vs NVM Add",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}},
+ {PHASE_CROSS_COMMIT_VS_DESTROY,
+ "Cross: Commit vs NVM Destroy",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}},
+ {PHASE_CROSS_FRESHEN_VS_MODIFY,
+ "Cross: Export/Freshen vs NVM Modify",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}},
+
+ /* Keystore Erase/Revoke */
+ {PHASE_KS_ERASE_VS_CACHE,
+ "KS: Erase vs Cache",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}},
+ {PHASE_KS_ERASE_VS_EXPORT,
+ "KS: Erase vs Export",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}},
+ {PHASE_KS_REVOKE_VS_CACHE,
+ "KS: Revoke vs Cache",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}},
+ {PHASE_KS_REVOKE_VS_EXPORT,
+ "KS: Revoke vs Export",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}},
+
+ /* Keystore GetUniqueId/Freshen */
+ {PHASE_KS_CONCURRENT_GETUNIQUEID,
+ "KS: Concurrent GetUniqueId",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_A, ROLE_OP_A}},
+ {PHASE_KS_EXPLICIT_FRESHEN,
+ "KS: Explicit Freshen",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_A, ROLE_OP_A}},
+
+ /* NVM */
+ {PHASE_NVM_ADD_WITH_RECLAIM,
+ "NVM: Add With Reclaim",
+ PHASE_ITERATIONS / 10,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_A, ROLE_OP_A}},
+ {PHASE_NVM_GETAVAILABLE_VS_ADD,
+ "NVM: GetAvailable vs Add",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}},
+ {PHASE_NVM_GETMETADATA_VS_DESTROY,
+ "NVM: GetMetadata vs Destroy",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}},
+ {PHASE_NVM_READ_VS_RESIZE,
+ "NVM Read vs Resize",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}},
+
+ {PHASE_NVM_CONCURRENT_RESIZE,
+ "NVM Concurrent Resize",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_A, ROLE_OP_A}},
+
+ /* Counter */
+ {PHASE_COUNTER_CONCURRENT_INCREMENT,
+ "Counter Concurrent Increment",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_A, ROLE_OP_A}},
+ {PHASE_COUNTER_INCREMENT_VS_READ,
+ "Counter Increment vs Read",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}},
+
+#ifdef WOLFHSM_CFG_DMA
+ /* DMA Operations */
+ {PHASE_KS_CACHE_DMA_VS_EXPORT,
+ "KS: Cache DMA vs Export",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}},
+ {PHASE_KS_EXPORT_DMA_VS_EVICT,
+ "KS: Export DMA vs Evict",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}},
+ {PHASE_NVM_ADD_DMA_VS_READ,
+ "NVM: Add DMA vs Read",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}},
+ {PHASE_NVM_READ_DMA_VS_DESTROY,
+ "NVM: Read DMA vs Destroy",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}},
+ {PHASE_NVM_READ_DMA_VS_RESIZE,
+ "NVM Read DMA vs Resize",
+ PHASE_ITERATIONS,
+ {ROLE_OP_A, ROLE_OP_A, ROLE_OP_B, ROLE_OP_B}},
+#endif
+};
+
+/* ============================================================================
+ * DATA STRUCTURES
+ * ========================================================================== */
+
+/* Forward declaration */
+struct StressTestContext;
+
+/* Per client-server pair configuration */
+typedef struct {
+ /* Transport buffers */
+ uint8_t reqBuf[BUFFER_SIZE];
+ uint8_t respBuf[BUFFER_SIZE];
+ whTransportMemConfig tmConfig;
+
+ /* Client side */
+ whTransportMemClientContext clientTransportCtx;
+ whCommClientConfig clientCommConfig;
+ whClientContext client;
+ whClientConfig clientConfig;
+
+ /* Server side */
+ whTransportMemServerContext serverTransportCtx;
+ whCommServerConfig serverCommConfig;
+ whServerContext server;
+ whServerConfig serverConfig;
+
+ /* Crypto context for this server */
+ whServerCryptoContext cryptoCtx;
+
+ /* Thread control */
+ pthread_t clientThread;
+ pthread_t serverThread;
+ int clientId;
+ volatile int stopFlag;
+ volatile int errorCount;
+ volatile int iterationCount;
+
+ /* Pointer back to shared context */
+ struct StressTestContext* sharedCtx;
+
+#ifdef WOLFHSM_CFG_DMA
+ /* Per-client DMA buffers */
+ uint8_t dmaKeyBuffer[KEY_DATA_SIZE];
+ uint8_t dmaNvmBuffer[NVM_OBJECT_DATA_SIZE];
+#endif
+} ClientServerPair;
+
+/* Shared test context */
+typedef struct StressTestContext {
+ /* Shared NVM */
+ uint8_t flashMemory[FLASH_RAM_SIZE];
+ whFlashRamsimCtx flashCtx;
+ whFlashRamsimCfg flashCfg;
+ whNvmFlashContext nvmFlashCtx;
+ whNvmFlashConfig nvmFlashCfg;
+ whNvmContext nvm;
+ whNvmConfig nvmCfg;
+
+ /* Lock for NVM */
+ posixLockContext nvmLockCtx;
+ pthread_mutexattr_t mutexAttr;
+ posixLockConfig posixLockCfg;
+ whLockConfig lockCfg;
+
+ /* Client-server pairs */
+ ClientServerPair pairs[NUM_CLIENTS];
+
+ /* Synchronization - initial start */
+ pthread_barrier_t startBarrier;
+ volatile int globalStopFlag;
+
+ /* Phase synchronization */
+ pthread_barrier_t setupBarrier;
+ pthread_barrier_t setupCompleteBarrier;
+ pthread_barrier_t streamStartBarrier;
+ pthread_barrier_t streamEndBarrier;
+
+ /* Phase control */
+ volatile int phaseRunning;
+ volatile ContentionPhase currentPhase;
+ volatile whKeyId currentKeyId;
+ volatile ClientRole clientRoles[NUM_CLIENTS];
+} StressTestContext;
+
+/* Forward declarations */
+static void* serverThread(void* arg);
+static void* contentionClientThread(void* arg);
+
+/* ============================================================================
+ * INITIALIZATION HELPERS
+ * ========================================================================== */
+
+static int initSharedNvm(StressTestContext* ctx)
+{
+ static whFlashCb flashCb = WH_FLASH_RAMSIM_CB;
+ static whNvmCb nvmCb = WH_NVM_FLASH_CB;
+ static whLockCb lockCb = POSIX_LOCK_CB;
+ int rc;
+
+ /* Initialize flash memory */
+ memset(ctx->flashMemory, 0xFF, sizeof(ctx->flashMemory));
+
+ /* Configure flash simulator */
+ ctx->flashCfg.size = FLASH_RAM_SIZE;
+ ctx->flashCfg.sectorSize = FLASH_SECTOR_SIZE;
+ ctx->flashCfg.pageSize = FLASH_PAGE_SIZE;
+ ctx->flashCfg.erasedByte = 0xFF;
+ ctx->flashCfg.memory = ctx->flashMemory;
+
+ /* Configure NVM flash layer */
+ ctx->nvmFlashCfg.cb = &flashCb;
+ ctx->nvmFlashCfg.context = &ctx->flashCtx;
+ ctx->nvmFlashCfg.config = &ctx->flashCfg;
+
+ /* Initialize NVM flash layer */
+ rc = wh_NvmFlash_Init(&ctx->nvmFlashCtx, &ctx->nvmFlashCfg);
+ if (rc != WH_ERROR_OK) {
+ WH_ERROR_PRINT("Failed to initialize NVM flash: %d\n", rc);
+ return rc;
+ }
+
+ /* Configure lock with error-checking mutex for better debugging */
+ memset(&ctx->nvmLockCtx, 0, sizeof(ctx->nvmLockCtx));
+ pthread_mutexattr_init(&ctx->mutexAttr);
+ pthread_mutexattr_settype(&ctx->mutexAttr, PTHREAD_MUTEX_ERRORCHECK);
+ ctx->posixLockCfg.attr = &ctx->mutexAttr;
+ ctx->lockCfg.cb = &lockCb;
+ ctx->lockCfg.context = &ctx->nvmLockCtx;
+ ctx->lockCfg.config = &ctx->posixLockCfg;
+
+ /* Configure NVM with lock */
+ ctx->nvmCfg.cb = &nvmCb;
+ ctx->nvmCfg.context = &ctx->nvmFlashCtx;
+ ctx->nvmCfg.config = &ctx->nvmFlashCfg;
+ ctx->nvmCfg.lockConfig = &ctx->lockCfg;
+
+ /* Initialize NVM */
+ rc = wh_Nvm_Init(&ctx->nvm, &ctx->nvmCfg);
+ if (rc != WH_ERROR_OK) {
+ WH_ERROR_PRINT("Failed to initialize NVM: %d\n", rc);
+ return rc;
+ }
+
+ return WH_ERROR_OK;
+}
+
+/* Wire up one client/server pair inside 'ctx' at index 'pairIndex':
+ * shared-memory transport over the pair's req/resp buffers, comm configs with
+ * distinct per-pair client/server ids, a per-pair RNG and crypto context, and
+ * a server config pointing at the single NVM instance shared by all pairs.
+ * The client is initialized here on the main thread specifically to avoid
+ * concurrent wolfCrypt_Init()/wc_CryptoCb_RegisterDevice() calls from worker
+ * threads. Returns WH_ERROR_OK on success or the first error code; frees the
+ * RNG if client init fails so no resource leaks on the error path. */
+static int initClientServerPair(StressTestContext* ctx, int pairIndex)
+{
+    ClientServerPair* pair = &ctx->pairs[pairIndex];
+    int rc;
+
+    /* Per-pair bookkeeping: id, back-pointer to shared state, counters */
+    pair->clientId = pairIndex;
+    pair->sharedCtx = ctx;
+    pair->stopFlag = 0;
+    pair->errorCount = 0;
+    pair->iterationCount = 0;
+
+    /* Configure transport memory */
+    pair->tmConfig.req = (whTransportMemCsr*)pair->reqBuf;
+    pair->tmConfig.req_size = sizeof(pair->reqBuf);
+    pair->tmConfig.resp = (whTransportMemCsr*)pair->respBuf;
+    pair->tmConfig.resp_size = sizeof(pair->respBuf);
+
+    /* Configure client transport */
+    memset(&pair->clientTransportCtx, 0, sizeof(pair->clientTransportCtx));
+
+    /* Configure client comm */
+    pair->clientCommConfig.transport_cb = &clientTransportCb;
+    pair->clientCommConfig.transport_context = &pair->clientTransportCtx;
+    pair->clientCommConfig.transport_config = &pair->tmConfig;
+    /* 100 + index keeps client ids unique and distinct from server ids */
+    pair->clientCommConfig.client_id = (uint16_t)(100 + pairIndex);
+
+    /* Configure client */
+    pair->clientConfig.comm = &pair->clientCommConfig;
+
+    /* Configure server transport */
+    memset(&pair->serverTransportCtx, 0, sizeof(pair->serverTransportCtx));
+
+    /* Configure server comm */
+    pair->serverCommConfig.transport_cb = &serverTransportCb;
+    pair->serverCommConfig.transport_context = &pair->serverTransportCtx;
+    pair->serverCommConfig.transport_config = &pair->tmConfig;
+    /* 200 + index keeps server ids unique and distinct from client ids */
+    pair->serverCommConfig.server_id = (uint16_t)(200 + pairIndex);
+
+    /* Configure crypto context */
+    pair->cryptoCtx.devId = INVALID_DEVID;
+
+    /* Initialize RNG for this server */
+    rc = wc_InitRng_ex(pair->cryptoCtx.rng, NULL, pair->cryptoCtx.devId);
+    if (rc != 0) {
+        WH_ERROR_PRINT("Failed to init RNG for pair %d: %d\n", pairIndex, rc);
+        return rc;
+    }
+
+    /* Configure server - all share the same NVM */
+    pair->serverConfig.comm_config = &pair->serverCommConfig;
+    pair->serverConfig.nvm = &ctx->nvm;
+    pair->serverConfig.crypto = &pair->cryptoCtx;
+    pair->serverConfig.devId = INVALID_DEVID;
+
+    /* Initialize client in the main thread to avoid concurrent calls to
+     * wolfCrypt_Init() and wc_CryptoCb_RegisterDevice() */
+    rc = wh_Client_Init(&pair->client, &pair->clientConfig);
+    if (rc != WH_ERROR_OK) {
+        WH_ERROR_PRINT("Client %d init failed: %d\n", pairIndex, rc);
+        /* Release the RNG acquired above before reporting failure */
+        wc_FreeRng(pair->cryptoCtx.rng);
+        return rc;
+    }
+
+    return WH_ERROR_OK;
+}
+
+/* Tear down one pair: clean up the client context and free the RNG that
+ * initClientServerPair() acquired. Cleanup return codes are ignored. */
+static void cleanupClientServerPair(ClientServerPair* pair)
+{
+    wh_Client_Cleanup(&pair->client);
+    wc_FreeRng(pair->cryptoCtx.rng);
+}
+
+/* ============================================================================
+ * SERVER THREAD
+ * ========================================================================== */
+
+/* Thread entry point: run one server instance for a client/server pair.
+ * Initializes the server, marks it connected, waits on the shared start
+ * barrier so all threads begin together, then services request messages
+ * until either this pair's stop flag or the global stop flag is raised.
+ * Init/connect failures increment the pair's errorCount; per-request errors
+ * during the loop are intentionally NOT counted, since races are expected
+ * under stress. Always returns NULL ('arg' is the ClientServerPair*). */
+static void* serverThread(void* arg)
+{
+    ClientServerPair* pair = (ClientServerPair*)arg;
+    StressTestContext* ctx = pair->sharedCtx;
+    int rc;
+
+    /* Initialize server */
+    rc = wh_Server_Init(&pair->server, &pair->serverConfig);
+    if (rc != WH_ERROR_OK) {
+        WH_ERROR_PRINT("Server %d init failed: %d\n", pair->clientId, rc);
+        ATOMIC_ADD_INT(&pair->errorCount, 1);
+        return NULL;
+    }
+
+    /* Set connected state */
+    rc = wh_Server_SetConnected(&pair->server, WH_COMM_CONNECTED);
+    if (rc != WH_ERROR_OK) {
+        WH_ERROR_PRINT("Server %d SetConnected failed: %d\n", pair->clientId,
+                       rc);
+        ATOMIC_ADD_INT(&pair->errorCount, 1);
+        /* Undo the successful init before bailing out */
+        wh_Server_Cleanup(&pair->server);
+        return NULL;
+    }
+
+    /* Wait for all threads to start */
+    pthread_barrier_wait(&ctx->startBarrier);
+
+    /* Process requests until stopped */
+    while (!ATOMIC_LOAD_INT(&pair->stopFlag) &&
+           !ATOMIC_LOAD_INT(&ctx->globalStopFlag)) {
+        rc = wh_Server_HandleRequestMessage(&pair->server);
+
+        if (rc == WH_ERROR_NOTREADY) {
+            /* No request pending, yield CPU */
+            sched_yield();
+            continue;
+        }
+
+        /* Don't count server errors - they're expected during stress */
+    }
+
+    wh_Server_Cleanup(&pair->server);
+    return NULL;
+}
+
+/* ============================================================================
+ * CLIENT OPERATIONS
+ * ========================================================================== */
+
+/* Add (or replace) NVM object 'id' with a fixed-size payload whose bytes
+ * encode 'iteration', then poll for the server's response. Returns a
+ * transport error if the exchange fails, otherwise the server's result. */
+static int doNvmAddObject(whClientContext* client, whNvmId id, int iteration)
+{
+    uint8_t payload[NVM_OBJECT_DATA_SIZE];
+    int32_t serverRc = 0;
+    int     ret;
+
+    /* Pattern-fill the payload so successive iterations differ */
+    memset(payload, (uint8_t)(iteration & 0xFF), sizeof(payload));
+
+    /* Issue the add-object request (no label metadata) */
+    ret = wh_Client_NvmAddObjectRequest(client, id, WH_NVM_ACCESS_ANY,
+                                        WH_NVM_FLAGS_USAGE_ANY, 0, NULL,
+                                        sizeof(payload), payload);
+    if (ret != WH_ERROR_OK) {
+        return ret;
+    }
+
+    /* Poll until the server thread services the request */
+    for (;;) {
+        ret = wh_Client_NvmAddObjectResponse(client, &serverRc);
+        if (ret != WH_ERROR_NOTREADY) {
+            break;
+        }
+        sched_yield();
+    }
+
+    /* Transport errors win; otherwise report the server's result code */
+    return (ret == WH_ERROR_OK) ? (int)serverRc : ret;
+}
+
+/* Read up to NVM_OBJECT_DATA_SIZE bytes of object 'id' from offset 0 and
+ * poll for the server's response. The data itself is discarded; only the
+ * result code matters for the stress test. */
+static int doNvmRead(whClientContext* client, whNvmId id)
+{
+    uint8_t   readBuf[NVM_OBJECT_DATA_SIZE];
+    whNvmSize readLen  = sizeof(readBuf);
+    int32_t   serverRc = 0;
+    int       ret;
+
+    /* Issue the read request */
+    ret = wh_Client_NvmReadRequest(client, id, 0, sizeof(readBuf));
+    if (ret != WH_ERROR_OK) {
+        return ret;
+    }
+
+    /* Poll until the server thread services the request */
+    for (;;) {
+        ret = wh_Client_NvmReadResponse(client, &serverRc, &readLen, readBuf);
+        if (ret != WH_ERROR_NOTREADY) {
+            break;
+        }
+        sched_yield();
+    }
+
+    return (ret == WH_ERROR_OK) ? (int)serverRc : ret;
+}
+
+/* Issue a list request (match-any access/flags, starting id 0) and poll for
+ * the server's response. The returned id/count are discarded; only the
+ * result code matters here. */
+static int doNvmList(whClientContext* client)
+{
+    whNvmId   firstId  = 0;
+    whNvmSize objCount = 0;
+    int32_t   serverRc = 0;
+    int       ret;
+
+    /* Issue the list request */
+    ret = wh_Client_NvmListRequest(client, WH_NVM_ACCESS_ANY,
+                                   WH_NVM_FLAGS_USAGE_ANY, 0);
+    if (ret != WH_ERROR_OK) {
+        return ret;
+    }
+
+    /* Poll until the server thread services the request */
+    for (;;) {
+        ret = wh_Client_NvmListResponse(client, &serverRc, &objCount,
+                                        &firstId);
+        if (ret != WH_ERROR_NOTREADY) {
+            break;
+        }
+        sched_yield();
+    }
+
+    return (ret == WH_ERROR_OK) ? (int)serverRc : ret;
+}
+
+/* Destroy the single NVM object 'id' and poll for the server's response.
+ * Returns a transport error or the server's result code. */
+static int doNvmDestroy(whClientContext* client, whNvmId id)
+{
+    int32_t serverRc = 0;
+    int     ret;
+
+    /* Issue the destroy request for exactly one object */
+    ret = wh_Client_NvmDestroyObjectsRequest(client, 1, &id);
+    if (ret != WH_ERROR_OK) {
+        return ret;
+    }
+
+    /* Poll until the server thread services the request */
+    for (;;) {
+        ret = wh_Client_NvmDestroyObjectsResponse(client, &serverRc);
+        if (ret != WH_ERROR_NOTREADY) {
+            break;
+        }
+        sched_yield();
+    }
+
+    return (ret == WH_ERROR_OK) ? (int)serverRc : ret;
+}
+
+/* Query NVM free/reclaimable space and object counts, polling for the
+ * server's response. All reported values are discarded; only the result
+ * code matters for the stress test. */
+static int doNvmGetAvailable(whClientContext* client)
+{
+    uint32_t freeBytes     = 0;
+    whNvmId  freeObjects   = 0;
+    uint32_t reclaimBytes  = 0;
+    whNvmId  reclaimCount  = 0;
+    int32_t  serverRc      = 0;
+    int      ret;
+
+    /* Issue the get-available request */
+    ret = wh_Client_NvmGetAvailableRequest(client);
+    if (ret != WH_ERROR_OK) {
+        return ret;
+    }
+
+    /* Poll until the server thread services the request */
+    for (;;) {
+        ret = wh_Client_NvmGetAvailableResponse(client, &serverRc, &freeBytes,
+                                                &freeObjects, &reclaimBytes,
+                                                &reclaimCount);
+        if (ret != WH_ERROR_NOTREADY) {
+            break;
+        }
+        sched_yield();
+    }
+
+    return (ret == WH_ERROR_OK) ? (int)serverRc : ret;
+}
+
+/* Fetch the metadata (id, access, flags, length, label) of object 'id',
+ * polling for the server's response. The metadata itself is discarded;
+ * only the result code matters for the stress test. */
+static int doNvmGetMetadata(whClientContext* client, whNvmId id)
+{
+    whNvmId     metaId     = 0;
+    whNvmAccess metaAccess = 0;
+    whNvmFlags  metaFlags  = 0;
+    whNvmSize   metaLen    = 0;
+    uint8_t     metaLabel[WH_NVM_LABEL_LEN];
+    int32_t     serverRc   = 0;
+    int         ret;
+
+    /* Issue the get-metadata request */
+    ret = wh_Client_NvmGetMetadataRequest(client, id);
+    if (ret != WH_ERROR_OK) {
+        return ret;
+    }
+
+    /* Poll until the server thread services the request */
+    for (;;) {
+        ret = wh_Client_NvmGetMetadataResponse(client, &serverRc, &metaId,
+                                               &metaAccess, &metaFlags,
+                                               &metaLen, sizeof(metaLabel),
+                                               metaLabel);
+        if (ret != WH_ERROR_NOTREADY) {
+            break;
+        }
+        sched_yield();
+    }
+
+    return (ret == WH_ERROR_OK) ? (int)serverRc : ret;
+}
+
+/* Cache key 'keyId' in the server's key cache with pattern data derived from
+ * 'iteration' and a "Key%04X" label, polling for the response. Returns the
+ * final response status (the returned key id is discarded). */
+static int doKeyCache(whClientContext* client, whKeyId keyId, int iteration)
+{
+    uint8_t keyBytes[KEY_DATA_SIZE];
+    uint8_t keyLabel[WH_NVM_LABEL_LEN];
+    whKeyId returnedId = 0;
+    int     ret;
+
+    /* Key material encodes the iteration; label encodes the key id */
+    memset(keyBytes, (uint8_t)(iteration & 0xFF), sizeof(keyBytes));
+    snprintf((char*)keyLabel, sizeof(keyLabel), "Key%04X", keyId);
+
+    /* Issue the cache request */
+    ret = wh_Client_KeyCacheRequest_ex(client, WH_NVM_FLAGS_USAGE_ANY,
+                                       keyLabel, sizeof(keyLabel), keyBytes,
+                                       sizeof(keyBytes), keyId);
+    if (ret != WH_ERROR_OK) {
+        return ret;
+    }
+
+    /* Poll until the server thread services the request */
+    for (;;) {
+        ret = wh_Client_KeyCacheResponse(client, &returnedId);
+        if (ret != WH_ERROR_NOTREADY) {
+            break;
+        }
+        sched_yield();
+    }
+
+    return ret;
+}
+
+/* Export key 'keyId' from the server, polling for the response. The exported
+ * key material and label are discarded; only the status matters. */
+static int doKeyExport(whClientContext* client, whKeyId keyId)
+{
+    uint8_t  keyBytes[KEY_DATA_SIZE];
+    uint8_t  keyLabel[WH_NVM_LABEL_LEN];
+    uint16_t labelLen = sizeof(keyLabel);
+    uint16_t keyLen   = sizeof(keyBytes);
+    int      ret;
+
+    /* Issue the export request */
+    ret = wh_Client_KeyExportRequest(client, keyId);
+    if (ret != WH_ERROR_OK) {
+        return ret;
+    }
+
+    /* Poll until the server thread services the request */
+    for (;;) {
+        ret = wh_Client_KeyExportResponse(client, keyLabel, labelLen,
+                                          keyBytes, &keyLen);
+        if (ret != WH_ERROR_NOTREADY) {
+            break;
+        }
+        sched_yield();
+    }
+
+    return ret;
+}
+
+/* Evict key 'keyId' from the server's key cache, polling for the response. */
+static int doKeyEvict(whClientContext* client, whKeyId keyId)
+{
+    int ret;
+
+    /* Issue the evict request */
+    ret = wh_Client_KeyEvictRequest(client, keyId);
+    if (ret != WH_ERROR_OK) {
+        return ret;
+    }
+
+    /* Poll until the server thread services the request */
+    for (;;) {
+        ret = wh_Client_KeyEvictResponse(client);
+        if (ret != WH_ERROR_NOTREADY) {
+            break;
+        }
+        sched_yield();
+    }
+
+    return ret;
+}
+
+/* Commit cached key 'keyId' to NVM, polling for the response. */
+static int doKeyCommit(whClientContext* client, whKeyId keyId)
+{
+    int ret;
+
+    /* Issue the commit request */
+    ret = wh_Client_KeyCommitRequest(client, keyId);
+    if (ret != WH_ERROR_OK) {
+        return ret;
+    }
+
+    /* Poll until the server thread services the request */
+    for (;;) {
+        ret = wh_Client_KeyCommitResponse(client);
+        if (ret != WH_ERROR_NOTREADY) {
+            break;
+        }
+        sched_yield();
+    }
+
+    return ret;
+}
+
+/* Erase key 'keyId' (cache and committed NVM copy), polling for the
+ * response. */
+static int doKeyErase(whClientContext* client, whKeyId keyId)
+{
+    int ret;
+
+    /* Issue the erase request */
+    ret = wh_Client_KeyEraseRequest(client, keyId);
+    if (ret != WH_ERROR_OK) {
+        return ret;
+    }
+
+    /* Poll until the server thread services the request */
+    for (;;) {
+        ret = wh_Client_KeyEraseResponse(client);
+        if (ret != WH_ERROR_NOTREADY) {
+            break;
+        }
+        sched_yield();
+    }
+
+    return ret;
+}
+
+/* Revoke key 'keyId', polling for the response. */
+static int doKeyRevoke(whClientContext* client, whKeyId keyId)
+{
+    int ret;
+
+    /* Issue the revoke request */
+    ret = wh_Client_KeyRevokeRequest(client, keyId);
+    if (ret != WH_ERROR_OK) {
+        return ret;
+    }
+
+    /* Poll until the server thread services the request */
+    for (;;) {
+        ret = wh_Client_KeyRevokeResponse(client);
+        if (ret != WH_ERROR_NOTREADY) {
+            break;
+        }
+        sched_yield();
+    }
+
+    return ret;
+}
+
+/* Create/initialize monotonic counter 'counterId' to 'initialValue',
+ * polling for the response. The reported counter value is discarded. */
+static int doCounterInit(whClientContext* client, whNvmId counterId,
+                         uint32_t initialValue)
+{
+    uint32_t reported = 0;
+    int      ret;
+
+    /* Issue the counter-init request */
+    ret = wh_Client_CounterInitRequest(client, counterId, initialValue);
+    if (ret != WH_ERROR_OK) {
+        return ret;
+    }
+
+    /* Poll until the server thread services the request */
+    for (;;) {
+        ret = wh_Client_CounterInitResponse(client, &reported);
+        if (ret != WH_ERROR_NOTREADY) {
+            break;
+        }
+        sched_yield();
+    }
+
+    return ret;
+}
+
+/* Increment counter 'counterId', polling for the response. On success the
+ * post-increment value is written to *out_counter. */
+static int doCounterIncrement(whClientContext* client, whNvmId counterId,
+                              uint32_t* out_counter)
+{
+    int ret;
+
+    /* Issue the increment request */
+    ret = wh_Client_CounterIncrementRequest(client, counterId);
+    if (ret != WH_ERROR_OK) {
+        return ret;
+    }
+
+    /* Poll until the server thread services the request */
+    for (;;) {
+        ret = wh_Client_CounterIncrementResponse(client, out_counter);
+        if (ret != WH_ERROR_NOTREADY) {
+            break;
+        }
+        sched_yield();
+    }
+
+    return ret;
+}
+
+/* Read counter 'counterId', polling for the response. On success the
+ * current value is written to *out_counter. */
+static int doCounterRead(whClientContext* client, whNvmId counterId,
+                         uint32_t* out_counter)
+{
+    int ret;
+
+    /* Issue the read request */
+    ret = wh_Client_CounterReadRequest(client, counterId);
+    if (ret != WH_ERROR_OK) {
+        return ret;
+    }
+
+    /* Poll until the server thread services the request */
+    for (;;) {
+        ret = wh_Client_CounterReadResponse(client, out_counter);
+        if (ret != WH_ERROR_NOTREADY) {
+            break;
+        }
+        sched_yield();
+    }
+
+    return ret;
+}
+
+/* Destroy counter 'counterId', polling for the response. */
+static int doCounterDestroy(whClientContext* client, whNvmId counterId)
+{
+    int ret;
+
+    /* Issue the destroy request */
+    ret = wh_Client_CounterDestroyRequest(client, counterId);
+    if (ret != WH_ERROR_OK) {
+        return ret;
+    }
+
+    /* Poll until the server thread services the request */
+    for (;;) {
+        ret = wh_Client_CounterDestroyResponse(client);
+        if (ret != WH_ERROR_NOTREADY) {
+            break;
+        }
+        sched_yield();
+    }
+
+    return ret;
+}
+
+#ifdef WOLFHSM_CFG_DMA
+/* Cache key 'keyId' via the DMA path, using the pair's shared DMA key buffer
+ * as the key material (pattern-filled from 'iteration'). Polls for the
+ * response; the returned key id is discarded. */
+static int doKeyCacheDma(ClientServerPair* pair, whKeyId keyId, int iteration)
+{
+    uint8_t keyLabel[WH_NVM_LABEL_LEN];
+    whKeyId returnedId = 0;
+    int     ret;
+
+    /* Pattern-fill the DMA buffer and build a per-key label */
+    memset(pair->dmaKeyBuffer, (uint8_t)(iteration & 0xFF),
+           sizeof(pair->dmaKeyBuffer));
+    snprintf((char*)keyLabel, sizeof(keyLabel), "DmaKey%04X", keyId);
+
+    /* Issue the DMA cache request */
+    ret = wh_Client_KeyCacheDmaRequest(&pair->client, 0, keyLabel,
+                                       sizeof(keyLabel), pair->dmaKeyBuffer,
+                                       sizeof(pair->dmaKeyBuffer), keyId);
+    if (ret != WH_ERROR_OK) {
+        return ret;
+    }
+
+    /* Poll until the server thread services the request */
+    for (;;) {
+        ret = wh_Client_KeyCacheDmaResponse(&pair->client, &returnedId);
+        if (ret != WH_ERROR_NOTREADY) {
+            break;
+        }
+        sched_yield();
+    }
+
+    return ret;
+}
+
+/* Export key 'keyId' via the DMA path into the pair's shared DMA key buffer,
+ * polling for the response. Exported size and label are discarded. */
+static int doKeyExportDma(ClientServerPair* pair, whKeyId keyId)
+{
+    uint8_t  keyLabel[WH_NVM_LABEL_LEN];
+    uint16_t exportedLen = 0;
+    int      ret;
+
+    /* Issue the DMA export request targeting the shared buffer */
+    ret = wh_Client_KeyExportDmaRequest(&pair->client, keyId,
+                                        pair->dmaKeyBuffer,
+                                        sizeof(pair->dmaKeyBuffer));
+    if (ret != WH_ERROR_OK) {
+        return ret;
+    }
+
+    /* Poll until the server thread services the request */
+    for (;;) {
+        ret = wh_Client_KeyExportDmaResponse(&pair->client, keyLabel,
+                                             sizeof(keyLabel), &exportedLen);
+        if (ret != WH_ERROR_NOTREADY) {
+            break;
+        }
+        sched_yield();
+    }
+
+    return ret;
+}
+
+/* Add NVM object 'id' via the DMA path, sourcing the data from the pair's
+ * shared DMA NVM buffer (pattern-filled from 'iteration'). Returns a
+ * transport error or the server's result code. */
+static int doNvmAddObjectDma(ClientServerPair* pair, whNvmId id, int iteration)
+{
+    whNvmMetadata meta;
+    int32_t       serverRc = 0;
+    int           ret;
+
+    /* Pattern-fill the shared DMA buffer */
+    memset(pair->dmaNvmBuffer, (uint8_t)(iteration & 0xFF),
+           sizeof(pair->dmaNvmBuffer));
+
+    /* Describe the object being stored */
+    memset(&meta, 0, sizeof(meta));
+    meta.id     = id;
+    meta.access = WH_NVM_ACCESS_ANY;
+    meta.flags  = WH_NVM_FLAGS_USAGE_ANY;
+    meta.len    = sizeof(pair->dmaNvmBuffer);
+
+    /* Issue the DMA add request */
+    ret = wh_Client_NvmAddObjectDmaRequest(&pair->client, &meta,
+                                           sizeof(pair->dmaNvmBuffer),
+                                           pair->dmaNvmBuffer);
+    if (ret != WH_ERROR_OK) {
+        return ret;
+    }
+
+    /* Poll until the server thread services the request */
+    for (;;) {
+        ret = wh_Client_NvmAddObjectDmaResponse(&pair->client, &serverRc);
+        if (ret != WH_ERROR_NOTREADY) {
+            break;
+        }
+        sched_yield();
+    }
+
+    return (ret == WH_ERROR_OK) ? (int)serverRc : ret;
+}
+
+/* Read NVM object 'id' via the DMA path into the pair's shared DMA NVM
+ * buffer, polling for the response. Returns a transport error or the
+ * server's result code; the data itself is discarded. */
+static int doNvmReadDma(ClientServerPair* pair, whNvmId id)
+{
+    int32_t serverRc = 0;
+    int     ret;
+
+    /* Issue the DMA read request targeting the shared buffer */
+    ret = wh_Client_NvmReadDmaRequest(&pair->client, id, 0,
+                                      sizeof(pair->dmaNvmBuffer),
+                                      pair->dmaNvmBuffer);
+    if (ret != WH_ERROR_OK) {
+        return ret;
+    }
+
+    /* Poll until the server thread services the request */
+    for (;;) {
+        ret = wh_Client_NvmReadDmaResponse(&pair->client, &serverRc);
+        if (ret != WH_ERROR_NOTREADY) {
+            break;
+        }
+        sched_yield();
+    }
+
+    return (ret == WH_ERROR_OK) ? (int)serverRc : ret;
+}
+#endif /* WOLFHSM_CFG_DMA */
+
+/* ============================================================================
+ * PHASE SETUP
+ * ========================================================================== */
+
+/* Establish the precondition state for a contention phase, run by a single
+ * client before the phase begins. Depending on the phase this caches/evicts
+ * the hot key, adds/destroys the hot NVM object(s), or (re)creates the hot
+ * counter so every phase starts from a known state. NOSPACE/NOTFOUND are
+ * tolerated where the shared cache/NVM may legitimately be full or empty
+ * from earlier phases. Returns WH_ERROR_OK on success, else the first
+ * unexpected error code. */
+static int doPhaseSetup(ClientServerPair* pair, ContentionPhase phase,
+                        whKeyId keyId)
+{
+    whClientContext* client = &pair->client;
+    int rc;
+
+    switch (phase) {
+        /* Keystore phases that need clean state (evict first) */
+        case PHASE_KS_CONCURRENT_CACHE:
+            rc = doKeyEvict(client, keyId);
+            /* Ignore NOTFOUND - key might not exist */
+            if (rc == WH_ERROR_NOTFOUND)
+                rc = WH_ERROR_OK;
+            return rc;
+
+        /* Keystore phases that need key to exist (cache first) */
+        case PHASE_KS_CACHE_VS_EVICT:
+        case PHASE_KS_CACHE_VS_EXPORT:
+        case PHASE_KS_EVICT_VS_EXPORT:
+        case PHASE_KS_CACHE_VS_COMMIT:
+        case PHASE_KS_COMMIT_VS_EVICT:
+        case PHASE_KS_CONCURRENT_EXPORT:
+        case PHASE_KS_CONCURRENT_EVICT:
+            return doKeyCache(client, keyId, 0);
+
+        /* NVM phases that need clean state (destroy first) */
+        case PHASE_NVM_CONCURRENT_ADD:
+            rc = doNvmDestroy(client, HOT_NVM_ID);
+            if (rc == WH_ERROR_NOTFOUND)
+                rc = WH_ERROR_OK;
+            return rc;
+
+        /* NVM phases that need object to exist (add first) */
+        case PHASE_NVM_ADD_VS_READ:
+        case PHASE_NVM_ADD_VS_DESTROY:
+        case PHASE_NVM_READ_VS_DESTROY:
+        case PHASE_NVM_CONCURRENT_READ:
+        case PHASE_NVM_CONCURRENT_DESTROY:
+            /* First destroy any existing object to make space */
+            (void)doNvmDestroy(client, HOT_NVM_ID);
+            return doNvmAddObject(client, HOT_NVM_ID, 0);
+
+        /* List during modify needs multiple objects */
+        case PHASE_NVM_LIST_DURING_MODIFY:
+            rc = doNvmAddObject(client, HOT_NVM_ID, 0);
+            if (rc != WH_ERROR_OK && rc != WH_ERROR_NOSPACE)
+                return rc;
+            rc = doNvmAddObject(client, HOT_NVM_ID_2, 0);
+            if (rc != WH_ERROR_OK && rc != WH_ERROR_NOSPACE)
+                return rc;
+            rc = doNvmAddObject(client, HOT_NVM_ID_3, 0);
+            if (rc == WH_ERROR_NOSPACE)
+                rc = WH_ERROR_OK;
+            return rc;
+
+        /* Cross-subsystem: cache key AND add NVM object */
+        case PHASE_CROSS_COMMIT_VS_ADD:
+        case PHASE_CROSS_COMMIT_VS_DESTROY:
+            rc = doKeyCache(client, keyId, 0);
+            if (rc != WH_ERROR_OK)
+                return rc;
+            rc = doNvmAddObject(client, HOT_NVM_ID, 0);
+            if (rc == WH_ERROR_NOSPACE)
+                rc = WH_ERROR_OK;
+            return rc;
+
+        /* Freshen test: commit key to NVM, then evict from cache */
+        case PHASE_CROSS_FRESHEN_VS_MODIFY:
+            rc = doKeyCache(client, keyId, 0);
+            if (rc != WH_ERROR_OK)
+                return rc;
+            rc = doKeyCommit(client, keyId);
+            if (rc != WH_ERROR_OK && rc != WH_ERROR_NOSPACE)
+                return rc;
+            rc = doKeyEvict(client, keyId);
+            if (rc != WH_ERROR_OK && rc != WH_ERROR_NOTFOUND)
+                return rc;
+            /* Add NVM object for the NVM modify operations */
+            rc = doNvmAddObject(client, HOT_NVM_ID, 0);
+            if (rc == WH_ERROR_NOSPACE)
+                rc = WH_ERROR_OK;
+            return rc;
+
+        /* Erase phases: cache key, commit to NVM (erase needs both) */
+        case PHASE_KS_ERASE_VS_CACHE:
+        case PHASE_KS_ERASE_VS_EXPORT:
+            rc = doKeyCache(client, keyId, 0);
+            if (rc != WH_ERROR_OK)
+                return rc;
+            rc = doKeyCommit(client, keyId);
+            if (rc == WH_ERROR_NOSPACE)
+                rc = WH_ERROR_OK;
+            return rc;
+
+        /* Revoke phases: cache key, commit to NVM
+         * Each revoke phase uses a unique key ID (see selectKeyIdForPhase)
+         * because revoked keys have NONMODIFIABLE flag and can't be erased */
+        case PHASE_KS_REVOKE_VS_CACHE:
+        case PHASE_KS_REVOKE_VS_EXPORT:
+            rc = doKeyCache(client, keyId, 0);
+            if (rc != WH_ERROR_OK)
+                return rc;
+            rc = doKeyCommit(client, keyId);
+            if (rc == WH_ERROR_NOSPACE)
+                rc = WH_ERROR_OK;
+            return rc;
+
+        /* GetUniqueId: no setup needed */
+        case PHASE_KS_CONCURRENT_GETUNIQUEID:
+            return WH_ERROR_OK;
+
+        /* Explicit freshen: evict any existing key, cache, commit, then evict
+         * The initial evict makes room in cache. NOSPACE is tolerated because
+         * NVM may be full from revoke phases - test still runs testing NOTFOUND
+         */
+        case PHASE_KS_EXPLICIT_FRESHEN:
+            /* Evict first to make room if key already cached */
+            (void)doKeyEvict(client, keyId);
+            rc = doKeyCache(client, keyId, 0);
+            /* NOSPACE is OK - NVM may be full from revoke phases */
+            if (rc != WH_ERROR_OK && rc != WH_ERROR_NOSPACE)
+                return rc;
+            if (rc == WH_ERROR_OK) {
+                rc = doKeyCommit(client, keyId);
+                if (rc != WH_ERROR_OK && rc != WH_ERROR_NOSPACE)
+                    return rc;
+                rc = doKeyEvict(client, keyId);
+                if (rc == WH_ERROR_NOTFOUND)
+                    rc = WH_ERROR_OK;
+            }
+            else {
+                rc = WH_ERROR_OK; /* Tolerate NOSPACE */
+            }
+            return rc;
+
+        /* Add with reclaim: fill NVM with objects to trigger reclaim */
+        case PHASE_NVM_ADD_WITH_RECLAIM: {
+            int i;
+            /* Add 10 objects to fill up NVM and trigger reclaim during test */
+            for (i = 0; i < 10; i++) {
+                rc = doNvmAddObject(client, (whNvmId)(HOT_NVM_ID + i), 0);
+                if (rc != WH_ERROR_OK && rc != WH_ERROR_NOSPACE)
+                    return rc;
+            }
+            return WH_ERROR_OK;
+        }
+
+        /* GetAvailable vs Add: add one object */
+        case PHASE_NVM_GETAVAILABLE_VS_ADD:
+            (void)doNvmDestroy(client, HOT_NVM_ID);
+            rc = doNvmAddObject(client, HOT_NVM_ID, 0);
+            if (rc == WH_ERROR_NOSPACE)
+                rc = WH_ERROR_OK;
+            return rc;
+
+        /* GetMetadata vs Destroy: destroy then add object */
+        case PHASE_NVM_GETMETADATA_VS_DESTROY:
+            (void)doNvmDestroy(client, HOT_NVM_ID);
+            rc = doNvmAddObject(client, HOT_NVM_ID, 0);
+            if (rc == WH_ERROR_NOSPACE)
+                rc = WH_ERROR_OK;
+            return rc;
+
+        /* Counter phases - create counter with initial value 0 */
+        case PHASE_COUNTER_CONCURRENT_INCREMENT:
+        case PHASE_COUNTER_INCREMENT_VS_READ:
+            /* Destroy any existing counter first */
+            (void)doCounterDestroy(client, HOT_COUNTER_ID);
+            /* Initialize counter with value 0 */
+            rc = doCounterInit(client, HOT_COUNTER_ID, 0);
+            if (rc == WH_ERROR_NOSPACE)
+                rc = WH_ERROR_OK;
+            return rc;
+
+        /* NVM Read vs Resize - create object with initial size */
+        case PHASE_NVM_READ_VS_RESIZE:
+        case PHASE_NVM_CONCURRENT_RESIZE:
+            /* Destroy any existing object first */
+            (void)doNvmDestroy(client, HOT_NVM_ID);
+            /* Create object with initial size (64 bytes) */
+            rc = doNvmAddObject(client, HOT_NVM_ID, 0);
+            if (rc == WH_ERROR_NOSPACE)
+                rc = WH_ERROR_OK;
+            return rc;
+
+#ifdef WOLFHSM_CFG_DMA
+        /* DMA phases: evict first to make room in cache, tolerate NOSPACE
+         * (cache may be full from earlier phases like GetUniqueId) */
+        case PHASE_KS_CACHE_DMA_VS_EXPORT:
+        case PHASE_KS_EXPORT_DMA_VS_EVICT:
+            /* Evict first to make room if key already cached */
+            (void)doKeyEvict(client, keyId);
+            rc = doKeyCache(client, keyId, 0);
+            /* NOSPACE is OK - cache may be full from earlier phases */
+            if (rc == WH_ERROR_NOSPACE)
+                rc = WH_ERROR_OK;
+            return rc;
+
+        case PHASE_NVM_ADD_DMA_VS_READ:
+            (void)doNvmDestroy(client, HOT_NVM_ID);
+            return doNvmAddObject(client, HOT_NVM_ID, 0);
+
+        case PHASE_NVM_READ_DMA_VS_DESTROY:
+            (void)doNvmDestroy(client, HOT_NVM_ID);
+            return doNvmAddObject(client, HOT_NVM_ID, 0);
+
+        /* NVM Read DMA vs Resize */
+        case PHASE_NVM_READ_DMA_VS_RESIZE:
+            (void)doNvmDestroy(client, HOT_NVM_ID);
+            rc = doNvmAddObject(client, HOT_NVM_ID, 0);
+            if (rc == WH_ERROR_NOSPACE)
+                rc = WH_ERROR_OK;
+            return rc;
+#endif
+
+        default:
+            return WH_ERROR_OK;
+    }
+}
+
+/* ============================================================================
+ * PHASE OPERATION DISPATCH
+ * ========================================================================== */
+
+/* Helper for the resize phases: destroy the hot NVM object and re-add it
+ * with a size that alternates per iteration between NVM_OBJECT_DATA_SIZE and
+ * half of it, emulating a concurrent "resize". The destroy result is
+ * deliberately ignored (another client may have raced us to it). Returns a
+ * transport error if the add request/response exchange fails, otherwise the
+ * server's result code for the add. Extracted from the three previously
+ * duplicated resize branches; also drops an unused whNvmMetadata local. */
+static int doNvmResizeHotObject(whClientContext* client, int iteration)
+{
+    int       rc;
+    uint8_t   data[NVM_OBJECT_DATA_SIZE];
+    int32_t   out_rc;
+    whNvmSize newSize = (iteration % 2 == 0) ? NVM_OBJECT_DATA_SIZE
+                                             : (NVM_OBJECT_DATA_SIZE / 2);
+
+    /* Destroy existing object */
+    (void)doNvmDestroy(client, HOT_NVM_ID);
+
+    /* Re-add with new size */
+    memset(data, (uint8_t)(iteration & 0xFF), newSize);
+    rc = wh_Client_NvmAddObjectRequest(client, HOT_NVM_ID, WH_NVM_ACCESS_ANY,
+                                       WH_NVM_FLAGS_USAGE_ANY, 0, NULL,
+                                       newSize, data);
+    if (rc != WH_ERROR_OK) {
+        return rc;
+    }
+
+    /* Wait for response from server thread */
+    do {
+        rc = wh_Client_NvmAddObjectResponse(client, &out_rc);
+        if (rc == WH_ERROR_NOTREADY) {
+            sched_yield();
+        }
+    } while (rc == WH_ERROR_NOTREADY);
+
+    return (rc == WH_ERROR_OK) ? out_rc : rc;
+}
+
+/* Dispatch one iteration of the contended operation for 'phase'. 'role'
+ * selects which side of a two-operation contention pair this client plays
+ * (ROLE_OP_A or the other); 'keyId' is the phase's hot key where applicable.
+ * Returns the operation's raw result code, which the caller is expected to
+ * classify via isAcceptableResult(). Unknown phases are a no-op success. */
+static int executePhaseOperation(ClientServerPair* pair, ContentionPhase phase,
+                                 ClientRole role, int iteration, whKeyId keyId)
+{
+    whClientContext* client = &pair->client;
+
+    switch (phase) {
+        /* Keystore phases */
+        case PHASE_KS_CONCURRENT_CACHE:
+            return doKeyCache(client, keyId, iteration);
+
+        case PHASE_KS_CACHE_VS_EVICT:
+            if (role == ROLE_OP_A)
+                return doKeyCache(client, keyId, iteration);
+            else
+                return doKeyEvict(client, keyId);
+
+        case PHASE_KS_CACHE_VS_EXPORT:
+            if (role == ROLE_OP_A)
+                return doKeyCache(client, keyId, iteration);
+            else
+                return doKeyExport(client, keyId);
+
+        case PHASE_KS_EVICT_VS_EXPORT:
+            if (role == ROLE_OP_A)
+                return doKeyEvict(client, keyId);
+            else
+                return doKeyExport(client, keyId);
+
+        case PHASE_KS_CACHE_VS_COMMIT:
+            if (role == ROLE_OP_A)
+                return doKeyCache(client, keyId, iteration);
+            else
+                return doKeyCommit(client, keyId);
+
+        case PHASE_KS_COMMIT_VS_EVICT:
+            if (role == ROLE_OP_A)
+                return doKeyCommit(client, keyId);
+            else
+                return doKeyEvict(client, keyId);
+
+        case PHASE_KS_CONCURRENT_EXPORT:
+            return doKeyExport(client, keyId);
+
+        case PHASE_KS_CONCURRENT_EVICT:
+            return doKeyEvict(client, keyId);
+
+        /* NVM phases */
+        case PHASE_NVM_CONCURRENT_ADD:
+            return doNvmAddObject(client, HOT_NVM_ID, iteration);
+
+        case PHASE_NVM_ADD_VS_READ:
+            if (role == ROLE_OP_A)
+                return doNvmAddObject(client, HOT_NVM_ID, iteration);
+            else
+                return doNvmRead(client, HOT_NVM_ID);
+
+        case PHASE_NVM_ADD_VS_DESTROY:
+            if (role == ROLE_OP_A)
+                return doNvmAddObject(client, HOT_NVM_ID, iteration);
+            else
+                return doNvmDestroy(client, HOT_NVM_ID);
+
+        case PHASE_NVM_READ_VS_DESTROY:
+            if (role == ROLE_OP_A)
+                return doNvmRead(client, HOT_NVM_ID);
+            else
+                return doNvmDestroy(client, HOT_NVM_ID);
+
+        case PHASE_NVM_CONCURRENT_READ:
+            return doNvmRead(client, HOT_NVM_ID);
+
+        case PHASE_NVM_LIST_DURING_MODIFY:
+            if (role == ROLE_OP_A) {
+                /* Alternate between add and destroy */
+                if (iteration % 2 == 0)
+                    return doNvmAddObject(client, HOT_NVM_ID, iteration);
+                else
+                    return doNvmDestroy(client, HOT_NVM_ID);
+            }
+            else {
+                return doNvmList(client);
+            }
+
+        case PHASE_NVM_CONCURRENT_DESTROY:
+            return doNvmDestroy(client, HOT_NVM_ID);
+
+        /* Cross-subsystem phases */
+        case PHASE_CROSS_COMMIT_VS_ADD:
+            if (role == ROLE_OP_A)
+                return doKeyCommit(client, keyId);
+            else
+                return doNvmAddObject(client, HOT_NVM_ID, iteration);
+
+        case PHASE_CROSS_COMMIT_VS_DESTROY:
+            if (role == ROLE_OP_A)
+                return doKeyCommit(client, keyId);
+            else
+                return doNvmDestroy(client, HOT_NVM_ID);
+
+        case PHASE_CROSS_FRESHEN_VS_MODIFY:
+            if (role == ROLE_OP_A) {
+                /* Export triggers freshen from NVM if not in cache */
+                return doKeyExport(client, keyId);
+            }
+            else {
+                /* Modify NVM while freshen might be happening */
+                if (iteration % 2 == 0)
+                    return doNvmAddObject(client, HOT_NVM_ID, iteration);
+                else
+                    return doNvmDestroy(client, HOT_NVM_ID);
+            }
+
+        /* Erase vs Cache */
+        case PHASE_KS_ERASE_VS_CACHE:
+            if (role == ROLE_OP_A)
+                return doKeyErase(client, keyId);
+            else
+                return doKeyCache(client, keyId, iteration);
+
+        /* Erase vs Export */
+        case PHASE_KS_ERASE_VS_EXPORT:
+            if (role == ROLE_OP_A)
+                return doKeyErase(client, keyId);
+            else
+                return doKeyExport(client, keyId);
+
+        /* Revoke vs Cache */
+        case PHASE_KS_REVOKE_VS_CACHE:
+            if (role == ROLE_OP_A)
+                return doKeyRevoke(client, keyId);
+            else
+                return doKeyCache(client, keyId, iteration);
+
+        /* Revoke vs Export */
+        case PHASE_KS_REVOKE_VS_EXPORT:
+            if (role == ROLE_OP_A)
+                return doKeyRevoke(client, keyId);
+            else
+                return doKeyExport(client, keyId);
+
+        /* Concurrent GetUniqueId: all threads request fresh keys via cache with
+         * ERASED */
+        case PHASE_KS_CONCURRENT_GETUNIQUEID:
+            return doKeyCache(client, WH_KEYID_ERASED, iteration);
+
+        /* Explicit Freshen: all threads export (triggers freshen from NVM) */
+        case PHASE_KS_EXPLICIT_FRESHEN:
+            return doKeyExport(client, keyId);
+
+        /* Add With Reclaim: all threads add unique objects to trigger reclaim
+         */
+        case PHASE_NVM_ADD_WITH_RECLAIM:
+            /* Use client ID and iteration to create unique object IDs */
+            return doNvmAddObject(client,
+                                  (whNvmId)(HOT_NVM_ID + 10 +
+                                            (pair->clientId * 1000) +
+                                            iteration),
+                                  iteration);
+
+        /* GetAvailable vs Add */
+        case PHASE_NVM_GETAVAILABLE_VS_ADD:
+            if (role == ROLE_OP_A)
+                return doNvmGetAvailable(client);
+            else
+                return doNvmAddObject(client, HOT_NVM_ID, iteration);
+
+        /* GetMetadata vs Destroy */
+        case PHASE_NVM_GETMETADATA_VS_DESTROY:
+            if (role == ROLE_OP_A)
+                return doNvmGetMetadata(client, HOT_NVM_ID);
+            else
+                return doNvmDestroy(client, HOT_NVM_ID);
+
+        /* Counter Concurrent Increment */
+        case PHASE_COUNTER_CONCURRENT_INCREMENT: {
+            uint32_t counter = 0;
+            return doCounterIncrement(client, HOT_COUNTER_ID, &counter);
+        }
+
+        /* Counter Increment vs Read */
+        case PHASE_COUNTER_INCREMENT_VS_READ: {
+            uint32_t counter = 0;
+            if (role == ROLE_OP_A)
+                return doCounterIncrement(client, HOT_COUNTER_ID, &counter);
+            else
+                return doCounterRead(client, HOT_COUNTER_ID, &counter);
+        }
+
+        /* NVM Read vs Resize - alternating object sizes */
+        case PHASE_NVM_READ_VS_RESIZE:
+            if (role == ROLE_OP_A) {
+                /* Read operation */
+                return doNvmRead(client, HOT_NVM_ID);
+            }
+            else {
+                /* Resize operation: destroy and re-add with different size
+                 * Alternate between 64 bytes (full) and 32 bytes (half) */
+                return doNvmResizeHotObject(client, iteration);
+            }
+
+        /* NVM Concurrent Resize */
+        case PHASE_NVM_CONCURRENT_RESIZE:
+            /* All threads resize: destroy and re-add with different size */
+            return doNvmResizeHotObject(client, iteration);
+
+#ifdef WOLFHSM_CFG_DMA
+        /* DMA: Cache DMA vs Export */
+        case PHASE_KS_CACHE_DMA_VS_EXPORT:
+            if (role == ROLE_OP_A)
+                return doKeyCacheDma(pair, keyId, iteration);
+            else
+                return doKeyExport(client, keyId);
+
+        /* DMA: Export DMA vs Evict */
+        case PHASE_KS_EXPORT_DMA_VS_EVICT:
+            if (role == ROLE_OP_A)
+                return doKeyExportDma(pair, keyId);
+            else
+                return doKeyEvict(client, keyId);
+
+        /* DMA: Add DMA vs Read */
+        case PHASE_NVM_ADD_DMA_VS_READ:
+            if (role == ROLE_OP_A)
+                return doNvmAddObjectDma(pair, HOT_NVM_ID, iteration);
+            else
+                return doNvmRead(client, HOT_NVM_ID);
+
+        /* DMA: Read DMA vs Destroy */
+        case PHASE_NVM_READ_DMA_VS_DESTROY:
+            if (role == ROLE_OP_A)
+                return doNvmReadDma(pair, HOT_NVM_ID);
+            else
+                return doNvmDestroy(client, HOT_NVM_ID);
+
+        /* NVM Read DMA vs Resize */
+        case PHASE_NVM_READ_DMA_VS_RESIZE:
+            if (role == ROLE_OP_A) {
+                /* DMA Read operation */
+                return doNvmReadDma(pair, HOT_NVM_ID);
+            }
+            else {
+                /* Resize operation: destroy and re-add with different size */
+                return doNvmResizeHotObject(client, iteration);
+            }
+#endif
+
+        default:
+            return WH_ERROR_OK;
+    }
+}
+
+/* ============================================================================
+ * RESULT VALIDATION
+ * ========================================================================== */
+
+/* Classify whether result code 'rc' is an acceptable outcome for 'phase'.
+ * Under deliberate contention many operations can legitimately lose a race
+ * (object already destroyed, cache or NVM full), so each phase whitelists
+ * its benign error codes. Returns nonzero when 'rc' is acceptable, 0 when it
+ * indicates a genuine failure. WH_ERROR_OK is always acceptable. */
+static int isAcceptableResult(ContentionPhase phase, int rc)
+{
+    /* Always acceptable */
+    if (rc == WH_ERROR_OK)
+        return 1;
+
+    switch (phase) {
+        /* Cache operations: NOSPACE acceptable (cache full) */
+        case PHASE_KS_CONCURRENT_CACHE:
+        case PHASE_KS_CACHE_VS_EVICT:
+        case PHASE_KS_CACHE_VS_EXPORT:
+        case PHASE_KS_CACHE_VS_COMMIT:
+            return (rc == WH_ERROR_NOSPACE || rc == WH_ERROR_NOTFOUND);
+
+        /* Evict/Export: NOTFOUND acceptable (already evicted) */
+        case PHASE_KS_EVICT_VS_EXPORT:
+        case PHASE_KS_COMMIT_VS_EVICT:
+        case PHASE_KS_CONCURRENT_EVICT:
+        case PHASE_KS_CONCURRENT_EXPORT:
+            return (rc == WH_ERROR_NOTFOUND || rc == WH_ERROR_NOSPACE);
+
+        /* NVM Add: NOSPACE acceptable */
+        case PHASE_NVM_CONCURRENT_ADD:
+        case PHASE_NVM_ADD_VS_READ:
+        case PHASE_NVM_ADD_VS_DESTROY:
+        case PHASE_NVM_LIST_DURING_MODIFY:
+            return (rc == WH_ERROR_NOSPACE || rc == WH_ERROR_NOTFOUND);
+
+        /* NVM Read/Destroy: NOTFOUND acceptable */
+        case PHASE_NVM_READ_VS_DESTROY:
+        case PHASE_NVM_CONCURRENT_READ:
+        case PHASE_NVM_CONCURRENT_DESTROY:
+            return (rc == WH_ERROR_NOTFOUND);
+
+        /* Cross-subsystem: multiple acceptable */
+        case PHASE_CROSS_COMMIT_VS_ADD:
+        case PHASE_CROSS_COMMIT_VS_DESTROY:
+        case PHASE_CROSS_FRESHEN_VS_MODIFY:
+            return (rc == WH_ERROR_NOSPACE || rc == WH_ERROR_NOTFOUND);
+
+        /* Erase phases: NOTFOUND, NOSPACE acceptable */
+        case PHASE_KS_ERASE_VS_CACHE:
+        case PHASE_KS_ERASE_VS_EXPORT:
+            return (rc == WH_ERROR_NOTFOUND || rc == WH_ERROR_NOSPACE);
+
+        /* Revoke phases: NOTFOUND, ACCESS, USAGE, NOSPACE acceptable */
+        case PHASE_KS_REVOKE_VS_CACHE:
+        case PHASE_KS_REVOKE_VS_EXPORT:
+            return (rc == WH_ERROR_NOTFOUND || rc == WH_ERROR_ACCESS ||
+                    rc == WH_ERROR_USAGE || rc == WH_ERROR_NOSPACE);
+
+        /* GetUniqueId: NOSPACE acceptable */
+        case PHASE_KS_CONCURRENT_GETUNIQUEID:
+            return (rc == WH_ERROR_NOSPACE);
+
+        /* Explicit Freshen: NOTFOUND, NOSPACE acceptable */
+        case PHASE_KS_EXPLICIT_FRESHEN:
+            return (rc == WH_ERROR_NOTFOUND || rc == WH_ERROR_NOSPACE);
+
+        /* Add With Reclaim: NOSPACE, ACCESS acceptable
+         * ACCESS can occur if reclaim is modifying object metadata
+         * concurrently with add operations */
+        case PHASE_NVM_ADD_WITH_RECLAIM:
+            return (rc == WH_ERROR_NOSPACE || rc == WH_ERROR_ACCESS);
+
+        /* GetAvailable vs Add: NOSPACE, NOTFOUND acceptable */
+        case PHASE_NVM_GETAVAILABLE_VS_ADD:
+            return (rc == WH_ERROR_NOSPACE || rc == WH_ERROR_NOTFOUND);
+
+        /* GetMetadata vs Destroy: NOTFOUND acceptable */
+        case PHASE_NVM_GETMETADATA_VS_DESTROY:
+            return (rc == WH_ERROR_NOTFOUND);
+
+        /* Counter phases - NOTFOUND marked acceptable to prevent test abort,
+         * but validation will catch it as a bug (counter < expectedMin).
+         * NOTFOUND shouldn't occur in CONCURRENT_INCREMENT (no destroys). */
+        case PHASE_COUNTER_CONCURRENT_INCREMENT:
+        case PHASE_COUNTER_INCREMENT_VS_READ:
+            return (rc == WH_ERROR_NOTFOUND);
+
+        /* NVM Read vs Resize - NOTFOUND acceptable (object destroyed
+         * during resize), NOSPACE acceptable (NVM full) */
+        case PHASE_NVM_READ_VS_RESIZE:
+        case PHASE_NVM_CONCURRENT_RESIZE:
+            return (rc == WH_ERROR_NOTFOUND || rc == WH_ERROR_NOSPACE);
+
+#ifdef WOLFHSM_CFG_DMA
+        /* DMA phases: same as non-DMA equivalents */
+        case PHASE_KS_CACHE_DMA_VS_EXPORT:
+            return (rc == WH_ERROR_NOSPACE || rc == WH_ERROR_NOTFOUND);
+
+        case PHASE_KS_EXPORT_DMA_VS_EVICT:
+            return (rc == WH_ERROR_NOTFOUND || rc == WH_ERROR_NOSPACE);
+
+        case PHASE_NVM_ADD_DMA_VS_READ:
+            return (rc == WH_ERROR_NOSPACE || rc == WH_ERROR_NOTFOUND);
+
+        case PHASE_NVM_READ_DMA_VS_DESTROY:
+            return (rc == WH_ERROR_NOTFOUND);
+
+        /* NVM Read DMA vs Resize */
+        case PHASE_NVM_READ_DMA_VS_RESIZE:
+            return (rc == WH_ERROR_NOTFOUND || rc == WH_ERROR_NOSPACE);
+#endif
+
+        /* Unknown phase: any non-OK code is a failure */
+        default:
+            return 0;
+    }
+}
+
+/* ============================================================================
+ * CLIENT THREAD (CONTINUOUS STREAMING)
+ * ========================================================================== */
+
+/* Client worker thread. Repeatedly participates in test phases until the
+ * main thread sets globalStopFlag. Each phase consists of:
+ *   1. setupBarrier          - phase parameters published by main
+ *   2. (client 0 only)       - one-time phase setup
+ *   3. setupCompleteBarrier  - setup finished
+ *   4. streamStartBarrier    - begin streaming
+ *   5. tight request loop    - runs while ctx->phaseRunning is set
+ *   6. streamEndBarrier      - all clients finished streaming
+ * The barrier sequence must match runPhase()/the shutdown sequence exactly;
+ * on exit the thread still passes all remaining barriers so no peer is left
+ * blocked. arg is a ClientServerPair*. Returns NULL. */
+static void* contentionClientThread(void* arg)
+{
+    ClientServerPair* pair = (ClientServerPair*)arg;
+    StressTestContext* ctx = pair->sharedCtx;
+    int rc;
+    int localIteration;
+
+    /* Wait for all threads to start */
+    pthread_barrier_wait(&ctx->startBarrier);
+
+    /* Always call barrier first, then check exit flag - prevents deadlock */
+    while (1) {
+        /* ===== SETUP PHASE (once per phase) ===== */
+        pthread_barrier_wait(&ctx->setupBarrier);
+        if (ATOMIC_LOAD_INT(&ctx->globalStopFlag)) {
+            /* Call remaining barriers before exiting to prevent deadlock */
+            pthread_barrier_wait(&ctx->setupCompleteBarrier);
+            pthread_barrier_wait(&ctx->streamStartBarrier);
+            pthread_barrier_wait(&ctx->streamEndBarrier);
+            break;
+        }
+
+        /* Only client 0 does setup */
+        if (pair->clientId == 0) {
+            rc = doPhaseSetup(pair, ctx->currentPhase, ctx->currentKeyId);
+            if (rc != WH_ERROR_OK) {
+                /* Setup failure is reported but the phase still runs; the
+                 * per-iteration error accounting below will surface issues */
+                WH_ERROR_PRINT("Setup failed for phase %d: %d\n",
+                               ctx->currentPhase, rc);
+            }
+        }
+
+        pthread_barrier_wait(&ctx->setupCompleteBarrier);
+
+        /* ===== STREAMING PHASE (tight loop, no barriers) ===== */
+        pthread_barrier_wait(&ctx->streamStartBarrier);
+
+        /* Snapshot phase parameters; main only modifies these between the
+         * setup and streamEnd barriers, so the copies are stable here */
+        ContentionPhase phase = ctx->currentPhase;
+        whKeyId         keyId = ctx->currentKeyId;
+        ClientRole      role  = ctx->clientRoles[pair->clientId];
+        localIteration        = 0;
+
+        /* Stream requests until phaseRunning becomes 0 */
+        while (ATOMIC_LOAD_INT(&ctx->phaseRunning)) {
+            rc =
+                executePhaseOperation(pair, phase, role, localIteration, keyId);
+
+            /* Count iteration (localIteration feeds the operation; the
+             * atomic counter is read by main for progress tracking) */
+            localIteration++;
+            ATOMIC_ADD_INT(&pair->iterationCount, 1);
+
+            /* Track unexpected errors */
+            if (!isAcceptableResult(phase, rc)) {
+                ATOMIC_ADD_INT(&pair->errorCount, 1);
+            }
+
+            /* NO BARRIER HERE - continuous streaming */
+        }
+
+        /* Wait for all clients to finish streaming */
+        pthread_barrier_wait(&ctx->streamEndBarrier);
+    }
+
+    return NULL;
+}
+
+/* ============================================================================
+ * PHASE EXECUTION
+ * ========================================================================== */
+
+/* Check whether every client has completed at least `target` iterations in
+ * the current phase. Returns 1 when all clients have reached the target,
+ * 0 as soon as any client is still below it. */
+static int allClientsReachedIterations(StressTestContext* ctx, int target)
+{
+    int clientIdx = 0;
+
+    while (clientIdx < NUM_CLIENTS) {
+        int done = ATOMIC_LOAD_INT(&ctx->pairs[clientIdx].iterationCount);
+        if (done < target) {
+            return 0;
+        }
+        clientIdx++;
+    }
+    return 1;
+}
+
+/* Post-phase validation for applicable tests
+ * Returns WH_ERROR_OK if validation passes, error code otherwise */
+static int validatePhaseResult(StressTestContext* ctx, ContentionPhase phase,
+                               int totalIterations, int totalErrors)
+{
+    int rc;
+
+    switch (phase) {
+        case PHASE_COUNTER_CONCURRENT_INCREMENT: {
+            /* Validate counter value matches expected increments
+             * Expected: number of successful increments
+             * Count ROLE_OP_A threads (all 4 in this phase) */
+            uint32_t counter  = 0;
+            int      opACount = 0;
+            int      i;
+
+            /* Count how many threads were doing increments (ROLE_OP_A)
+             * NOTE(review): opACount is computed but not used in the
+             * expectedMin formula below - presumably all clients are
+             * ROLE_OP_A in this phase so totalIterations already covers
+             * every incrementer; confirm against the phase table */
+            for (i = 0; i < NUM_CLIENTS; i++) {
+                if (ctx->clientRoles[i] == ROLE_OP_A) {
+                    opACount++;
+                }
+            }
+
+            /* Read final counter value using client 0 */
+            rc = doCounterRead(&ctx->pairs[0].client, HOT_COUNTER_ID, &counter);
+            if (rc != WH_ERROR_OK) {
+                WH_ERROR_PRINT(
+                    "    VALIDATION FAILED: Counter read failed: %d\n", rc);
+                return WH_ERROR_ABORTED;
+            }
+
+            /* Expected minimum: every attempted iteration that was not an
+             * unacceptable failure must have incremented the counter.
+             * totalIterations counts all attempts across clients, and
+             * totalErrors counts failures that did not increment. */
+            uint32_t expectedMin = totalIterations - totalErrors;
+
+            WH_TEST_PRINT("    Counter validation: value=%u, expected_min=%u "
+                          "(iters=%d, errors=%d)\n",
+                          counter, expectedMin, totalIterations, totalErrors);
+
+            /* Counter must be at least expectedMin. If counter < expectedMin,
+             * this indicates either:
+             *  1. Lost increments due to locking bug (race condition)
+             *  2. NOTFOUND occurred (shouldn't happen - no concurrent
+             *     destroys) */
+            if (counter < expectedMin) {
+                WH_ERROR_PRINT("    VALIDATION FAILED: Counter value %u < "
+                               "expected min %u\n",
+                               counter, expectedMin);
+                return WH_ERROR_ABORTED;
+            }
+
+            return WH_ERROR_OK;
+        }
+
+        /* Other phases don't need special validation yet */
+        default:
+            return WH_ERROR_OK;
+    }
+}
+
+/* Select the appropriate keyId for a phase.
+ * Revoke-related phases need unique key IDs because revoked keys can't be
+ * erased or re-cached. Each phase type that might leave a key revoked needs
+ * its own key ID to avoid conflicts.
+ */
+/* Select the appropriate keyId for a phase.
+ * Revoke-related phases need unique key IDs because revoked keys can't be
+ * erased or re-cached; each such phase type gets its own key ID to avoid
+ * conflicts. All other phases (including freshen) share HOT_KEY_ID so keys
+ * committed by earlier phases can be reused, avoiding NVM space issues. */
+static whKeyId selectKeyIdForPhase(ContentionPhase phase, int isGlobal)
+{
+    whKeyId id;
+
+    if (phase == PHASE_KS_REVOKE_VS_CACHE) {
+        id = isGlobal ? REVOKE_CACHE_KEY_GLOBAL : REVOKE_CACHE_KEY_LOCAL;
+    }
+    else if (phase == PHASE_KS_REVOKE_VS_EXPORT) {
+        id = isGlobal ? REVOKE_EXPORT_KEY_GLOBAL : REVOKE_EXPORT_KEY_LOCAL;
+    }
+    else {
+        id = isGlobal ? HOT_KEY_ID_GLOBAL : HOT_KEY_ID_LOCAL;
+    }
+    return id;
+}
+
+/* Orchestrate one contention phase from the main thread: publish phase
+ * parameters, drive the client threads through the setup / stream / end
+ * barrier sequence (which must mirror contentionClientThread exactly),
+ * wait until every client reaches config->iterations (or the optional
+ * compile-time timeout elapses), then collect results and run phase-specific
+ * validation. Returns WH_ERROR_OK on success, WH_ERROR_ABORTED on timeout or
+ * any unacceptable per-iteration error, or a validation error code. */
+static int runPhase(StressTestContext* ctx, const PhaseConfig* config,
+                    whKeyId keyId)
+{
+    int i;
+    int rc;
+#ifdef WOLFHSM_CFG_TEST_STRESS_PHASE_TIMEOUT_SEC
+    time_t phaseStart;
+#endif
+    int totalIterations = 0;
+    int totalErrors     = 0;
+    int timedOut        = 0;
+    const char* keyScope =
+        (keyId & WH_KEYID_CLIENT_GLOBAL_FLAG) ? "global" : "local";
+
+    WH_TEST_PRINT("  Phase: %s (%s key)\n", config->name, keyScope);
+
+    /* 1. Set phase info for all clients */
+    ctx->currentPhase = config->phase;
+    ctx->currentKeyId = keyId;
+    for (i = 0; i < NUM_CLIENTS; i++) {
+        ctx->clientRoles[i] = config->roles[i];
+    }
+
+    /* 2. Signal clients to run setup */
+    pthread_barrier_wait(&ctx->setupBarrier);
+
+    /* 3. Wait for setup to complete */
+    pthread_barrier_wait(&ctx->setupCompleteBarrier);
+
+    /* 4. Reset iteration counts */
+    for (i = 0; i < NUM_CLIENTS; i++) {
+        ATOMIC_STORE_INT(&ctx->pairs[i].iterationCount, 0);
+        ATOMIC_STORE_INT(&ctx->pairs[i].errorCount, 0);
+    }
+
+    /* 5. Signal clients to start streaming */
+    ATOMIC_STORE_INT(&ctx->phaseRunning, 1);
+    pthread_barrier_wait(&ctx->streamStartBarrier);
+
+    /* 6. Let clients stream for configured iterations OR duration */
+#ifdef WOLFHSM_CFG_TEST_STRESS_PHASE_TIMEOUT_SEC
+    phaseStart = time(NULL);
+#endif
+    while (!allClientsReachedIterations(ctx, config->iterations)) {
+#ifdef WOLFHSM_CFG_TEST_STRESS_PHASE_TIMEOUT_SEC
+        if (time(NULL) - phaseStart >
+            WOLFHSM_CFG_TEST_STRESS_PHASE_TIMEOUT_SEC) {
+            WH_ERROR_PRINT("    Phase timeout after %d seconds\n",
+                           WOLFHSM_CFG_TEST_STRESS_PHASE_TIMEOUT_SEC);
+            timedOut = 1;
+            break;
+        }
+#endif
+        {
+            /* Poll progress at 10ms intervals to avoid busy-waiting */
+            struct timespec ts = {0, 10000000}; /* 10ms */
+            nanosleep(&ts, NULL);
+        }
+    }
+
+    /* 7. Signal clients to stop streaming */
+    ATOMIC_STORE_INT(&ctx->phaseRunning, 0);
+
+    /* 8. Wait for all clients to finish current operation */
+    pthread_barrier_wait(&ctx->streamEndBarrier);
+
+    /* 9. Collect and print results */
+    for (i = 0; i < NUM_CLIENTS; i++) {
+        int iters  = ATOMIC_LOAD_INT(&ctx->pairs[i].iterationCount);
+        int errors = ATOMIC_LOAD_INT(&ctx->pairs[i].errorCount);
+        totalIterations += iters;
+        totalErrors += errors;
+    }
+    WH_TEST_PRINT("    Total: %d iterations, %d errors\n", totalIterations,
+                  totalErrors);
+
+    /* 10. Run phase-specific validation */
+    rc = validatePhaseResult(ctx, config->phase, totalIterations, totalErrors);
+    if (rc != WH_ERROR_OK) {
+        return rc;
+    }
+
+    /* Return error if phase failed */
+    if (timedOut || totalErrors > 0) {
+        return WH_ERROR_ABORTED;
+    }
+    return WH_ERROR_OK;
+}
+
+/* ============================================================================
+ * MAIN TEST FUNCTION
+ * ========================================================================== */
+
+static void printFinalStats(StressTestContext* ctx)
+{
+ int i;
+ int totalErrors = 0;
+
+ WH_TEST_PRINT("\n=== Final Statistics ===\n");
+ for (i = 0; i < NUM_CLIENTS; i++) {
+ WH_TEST_PRINT("Client %d: total errors=%d\n", i,
+ ctx->pairs[i].errorCount);
+ totalErrors += ctx->pairs[i].errorCount;
+ }
+ WH_TEST_PRINT("Total errors across all phases: %d\n", totalErrors);
+}
+
+/* Top-level entry point for the deterministic contention stress test.
+ * Initializes wolfCrypt, the shared NVM, NUM_CLIENTS client/server pairs and
+ * the phase barriers; spawns one server and one client thread per pair; runs
+ * every configured phase twice (global key, then local key); then performs a
+ * coordinated shutdown, joins all threads, and tears everything down.
+ * Returns 0 on success or the first error encountered. */
+int whTest_ThreadSafeStress(void)
+{
+    StressTestContext ctx;
+    int               i;
+    int               rc;
+    int               testResult   = 0;
+    int               phasesFailed = 0;
+    size_t            phaseIdx;
+
+    memset(&ctx, 0, sizeof(ctx));
+
+    WH_TEST_PRINT("=== Deterministic Contention Stress Test ===\n");
+    WH_TEST_PRINT("Clients: %d, Phases: %zu, Iterations per phase: %d\n",
+                  NUM_CLIENTS, sizeof(phases) / sizeof(phases[0]),
+                  PHASE_ITERATIONS);
+    WH_TEST_PRINT("Key scopes: global, local\n");
+
+    /* Initialize wolfCrypt */
+    rc = wolfCrypt_Init();
+    if (rc != 0) {
+        WH_ERROR_PRINT("wolfCrypt_Init failed: %d\n", rc);
+        return rc;
+    }
+
+    /* Initialize shared NVM with lock */
+    rc = initSharedNvm(&ctx);
+    if (rc != WH_ERROR_OK) {
+        WH_ERROR_PRINT("Failed to initialize shared NVM\n");
+        wolfCrypt_Cleanup();
+        return rc;
+    }
+
+    /* Initialize client-server pairs */
+    for (i = 0; i < NUM_CLIENTS; i++) {
+        rc = initClientServerPair(&ctx, i);
+        if (rc != WH_ERROR_OK) {
+            WH_ERROR_PRINT("Failed to initialize pair %d\n", i);
+            testResult = rc;
+            goto cleanup;
+        }
+    }
+
+    /* Initialize barriers
+     * startBarrier: main + servers + clients = NUM_CLIENTS * 2 + 1
+     * phase barriers: main + clients = NUM_CLIENTS + 1
+     * NOTE(review): if a later barrier init fails, barriers already
+     * initialized are not destroyed on the cleanup path (goto cleanup skips
+     * the pthread_barrier_destroy calls) - minor resource leak on this
+     * error path; TODO confirm/restructure */
+    rc = pthread_barrier_init(&ctx.startBarrier, NULL, NUM_CLIENTS * 2 + 1);
+    if (rc != 0) {
+        WH_ERROR_PRINT("Failed to init start barrier: %d\n", rc);
+        testResult = rc;
+        goto cleanup;
+    }
+
+    rc = pthread_barrier_init(&ctx.setupBarrier, NULL, NUM_CLIENTS + 1);
+    if (rc != 0) {
+        WH_ERROR_PRINT("Failed to init setup barrier: %d\n", rc);
+        testResult = rc;
+        goto cleanup;
+    }
+
+    rc = pthread_barrier_init(&ctx.setupCompleteBarrier, NULL, NUM_CLIENTS + 1);
+    if (rc != 0) {
+        WH_ERROR_PRINT("Failed to init setupComplete barrier: %d\n", rc);
+        testResult = rc;
+        goto cleanup;
+    }
+
+    rc = pthread_barrier_init(&ctx.streamStartBarrier, NULL, NUM_CLIENTS + 1);
+    if (rc != 0) {
+        WH_ERROR_PRINT("Failed to init streamStart barrier: %d\n", rc);
+        testResult = rc;
+        goto cleanup;
+    }
+
+    rc = pthread_barrier_init(&ctx.streamEndBarrier, NULL, NUM_CLIENTS + 1);
+    if (rc != 0) {
+        WH_ERROR_PRINT("Failed to init streamEnd barrier: %d\n", rc);
+        testResult = rc;
+        goto cleanup;
+    }
+
+    WH_TEST_PRINT("Starting %d server threads and %d client threads...\n",
+                  NUM_CLIENTS, NUM_CLIENTS);
+
+    /* Start all server threads
+     * NOTE(review): if thread creation fails partway through either loop,
+     * the threads already created are blocked in
+     * pthread_barrier_wait(&ctx.startBarrier), which requires
+     * NUM_CLIENTS * 2 + 1 participants that will never all arrive; the
+     * pthread_join calls at join_threads may then deadlock. TODO confirm
+     * and consider a cancellable startup handshake for this error path. */
+    for (i = 0; i < NUM_CLIENTS; i++) {
+        rc = pthread_create(&ctx.pairs[i].serverThread, NULL, serverThread,
+                            &ctx.pairs[i]);
+        if (rc != 0) {
+            WH_ERROR_PRINT("Failed to create server thread %d: %d\n", i, rc);
+            ATOMIC_STORE_INT(&ctx.globalStopFlag, 1);
+            testResult = rc;
+            goto join_threads;
+        }
+    }
+
+    /* Start all client threads */
+    for (i = 0; i < NUM_CLIENTS; i++) {
+        rc = pthread_create(&ctx.pairs[i].clientThread, NULL,
+                            contentionClientThread, &ctx.pairs[i]);
+        if (rc != 0) {
+            WH_ERROR_PRINT("Failed to create client thread %d: %d\n", i, rc);
+            ATOMIC_STORE_INT(&ctx.globalStopFlag, 1);
+            testResult = rc;
+            goto join_threads;
+        }
+    }
+
+    /* Synchronize start - wait for all threads to be ready */
+    pthread_barrier_wait(&ctx.startBarrier);
+    WH_TEST_PRINT("All threads started, running phases...\n\n");
+
+    /* Run all phases, each once with a global key and once with a local key */
+    for (phaseIdx = 0; phaseIdx < sizeof(phases) / sizeof(phases[0]);
+         phaseIdx++) {
+        whKeyId globalKey = selectKeyIdForPhase(phases[phaseIdx].phase, 1);
+        whKeyId localKey  = selectKeyIdForPhase(phases[phaseIdx].phase, 0);
+
+        rc = runPhase(&ctx, &phases[phaseIdx], globalKey);
+        if (rc != WH_ERROR_OK) {
+            WH_ERROR_PRINT("Phase %zu (global) failed: %d\n", phaseIdx, rc);
+            phasesFailed++;
+            if (testResult == 0) {
+                testResult = rc; /* Record first error */
+            }
+            /* Continue to next phase - don't break */
+        }
+
+        rc = runPhase(&ctx, &phases[phaseIdx], localKey);
+        if (rc != WH_ERROR_OK) {
+            WH_ERROR_PRINT("Phase %zu (local) failed: %d\n", phaseIdx, rc);
+            phasesFailed++;
+            if (testResult == 0) {
+                testResult = rc; /* Record first error */
+            }
+            /* Continue to next phase - don't break */
+        }
+    }
+
+    /* Signal global stop */
+    ATOMIC_STORE_INT(&ctx.globalStopFlag, 1);
+
+    /* Need to release clients from barrier waits - run one more "dummy" phase
+     * (mirrors the barrier sequence in contentionClientThread's exit path) */
+    pthread_barrier_wait(&ctx.setupBarrier);
+    pthread_barrier_wait(&ctx.setupCompleteBarrier);
+    ATOMIC_STORE_INT(&ctx.phaseRunning, 0);
+    pthread_barrier_wait(&ctx.streamStartBarrier);
+    pthread_barrier_wait(&ctx.streamEndBarrier);
+
+join_threads:
+    /* Signal stop for servers */
+    for (i = 0; i < NUM_CLIENTS; i++) {
+        ATOMIC_STORE_INT(&ctx.pairs[i].stopFlag, 1);
+    }
+
+    /* Join all client threads
+     * NOTE(review): comparing pthread_t against 0 relies on the earlier
+     * memset of ctx zeroing unused handles; pthread_t is opaque and this is
+     * not strictly portable, though it works on common platforms */
+    for (i = 0; i < NUM_CLIENTS; i++) {
+        if (ctx.pairs[i].clientThread != 0) {
+            pthread_join(ctx.pairs[i].clientThread, NULL);
+        }
+    }
+
+    /* Join all server threads */
+    for (i = 0; i < NUM_CLIENTS; i++) {
+        if (ctx.pairs[i].serverThread != 0) {
+            pthread_join(ctx.pairs[i].serverThread, NULL);
+        }
+    }
+
+    /* Print statistics */
+    printFinalStats(&ctx);
+
+    pthread_barrier_destroy(&ctx.startBarrier);
+    pthread_barrier_destroy(&ctx.setupBarrier);
+    pthread_barrier_destroy(&ctx.setupCompleteBarrier);
+    pthread_barrier_destroy(&ctx.streamStartBarrier);
+    pthread_barrier_destroy(&ctx.streamEndBarrier);
+
+cleanup:
+    /* Cleanup client-server pairs */
+    for (i = 0; i < NUM_CLIENTS; i++) {
+        cleanupClientServerPair(&ctx.pairs[i]);
+    }
+
+    /* Cleanup NVM */
+    wh_Nvm_Cleanup(&ctx.nvm);
+
+    /* Cleanup mutex attributes */
+    pthread_mutexattr_destroy(&ctx.mutexAttr);
+
+    /* Cleanup wolfCrypt */
+    wolfCrypt_Cleanup();
+
+    if (testResult == 0) {
+        WH_TEST_PRINT(
+            "\n=== Deterministic Contention Stress Test PASSED ===\n");
+    }
+    else {
+        WH_ERROR_PRINT("\nPhases failed: %d\n", phasesFailed);
+        WH_ERROR_PRINT("=== Deterministic Contention Stress Test FAILED ===\n");
+    }
+
+    return testResult;
+}
+
+#endif /* !WOLFHSM_CFG_THREADSAFE || !WOLFHSM_CFG_TEST_POSIX || \
+ !WOLFHSM_CFG_GLOBAL_KEYS || !WOLFHSM_CFG_ENABLE_CLIENT || \
+ !WOLFHSM_CFG_ENABLE_SERVER || WOLFHSM_CFG_NO_CRYPTO */
diff --git a/test/wh_test_posix_threadsafe_stress.h b/test/wh_test_posix_threadsafe_stress.h
new file mode 100644
index 00000000..3b0891b4
--- /dev/null
+++ b/test/wh_test_posix_threadsafe_stress.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2026 wolfSSL Inc.
+ *
+ * This file is part of wolfHSM.
+ *
+ * wolfHSM is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * wolfHSM is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with wolfHSM. If not, see <https://www.gnu.org/licenses/>.
+ */
+/*
+ * test/wh_test_posix_threadsafe_stress.h
+ *
+ * POSIX multithreaded stress test for thread safety validation.
+ * This test uses POSIX threading primitives (pthreads).
+ */
+
+#ifndef TEST_WH_TEST_POSIX_THREADSAFE_STRESS_H_
+#define TEST_WH_TEST_POSIX_THREADSAFE_STRESS_H_
+
+/*
+ * Runs multithreaded stress tests for thread safety validation.
+ * Tests concurrent access to shared NVM and global key cache from
+ * multiple client threads via separate server contexts.
+ *
+ * Requires: WOLFHSM_CFG_THREADSAFE, WOLFHSM_CFG_TEST_POSIX,
+ * WOLFHSM_CFG_GLOBAL_KEYS
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+int whTest_ThreadSafeStress(void);
+
+#endif /* TEST_WH_TEST_POSIX_THREADSAFE_STRESS_H_ */
diff --git a/test/wh_test_server_img_mgr.c b/test/wh_test_server_img_mgr.c
index cf5f9092..dbd4e148 100644
--- a/test/wh_test_server_img_mgr.c
+++ b/test/wh_test_server_img_mgr.c
@@ -1230,7 +1230,7 @@ int whTest_ServerImgMgr(whTestNvmBackendType nvmType)
const whFlashCb fcb[1] = {WH_FLASH_RAMSIM_CB};
whTestNvmBackendUnion nvm_setup;
- whNvmConfig n_conf[1];
+ whNvmConfig n_conf[1] = {0};
whNvmContext nvm[1] = {{0}};
WH_TEST_RETURN_ON_FAIL(
diff --git a/wolfhsm/wh_client.h b/wolfhsm/wh_client.h
index f1b1b70a..3b695d8a 100644
--- a/wolfhsm/wh_client.h
+++ b/wolfhsm/wh_client.h
@@ -1517,6 +1517,10 @@ int wh_Client_NvmListResponse(whClientContext* c, int32_t* out_rc,
* the criteria.
* @param[out] out_id Pointer to store the ID of the first matching NVM object.
* @return int Returns 0 on success, or a negative error code on failure.
+ *
+ * @note Enumerating all objects requires calling this function in a loop
+ * with successive start_id values. In thread-safe builds, concurrent
+ * NVM modifications between calls may result in inconsistent results.
*/
int wh_Client_NvmList(whClientContext* c, whNvmAccess access, whNvmFlags flags,
whNvmId start_id, int32_t* out_rc, whNvmId* out_count,
diff --git a/wolfhsm/wh_lock.h b/wolfhsm/wh_lock.h
new file mode 100644
index 00000000..62e0b35a
--- /dev/null
+++ b/wolfhsm/wh_lock.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2026 wolfSSL Inc.
+ *
+ * This file is part of wolfHSM.
+ *
+ * wolfHSM is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * wolfHSM is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with wolfHSM. If not, see <https://www.gnu.org/licenses/>.
+ */
+/*
+ * wolfhsm/wh_lock.h
+ *
+ * Platform-agnostic lock abstraction for thread-safe access to shared
+ * resources. This module provides a callback-based locking mechanism
+ * that allows platform-specific implementations (pthreads, FreeRTOS,
+ * bare-metal spinlocks, etc.) without introducing OS dependencies in the
+ * core wolfHSM code.
+ *
+ * Each shared resource (NVM, crypto) embeds its own whLock instance,
+ * allowing independent locking and arbitrary sharing topologies.
+ *
+ * When WOLFHSM_CFG_THREADSAFE is defined:
+ * - Lock operations use platform callbacks for actual synchronization
+ * - NULL lockConfig results in no-op locking (single-threaded mode)
+ *
+ * When WOLFHSM_CFG_THREADSAFE is not defined:
+ * - All lock operations are no-ops with zero overhead
+ */
+
+#ifndef WOLFHSM_WH_LOCK_H_
+#define WOLFHSM_WH_LOCK_H_
+
+
+/**
+ * Lock callback function signatures.
+ *
+ * All callbacks receive a user-provided context pointer (from whLockConfig).
+ * Each lock instance protects exactly one resource.
+ *
+ * Return: WH_ERROR_OK on success, negative error code on failure
+ */
+
+/** Initialize a lock - called once during setup */
+typedef int (*whLockInitCb)(void* context, const void* config);
+
+/** Cleanup a lock - called once during teardown */
+typedef int (*whLockCleanupCb)(void* context);
+
+/** Acquire exclusive lock (blocking) */
+typedef int (*whLockAcquireCb)(void* context);
+
+/** Release exclusive lock */
+typedef int (*whLockReleaseCb)(void* context);
+
+/**
+ * Lock callback table
+ *
+ * Platforms provide implementations of these callbacks. If the entire
+ * callback table is NULL, all locking operations become no-ops
+ * (single-threaded mode). Individual callbacks may also be NULL to
+ * skip specific operations.
+ */
+typedef struct whLockCb_t {
+ whLockInitCb init; /* Initialize lock resources */
+ whLockCleanupCb cleanup; /* Free lock resources */
+ whLockAcquireCb acquire; /* Acquire exclusive lock */
+ whLockReleaseCb release; /* Release exclusive lock */
+} whLockCb;
+
+/**
+ * Lock instance structure
+ *
+ * Holds callback table and platform-specific context.
+ * The context pointer is passed to all callbacks.
+ *
+ * IMPORTANT: Lock initialization and cleanup must be performed in a
+ * single-threaded context before any threads attempt to use the lock. The
+ * initialized flag is not atomic and is only safe to access during
+ * initialization/cleanup phases.
+ */
+typedef struct whLock_t {
+ const whLockCb* cb; /* Platform callbacks (may be NULL) */
+ void* context; /* Platform context (e.g., mutex pointer) */
+ int initialized; /* 1 if initialized, 0 otherwise (not atomic) */
+ uint8_t WH_PAD[4];
+} whLock;
+
+/**
+ * Lock configuration for initialization
+ */
+typedef struct whLockConfig_t {
+ const whLockCb* cb; /* Callback table */
+ void* context; /* Platform context */
+ const void* config; /* Backend-specific config (passed to init cb) */
+} whLockConfig;
+
+/**
+ * @brief Initializes a lock instance.
+ *
+ * This function initializes the lock by calling the init callback.
+ * If config is NULL or config->cb is NULL, locking is disabled
+ * (single-threaded mode) and all lock operations become no-ops.
+ *
+ * IMPORTANT: Initialization must be performed in a single-threaded context
+ * before any threads attempt to use the lock. The initialized flag is not
+ * atomic and is only safe to access during initialization/cleanup phases.
+ *
+ * On error, no state is modified.
+ *
+ * @param[in] lock Pointer to the lock structure. Must not be NULL.
+ * @param[in] config Pointer to the lock configuration (may be NULL for no-op
+ * mode).
+ * @return int Returns WH_ERROR_OK on success.
+ * Returns WH_ERROR_BADARGS if lock is NULL.
+ * Returns negative error code on callback initialization failure.
+ */
+int wh_Lock_Init(whLock* lock, const whLockConfig* config);
+
+/**
+ * @brief Cleans up a lock instance.
+ *
+ * This function cleans up the lock by calling the cleanup callback and then
+ * zeros the entire structure to make post-cleanup state distinguishable.
+ *
+ * IMPORTANT: Cleanup must only be called when no other threads are accessing
+ * the lock. Calling cleanup while the lock is held or while other threads are
+ * waiting results in undefined behavior. In typical usage, cleanup occurs
+ * during controlled shutdown sequences after all worker threads have been
+ * stopped.
+ *
+ * Cleanup must be performed in a single-threaded context, similar to
+ * initialization.
+ *
+ * This function is idempotent - calling cleanup on an already cleaned up or
+ * uninitialized lock returns WH_ERROR_OK.
+ *
+ * @param[in] lock Pointer to the lock structure.
+ * @return int Returns WH_ERROR_OK on success.
+ * Returns WH_ERROR_BADARGS if lock is NULL.
+ */
+int wh_Lock_Cleanup(whLock* lock);
+
+/**
+ * @brief Acquires exclusive access to a lock.
+ *
+ * This function blocks until the lock is acquired. If no callbacks are
+ * configured (no-op mode), this returns WH_ERROR_OK immediately.
+ *
+ * @param[in] lock Pointer to the lock structure. Must not be NULL.
+ * @return int Returns WH_ERROR_OK on success (lock acquired or no-op mode).
+ * Returns WH_ERROR_BADARGS if lock is NULL or not initialized.
+ * Returns WH_ERROR_ABORTED if the blocking operation itself failed
+ * (indicates callback failure, not contention).
+ */
+int wh_Lock_Acquire(whLock* lock);
+
+/**
+ * @brief Releases exclusive access to a lock.
+ *
+ * This function releases a previously acquired lock. If no callbacks are
+ * configured (no-op mode), this returns WH_ERROR_OK immediately.
+ *
+ * Releasing a lock that is initialized but not currently acquired returns
+ * WH_ERROR_OK (idempotent behavior).
+ *
+ * @param[in] lock Pointer to the lock structure. Must not be NULL.
+ * @return int Returns WH_ERROR_OK on success (lock released or no-op mode).
+ * Returns WH_ERROR_BADARGS if lock is NULL or not initialized.
+ * Returns WH_ERROR_LOCKED (optional) if non-owner attempts to release
+ * or owner attempts double-acquire (platform-specific).
+ */
+int wh_Lock_Release(whLock* lock);
+
+
+#endif /* WOLFHSM_WH_LOCK_H_ */
diff --git a/wolfhsm/wh_nvm.h b/wolfhsm/wh_nvm.h
index 374180d0..e41f7f7c 100644
--- a/wolfhsm/wh_nvm.h
+++ b/wolfhsm/wh_nvm.h
@@ -16,21 +16,36 @@
* You should have received a copy of the GNU General Public License
 * along with wolfHSM. If not, see <https://www.gnu.org/licenses/>.
*/
-/*
- * wolfhsm/wh_nvm.h
+/**
+ * @file wolfhsm/wh_nvm.h
+ *
+ * @brief Non-Volatile Memory (NVM) object management interface.
+ *
+ * This library provides management of NVM objects with basic metadata
+ * association to blocks of data. The backend storage is expected to have
+ * flash-style semantics with Read, Erase, and Program hooks.
*
- * Abstract library to provide management of NVM objects providing basic
- * metadata association with blocks of data. The backend storage is expected
- * to have flash-style semantics with Read, Erase, and Program hooks.
+ * This library provides reliable, atomic operations (recoverable) to ensure
+ * transactions are fully committed prior to returning success. Initial
+ * indexing and handling of incomplete transactions are allowed to take longer
+ * than ordinary runtime function calls.
*
- * This library is expected to provide reliable, atomic operations (recoverable)
- * to ensure transactions are fully committed prior to returning success.
- * Initial indexing and handling of incomplete transactions are allowed to take
- * longer than ordinary runtime function calls.
+ * NVM objects are added with a fixed length and data. Removal of objects
+ * causes the backend to replicate the entire partition without the listed
+ * objects present, which also maximizes the contiguous free space.
*
- * NVM objects are added with a fixed length and data. Removal of objects
- * causes the backend to replicate the entire partition without the
- * listed objects present, which also maximizes the contiguous free space.
+ * ## Thread Safety
+ *
+ * When built with `WOLFHSM_CFG_THREADSAFE`, the NVM context contains an
+ * embedded lock. The lock lifecycle is managed automatically:
+ * - wh_Nvm_Init() initializes the lock
+ * - wh_Nvm_Cleanup() cleans up the lock
+ *
+ * However, callers are responsible for acquiring and releasing the lock
+ * around NVM operations using the WH_NVM_LOCK/WH_NVM_UNLOCK helper macros,
+ * which transparently account for build-time locking support. The NVM API
+ * functions themselves do NOT acquire the lock internally, allowing callers to
+ * group multiple operations under a single lock acquisition.
*
*/
@@ -44,98 +59,425 @@
#include "wolfhsm/wh_common.h" /* For whNvm types */
#include "wolfhsm/wh_keycache.h" /* For whKeyCacheContext */
+#include "wolfhsm/wh_lock.h"
+/**
+ * @brief NVM backend callback table.
+ *
+ * Platforms provide implementations of these callbacks to interface with
+ * the underlying non-volatile storage hardware. All callbacks receive a
+ * platform-specific context pointer.
+ */
typedef struct {
- int (*Init)(void* context, const void *config);
+ /** Initialize the NVM backend */
+ int (*Init)(void* context, const void* config);
+
+ /** Cleanup the NVM backend */
int (*Cleanup)(void* context);
- /* Retrieve the current free space, or the maximum data object length that can
- * be successfully created and the number of free entries in the directory.
- * Also get the sizes that could be reclaimed if the partition was regenerated:
- * wh_Nvm_DestroyObjects(c, 0, NULL);
- * Any out_ parameters may be NULL without error. */
- int (*GetAvailable)(void* context,
- uint32_t *out_avail_size, whNvmId *out_avail_objects,
- uint32_t *out_reclaim_size, whNvmId *out_reclaim_objects);
-
- /* Add a new object. Duplicate ids are allowed, but only the most recent
- * version will be accessible. */
- int (*AddObject)(void* context, whNvmMetadata *meta,
- whNvmSize data_len, const uint8_t* data);
-
- /* Retrieve the next matching id starting at start_id. Sets out_count to the
- * total number of id's that match access and flags. */
+ /**
+ * Retrieve the current free space, or the maximum data object length that
+ * can be successfully created and the number of free entries in the
+ * directory. Also get the sizes that could be reclaimed if the partition
+ * was regenerated via wh_Nvm_DestroyObjects(c, 0, NULL). Any out_
+ * parameters may be NULL without error.
+ */
+ int (*GetAvailable)(void* context, uint32_t* out_avail_size,
+ whNvmId* out_avail_objects, uint32_t* out_reclaim_size,
+ whNvmId* out_reclaim_objects);
+
+ /**
+ * Add a new object. Duplicate IDs are allowed, but only the most recent
+ * version will be accessible.
+ */
+ int (*AddObject)(void* context, whNvmMetadata* meta, whNvmSize data_len,
+ const uint8_t* data);
+
+ /**
+ * Retrieve the next matching ID starting at start_id. Sets out_count to
+ * the total number of IDs that match access and flags.
+ */
int (*List)(void* context, whNvmAccess access, whNvmFlags flags,
- whNvmId start_id, whNvmId *out_count, whNvmId *out_id);
-
- /* Retrieve object metadata using the id */
- int (*GetMetadata)(void* context, whNvmId id,
- whNvmMetadata* meta);
-
- /* Destroy a list of objects by replicating the current state without the id's
- * in the provided list. Id's in the list that are not present do not cause an
- * error. Atomically: erase the inactive partition, add all remaining objects,
- * switch the active partition, and erase the old active (now inactive)
- * partition. Interruption prior completing the write of the new partition will
- * recover as before the replication. Interruption after the new partition is
- * fully populated will recover as after, including restarting erasure. */
+ whNvmId start_id, whNvmId* out_count, whNvmId* out_id);
+
+ /** Retrieve object metadata using the ID */
+ int (*GetMetadata)(void* context, whNvmId id, whNvmMetadata* meta);
+
+ /**
+ * Destroy a list of objects by replicating the current state without the
+ * IDs in the provided list. IDs in the list that are not present do not
+ * cause an error. Atomically: erase the inactive partition, add all
+ * remaining objects, switch the active partition, and erase the old active
+ * (now inactive) partition. Interruption prior to completing the write of
+ * the new partition will recover as before the replication. Interruption
+ * after the new partition is fully populated will recover as after,
+ * including restarting erasure.
+ */
int (*DestroyObjects)(void* context, whNvmId list_count,
- const whNvmId* id_list);
+ const whNvmId* id_list);
- /* Read the data of the object starting at the byte offset */
+ /** Read the data of the object starting at the byte offset */
int (*Read)(void* context, whNvmId id, whNvmSize offset, whNvmSize data_len,
uint8_t* data);
} whNvmCb;
-/** NVM Context helper structs and functions */
-/* Simple helper context structure associated with an NVM instance */
+/**
+ * @brief NVM context structure.
+ *
+ * Holds the state for an NVM instance including the backend callbacks,
+ * platform-specific context, and optional thread synchronization lock.
+ *
+ * When `WOLFHSM_CFG_GLOBAL_KEYS` is enabled, also contains the global key
+ * cache for keys shared across all clients.
+ */
typedef struct whNvmContext_t {
- whNvmCb *cb;
- void* context;
+ whNvmCb* cb; /**< Backend callback table */
+ void* context; /**< Platform-specific backend context */
#if !defined(WOLFHSM_CFG_NO_CRYPTO) && defined(WOLFHSM_CFG_GLOBAL_KEYS)
- whKeyCacheContext globalCache; /* Global key cache */
+ whKeyCacheContext globalCache; /**< Global key cache (shared keys) */
+#endif
+#ifdef WOLFHSM_CFG_THREADSAFE
+ whLock lock; /**< Lock for serializing NVM and global cache operations */
#endif
} whNvmContext;
-/* Simple helper configuration structure associated with an NVM instance */
+/**
+ * @brief NVM configuration structure.
+ *
+ * Used to initialize an NVM context with wh_Nvm_Init().
+ */
typedef struct whNvmConfig_t {
- whNvmCb *cb;
- void* context;
- void* config;
+ whNvmCb* cb; /**< Backend callback table */
+ void* context; /**< Platform-specific backend context */
+ void* config; /**< Backend-specific configuration */
+#ifdef WOLFHSM_CFG_THREADSAFE
+ whLockConfig*
+ lockConfig; /**< Lock configuration (NULL for no-op locking) */
+#endif
} whNvmConfig;
-int wh_Nvm_Init(whNvmContext* context, const whNvmConfig *config);
+/**
+ * @brief Initializes an NVM context.
+ *
+ * This function initializes the NVM context with the provided configuration,
+ * including setting up the backend callbacks and platform-specific context.
+ * If crypto is enabled with global keys, the global key cache is also
+ * initialized.
+ *
+ * When built with `WOLFHSM_CFG_THREADSAFE`, this function initializes the
+ * embedded lock using the provided lock configuration. If lockConfig is NULL,
+ * locking is disabled (single-threaded mode). Note that while init/cleanup
+ * manage the lock lifecycle, the caller is responsible for explicitly
+ * acquiring and releasing the lock around NVM operations using wh_Nvm_Lock()
+ * and wh_Nvm_Unlock().
+ *
+ * @param[in,out] context Pointer to the NVM context to initialize.
+ * Must not be NULL.
+ * @param[in] config Pointer to the NVM configuration. Must not be NULL.
+ * @return int WH_ERROR_OK on success.
+ * WH_ERROR_BADARGS if context or config is NULL.
+ * Other negative error codes on backend or lock initialization
+ * failure.
+ */
+int wh_Nvm_Init(whNvmContext* context, const whNvmConfig* config);
+
+/**
+ * @brief Cleans up an NVM context.
+ *
+ * This function cleans up the NVM context by calling the backend cleanup
+ * callback, clearing any global key cache if applicable, and zeroing the
+ * context structure.
+ *
+ * When built with `WOLFHSM_CFG_THREADSAFE`, this function also cleans up the
+ * embedded lock. Cleanup must only be called when no other threads are
+ * accessing the NVM context. Note that while init/cleanup manage the lock
+ * lifecycle, the caller is responsible for ensuring no threads hold the lock
+ * when cleanup is called.
+ *
+ * @param[in,out] context Pointer to the NVM context to cleanup.
+ * Must not be NULL and must have been initialized.
+ * @return int WH_ERROR_OK on success.
+ * WH_ERROR_BADARGS if context is NULL or not initialized.
+ * WH_ERROR_ABORTED if the backend cleanup callback is NULL.
+ * Other negative error codes on backend cleanup failure.
+ */
int wh_Nvm_Cleanup(whNvmContext* context);
-int wh_Nvm_GetAvailable(whNvmContext* context,
- uint32_t *out_avail_size, whNvmId *out_avail_objects,
- uint32_t *out_reclaim_size, whNvmId *out_reclaim_objects);
+/**
+ * @brief Retrieves available NVM space and object capacity.
+ *
+ * Gets the current free space (maximum data object length that can be created)
+ * and the number of free directory entries. Also retrieves the sizes that
+ * could be reclaimed if the partition was regenerated via
+ * wh_Nvm_DestroyObjects(context, 0, NULL).
+ *
+ * All output parameters may be NULL without error if that information is not
+ * needed.
+ *
+ * @param[in] context Pointer to the NVM context. Must not be NULL.
+ * @param[out] out_avail_size Current available data size in bytes (optional).
+ * @param[out] out_avail_objects Current available object slots (optional).
+ * @param[out] out_reclaim_size Reclaimable data size in bytes (optional).
+ * @param[out] out_reclaim_objects Reclaimable object slots (optional).
+ * @return int WH_ERROR_OK on success.
+ * WH_ERROR_BADARGS if context is NULL or not initialized.
+ * WH_ERROR_ABORTED if the backend callback is NULL.
+ */
+int wh_Nvm_GetAvailable(whNvmContext* context, uint32_t* out_avail_size,
+ whNvmId* out_avail_objects, uint32_t* out_reclaim_size,
+ whNvmId* out_reclaim_objects);
-int wh_Nvm_AddObjectWithReclaim(whNvmContext* context, whNvmMetadata *meta,
- whNvmSize dataLen, const uint8_t* data);
+/**
+ * @brief Adds an object to NVM, reclaiming space if necessary.
+ *
+ * Attempts to add an object to NVM. If there is insufficient space, this
+ * function will attempt to reclaim space by calling wh_Nvm_DestroyObjects()
+ * with an empty list (which compacts the partition) before retrying.
+ *
+ * @param[in] context Pointer to the NVM context. Must not be NULL.
+ * @param[in,out] meta Pointer to the object metadata. Must not be NULL.
+ * @param[in] dataLen Length of the object data in bytes.
+ * @param[in] data Pointer to the object data.
+ * @return int WH_ERROR_OK on success.
+ * WH_ERROR_BADARGS if context is NULL.
+ * WH_ERROR_NOSPACE if insufficient space even after reclaim.
+ * Other negative error codes on backend failure.
+ */
+int wh_Nvm_AddObjectWithReclaim(whNvmContext* context, whNvmMetadata* meta,
+ whNvmSize dataLen, const uint8_t* data);
+
+/**
+ * @brief Adds an object to NVM.
+ *
+ * Adds a new object with the specified metadata and data. Duplicate IDs are
+ * allowed, but only the most recent version will be accessible.
+ *
+ * @param[in] context Pointer to the NVM context. Must not be NULL.
+ * @param[in,out] meta Pointer to the object metadata. Must not be NULL.
+ * @param[in] data_len Length of the object data in bytes.
+ * @param[in] data Pointer to the object data.
+ * @return int WH_ERROR_OK on success.
+ * WH_ERROR_BADARGS if context is NULL or not initialized.
+ * WH_ERROR_ABORTED if the backend callback is NULL.
+ * Other negative error codes on backend failure.
+ */
+int wh_Nvm_AddObject(whNvmContext* context, whNvmMetadata* meta,
+ whNvmSize data_len, const uint8_t* data);
-int wh_Nvm_AddObject(whNvmContext* context, whNvmMetadata *meta,
- whNvmSize data_len, const uint8_t* data);
+/**
+ * @brief Adds an object to NVM with policy checking.
+ *
+ * Same as wh_Nvm_AddObject(), but first checks if an existing object with the
+ * same ID has the WH_NVM_FLAGS_NONMODIFIABLE flag set. If so, returns an
+ * access error.
+ *
+ * @param[in] context Pointer to the NVM context. Must not be NULL.
+ * @param[in,out] meta Pointer to the object metadata. Must not be NULL.
+ * @param[in] data_len Length of the object data in bytes.
+ * @param[in] data Pointer to the object data.
+ * @return int WH_ERROR_OK on success.
+ * WH_ERROR_ACCESS if existing object is non-modifiable.
+ * WH_ERROR_BADARGS if context is NULL or not initialized.
+ * Other negative error codes on backend failure.
+ */
int wh_Nvm_AddObjectChecked(whNvmContext* context, whNvmMetadata* meta,
whNvmSize data_len, const uint8_t* data);
-int wh_Nvm_List(whNvmContext* context,
- whNvmAccess access, whNvmFlags flags, whNvmId start_id,
- whNvmId *out_count, whNvmId *out_id);
+/**
+ * @brief Lists objects in NVM matching specified criteria.
+ *
+ * Retrieves the next matching object ID starting at start_id. Also sets
+ * out_count to the total number of IDs that match the access and flags
+ * criteria.
+ *
+ * @param[in] context Pointer to the NVM context. Must not be NULL.
+ * @param[in] access Access level filter for matching objects.
+ * @param[in] flags Flags filter for matching objects.
+ * @param[in] start_id ID to start searching from.
+ * @param[out] out_count Total count of matching objects (optional).
+ * @param[out] out_id Next matching object ID (optional).
+ * @return int WH_ERROR_OK on success.
+ * WH_ERROR_BADARGS if context is NULL or not initialized.
+ * WH_ERROR_ABORTED if the backend callback is NULL.
+ * WH_ERROR_NOTFOUND if no matching objects found.
+ *
+ * @note This function returns one matching ID per call. Enumerating all
+ * objects requires multiple calls with successive start_id values.
+ * In thread-safe builds, if the NVM lock is not held across calls,
+ * concurrent modifications may result in skipped or repeated entries.
+ */
+int wh_Nvm_List(whNvmContext* context, whNvmAccess access, whNvmFlags flags,
+ whNvmId start_id, whNvmId* out_count, whNvmId* out_id);
-int wh_Nvm_GetMetadata(whNvmContext* context, whNvmId id,
- whNvmMetadata* meta);
+/**
+ * @brief Retrieves metadata for an NVM object.
+ *
+ * Gets the metadata associated with the specified object ID.
+ *
+ * @param[in] context Pointer to the NVM context. Must not be NULL.
+ * @param[in] id ID of the object to retrieve metadata for.
+ * @param[out] meta Pointer to store the retrieved metadata.
+ * @return int WH_ERROR_OK on success.
+ * WH_ERROR_BADARGS if context is NULL or not initialized.
+ * WH_ERROR_ABORTED if the backend callback is NULL.
+ * WH_ERROR_NOTFOUND if the object does not exist.
+ */
+int wh_Nvm_GetMetadata(whNvmContext* context, whNvmId id, whNvmMetadata* meta);
+/**
+ * @brief Destroys a list of objects from NVM.
+ *
+ * Destroys objects by replicating the current state without the IDs in the
+ * provided list. IDs in the list that are not present do not cause an error.
+ *
+ * The operation is atomic: erase the inactive partition, add all remaining
+ * objects, switch the active partition, and erase the old active (now
+ * inactive) partition. Interruption prior to completing the write of the new
+ * partition will recover as before the replication. Interruption after the
+ * new partition is fully populated will recover as after, including
+ * restarting erasure.
+ *
+ * If list_count is 0 and id_list is NULL, this function compacts the
+ * partition by replicating without removing any objects, which reclaims
+ * space from previously deleted objects.
+ *
+ * @param[in] context Pointer to the NVM context. Must not be NULL.
+ * @param[in] list_count Number of IDs in the list (0 for compaction only).
+ * @param[in] id_list Array of object IDs to destroy (NULL if list_count is 0).
+ * @return int WH_ERROR_OK on success.
+ * WH_ERROR_BADARGS if context is NULL or not initialized.
+ * WH_ERROR_ABORTED if the backend callback is NULL.
+ * Other negative error codes on backend failure.
+ */
int wh_Nvm_DestroyObjects(whNvmContext* context, whNvmId list_count,
- const whNvmId* id_list);
+ const whNvmId* id_list);
+
+/**
+ * @brief Destroys a list of objects from NVM with policy checking.
+ *
+ * Same as wh_Nvm_DestroyObjects(), but first checks if any object in the list
+ * has the WH_NVM_FLAGS_NONMODIFIABLE or WH_NVM_FLAGS_NONDESTROYABLE flags set.
+ * If so, returns an access error without destroying any objects.
+ *
+ * @param[in] context Pointer to the NVM context. Must not be NULL.
+ * @param[in] list_count Number of IDs in the list.
+ * @param[in] id_list Array of object IDs to destroy.
+ * @return int WH_ERROR_OK on success.
+ * WH_ERROR_ACCESS if any object is non-modifiable or
+ * non-destroyable.
+ * WH_ERROR_BADARGS if context is NULL, not initialized, or
+ * id_list is NULL with non-zero list_count.
+ * Other negative error codes on backend failure.
+ */
int wh_Nvm_DestroyObjectsChecked(whNvmContext* context, whNvmId list_count,
const whNvmId* id_list);
+/**
+ * @brief Reads data from an NVM object.
+ *
+ * Reads data from the specified object starting at the given byte offset.
+ *
+ * @param[in] context Pointer to the NVM context. Must not be NULL.
+ * @param[in] id ID of the object to read from.
+ * @param[in] offset Byte offset within the object to start reading.
+ * @param[in] data_len Number of bytes to read.
+ * @param[out] data Buffer to store the read data.
+ * @return int WH_ERROR_OK on success.
+ * WH_ERROR_BADARGS if context is NULL or not initialized.
+ * WH_ERROR_ABORTED if the backend callback is NULL.
+ * WH_ERROR_NOTFOUND if the object does not exist.
+ * Other negative error codes on backend failure.
+ */
int wh_Nvm_Read(whNvmContext* context, whNvmId id, whNvmSize offset,
whNvmSize data_len, uint8_t* data);
+
+/**
+ * @brief Reads data from an NVM object with policy checking.
+ *
+ * Same as wh_Nvm_Read(), but first checks if the object has the
+ * WH_NVM_FLAGS_NONEXPORTABLE flag set. If so, returns an access error.
+ *
+ * @param[in] context Pointer to the NVM context. Must not be NULL.
+ * @param[in] id ID of the object to read from.
+ * @param[in] offset Byte offset within the object to start reading.
+ * @param[in] data_len Number of bytes to read.
+ * @param[out] data Buffer to store the read data.
+ * @return int WH_ERROR_OK on success.
+ * WH_ERROR_ACCESS if object is non-exportable.
+ * WH_ERROR_BADARGS if context is NULL or not initialized.
+ * WH_ERROR_NOTFOUND if the object does not exist.
+ * Other negative error codes on backend failure.
+ */
int wh_Nvm_ReadChecked(whNvmContext* context, whNvmId id, whNvmSize offset,
whNvmSize data_len, uint8_t* data);
+/**
+ * @brief Thread-safe access to NVM resources.
+ *
+ * When built with `WOLFHSM_CFG_THREADSAFE`, callers must explicitly acquire
+ * and release the NVM lock around operations that access shared NVM state.
+ * The NVM API functions do NOT acquire locks internally, allowing callers
+ * to group multiple operations under a single lock acquisition for atomicity.
+ *
+ * Use the WH_NVM_LOCK() and WH_NVM_UNLOCK() macros for portable code that
+ * compiles correctly regardless of whether thread safety is enabled.
+ *
+ * Example usage:
+ * @code
+ * int ret;
+ * ret = WH_NVM_LOCK(nvm);
+ * if (ret == WH_ERROR_OK) {
+ * ret = wh_Nvm_AddObject(nvm, &meta, dataLen, data);
+ * (void)WH_NVM_UNLOCK(nvm);
+ * }
+ * @endcode
+ * @{
+ */
+
+#ifdef WOLFHSM_CFG_THREADSAFE
+
+/**
+ * @brief Acquires the NVM lock. Should not be used directly; callers should
+ * instead use the WH_NVM_LOCK() macro, as it compiles to a no-op when
+ * WOLFHSM_CFG_THREADSAFE is not defined.
+ *
+ * Blocks until exclusive access to the NVM context is acquired. Must be
+ * paired with a corresponding call to wh_Nvm_Unlock().
+ *
+ * @param[in] nvm Pointer to the NVM context. Must not be NULL.
+ * @return int WH_ERROR_OK on success.
+ * WH_ERROR_BADARGS if nvm is NULL.
+ * Other negative error codes on lock acquisition failure.
+ */
+int wh_Nvm_Lock(whNvmContext* nvm);
+
+/**
+ * @brief Releases the NVM lock. Should not be used directly; callers should
+ * instead use the WH_NVM_UNLOCK() macro, as it compiles to a no-op when
+ * WOLFHSM_CFG_THREADSAFE is not defined.
+ *
+ * Releases exclusive access to the NVM context previously acquired via
+ * wh_Nvm_Lock().
+ *
+ * @param[in] nvm Pointer to the NVM context. Must not be NULL.
+ * @return int WH_ERROR_OK on success.
+ * WH_ERROR_BADARGS if nvm is NULL.
+ * Other negative error codes on lock release failure.
+ */
+int wh_Nvm_Unlock(whNvmContext* nvm);
+
+/* Helper macro for NVM locking */
+#define WH_NVM_LOCK(nvm) wh_Nvm_Lock(nvm)
+/* Helper macro for NVM unlocking */
+#define WH_NVM_UNLOCK(nvm) wh_Nvm_Unlock(nvm)
+
+#else /* !WOLFHSM_CFG_THREADSAFE */
+
+#define WH_NVM_LOCK(nvm) (WH_ERROR_OK)
+#define WH_NVM_UNLOCK(nvm) (WH_ERROR_OK)
+
+#endif /* WOLFHSM_CFG_THREADSAFE */
+
#endif /* !WOLFHSM_WH_NVM_H_ */
diff --git a/wolfhsm/wh_server.h b/wolfhsm/wh_server.h
index 4e922658..6d7f6311 100644
--- a/wolfhsm/wh_server.h
+++ b/wolfhsm/wh_server.h
@@ -497,4 +497,15 @@ int whServerDma_CopyToClient(struct whServerContext_t* server,
whServerDmaFlags flags);
#endif /* WOLFHSM_CFG_DMA */
+/** Server NVM Locking API for handler-level thread safety */
+#ifdef WOLFHSM_CFG_THREADSAFE
+int wh_Server_NvmLock(whServerContext* server);
+int wh_Server_NvmUnlock(whServerContext* server);
+#define WH_SERVER_NVM_LOCK(server) wh_Server_NvmLock(server)
+#define WH_SERVER_NVM_UNLOCK(server) wh_Server_NvmUnlock(server)
+#else
+#define WH_SERVER_NVM_LOCK(server) (WH_ERROR_OK)
+#define WH_SERVER_NVM_UNLOCK(server) (WH_ERROR_OK)
+#endif
+
#endif /* !WOLFHSM_WH_SERVER_H_ */
diff --git a/wolfhsm/wh_settings.h b/wolfhsm/wh_settings.h
index b33ecc9e..1c828f6f 100644
--- a/wolfhsm/wh_settings.h
+++ b/wolfhsm/wh_settings.h
@@ -102,6 +102,13 @@
* WOLFHSM_CFG_DEBUG_VERBOSE - If defined, enable verbose debug output
* Default: Not defined
*
+ * WOLFHSM_CFG_THREADSAFE - If defined, enable thread-safe access to shared
+ * server resources. Requires platform to provide lock callbacks via
+ * whLockConfig. When enabled, protects global key cache, NVM operations,
+ * and hardware crypto (if shared). When not defined, all lock operations
+ * are no-ops with zero overhead.
+ * Default: Not defined
+ *
* WOLFHSM_CFG_PRINTF - Function or macro for printf redirection. Must have
* signature: int func(const char* fmt, ...)
* Default: stdlib printf
@@ -165,6 +172,8 @@
#include
#endif /* WOLFSSL_USER_SETTINGS */
+#include "wolfssl/wolfcrypt/types.h"
+
#if defined(WOLFHSM_CFG_DEBUG) || defined(WOLFHSM_CFG_DEBUG_VERBOSE)
#define WOLFHSM_CFG_HEXDUMP
#endif