@@ -1,6 +1,7 @@
 /*
  * ASPEED Hash and Crypto Engine
  *
+ * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
  * Copyright (C) 2021 IBM Corp.
  *
  * Joel Stanley <joel@jms.id.au>
@@ -151,49 +152,32 @@ static int reconstruct_iov(AspeedHACEState *s, struct iovec *iov, int id,
     return iov_count;
 }
 
-/**
- * Generate iov for accumulative mode.
- *
- * @param s aspeed hace state object
- * @param iov iov of the current request
- * @param id index of the current iov
- * @param req_len length of the current request
- *
- * @return count of iov
- */
-static int gen_acc_mode_iov(AspeedHACEState *s, struct iovec *iov, int id,
-                            hwaddr *req_len)
-{
-    uint32_t pad_offset;
-    uint32_t total_msg_len;
-    s->total_req_len += *req_len;
-
-    if (has_padding(s, &iov[id], *req_len, &total_msg_len, &pad_offset)) {
-        if (s->iov_count) {
-            return reconstruct_iov(s, iov, id, &pad_offset);
-        }
-
-        *req_len -= s->total_req_len - total_msg_len;
-        s->total_req_len = 0;
-        iov[id].iov_len = *req_len;
-    } else {
-        s->iov_cache[s->iov_count].iov_base = iov->iov_base;
-        s->iov_cache[s->iov_count].iov_len = *req_len;
-        ++s->iov_count;
-    }
-
-    return id + 1;
-}
-
 static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
                               bool acc_mode)
 {
     struct iovec iov[ASPEED_HACE_MAX_SG];
+    uint32_t total_msg_len;
+    uint32_t pad_offset;
     g_autofree uint8_t *digest_buf = NULL;
     size_t digest_len = 0;
-    int niov = 0;
+    bool sg_acc_mode_final_request = false;
    int i;
     void *haddr;
+    Error *local_err = NULL;
+
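+    /*
+     * In accumulative mode a single hash context persists across
+     * requests; create it lazily on the first request.
+     */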
+    if (acc_mode && s->hash_ctx == NULL) {
+        s->hash_ctx = qcrypto_hash_new(algo, &local_err);
+        if (s->hash_ctx == NULL) {
+            qemu_log_mask(LOG_GUEST_ERROR, "qcrypto hash failed : %s",
+                          error_get_pretty(local_err));
+            error_free(local_err);
+            return;
+        }
+    }
 
     if (sg_mode) {
         uint32_t len = 0;
@@ -226,8 +210,17 @@ static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
             }
             iov[i].iov_base = haddr;
             if (acc_mode) {
-                niov = gen_acc_mode_iov(s, iov, i, &plen);
-
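+                /* Running total of bytes seen across accumulative requests */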
+                s->total_req_len += plen;
+
+                if (has_padding(s, &iov[i], plen, &total_msg_len,
+                                &pad_offset)) {
+                    /* Padding being present indicates the final request */
+                    sg_acc_mode_final_request = true;
+                    iov[i].iov_len = pad_offset;
+                } else {
+                    iov[i].iov_len = plen;
+                }
             } else {
                 iov[i].iov_len = plen;
             }
@@ -252,21 +245,46 @@ static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
              * required to check whether cache is empty. If no, we should
              * combine cached iov and the current iov.
              */
-            uint32_t total_msg_len;
-            uint32_t pad_offset;
             s->total_req_len += len;
             if (has_padding(s, iov, len, &total_msg_len, &pad_offset)) {
-                niov = reconstruct_iov(s, iov, 0, &pad_offset);
+                i = reconstruct_iov(s, iov, 0, &pad_offset);
             }
         }
     }
 
-    if (niov) {
-        i = niov;
-    }
-
-    if (qcrypto_hash_bytesv(algo, iov, i, &digest_buf, &digest_len, NULL) < 0) {
-        qemu_log_mask(LOG_GUEST_ERROR, "%s: qcrypto failed\n", __func__);
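+    /*
+     * Accumulative mode: feed the request into the persistent context
+     * and finalize only once padding has marked the final request.
+     */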
+    if (acc_mode) {
+        if (qcrypto_hash_updatev(s->hash_ctx, iov, i, &local_err) < 0) {
+            qemu_log_mask(LOG_GUEST_ERROR, "qcrypto hash update failed : %s",
+                          error_get_pretty(local_err));
+            error_free(local_err);
+            return;
+        }
+
+        if (sg_acc_mode_final_request) {
+            if (qcrypto_hash_finalize_bytes(s->hash_ctx, &digest_buf,
+                                            &digest_len, &local_err)) {
+                qemu_log_mask(LOG_GUEST_ERROR,
+                              "qcrypto hash finalize failed : %s",
+                              error_get_pretty(local_err));
+                error_free(local_err);
+                local_err = NULL;
+            }
+
+            qcrypto_hash_free(s->hash_ctx);
+
+            s->hash_ctx = NULL;
+            s->iov_count = 0;
+            s->total_req_len = 0;
+        }
+    } else if (qcrypto_hash_bytesv(algo, iov, i, &digest_buf,
+                                   &digest_len, &local_err) < 0) {
+        qemu_log_mask(LOG_GUEST_ERROR, "qcrypto hash bytesv failed : %s",
+                      error_get_pretty(local_err));
+        error_free(local_err);
         return;
     }
 
@@ -397,6 +415,12 @@ static void aspeed_hace_reset(DeviceState *dev)
 {
     struct AspeedHACEState *s = ASPEED_HACE(dev);
 
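+    /* Drop any hash context left over from an unfinished accumulation */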
+    if (s->hash_ctx != NULL) {
+        qcrypto_hash_free(s->hash_ctx);
+        s->hash_ctx = NULL;
+    }
+
     memset(s->regs, 0, sizeof(s->regs));
     s->iov_count = 0;
     s->total_req_len = 0;