Diffstat (limited to 'roms/skiboot/libflash')
28 files changed, 12594 insertions, 0 deletions
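For orientation before the raw diff, here is a minimal sketch of how the blocklevel API introduced by this change might be consumed, based only on the declarations in blocklevel.h and file.h as they appear below. The include paths, the "/dev/mtd0" device path and the 4 KiB transfer size are assumptions made for the example, not part of the diff.

/*
 * Illustrative sketch only -- not part of this diff. Opens a blocklevel
 * device backed by a file or MTD node, queries its geometry, then does a
 * read and a smart write through the generic blocklevel layer.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#include <libflash/blocklevel.h>	/* blocklevel_read/smart_write/get_info (path assumed) */
#include <libflash/file.h>		/* file_init_path/file_exit_close (path assumed) */

int main(void)
{
	struct blocklevel_device *bl = NULL;
	uint64_t total_size;
	uint32_t erase_granule;
	const char *name;
	uint8_t *buf;
	int rc;

	/* Back a blocklevel device with a regular file or MTD node (hypothetical path). */
	rc = file_init_path("/dev/mtd0", NULL, true, &bl);
	if (rc)
		return rc;

	rc = blocklevel_get_info(bl, &name, &total_size, &erase_granule);
	if (rc)
		goto out;
	printf("%s: 0x%" PRIx64 " bytes, erase granule 0x%" PRIx32 "\n",
	       name, total_size, erase_granule);

	buf = malloc(4096);
	if (!buf) {
		rc = 1;
		goto out;
	}

	/*
	 * Plain read; ECC stripping is handled transparently for any region
	 * previously registered with blocklevel_ecc_protect().
	 */
	rc = blocklevel_read(bl, 0, buf, 4096);
	if (rc)
		goto out_free;

	/*
	 * smart_write reads the flash back first and only erases/writes the
	 * erase blocks that actually differ from the new data.
	 */
	rc = blocklevel_smart_write(bl, 0, buf, 4096);

out_free:
	free(buf);
out:
	file_exit_close(bl);
	return rc;
}

The diff itself follows.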
diff --git a/roms/skiboot/libflash/Makefile.inc b/roms/skiboot/libflash/Makefile.inc new file mode 100644 index 000000000..ab094b92e --- /dev/null +++ b/roms/skiboot/libflash/Makefile.inc @@ -0,0 +1,7 @@ +LIBFLASH_SRCS = libflash.c libffs.c ecc.c blocklevel.c mbox-flash.c ipmi-hiomap.c +LIBFLASH_OBJS = $(LIBFLASH_SRCS:%.c=%.o) + +SUBDIRS += libflash +LIBFLASH = libflash/built-in.a + +$(LIBFLASH): $(LIBFLASH_OBJS:%=libflash/%) diff --git a/roms/skiboot/libflash/blocklevel.c b/roms/skiboot/libflash/blocklevel.c new file mode 100644 index 000000000..f11f337f4 --- /dev/null +++ b/roms/skiboot/libflash/blocklevel.c @@ -0,0 +1,741 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* Copyright 2013-2018 IBM Corp. */ + +#include <stdlib.h> +#include <unistd.h> +#include <stdio.h> +#include <stdbool.h> +#include <errno.h> +#include <string.h> +#include <inttypes.h> + +#include <libflash/libflash.h> +#include <libflash/errors.h> + +#include "blocklevel.h" +#include "ecc.h" + +#define PROT_REALLOC_NUM 25 + +/* This function returns tristate values. + * 1 - The region is ECC protected + * 0 - The region is not ECC protected + * -1 - Partially protected + */ +static int ecc_protected(struct blocklevel_device *bl, uint64_t pos, uint64_t len, uint64_t *start) +{ + int i; + + /* Length of 0 is nonsensical so add 1 */ + if (len == 0) + len = 1; + + for (i = 0; i < bl->ecc_prot.n_prot; i++) { + /* Fits entirely within the range */ + if (bl->ecc_prot.prot[i].start <= pos && + bl->ecc_prot.prot[i].start + bl->ecc_prot.prot[i].len >= pos + len) { + if (start) + *start = bl->ecc_prot.prot[i].start; + return 1; + } + + /* + * Even if ranges are merged we can't currently guarantee two + * contiguous regions are sanely ECC protected so a partial fit + * is no good. + */ + if ((bl->ecc_prot.prot[i].start >= pos && bl->ecc_prot.prot[i].start < pos + len) || + (bl->ecc_prot.prot[i].start <= pos && + bl->ecc_prot.prot[i].start + bl->ecc_prot.prot[i].len > pos)) { + if (start) + *start = bl->ecc_prot.prot[i].start; + return -1; + } + } + return 0; +} + +static uint64_t with_ecc_pos(uint64_t ecc_start, uint64_t pos) +{ + return pos + ((pos - ecc_start) / (BYTES_PER_ECC)); +} + +static int reacquire(struct blocklevel_device *bl) +{ + if (!bl->keep_alive && bl->reacquire) + return bl->reacquire(bl); + return 0; +} + +static int release(struct blocklevel_device *bl) +{ + int rc = 0; + if (!bl->keep_alive && bl->release) { + /* This is the error return path a lot, preserve errno */ + int err = errno; + rc = bl->release(bl); + errno = err; + } + return rc; +} + +int blocklevel_raw_read(struct blocklevel_device *bl, uint64_t pos, void *buf, uint64_t len) +{ + int rc; + + FL_DBG("%s: 0x%" PRIx64 "\t%p\t0x%" PRIx64 "\n", __func__, pos, buf, len); + if (!bl || !bl->read || !buf) { + errno = EINVAL; + return FLASH_ERR_PARM_ERROR; + } + + rc = reacquire(bl); + if (rc) + return rc; + + rc = bl->read(bl, pos, buf, len); + + release(bl); + + return rc; +} + +int blocklevel_read(struct blocklevel_device *bl, uint64_t pos, void *buf, uint64_t len) +{ + int rc, ecc_protection; + struct ecc64 *buffer; + uint64_t ecc_pos, ecc_start, ecc_diff, ecc_len; + + FL_DBG("%s: 0x%" PRIx64 "\t%p\t0x%" PRIx64 "\n", __func__, pos, buf, len); + if (!bl || !buf) { + errno = EINVAL; + return FLASH_ERR_PARM_ERROR; + } + + ecc_protection = ecc_protected(bl, pos, len, &ecc_start); + + FL_DBG("%s: 0x%" PRIx64 " for 0x%" PRIx64 " ecc=%s\n", + __func__, pos, len, ecc_protection ? + (ecc_protection == -1 ? 
"partial" : "yes") : "no"); + + if (!ecc_protection) + return blocklevel_raw_read(bl, pos, buf, len); + + /* + * The region we're reading to has both ecc protection and not. + * Perhaps one day in the future blocklevel can cope with this. + */ + if (ecc_protection == -1) { + FL_ERR("%s: Can't cope with partial ecc\n", __func__); + errno = EINVAL; + return FLASH_ERR_PARM_ERROR; + } + + pos = with_ecc_pos(ecc_start, pos); + + ecc_pos = ecc_buffer_align(ecc_start, pos); + ecc_diff = pos - ecc_pos; + ecc_len = ecc_buffer_size(len + ecc_diff); + + FL_DBG("%s: adjusted_pos: 0x%" PRIx64 ", ecc_pos: 0x%" PRIx64 + ", ecc_diff: 0x%" PRIx64 ", ecc_len: 0x%" PRIx64 "\n", + __func__, pos, ecc_pos, ecc_diff, ecc_len); + buffer = malloc(ecc_len); + if (!buffer) { + errno = ENOMEM; + rc = FLASH_ERR_MALLOC_FAILED; + goto out; + } + + rc = blocklevel_raw_read(bl, ecc_pos, buffer, ecc_len); + if (rc) + goto out; + + /* + * Could optimise and simply call memcpy_from_ecc() if ecc_diff + * == 0 but _unaligned checks and bascially does that for us + */ + if (memcpy_from_ecc_unaligned(buf, buffer, len, ecc_diff)) { + errno = EBADF; + rc = FLASH_ERR_ECC_INVALID; + } + +out: + free(buffer); + return rc; +} + +int blocklevel_raw_write(struct blocklevel_device *bl, uint64_t pos, + const void *buf, uint64_t len) +{ + int rc; + + FL_DBG("%s: 0x%" PRIx64 "\t%p\t0x%" PRIx64 "\n", __func__, pos, buf, len); + if (!bl || !bl->write || !buf) { + errno = EINVAL; + return FLASH_ERR_PARM_ERROR; + } + + rc = reacquire(bl); + if (rc) + return rc; + + rc = bl->write(bl, pos, buf, len); + + release(bl); + + return rc; +} + +int blocklevel_write(struct blocklevel_device *bl, uint64_t pos, const void *buf, + uint64_t len) +{ + int rc, ecc_protection; + struct ecc64 *buffer; + uint64_t ecc_len; + uint64_t ecc_start, ecc_pos, ecc_diff; + + FL_DBG("%s: 0x%" PRIx64 "\t%p\t0x%" PRIx64 "\n", __func__, pos, buf, len); + if (!bl || !buf) { + errno = EINVAL; + return FLASH_ERR_PARM_ERROR; + } + + ecc_protection = ecc_protected(bl, pos, len, &ecc_start); + + FL_DBG("%s: 0x%" PRIx64 " for 0x%" PRIx64 " ecc=%s\n", + __func__, pos, len, ecc_protection ? + (ecc_protection == -1 ? "partial" : "yes") : "no"); + + if (!ecc_protection) + return blocklevel_raw_write(bl, pos, buf, len); + + /* + * The region we're writing to has both ecc protection and not. + * Perhaps one day in the future blocklevel can cope with this. 
+ */ + if (ecc_protection == -1) { + FL_ERR("%s: Can't cope with partial ecc\n", __func__); + errno = EINVAL; + return FLASH_ERR_PARM_ERROR; + } + + pos = with_ecc_pos(ecc_start, pos); + + ecc_pos = ecc_buffer_align(ecc_start, pos); + ecc_diff = pos - ecc_pos; + ecc_len = ecc_buffer_size(len + ecc_diff); + + FL_DBG("%s: adjusted_pos: 0x%" PRIx64 ", ecc_pos: 0x%" PRIx64 + ", ecc_diff: 0x%" PRIx64 ", ecc_len: 0x%" PRIx64 "\n", + __func__, pos, ecc_pos, ecc_diff, ecc_len); + + buffer = malloc(ecc_len); + if (!buffer) { + errno = ENOMEM; + rc = FLASH_ERR_MALLOC_FAILED; + goto out; + } + + if (ecc_diff) { + uint64_t start_chunk = ecc_diff; + uint64_t end_chunk = BYTES_PER_ECC - ecc_diff; + uint64_t end_len = ecc_len - end_chunk; + + /* + * Read the start bytes that memcpy_to_ecc_unaligned() will need + * to calculate the first ecc byte + */ + rc = blocklevel_raw_read(bl, ecc_pos, buffer, start_chunk); + if (rc) { + errno = EBADF; + rc = FLASH_ERR_ECC_INVALID; + goto out; + } + + /* + * Read the end bytes that memcpy_to_ecc_unaligned() will need + * to calculate the last ecc byte + */ + rc = blocklevel_raw_read(bl, ecc_pos + end_len, ((char *)buffer) + end_len, + end_chunk); + if (rc) { + errno = EBADF; + rc = FLASH_ERR_ECC_INVALID; + goto out; + } + + if (memcpy_to_ecc_unaligned(buffer, buf, len, ecc_diff)) { + errno = EBADF; + rc = FLASH_ERR_ECC_INVALID; + goto out; + } + } else { + if (memcpy_to_ecc(buffer, buf, len)) { + errno = EBADF; + rc = FLASH_ERR_ECC_INVALID; + goto out; + } + } + rc = blocklevel_raw_write(bl, pos, buffer, ecc_len); + +out: + free(buffer); + return rc; +} + +int blocklevel_erase(struct blocklevel_device *bl, uint64_t pos, uint64_t len) +{ + int rc; + if (!bl || !bl->erase) { + errno = EINVAL; + return FLASH_ERR_PARM_ERROR; + } + + FL_DBG("%s: 0x%" PRIx64 "\t0x%" PRIx64 "\n", __func__, pos, len); + + /* Programmer may be making a horrible mistake without knowing it */ + if (pos & bl->erase_mask) { + FL_ERR("blocklevel_erase: pos (0x%"PRIx64") is not erase block (0x%08x) aligned\n", + pos, bl->erase_mask + 1); + return FLASH_ERR_ERASE_BOUNDARY; + } + + if (len & bl->erase_mask) { + FL_ERR("blocklevel_erase: len (0x%"PRIx64") is not erase block (0x%08x) aligned\n", + len, bl->erase_mask + 1); + return FLASH_ERR_ERASE_BOUNDARY; + } + + rc = reacquire(bl); + if (rc) + return rc; + + rc = bl->erase(bl, pos, len); + + release(bl); + + return rc; +} + +int blocklevel_get_info(struct blocklevel_device *bl, const char **name, uint64_t *total_size, + uint32_t *erase_granule) +{ + int rc; + + if (!bl || !bl->get_info) { + errno = EINVAL; + return FLASH_ERR_PARM_ERROR; + } + + rc = reacquire(bl); + if (rc) + return rc; + + rc = bl->get_info(bl, name, total_size, erase_granule); + + /* Check the validity of what we are being told */ + if (erase_granule && *erase_granule != bl->erase_mask + 1) + FL_ERR("blocklevel_get_info: WARNING: erase_granule (0x%08x) and erase_mask" + " (0x%08x) don't match\n", *erase_granule, bl->erase_mask + 1); + + release(bl); + + return rc; +} + +/* + * Compare flash and memory to determine if: + * a) Erase must happen before write + * b) Flash and memory are identical + * c) Flash can simply be written to + * + * returns -1 for a + * returns 0 for b + * returns 1 for c + */ +static int blocklevel_flashcmp(const void *flash_buf, const void *mem_buf, uint64_t len) +{ + uint64_t i; + int same = true; + const uint8_t *f_buf, *m_buf; + + f_buf = flash_buf; + m_buf = mem_buf; + + for (i = 0; i < len; i++) { + if (m_buf[i] & ~f_buf[i]) + return -1; + if (same && 
(m_buf[i] != f_buf[i])) + same = false; + } + + return same ? 0 : 1; +} + +int blocklevel_smart_erase(struct blocklevel_device *bl, uint64_t pos, uint64_t len) +{ + uint64_t block_size; + void *erase_buf; + int rc; + + if (!bl) { + errno = EINVAL; + return FLASH_ERR_PARM_ERROR; + } + + FL_DBG("%s: 0x%" PRIx64 "\t0x%" PRIx64 "\n", __func__, pos, len); + + /* Nothing smart needs to be done, pos and len are aligned */ + if ((pos & bl->erase_mask) == 0 && (len & bl->erase_mask) == 0) { + FL_DBG("%s: Skipping smarts everything is aligned 0x%" PRIx64 " 0x%" PRIx64 + "to 0x%08x\n", __func__, pos, len, bl->erase_mask); + return blocklevel_erase(bl, pos, len); + } + block_size = bl->erase_mask + 1; + erase_buf = malloc(block_size); + if (!erase_buf) { + errno = ENOMEM; + return FLASH_ERR_MALLOC_FAILED; + } + + rc = reacquire(bl); + if (rc) { + free(erase_buf); + return rc; + } + + if (pos & bl->erase_mask) { + /* + * base_pos and base_len are the values in the first erase + * block that we need to preserve: the region up to pos. + */ + uint64_t base_pos = pos & ~(bl->erase_mask); + uint64_t base_len = pos - base_pos; + + FL_DBG("%s: preserving 0x%" PRIx64 "..0x%" PRIx64 "\n", + __func__, base_pos, base_pos + base_len); + + /* + * Read the entire block in case this is the ONLY block we're + * modifying, we may need the end chunk of it later + */ + rc = bl->read(bl, base_pos, erase_buf, block_size); + if (rc) + goto out; + + rc = bl->erase(bl, base_pos, block_size); + if (rc) + goto out; + + rc = bl->write(bl, base_pos, erase_buf, base_len); + if (rc) + goto out; + + /* + * The requested erase fits entirely into this erase block and + * so we need to write back the chunk at the end of the block + */ + if (base_pos + base_len + len < base_pos + block_size) { + rc = bl->write(bl, pos + len, erase_buf + base_len + len, + block_size - base_len - len); + FL_DBG("%s: Early exit, everything was in one erase block\n", + __func__); + goto out; + } + + pos += block_size - base_len; + len -= block_size - base_len; + } + + /* Now we should be aligned, best to double check */ + if (pos & bl->erase_mask) { + FL_DBG("%s:pos 0x%" PRIx64 " isn't erase_mask 0x%08x aligned\n", + __func__, pos, bl->erase_mask); + rc = FLASH_ERR_PARM_ERROR; + goto out; + } + + if (len & ~(bl->erase_mask)) { + rc = bl->erase(bl, pos, len & ~(bl->erase_mask)); + if (rc) + goto out; + + pos += len & ~(bl->erase_mask); + len -= len & ~(bl->erase_mask); + } + + /* Length should be less than a block now */ + if (len > block_size) { + FL_DBG("%s: len 0x%" PRIx64 " is still exceeds block_size 0x%" PRIx64 "\n", + __func__, len, block_size); + rc = FLASH_ERR_PARM_ERROR; + goto out; + } + + if (len & bl->erase_mask) { + /* + * top_pos is the first byte that must be preserved and + * top_len is the length from top_pos to the end of the erase + * block: the region that must be preserved + */ + uint64_t top_pos = pos + len; + uint64_t top_len = block_size - len; + + FL_DBG("%s: preserving 0x%" PRIx64 "..0x%" PRIx64 "\n", + __func__, top_pos, top_pos + top_len); + + rc = bl->read(bl, top_pos, erase_buf, top_len); + if (rc) + goto out; + + rc = bl->erase(bl, pos, block_size); + if (rc) + goto out; + + rc = bl->write(bl, top_pos, erase_buf, top_len); + if (rc) + goto out; + } + +out: + free(erase_buf); + release(bl); + return rc; +} + +int blocklevel_smart_write(struct blocklevel_device *bl, uint64_t pos, const void *buf, uint64_t len) +{ + void *ecc_buf = NULL; + uint64_t ecc_start; + int ecc_protection; + + void *erase_buf = NULL; + uint32_t 
erase_size; + + const void *write_buf; + uint64_t write_len; + uint64_t write_pos; + + int rc = 0; + + if (!buf || !bl) { + errno = EINVAL; + return FLASH_ERR_PARM_ERROR; + } + + FL_DBG("%s: 0x%" PRIx64 "\t0x%" PRIx64 "\n", __func__, pos, len); + + if (!(bl->flags & WRITE_NEED_ERASE)) { + FL_DBG("%s: backend doesn't need erase\n", __func__); + return blocklevel_write(bl, pos, buf, len); + } + + rc = blocklevel_get_info(bl, NULL, NULL, &erase_size); + if (rc) + return rc; + + ecc_protection = ecc_protected(bl, pos, len, &ecc_start); + if (ecc_protection == -1) { + FL_ERR("%s: Can't cope with partial ecc\n", __func__); + errno = EINVAL; + return FLASH_ERR_PARM_ERROR; + } + + if (ecc_protection) { + uint64_t ecc_pos, ecc_align, ecc_diff, ecc_len; + + FL_DBG("%s: region has ECC\n", __func__); + + ecc_pos = with_ecc_pos(ecc_start, pos); + ecc_align = ecc_buffer_align(ecc_start, ecc_pos); + ecc_diff = ecc_pos - ecc_align; + ecc_len = ecc_buffer_size(len + ecc_diff); + + ecc_buf = malloc(ecc_len); + if (!ecc_buf) { + errno = ENOMEM; + return FLASH_ERR_MALLOC_FAILED; + } + + if (ecc_diff) { + rc = blocklevel_read(bl, ecc_align, ecc_buf, ecc_diff); + if (rc) { + errno = EBADF; + rc = FLASH_ERR_ECC_INVALID; + goto out; + } + } + + rc = memcpy_to_ecc_unaligned(ecc_buf, buf, len, ecc_diff); + if (rc) { + free(ecc_buf); + errno = EBADF; + return FLASH_ERR_ECC_INVALID; + } + + write_buf = ecc_buf; + write_len = ecc_len; + write_pos = ecc_pos; + } else { + write_buf = buf; + write_len = len; + write_pos = pos; + } + + erase_buf = malloc(erase_size); + if (!erase_buf) { + errno = ENOMEM; + rc = FLASH_ERR_MALLOC_FAILED; + goto out_free; + } + + rc = reacquire(bl); + if (rc) + goto out_free; + + while (write_len > 0) { + uint32_t erase_block = write_pos & ~(erase_size - 1); + uint32_t block_offset = write_pos & (erase_size - 1); + uint32_t chunk_size = erase_size > write_len ? 
+ write_len : erase_size; + int cmp; + + /* Write crosses an erase boundary, shrink the write to the boundary */ + if (erase_size < block_offset + chunk_size) { + chunk_size = erase_size - block_offset; + } + + rc = bl->read(bl, erase_block, erase_buf, erase_size); + if (rc) + goto out; + + cmp = blocklevel_flashcmp(erase_buf + block_offset, write_buf, + chunk_size); + FL_DBG("%s: region 0x%08x..0x%08x ", __func__, + erase_block, erase_size); + if (cmp != 0) { + FL_DBG("needs "); + if (cmp == -1) { + FL_DBG("erase and "); + bl->erase(bl, erase_block, erase_size); + } + FL_DBG("write\n"); + memcpy(erase_buf + block_offset, write_buf, chunk_size); + rc = bl->write(bl, erase_block, erase_buf, erase_size); + if (rc) + goto out; + } else { + FL_DBG("clean\n"); + } + + write_len -= chunk_size; + write_pos += chunk_size; + write_buf += chunk_size; + } + +out: + release(bl); +out_free: + free(ecc_buf); + free(erase_buf); + return rc; +} + +static bool insert_bl_prot_range(struct blocklevel_range *ranges, struct bl_prot_range range) +{ + int i; + uint32_t pos, len; + struct bl_prot_range *prot = ranges->prot; + + pos = range.start; + len = range.len; + + if (len == 0) + return true; + + /* Check for overflow */ + if (pos + len < len) + return false; + + for (i = 0; i < ranges->n_prot && len > 0; i++) { + if (prot[i].start <= pos && prot[i].start + prot[i].len >= pos + len) { + len = 0; + break; /* Might as well, the next two conditions can't be true */ + } + + /* Can easily extend this down just by adjusting start */ + if (pos <= prot[i].start && pos + len >= prot[i].start) { + FL_DBG("%s: extending start down\n", __func__); + prot[i].len += prot[i].start - pos; + prot[i].start = pos; + pos += prot[i].len; + if (prot[i].len >= len) + len = 0; + else + len -= prot[i].len; + } + + /* + * Jump over this range but the new range might be so big that + * theres a chunk after + */ + if (pos >= prot[i].start && pos < prot[i].start + prot[i].len) { + FL_DBG("%s: fits within current range ", __func__); + if (prot[i].start + prot[i].len - pos > len) { + FL_DBG("but there is some extra at the end\n"); + len -= prot[i].start + prot[i].len - pos; + pos = prot[i].start + prot[i].len; + } else { + FL_DBG("\n"); + len = 0; + } + } + /* + * This condition will be true if the range is smaller than + * the current range, therefore it should go here! + */ + if (pos < prot[i].start && pos + len <= prot[i].start) + break; + } + + if (len) { + int insert_pos = i; + struct bl_prot_range *new_ranges = ranges->prot; + + FL_DBG("%s: adding 0x%08x..0x%08x\n", __func__, pos, pos + len); + + if (ranges->n_prot == ranges->total_prot) { + new_ranges = realloc(ranges->prot, + sizeof(range) * ((ranges->n_prot) + PROT_REALLOC_NUM)); + if (!new_ranges) + return false; + ranges->total_prot += PROT_REALLOC_NUM; + } + if (insert_pos != ranges->n_prot) + for (i = ranges->n_prot; i > insert_pos; i--) + memcpy(&new_ranges[i], &new_ranges[i - 1], sizeof(range)); + range.start = pos; + range.len = len; + memcpy(&new_ranges[insert_pos], &range, sizeof(range)); + ranges->prot = new_ranges; + ranges->n_prot++; + prot = new_ranges; + } + + return true; +} + +int blocklevel_ecc_protect(struct blocklevel_device *bl, uint32_t start, uint32_t len) +{ + /* + * Could implement this at hardware level by having an accessor to the + * backend in struct blocklevel_device and as a result do nothing at + * this level (although probably not for ecc!) 
+ */ + struct bl_prot_range range = { .start = start, .len = len }; + + if (len < BYTES_PER_ECC) + return -1; + return !insert_bl_prot_range(&bl->ecc_prot, range); +} diff --git a/roms/skiboot/libflash/blocklevel.h b/roms/skiboot/libflash/blocklevel.h new file mode 100644 index 000000000..2557eb3f0 --- /dev/null +++ b/roms/skiboot/libflash/blocklevel.h @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* Copyright 2013-2017 IBM Corp. */ + +#ifndef __LIBFLASH_BLOCKLEVEL_H +#define __LIBFLASH_BLOCKLEVEL_H + +#include <stdint.h> +#include <stdbool.h> + +struct bl_prot_range { + uint64_t start; + uint64_t len; +}; + +struct blocklevel_range { + struct bl_prot_range *prot; + int n_prot; + int total_prot; +}; + +enum blocklevel_flags { + WRITE_NEED_ERASE = 1, +}; + +/* + * libffs may be used with different backends, all should provide these for + * libflash to get the information it needs + */ +struct blocklevel_device { + void *priv; + int (*reacquire)(struct blocklevel_device *bl); + int (*release)(struct blocklevel_device *bl); + int (*read)(struct blocklevel_device *bl, uint64_t pos, void *buf, uint64_t len); + int (*write)(struct blocklevel_device *bl, uint64_t pos, const void *buf, uint64_t len); + int (*erase)(struct blocklevel_device *bl, uint64_t pos, uint64_t len); + int (*get_info)(struct blocklevel_device *bl, const char **name, uint64_t *total_size, + uint32_t *erase_granule); + bool (*exit)(struct blocklevel_device *bl); + + /* + * Keep the erase mask so that blocklevel_erase() can do sanity checking + */ + uint32_t erase_mask; + bool keep_alive; + enum blocklevel_flags flags; + + struct blocklevel_range ecc_prot; +}; +int blocklevel_raw_read(struct blocklevel_device *bl, uint64_t pos, void *buf, uint64_t len); +int blocklevel_read(struct blocklevel_device *bl, uint64_t pos, void *buf, uint64_t len); +int blocklevel_raw_write(struct blocklevel_device *bl, uint64_t pos, const void *buf, uint64_t len); +int blocklevel_write(struct blocklevel_device *bl, uint64_t pos, const void *buf, uint64_t len); +int blocklevel_erase(struct blocklevel_device *bl, uint64_t pos, uint64_t len); +int blocklevel_get_info(struct blocklevel_device *bl, const char **name, uint64_t *total_size, + uint32_t *erase_granule); + +/* + * blocklevel_smart_write() performs reads on the data to see if it + * can skip erase or write calls. This is likely more convenient for + * the caller since they don't need to perform these checks + * themselves. Depending on the new and old data, this may be faster + * or slower than the just using blocklevel_erase/write calls. + * directly. + */ +int blocklevel_smart_write(struct blocklevel_device *bl, uint64_t pos, const void *buf, uint64_t len); + +/* + * blocklevel_smart_erase() will handle unaligned erases. + * blocklevel_erase() expects a erase_granule aligned buffer and the + * erase length to be an exact multiple of erase_granule, + * blocklevel_smart_erase() solves this requirement by performing a + * read erase write under the hood. 
+ */ +int blocklevel_smart_erase(struct blocklevel_device *bl, uint64_t pos, uint64_t len); +/* Implemented in software at this level */ +int blocklevel_ecc_protect(struct blocklevel_device *bl, uint32_t start, uint32_t len); + +#endif /* __LIBFLASH_BLOCKLEVEL_H */ diff --git a/roms/skiboot/libflash/ecc.c b/roms/skiboot/libflash/ecc.c new file mode 100644 index 000000000..25f518e66 --- /dev/null +++ b/roms/skiboot/libflash/ecc.c @@ -0,0 +1,449 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* + * This is based on the hostboot ecc code + * + * Copyright 2013-2019 IBM Corp. + */ + +#include <stdint.h> +#include <inttypes.h> +#include <string.h> + +#include <ccan/endian/endian.h> + +#include "libflash.h" +#include "ecc.h" + +/* Bit field identifiers for syndrome calculations. */ +enum eccbitfields +{ + GD = 0xff, //< Good, ECC matches. + UE = 0xfe, //< Uncorrectable. + E0 = 71, //< Error in ECC bit 0 + E1 = 70, //< Error in ECC bit 1 + E2 = 69, //< Error in ECC bit 2 + E3 = 68, //< Error in ECC bit 3 + E4 = 67, //< Error in ECC bit 4 + E5 = 66, //< Error in ECC bit 5 + E6 = 65, //< Error in ECC bit 6 + E7 = 64 //< Error in ECC bit 7 + /* 0-63 Correctable bit in byte */ +}; + +/* + * Matrix used for ECC calculation. + * + * Each row of this is the set of data word bits that are used for + * the calculation of the corresponding ECC bit. The parity of the + * bitset is the value of the ECC bit. + * + * ie. ECC[n] = eccMatrix[n] & data + * + * Note: To make the math easier (and less shifts in resulting code), + * row0 = ECC7. HW numbering is MSB, order here is LSB. + * + * These values come from the HW design of the ECC algorithm. + */ +static uint64_t eccmatrix[] = { + 0x0000e8423c0f99ffull, + 0x00e8423c0f99ff00ull, + 0xe8423c0f99ff0000ull, + 0x423c0f99ff0000e8ull, + 0x3c0f99ff0000e842ull, + 0x0f99ff0000e8423cull, + 0x99ff0000e8423c0full, + 0xff0000e8423c0f99ull +}; + +/** + * Syndrome calculation matrix. + * + * Maps syndrome to flipped bit. + * + * To perform ECC correction, this matrix is a look-up of the bit + * that is bad based on the binary difference of the good and bad + * ECC. This difference is called the "syndrome". + * + * When a particular bit is on in the data, it cause a column from + * eccMatrix being XOR'd into the ECC field. This column is the + * "effect" of each bit. If a bit is flipped in the data then its + * "effect" is missing from the ECC. You can calculate ECC on unknown + * quality data and compare the ECC field between the calculated + * value and the stored value. If the difference is zero, then the + * data is clean. If the difference is non-zero, you look up the + * difference in the syndrome table to identify the "effect" that + * is missing, which is the bit that is flipped. + * + * Notice that ECC bit flips are recorded by a single "effect" + * bit (ie. 0x1, 0x2, 0x4, 0x8 ...) and double bit flips are identified + * by the UE status in the table. + * + * Bits are in MSB order. 
+ */ +static enum eccbitfields syndromematrix[] = { + GD, E7, E6, UE, E5, UE, UE, 47, E4, UE, UE, 37, UE, 35, 39, UE, + E3, UE, UE, 48, UE, 30, 29, UE, UE, 57, 27, UE, 31, UE, UE, UE, + E2, UE, UE, 17, UE, 18, 40, UE, UE, 58, 22, UE, 21, UE, UE, UE, + UE, 16, 49, UE, 19, UE, UE, UE, 23, UE, UE, UE, UE, 20, UE, UE, + E1, UE, UE, 51, UE, 46, 9, UE, UE, 34, 10, UE, 32, UE, UE, 36, + UE, 62, 50, UE, 14, UE, UE, UE, 13, UE, UE, UE, UE, UE, UE, UE, + UE, 61, 8, UE, 41, UE, UE, UE, 11, UE, UE, UE, UE, UE, UE, UE, + 15, UE, UE, UE, UE, UE, UE, UE, UE, UE, 12, UE, UE, UE, UE, UE, + E0, UE, UE, 55, UE, 45, 43, UE, UE, 56, 38, UE, 1, UE, UE, UE, + UE, 25, 26, UE, 2, UE, UE, UE, 24, UE, UE, UE, UE, UE, 28, UE, + UE, 59, 54, UE, 42, UE, UE, 44, 6, UE, UE, UE, UE, UE, UE, UE, + 5, UE, UE, UE, UE, UE, UE, UE, UE, UE, UE, UE, UE, UE, UE, UE, + UE, 63, 53, UE, 0, UE, UE, UE, 33, UE, UE, UE, UE, UE, UE, UE, + 3, UE, UE, 52, UE, UE, UE, UE, UE, UE, UE, UE, UE, UE, UE, UE, + 7, UE, UE, UE, UE, UE, UE, UE, UE, 60, UE, UE, UE, UE, UE, UE, + UE, UE, UE, UE, 4, UE, UE, UE, UE, UE, UE, UE, UE, UE, UE, UE, +}; + +/** + * Create the ECC field corresponding to a 8-byte data field + * + * @data: The 8 byte data to generate ECC for. + * @return: The 1 byte ECC corresponding to the data. + */ +static uint8_t eccgenerate(uint64_t data) +{ + int i; + uint8_t result = 0; + + for (i = 0; i < 8; i++) + result |= __builtin_parityll(eccmatrix[i] & data) << i; + + return result; +} + +/** + * Verify the data and ECC match or indicate how they are wrong. + * + * @data: The data to check ECC on. + * @ecc: The [supposed] ECC for the data. + * + * @return: eccBitfield or 0-64. + * + * @retval GD - Indicates the data is good (matches ECC). + * @retval UE - Indicates the data is uncorrectable. + * @retval all others - Indication of which bit is incorrect. + */ +static enum eccbitfields eccverify(uint64_t data, uint8_t ecc) +{ + return syndromematrix[eccgenerate(data) ^ ecc]; +} + +/* IBM bit ordering */ +static inline uint64_t eccflipbit(uint64_t data, uint8_t bit) +{ + if (bit > 63) + return data; + + return data ^ (1ul << (63 - bit)); +} + +static int eccbyte(beint64_t *dst, struct ecc64 *src) +{ + uint8_t ecc, badbit; + uint64_t data; + + data = be64_to_cpu(src->data); + ecc = src->ecc; + + badbit = eccverify(data, ecc); + if (badbit == UE) { + FL_ERR("ECC: uncorrectable error: %016llx %02x\n", (unsigned long long int)data, ecc); + return badbit; + } + if (badbit <= UE) + FL_INF("ECC: correctable error: %i\n", badbit); + if (badbit < 64) + *dst = cpu_to_be64(eccflipbit(data, badbit)); + else + *dst = cpu_to_be64(data); + + return 0; +} + +static beint64_t *inc_beint64_by(const void *p, uint64_t i) +{ + return (beint64_t *)(((char *)p) + i); +} + +static uint64_t *inc_uint64_by(const void *p, uint64_t i) +{ + return (uint64_t *)(((char *)p) + i); +} + +static struct ecc64 *inc_ecc64_by(struct ecc64 *p, uint64_t i) +{ + return (struct ecc64 *)(((char *)p) + i); +} + +static uint64_t whole_ecc_bytes(uint64_t i) +{ + return i & ~(BYTES_PER_ECC - 1); +} + +static uint64_t whole_ecc_structs(uint64_t i) +{ + return whole_ecc_bytes(i) >> 3; +} + +/** + * Copy data from an input buffer with ECC to an output buffer without ECC. + * Correct it along the way and check for errors. + * + * @dst: destination buffer without ECC + * @src: source buffer with ECC + * @len: number of bytes of data to copy (without ecc). + * Must be 8 byte aligned. 
+ * + * @return: Success or error + * + * @retval: 0 - success + * @retfal: other - fail + */ +int memcpy_from_ecc(beint64_t *dst, struct ecc64 *src, uint64_t len) +{ + uint32_t i; + + if (len & 0x7) { + /* TODO: we could probably handle this */ + FL_ERR("ECC data length must be 8 byte aligned length:%" PRIx64 "\n", + len); + return -1; + } + + /* Handle in chunks of 8 bytes, so adjust the length */ + len >>= 3; + + for (i = 0; i < len; i++) { + int rc; + rc = eccbyte(dst, src + i); + if (rc) + return rc; + dst++; + } + return 0; +} + +/** + * Copy data from an input buffer with ECC to an output buffer without ECC. + * Correct it along the way and check for errors. + * + * Unlike memcmp_from_ecc() which requires that the first byte into + * dst be the first byte in src (which must also be aligned to a + * struct ecc64 struct boundary) this function can cope with the first + * byte in dst not being the first byte in src. + * + * Note: src MUST still be aligned to a struct ecc64 otherwise ECC + * calculations are impossible. + * + * The alignment parameter species the number of bytes present at the + * start of src that should be skipped and not written to dst. Once + * again, these bytes must be in src otherwise the ECC cannot be + * checked. + * + * len also doesn't have any value limitation for this function. Of + * course src must contain an exact multiple of struct ecc64 otherwise + * ECC calculation cannot be performed but this function won't copy + * the entirety of the last src data word if len is not mutiple of 8 + * + * @dst: destination buffer without ECC + * @src: source buffer with ECC + * @len: number of bytes of data to copy (without ecc). + * @alignment: number of leading bytes in src which shouldn't be + * copied to dst + * @return: Success or error + * + * @retval: 0 - success + * @retfal: other - fail + */ +int memcpy_from_ecc_unaligned(beint64_t *dst, struct ecc64 *src, + uint64_t len, uint8_t alignment) +{ + char data[BYTES_PER_ECC]; + uint8_t bytes_wanted; + int rc; + + if (alignment > 8) + return -1; + + bytes_wanted = BYTES_PER_ECC - alignment; + + /* + * Only actually do the first calculation if an alignment is + * required - otherwise jump straight to memcpy_from_ecc() + */ + if (alignment) { + rc = eccbyte((beint64_t *)data, src); + if (rc) + return rc; + + memcpy(dst, &data[alignment], bytes_wanted); + + src = inc_ecc64_by(src, sizeof(struct ecc64)); + dst = inc_beint64_by(dst, bytes_wanted); + len -= bytes_wanted; + } + + if (len >= BYTES_PER_ECC) { + rc = memcpy_from_ecc(dst, src, whole_ecc_bytes(len)); + if (rc) + return rc; + + /* + * It helps to let the compiler to the pointer arithmetic + * here, (dst and src are different types) + */ + dst += whole_ecc_structs(len); + src += whole_ecc_structs(len); + len -= whole_ecc_bytes(len); + } + + if (len) { + rc = eccbyte((beint64_t *)data, src); + if (rc) + return rc; + + memcpy(dst, data, len); + } + + return 0; +} + +/** + * Copy data from an input buffer without ECC to an output buffer with ECC. + * + * @dst: destination buffer with ECC + * @src: source buffer without ECC + * @len: number of bytes of data to copy (without ecc, length of src). + * Note: dst must be big enough to hold ecc bytes as well. + * Must be 8 byte aligned. 
+ * + * @return: success or failure + * + * @retval: 0 - success + * @retfal: other - fail + */ +int memcpy_to_ecc(struct ecc64 *dst, const beint64_t *src, uint64_t len) +{ + struct ecc64 ecc_word; + uint64_t i; + + if (len & 0x7) { + /* TODO: we could probably handle this */ + FL_ERR("Data to add ECC bytes to must be 8 byte aligned length: %" + PRIx64 "\n", len); + return -1; + } + + /* Handle in chunks of 8 bytes, so adjust the length */ + len >>= 3; + + for (i = 0; i < len; i++) { + ecc_word.ecc = eccgenerate(be64_to_cpu(*(src + i))); + ecc_word.data = *(src + i); + + *(dst + i) = ecc_word; + } + + return 0; +} + +/** + * Copy data from an input buffer without ECC to an output buffer with ECC. + * + * Unlike memcmp_to_ecc() which requires that the first byte in src be + * the first byte of a struct ecc64 structure this function does not + * have this requirement. + * + * Like memcpy_to_ecc_unaligned() the alignment parameter specfies the + * number of bytes in the first src word that are missing and would be + * required to form a struct ecc64 structure. + * + * It must be noted here that extra work IN THE CALLER must be done + * if your data is unaligned. In order to peform ECC calculations + * whatever portions of the ecc words are missing in src must be in + * dst. + * + * For example, if there is an alignment value of 1 then this means + * there is 1 byte (of the total of 8 bytes) missing in src which is + * needed to calculate the first ECC byte. Therefore the first byte of + * dst MUST CONTAIN IT! + * + * The same problem exists for the end of the buffer where src may not + * end exactly aligned, if this is the case dst must contain the + * required bytes to calculate the last ECC byte - they should be in + * dst where they would normally be found if src did contain those + * bytes. + * + * @dst: destination buffer with ECC + * @src: source buffer without ECC + * @len: number of bytes of data to copy (without ecc, length of src). + * @alignment: The number of bytes 'missing' from the start of src to + * be struct ecc64 aligned + * + * Note: dst must be big enough to hold ecc bytes as well. + * Must be 8 byte aligned. 
+ * + * @return: success or failure + * + * @retval: 0 - success + * @retfal: other - fail + */ + +int memcpy_to_ecc_unaligned(struct ecc64 *dst, const beint64_t *src, + uint64_t len, uint8_t alignment) +{ + struct ecc64 ecc_word; + uint8_t bytes_wanted; + int rc; + + bytes_wanted = BYTES_PER_ECC - alignment; + + /* + * Only actually do the first calculation if an alignment is + * required - otherwise jump straight to memcpy_to_ecc() + */ + if (alignment) { + ecc_word.data = dst->data; + memcpy(inc_uint64_by(&ecc_word.data, alignment), src, bytes_wanted); + + ecc_word.ecc = eccgenerate(be64_to_cpu(ecc_word.data)); + memcpy(dst, inc_ecc64_by(&ecc_word, alignment), + sizeof(struct ecc64) - alignment); + + dst = inc_ecc64_by(dst, sizeof(struct ecc64) - alignment); + src = inc_beint64_by(src, bytes_wanted); + len -= bytes_wanted; + } + + if (len >= BYTES_PER_ECC) { + rc = memcpy_to_ecc(dst, src, whole_ecc_bytes(len)); + if (rc) + return rc; + + /* + * It helps to let the compiler to the pointer arithmetic + * here, (dst and src are different types) + */ + dst += whole_ecc_structs(len); + src += whole_ecc_structs(len); + len -= whole_ecc_bytes(len); + } + + if (len) { + bytes_wanted = BYTES_PER_ECC - len; + + ecc_word.data = *src; + memcpy(inc_uint64_by(&ecc_word.data, len), inc_ecc64_by(dst, len), + bytes_wanted); + ecc_word.ecc = eccgenerate(be64_to_cpu(ecc_word.data)); + + *dst = ecc_word; + } + + return 0; +} diff --git a/roms/skiboot/libflash/ecc.h b/roms/skiboot/libflash/ecc.h new file mode 100644 index 000000000..bdf04201f --- /dev/null +++ b/roms/skiboot/libflash/ecc.h @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* + * This is based on the hostboot ecc code + * + * Copyright 2013-2018 IBM Corp. + */ + +#ifndef __ECC_H +#define __ECC_H + +#include <stdint.h> +#include <ccan/endian/endian.h> + +struct ecc64 { + beint64_t data; + uint8_t ecc; +} __attribute__((__packed__)); + +extern int memcpy_from_ecc(beint64_t *dst, struct ecc64 *src, uint64_t len); +extern int memcpy_from_ecc_unaligned(beint64_t *dst, struct ecc64 *src, uint64_t len, + uint8_t alignment); + +extern int memcpy_to_ecc(struct ecc64 *dst, const beint64_t *src, uint64_t len); +extern int memcpy_to_ecc_unaligned(struct ecc64 *dst, const beint64_t *src, uint64_t len, + uint8_t alignment); + +/* + * Calculate the size of a buffer if ECC is added + * + * We add 1 byte of ecc for every 8 bytes of data. So we need to round up to 8 + * bytes length and then add 1/8 + */ +#ifndef ALIGN_UP +#define ALIGN_UP(_v, _a) (((_v) + (_a) - 1) & ~((_a) - 1)) +#endif + +#define BYTES_PER_ECC 8 + +static inline uint64_t ecc_size(uint64_t len) +{ + return ALIGN_UP(len, BYTES_PER_ECC) >> 3; +} + +static inline uint64_t ecc_buffer_size(uint64_t len) +{ + return ALIGN_UP(len, BYTES_PER_ECC) + ecc_size(len); +} + +static inline int ecc_buffer_size_check(uint64_t len) +{ + return len % (BYTES_PER_ECC + 1); +} + +static inline uint64_t ecc_buffer_size_minus_ecc(uint64_t len) +{ + return len * BYTES_PER_ECC / (BYTES_PER_ECC + 1); +} + +static inline uint64_t ecc_buffer_align(uint64_t start, uint64_t pos) +{ + return pos - ((pos - start) % (BYTES_PER_ECC + 1)); +} + +#endif diff --git a/roms/skiboot/libflash/errors.h b/roms/skiboot/libflash/errors.h new file mode 100644 index 000000000..e67a759de --- /dev/null +++ b/roms/skiboot/libflash/errors.h @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* Copyright 2013-2018 IBM Corp. 
*/ + +#ifndef __LIBFLASH_ERRORS_H +#define __LIBFLASH_ERRORS_H + +#define FLASH_ERR_MALLOC_FAILED 1 +#define FLASH_ERR_CHIP_UNKNOWN 2 +#define FLASH_ERR_PARM_ERROR 3 +#define FLASH_ERR_ERASE_BOUNDARY 4 +#define FLASH_ERR_WREN_TIMEOUT 5 +#define FLASH_ERR_WIP_TIMEOUT 6 +#define FLASH_ERR_BAD_PAGE_SIZE 7 +#define FLASH_ERR_VERIFY_FAILURE 8 +#define FLASH_ERR_4B_NOT_SUPPORTED 9 +#define FLASH_ERR_CTRL_CONFIG_MISMATCH 10 +#define FLASH_ERR_CHIP_ER_NOT_SUPPORTED 11 +#define FLASH_ERR_CTRL_CMD_UNSUPPORTED 12 +#define FLASH_ERR_CTRL_TIMEOUT 13 +#define FLASH_ERR_ECC_INVALID 14 +#define FLASH_ERR_BAD_READ 15 +#define FLASH_ERR_DEVICE_GONE 16 +#define FLASH_ERR_AGAIN 17 + +#ifdef __SKIBOOT__ +#include <skiboot.h> +#define FL_INF(fmt...) do { prlog(PR_INFO, fmt); } while(0) +#define FL_DBG(fmt...) do { prlog(PR_TRACE, fmt); } while(0) +#define FL_ERR(fmt...) do { prlog(PR_ERR, fmt); } while(0) +#else +#include <stdio.h> +extern bool libflash_debug; +#define FL_DBG(fmt...) do { if (libflash_debug) printf(fmt); } while(0) +#define FL_INF(fmt...) do { printf(fmt); } while(0) +#define FL_ERR(fmt...) do { printf(fmt); } while(0) +#endif + + +#endif /* __LIBFLASH_ERRORS_H */ diff --git a/roms/skiboot/libflash/ffs.h b/roms/skiboot/libflash/ffs.h new file mode 100644 index 000000000..441d84646 --- /dev/null +++ b/roms/skiboot/libflash/ffs.h @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* + * FSP Flash Structure + * + * This header defines the layout for the FSP Flash Structure. + * + * Copyright 2012-2018 IBM Corp. + */ + +#ifndef __FFS_H__ +#define __FFS_H__ + +/* Pull in the correct header depending on what is being built */ +#ifndef __SKIBOOT__ +#include <linux/types.h> +#else +#include <stdint.h> +#endif +#include <ccan/short_types/short_types.h> +#include <ccan/endian/endian.h> +#include <ccan/list/list.h> + +#include "libffs.h" + +/* The version of this partition implementation */ +#define FFS_VERSION_1 1 + +/* Magic number for the partition header (ASCII 'PART') */ +#define FFS_MAGIC 0x50415254 + +/* pid of logical partitions/containers */ +#define FFS_PID_TOPLEVEL 0xFFFFFFFF + +/* + * Type of image contained w/in partition + */ +enum ffs_type { + FFS_TYPE_DATA = 1, + FFS_TYPE_LOGICAL = 2, + FFS_TYPE_PARTITION = 3, +}; + +/* + * Flag bit definitions + */ +#define FFS_FLAGS_PROTECTED 0x0001 +#define FFS_FLAGS_U_BOOT_ENV 0x0002 + +/* Data integrity flags */ +#define FFS_ENRY_INTEG_ECC 0x8000 + +/* + * User verCheck definitions + */ +#define FFS_VERCHECK_SHA512V 0x80 +#define FFS_VERCHECK_SHA512EC 0x40 + +/* + * User miscFlags + */ +#define FFS_MISCFLAGS_PRESERVED 0x80 +#define FFS_MISCFLAGS_READONLY 0x40 +#define FFS_MISCFLAGS_BACKUP 0x20 +#define FFS_MISCFLAGS_REPROVISION 0x10 +#define FFS_MISCFLAGS_VOLATILE 0x08 +#define FFS_MISCFLAGS_CLEARECC 0x04 +#define FFS_MISCFLAGS_GOLDEN 0x01 + +/** + * struct __ffs_entry_user - On flash user data entries + * + * Represents the on flash layout of FFS structures + * + * @chip: Chip Select (0,1) + * @compressType: Compression Indication/alg (0=not compressed) + * @dataInteg: Indicates Data Integrity mechanism + * @verCheck: Indicates Version check type + * @miscFlags: Misc Partition related Flags + * @freeMisc[2]: Unused Miscellaneious Info + * @freeUser[14]: Unused User Data + */ +struct __ffs_entry_user { + uint8_t chip; + uint8_t compresstype; + be16 datainteg; + uint8_t vercheck; + uint8_t miscflags; + uint8_t freemisc[2]; + be32 reserved[14]; +} __attribute__ ((packed)); + +/** + * struct __ffs_entry - On flash partition 
entry + * + * Represents the on flash layout of FFS structures + * Note: Unlike the in memory structures base and size of the entry are in + * units of block_size and the actual size is in bytes + * + * @name: Opaque null terminated string + * @base: Starting offset of partition in flash (in hdr.block_size) + * @size: Partition size (in hdr.block_size) + * @pid: Parent partition entry (FFS_PID_TOPLEVEL for toplevel) + * @id: Partition entry ID [1..65536] + * @type: Describe type of partition + * @flags: Partition attributes (optional) + * @actual: Actual partition size (in bytes) + * @resvd: Reserved words for future use + * @user: User data (optional) + * @checksum: Partition entry checksum (includes all above) + */ +struct __ffs_entry { + char name[FFS_PART_NAME_MAX + 1]; + be32 base; + be32 size; + be32 pid; + be32 id; + be32 type; + be32 flags; + be32 actual; + be32 resvd[4]; + struct __ffs_entry_user user; + /* The checksum is actually endian agnostic */ + uint32_t checksum; +} __attribute__ ((packed)); + +/** + * struct ffs_entry - Partition entry + * + * Useable in memory representation of a struct __ffs_entry + * Note: Unlike the on flash structure, all sizes here are in bytes! + * + * @name: Opaque null terminated string + * @base: Starting offset of partition in flash (in bytes) + * @size: Partition size (in bytes) + * @actual: Actual partition size (in bytes) + * @pid: Parent partition entry (FFS_PID_TOPLEVEL for toplevel) + * @type: Describe type of partition + * @flags: Partition attributes (optional) + * @user: User data (optional) + * @ref: Refcount + */ +struct ffs_entry { + char name[FFS_PART_NAME_MAX + 1]; + uint32_t base; + uint32_t size; + uint32_t actual; + uint32_t pid; + enum ffs_type type; + uint32_t flags; + struct ffs_entry_user user; + unsigned int ref; +}; + + +/** + * struct __ffs_hdr - On flash FSP Flash Structure header + * + * Represents the on flash layout of FFS structures + * Note: Beware that the size of the partition table is in units of block_size + * + * @magic: Eye catcher/corruption detector + * @version: Version of the structure + * @size: Size of partition table (in block_size) + * @entry_size: Size of struct ffs_entry element (in bytes) + * @entry_count: Number of struct ffs_entry elements in @entries array + * @block_size: Size of block on device (in bytes) + * @block_count: Number of blocks on device + * @resvd[4]: Reserved words for future use + * @checksum: Header checksum + * @entries: Pointer to array of partition entries + */ +struct __ffs_hdr { + be32 magic; + be32 version; + be32 size; + be32 entry_size; + be32 entry_count; + be32 block_size; + be32 block_count; + be32 resvd[4]; + /* The checksum is actually endian agnostic */ + uint32_t checksum; + struct __ffs_entry entries[]; +} __attribute__ ((packed)); + +/** + * struct ffs_hdr - FSP Flash Structure header + * + * Useable in memory representation of a struct __ffs_hdr + * Note: All sizes here are in bytes + * + * @version: Version of the structure + * @size: Size of partition table (in bytes) + * @block_size: Size of block on device (in bytes) + * @block_count: Number of blocks on device. + * @count: Count of the number of entires + * @entries: Array of partition entries. 
+ */ +struct ffs_hdr { + uint32_t version; + uint32_t size; + uint32_t block_size; + uint32_t block_count; + uint32_t count; + struct ffs_entry *part; + struct ffs_entry **entries; + unsigned int entries_size; +}; + +#endif /* __FFS_H__ */ diff --git a/roms/skiboot/libflash/file.c b/roms/skiboot/libflash/file.c new file mode 100644 index 000000000..fbaf79243 --- /dev/null +++ b/roms/skiboot/libflash/file.c @@ -0,0 +1,386 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* Copyright 2013-2019 IBM Corp. */ + +#define _GNU_SOURCE +#include <errno.h> +#include <inttypes.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/ioctl.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <unistd.h> +#include <limits.h> + +#include <ccan/container_of/container_of.h> + +#include <mtd/mtd-abi.h> + +#include "libflash.h" +#include "libflash/file.h" +#include "blocklevel.h" + +struct file_data { + int fd; + char *name; + char *path; + struct blocklevel_device bl; +}; + +static int file_release(struct blocklevel_device *bl) +{ + struct file_data *file_data = container_of(bl, struct file_data, bl); + close(file_data->fd); + file_data->fd = -1; + return 0; +} + +static int file_reacquire(struct blocklevel_device *bl) +{ + struct file_data *file_data = container_of(bl, struct file_data, bl); + int fd; + + fd = open(file_data->path, O_RDWR); + if (fd == -1) + return FLASH_ERR_PARM_ERROR; + file_data->fd = fd; + return 0; +} + +static int file_read(struct blocklevel_device *bl, uint64_t pos, void *buf, uint64_t len) +{ + struct file_data *file_data = container_of(bl, struct file_data, bl); + int rc, count = 0; + + rc = lseek(file_data->fd, pos, SEEK_SET); + /* errno should remain set */ + if (rc != pos) + return FLASH_ERR_PARM_ERROR; + + while (count < len) { + rc = read(file_data->fd, buf, len - count); + /* errno should remain set */ + if (rc == -1 || rc == 0) + return FLASH_ERR_BAD_READ; + + buf += rc; + count += rc; + } + + return 0; +} + +static int file_write(struct blocklevel_device *bl, uint64_t dst, const void *src, + uint64_t len) +{ + struct file_data *file_data = container_of(bl, struct file_data, bl); + int rc, count = 0; + + rc = lseek(file_data->fd, dst, SEEK_SET); + /* errno should remain set */ + if (rc != dst) + return FLASH_ERR_PARM_ERROR; + + while (count < len) { + rc = write(file_data->fd, src, len - count); + /* errno should remain set */ + if (rc == -1) + return FLASH_ERR_VERIFY_FAILURE; + + src += rc; + count += rc; + } + + return 0; +} + +/* + * Due to to the fact these interfaces are ultimately supposed to deal with + * flash, an erase function must be implemented even when the flash images + * are backed by regular files. + * Also, erasing flash leaves all the bits set to 1. This may be expected + * by higher level functions so this function should also emulate that + */ +static int file_erase(struct blocklevel_device *bl, uint64_t dst, uint64_t len) +{ + static char buf[4096]; + int i = 0; + int rc; + + memset(buf, ~0, sizeof(buf)); + + while (len - i > 0) { + rc = file_write(bl, dst + i, buf, len - i > sizeof(buf) ? sizeof(buf) : len - i); + if (rc) + return rc; + i += (len - i > sizeof(buf)) ? 
sizeof(buf) : len - i; + } + + return 0; +} + +static int mtd_erase(struct blocklevel_device *bl, uint64_t dst, uint64_t len) +{ + struct file_data *file_data = container_of(bl, struct file_data, bl); + int err; + + FL_DBG("%s: dst: 0x%" PRIx64 ", len: 0x%" PRIx64 "\n", __func__, dst, len); + + /* + * Some kernels that pflash supports do not know about the 64bit + * version of the ioctl() therefore we'll just use the 32bit (which + * should always be supported...) unless we MUST use the 64bit and + * then lets just hope the kernel knows how to deal with it. If it + * is unsupported the ioctl() will fail and we'll report that - + * there is no other option. + * + * Furthermore, even very recent MTD layers and drivers aren't + * particularly good at not blocking in the kernel. This creates + * unexpected behaviour in userspace tools using these functions. + * In the absence of significant work inside the kernel, we'll just + * split stuff up here for convenience. + * We can assume everything is aligned here. + */ + while (len) { + if (dst > UINT_MAX || len > UINT_MAX) { + struct erase_info_user64 erase_info = { + .start = dst, + .length = file_data->bl.erase_mask + 1 + }; + + if (ioctl(file_data->fd, MEMERASE64, &erase_info) == -1) { + err = errno; + if (err == 25) /* Kernel doesn't do 64bit MTD erase ioctl() */ + FL_DBG("Attempted a 64bit erase on a kernel which doesn't support it\n"); + FL_ERR("%s: IOCTL to kernel failed! %s\n", __func__, strerror(err)); + errno = err; + return FLASH_ERR_PARM_ERROR; + } + } else { + struct erase_info_user erase_info = { + .start = dst, + .length = file_data->bl.erase_mask + 1 + }; + if (ioctl(file_data->fd, MEMERASE, &erase_info) == -1) { + err = errno; + FL_ERR("%s: IOCTL to kernel failed! %s\n", __func__, strerror(err)); + errno = err; + return FLASH_ERR_PARM_ERROR; + } + } + dst += file_data->bl.erase_mask + 1; + len -= file_data->bl.erase_mask + 1; + } + return 0; +} + +static int get_info_name(struct file_data *file_data, char **name) +{ + char *path, *lpath; + int len; + struct stat st; + + if (asprintf(&path, "/proc/self/fd/%d", file_data->fd) == -1) + return FLASH_ERR_MALLOC_FAILED; + + if (lstat(path, &st)) { + free(path); + return FLASH_ERR_PARM_ERROR; + } + + lpath = malloc(st.st_size + 1); + if (!lpath) { + free(path); + return FLASH_ERR_MALLOC_FAILED; + } + + len = readlink(path, lpath, st.st_size +1); + if (len == -1) { + free(path); + free(lpath); + return FLASH_ERR_PARM_ERROR; + } + lpath[len] = '\0'; + + *name = lpath; + + free(path); + return 0; +} + + +static int mtd_get_info(struct blocklevel_device *bl, const char **name, + uint64_t *total_size, uint32_t *erase_granule) +{ + struct file_data *file_data = container_of(bl, struct file_data, bl); + struct mtd_info_user mtd_info; + int rc; + + rc = ioctl(file_data->fd, MEMGETINFO, &mtd_info); + if (rc == -1) + return FLASH_ERR_BAD_READ; + + if (total_size) + *total_size = mtd_info.size; + + if (erase_granule) + *erase_granule = mtd_info.erasesize; + + if (name) { + rc = get_info_name(file_data, &(file_data->name)); + if (rc) + return rc; + *name = file_data->name; + } + + return 0; +} + +static int file_get_info(struct blocklevel_device *bl, const char **name, + uint64_t *total_size, uint32_t *erase_granule) +{ + struct file_data *file_data = container_of(bl, struct file_data, bl); + struct stat st; + int rc; + + if (fstat(file_data->fd, &st)) + return FLASH_ERR_PARM_ERROR; + + if (total_size) + *total_size = st.st_size; + + if (erase_granule) + *erase_granule = 1; + + if (name) { + rc = 
get_info_name(file_data, &(file_data->name)); + if (rc) + return rc; + *name = file_data->name; + } + + return 0; +} + +int file_init(int fd, struct blocklevel_device **bl) +{ + struct file_data *file_data; + struct stat sbuf; + + if (!bl) + return FLASH_ERR_PARM_ERROR; + + *bl = NULL; + + file_data = calloc(1, sizeof(struct file_data)); + if (!file_data) + return FLASH_ERR_MALLOC_FAILED; + + file_data->fd = fd; + file_data->bl.reacquire = &file_reacquire; + file_data->bl.release = &file_release; + file_data->bl.read = &file_read; + file_data->bl.write = &file_write; + file_data->bl.erase = &file_erase; + file_data->bl.get_info = &file_get_info; + file_data->bl.erase_mask = 0; + + /* + * If the blocklevel_device is only inited with file_init() then keep + * alive is assumed, as fd will change otherwise and this may break + * callers assumptions. + */ + file_data->bl.keep_alive = 1; + + /* + * Unfortunately not all file descriptors are created equal... + * Here we check to see if the file descriptor is to an MTD device, in + * which case we have to erase and get the size of it differently. + */ + if (fstat(file_data->fd, &sbuf) == -1) + goto out; + + /* Won't be able to handle other than MTD devices for now */ + if (S_ISCHR(sbuf.st_mode)) { + file_data->bl.erase = &mtd_erase; + file_data->bl.get_info = &mtd_get_info; + file_data->bl.flags = WRITE_NEED_ERASE; + mtd_get_info(&file_data->bl, NULL, NULL, &(file_data->bl.erase_mask)); + file_data->bl.erase_mask--; + } else if (!S_ISREG(sbuf.st_mode)) { + /* If not a char device or a regular file something went wrong */ + goto out; + } + + *bl = &(file_data->bl); + return 0; +out: + free(file_data); + return FLASH_ERR_PARM_ERROR; +} + +int file_init_path(const char *path, int *r_fd, bool keep_alive, + struct blocklevel_device **bl) +{ + int fd, rc; + char *path_ptr = NULL; + struct file_data *file_data; + + if (!path || !bl) + return FLASH_ERR_PARM_ERROR; + + fd = open(path, O_RDWR); + if (fd == -1) + return FLASH_ERR_PARM_ERROR; + + /* + * strdup() first so don't have to deal with malloc failure after + * file_init() + */ + path_ptr = strdup(path); + if (!path_ptr) { + rc = FLASH_ERR_MALLOC_FAILED; + goto out; + } + + rc = file_init(fd, bl); + if (rc) + goto out; + + file_data = container_of(*bl, struct file_data, bl); + file_data->bl.keep_alive = keep_alive; + file_data->path = path_ptr; + + if (r_fd) + *r_fd = fd; + + return rc; +out: + free(path_ptr); + close(fd); + return rc; +} + +void file_exit(struct blocklevel_device *bl) +{ + struct file_data *file_data; + if (bl) { + free(bl->ecc_prot.prot); + file_data = container_of(bl, struct file_data, bl); + free(file_data->name); + free(file_data->path); + free(file_data); + } +} + +void file_exit_close(struct blocklevel_device *bl) +{ + struct file_data *file_data; + if (bl) { + file_data = container_of(bl, struct file_data, bl); + close(file_data->fd); + file_exit(bl); + } +} diff --git a/roms/skiboot/libflash/file.h b/roms/skiboot/libflash/file.h new file mode 100644 index 000000000..25cf73bf4 --- /dev/null +++ b/roms/skiboot/libflash/file.h @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* Copyright 2013-2016 IBM Corp. 
*/ + +#ifndef __LIBFLASH_FILE_H +#define __LIBFLASH_FILE_H + +#include <stdbool.h> + +#include "blocklevel.h" + +/* + * Blockevel functions created leave errno set on errors, as these calls + * often boil down to standard read() and write() calls, inspecting errno + * may prove useful + */ + +int file_init(int fd, struct blocklevel_device **bl); +void file_exit(struct blocklevel_device *bl); + +/* + * file_init_path() is a convenience wrapper which will open the path and call + * file_init(). The call to open happens with O_RDWR and no additional flags + * Because file_exit() doesn't close the file descriptor, file_init_path() + * makes it available. + */ +int file_init_path(const char *path, int *fd, bool keep_alive, struct blocklevel_device **bl); + +/* + * file_exit_close is a convenience wrapper which will close the open + * file descriptor and call file_exit(). + */ +void file_exit_close(struct blocklevel_device *bl); + +#endif /* __LIBFLASH_FILE_H */ diff --git a/roms/skiboot/libflash/ipmi-hiomap.c b/roms/skiboot/libflash/ipmi-hiomap.c new file mode 100644 index 000000000..c889d6316 --- /dev/null +++ b/roms/skiboot/libflash/ipmi-hiomap.c @@ -0,0 +1,1012 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* Copyright 2018-2019 IBM Corp. */ + +#define pr_fmt(fmt) "HIOMAP: " fmt + +#include <hiomap.h> +#include <inttypes.h> +#include <ipmi.h> +#include <lpc.h> +#include <mem_region-malloc.h> +#include <stdbool.h> +#include <stdint.h> +#include <string.h> + +#include <ccan/container_of/container_of.h> + +#include "errors.h" +#include "ipmi-hiomap.h" + +#define CMD_OP_HIOMAP_EVENT 0x0f + +struct ipmi_hiomap_result { + struct ipmi_hiomap *ctx; + int16_t cc; +}; + +#define RESULT_INIT(_name, _ctx) struct ipmi_hiomap_result _name = { _ctx, -1 } + +static inline uint32_t blocks_to_bytes(struct ipmi_hiomap *ctx, uint16_t blocks) +{ + return blocks << ctx->block_size_shift; +} + +static inline uint16_t bytes_to_blocks(struct ipmi_hiomap *ctx, uint32_t bytes) +{ + return bytes >> ctx->block_size_shift; +} + +static inline uint16_t bytes_to_blocks_align_up(struct ipmi_hiomap *ctx, + uint32_t pos, uint32_t len) +{ + uint32_t block_size = 1 << ctx->block_size_shift; + uint32_t delta = pos & (block_size - 1); + uint32_t aligned = ALIGN_UP((len + delta), block_size); + uint32_t blocks = aligned >> ctx->block_size_shift; + /* Our protocol can handle block count < sizeof(u16) */ + uint32_t mask = ((1 << 16) - 1); + + assert(!(blocks & ~mask)); + + return blocks & mask; +} + +/* Call under ctx->lock */ +static int hiomap_protocol_ready(struct ipmi_hiomap *ctx) +{ + if (!(ctx->bmc_state & HIOMAP_E_DAEMON_READY)) + return FLASH_ERR_DEVICE_GONE; + if (ctx->bmc_state & HIOMAP_E_FLASH_LOST) + return FLASH_ERR_AGAIN; + + return 0; +} + +static int hiomap_queue_msg_sync(struct ipmi_hiomap *ctx, struct ipmi_msg *msg) +{ + int rc; + + /* + * There's an unavoidable TOCTOU race here with the BMC sending an + * event saying it's no-longer available right after we test but before + * we call into the IPMI stack to send the message. + * hiomap_queue_msg_sync() exists to capture the race in a single + * location. 
+ */ + lock(&ctx->lock); + rc = hiomap_protocol_ready(ctx); + unlock(&ctx->lock); + if (rc) { + ipmi_free_msg(msg); + return rc; + } + + ipmi_queue_msg_sync(msg); + + return 0; +} + +/* Call under ctx->lock */ +static int hiomap_window_valid(struct ipmi_hiomap *ctx, uint64_t pos, + uint64_t len) +{ + if (ctx->bmc_state & HIOMAP_E_FLASH_LOST) + return FLASH_ERR_AGAIN; + if (ctx->bmc_state & HIOMAP_E_PROTOCOL_RESET) + return FLASH_ERR_AGAIN; + if (ctx->bmc_state & HIOMAP_E_WINDOW_RESET) + return FLASH_ERR_AGAIN; + if (ctx->window_state == closed_window) + return FLASH_ERR_PARM_ERROR; + if (pos < ctx->current.cur_pos) + return FLASH_ERR_PARM_ERROR; + if ((pos + len) > (ctx->current.cur_pos + ctx->current.size)) + return FLASH_ERR_PARM_ERROR; + + return 0; +} + +static void ipmi_hiomap_cmd_cb(struct ipmi_msg *msg) +{ + struct ipmi_hiomap_result *res = msg->user_data; + struct ipmi_hiomap *ctx = res->ctx; + + res->cc = msg->cc; + if (msg->cc != IPMI_CC_NO_ERROR) { + ipmi_free_msg(msg); + return; + } + + /* We at least need the command and sequence */ + if (msg->resp_size < 2) { + prerror("Illegal response size: %u\n", msg->resp_size); + res->cc = IPMI_ERR_UNSPECIFIED; + ipmi_free_msg(msg); + return; + } + + if (msg->data[1] != ctx->seq) { + prerror("Unmatched sequence number: wanted %u got %u\n", + ctx->seq, msg->data[1]); + res->cc = IPMI_ERR_UNSPECIFIED; + ipmi_free_msg(msg); + return; + } + + switch (msg->data[0]) { + case HIOMAP_C_GET_INFO: + { + struct hiomap_v2_info *parms; + + if (msg->resp_size != 6) { + prerror("%u: Unexpected response size: %u\n", msg->data[0], + msg->resp_size); + res->cc = IPMI_ERR_UNSPECIFIED; + break; + } + + ctx->version = msg->data[2]; + if (ctx->version < 2) { + prerror("Failed to negotiate protocol v2 or higher: %d\n", + ctx->version); + res->cc = IPMI_ERR_UNSPECIFIED; + break; + } + + parms = (struct hiomap_v2_info *)&msg->data[3]; + ctx->block_size_shift = parms->block_size_shift; + ctx->timeout = le16_to_cpu(parms->timeout); + break; + } + case HIOMAP_C_GET_FLASH_INFO: + { + struct hiomap_v2_flash_info *parms; + + if (msg->resp_size != 6) { + prerror("%u: Unexpected response size: %u\n", msg->data[0], + msg->resp_size); + res->cc = IPMI_ERR_UNSPECIFIED; + break; + } + + parms = (struct hiomap_v2_flash_info *)&msg->data[2]; + ctx->total_size = + blocks_to_bytes(ctx, le16_to_cpu(parms->total_size)); + ctx->erase_granule = + blocks_to_bytes(ctx, le16_to_cpu(parms->erase_granule)); + break; + } + case HIOMAP_C_CREATE_READ_WINDOW: + case HIOMAP_C_CREATE_WRITE_WINDOW: + { + struct hiomap_v2_create_window *parms; + + if (msg->resp_size != 8) { + prerror("%u: Unexpected response size: %u\n", msg->data[0], + msg->resp_size); + res->cc = IPMI_ERR_UNSPECIFIED; + break; + } + + parms = (struct hiomap_v2_create_window *)&msg->data[2]; + + ctx->current.lpc_addr = + blocks_to_bytes(ctx, le16_to_cpu(parms->lpc_addr)); + ctx->current.size = + blocks_to_bytes(ctx, le16_to_cpu(parms->size)); + ctx->current.cur_pos = + blocks_to_bytes(ctx, le16_to_cpu(parms->offset)); + + lock(&ctx->lock); + if (msg->data[0] == HIOMAP_C_CREATE_READ_WINDOW) + ctx->window_state = read_window; + else + ctx->window_state = write_window; + unlock(&ctx->lock); + + break; + } + case HIOMAP_C_MARK_DIRTY: + case HIOMAP_C_FLUSH: + case HIOMAP_C_ACK: + case HIOMAP_C_ERASE: + case HIOMAP_C_RESET: + if (msg->resp_size != 2) { + prerror("%u: Unexpected response size: %u\n", msg->data[0], + msg->resp_size); + res->cc = IPMI_ERR_UNSPECIFIED; + break; + } + break; + default: + prlog(PR_WARNING, "Unimplemented 
command handler: %u\n", + msg->data[0]); + break; + }; + ipmi_free_msg(msg); +} + +static void hiomap_init(struct ipmi_hiomap *ctx) +{ + /* + * Speculatively mark the daemon as available so we attempt to perform + * the handshake without immediately bailing out. + */ + lock(&ctx->lock); + ctx->bmc_state = HIOMAP_E_DAEMON_READY; + unlock(&ctx->lock); +} + +static int hiomap_get_info(struct ipmi_hiomap *ctx) +{ + RESULT_INIT(res, ctx); + unsigned char req[3]; + struct ipmi_msg *msg; + int rc; + + /* Negotiate protocol version 2 */ + req[0] = HIOMAP_C_GET_INFO; + req[1] = ++ctx->seq; + req[2] = HIOMAP_V2; + + msg = ipmi_mkmsg(IPMI_DEFAULT_INTERFACE, + bmc_platform->sw->ipmi_oem_hiomap_cmd, + ipmi_hiomap_cmd_cb, &res, req, sizeof(req), 6); + + rc = hiomap_queue_msg_sync(ctx, msg); + if (rc) + return rc; + + if (res.cc != IPMI_CC_NO_ERROR) { + prerror("%s failed: %d\n", __func__, res.cc); + return FLASH_ERR_PARM_ERROR; /* XXX: Find something better? */ + } + + return 0; +} + +static int hiomap_get_flash_info(struct ipmi_hiomap *ctx) +{ + RESULT_INIT(res, ctx); + unsigned char req[2]; + struct ipmi_msg *msg; + int rc; + + req[0] = HIOMAP_C_GET_FLASH_INFO; + req[1] = ++ctx->seq; + msg = ipmi_mkmsg(IPMI_DEFAULT_INTERFACE, + bmc_platform->sw->ipmi_oem_hiomap_cmd, + ipmi_hiomap_cmd_cb, &res, req, sizeof(req), 2 + 2 + 2); + + rc = hiomap_queue_msg_sync(ctx, msg); + if (rc) + return rc; + + if (res.cc != IPMI_CC_NO_ERROR) { + prerror("%s failed: %d\n", __func__, res.cc); + return FLASH_ERR_PARM_ERROR; /* XXX: Find something better? */ + } + + return 0; +} + +static int hiomap_window_move(struct ipmi_hiomap *ctx, uint8_t command, + uint64_t pos, uint64_t len, uint64_t *size) +{ + enum lpc_window_state want_state; + struct hiomap_v2_range *range; + RESULT_INIT(res, ctx); + unsigned char req[6]; + struct ipmi_msg *msg; + bool valid_state; + bool is_read; + int rc; + + is_read = (command == HIOMAP_C_CREATE_READ_WINDOW); + want_state = is_read ? read_window : write_window; + + lock(&ctx->lock); + + valid_state = want_state == ctx->window_state; + rc = hiomap_window_valid(ctx, pos, len); + if (valid_state && !rc) { + unlock(&ctx->lock); + *size = len; + return 0; + } + + ctx->window_state = closed_window; + + unlock(&ctx->lock); + + req[0] = command; + req[1] = ++ctx->seq; + + range = (struct hiomap_v2_range *)&req[2]; + range->offset = cpu_to_le16(bytes_to_blocks(ctx, pos)); + range->size = cpu_to_le16(bytes_to_blocks_align_up(ctx, pos, len)); + + msg = ipmi_mkmsg(IPMI_DEFAULT_INTERFACE, + bmc_platform->sw->ipmi_oem_hiomap_cmd, + ipmi_hiomap_cmd_cb, &res, req, sizeof(req), + 2 + 2 + 2 + 2); + + rc = hiomap_queue_msg_sync(ctx, msg); + if (rc) + return rc; + + if (res.cc != IPMI_CC_NO_ERROR) { + prlog(PR_INFO, "%s failed: %d\n", __func__, res.cc); + return FLASH_ERR_PARM_ERROR; /* XXX: Find something better? */ + } + + lock(&ctx->lock); + *size = len; + /* Is length past the end of the window? */ + if ((pos + len) > (ctx->current.cur_pos + ctx->current.size)) + /* Adjust size to meet current window */ + *size = (ctx->current.cur_pos + ctx->current.size) - pos; + + if (len != 0 && *size == 0) { + unlock(&ctx->lock); + prerror("Invalid window properties: len: %"PRIu64", size: %"PRIu64"\n", + len, *size); + return FLASH_ERR_PARM_ERROR; + } + + prlog(PR_DEBUG, "Opened %s window from 0x%x for %u bytes at 0x%x\n", + (command == HIOMAP_C_CREATE_READ_WINDOW) ? 
"read" : "write", + ctx->current.cur_pos, ctx->current.size, ctx->current.lpc_addr); + + unlock(&ctx->lock); + + return 0; +} + +static int hiomap_mark_dirty(struct ipmi_hiomap *ctx, uint64_t offset, + uint64_t size) +{ + struct hiomap_v2_range *range; + enum lpc_window_state state; + RESULT_INIT(res, ctx); + unsigned char req[6]; + struct ipmi_msg *msg; + uint32_t pos; + int rc; + + lock(&ctx->lock); + state = ctx->window_state; + unlock(&ctx->lock); + + if (state != write_window) + return FLASH_ERR_PARM_ERROR; + + req[0] = HIOMAP_C_MARK_DIRTY; + req[1] = ++ctx->seq; + + pos = offset - ctx->current.cur_pos; + range = (struct hiomap_v2_range *)&req[2]; + range->offset = cpu_to_le16(bytes_to_blocks(ctx, pos)); + range->size = cpu_to_le16(bytes_to_blocks_align_up(ctx, pos, size)); + + msg = ipmi_mkmsg(IPMI_DEFAULT_INTERFACE, + bmc_platform->sw->ipmi_oem_hiomap_cmd, + ipmi_hiomap_cmd_cb, &res, req, sizeof(req), 2); + + rc = hiomap_queue_msg_sync(ctx, msg); + if (rc) + return rc; + + if (res.cc != IPMI_CC_NO_ERROR) { + prerror("%s failed: %d\n", __func__, res.cc); + return FLASH_ERR_PARM_ERROR; + } + + prlog(PR_DEBUG, "Marked flash dirty at 0x%" PRIx64 " for %" PRIu64 "\n", + offset, size); + + return 0; +} + +static int hiomap_flush(struct ipmi_hiomap *ctx) +{ + enum lpc_window_state state; + RESULT_INIT(res, ctx); + unsigned char req[2]; + struct ipmi_msg *msg; + int rc; + + lock(&ctx->lock); + state = ctx->window_state; + unlock(&ctx->lock); + + if (state != write_window) + return FLASH_ERR_PARM_ERROR; + + req[0] = HIOMAP_C_FLUSH; + req[1] = ++ctx->seq; + + msg = ipmi_mkmsg(IPMI_DEFAULT_INTERFACE, + bmc_platform->sw->ipmi_oem_hiomap_cmd, + ipmi_hiomap_cmd_cb, &res, req, sizeof(req), 2); + + rc = hiomap_queue_msg_sync(ctx, msg); + if (rc) + return rc; + + if (res.cc != IPMI_CC_NO_ERROR) { + prerror("%s failed: %d\n", __func__, res.cc); + return FLASH_ERR_PARM_ERROR; + } + + prlog(PR_DEBUG, "Flushed writes\n"); + + return 0; +} + +static int hiomap_ack(struct ipmi_hiomap *ctx, uint8_t ack) +{ + RESULT_INIT(res, ctx); + unsigned char req[3]; + struct ipmi_msg *msg; + int rc; + + req[0] = HIOMAP_C_ACK; + req[1] = ++ctx->seq; + req[2] = ack; + + msg = ipmi_mkmsg(IPMI_DEFAULT_INTERFACE, + bmc_platform->sw->ipmi_oem_hiomap_cmd, + ipmi_hiomap_cmd_cb, &res, req, sizeof(req), 2); + + rc = hiomap_queue_msg_sync(ctx, msg); + if (rc) + return rc; + + if (res.cc != IPMI_CC_NO_ERROR) { + prlog(PR_DEBUG, "%s failed: %d\n", __func__, res.cc); + return FLASH_ERR_PARM_ERROR; + } + + prlog(PR_DEBUG, "Acked events: 0x%x\n", ack); + + return 0; +} + +static int hiomap_erase(struct ipmi_hiomap *ctx, uint64_t offset, + uint64_t size) +{ + struct hiomap_v2_range *range; + enum lpc_window_state state; + RESULT_INIT(res, ctx); + unsigned char req[6]; + struct ipmi_msg *msg; + uint32_t pos; + int rc; + + lock(&ctx->lock); + state = ctx->window_state; + unlock(&ctx->lock); + + if (state != write_window) + return FLASH_ERR_PARM_ERROR; + + req[0] = HIOMAP_C_ERASE; + req[1] = ++ctx->seq; + + pos = offset - ctx->current.cur_pos; + range = (struct hiomap_v2_range *)&req[2]; + range->offset = cpu_to_le16(bytes_to_blocks(ctx, pos)); + range->size = cpu_to_le16(bytes_to_blocks_align_up(ctx, pos, size)); + + msg = ipmi_mkmsg(IPMI_DEFAULT_INTERFACE, + bmc_platform->sw->ipmi_oem_hiomap_cmd, + ipmi_hiomap_cmd_cb, &res, req, sizeof(req), 2); + rc = hiomap_queue_msg_sync(ctx, msg); + if (rc) + return rc; + + if (res.cc != IPMI_CC_NO_ERROR) { + prerror("%s failed: %d\n", __func__, res.cc); + return FLASH_ERR_PARM_ERROR; + } + + 
prlog(PR_DEBUG, "Erased flash at 0x%" PRIx64 " for %" PRIu64 "\n", + offset, size); + + return 0; +} + +static bool hiomap_reset(struct ipmi_hiomap *ctx) +{ + RESULT_INIT(res, ctx); + unsigned char req[2]; + struct ipmi_msg *msg; + + prlog(PR_NOTICE, "Reset\n"); + + req[0] = HIOMAP_C_RESET; + req[1] = ++ctx->seq; + msg = ipmi_mkmsg(IPMI_DEFAULT_INTERFACE, + bmc_platform->sw->ipmi_oem_hiomap_cmd, + ipmi_hiomap_cmd_cb, &res, req, sizeof(req), 2); + ipmi_queue_msg_sync(msg); + + if (res.cc != IPMI_CC_NO_ERROR) { + prlog(PR_ERR, "%s failed: %d\n", __func__, res.cc); + return false; + } + + return true; +} + +static void hiomap_event(uint8_t events, void *context) +{ + struct ipmi_hiomap *ctx = context; + + prlog(PR_DEBUG, "Received events: 0x%x\n", events); + + lock(&ctx->lock); + ctx->bmc_state = events | (ctx->bmc_state & HIOMAP_E_ACK_MASK); + unlock(&ctx->lock); +} + +static int lpc_window_read(struct ipmi_hiomap *ctx, uint32_t pos, + void *buf, uint32_t len) +{ + uint32_t off = ctx->current.lpc_addr + (pos - ctx->current.cur_pos); + int rc; + + if ((ctx->current.lpc_addr + ctx->current.size) < (off + len)) + return FLASH_ERR_PARM_ERROR; + + prlog(PR_TRACE, "Reading at 0x%08x for 0x%08x offset: 0x%08x\n", + pos, len, off); + + while(len) { + uint32_t chunk; + uint32_t dat; + + /* XXX: make this read until it's aligned */ + if (len > 3 && !(off & 3)) { + rc = lpc_read(OPAL_LPC_FW, off, &dat, 4); + if (!rc) { + /* + * lpc_read swaps to CPU endian but it's not + * really a 32-bit value, so convert back. + */ + *(__be32 *)buf = cpu_to_be32(dat); + } + chunk = 4; + } else { + rc = lpc_read(OPAL_LPC_FW, off, &dat, 1); + if (!rc) + *(uint8_t *)buf = dat; + chunk = 1; + } + if (rc) { + prlog(PR_ERR, "lpc_read failure %d to FW 0x%08x\n", rc, off); + return rc; + } + len -= chunk; + off += chunk; + buf += chunk; + } + + return 0; +} + +static int lpc_window_write(struct ipmi_hiomap *ctx, uint32_t pos, + const void *buf, uint32_t len) +{ + uint32_t off = ctx->current.lpc_addr + (pos - ctx->current.cur_pos); + enum lpc_window_state state; + int rc; + + lock(&ctx->lock); + state = ctx->window_state; + unlock(&ctx->lock); + + if (state != write_window) + return FLASH_ERR_PARM_ERROR; + + if ((ctx->current.lpc_addr + ctx->current.size) < (off + len)) + return FLASH_ERR_PARM_ERROR; + + prlog(PR_TRACE, "Writing at 0x%08x for 0x%08x offset: 0x%08x\n", + pos, len, off); + + while(len) { + uint32_t chunk; + + if (len > 3 && !(off & 3)) { + /* endian swap: see lpc_window_write */ + uint32_t dat = be32_to_cpu(*(__be32 *)buf); + + rc = lpc_write(OPAL_LPC_FW, off, dat, 4); + chunk = 4; + } else { + uint8_t dat = *(uint8_t *)buf; + + rc = lpc_write(OPAL_LPC_FW, off, dat, 1); + chunk = 1; + } + if (rc) { + prlog(PR_ERR, "lpc_write failure %d to FW 0x%08x\n", rc, off); + return rc; + } + len -= chunk; + off += chunk; + buf += chunk; + } + + return 0; +} + +/* Best-effort asynchronous event handling by blocklevel callbacks */ +static int ipmi_hiomap_handle_events(struct ipmi_hiomap *ctx) +{ + uint8_t status; + int rc; + + lock(&ctx->lock); + + status = ctx->bmc_state; + + /* + * Immediately clear the ackable events to make sure we don't race to + * clear them after dropping the lock, as we may lose protocol or + * window state if a race materialises. In the event of a failure where + * we haven't completed the recovery, the state we mask out below gets + * OR'ed back in to avoid losing it. 
+ */ + ctx->bmc_state &= ~HIOMAP_E_ACK_MASK; + + /* + * We won't be attempting to restore window state - + * ipmi_hiomap_handle_events() is followed by hiomap_window_move() in + * all cases. Attempting restoration after HIOMAP_E_PROTOCOL_RESET or + * HIOMAP_E_WINDOW_RESET can be wasteful if we immediately shift the + * window elsewhere, and if it does not need to be shifted with respect + * to the subsequent request then hiomap_window_move() will handle + * re-opening it from the closed state. + * + * Therefore it is enough to mark the window as closed to consider it + * recovered. + */ + if (status & (HIOMAP_E_PROTOCOL_RESET | HIOMAP_E_WINDOW_RESET)) + ctx->window_state = closed_window; + + unlock(&ctx->lock); + + /* + * If there's anything to acknowledge, do so in the one request to + * minimise overhead. By sending the ACK prior to performing the + * protocol recovery we ensure that even with coalesced resets we still + * end up in the recovered state and not unknowingly stuck in a reset + * state. We may receive reset events after the ACK but prior to the + * recovery procedures being run, but this just means that we will + * needlessly perform recovery on the following invocation of + * ipmi_hiomap_handle_events(). If the reset event is a + * HIOMAP_E_WINDOW_RESET it is enough that the window is already marked + * as closed above - future accesses will force it to be re-opened and + * the BMC's cache must be valid if opening the window is successful. + */ + if (status & HIOMAP_E_ACK_MASK) { + /* ACK is unversioned, can send it if the daemon is ready */ + rc = hiomap_ack(ctx, status & HIOMAP_E_ACK_MASK); + if (rc) { + prlog(PR_DEBUG, "Failed to ack events: 0x%x\n", + status & HIOMAP_E_ACK_MASK); + goto restore; + } + } + + if (status & HIOMAP_E_PROTOCOL_RESET) { + prlog(PR_INFO, "Protocol was reset\n"); + + rc = hiomap_get_info(ctx); + if (rc) { + prerror("Failure to renegotiate after protocol reset\n"); + goto restore; + } + + rc = hiomap_get_flash_info(ctx); + if (rc) { + prerror("Failure to fetch flash info after protocol reset\n"); + goto restore; + } + + prlog(PR_INFO, "Restored state after protocol reset\n"); + } + + /* + * As there's no change to the protocol on HIOMAP_E_WINDOW_RESET we + * simply need to open a window to recover, which as mentioned above is + * handled by hiomap_window_move() after our cleanup here. + */ + + return 0; + +restore: + /* + * Conservatively restore the events to the un-acked state to avoid + * losing events due to races. It might cause us to restore state more + * than necessary, but never less than necessary. 
+ */ + lock(&ctx->lock); + ctx->bmc_state |= (status & HIOMAP_E_ACK_MASK); + unlock(&ctx->lock); + + return rc; +} + +static int ipmi_hiomap_read(struct blocklevel_device *bl, uint64_t pos, + void *buf, uint64_t len) +{ + struct ipmi_hiomap *ctx; + uint64_t size; + int rc = 0; + + /* LPC is only 32bit */ + if (pos > UINT_MAX || len > UINT_MAX) + return FLASH_ERR_PARM_ERROR; + + ctx = container_of(bl, struct ipmi_hiomap, bl); + + rc = ipmi_hiomap_handle_events(ctx); + if (rc) + return rc; + + prlog(PR_TRACE, "Flash read at %#" PRIx64 " for %#" PRIx64 "\n", pos, + len); + while (len > 0) { + /* Move window and get a new size to read */ + rc = hiomap_window_move(ctx, HIOMAP_C_CREATE_READ_WINDOW, pos, + len, &size); + if (rc) + return rc; + + /* Perform the read for this window */ + rc = lpc_window_read(ctx, pos, buf, size); + if (rc) + return rc; + + /* Check we can trust what we read */ + lock(&ctx->lock); + rc = hiomap_window_valid(ctx, pos, size); + unlock(&ctx->lock); + if (rc) + return rc; + + len -= size; + pos += size; + buf += size; + } + return rc; + +} + +static int ipmi_hiomap_write(struct blocklevel_device *bl, uint64_t pos, + const void *buf, uint64_t len) +{ + struct ipmi_hiomap *ctx; + uint64_t size; + int rc = 0; + + /* LPC is only 32bit */ + if (pos > UINT_MAX || len > UINT_MAX) + return FLASH_ERR_PARM_ERROR; + + ctx = container_of(bl, struct ipmi_hiomap, bl); + + rc = ipmi_hiomap_handle_events(ctx); + if (rc) + return rc; + + prlog(PR_TRACE, "Flash write at %#" PRIx64 " for %#" PRIx64 "\n", pos, + len); + while (len > 0) { + /* Move window and get a new size to read */ + rc = hiomap_window_move(ctx, HIOMAP_C_CREATE_WRITE_WINDOW, pos, + len, &size); + if (rc) + return rc; + + /* Perform the write for this window */ + rc = lpc_window_write(ctx, pos, buf, size); + if (rc) + return rc; + + /* + * Unlike ipmi_hiomap_read() we don't explicitly test if the + * window is still valid after completing the LPC accesses as + * the following hiomap_mark_dirty() will implicitly check for + * us. In the case of a read operation there's no requirement + * that a command that validates window state follows, so the + * read implementation explicitly performs a check. + */ + + rc = hiomap_mark_dirty(ctx, pos, size); + if (rc) + return rc; + + /* + * The BMC *should* flush if the window is implicitly closed, + * but do an explicit flush here to be sure. 
+ * + * XXX: Removing this could improve performance + */ + rc = hiomap_flush(ctx); + if (rc) + return rc; + + len -= size; + pos += size; + buf += size; + } + return rc; +} + +static int ipmi_hiomap_erase(struct blocklevel_device *bl, uint64_t pos, + uint64_t len) +{ + struct ipmi_hiomap *ctx; + int rc; + + /* LPC is only 32bit */ + if (pos > UINT_MAX || len > UINT_MAX) + return FLASH_ERR_PARM_ERROR; + + ctx = container_of(bl, struct ipmi_hiomap, bl); + + rc = ipmi_hiomap_handle_events(ctx); + if (rc) + return rc; + + prlog(PR_TRACE, "Flash erase at 0x%08x for 0x%08x\n", (u32) pos, + (u32) len); + while (len > 0) { + uint64_t size; + + /* Move window and get a new size to erase */ + rc = hiomap_window_move(ctx, HIOMAP_C_CREATE_WRITE_WINDOW, pos, + len, &size); + if (rc) + return rc; + + rc = hiomap_erase(ctx, pos, size); + if (rc) + return rc; + + /* + * Flush directly, don't mark that region dirty otherwise it + * isn't clear if a write happened there or not + */ + rc = hiomap_flush(ctx); + if (rc) + return rc; + + len -= size; + pos += size; + } + + return 0; +} + +static int ipmi_hiomap_get_flash_info(struct blocklevel_device *bl, + const char **name, uint64_t *total_size, + uint32_t *erase_granule) +{ + struct ipmi_hiomap *ctx; + int rc; + + ctx = container_of(bl, struct ipmi_hiomap, bl); + + rc = ipmi_hiomap_handle_events(ctx); + if (rc) + return rc; + + rc = hiomap_get_flash_info(ctx); + if (rc) + return rc; + + ctx->bl.erase_mask = ctx->erase_granule - 1; + + if (name) + *name = NULL; + if (total_size) + *total_size = ctx->total_size; + if (erase_granule) + *erase_granule = ctx->erase_granule; + + return 0; +} + +int ipmi_hiomap_init(struct blocklevel_device **bl) +{ + struct ipmi_hiomap *ctx; + int rc; + + if (!bmc_platform->sw->ipmi_oem_hiomap_cmd) + /* FIXME: Find a better error code */ + return FLASH_ERR_DEVICE_GONE; + + if (!bl) + return FLASH_ERR_PARM_ERROR; + + *bl = NULL; + + ctx = zalloc(sizeof(struct ipmi_hiomap)); + if (!ctx) + return FLASH_ERR_MALLOC_FAILED; + + init_lock(&ctx->lock); + + ctx->bl.read = &ipmi_hiomap_read; + ctx->bl.write = &ipmi_hiomap_write; + ctx->bl.erase = &ipmi_hiomap_erase; + ctx->bl.get_info = &ipmi_hiomap_get_flash_info; + ctx->bl.exit = &ipmi_hiomap_exit; + + hiomap_init(ctx); + + /* Ack all pending ack-able events to avoid spurious failures */ + rc = hiomap_ack(ctx, HIOMAP_E_ACK_MASK); + if (rc) { + prlog(PR_DEBUG, "Failed to ack events: 0x%x\n", + HIOMAP_E_ACK_MASK); + goto err; + } + + rc = ipmi_sel_register(CMD_OP_HIOMAP_EVENT, hiomap_event, ctx); + if (rc < 0) + goto err; + + /* Negotiate protocol behaviour */ + rc = hiomap_get_info(ctx); + if (rc) { + prerror("Failed to get hiomap parameters: %d\n", rc); + goto err; + } + + /* Grab the flash parameters */ + rc = hiomap_get_flash_info(ctx); + if (rc) { + prerror("Failed to get flash parameters: %d\n", rc); + goto err; + } + + prlog(PR_NOTICE, "Negotiated hiomap protocol v%u\n", ctx->version); + prlog(PR_NOTICE, "Block size is %uKiB\n", + 1 << (ctx->block_size_shift - 10)); + prlog(PR_NOTICE, "BMC suggested flash timeout of %us\n", ctx->timeout); + prlog(PR_NOTICE, "Flash size is %uMiB\n", ctx->total_size >> 20); + prlog(PR_NOTICE, "Erase granule size is %uKiB\n", + ctx->erase_granule >> 10); + + ctx->bl.keep_alive = 0; + + *bl = &(ctx->bl); + + return 0; + +err: + free(ctx); + + return rc; +} + +bool ipmi_hiomap_exit(struct blocklevel_device *bl) +{ + bool status = true; + + struct ipmi_hiomap *ctx; + if (bl) { + ctx = container_of(bl, struct ipmi_hiomap, bl); + status = hiomap_reset(ctx); 
+ free(ctx); + } + + return status; +} diff --git a/roms/skiboot/libflash/ipmi-hiomap.h b/roms/skiboot/libflash/ipmi-hiomap.h new file mode 100644 index 000000000..2046a2763 --- /dev/null +++ b/roms/skiboot/libflash/ipmi-hiomap.h @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* Copyright 2018-2019 IBM Corp. */ + +#ifndef __LIBFLASH_IPMI_HIOMAP_H +#define __LIBFLASH_IPMI_HIOMAP_H + +#include <lock.h> +#include <stdbool.h> +#include <stdint.h> + +#include "blocklevel.h" + +enum lpc_window_state { closed_window, read_window, write_window }; + +struct lpc_window { + uint32_t lpc_addr; /* Offset into LPC space */ + uint32_t cur_pos; /* Current position of the window in the flash */ + uint32_t size; /* Size of the window into the flash */ +}; + +struct ipmi_hiomap { + /* Members protected by the blocklevel lock */ + uint8_t seq; + uint8_t version; + uint8_t block_size_shift; + uint16_t timeout; + struct blocklevel_device bl; + uint32_t total_size; + uint32_t erase_granule; + struct lpc_window current; + + /* + * update, bmc_state and window_state can be accessed by both calls + * through read/write/erase functions and the IPMI SEL handler. All + * three variables are protected by lock to avoid conflict. + */ + struct lock lock; + uint8_t bmc_state; + enum lpc_window_state window_state; +}; + +int ipmi_hiomap_init(struct blocklevel_device **bl); +bool ipmi_hiomap_exit(struct blocklevel_device *bl); + +#endif /* __LIBFLASH_IPMI_HIOMAP_H */ diff --git a/roms/skiboot/libflash/libffs.c b/roms/skiboot/libflash/libffs.c new file mode 100644 index 000000000..fe808d118 --- /dev/null +++ b/roms/skiboot/libflash/libffs.c @@ -0,0 +1,842 @@ +// SPDX-License-Identifier: Apache-2.0 +/* Copyright 2013-2019 IBM Corp. */ + +#include <limits.h> +#include <stdlib.h> +#include <stdio.h> +#include <string.h> + +#ifndef __SKIBOOT__ +#include <sys/types.h> +#include <unistd.h> +#endif + +#include "ffs.h" + +#define __unused __attribute__((unused)) +#define HDR_ENTRIES_NUM 30 + +struct ffs_handle { + struct ffs_hdr hdr; /* Converted header */ + uint32_t toc_offset; + uint32_t max_size; + /* The converted header knows how big this is */ + struct __ffs_hdr *cache; + struct blocklevel_device *bl; +}; + +static uint32_t ffs_checksum(void* data, size_t size) +{ + uint32_t i, csum = 0; + + for (i = csum = 0; i < (size/4); i++) + csum ^= ((uint32_t *)data)[i]; + return csum; +} + +/* Helper functions for typesafety and size safety */ +static uint32_t ffs_hdr_checksum(struct __ffs_hdr *hdr) +{ + return ffs_checksum(hdr, sizeof(struct __ffs_hdr)); +} + +static uint32_t ffs_entry_checksum(struct __ffs_entry *ent) +{ + return ffs_checksum(ent, sizeof(struct __ffs_entry)); +} + +static size_t ffs_hdr_raw_size(int num_entries) +{ + return sizeof(struct __ffs_hdr) + num_entries * sizeof(struct __ffs_entry); +} + +static int ffs_num_entries(struct ffs_hdr *hdr) +{ + if (hdr->count == 0) + FL_DBG("%s returned zero!\n", __func__); + return hdr->count; +} + +static int ffs_check_convert_header(struct ffs_hdr *dst, struct __ffs_hdr *src) +{ + if (be32_to_cpu(src->magic) != FFS_MAGIC) + return FFS_ERR_BAD_MAGIC; + dst->version = be32_to_cpu(src->version); + if (dst->version != FFS_VERSION_1) + return FFS_ERR_BAD_VERSION; + if (ffs_hdr_checksum(src) != 0) + return FFS_ERR_BAD_CKSUM; + if (be32_to_cpu(src->entry_size) != sizeof(struct __ffs_entry)) + return FFS_ERR_BAD_SIZE; + if ((be32_to_cpu(src->entry_size) * be32_to_cpu(src->entry_count)) > + (be32_to_cpu(src->block_size) * be32_to_cpu(src->size))) + 
return FLASH_ERR_PARM_ERROR; + + dst->block_size = be32_to_cpu(src->block_size); + dst->size = be32_to_cpu(src->size) * dst->block_size; + dst->block_count = be32_to_cpu(src->block_count); + dst->entries_size = be32_to_cpu(src->entry_count); + + return 0; +} + +static int ffs_entry_user_to_flash(struct ffs_hdr *hdr __unused, + struct __ffs_entry_user *dst, struct ffs_entry_user *src) +{ + memset(dst, 0, sizeof(struct __ffs_entry_user)); + dst->datainteg = cpu_to_be16(src->datainteg); + dst->vercheck = src->vercheck; + dst->miscflags = src->miscflags; + + return 0; +} + +static int ffs_entry_user_to_cpu(struct ffs_hdr *hdr __unused, + struct ffs_entry_user *dst, struct __ffs_entry_user *src) +{ + memset(dst, 0, sizeof(struct ffs_entry_user)); + dst->datainteg = be16_to_cpu(src->datainteg); + dst->vercheck = src->vercheck; + dst->miscflags = src->miscflags; + + return 0; +} + +static int ffs_entry_to_flash(struct ffs_hdr *hdr, + struct __ffs_entry *dst, struct ffs_entry *src) +{ + int rc, index; + + if (!hdr || !dst || !src) + return -1; + + for (index = 0; index < hdr->count && hdr->entries[index] != src; index++); + + if (index == hdr->count) + return FFS_ERR_PART_NOT_FOUND; + index++; /* On flash indexes start at 1 */ + /* + * So that the checksum gets calculated correctly at least the + * dst->checksum must be zero before calling ffs_entry_checksum() + * memset()ting the entire struct to zero is probably wise as it + * appears the reserved fields are always zero. + */ + memset(dst, 0, sizeof(*dst)); + + memcpy(dst->name, src->name, sizeof(dst->name)); + dst->name[FFS_PART_NAME_MAX] = '\0'; + dst->base = cpu_to_be32(src->base / hdr->block_size); + dst->size = cpu_to_be32(src->size / hdr->block_size); + dst->pid = cpu_to_be32(src->pid); + dst->id = cpu_to_be32(index); + dst->type = cpu_to_be32(src->type); /* TODO: Check that it is valid? */ + dst->flags = cpu_to_be32(src->flags); + dst->actual = cpu_to_be32(src->actual); + rc = ffs_entry_user_to_flash(hdr, &dst->user, &src->user); + dst->checksum = ffs_entry_checksum(dst); + + return rc; +} + +static int ffs_entry_to_cpu(struct ffs_hdr *hdr, + struct ffs_entry *dst, struct __ffs_entry *src) +{ + int rc; + + if (ffs_entry_checksum(src) != 0) + return FFS_ERR_BAD_CKSUM; + + memcpy(dst->name, src->name, sizeof(dst->name)); + dst->name[FFS_PART_NAME_MAX] = '\0'; + dst->base = be32_to_cpu(src->base) * hdr->block_size; + dst->size = be32_to_cpu(src->size) * hdr->block_size; + dst->actual = be32_to_cpu(src->actual); + dst->pid = be32_to_cpu(src->pid); + dst->type = be32_to_cpu(src->type); /* TODO: Check that it is valid? 
*/ + dst->flags = be32_to_cpu(src->flags); + rc = ffs_entry_user_to_cpu(hdr, &dst->user, &src->user); + + return rc; +} + +char *ffs_entry_user_to_string(struct ffs_entry_user *user) +{ + char *ret; + + if (!user) + return NULL; + + ret = strdup("----------"); + if (!ret) + return NULL; + + if (user->datainteg & FFS_ENRY_INTEG_ECC) + ret[0] = 'E'; + + if (user->vercheck & FFS_VERCHECK_SHA512V) + ret[1] = 'L'; + + if (user->vercheck & FFS_VERCHECK_SHA512EC) + ret[2] = 'I'; + + if (user->miscflags & FFS_MISCFLAGS_PRESERVED) + ret[3] = 'P'; + + if (user->miscflags & FFS_MISCFLAGS_READONLY) + ret[4] = 'R'; + + if (user->miscflags & FFS_MISCFLAGS_BACKUP) + ret[5] = 'B'; + + if (user->miscflags & FFS_MISCFLAGS_REPROVISION) + ret[6] = 'F'; + + if (user->miscflags & FFS_MISCFLAGS_GOLDEN) + ret[7] = 'G'; + + if (user->miscflags & FFS_MISCFLAGS_CLEARECC) + ret[8] = 'C'; + + if (user->miscflags & FFS_MISCFLAGS_VOLATILE) + ret[9] = 'V'; + + return ret; +} + +int ffs_string_to_entry_user(const char *flags, int nflags, + struct ffs_entry_user *user) +{ + int i; + + if (!user || !flags) + return FLASH_ERR_PARM_ERROR; + + memset(user, 0, sizeof(struct ffs_entry_user)); + for (i = 0; i < nflags; i++) { + switch (flags[i]) { + case 'E': + user->datainteg |= FFS_ENRY_INTEG_ECC; + break; + case 'L': + user->vercheck |= FFS_VERCHECK_SHA512V; + break; + case 'I': + user->vercheck |= FFS_VERCHECK_SHA512EC; + break; + case 'P': + user->miscflags |= FFS_MISCFLAGS_PRESERVED; + break; + case 'R': + user->miscflags |= FFS_MISCFLAGS_READONLY; + break; + case 'B': + user->miscflags |= FFS_MISCFLAGS_BACKUP; + break; + case 'F': + user->miscflags |= FFS_MISCFLAGS_REPROVISION; + break; + case 'G': + user->miscflags |= FFS_MISCFLAGS_GOLDEN; + break; + case 'C': + user->miscflags |= FFS_MISCFLAGS_CLEARECC; + break; + case 'V': + user->miscflags |= FFS_MISCFLAGS_VOLATILE; + break; + default: + FL_DBG("Unknown flag '%c'\n", flags[i]); + return FLASH_ERR_PARM_ERROR; + } + } + + return 0; +} + +bool has_flag(struct ffs_entry *ent, uint16_t flag) +{ + return ((ent->user.miscflags & flag) != 0); +} + +static struct ffs_entry *__ffs_entry_get(struct ffs_handle *ffs, uint32_t index) +{ + if (index >= ffs->hdr.count) + return NULL; + return ffs->hdr.entries[index]; +} + +struct ffs_entry *ffs_entry_get(struct ffs_handle *ffs, uint32_t index) +{ + struct ffs_entry *ret = __ffs_entry_get(ffs, index); + if (ret) + ret->ref++; + return ret; +} + +struct ffs_entry *ffs_entry_put(struct ffs_entry *ent) +{ + if (!ent) + return NULL; + + ent->ref--; + if (ent->ref == 0) { + free(ent); + ent = NULL; + } + + return ent; +} + +bool has_ecc(struct ffs_entry *ent) +{ + return ((ent->user.datainteg & FFS_ENRY_INTEG_ECC) != 0); +} + +int ffs_init(uint32_t offset, uint32_t max_size, struct blocklevel_device *bl, + struct ffs_handle **ffs, bool mark_ecc) +{ + struct __ffs_hdr blank_hdr; + struct __ffs_hdr raw_hdr; + struct ffs_handle *f; + uint64_t total_size; + int rc, i; + + if (!ffs || !bl) + return FLASH_ERR_PARM_ERROR; + *ffs = NULL; + + rc = blocklevel_get_info(bl, NULL, &total_size, NULL); + if (rc) { + FL_ERR("FFS: Error %d retrieving flash info\n", rc); + return rc; + } + if (total_size > UINT_MAX) + return FLASH_ERR_VERIFY_FAILURE; + if ((offset + max_size) < offset) + return FLASH_ERR_PARM_ERROR; + + if ((max_size > total_size)) + return FLASH_ERR_PARM_ERROR; + + /* Read flash header */ + rc = blocklevel_read(bl, offset, &raw_hdr, sizeof(raw_hdr)); + if (rc) { + FL_ERR("FFS: Error %d reading flash header\n", rc); + return rc; + } + + /* + 
* Flash controllers can get deconfigured or otherwise upset, when this + * happens they return all 0xFF bytes. + * An __ffs_hdr consisting of all 0xFF cannot be valid and it would be + * nice to drop a hint to the user to help with debugging. This will + * help quickly differentiate between flash corruption and standard + * type 'reading from the wrong place' errors vs controller errors or + * reading erased data. + */ + memset(&blank_hdr, UINT_MAX, sizeof(struct __ffs_hdr)); + if (memcmp(&blank_hdr, &raw_hdr, sizeof(struct __ffs_hdr)) == 0) { + FL_ERR("FFS: Reading the flash has returned all 0xFF.\n"); + FL_ERR(" Are you reading erased flash?\n"); + FL_ERR(" Is something else using the flash controller?\n"); + return FLASH_ERR_BAD_READ; + } + + /* Allocate ffs_handle structure and start populating */ + f = calloc(1, sizeof(*f)); + if (!f) + return FLASH_ERR_MALLOC_FAILED; + + f->toc_offset = offset; + f->max_size = max_size; + f->bl = bl; + + /* Convert and check flash header */ + rc = ffs_check_convert_header(&f->hdr, &raw_hdr); + if (rc) { + FL_INF("FFS: Flash header not found. Code: %d\n", rc); + goto out; + } + + /* Check header is sane */ + if ((f->hdr.block_count * f->hdr.block_size) > max_size) { + rc = FLASH_ERR_PARM_ERROR; + FL_ERR("FFS: Flash header exceeds max flash size\n"); + goto out; + } + + f->hdr.entries = calloc(f->hdr.entries_size, sizeof(struct ffs_entry *)); + + /* + * Grab the entire partition header + */ + /* Check for overflow or a silly size */ + if (!f->hdr.size || f->hdr.size % f->hdr.block_size != 0) { + rc = FLASH_ERR_MALLOC_FAILED; + FL_ERR("FFS: Cache size overflow (0x%x * 0x%x)\n", + f->hdr.block_size, f->hdr.size); + goto out; + } + + FL_DBG("FFS: Partition map size: 0x%x\n", f->hdr.size); + + /* Allocate cache */ + f->cache = malloc(f->hdr.size); + if (!f->cache) { + rc = FLASH_ERR_MALLOC_FAILED; + goto out; + } + + /* Read the cached map */ + rc = blocklevel_read(bl, offset, f->cache, f->hdr.size); + if (rc) { + FL_ERR("FFS: Error %d reading flash partition map\n", rc); + goto out; + } + + for (i = 0; i < f->hdr.entries_size; i++) { + struct ffs_entry *ent = calloc(1, sizeof(struct ffs_entry)); + if (!ent) { + rc = FLASH_ERR_MALLOC_FAILED; + goto out; + } + + f->hdr.entries[f->hdr.count++] = ent; + ent->ref = 1; + rc = ffs_entry_to_cpu(&f->hdr, ent, &f->cache->entries[i]); + if (rc) { + FL_DBG("FFS: Failed checksum for partition %s\n", + f->cache->entries[i].name); + goto out; + } + + if (mark_ecc && has_ecc(ent)) { + rc = blocklevel_ecc_protect(bl, ent->base, ent->size); + if (rc) { + FL_ERR("Failed to blocklevel_ecc_protect(0x%08x, 0x%08x)\n", + ent->base, ent->size); + goto out; + } + } + } + +out: + if (rc == 0) + *ffs = f; + else + ffs_close(f); + + return rc; +} + +static void __hdr_free(struct ffs_hdr *hdr) +{ + int i; + + if (!hdr) + return; + + for (i = 0; i < hdr->count; i++) + ffs_entry_put(hdr->entries[i]); + free(hdr->entries); +} + +void ffs_hdr_free(struct ffs_hdr *hdr) +{ + __hdr_free(hdr); + free(hdr); +} + +void ffs_close(struct ffs_handle *ffs) +{ + __hdr_free(&ffs->hdr); + + if (ffs->cache) + free(ffs->cache); + + free(ffs); +} + +int ffs_lookup_part(struct ffs_handle *ffs, const char *name, + uint32_t *part_idx) +{ + struct ffs_entry **ents = ffs->hdr.entries; + int i; + + for (i = 0; + i < ffs->hdr.count && + strncmp(name, ents[i]->name, FFS_PART_NAME_MAX); + i++); + + if (i == ffs->hdr.count) + return FFS_ERR_PART_NOT_FOUND; + + if (part_idx) + *part_idx = i; + return 0; +} + +int ffs_part_info(struct ffs_handle *ffs, uint32_t 
part_idx, + char **name, uint32_t *start, + uint32_t *total_size, uint32_t *act_size, bool *ecc) +{ + struct ffs_entry *ent; + char *n; + + ent = __ffs_entry_get(ffs, part_idx); + if (!ent) + return FFS_ERR_PART_NOT_FOUND; + + if (start) + *start = ent->base; + if (total_size) + *total_size = ent->size; + if (act_size) + *act_size = ent->actual; + if (ecc) + *ecc = has_ecc(ent); + + if (name) { + n = calloc(1, FFS_PART_NAME_MAX + 1); + if (!n) + return FLASH_ERR_MALLOC_FAILED; + memcpy(n, ent->name, FFS_PART_NAME_MAX); + *name = n; + } + return 0; +} + +/* + * There are quite a few ways one might consider two ffs_handles to be the + * same. For the purposes of this function we are trying to detect a fairly + * specific scenario: + * Consecutive calls to ffs_next_side() may succeed but have gone circular. + * It is possible that the OTHER_SIDE partition in one TOC actually points + * back to the TOC to first ffs_handle. + * This function compares for this case, therefore the requirements are + * simple, the underlying blocklevel_devices must be the same along with + * the toc_offset and the max_size. + */ +bool ffs_equal(struct ffs_handle *one, struct ffs_handle *two) +{ + return (!one && !two) || (one && two && one->bl == two->bl + && one->toc_offset == two->toc_offset + && one->max_size == two->max_size); +} + +int ffs_next_side(struct ffs_handle *ffs, struct ffs_handle **new_ffs, + bool mark_ecc) +{ + int rc; + uint32_t index, offset, max_size; + + if (!ffs || !new_ffs) + return FLASH_ERR_PARM_ERROR; + + *new_ffs = NULL; + + rc = ffs_lookup_part(ffs, "OTHER_SIDE", &index); + if (rc) + return rc; + + rc = ffs_part_info(ffs, index, NULL, &offset, &max_size, NULL, NULL); + if (rc) + return rc; + + return ffs_init(offset, max_size, ffs->bl, new_ffs, mark_ecc); +} + +int ffs_entry_add(struct ffs_hdr *hdr, struct ffs_entry *entry) +{ + const char *smallest_name; + uint32_t smallest_base, toc_base; + int i; + + FL_DBG("LIBFFS: Adding '%s' at 0x%08x..0x%08x\n", + entry->name, entry->base, entry->base + entry->size); + + if (hdr->count == 0) { + FL_DBG("LIBFFS: Adding an entry to an empty header\n"); + hdr->entries[hdr->count++] = entry; + } + if (entry->base + entry->size > hdr->block_size * hdr->block_count) + return FFS_ERR_BAD_PART_SIZE; + + smallest_base = entry->base; + smallest_name = entry->name; + toc_base = 0; + /* + * TODO: This may have assumed entries was sorted + */ + for (i = 0; i < hdr->count; i++) { + struct ffs_entry *ent = hdr->entries[i]; + + /* Don't allow same names to differ only by case */ + if (strncasecmp(entry->name, ent->name, FFS_PART_NAME_MAX) == 0) + return FFS_ERR_BAD_PART_NAME; + + if (entry->base >= ent->base && entry->base < ent->base + ent->size) + return FFS_ERR_BAD_PART_BASE; + + if (entry->base + entry->size > ent->base && + entry->base + entry->size < ent->base + ent->size) + return FFS_ERR_BAD_PART_SIZE; + + if (entry->actual > entry->size) + return FFS_ERR_BAD_PART_SIZE; + + if (entry->pid != FFS_PID_TOPLEVEL) + return FFS_ERR_BAD_PART_PID; + + /* First partition is the partition table */ + if (i == 0) { + toc_base = ent->base; + } else { + /* + * We're looking for the partition directly + * after the toc to make sure we don't + * overflow onto it. 
+ */ + if (ent->base < smallest_base && ent->base > toc_base) { + smallest_base = ent->base; + smallest_name = ent->name; + } + } + } + /* If the smallest base is before the TOC, don't worry */ + if (smallest_base > toc_base && (hdr->count + 1) * sizeof(struct __ffs_entry) + + sizeof(struct __ffs_hdr) + toc_base > smallest_base) { + fprintf(stderr, "Adding partition '%s' would cause partition '%s' at " + "0x%08x to overlap with the header\n", entry->name, smallest_name, + smallest_base); + return FFS_ERR_BAD_PART_BASE; + } + + if (hdr->count == hdr->entries_size) { + struct ffs_entry **old = hdr->entries; + + hdr->entries = realloc(hdr->entries, + (HDR_ENTRIES_NUM + hdr->entries_size) * sizeof(struct ffs_entry *)); + if (!hdr->entries) { + hdr->entries = old; + return FLASH_ERR_MALLOC_FAILED; + } + hdr->entries_size += HDR_ENTRIES_NUM; + } + entry->ref++; + hdr->entries[hdr->count++] = entry; + + return 0; +} + +int ffs_hdr_finalise(struct blocklevel_device *bl, struct ffs_hdr *hdr) +{ + int num_entries, i, rc = 0; + struct __ffs_hdr *real_hdr; + + num_entries = ffs_num_entries(hdr); + + /* A TOC shouldn't have zero partitions */ + if (num_entries == 0) + return FFS_ERR_BAD_SIZE; + + real_hdr = malloc(ffs_hdr_raw_size(num_entries)); + if (!real_hdr) + return FLASH_ERR_MALLOC_FAILED; + + /* + * So that the checksum gets calculated correctly at least the + * real_hdr->checksum must be zero before calling ffs_hdr_checksum() + * memset()ting the entire struct to zero is probably wise as it + * appears the reserved fields are always zero. + */ + memset(real_hdr, 0, sizeof(*real_hdr)); + + hdr->part->size = ffs_hdr_raw_size(num_entries) + hdr->block_size; + /* + * So actual is in bytes. ffs_entry_to_flash() don't do the + * block_size division that we're relying on + */ + hdr->part->actual = (hdr->part->size / hdr->block_size) * hdr->block_size; + real_hdr->magic = cpu_to_be32(FFS_MAGIC); + real_hdr->version = cpu_to_be32(hdr->version); + real_hdr->size = cpu_to_be32(hdr->part->size / hdr->block_size); + real_hdr->entry_size = cpu_to_be32(sizeof(struct __ffs_entry)); + real_hdr->entry_count = cpu_to_be32(num_entries); + real_hdr->block_size = cpu_to_be32(hdr->block_size); + real_hdr->block_count = cpu_to_be32(hdr->block_count); + real_hdr->checksum = ffs_hdr_checksum(real_hdr); + + for (i = 0; i < hdr->count; i++) { + rc = ffs_entry_to_flash(hdr, real_hdr->entries + i, hdr->entries[i]); + if (rc) { + fprintf(stderr, "Couldn't format all entries for new TOC\n"); + goto out; + } + } + + /* Don't really care if this fails */ + blocklevel_erase(bl, hdr->part->base, hdr->size); + rc = blocklevel_write(bl, hdr->part->base, real_hdr, + ffs_hdr_raw_size(num_entries)); + if (rc) + goto out; + +out: + free(real_hdr); + return rc; +} + +int ffs_entry_user_set(struct ffs_entry *ent, struct ffs_entry_user *user) +{ + if (!ent || !user) + return -1; + + /* + * Don't allow the user to specify anything we dont't know about. + * Rationale: This is the library providing access to the FFS structures. + * If the consumer of the library knows more about FFS structures then + * questions need to be asked. + * The other possibility is that they've unknowningly supplied invalid + * flags, we should tell them. 
+ */ + if (user->chip) + return -1; + if (user->compresstype) + return -1; + if (user->datainteg & ~(FFS_ENRY_INTEG_ECC)) + return -1; + if (user->vercheck & ~(FFS_VERCHECK_SHA512V | FFS_VERCHECK_SHA512EC)) + return -1; + if (user->miscflags & ~(FFS_MISCFLAGS_PRESERVED | FFS_MISCFLAGS_BACKUP | + FFS_MISCFLAGS_READONLY | FFS_MISCFLAGS_REPROVISION | + FFS_MISCFLAGS_VOLATILE | FFS_MISCFLAGS_GOLDEN | + FFS_MISCFLAGS_CLEARECC)) + return -1; + + memcpy(&ent->user, user, sizeof(*user)); + return 0; +} + +struct ffs_entry_user ffs_entry_user_get(struct ffs_entry *ent) +{ + struct ffs_entry_user user = { 0 }; + + if (ent) + memcpy(&user, &ent->user, sizeof(user)); + + return user; +} + +int ffs_entry_new(const char *name, uint32_t base, uint32_t size, struct ffs_entry **r) +{ + struct ffs_entry *ret; + + ret = calloc(1, sizeof(*ret)); + if (!ret) + return FLASH_ERR_MALLOC_FAILED; + + strncpy(ret->name, name, FFS_PART_NAME_MAX); + ret->name[FFS_PART_NAME_MAX] = '\0'; + ret->base = base; + ret->size = size; + ret->actual = size; + ret->pid = FFS_PID_TOPLEVEL; + ret->type = FFS_TYPE_DATA; + ret->ref = 1; + + *r = ret; + return 0; +} + +int ffs_entry_set_act_size(struct ffs_entry *ent, uint32_t actual_size) +{ + if (!ent) + return -1; + + if (actual_size > ent->size) + return FFS_ERR_BAD_PART_SIZE; + + ent->actual = actual_size; + + return 0; +} + +int ffs_hdr_new(uint32_t block_size, uint32_t block_count, + struct ffs_entry **e, struct ffs_hdr **r) +{ + struct ffs_hdr *ret; + struct ffs_entry *part_table; + int rc; + + ret = calloc(1, sizeof(*ret)); + if (!ret) + return FLASH_ERR_MALLOC_FAILED; + + ret->version = FFS_VERSION_1; + ret->block_size = block_size; + ret->block_count = block_count; + ret->entries = calloc(HDR_ENTRIES_NUM, sizeof(struct ffs_entry *)); + ret->entries_size = HDR_ENTRIES_NUM; + + if (!e || !(*e)) { + /* Don't know how big it will be, ffs_hdr_finalise() will fix */ + rc = ffs_entry_new("part", 0, 0, &part_table); + if (rc) { + free(ret); + return rc; + } + if (e) + *e = part_table; + } else { + part_table = *e; + } + + /* If the user still holds a ref to e, then inc the refcount */ + if (e) + part_table->ref++; + + ret->part = part_table; + + part_table->pid = FFS_PID_TOPLEVEL; + part_table->type = FFS_TYPE_PARTITION; + part_table->flags = FFS_FLAGS_PROTECTED; + + ret->entries[0] = part_table; + ret->count = 1; + + *r = ret; + + return 0; +} + +int ffs_update_act_size(struct ffs_handle *ffs, uint32_t part_idx, + uint32_t act_size) +{ + struct ffs_entry *ent; + struct __ffs_entry raw_ent; + uint32_t offset; + int rc; + + ent = __ffs_entry_get(ffs, part_idx); + if (!ent) { + FL_DBG("FFS: Entry not found\n"); + return FFS_ERR_PART_NOT_FOUND; + } + offset = ffs->toc_offset + ffs_hdr_raw_size(part_idx); + FL_DBG("FFS: part index %d at offset 0x%08x\n", + part_idx, offset); + + if (ent->actual == act_size) { + FL_DBG("FFS: ent->actual alrady matches: 0x%08x==0x%08x\n", + act_size, ent->actual); + return 0; + } + ent->actual = act_size; + + rc = ffs_entry_to_flash(&ffs->hdr, &raw_ent, ent); + if (rc) + return rc; + + return blocklevel_smart_write(ffs->bl, offset, &raw_ent, sizeof(struct __ffs_entry)); +} diff --git a/roms/skiboot/libflash/libffs.h b/roms/skiboot/libflash/libffs.h new file mode 100644 index 000000000..a81f24c99 --- /dev/null +++ b/roms/skiboot/libflash/libffs.h @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* Copyright 2013-2018 IBM Corp. 
*/ + +#ifndef __LIBFFS_H +#define __LIBFFS_H + +#include <libflash/libflash.h> +#include <libflash/blocklevel.h> + +/* FFS handle, opaque */ +struct ffs_handle; +struct ffs_hdr; +struct ffs_entry; +struct ffs_entry_user; + +/** + * struct ffs_entry_user - User data entries + * + * Usable in memory representation of a struct __ffs_entry_user + * + * @chip: Chip Select (0,1) + * @compressType: Compression Indication/alg (0=not compressed) + * @dataInteg: Indicates Data Integrity mechanism + * @verCheck: Indicates Version check type + * @miscFlags: Misc Partition related Flags + */ +struct ffs_entry_user { + uint8_t chip; + uint8_t compresstype; + uint16_t datainteg; + uint8_t vercheck; + uint8_t miscflags; +}; + +/* Error codes: + * + * < 0 = flash controller errors + * 0 = success + * > 0 = libffs / libflash errors + */ +#define FFS_ERR_BAD_MAGIC 100 +#define FFS_ERR_BAD_VERSION 101 +#define FFS_ERR_BAD_CKSUM 102 +#define FFS_ERR_PART_NOT_FOUND 103 +#define FFS_ERR_BAD_ECC 104 +#define FFS_ERR_BAD_SIZE 105 +#define FFS_ERR_BAD_PART_NAME 106 +#define FFS_ERR_BAD_PART_BASE 107 +#define FFS_ERR_BAD_PART_SIZE 108 +#define FFS_ERR_BAD_PART_PID 109 + +/* The maximum length of the partition name */ +#define FFS_PART_NAME_MAX 15 +/* Old version of the name DEPRECATED */ +#define PART_NAME_MAX 15 + +/* + * Flag bit definitions + */ +#define FFS_FLAGS_PROTECTED 0x0001 +#define FFS_FLAGS_U_BOOT_ENV 0x0002 + +/* Data integrity flags */ +#define FFS_ENRY_INTEG_ECC 0x8000 + +/* + * User verCheck definitions + */ +#define FFS_VERCHECK_SHA512V 0x80 +#define FFS_VERCHECK_SHA512EC 0x40 + +/* + * User miscFlags + */ +#define FFS_MISCFLAGS_PRESERVED 0x80 +#define FFS_MISCFLAGS_READONLY 0x40 +#define FFS_MISCFLAGS_BACKUP 0x20 +#define FFS_MISCFLAGS_REPROVISION 0x10 +#define FFS_MISCFLAGS_VOLATILE 0x08 +#define FFS_MISCFLAGS_CLEARECC 0x04 +#define FFS_MISCFLAGS_GOLDEN 0x01 + + +int ffs_string_to_entry_user(const char *flags, int nflags, + struct ffs_entry_user *user); +char *ffs_entry_user_to_string(struct ffs_entry_user *user); + +bool has_ecc(struct ffs_entry *ent); + +bool has_flag(struct ffs_entry *ent, uint16_t flag); + +/* Init */ + +int ffs_init(uint32_t offset, uint32_t max_size, struct blocklevel_device *bl, + struct ffs_handle **ffs, bool mark_ecc); + +/* + * Initialise a new ffs_handle to the "OTHER SIDE". + * Reuses the underlying blocklevel_device. + */ +int ffs_next_side(struct ffs_handle *ffs, struct ffs_handle **new_ffs, + bool mark_ecc); + +/* + * There are quite a few ways one might consider two ffs_handles to be the + * same. For the purposes of this function we are trying to detect a fairly + * specific scenario: + * Consecutive calls to ffs_next_side() may succeed but have gone circular. + * It is possible that the OTHER_SIDE partition in one TOC actually points + * back to the TOC of the first ffs_handle. + * This function compares for this case, therefore the requirements are + * simple, the underlying blocklevel_devices must be the same along with + * the toc_offset and the max_size. 
+ */ +bool ffs_equal(struct ffs_handle *one, struct ffs_handle *two); + +void ffs_close(struct ffs_handle *ffs); + +int ffs_lookup_part(struct ffs_handle *ffs, const char *name, + uint32_t *part_idx); + +int ffs_part_info(struct ffs_handle *ffs, uint32_t part_idx, + char **name, uint32_t *start, + uint32_t *total_size, uint32_t *act_size, bool *ecc); + +struct ffs_entry *ffs_entry_get(struct ffs_handle *ffs, uint32_t index); + +int ffs_update_act_size(struct ffs_handle *ffs, uint32_t part_idx, + uint32_t act_size); + +int ffs_hdr_new(uint32_t block_size, uint32_t block_count, + struct ffs_entry **e, struct ffs_hdr **r); + +int ffs_hdr_add_side(struct ffs_hdr *hdr); + +int ffs_entry_new(const char *name, uint32_t base, uint32_t size, struct ffs_entry **r); + +struct ffs_entry *ffs_entry_put(struct ffs_entry *ent); + +int ffs_entry_user_set(struct ffs_entry *ent, struct ffs_entry_user *user); + +int ffs_entry_set_act_size(struct ffs_entry *ent, uint32_t actual_size); + + +struct ffs_entry_user ffs_entry_user_get(struct ffs_entry *ent); + +int ffs_entry_add(struct ffs_hdr *hdr, struct ffs_entry *entry); + +int ffs_hdr_finalise(struct blocklevel_device *bl, struct ffs_hdr *hdr); + +void ffs_hdr_free(struct ffs_hdr *hdr); +#endif /* __LIBFFS_H */ diff --git a/roms/skiboot/libflash/libflash-priv.h b/roms/skiboot/libflash/libflash-priv.h new file mode 100644 index 000000000..b3d10d4e8 --- /dev/null +++ b/roms/skiboot/libflash/libflash-priv.h @@ -0,0 +1,222 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* Copyright 2013-2015 IBM Corp. */ + +#ifndef __LIBFLASH_PRIV_H +#define __LIBFLASH_PRIV_H + +#include <ccan/endian/endian.h> +#include <ccan/array_size/array_size.h> +#include <ccan/container_of/container_of.h> + +/* Flash commands */ +#define CMD_WRSR 0x01 /* Write Status Register (also config. 
on Macronix) */ +#define CMD_PP 0x02 /* Page Program */ +#define CMD_READ 0x03 /* READ */ +#define CMD_WRDI 0x04 /* Write Disable */ +#define CMD_RDSR 0x05 /* Read Status Register */ +#define CMD_WREN 0x06 /* Write Enable */ +#define CMD_RDCR 0x15 /* Read configuration register (Macronix) */ +#define CMD_SE 0x20 /* Sector (4K) Erase */ +#define CMD_RDSCUR 0x2b /* Read Security Register (Macronix) */ +#define CMD_BE32K 0x52 /* Block (32K) Erase */ +#define CMD_RDSFDP 0x5a /* Read SFDP JEDEC info */ +#define CMD_CE 0x60 /* Chip Erase (Macronix/Winbond) */ +#define CMD_MIC_WREVCONF 0x61 /* Micron Write Enhanced Volatile Config */ +#define CMD_MIC_RDEVCONF 0x65 /* Micron Read Enhanced Volatile Config */ +#define CMD_MIC_RDFLST 0x70 /* Micron Read Flag Status */ +#define CMD_MIC_WRVCONF 0x81 /* Micron Write Volatile Config */ +#define CMD_MIC_RDVCONF 0x85 /* Micron Read Volatile Config */ +#define CMD_RDID 0x9f /* Read JEDEC ID */ +#define CMD_EN4B 0xb7 /* Enable 4B addresses */ +#define CMD_MIC_BULK_ERASE 0xc7 /* Micron Bulk Erase */ +#define CMD_BE 0xd8 /* Block (64K) Erase */ +#define CMD_RDDPB 0xe0 /* Read dynamic protection (Macronix) */ +#define CMD_RDSPB 0xe2 /* Read static protection (Macronix) */ +#define CMD_EX4B 0xe9 /* Exit 4B addresses */ + +/* Flash status bits */ +#define STAT_WIP 0x01 +#define STAT_WEN 0x02 + +/* This isn't exposed to clients but is to controllers */ +struct flash_info { + uint32_t id; + uint32_t size; + uint32_t flags; +#define FL_ERASE_4K 0x00000001 /* Supports 4k erase */ +#define FL_ERASE_32K 0x00000002 /* Supports 32k erase */ +#define FL_ERASE_64K 0x00000004 /* Supports 64k erase */ +#define FL_ERASE_CHIP 0x00000008 /* Supports 0x60 cmd chip erase */ +#define FL_ERASE_BULK 0x00000010 /* Supports 0xc7 cmd bulk erase */ +#define FL_MICRON_BUGS 0x00000020 /* Various micron bug workarounds */ +#define FL_ERASE_ALL (FL_ERASE_4K | FL_ERASE_32K | FL_ERASE_64K | \ + FL_ERASE_CHIP) +#define FL_CAN_4B 0x00000010 /* Supports 4b mode */ + const char *name; +}; + +/* Flash controller, return negative values for errors */ +struct spi_flash_ctrl { + /* + * The controller can provide basically two interfaces, + * either a fairly high level one and a lower level one. + * + * If all functions of the high level interface are + * implemented then the low level one is optional. A + * controller can implement some of the high level one + * in which case the missing ones will be handled by + * libflash using the low level interface. + * + * There are also some common functions. + */ + + /* ************************************************** + * Misc / common functions + * **************************************************/ + + /* + * - setup(ctrl, tsize) + * + * Provides the controller with an option to configure itself + * based on the specific flash type. It can also override some + * settings in the info block such as available erase sizes etc... + * which can be needed for high level controllers. It can also + * override the total flash size. + */ + int (*setup)(struct spi_flash_ctrl *ctrl, uint32_t *tsize); + + /* + * - set_4b(ctrl, enable) + * + * enable : Switch to 4bytes (true) or 3bytes (false) address mode + * + * Set the controller's address size. If the controller doesn't + * implement the low level command interface, then this must also + * configure the flash chip itself. Otherwise, libflash will do it. 
+ * + * Note that if this isn't implemented, then libflash might still + * try to switch large flash chips to 4b mode if the low level cmd + * interface is implemented. It will then also stop using the high + * level command interface since it's assumed that it cannot handle + * 4b addresses. + */ + int (*set_4b)(struct spi_flash_ctrl *ctrl, bool enable); + + + + /* ************************************************** + * High level interface + * **************************************************/ + + /* + * Read chip ID. This can return up to 16 bytes though the + * current libflash will only use 3 (room for things like + * extended micron stuff). + * + * id_size is set on entry to the buffer size and need to + * be adjusted to the actual ID size read. + * + * If NULL, libflash will use cmd_rd to send normal RDID (0x9f) + * command. + */ + int (*chip_id)(struct spi_flash_ctrl *ctrl, uint8_t *id_buf, + uint32_t *id_size); + + /* + * Read from flash. There is no specific constraint on + * alignment or size other than not reading outside of + * the chip. + * + * If NULL, libflash will use cmd_rd to send normal + * READ (0x03) commands. + */ + int (*read)(struct spi_flash_ctrl *ctrl, uint32_t addr, void *buf, + uint32_t size); + + /* + * Write to flash. There is no specific constraint on + * alignment or size other than not reading outside of + * the chip. The driver is responsible for handling + * 256-bytes page alignment and to send the write enable + * commands when needed. + * + * If absent, libflash will use cmd_wr to send WREN (0x06) + * and PP (0x02) commands. + * + * Note: This does not need to handle erasing. libflash + * will ensure that this is never used for changing a bit + * value from 0 to 1. + */ + int (*write)(struct spi_flash_ctrl *ctrl, uint32_t addr, + const void *buf, uint32_t size); + + /* + * Erase. This will be called for erasing a portion of + * the flash using a granularity (alignment of start and + * size) that is no less than the smallest supported + * erase size in the info block (*). The driver is + * responsible to send write enable commands when needed. + * + * If absent, libflash will use cmd_wr to send WREN (0x06) + * and either of SE (0x20), BE32K (0x52) or BE (0xd8) + * based on what the flash chip supports. + * + * (*) Note: This is called with addr=0 and size=0xffffffff + * in which case this is used as a "chip erase". Return + * FLASH_ERR_CHIP_ER_NOT_SUPPORTED if not supported. Some + * future version of libflash might then emulate it using + * normal erase commands. + */ + int (*erase)(struct spi_flash_ctrl *ctrl, uint32_t addr, + uint32_t size); + + /* ************************************************** + * Low level interface + * **************************************************/ + + /* Note: For commands with no data, libflash will might use + * either cmd_rd or cmd_wr. 
+ */ + + /* + * - cmd_rd(ctrl, cmd, has_addr, address, buffer, size); + * + * cmd : command opcode + * has_addr : send an address after the command + * address : address to send + * buffer : buffer for additional data to read (or NULL) + * size : size of additional data read (or NULL) + * + * Sends a command and optionally read additional data + */ + int (*cmd_rd)(struct spi_flash_ctrl *ctrl, uint8_t cmd, + bool has_addr, uint32_t addr, void *buffer, + uint32_t size); + /* + * - cmd_wr(ctrl, cmd, has_addr, address, buffer, size); + * + * cmd : command opcode + * has_addr : send an address after the command + * address : address to send + * buffer : buffer for additional data to write (or NULL) + * size : size of additional data write (or NULL) + * + * Sends a command and optionally write additional data + */ + int (*cmd_wr)(struct spi_flash_ctrl *ctrl, uint8_t cmd, + bool has_addr, uint32_t addr, const void *buffer, + uint32_t size); + + /* The core will establish this at init, after chip ID has + * been probed + */ + struct flash_info *finfo; + + void *priv; +}; + +extern int fl_wren(struct spi_flash_ctrl *ct); +extern int fl_read_stat(struct spi_flash_ctrl *ct, uint8_t *stat); +extern int fl_sync_wait_idle(struct spi_flash_ctrl *ct); + +#endif /* LIBFLASH_PRIV_H */ diff --git a/roms/skiboot/libflash/libflash.c b/roms/skiboot/libflash/libflash.c new file mode 100644 index 000000000..70c8a597e --- /dev/null +++ b/roms/skiboot/libflash/libflash.c @@ -0,0 +1,864 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* Copyright 2013-2017 IBM Corp. */ + +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <inttypes.h> + +#include "libflash.h" +#include "libflash-priv.h" +#include "ecc.h" +#include "blocklevel.h" + +static const struct flash_info flash_info[] = { + { 0xc22018, 0x01000000, FL_ERASE_ALL | FL_CAN_4B, "Macronix MXxxL12835F"}, + { 0xc22019, 0x02000000, FL_ERASE_ALL | FL_CAN_4B, "Macronix MXxxL25635F"}, + { 0xc2201a, 0x04000000, FL_ERASE_ALL | FL_CAN_4B, "Macronix MXxxL51235F"}, + { 0xc2201b, 0x08000000, FL_ERASE_ALL | FL_CAN_4B, "Macronix MX66L1G45G"}, + { 0xef4018, 0x01000000, FL_ERASE_ALL, "Winbond W25Q128BV" }, + { 0xef4019, 0x02000000, FL_ERASE_ALL | FL_ERASE_64K | FL_CAN_4B | + FL_ERASE_BULK, + "Winbond W25Q256BV"}, + { 0x20ba20, 0x04000000, FL_ERASE_4K | FL_ERASE_64K | FL_CAN_4B | + FL_ERASE_BULK | FL_MICRON_BUGS, + "Micron N25Qx512Ax" }, + { 0x20ba19, 0x02000000, FL_ERASE_4K | FL_ERASE_64K | FL_CAN_4B | + FL_ERASE_BULK | FL_MICRON_BUGS, + "Micron N25Q256Ax" }, + { 0x1940ef, 0x02000000, FL_ERASE_4K | FL_ERASE_64K | FL_CAN_4B | + FL_ERASE_BULK | FL_MICRON_BUGS, + "Micron N25Qx256Ax" }, + { 0x4d5444, 0x02000000, FL_ERASE_ALL | FL_CAN_4B, "File Abstraction"}, + { 0x55aa55, 0x00100000, FL_ERASE_ALL | FL_CAN_4B, "TEST_FLASH" }, + { 0xaa55aa, 0x02000000, FL_ERASE_ALL | FL_CAN_4B, "EMULATED_FLASH"}, +}; + +struct flash_chip { + struct spi_flash_ctrl *ctrl; /* Controller */ + struct flash_info info; /* Flash info */ + uint32_t tsize; /* Corrected flash size */ + uint32_t min_erase_mask; /* Minimum erase size */ + bool mode_4b; /* Flash currently in 4b mode */ + struct flash_req *cur_req; /* Current request */ + void *smart_buf; /* Buffer for smart writes */ + struct blocklevel_device bl; +}; + +#ifndef __SKIBOOT__ +bool libflash_debug; +#endif + +int fl_read_stat(struct spi_flash_ctrl *ct, uint8_t *stat) +{ + return ct->cmd_rd(ct, CMD_RDSR, false, 0, stat, 1); +} + +static void fl_micron_status(struct spi_flash_ctrl *ct) +{ + uint8_t flst; + + /* + * 
After a success status on a write or erase, we + * need to do that command or some chip variants will + * lock + */ + ct->cmd_rd(ct, CMD_MIC_RDFLST, false, 0, &flst, 1); +} + +/* Synchronous write completion, probably need a yield hook */ +int fl_sync_wait_idle(struct spi_flash_ctrl *ct) +{ + uint8_t stat; + int rc; + + /* XXX Add timeout */ + for (;;) { + rc = fl_read_stat(ct, &stat); + if (rc) return rc; + if (!(stat & STAT_WIP)) { + if (ct->finfo->flags & FL_MICRON_BUGS) + fl_micron_status(ct); + return 0; + } + } + /* return FLASH_ERR_WIP_TIMEOUT; */ +} + +/* Exported for internal use */ +int fl_wren(struct spi_flash_ctrl *ct) +{ + int i, rc; + uint8_t stat; + + /* Some flashes need it to be hammered */ + for (i = 0; i < 1000; i++) { + rc = ct->cmd_wr(ct, CMD_WREN, false, 0, NULL, 0); + if (rc) return rc; + rc = fl_read_stat(ct, &stat); + if (rc) return rc; + if (stat & STAT_WIP) { + FL_ERR("LIBFLASH: WREN has WIP status set !\n"); + rc = fl_sync_wait_idle(ct); + if (rc) + return rc; + continue; + } + if (stat & STAT_WEN) + return 0; + } + return FLASH_ERR_WREN_TIMEOUT; +} + +static int flash_read(struct blocklevel_device *bl, uint64_t pos, void *buf, uint64_t len) +{ + struct flash_chip *c = container_of(bl, struct flash_chip, bl); + struct spi_flash_ctrl *ct = c->ctrl; + + /* XXX Add sanity/bound checking */ + + /* + * If the controller supports read and either we are in 3b mode + * or we are in 4b *and* the controller supports it, then do a + * high level read. + */ + if ((!c->mode_4b || ct->set_4b) && ct->read) + return ct->read(ct, pos, buf, len); + + /* Otherwise, go manual if supported */ + if (!ct->cmd_rd) + return FLASH_ERR_CTRL_CMD_UNSUPPORTED; + return ct->cmd_rd(ct, CMD_READ, true, pos, buf, len); +} + +#define COPY_BUFFER_LENGTH 4096 + +/* + * This provides a wrapper around flash_read on ECCed data + * len is length of data without ECC attached + */ +int flash_read_corrected(struct blocklevel_device *bl, uint32_t pos, void *buf, + uint32_t len, bool ecc) +{ + struct ecc64 *bufecc; + uint32_t copylen; + int rc; + uint8_t ret; + + if (!ecc) + return flash_read(bl, pos, buf, len); + + /* Copy the buffer in chunks */ + bufecc = malloc(ecc_buffer_size(COPY_BUFFER_LENGTH)); + if (!bufecc) + return FLASH_ERR_MALLOC_FAILED; + + while (len > 0) { + /* What's left to copy? 
*/ + copylen = MIN(len, COPY_BUFFER_LENGTH); + + /* Read ECCed data from flash */ + rc = flash_read(bl, pos, bufecc, ecc_buffer_size(copylen)); + if (rc) + goto err; + + /* Extract data from ECCed data */ + ret = memcpy_from_ecc(buf, bufecc, copylen); + if (ret) { + rc = FLASH_ERR_ECC_INVALID; + goto err; + } + + /* Update for next copy */ + len -= copylen; + buf = (uint8_t *)buf + copylen; + pos += ecc_buffer_size(copylen); + } + + rc = 0; + +err: + free(bufecc); + return rc; +} + +static void fl_get_best_erase(struct flash_chip *c, uint32_t dst, uint32_t size, + uint32_t *chunk, uint8_t *cmd) +{ + /* Smaller than 32k, use 4k */ + if ((dst & 0x7fff) || (size < 0x8000)) { + *chunk = 0x1000; + *cmd = CMD_SE; + return; + } + /* Smaller than 64k and 32k is supported, use it */ + if ((c->info.flags & FL_ERASE_32K) && + ((dst & 0xffff) || (size < 0x10000))) { + *chunk = 0x8000; + *cmd = CMD_BE32K; + return; + } + /* If 64K is not supported, use whatever smaller size is */ + if (!(c->info.flags & FL_ERASE_64K)) { + if (c->info.flags & FL_ERASE_32K) { + *chunk = 0x8000; + *cmd = CMD_BE32K; + } else { + *chunk = 0x1000; + *cmd = CMD_SE; + } + return; + } + /* Allright, let's go for 64K */ + *chunk = 0x10000; + *cmd = CMD_BE; +} + +static int flash_erase(struct blocklevel_device *bl, uint64_t dst, uint64_t size) +{ + struct flash_chip *c = container_of(bl, struct flash_chip, bl); + struct spi_flash_ctrl *ct = c->ctrl; + uint32_t chunk; + uint8_t cmd; + int rc; + + /* Some sanity checking */ + if (((dst + size) <= dst) || !size || (dst + size) > c->tsize) + return FLASH_ERR_PARM_ERROR; + + /* Check boundaries fit erase blocks */ + if ((dst | size) & c->min_erase_mask) + return FLASH_ERR_ERASE_BOUNDARY; + + FL_DBG("LIBFLASH: Erasing 0x%" PRIx64"..0%" PRIx64 "...\n", + dst, dst + size); + + /* Use controller erase if supported */ + if (ct->erase) + return ct->erase(ct, dst, size); + + /* Allright, loop as long as there's something to erase */ + while(size) { + /* How big can we make it based on alignent & size */ + fl_get_best_erase(c, dst, size, &chunk, &cmd); + + /* Poke write enable */ + rc = fl_wren(ct); + if (rc) + return rc; + + /* Send erase command */ + rc = ct->cmd_wr(ct, cmd, true, dst, NULL, 0); + if (rc) + return rc; + + /* Wait for write complete */ + rc = fl_sync_wait_idle(ct); + if (rc) + return rc; + + size -= chunk; + dst += chunk; + } + return 0; +} + +int flash_erase_chip(struct flash_chip *c) +{ + struct spi_flash_ctrl *ct = c->ctrl; + int rc; + + /* XXX TODO: Fallback to using normal erases */ + if (!(c->info.flags & (FL_ERASE_CHIP|FL_ERASE_BULK))) + return FLASH_ERR_CHIP_ER_NOT_SUPPORTED; + + FL_DBG("LIBFLASH: Erasing chip...\n"); + + /* Use controller erase if supported */ + if (ct->erase) + return ct->erase(ct, 0, 0xffffffff); + + rc = fl_wren(ct); + if (rc) return rc; + + if (c->info.flags & FL_ERASE_CHIP) + rc = ct->cmd_wr(ct, CMD_CE, false, 0, NULL, 0); + else + rc = ct->cmd_wr(ct, CMD_MIC_BULK_ERASE, false, 0, NULL, 0); + if (rc) + return rc; + + /* Wait for write complete */ + return fl_sync_wait_idle(ct); +} + +static int fl_wpage(struct flash_chip *c, uint32_t dst, const void *src, + uint32_t size) +{ + struct spi_flash_ctrl *ct = c->ctrl; + int rc; + + if (size < 1 || size > 0x100) + return FLASH_ERR_BAD_PAGE_SIZE; + + rc = fl_wren(ct); + if (rc) return rc; + + rc = ct->cmd_wr(ct, CMD_PP, true, dst, src, size); + if (rc) + return rc; + + /* Wait for write complete */ + return fl_sync_wait_idle(ct); +} + +static int flash_write(struct blocklevel_device *bl, uint32_t dst, 
const void *src, + uint32_t size, bool verify) +{ + struct flash_chip *c = container_of(bl, struct flash_chip, bl); + struct spi_flash_ctrl *ct = c->ctrl; + uint32_t todo = size; + uint32_t d = dst; + const void *s = src; + uint8_t vbuf[0x100]; + int rc; + + /* Some sanity checking */ + if (((dst + size) <= dst) || !size || (dst + size) > c->tsize) + return FLASH_ERR_PARM_ERROR; + + FL_DBG("LIBFLASH: Writing to 0x%08x..0%08x...\n", dst, dst + size); + + /* + * If the controller supports write and either we are in 3b mode + * or we are in 4b *and* the controller supports it, then do a + * high level write. + */ + if ((!c->mode_4b || ct->set_4b) && ct->write) { + rc = ct->write(ct, dst, src, size); + if (rc) + return rc; + goto writing_done; + } + + /* Otherwise, go manual if supported */ + if (!ct->cmd_wr) + return FLASH_ERR_CTRL_CMD_UNSUPPORTED; + + /* Iterate for each page to write */ + while(todo) { + uint32_t chunk; + + /* Handle misaligned start */ + chunk = 0x100 - (d & 0xff); + if (chunk > todo) + chunk = todo; + + rc = fl_wpage(c, d, s, chunk); + if (rc) return rc; + d += chunk; + s += chunk; + todo -= chunk; + } + + writing_done: + if (!verify) + return 0; + + /* Verify */ + FL_DBG("LIBFLASH: Verifying...\n"); + + while(size) { + uint32_t chunk; + + chunk = sizeof(vbuf); + if (chunk > size) + chunk = size; + rc = flash_read(bl, dst, vbuf, chunk); + if (rc) return rc; + if (memcmp(vbuf, src, chunk)) { + FL_ERR("LIBFLASH: Miscompare at 0x%08x\n", dst); + return FLASH_ERR_VERIFY_FAILURE; + } + dst += chunk; + src += chunk; + size -= chunk; + } + return 0; +} + +int flash_write_corrected(struct blocklevel_device *bl, uint32_t pos, const void *buf, + uint32_t len, bool verify, bool ecc) +{ + struct ecc64 *bufecc; + uint32_t copylen, copylen_minus_ecc; + int rc; + uint8_t ret; + + if (!ecc) + return flash_write(bl, pos, buf, len, verify); + + /* Copy the buffer in chunks */ + bufecc = malloc(ecc_buffer_size(COPY_BUFFER_LENGTH)); + if (!bufecc) + return FLASH_ERR_MALLOC_FAILED; + + while (len > 0) { + /* What's left to copy? */ + copylen = MIN(len, COPY_BUFFER_LENGTH); + copylen_minus_ecc = ecc_buffer_size_minus_ecc(copylen); + + /* Add the ecc byte to the data */ + ret = memcpy_to_ecc(bufecc, buf, copylen_minus_ecc); + if (ret) { + rc = FLASH_ERR_ECC_INVALID; + goto err; + } + + /* Write ECCed data to the flash */ + rc = flash_write(bl, pos, bufecc, copylen, verify); + if (rc) + goto err; + + /* Update for next copy */ + len -= copylen_minus_ecc; + buf = (uint8_t *)buf + copylen_minus_ecc; + pos += copylen; + } + + rc = 0; + +err: + free(bufecc); + return rc; +} + +enum sm_comp_res { + sm_no_change, + sm_need_write, + sm_need_erase, +}; + +static enum sm_comp_res flash_smart_comp(struct flash_chip *c, + const void *src, + uint32_t offset, uint32_t size) +{ + uint8_t *b = c->smart_buf + offset; + const uint8_t *s = src; + bool is_same = true; + uint32_t i; + + /* SRC DEST NEED_ERASE + * 0 1 0 + * 1 1 0 + * 0 0 0 + * 1 0 1 + */ + for (i = 0; i < size; i++) { + /* Any bit need to be set, need erase */ + if (s[i] & ~b[i]) + return sm_need_erase; + if (is_same && (b[i] != s[i])) + is_same = false; + } + return is_same ? 
sm_no_change : sm_need_write; +} + +static int flash_smart_write(struct blocklevel_device *bl, uint64_t dst, const void *src, uint64_t size) +{ + struct flash_chip *c = container_of(bl, struct flash_chip, bl); + uint32_t er_size = c->min_erase_mask + 1; + uint32_t end = dst + size; + int rc; + + /* Some sanity checking */ + if (end <= dst || !size || end > c->tsize) { + FL_DBG("LIBFLASH: Smart write param error\n"); + return FLASH_ERR_PARM_ERROR; + } + + FL_DBG("LIBFLASH: Smart writing to 0x%" PRIx64 "..0%" PRIx64 "...\n", + dst, dst + size); + + /* As long as we have something to write ... */ + while(dst < end) { + uint32_t page, off, chunk; + enum sm_comp_res sr; + + /* Figure out which erase page we are in and read it */ + page = dst & ~c->min_erase_mask; + off = dst & c->min_erase_mask; + FL_DBG("LIBFLASH: reading page 0x%08x..0x%08x...\n", + page, page + er_size); + rc = flash_read(bl, page, c->smart_buf, er_size); + if (rc) { + FL_DBG("LIBFLASH: ...error %d!\n", rc); + return rc; + } + + /* Locate the chunk of data we are working on */ + chunk = er_size - off; + if (size < chunk) + chunk = size; + + /* Compare against what we are writing and ff */ + sr = flash_smart_comp(c, src, off, chunk); + switch(sr) { + case sm_no_change: + /* Identical, skip it */ + FL_DBG("LIBFLASH: ...same !\n"); + break; + case sm_need_write: + /* Just needs writing over */ + FL_DBG("LIBFLASH: ...need write !\n"); + rc = flash_write(bl, dst, src, chunk, true); + if (rc) { + FL_DBG("LIBFLASH: Write error %d !\n", rc); + return rc; + } + break; + case sm_need_erase: + FL_DBG("LIBFLASH: ...need erase !\n"); + rc = flash_erase(bl, page, er_size); + if (rc) { + FL_DBG("LIBFLASH: erase error %d !\n", rc); + return rc; + } + /* Then update the portion of the buffer and write the block */ + memcpy(c->smart_buf + off, src, chunk); + rc = flash_write(bl, page, c->smart_buf, er_size, true); + if (rc) { + FL_DBG("LIBFLASH: write error %d !\n", rc); + return rc; + } + break; + } + dst += chunk; + src += chunk; + size -= chunk; + } + return 0; +} + +int flash_smart_write_corrected(struct blocklevel_device *bl, uint32_t dst, const void *src, + uint32_t size, bool ecc) +{ + struct ecc64 *buf; + int rc; + + if (!ecc) + return flash_smart_write(bl, dst, src, size); + + buf = malloc(ecc_buffer_size(size)); + if (!buf) + return FLASH_ERR_MALLOC_FAILED; + + rc = memcpy_to_ecc(buf, src, size); + if (rc) { + rc = FLASH_ERR_ECC_INVALID; + goto out; + } + + rc = flash_smart_write(bl, dst, buf, ecc_buffer_size(size)); + +out: + free(buf); + return rc; +} + +static int fl_chip_id(struct spi_flash_ctrl *ct, uint8_t *id_buf, + uint32_t *id_size) +{ + int rc; + uint8_t stat; + + /* Check initial status */ + rc = fl_read_stat(ct, &stat); + if (rc) + return rc; + + /* If stuck writing, wait for idle */ + if (stat & STAT_WIP) { + FL_ERR("LIBFLASH: Flash in writing state ! 
Waiting...\n"); + rc = fl_sync_wait_idle(ct); + if (rc) + return rc; + } else + FL_DBG("LIBFLASH: Init status: %02x\n", stat); + + /* Fallback to get ID manually */ + rc = ct->cmd_rd(ct, CMD_RDID, false, 0, id_buf, 3); + if (rc) + return rc; + *id_size = 3; + + return 0; +} + +static int flash_identify(struct flash_chip *c) +{ + struct spi_flash_ctrl *ct = c->ctrl; + const struct flash_info *info = NULL; + uint32_t iid, id_size; +#define MAX_ID_SIZE 16 + uint8_t id[MAX_ID_SIZE]; + int rc, i; + + if (ct->chip_id) { + /* High level controller interface */ + id_size = MAX_ID_SIZE; + rc = ct->chip_id(ct, id, &id_size); + } else + rc = fl_chip_id(ct, id, &id_size); + if (rc) + return rc; + if (id_size < 3) + return FLASH_ERR_CHIP_UNKNOWN; + + /* Convert to a dword for lookup */ + iid = id[0]; + iid = (iid << 8) | id[1]; + iid = (iid << 8) | id[2]; + + FL_DBG("LIBFLASH: Flash ID: %02x.%02x.%02x (%06x)\n", + id[0], id[1], id[2], iid); + + /* Lookup in flash_info */ + for (i = 0; i < ARRAY_SIZE(flash_info); i++) { + info = &flash_info[i]; + if (info->id == iid) + break; + } + if (!info || info->id != iid) + return FLASH_ERR_CHIP_UNKNOWN; + + c->info = *info; + c->tsize = info->size; + ct->finfo = &c->info; + + /* + * Let controller know about our settings and possibly + * override them + */ + if (ct->setup) { + rc = ct->setup(ct, &c->tsize); + if (rc) + return rc; + } + + /* Calculate min erase granularity */ + if (c->info.flags & FL_ERASE_4K) + c->min_erase_mask = 0xfff; + else if (c->info.flags & FL_ERASE_32K) + c->min_erase_mask = 0x7fff; + else if (c->info.flags & FL_ERASE_64K) + c->min_erase_mask = 0xffff; + else { + /* No erase size ? oops ... */ + FL_ERR("LIBFLASH: No erase sizes !\n"); + return FLASH_ERR_CTRL_CONFIG_MISMATCH; + } + + FL_DBG("LIBFLASH: Found chip %s size %dM erase granule: %dK\n", + c->info.name, c->tsize >> 20, (c->min_erase_mask + 1) >> 10); + + return 0; +} + +static int flash_set_4b(struct flash_chip *c, bool enable) +{ + struct spi_flash_ctrl *ct = c->ctrl; + int rc; + + /* Don't have low level interface, assume all is well */ + if (!ct->cmd_wr) + return 0; + + /* Some flash chips want this */ + rc = fl_wren(ct); + if (rc) { + FL_ERR("LIBFLASH: Error %d enabling write for set_4b\n", rc); + /* Ignore the error & move on (could be wrprotect chip) */ + } + + /* Ignore error in case chip is write protected */ + return ct->cmd_wr(ct, enable ? 
CMD_EN4B : CMD_EX4B, false, 0, NULL, 0); +} + +int flash_force_4b_mode(struct flash_chip *c, bool enable_4b) +{ + struct spi_flash_ctrl *ct = c->ctrl; + int rc = FLASH_ERR_4B_NOT_SUPPORTED; + + /* + * We only allow force 4b if both controller and flash do 4b + * as this is mainly used if a 3rd party tries to directly + * access a direct mapped read region + */ + if (enable_4b && !((c->info.flags & FL_CAN_4B) && ct->set_4b)) + return rc; + + /* Only send to flash directly on controllers that implement + * the low level callbacks + */ + if (ct->cmd_wr) { + rc = flash_set_4b(c, enable_4b); + if (rc) + return rc; + } + + /* Then inform the controller */ + if (ct->set_4b) + rc = ct->set_4b(ct, enable_4b); + return rc; +} + +static int flash_configure(struct flash_chip *c) +{ + struct spi_flash_ctrl *ct = c->ctrl; + int rc; + + /* Crop flash size if necessary */ + if (c->tsize > 0x01000000 && !(c->info.flags & FL_CAN_4B)) { + FL_ERR("LIBFLASH: Flash chip cropped to 16M, no 4b mode\n"); + c->tsize = 0x01000000; + } + + /* If flash chip > 16M, enable 4b mode */ + if (c->tsize > 0x01000000) { + FL_DBG("LIBFLASH: Flash >16MB, enabling 4B mode...\n"); + + /* Set flash to 4b mode if we can */ + if (ct->cmd_wr) { + rc = flash_set_4b(c, true); + if (rc) { + FL_ERR("LIBFLASH: Failed to set flash 4b mode\n"); + return rc; + } + } + + + /* Set controller to 4b mode if supported */ + if (ct->set_4b) { + FL_DBG("LIBFLASH: Enabling controller 4B mode...\n"); + rc = ct->set_4b(ct, true); + if (rc) { + FL_ERR("LIBFLASH: Failed to set controller 4b mode\n"); + return rc; + } + } + } else { + FL_DBG("LIBFLASH: Flash <=16MB, disabling 4B mode...\n"); + + /* + * If flash chip supports 4b mode, make sure we disable + * it in case it was left over by the previous user + */ + if (c->info.flags & FL_CAN_4B) { + rc = flash_set_4b(c, false); + if (rc) { + FL_ERR("LIBFLASH: Failed to" + " clear flash 4b mode\n"); + return rc; + } + } + /* Set controller to 3b mode if mode switch is supported */ + if (ct->set_4b) { + FL_DBG("LIBFLASH: Disabling controller 4B mode...\n"); + rc = ct->set_4b(ct, false); + if (rc) { + FL_ERR("LIBFLASH: Failed to" + " clear controller 4b mode\n"); + return rc; + } + } + } + return 0; +} + +static int flash_get_info(struct blocklevel_device *bl, const char **name, + uint64_t *total_size, uint32_t *erase_granule) +{ + struct flash_chip *c = container_of(bl, struct flash_chip, bl); + if (name) + *name = c->info.name; + if (total_size) + *total_size = c->tsize; + if (erase_granule) + *erase_granule = c->min_erase_mask + 1; + return 0; +} + +int flash_init(struct spi_flash_ctrl *ctrl, struct blocklevel_device **bl, + struct flash_chip **flash_chip) +{ + struct flash_chip *c; + int rc; + + if (!bl) + return FLASH_ERR_PARM_ERROR; + + *bl = NULL; + + c = malloc(sizeof(struct flash_chip)); + if (!c) + return FLASH_ERR_MALLOC_FAILED; + memset(c, 0, sizeof(*c)); + c->ctrl = ctrl; + + rc = flash_identify(c); + if (rc) { + FL_ERR("LIBFLASH: Flash identification failed\n"); + goto bail; + } + c->smart_buf = malloc(c->min_erase_mask + 1); + if (!c->smart_buf) { + FL_ERR("LIBFLASH: Failed to allocate smart buffer !\n"); + rc = FLASH_ERR_MALLOC_FAILED; + goto bail; + } + rc = flash_configure(c); + if (rc) + FL_ERR("LIBFLASH: Flash configuration failed\n"); +bail: + if (rc) { + free(c); + return rc; + } + + /* The flash backend doesn't support reiniting it */ + c->bl.keep_alive = true; + c->bl.reacquire = NULL; + c->bl.release = NULL; + c->bl.read = &flash_read; + c->bl.write = &flash_smart_write; + c->bl.erase 
= &flash_erase; + c->bl.get_info = &flash_get_info; + c->bl.erase_mask = c->min_erase_mask; + c->bl.flags = WRITE_NEED_ERASE; + + *bl = &(c->bl); + if (flash_chip) + *flash_chip = c; + + return 0; +} + +void flash_exit(struct blocklevel_device *bl) +{ + /* XXX Make sure we are idle etc... */ + if (bl) { + struct flash_chip *c = container_of(bl, struct flash_chip, bl); + free(c->smart_buf); + free(c); + } +} + +void flash_exit_close(struct blocklevel_device *bl, void (*close)(struct spi_flash_ctrl *ctrl)) +{ + if (bl) { + struct flash_chip *c = container_of(bl, struct flash_chip, bl); + close(c->ctrl); + free(c); + } +} diff --git a/roms/skiboot/libflash/libflash.h b/roms/skiboot/libflash/libflash.h new file mode 100644 index 000000000..9e83d4198 --- /dev/null +++ b/roms/skiboot/libflash/libflash.h @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* Copyright 2013-2017 IBM Corp. */ + +#ifndef __LIBFLASH_H +#define __LIBFLASH_H + +#include <stdint.h> +#include <stdbool.h> +#include <libflash/blocklevel.h> + +/* API status/return: + * + * <0 = flash controller errors passed through, + * 0 = success + * >0 = libflash error + */ +#include <libflash/errors.h> + +#ifndef MIN +#define MIN(a, b) ((a) < (b) ? (a) : (b)) +#endif + +/* Flash chip, opaque */ +struct flash_chip; +struct spi_flash_ctrl; + +int flash_init(struct spi_flash_ctrl *ctrl, struct blocklevel_device **bl, + struct flash_chip **flash_chip); +void flash_exit(struct blocklevel_device *bl); + +/* + * Function which till call close on the underlying struct spi_flash_ctrl + */ +void flash_exit_close(struct blocklevel_device *bl, void (*close)(struct spi_flash_ctrl *ctrl)); + +/* libflash sets the 4b mode automatically based on the flash + * size and controller capabilities but it can be overriden + */ +int flash_force_4b_mode(struct flash_chip *c, bool enable_4b); + +/* + * This provides a wapper around flash_read() on ECCed data. All params are + * the same as to flash_read(). Not passing true in ecc is akin to calling + * flash_read() directly. + * + * len is length of data without ecc attached therefore this will read beyond + * pos + len. + */ +int flash_read_corrected(struct blocklevel_device *bl, uint32_t pos, void *buf, + uint32_t len, bool ecc); + +/* + * This provides a wrapper around flash_write() on ECCed data. All params are + * the same as to flash_write(). Not passing true in ecc is akin to calling + * flash_write() directly. + * + * size is length of data without ECC attached therefore this will write beyond + * dst + size. + */ +int flash_write_corrected(struct blocklevel_device *bl, uint32_t dst, const void *src, + uint32_t size, bool verify, bool ecc); + +/* + * This provides a wrapper around flash_smart_write() on ECCed data. All + * params are the same as to flash_smart_write(). Not passing true in ecc is + * akin to calling flash_smart_write() directly. + * + * size is length of data without ECC attached therefore this will write beyond + * dst + size. 
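To make the corrected-accessor contract concrete, here is a hypothetical caller; update_partition and part_off are made-up names, bl is assumed to come from flash_init(), and the region is assumed to be stored with ECC. The key point from the comments above is that the length passed in is the plain-data length, so roughly len + len/8 bytes of flash are actually touched.

#include <stdlib.h>
#include <libflash/libflash.h>

/* Hypothetical: read-modify-write 4KiB of an ECC-protected partition. */
static int update_partition(struct blocklevel_device *bl, uint32_t part_off)
{
        char *buf = malloc(0x1000);
        int rc;

        if (!buf)
                return FLASH_ERR_MALLOC_FAILED;

        /* len is the plain-data length; about 0x1200 bytes of flash are read */
        rc = flash_read_corrected(bl, part_off, buf, 0x1000, true);
        if (rc)
                goto out;

        buf[0] ^= 1;    /* some change */

        /* smart write handles any needed erase; ecc=true re-attaches ECC */
        rc = flash_smart_write_corrected(bl, part_off, buf, 0x1000, true);
out:
        free(buf);
        return rc;
}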
+ */ +int flash_smart_write_corrected(struct blocklevel_device *bl, uint32_t dst, const void *src, + uint32_t size, bool ecc); + +/* chip erase may not be supported by all chips/controllers, get ready + * for FLASH_ERR_CHIP_ER_NOT_SUPPORTED + */ +int flash_erase_chip(struct flash_chip *c); + +#endif /* __LIBFLASH_H */ diff --git a/roms/skiboot/libflash/mbox-flash.c b/roms/skiboot/libflash/mbox-flash.c new file mode 100644 index 000000000..6da77d7fc --- /dev/null +++ b/roms/skiboot/libflash/mbox-flash.c @@ -0,0 +1,1199 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* Copyright 2017-2018 IBM Corp. */ + +#define pr_fmt(fmt) "MBOX-FLASH: " fmt + +#define _GNU_SOURCE +#include <errno.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include <skiboot.h> +#include <inttypes.h> +#include <timebase.h> +#include <timer.h> +#include <libflash/libflash.h> +#include <libflash/mbox-flash.h> +#include <lpc.h> +#include <lpc-mbox.h> + +#include <ccan/container_of/container_of.h> + +#ifndef __SKIBOOT__ +#ifndef __TEST__ +#error "This libflash backend must be compiled with skiboot" +#endif +#endif + +/* Same technique as BUILD_BUG_ON from linux */ +#define CHECK_HANDLER_SIZE(handlers) ((void)sizeof(char[1 - 2*!!(ARRAY_SIZE(handlers) != (MBOX_COMMAND_COUNT + 1))])) + +#define MBOX_DEFAULT_TIMEOUT 3 /* seconds */ + +#define MSG_CREATE(init_command) { .command = init_command } + +struct mbox_flash_data; +typedef void (mbox_handler)(struct mbox_flash_data *, struct bmc_mbox_msg *); + +struct lpc_window { + uint32_t lpc_addr; /* Offset into LPC space */ + uint32_t cur_pos; /* Current position of the window in the flash */ + uint32_t size; /* Size of the window into the flash */ + bool open; +}; + +struct mbox_flash_data { + int version; + uint16_t timeout; + uint32_t shift; + struct lpc_window read; + struct lpc_window write; + struct blocklevel_device bl; + uint32_t total_size; + uint32_t erase_granule; + int rc; + bool reboot; + bool pause; + bool busy; + bool ack; + mbox_handler **handlers; +}; + +static mbox_handler mbox_flash_do_nop; +static mbox_handler mbox_flash_do_illegal; + +/* Version 1, 2, 3 compatible */ +static mbox_handler mbox_flash_do_get_mbox_info; + +/* Version 2 and 3 compatible */ +static mbox_handler mbox_flash_do_get_flash_info; +static mbox_handler mbox_flash_do_get_flash_info_v1; + +/* Version 2 and 3 compatible */ +static mbox_handler mbox_flash_do_create_read_window; +static mbox_handler mbox_flash_do_create_read_window_v1; + +/* Version 2 and 3 compatible */ +static mbox_handler mbox_flash_do_create_write_window; +static mbox_handler mbox_flash_do_create_write_window_v1; + +/* Version 1, 2, 3 compatible */ +static mbox_handler mbox_flash_do_close_window; + +/* Plus one, commands start at 1 */ +static mbox_handler *handlers_v3[] = { + NULL, + &mbox_flash_do_nop, + &mbox_flash_do_get_mbox_info, + &mbox_flash_do_get_flash_info, + &mbox_flash_do_create_read_window, + &mbox_flash_do_close_window, + &mbox_flash_do_create_write_window, + &mbox_flash_do_nop, + &mbox_flash_do_nop, + &mbox_flash_do_nop, + &mbox_flash_do_nop, + &mbox_flash_do_nop, + &mbox_flash_do_nop +}; + +/* Plus one, commands start at 1 */ +static mbox_handler *handlers_v2[] = { + NULL, + &mbox_flash_do_nop, + &mbox_flash_do_get_mbox_info, + &mbox_flash_do_get_flash_info, + &mbox_flash_do_create_read_window, + &mbox_flash_do_close_window, + &mbox_flash_do_create_write_window, + &mbox_flash_do_nop, + &mbox_flash_do_nop, + &mbox_flash_do_nop, + &mbox_flash_do_nop, + &mbox_flash_do_illegal, 
+ &mbox_flash_do_illegal +}; + +/* + * Plus one, commands start at 1. + * V2 adds a command so there should never be a response for the last + * command. + * Ensure we print an error message with mbox_flash_do_illegal(). + */ +static mbox_handler *handlers_v1[] = { + NULL, + &mbox_flash_do_nop, + &mbox_flash_do_get_mbox_info, + &mbox_flash_do_get_flash_info_v1, + &mbox_flash_do_create_read_window_v1, + &mbox_flash_do_close_window, + &mbox_flash_do_create_write_window_v1, + &mbox_flash_do_nop, + &mbox_flash_do_nop, + &mbox_flash_do_nop, + &mbox_flash_do_illegal, + &mbox_flash_do_illegal, + &mbox_flash_do_illegal +}; + + +static void mbox_flash_callback(struct bmc_mbox_msg *msg, void *priv); +static void mbox_flash_attn(uint8_t attn, void *priv); + +static int protocol_init(struct mbox_flash_data *mbox_flash, uint8_t shift); + +static int lpc_window_read(struct mbox_flash_data *mbox_flash, uint32_t pos, + void *buf, uint32_t len) +{ + uint32_t off = mbox_flash->read.lpc_addr + (pos - mbox_flash->read.cur_pos); + int rc; + + prlog(PR_TRACE, "Reading at 0x%08x for 0x%08x offset: 0x%08x\n", + pos, len, off); + + while(len) { + uint32_t chunk; + uint32_t dat; + + /* XXX: make this read until it's aligned */ + if (len > 3 && !(off & 3)) { + rc = lpc_read(OPAL_LPC_FW, off, &dat, 4); + if (!rc) { + /* + * lpc_read swaps to CPU endian but it's not + * really a 32-bit value, so convert back. + */ + *(__be32 *)buf = cpu_to_be32(dat); + } + chunk = 4; + } else { + rc = lpc_read(OPAL_LPC_FW, off, &dat, 1); + if (!rc) + *(uint8_t *)buf = dat; + chunk = 1; + } + if (rc) { + prlog(PR_ERR, "lpc_read failure %d to FW 0x%08x\n", rc, off); + return rc; + } + len -= chunk; + off += chunk; + buf += chunk; + } + + return 0; +} + +static int lpc_window_write(struct mbox_flash_data *mbox_flash, uint32_t pos, + const void *buf, uint32_t len) +{ + uint32_t off = mbox_flash->write.lpc_addr + (pos - mbox_flash->write.cur_pos); + int rc; + + + prlog(PR_TRACE, "Writing at 0x%08x for 0x%08x offset: 0x%08x\n", + pos, len, off); + + while(len) { + uint32_t chunk; + + if (len > 3 && !(off & 3)) { + /* endian swap: see lpc_window_write */ + uint32_t dat = be32_to_cpu(*(__be32 *)buf); + + rc = lpc_write(OPAL_LPC_FW, off, dat, 4); + chunk = 4; + } else { + uint8_t dat = *(uint8_t *)buf; + + rc = lpc_write(OPAL_LPC_FW, off, dat, 1); + chunk = 1; + } + if (rc) { + prlog(PR_ERR, "lpc_write failure %d to FW 0x%08x\n", rc, off); + return rc; + } + len -= chunk; + off += chunk; + buf += chunk; + } + + return 0; +} + +static uint64_t mbox_flash_mask(struct mbox_flash_data *mbox_flash) +{ + return (1ULL << mbox_flash->shift) - 1; +} + +__unused static uint8_t msg_get_u8(struct bmc_mbox_msg *msg, int i) +{ + return msg->args[i]; +} + +static void msg_put_u8(struct bmc_mbox_msg *msg, int i, uint8_t val) +{ + msg->args[i] = val; +} + +static uint16_t msg_get_u16(struct bmc_mbox_msg *msg, int i) +{ + return le16_to_cpu(*(__le16 *)(&msg->args[i])); +} + +static void msg_put_u16(struct bmc_mbox_msg *msg, int i, uint16_t val) +{ + __le16 tmp = cpu_to_le16(val); + memcpy(&msg->args[i], &tmp, sizeof(val)); +} + +static uint32_t msg_get_u32(struct bmc_mbox_msg *msg, int i) +{ + return le32_to_cpu(*(__le32 *)(&msg->args[i])); +} + +static void msg_put_u32(struct bmc_mbox_msg *msg, int i, uint32_t val) +{ + __le32 tmp = cpu_to_le32(val); + memcpy(&msg->args[i], &tmp, sizeof(val)); +} + +static uint32_t blocks_to_bytes(struct mbox_flash_data *mbox_flash, uint16_t blocks) +{ + return blocks << mbox_flash->shift; +} + +static uint16_t 
bytes_to_blocks(struct mbox_flash_data *mbox_flash, + uint32_t bytes) +{ + return bytes >> mbox_flash->shift; +} + +/* + * The BMC may send is an out of band message to say that it doesn't + * own the flash anymore. + * It guarantees we can still access our (open) windows but it does + * not guarantee their contents until it clears the bit without + * sending us a corresponding bit to say that the windows are bad + * first. + * Since this is all things that will happen in the future, we should + * not perform any calls speculatively as its almost impossible to + * rewind. + */ +static bool is_paused(struct mbox_flash_data *mbox_flash) +{ + return mbox_flash->pause; +} + +/* + * After a read or a write it is wise to check that the window we just + * read/write to/from is still valid otherwise it is possible some of + * the data didn't make it. + * This check is an optimisation as we'll close all our windows on any + * notification from the BMC that the windows are bad. See the above + * comment about is_paused(). + * A foolproof (but much closer) method of validating reads/writes + * would be to attempt to close the window, if that fails then we can + * be sure that the read/write was no good. + */ +static bool is_valid(struct mbox_flash_data *mbox_flash, struct lpc_window *win) +{ + return !is_paused(mbox_flash) && win->open; +} + +/* + * Check if we've received a BMC reboot notification. + * The strategy is to check on entry to mbox-flash and return a + * failure accordingly. Races will be handled by the fact that the BMC + * won't respond so timeouts will occur. As an added precaution + * msg_send() checks right before sending a message (to make the race + * as small as possible to avoid needless timeouts). + */ +static bool is_reboot(struct mbox_flash_data *mbox_flash) +{ + return mbox_flash->reboot; +} + +static int msg_send(struct mbox_flash_data *mbox_flash, struct bmc_mbox_msg *msg, + unsigned int timeout_sec) +{ + if (is_reboot(mbox_flash)) + return FLASH_ERR_AGAIN; + mbox_flash->busy = true; + mbox_flash->rc = 0; + return bmc_mbox_enqueue(msg, timeout_sec); +} + +static int wait_for_bmc(struct mbox_flash_data *mbox_flash, unsigned int timeout_sec) +{ + unsigned long last = 1, start = tb_to_secs(mftb()); + prlog(PR_TRACE, "Waiting for BMC\n"); + while (mbox_flash->busy && timeout_sec > last) { + long now = tb_to_secs(mftb()); + if (now - start > last) { + if (last < timeout_sec / 2) + prlog(PR_TRACE, "Been waiting for the BMC for %lu secs\n", last); + else + prlog(PR_ERR, "BMC NOT RESPONDING %lu second wait\n", last); + last++; + } + /* + * Both functions are important. + * Well time_wait_ms() relaxes the spin... so... its nice + */ + check_timers(false); + if (mbox_flash->busy) + time_wait_ms(MBOX_DEFAULT_POLL_MS); + asm volatile ("" ::: "memory"); + } + + if (mbox_flash->busy) { + prlog(PR_ERR, "Timeout waiting for BMC\n"); + mbox_flash->busy = false; + return MBOX_R_TIMEOUT; + } + + return mbox_flash->rc; +} + +static int mbox_flash_ack(struct mbox_flash_data *mbox_flash, uint8_t reg) +{ + struct bmc_mbox_msg msg = MSG_CREATE(MBOX_C_BMC_EVENT_ACK); + int rc; + + msg_put_u8(&msg, 0, reg); + + /* Clear this first so msg_send() doesn't freak out */ + mbox_flash->reboot = false; + + /* + * Use a lower timeout - there is strong evidence to suggest the + * BMC won't respond, don't waste time spinning here just have the + * high levels retry when the BMC might be back + */ + rc = msg_send(mbox_flash, &msg, 3); + + /* Still need to deal with it, we've only acked it now. 
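A quick illustration of the block arithmetic used throughout this file: everything on the wire is expressed in blocks of (1 << shift) bytes, and bytes_to_blocks() truncates, which is why callers align positions down to a block boundary before converting. A standalone sketch with an assumed shift of 12 (4KiB blocks, the v1 default):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t shift = 12;                    /* 4KiB blocks, the v1 default */
        uint32_t pos = 0x3400;                  /* not block aligned */

        uint32_t blocks = pos >> shift;         /* truncates to 3 */
        uint32_t aligned = blocks << shift;     /* 0x3000 */

        printf("pos 0x%x -> block %u, which starts at flash offset 0x%x\n",
               pos, blocks, aligned);
        return 0;
}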
*/ + mbox_flash->reboot = true; + + if (rc) { + prlog(PR_ERR, "Failed to enqueue/send BMC MBOX message\n"); + return rc; + } + + /* + * Use a lower timeout - there is strong evidence to suggest the + * BMC won't respond, don't waste time spinning here just have the + * high levels retry when the BMC might be back + */ + rc = wait_for_bmc(mbox_flash, 3); + if (rc) + prlog(PR_ERR, "Error waiting for BMC\n"); + + return rc; +} + +static int do_acks(struct mbox_flash_data *mbox_flash) +{ + int rc; + + if (!mbox_flash->ack) + return 0; /* Nothing to do */ + + rc = mbox_flash_ack(mbox_flash, bmc_mbox_get_attn_reg() & MBOX_ATTN_ACK_MASK); + if (!rc) + mbox_flash->ack = false; + + return rc; +} + +static void mbox_flash_do_nop(struct mbox_flash_data *mbox_flash __unused, + struct bmc_mbox_msg *msg __unused) +{ +} + +static void mbox_flash_do_illegal(struct mbox_flash_data *mbox_flash __unused, + struct bmc_mbox_msg *msg __unused) +{ + prlog(PR_CRIT, "Got response to unknown message type\n"); +} + +/* Version 1, 2 and 3 compatible */ +static void mbox_flash_do_get_mbox_info(struct mbox_flash_data *mbox_flash, + struct bmc_mbox_msg *msg) +{ + + mbox_flash->version = msg_get_u8(msg, 0); + switch (mbox_flash->version) { + case 1: + /* Not all version 1 daemons set argument 5 correctly */ + mbox_flash->shift = 12; /* Protocol hardcodes to 4K anyway */ + mbox_flash->read.size = blocks_to_bytes(mbox_flash, msg_get_u16(msg, 1)); + mbox_flash->write.size = blocks_to_bytes(mbox_flash, msg_get_u16(msg, 3)); + break; + case 3: + case 2: + mbox_flash->shift = msg_get_u8(msg, 5); + mbox_flash->timeout = msg_get_u16(msg, 6); + if (mbox_flash->timeout == 0) + mbox_flash->timeout = MBOX_DEFAULT_TIMEOUT; + break; + } + /* Callers will handle the case where the version is not known + * + * Here we deliberately ignore the 'default' sizes. + * All windows opened will not provide a hint and we're + * happy to let the BMC figure everything out. + * Future optimisations may use the default size. 
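As a reading aid only (not an authoritative protocol definition), the field offsets decoded by the handler above can be reconstructed from the msg_get_*() calls: arg 0 is the negotiated version, args 1-2 and 3-4 carry the v1 window sizes in blocks, arg 5 is the v2+ block-size shift, and args 6-7 are the v2+ timeout. A small sketch with hypothetical response bytes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical response bytes; the real args live in struct bmc_mbox_msg */
        uint8_t args[16] = { 0 };
        uint16_t timeout;

        args[0] = 3;                    /* negotiated protocol version */
        args[5] = 12;                   /* v2+: block size shift (4KiB blocks) */
        args[6] = 30; args[7] = 0;      /* v2+: timeout in seconds, LE u16 */

        timeout = (uint16_t)(args[6] | (args[7] << 8)); /* wire format is LE */
        if (!timeout)
                timeout = 3;            /* fall back to the default, as above */

        printf("v%u, block size %u bytes, timeout %us\n",
               args[0], 1u << args[5], timeout);
        return 0;
}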
+ */ +} + +/* Version 2 and 3 compatible */ +static void mbox_flash_do_get_flash_info(struct mbox_flash_data *mbox_flash, + struct bmc_mbox_msg *msg) +{ + mbox_flash->total_size = blocks_to_bytes(mbox_flash, msg_get_u16(msg, 0)); + mbox_flash->erase_granule = blocks_to_bytes(mbox_flash, msg_get_u16(msg, 2)); +} + +static void mbox_flash_do_get_flash_info_v1(struct mbox_flash_data *mbox_flash, + struct bmc_mbox_msg *msg) +{ + mbox_flash->total_size = msg_get_u32(msg, 0); + mbox_flash->erase_granule = msg_get_u32(msg, 4); +} + +/* Version 2 and 3 compatible */ +static void mbox_flash_do_create_read_window(struct mbox_flash_data *mbox_flash, + struct bmc_mbox_msg *msg) +{ + mbox_flash->read.lpc_addr = blocks_to_bytes(mbox_flash, msg_get_u16(msg, 0)); + mbox_flash->read.size = blocks_to_bytes(mbox_flash, msg_get_u16(msg, 2)); + mbox_flash->read.cur_pos = blocks_to_bytes(mbox_flash, msg_get_u16(msg, 4)); + mbox_flash->read.open = true; + mbox_flash->write.open = false; +} + +static void mbox_flash_do_create_read_window_v1(struct mbox_flash_data *mbox_flash, + struct bmc_mbox_msg *msg) +{ + mbox_flash->read.lpc_addr = blocks_to_bytes(mbox_flash, msg_get_u16(msg, 0)); + mbox_flash->read.open = true; + mbox_flash->write.open = false; +} + +/* Version 2 and 3 compatible */ +static void mbox_flash_do_create_write_window(struct mbox_flash_data *mbox_flash, + struct bmc_mbox_msg *msg) +{ + mbox_flash->write.lpc_addr = blocks_to_bytes(mbox_flash, msg_get_u16(msg, 0)); + mbox_flash->write.size = blocks_to_bytes(mbox_flash, msg_get_u16(msg, 2)); + mbox_flash->write.cur_pos = blocks_to_bytes(mbox_flash, msg_get_u16(msg, 4)); + mbox_flash->write.open = true; + mbox_flash->read.open = false; +} + +static void mbox_flash_do_create_write_window_v1(struct mbox_flash_data *mbox_flash, + struct bmc_mbox_msg *msg) +{ + mbox_flash->write.lpc_addr = blocks_to_bytes(mbox_flash, msg_get_u16(msg, 0)); + mbox_flash->write.open = true; + mbox_flash->read.open = false; +} + +/* Version 1 and Version 2 compatible */ +static void mbox_flash_do_close_window(struct mbox_flash_data *mbox_flash, + struct bmc_mbox_msg *msg __unused) +{ + mbox_flash->read.open = false; + mbox_flash->write.open = false; +} + +static int handle_reboot(struct mbox_flash_data *mbox_flash) +{ + int rc; + + /* + * If the BMC ready bit isn't present then we're basically + * guaranteed to timeout trying to talk to it so just fail + * whatever is trying to happen. 
+ * Importantly, we can't trust that the presence of the bit means + * the daemon is ok - don't assume it is going to respond at all + * from here onwards + */ + if (!(bmc_mbox_get_attn_reg() & MBOX_ATTN_BMC_DAEMON_READY)) + return FLASH_ERR_AGAIN; + + /* Clear this first so msg_send() doesn't freak out */ + mbox_flash->reboot = false; + + rc = do_acks(mbox_flash); + if (rc) { + if (rc == MBOX_R_TIMEOUT) + rc = FLASH_ERR_AGAIN; + mbox_flash->reboot = true; + return rc; + } + + rc = protocol_init(mbox_flash, 0); + if (rc) + mbox_flash->reboot = true; + + return rc; +} + +static bool do_delayed_work(struct mbox_flash_data *mbox_flash) +{ + return is_paused(mbox_flash) || do_acks(mbox_flash) || + (is_reboot(mbox_flash) && handle_reboot(mbox_flash)); +} + +static int mbox_flash_mark_write(struct mbox_flash_data *mbox_flash, + uint64_t pos, uint64_t len, int type) +{ + struct bmc_mbox_msg msg = MSG_CREATE(type); + int rc; + + if (mbox_flash->version == 1) { + uint32_t start = ALIGN_DOWN(pos, 1 << mbox_flash->shift); + msg_put_u16(&msg, 0, bytes_to_blocks(mbox_flash, pos)); + /* + * We need to make sure that we mark dirty until up to atleast + * pos + len. + */ + msg_put_u32(&msg, 2, pos + len - start); + } else { + uint64_t window_pos = pos - mbox_flash->write.cur_pos; + uint16_t start = bytes_to_blocks(mbox_flash, window_pos); + uint16_t end = bytes_to_blocks(mbox_flash, + ALIGN_UP(window_pos + len, + 1 << mbox_flash->shift)); + + msg_put_u16(&msg, 0, start); + msg_put_u16(&msg, 2, end - start); /* Total Length */ + } + + rc = msg_send(mbox_flash, &msg, mbox_flash->timeout); + if (rc) { + prlog(PR_ERR, "Failed to enqueue/send BMC MBOX message\n"); + return rc; + } + + rc = wait_for_bmc(mbox_flash, mbox_flash->timeout); + if (rc) + prlog(PR_ERR, "Error waiting for BMC\n"); + + return rc; +} + +static int mbox_flash_dirty(struct mbox_flash_data *mbox_flash, uint64_t pos, + uint64_t len) +{ + if (!mbox_flash->write.open) { + prlog(PR_ERR, "Attempting to dirty without an open write window\n"); + return FLASH_ERR_DEVICE_GONE; + } + + return mbox_flash_mark_write(mbox_flash, pos, len, + MBOX_C_MARK_WRITE_DIRTY); +} + +static int mbox_flash_erase(struct mbox_flash_data *mbox_flash, uint64_t pos, + uint64_t len) +{ + if (!mbox_flash->write.open) { + prlog(PR_ERR, "Attempting to erase without an open write window\n"); + return FLASH_ERR_DEVICE_GONE; + } + + return mbox_flash_mark_write(mbox_flash, pos, len, + MBOX_C_MARK_WRITE_ERASED); +} + +static int mbox_flash_flush(struct mbox_flash_data *mbox_flash) +{ + struct bmc_mbox_msg msg = MSG_CREATE(MBOX_C_WRITE_FLUSH); + int rc; + + if (!mbox_flash->write.open) { + prlog(PR_ERR, "Attempting to flush without an open write window\n"); + return FLASH_ERR_DEVICE_GONE; + } + + rc = msg_send(mbox_flash, &msg, mbox_flash->timeout); + if (rc) { + prlog(PR_ERR, "Failed to enqueue/send BMC MBOX message\n"); + return rc; + } + + rc = wait_for_bmc(mbox_flash, mbox_flash->timeout); + if (rc) + prlog(PR_ERR, "Error waiting for BMC\n"); + + return rc; +} + +/* Is the current window able perform the complete operation */ +static bool mbox_window_valid(struct lpc_window *win, uint64_t pos, + uint64_t len) +{ + if (!win->open) + return false; + if (pos < win->cur_pos) /* start */ + return false; + if ((pos + len) > (win->cur_pos + win->size)) /* end */ + return false; + return true; +} + +static int mbox_window_move(struct mbox_flash_data *mbox_flash, + struct lpc_window *win, uint8_t command, + uint64_t pos, uint64_t len, uint64_t *size) +{ + struct bmc_mbox_msg msg = 
MSG_CREATE(command); + int rc; + + /* Is the window currently open valid */ + if (mbox_window_valid(win, pos, len)) { + *size = len; + return 0; + } + + /* V1 needs to remember where it has opened the window, note it + * here. + * If we're running V2 the response to the CREATE_*_WINDOW command + * will overwrite what we've noted here. + */ + win->cur_pos = pos & ~mbox_flash_mask(mbox_flash); + + msg_put_u16(&msg, 0, bytes_to_blocks(mbox_flash, pos)); + rc = msg_send(mbox_flash, &msg, mbox_flash->timeout); + if (rc) { + prlog(PR_ERR, "Failed to enqueue/send BMC MBOX message\n"); + return rc; + } + + mbox_flash->read.open = false; + mbox_flash->write.open = false; + + rc = wait_for_bmc(mbox_flash, mbox_flash->timeout); + if (rc) { + prlog(PR_ERR, "Error waiting for BMC\n"); + return rc; + } + + *size = len; + /* Is length past the end of the window? */ + if ((pos + len) > (win->cur_pos + win->size)) + /* Adjust size to meet current window */ + *size = (win->cur_pos + win->size) - pos; + + /* + * It doesn't make sense for size to be zero if len isn't zero. + * If this condition happens we're most likely going to spin since + * the caller will likely decerement pos by zero then call this + * again. + * Debateable as to if this should return non zero. At least the + * bug will be obvious from the barf. + */ + if (len != 0 && *size == 0) { + prlog(PR_ERR, "Failed read/write!\n"); + prlog(PR_ERR, "Please update your BMC firmware\n"); + prlog(PR_ERR, "Move window is indicating size zero!\n"); + prlog(PR_ERR, "pos: 0x%" PRIx64 ", len: 0x%" PRIx64 "\n", pos, len); + prlog(PR_ERR, "win pos: 0x%08x win size: 0x%08x\n", win->cur_pos, win->size); + /* + * In practice skiboot gets stuck and this eventually + * brings down the host. Just fail pass the error back + * up and hope someone makes a good decision + */ + return MBOX_R_SYSTEM_ERROR; + } + + return rc; +} + +static int mbox_flash_write(struct blocklevel_device *bl, uint64_t pos, + const void *buf, uint64_t len) +{ + struct mbox_flash_data *mbox_flash; + uint64_t size; + + int rc = 0; + + /* LPC is only 32bit */ + if (pos > UINT_MAX || len > UINT_MAX) + return FLASH_ERR_PARM_ERROR; + + mbox_flash = container_of(bl, struct mbox_flash_data, bl); + + if (do_delayed_work(mbox_flash)) + return FLASH_ERR_AGAIN; + + prlog(PR_TRACE, "Flash write at %#" PRIx64 " for %#" PRIx64 "\n", pos, len); + while (len > 0) { + /* Move window and get a new size to read */ + rc = mbox_window_move(mbox_flash, &mbox_flash->write, + MBOX_C_CREATE_WRITE_WINDOW, pos, len, + &size); + if (rc) + return rc; + + /* Perform the read for this window */ + rc = lpc_window_write(mbox_flash, pos, buf, size); + if (rc) + return rc; + + rc = mbox_flash_dirty(mbox_flash, pos, size); + if (rc) + return rc; + + /* + * Must flush here as changing the window contents + * without flushing entitles the BMC to throw away the + * data. Unlike the read case there isn't a need to explicitly + * validate the window, the flush command will fail if the + * window was compromised. 
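A trimmed sketch of the position-to-LPC mapping that the lpc_window_read()/lpc_window_write() helpers earlier in this file rely on: once a window is open, a flash offset inside it maps to LPC firmware space at lpc_addr plus the distance from the window's current position. The struct and the numbers below are illustrative stand-ins, not the real struct lpc_window.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct window {                 /* cut-down stand-in for struct lpc_window */
        uint32_t lpc_addr;      /* base of the window in LPC FW space */
        uint32_t cur_pos;       /* flash offset the window currently maps */
        uint32_t size;
        bool open;
};

/* Returns true and sets *lpc_off when 'pos' is covered by the open window. */
static bool flash_pos_to_lpc(const struct window *w, uint32_t pos,
                             uint32_t *lpc_off)
{
        if (!w->open || pos < w->cur_pos || pos >= w->cur_pos + w->size)
                return false;
        *lpc_off = w->lpc_addr + (pos - w->cur_pos);
        return true;
}

int main(void)
{
        struct window w = { .lpc_addr = 0x0fc00000, .cur_pos = 0x20000,
                            .size = 0x10000, .open = true };
        uint32_t off;

        if (flash_pos_to_lpc(&w, 0x24000, &off))
                printf("flash 0x24000 -> LPC FW offset 0x%08x\n", off);
        return 0;
}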
+ */ + rc = mbox_flash_flush(mbox_flash); + if (rc) + return rc; + + len -= size; + pos += size; + buf += size; + } + return rc; +} + +static int mbox_flash_read(struct blocklevel_device *bl, uint64_t pos, + void *buf, uint64_t len) +{ + struct mbox_flash_data *mbox_flash; + uint64_t size; + + int rc = 0; + + /* LPC is only 32bit */ + if (pos > UINT_MAX || len > UINT_MAX) + return FLASH_ERR_PARM_ERROR; + + mbox_flash = container_of(bl, struct mbox_flash_data, bl); + + if (do_delayed_work(mbox_flash)) + return FLASH_ERR_AGAIN; + + prlog(PR_TRACE, "Flash read at %#" PRIx64 " for %#" PRIx64 "\n", pos, len); + while (len > 0) { + /* Move window and get a new size to read */ + rc = mbox_window_move(mbox_flash, &mbox_flash->read, + MBOX_C_CREATE_READ_WINDOW, pos, + len, &size); + if (rc) + return rc; + + /* Perform the read for this window */ + rc = lpc_window_read(mbox_flash, pos, buf, size); + if (rc) + return rc; + + len -= size; + pos += size; + buf += size; + /* + * Ensure my window is still open, if it isn't we can't trust + * what we read + */ + if (!is_valid(mbox_flash, &mbox_flash->read)) + return FLASH_ERR_AGAIN; + } + return rc; +} + +static bool mbox_flash_reset(struct blocklevel_device *bl) +{ + int rc; + struct mbox_flash_data *mbox_flash; + struct bmc_mbox_msg msg = MSG_CREATE(MBOX_C_RESET_STATE); + + prlog(PR_NOTICE, "MBOX reset\n"); + mbox_flash = container_of(bl, struct mbox_flash_data, bl); + + rc = msg_send(mbox_flash, &msg, mbox_flash->timeout); + if (rc) { + prlog(PR_ERR, "Failed to enqueue/send BMC MBOX RESET msg\n"); + return false; + } + if (wait_for_bmc(mbox_flash, mbox_flash->timeout)) { + prlog(PR_ERR, "Error waiting for BMC\n"); + return false; + } + + return true; +} + +static int mbox_flash_get_info(struct blocklevel_device *bl, const char **name, + uint64_t *total_size, uint32_t *erase_granule) +{ + struct bmc_mbox_msg msg = MSG_CREATE(MBOX_C_GET_FLASH_INFO); + struct mbox_flash_data *mbox_flash; + int rc; + + mbox_flash = container_of(bl, struct mbox_flash_data, bl); + + if (do_delayed_work(mbox_flash)) + return FLASH_ERR_AGAIN; + + /* + * We want to avoid runtime mallocs in skiboot. The expected + * behavour to uses of libflash is that one can free() the memory + * returned. + * NULL will do for now. 
+ */ + if (name) + *name = NULL; + + mbox_flash->busy = true; + rc = msg_send(mbox_flash, &msg, mbox_flash->timeout); + if (rc) { + prlog(PR_ERR, "Failed to enqueue/send BMC MBOX message\n"); + return rc; + } + + if (wait_for_bmc(mbox_flash, mbox_flash->timeout)) { + prlog(PR_ERR, "Error waiting for BMC\n"); + return rc; + } + + mbox_flash->bl.erase_mask = mbox_flash->erase_granule - 1; + + if (total_size) + *total_size = mbox_flash->total_size; + if (erase_granule) + *erase_granule = mbox_flash->erase_granule; + + return rc; +} + +static int mbox_flash_erase_v2(struct blocklevel_device *bl, uint64_t pos, + uint64_t len) +{ + struct mbox_flash_data *mbox_flash; + + /* LPC is only 32bit */ + if (pos > UINT_MAX || len > UINT_MAX) + return FLASH_ERR_PARM_ERROR; + + mbox_flash = container_of(bl, struct mbox_flash_data, bl); + + prlog(PR_TRACE, "Flash erase at 0x%08x for 0x%08x\n", (u32) pos, (u32) len); + while (len > 0) { + uint64_t size; + int rc; + + /* Move window and get a new size to erase */ + rc = mbox_window_move(mbox_flash, &mbox_flash->write, + MBOX_C_CREATE_WRITE_WINDOW, pos, len, &size); + if (rc) + return rc; + + rc = mbox_flash_erase(mbox_flash, pos, size); + if (rc) + return rc; + + /* + * Flush directly, don't mark that region dirty otherwise it + * isn't clear if a write happened there or not + */ + + rc = mbox_flash_flush(mbox_flash); + if (rc) + return rc; + + len -= size; + pos += size; + } + + return 0; +} + +static int mbox_flash_erase_v1(struct blocklevel_device *bl __unused, + uint64_t pos __unused, uint64_t len __unused) +{ + /* + * We can probably get away with doing nothing. + * TODO: Rethink this, causes interesting behaviour in pflash. + * Users do expect pflash -{e,E} to do something. This is because + * on real flash this would have set that region to all 0xFF but + * really the erase at the blocklevel interface was only designed + * to be "please make this region writeable". + * It may be wise (despite the large performance penalty) to + * actually write all 0xFF here. I'll leave that as an exercise + * for the future. + */ + + return 0; +} + +/* Called from interrupt handler, don't send any mbox messages */ +static void mbox_flash_attn(uint8_t attn, void *priv) +{ + struct mbox_flash_data *mbox_flash = priv; + + if (attn & MBOX_ATTN_ACK_MASK) + mbox_flash->ack = true; + if (attn & MBOX_ATTN_BMC_REBOOT) { + mbox_flash->reboot = true; + mbox_flash->read.open = false; + mbox_flash->write.open = false; + attn &= ~MBOX_ATTN_BMC_REBOOT; + } + + if (attn & MBOX_ATTN_BMC_WINDOW_RESET) { + mbox_flash->read.open = false; + mbox_flash->write.open = false; + attn &= ~MBOX_ATTN_BMC_WINDOW_RESET; + } + + if (attn & MBOX_ATTN_BMC_FLASH_LOST) { + mbox_flash->pause = true; + attn &= ~MBOX_ATTN_BMC_FLASH_LOST; + } else { + mbox_flash->pause = false; + } +} + +static void mbox_flash_callback(struct bmc_mbox_msg *msg, void *priv) +{ + struct mbox_flash_data *mbox_flash = priv; + + prlog(PR_TRACE, "BMC OK command %u\n", msg->command); + + if (msg->response != MBOX_R_SUCCESS) { + prlog(PR_ERR, "Bad response code from BMC %d\n", msg->response); + mbox_flash->rc = msg->response; + goto out; + } + + if (msg->command > MBOX_COMMAND_COUNT) { + prlog(PR_ERR, "Got response to unknown command %02x\n", msg->command); + mbox_flash->rc = -1; + goto out; + } + + if (!mbox_flash->handlers[msg->command]) { + prlog(PR_ERR, "Couldn't find handler for message! 
command: %u, seq: %u\n", + msg->command, msg->seq); + mbox_flash->rc = MBOX_R_SYSTEM_ERROR; + goto out; + } + + mbox_flash->rc = 0; + + mbox_flash->handlers[msg->command](mbox_flash, msg); + +out: + mbox_flash->busy = false; +} + +static int protocol_init(struct mbox_flash_data *mbox_flash, uint8_t shift) +{ + struct bmc_mbox_msg msg = MSG_CREATE(MBOX_C_GET_MBOX_INFO); + int rc; + + mbox_flash->read.open = false; + mbox_flash->write.open = false; + + /* Assume V2+ */ + mbox_flash->bl.read = &mbox_flash_read; + mbox_flash->bl.write = &mbox_flash_write; + mbox_flash->bl.erase = &mbox_flash_erase_v2; + mbox_flash->bl.get_info = &mbox_flash_get_info; + + /* Assume V3 */ + mbox_flash->handlers = handlers_v3; + + bmc_mbox_register_callback(&mbox_flash_callback, mbox_flash); + bmc_mbox_register_attn(&mbox_flash_attn, mbox_flash); + + /* + * For V1 of the protocol this is fixed. + * V2+: The init code will update this + */ + mbox_flash->shift = 12; + + /* + * For V1 we'll use this value. + * V2+: The init code (may) update this + */ + mbox_flash->timeout = MBOX_DEFAULT_TIMEOUT; + + /* + * Always attempt init with highest version known. + * The GET_MBOX_INFO response will confirm that the other side can + * talk the highest version, we'll update this variable then if + * our highest version is not supported + */ + mbox_flash->version = 3; + +negotiate_version: + msg_put_u8(&msg, 0, mbox_flash->version); + msg_put_u8(&msg, 1, shift); + rc = msg_send(mbox_flash, &msg, mbox_flash->timeout); + if (rc) { + prlog(PR_ERR, "Failed to enqueue/send BMC MBOX message\n"); + return rc; + } + + rc = wait_for_bmc(mbox_flash, mbox_flash->timeout); + if (rc) { + prlog(PR_ERR, "Error waiting for BMC\n"); + if (mbox_flash->version > 1) { + mbox_flash->version--; + prlog(PR_INFO, "Retrying MBOX negotiation with BMC" + " with MBOXv%d\n", mbox_flash->version); + goto negotiate_version; + } + return rc; + } + + prlog(PR_INFO, "Detected mbox protocol version %d\n", mbox_flash->version); + switch (mbox_flash->version) { + case 1: + mbox_flash->bl.erase = &mbox_flash_erase_v1; + mbox_flash->handlers = handlers_v1; + break; + case 2: + mbox_flash->handlers = handlers_v2; + break; + case 3: + /* Nothing to do we assumed it would be V3 */ + break; + default: + /* + * The BMC is can only lower the requested version not do + * anything else. FWIW there is no verion 0. + */ + prlog(PR_CRIT, "Bad version: %u\n", mbox_flash->version); + rc = FLASH_ERR_PARM_ERROR; + } + + return rc; +} + +int mbox_flash_lock(struct blocklevel_device *bl, uint64_t pos, uint64_t len) +{ + struct mbox_flash_data *mbox_flash; + struct bmc_mbox_msg msg = MSG_CREATE(MBOX_C_MARK_LOCKED); + int rc; + + /* mbox-flash only talks 32bit for now */ + if (pos > UINT_MAX || len > UINT_MAX) + return FLASH_ERR_PARM_ERROR; + + /* + * If the region isn't at least 4k aligned and in size then bail + * out, the protocol won't allow for smaller block sizes. + */ + if (pos & ((1 << 12) - 1) || len & ((1 << 12) - 1)) + return FLASH_ERR_PARM_ERROR; + + mbox_flash = container_of(bl, struct mbox_flash_data, bl); + if ((pos & mbox_flash_mask(mbox_flash)) || (len & mbox_flash_mask(mbox_flash))) { + uint8_t shift = 0; + /* + * The current block size won't work for locking the requested + * region must reinit. 
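The shift computation just below this comment finds the largest power-of-two block size that divides both pos and len, which is the same as taking the lowest set bit of (pos | len). A standalone sketch of the equivalent calculation, with hypothetical lock parameters:

#include <stdint.h>
#include <stdio.h>

/* Largest power-of-two block size (as a shift) dividing both pos and len. */
static uint8_t lock_shift(uint64_t pos, uint64_t len)
{
        uint8_t shift = 0;

        if (!(pos | len))               /* degenerate case, not hit in practice */
                return 0;
        while (!((1ULL << shift) & (pos | len)))
                shift++;
        return shift;
}

int main(void)
{
        /* e.g. locking 0x2000 bytes at offset 0x5000 needs 4KiB blocks */
        printf("shift = %u\n", lock_shift(0x5000, 0x2000));     /* prints 12 */
        return 0;
}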
+ */ + while (!((1 << shift) & pos) && !((1 << shift) & len)) + shift++; + + prlog(PR_INFO, "Locking flash requires re-init from shift of %d to shift of %d\n", + mbox_flash->shift, shift); + + rc = protocol_init(mbox_flash, shift); + if (rc) + return rc; + + /* + * The daemon didn't agree with the requested shift - the + * flash won't be able to be locked + */ + if (mbox_flash->shift > shift) + return FLASH_ERR_PARM_ERROR; + } + + msg_put_u16(&msg, 0, bytes_to_blocks(mbox_flash, pos)); + msg_put_u16(&msg, 2, bytes_to_blocks(mbox_flash, len)); + rc = msg_send(mbox_flash, &msg, mbox_flash->timeout); + if (rc) { + prlog(PR_ERR, "Failed to enqueue/send BMC MBOX message\n"); + return rc; + } + + rc = wait_for_bmc(mbox_flash, mbox_flash->timeout); + if (rc) + prlog(PR_ERR, "Error waiting for BMC\n"); + + return rc; +} + +int mbox_flash_init(struct blocklevel_device **bl) +{ + struct mbox_flash_data *mbox_flash; + int rc; + + CHECK_HANDLER_SIZE(handlers_v3); + CHECK_HANDLER_SIZE(handlers_v2); + CHECK_HANDLER_SIZE(handlers_v1); + + if (!bl) + return FLASH_ERR_PARM_ERROR; + + /* XXX: We only support one blocklevel flash device over mbox. If we + * ever support more than one, move this out. The chances of that are + * slim though due to circumstances. + */ + mbox_init(); + + *bl = NULL; + + mbox_flash = zalloc(sizeof(struct mbox_flash_data)); + if (!mbox_flash) + return FLASH_ERR_MALLOC_FAILED; + + /* Assume V2+ */ + mbox_flash->bl.read = &mbox_flash_read; + mbox_flash->bl.write = &mbox_flash_write; + mbox_flash->bl.erase = &mbox_flash_erase_v2; + mbox_flash->bl.get_info = &mbox_flash_get_info; + mbox_flash->bl.exit = &mbox_flash_exit; + + if (bmc_mbox_get_attn_reg() & MBOX_ATTN_BMC_REBOOT) + rc = handle_reboot(mbox_flash); + else + rc = protocol_init(mbox_flash, 0); + if (rc) { + free(mbox_flash); + return rc; + } + + mbox_flash->bl.keep_alive = 0; + + *bl = &(mbox_flash->bl); + return 0; +} + +bool mbox_flash_exit(struct blocklevel_device *bl) +{ + bool status = true; + struct mbox_flash_data *mbox_flash; + if (bl) { + status = mbox_flash_reset(bl); + mbox_flash = container_of(bl, struct mbox_flash_data, bl); + free(mbox_flash); + } + + return status; +} diff --git a/roms/skiboot/libflash/mbox-flash.h b/roms/skiboot/libflash/mbox-flash.h new file mode 100644 index 000000000..cf8575c56 --- /dev/null +++ b/roms/skiboot/libflash/mbox-flash.h @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* Copyright 2017 IBM Corp. 
*/ + +#ifndef __LIBFLASH_MBOX_FLASH_H +#define __LIBFLASH_MBOX_FLASH_H + +int mbox_flash_lock(struct blocklevel_device *bl, uint64_t pos, uint64_t len); +int mbox_flash_init(struct blocklevel_device **bl); +bool mbox_flash_exit(struct blocklevel_device *bl); +#endif /* __LIBFLASH_MBOX_FLASH_H */ + + diff --git a/roms/skiboot/libflash/test/Makefile.check b/roms/skiboot/libflash/test/Makefile.check new file mode 100644 index 000000000..4dbd7ee75 --- /dev/null +++ b/roms/skiboot/libflash/test/Makefile.check @@ -0,0 +1,167 @@ +# -*-Makefile-*- +libflash_test_test_ipmi_hiomap_SOURCES = \ + libflash/test/test-ipmi-hiomap.c \ + libflash/test/stubs.c \ + libflash/ipmi-hiomap.c + +libflash_test_test_blocklevel_SOURCES = \ + libflash/test/test-blocklevel.c \ + libflash/test/stubs.c + +libflash_test_test_flash_SOURCES = \ + libflash/test/test-flash.c \ + libflash/test/stubs.c \ + libflash/test/mbox-server.c + +libflash_test_test_ecc_SOURCES = \ + libflash/test/test-ecc.c \ + libflash/test/stubs.c \ + libflash/test/mbox-server.c + +libflash_test_test_mbox_SOURCES = \ + libflash/test/test-mbox.c \ + libflash/test/stubs.c \ + libflash/test/mbox-server.c + +check_PROGRAMS = \ + libflash/test/test-ipmi-hiomap \ + libflash/test/test-blocklevel \ + libflash/test/test-flash \ + libflash/test/test-ecc \ + libflash/test/test-mbox + +TEST_FLAGS = -D__TEST__ -MMD -MP + +.PHONY: libflash-check libflash-coverage +libflash-check: $(check_PROGRAMS:%=%-check) +libflash-coverage: $(check_PROGRAMS:%=%-gcov-run) +clean: libflash-test-clean +check: libflash-check +coverage: libflash-coverage +strict-check: TEST_FLAGS += -D__STRICT_TEST__ +strict-check: check + +LCOV_EXCLUDE += $(check_PROGRAMS:%=%.c) + +$(check_PROGRAMS:%=%-check) : %-check : % + $(call QTEST, RUN-TEST , $(VALGRIND) $<, $<) + +# Transform a prerequisite into something approximating a variable name. This +# is used to map check_PROGRAMS prerequisits to the corresponding _SOURCES +# variable. +# +# For example: +# +# $(call prereq2var,libflash/test/test-mbox) +# +# Will output: +# +# 'libflash_test_test_mbox' +# +prereq2var = $(subst /,_,$(subst -,_,$(1))) + +# Generate prerequisites from a target based on the target's corresponding +# _SOURCES variable. +# +# For example, with: +# +# libflash_test_test_mbox_SOURCES = \ +# libflash/test/test-mbox.c \ +# libflash/test/stubs.c \ +# libflash/test/mbox-server.c +# HOST_TRIPLE = x86_64-linux-gnu +# +# A call to target2prereq where the target is libflash/test/test-mbox: +# +# $(call target2prereq,$@,$(HOST_TRIPLE)/) +# +# Will output: +# +# x86_64-linux-gnu/libflash/test/test-mbox.o +# x86_64-linux-gnu/libflash/test/stubs.o +# x86_64-linux-gnu/libflash/test/mbox-server.o +target2prereq = $(patsubst %.c,%.o,$(addprefix $(2),$($(call prereq2var,$(1))_SOURCES))) + +# Generate path stems for all applications in check_PROGRAMS. 
This is usef +# +# For example, with: +# +# libflash_test_test_mbox_SOURCES = \ +# libflash/test/test-mbox.c \ +# libflash/test/stubs.c \ +# libflash/test/mbox-server.c +# libflash_test_test_ecc_SOURCES = \ +# libflash/test/test-ecc.c \ +# libflash/test/stubs.c \ +# libflash/test/mbox-server.c +# check_PROGRAMS = libflash/test/test-mbox libflash/test/test-ecc +# HOST_TRIPLE = x86_64-linux-gnu +# +# A call to: +# +# $(call objstem,$(check_PROGRAMS),$(HOST_TRIPLE)/) +# +# Will output: +# +# x86_64-linux-gnu/libflash/test/test-mbox +# x86_64-linux-gnu/libflash/test/stubs +# x86_64-linux-gnu/libflash/test/mbox-server +# x86_64-linux-gnu/libflash/test/test-ecc +# x86_64-linux-gnu/libflash/test/stubs +# x86_64-linux-gnu/libflash/test/mbox-server +objstem = $(patsubst %.c,%,$(addprefix $(2),$(foreach bin,$(1),$($(call prereq2var,$(bin))_SOURCES)))) + +# Record the host platform triple to separate test vs production objects. +HOST_TRIPLE = $(shell $(HOSTCC) -dumpmachine) + +# Mirror the skiboot directory structure under a directory named after the host +# triple in the skiboot root directory, and place the built objects in this +# mirrored structure. +$(HOST_TRIPLE)/%.o : %.c + @mkdir -p $(dir $@) + $(call Q, HOSTCC ,$(HOSTCC) $(HOSTCFLAGS) $(TEST_FLAGS) -Wno-suggest-attribute=const -g -c -o $@ $<, $@) + +# Use GNU make metaprogramming dynamically define targets and prequisites for +# binaries listed in check_PROGRAMS. +# +# Secondary expansion[1] allows us to use the target automatic variable ($@) in +# the prequisite list. Knowing the target we can map to the corresponding +# _SOURCES variable to learn what to build and link. Finally, make sure the +# artifacts are output under the $(HOST_TRIPLE) directory to separate them from +# objects intended for skiboot proper. 
+# +# [1] https://www.gnu.org/software/make/manual/html_node/Secondary-Expansion.html#Secondary-Expansion +.SECONDEXPANSION: +$(check_PROGRAMS) : $$(call target2prereq,$$@,$(HOST_TRIPLE)/) + $(call Q, HOSTCC , $(HOSTCC) $(HOSTCFLAGS) $(TEST_FLAGS) -Wno-suggest-attribute=const -O0 -g -o $@ $^, $@) + +.PHONY: libflash-test-clean +libflash-test-clean: OBJ_STEMS = $(call objstem,$(check_PROGRAMS),$(HOST_TRIPLE)/) +libflash-test-clean: libflash-test-gcov-clean + $(RM) $(check_PROGRAMS) + $(RM) $(OBJ_STEMS:%=%.o) + $(RM) $(OBJ_STEMS:%=%.d) + +# gcov support: Build objects under $(HOST_TRIPLE)/gcov/ +$(check_PROGRAMS:%=%-gcov-run) : %-run: % + $(call QTEST, TEST-COVERAGE ,$< , $<) + +$(HOST_TRIPLE)/gcov/%.o : %.c + @mkdir -p $(dir $@) + $(call Q, HOSTCC ,$(HOSTCC) $(HOSTCFLAGS) $(HOSTGCOVCFLAGS) $(TEST_FLAGS) -Wno-suggest-attribute=const -g -c -o $@ $<, $@) + +.SECONDEXPANSION: +$(check_PROGRAMS:%=%-gcov) : $$(call target2prereq,$$(patsubst %-gcov,%,$$@),$(HOST_TRIPLE)/gcov/) + $(call Q, HOSTCC , $(HOSTCC) $(HOSTCFLAGS) $(HOSTGCOVCFLAGS) $(TEST_FLAGS) -Wno-suggest-attribute=const -O0 -g -o $@ $^, $@) + +.PHONY: libflash-test-gcov-clean +libflash-test-gcov-clean: GCOV_OBJ_STEMS = $(call objstem,$(check_PROGRAMS),$(HOST_TRIPLE)/gcov/) +libflash-test-gcov-clean: + $(RM) $(check_PROGRAMS:%=%-gcov) + $(RM) $(GCOV_OBJ_STEMS:%=%.o) + $(RM) $(GCOV_OBJ_STEMS:%=%.d) + $(RM) $(GCOV_OBJ_STEMS:%=%.gcda) + $(RM) $(GCOV_OBJ_STEMS:%=%.gcno) + +-include $(patsubst %,%.d,$(call objstem,$(check_PROGRAMS),$(HOST_TRIPLE)/)) +-include $(patsubst %,%.d,$(call objstem,$(check_PROGRAMS),$(HOST_TRIPLE)/gcov/)) diff --git a/roms/skiboot/libflash/test/mbox-server.c b/roms/skiboot/libflash/test/mbox-server.c new file mode 100644 index 000000000..8a68cfff6 --- /dev/null +++ b/roms/skiboot/libflash/test/mbox-server.c @@ -0,0 +1,514 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* Copyright 2017 IBM Corp. 
*/ + +#include <stdio.h> +#include <stdlib.h> +#include <stdint.h> +#include <string.h> +#include <stdarg.h> +#include <inttypes.h> + +#include <sys/mman.h> /* for mprotect() */ + +#define pr_fmt(fmt) "MBOX-SERVER: " fmt +#include "skiboot.h" +#include "opal-api.h" + +#include "mbox-server.h" +#include "stubs.h" + +#define ERASE_GRANULE 0x100 + +#define LPC_BLOCKS 256 + +#define __unused __attribute__((unused)) + +enum win_type { + WIN_CLOSED, + WIN_READ, + WIN_WRITE +}; + +typedef void (*mbox_data_cb)(struct bmc_mbox_msg *msg, void *priv); +typedef void (*mbox_attn_cb)(uint8_t reg, void *priv); + +struct { + mbox_data_cb fn; + void *cb_data; + struct bmc_mbox_msg *msg; + mbox_attn_cb attn; + void *cb_attn; +} mbox_data; + +static struct { + int api; + bool reset; + + void *lpc_base; + size_t lpc_size; + + uint8_t attn_reg; + + uint32_t block_shift; + uint32_t erase_granule; + + uint16_t def_read_win; /* default window size in blocks */ + uint16_t def_write_win; + + uint16_t max_read_win; /* max window size in blocks */ + uint16_t max_write_win; + + enum win_type win_type; + uint32_t win_base; + uint32_t win_size; + bool win_dirty; +} server_state; + + +static bool check_window(uint32_t pos, uint32_t size) +{ + /* If size is zero then all is well */ + if (size == 0) + return true; + + if (server_state.api == 1) { + /* + * Can actually be stricter in v1 because pos is relative to + * flash not window + */ + if (pos < server_state.win_base || + pos + size > server_state.win_base + server_state.win_size) { + fprintf(stderr, "pos: 0x%08x size: 0x%08x aren't in active window\n", + pos, size); + fprintf(stderr, "window pos: 0x%08x window size: 0x%08x\n", + server_state.win_base, server_state.win_size); + return false; + } + } else { + if (pos + size > server_state.win_base + server_state.win_size) + return false; + } + return true; +} + +/* skiboot test stubs */ +int64_t lpc_read(enum OpalLPCAddressType __unused addr_type, uint32_t addr, + uint32_t *data, uint32_t sz); +int64_t lpc_read(enum OpalLPCAddressType __unused addr_type, uint32_t addr, + uint32_t *data, uint32_t sz) +{ + /* Let it read from a write window... Spec says it ok! 
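+ *
+ * In other words, a read only fails when no window is open (WIN_CLOSED) or
+ * when the access falls outside the currently open window, as decided by
+ * check_window() above; both read and write windows are readable.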
*/ + if (!check_window(addr, sz) || server_state.win_type == WIN_CLOSED) + return 1; + + switch (sz) { + case 1: + *(uint8_t *)data = *(uint8_t *)(server_state.lpc_base + addr); + break; + case 2: + *(uint16_t *)data = be16_to_cpu(*(uint16_t *)(server_state.lpc_base + addr)); + break; + case 4: + *(uint32_t *)data = be32_to_cpu(*(uint32_t *)(server_state.lpc_base + addr)); + break; + default: + prerror("Invalid data size %d\n", sz); + return 1; + } + return 0; +} + +int64_t lpc_write(enum OpalLPCAddressType __unused addr_type, uint32_t addr, + uint32_t data, uint32_t sz); +int64_t lpc_write(enum OpalLPCAddressType __unused addr_type, uint32_t addr, + uint32_t data, uint32_t sz) +{ + if (!check_window(addr, sz) || server_state.win_type != WIN_WRITE) + return 1; + switch (sz) { + case 1: + *(uint8_t *)(server_state.lpc_base + addr) = data; + break; + case 2: + *(uint16_t *)(server_state.lpc_base + addr) = cpu_to_be16(data); + break; + case 4: + *(uint32_t *)(server_state.lpc_base + addr) = cpu_to_be32(data); + break; + default: + prerror("Invalid data size %d\n", sz); + return 1; + } + return 0; +} + +int bmc_mbox_register_attn(mbox_attn_cb handler, void *drv_data) +{ + mbox_data.attn = handler; + mbox_data.cb_attn = drv_data; + + return 0; +} + +uint8_t bmc_mbox_get_attn_reg(void) +{ + return server_state.attn_reg; +} + +int bmc_mbox_register_callback(mbox_data_cb handler, void *drv_data) +{ + mbox_data.fn = handler; + mbox_data.cb_data = drv_data; + + return 0; +} + +static int close_window(bool check) +{ + /* + * This isn't strictly prohibited and some daemons let you close + * windows even if none are open. + * I've made the test fail because closing with no windows open is + * a sign that something 'interesting' has happened. + * You should investigate why + * + * If check is false it is because we just want to do the logic + * because open window has been called - you can open a window + * over a closed window obviously + */ + if (check && server_state.win_type == WIN_CLOSED) + return MBOX_R_PARAM_ERROR; + + server_state.win_type = WIN_CLOSED; + mprotect(server_state.lpc_base, server_state.lpc_size, PROT_NONE); + + return MBOX_R_SUCCESS; +} + +static int do_dirty(uint32_t pos, uint32_t size) +{ + pos <<= server_state.block_shift; + if (server_state.api > 1) + size <<= server_state.block_shift; + if (!check_window(pos, size)) { + prlog(PR_ERR, "Trying to dirty not in open window range\n"); + return MBOX_R_PARAM_ERROR; + } + if (server_state.win_type != WIN_WRITE) { + prlog(PR_ERR, "Trying to dirty not write window\n"); + return MBOX_R_PARAM_ERROR; + } + + /* Thats about all actually */ + return MBOX_R_SUCCESS; +} + +void check_timers(bool __unused unused) +{ + /* now that we've handled the message, holla-back */ + if (mbox_data.msg) { + mbox_data.fn(mbox_data.msg, mbox_data.cb_data); + mbox_data.msg = NULL; + } +} + +static int open_window(struct bmc_mbox_msg *msg, bool write, u32 offset, u32 size) +{ + int max_size = server_state.max_read_win << server_state.block_shift; + //int win_size = server_state.def_read_win; + enum win_type type = WIN_READ; + int prot = PROT_READ; + + assert(server_state.win_type == WIN_CLOSED); + + /* Shift params up */ + offset <<= server_state.block_shift; + size <<= server_state.block_shift; + + if (!size || server_state.api == 1) + size = server_state.def_read_win << server_state.block_shift; + + if (write) { + max_size = server_state.max_write_win << server_state.block_shift; + //win_size = server_state.def_write_win; + prot |= PROT_WRITE; + type = 
WIN_WRITE; + /* Use the default size if zero size is set */ + if (!size || server_state.api == 1) + size = server_state.def_write_win << server_state.block_shift; + } + + + prlog(PR_INFO, "Opening range %#.8x, %#.8x for %s\n", + offset, offset + size - 1, write ? "writing" : "reading"); + + /* XXX: Document this behaviour */ + if ((size + offset) > server_state.lpc_size) { + prlog(PR_INFO, "tried to open beyond end of flash\n"); + return MBOX_R_PARAM_ERROR; + } + + /* XXX: should we do this before or after checking for errors? + * Doing it afterwards ensures consistency between + * implementations + */ + if (server_state.api == 2) + size = MIN(size, max_size); + + mprotect(server_state.lpc_base + offset, size, prot); + server_state.win_type = type; + server_state.win_base = offset; + server_state.win_size = size; + + memset(msg->args, 0, sizeof(msg->args)); + bmc_put_u16(msg, 0, offset >> server_state.block_shift); + if (server_state.api == 1) { + /* + * Put nonsense in here because v1 mbox-flash shouldn't know about it. + * If v1 mbox-flash does read this, 0xffff should trigger a big mistake. + */ + bmc_put_u16(msg, 2, 0xffff >> server_state.block_shift); + bmc_put_u16(msg, 4, 0xffff >> server_state.block_shift); + } else { + bmc_put_u16(msg, 2, size >> server_state.block_shift); + bmc_put_u16(msg, 4, offset >> server_state.block_shift); + } + return MBOX_R_SUCCESS; +} + +int bmc_mbox_enqueue(struct bmc_mbox_msg *msg, + unsigned int __unused timeout_sec) +{ + /* + * FIXME: should we be using the same storage for message + * and response? + */ + int rc = MBOX_R_SUCCESS; + uint32_t start, size; + + if (server_state.reset && msg->command != MBOX_C_GET_MBOX_INFO && + msg->command != MBOX_C_BMC_EVENT_ACK) { + /* + * Real daemons should return an error, but for testing we'll + * be a bit more strict + */ + prlog(PR_EMERG, "Server was in reset state - illegal command %d\n", + msg->command); + exit(1); + } + + switch (msg->command) { + case MBOX_C_RESET_STATE: + prlog(PR_INFO, "RESET_STATE\n"); + server_state.win_type = WIN_CLOSED; + rc = open_window(msg, false, 0, LPC_BLOCKS); + memset(msg->args, 0, sizeof(msg->args)); + break; + + case MBOX_C_GET_MBOX_INFO: + prlog(PR_INFO, "GET_MBOX_INFO version = %d, block_shift = %d\n", + server_state.api, server_state.block_shift); + msg->args[0] = server_state.api; + if (server_state.api == 1) { + prlog(PR_INFO, "\tread_size = 0x%08x, write_size = 0x%08x\n", + server_state.def_read_win, server_state.def_write_win); + bmc_put_u16(msg, 1, server_state.def_read_win); + bmc_put_u16(msg, 3, server_state.def_write_win); + msg->args[5] = 0xff; /* If v1 reads this, 0xff will force the mistake */ + } else { + msg->args[5] = server_state.block_shift; + } + server_state.reset = false; + break; + + case MBOX_C_GET_FLASH_INFO: + prlog(PR_INFO, "GET_FLASH_INFO: size: 0x%" PRIu64 ", erase: 0x%08x\n", + server_state.lpc_size, server_state.erase_granule); + if (server_state.api == 1) { + bmc_put_u32(msg, 0, server_state.lpc_size); + bmc_put_u32(msg, 4, server_state.erase_granule); + } else { + bmc_put_u16(msg, 0, server_state.lpc_size >> server_state.block_shift); + bmc_put_u16(msg, 2, server_state.erase_granule >> server_state.block_shift); + } + break; + + case MBOX_C_CREATE_READ_WINDOW: + start = bmc_get_u16(msg, 0); + size = bmc_get_u16(msg, 2); + prlog(PR_INFO, "CREATE_READ_WINDOW: pos: 0x%08x, len: 0x%08x\n", start, size); + rc = close_window(false); + if (rc != MBOX_R_SUCCESS) + break; + rc = open_window(msg, false, start, size); + break; + + case 
MBOX_C_CLOSE_WINDOW: + rc = close_window(true); + break; + + case MBOX_C_CREATE_WRITE_WINDOW: + start = bmc_get_u16(msg, 0); + size = bmc_get_u16(msg, 2); + prlog(PR_INFO, "CREATE_WRITE_WINDOW: pos: 0x%08x, len: 0x%08x\n", start, size); + rc = close_window(false); + if (rc != MBOX_R_SUCCESS) + break; + rc = open_window(msg, true, start, size); + break; + + /* TODO: make these do something */ + case MBOX_C_WRITE_FLUSH: + prlog(PR_INFO, "WRITE_FLUSH\n"); + /* + * This behaviour isn't strictly illegal however it could + * be a sign of bad behaviour + */ + if (server_state.api > 1 && !server_state.win_dirty) { + prlog(PR_EMERG, "Version >1 called FLUSH without a previous DIRTY\n"); + exit (1); + } + server_state.win_dirty = false; + if (server_state.api > 1) + break; + + /* This is only done on V1 */ + start = bmc_get_u16(msg, 0); + if (server_state.api == 1) + size = bmc_get_u32(msg, 2); + else + size = bmc_get_u16(msg, 2); + prlog(PR_INFO, "\tpos: 0x%08x len: 0x%08x\n", start, size); + rc = do_dirty(start, size); + break; + case MBOX_C_MARK_WRITE_DIRTY: + start = bmc_get_u16(msg, 0); + if (server_state.api == 1) + size = bmc_get_u32(msg, 2); + else + size = bmc_get_u16(msg, 2); + prlog(PR_INFO, "MARK_WRITE_DIRTY: pos: 0x%08x, len: %08x\n", start, size); + server_state.win_dirty = true; + rc = do_dirty(start, size); + break; + case MBOX_C_BMC_EVENT_ACK: + /* + * Clear any BMC notifier flags. Don't clear the server + * reset state here, it is a permitted command but only + * GET_INFO should clear it. + * + * Make sure that msg->args[0] is only acking bits we told + * it about, in server_state.attn_reg. The caveat is that + * it could NOT ack some bits... + */ + prlog(PR_INFO, "BMC_EVENT_ACK 0x%02x\n", msg->args[0]); + if ((msg->args[0] | server_state.attn_reg) != server_state.attn_reg) { + prlog(PR_EMERG, "Tried to ack bits we didn't say!\n"); + exit(1); + } + msg->bmc &= ~msg->args[0]; + server_state.attn_reg &= ~msg->args[0]; + break; + case MBOX_C_MARK_WRITE_ERASED: + start = bmc_get_u16(msg, 0) << server_state.block_shift; + size = bmc_get_u16(msg, 2) << server_state.block_shift; + /* If we've negotiated v1 this should never be called */ + if (server_state.api == 1) { + prlog(PR_EMERG, "Version 1 protocol called a V2 only command\n"); + exit(1); + } + /* + * This will likely result in flush (but not + * dirty) being called. This is the point. 
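+		 * Note that this simulated server applies the erase immediately,
+		 * filling the erased range of the window with 0xff below; as the
+		 * comment there notes, a real daemon would defer that work until
+		 * the client sends WRITE_FLUSH.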
+ */ + server_state.win_dirty = true; + /* This should really be done when they call flush */ + memset(server_state.lpc_base + server_state.win_base + start, 0xff, size); + break; + default: + prlog(PR_EMERG, "Got unknown command code from mbox: %d\n", msg->command); + } + + prerror("command response = %d\n", rc); + msg->response = rc; + + mbox_data.msg = msg; + + return 0; +} + +int mbox_server_memcmp(int off, const void *buf, size_t len) +{ + return memcmp(server_state.lpc_base + off, buf, len); +} + +void mbox_server_memset(int c) +{ + memset(server_state.lpc_base, c, server_state.lpc_size); +} + +uint32_t mbox_server_total_size(void) +{ + /* Not actually but for this server we don't differentiate */ + return server_state.lpc_size; +} + +uint32_t mbox_server_erase_granule(void) +{ + return server_state.erase_granule; +} + +int mbox_server_version(void) +{ + return server_state.api; +} + +int mbox_server_reset(unsigned int version, uint8_t block_shift) +{ + if (version > 3) + return 1; + + server_state.api = version; + if (block_shift) + server_state.block_shift = block_shift; + if (server_state.erase_granule < (1 << server_state.block_shift)) + server_state.erase_granule = 1 << server_state.block_shift; + server_state.lpc_size = LPC_BLOCKS * (1 << server_state.block_shift); + free(server_state.lpc_base); + server_state.lpc_base = malloc(server_state.lpc_size); + server_state.attn_reg = MBOX_ATTN_BMC_REBOOT | MBOX_ATTN_BMC_DAEMON_READY; + server_state.win_type = WIN_CLOSED; + server_state.reset = true; + mbox_data.attn(MBOX_ATTN_BMC_REBOOT, mbox_data.cb_attn); + + return 0; +} + +int mbox_server_init(void) +{ + server_state.api = 1; + server_state.reset = true; + + /* We're always ready! */ + server_state.attn_reg = MBOX_ATTN_BMC_DAEMON_READY; + + /* setup server */ + server_state.block_shift = 12; + server_state.erase_granule = 0x1000; + server_state.lpc_size = LPC_BLOCKS * (1 << server_state.block_shift); + server_state.lpc_base = malloc(server_state.lpc_size); + + server_state.def_read_win = 1; /* These are in units of block shift "= 1 is 4K" */ + server_state.def_write_win = 1; /* These are in units of block shift "= 1 is 4K" */ + + server_state.max_read_win = LPC_BLOCKS; + server_state.max_write_win = LPC_BLOCKS; + server_state.win_type = WIN_CLOSED; + + return 0; +} + +void mbox_server_destroy(void) +{ + free(server_state.lpc_base); +} diff --git a/roms/skiboot/libflash/test/mbox-server.h b/roms/skiboot/libflash/test/mbox-server.h new file mode 100644 index 000000000..e7aec817f --- /dev/null +++ b/roms/skiboot/libflash/test/mbox-server.h @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* Copyright 2017 IBM Corp. */ + +#include <stdint.h> + +uint32_t mbox_server_total_size(void); +uint32_t mbox_server_erase_granule(void); +int mbox_server_version(void); +void mbox_server_memset(int c); +int mbox_server_memcmp(int off, const void *buf, size_t len); +int mbox_server_reset(unsigned int version, uint8_t block_shift); +int mbox_server_init(void); +void mbox_server_destroy(void); diff --git a/roms/skiboot/libflash/test/stubs.c b/roms/skiboot/libflash/test/stubs.c new file mode 100644 index 000000000..09f004359 --- /dev/null +++ b/roms/skiboot/libflash/test/stubs.c @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* + * Stubs for libflash test + * + * Copyright 2013-2018 IBM Corp. 
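+ *
+ * These provide just enough of the skiboot environment (timebase, logging,
+ * locking, zalloc) for the tests to link, plus little-endian accessors for
+ * mbox message arguments: for instance, bmc_put_u16(msg, 0, 0x1234) below
+ * stores args[0] = 0x34 and args[1] = 0x12, and bmc_get_u16() undoes it.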
+ */ + +#include <stdbool.h> +#include <stdio.h> +#include <stdlib.h> +#include <stdint.h> +#include <string.h> +#include <stdarg.h> +#include <sys/unistd.h> /* for usleep */ + +#include "../../include/lpc-mbox.h" +#include "stubs.h" + +#define __unused __attribute__((unused)) + +__attribute__((weak)) void check_timers(bool __unused unused) +{ + return; +} + +void time_wait_ms(unsigned long ms) +{ + usleep(ms * 1000); +} + +/* skiboot stubs */ +unsigned long mftb(void) +{ + return 42; +} +unsigned long tb_hz = 512000000ul; + +void _prlog(int __unused log_level, const char* fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + vprintf(fmt, ap); + va_end(ap); +} + +/* accessor junk */ + +void bmc_put_u16(struct bmc_mbox_msg *msg, int offset, uint16_t data) +{ + msg->args[offset + 0] = data & 0xff; + msg->args[offset + 1] = data >> 8; +} + +void bmc_put_u32(struct bmc_mbox_msg *msg, int offset, uint32_t data) +{ + msg->args[offset + 0] = (data) & 0xff; + msg->args[offset + 1] = (data >> 8) & 0xff; + msg->args[offset + 2] = (data >> 16) & 0xff; + msg->args[offset + 3] = (data >> 24) & 0xff; +} + +u32 bmc_get_u32(struct bmc_mbox_msg *msg, int offset) +{ + u32 data = 0; + + data |= msg->args[offset + 0]; + data |= msg->args[offset + 1] << 8; + data |= msg->args[offset + 2] << 16; + data |= msg->args[offset + 3] << 24; + + return data; +} + +u16 bmc_get_u16(struct bmc_mbox_msg *msg, int offset) +{ + u16 data = 0; + + data |= msg->args[offset + 0]; + data |= msg->args[offset + 1] << 8; + + return data; +} + +void *__zalloc(size_t sz) +{ + return calloc(1, sz); +} + +void __free(const void *p) +{ + free((void *)p); +} + +void lock_caller(struct lock *l __attribute__((unused)), + const char *caller __attribute__((unused))) +{ +} + +void unlock(struct lock *l __attribute__((unused))) +{ +} diff --git a/roms/skiboot/libflash/test/stubs.h b/roms/skiboot/libflash/test/stubs.h new file mode 100644 index 000000000..3f890c930 --- /dev/null +++ b/roms/skiboot/libflash/test/stubs.h @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* Copyright 2013-2018 IBM Corp. */ + +#include <lock.h> +#include <stdint.h> + +#include "../../include/lpc-mbox.h" + +void check_timers(bool unused); +void time_wait_ms(unsigned long ms); +unsigned long mftb(void); +void _prlog(int log_level, const char* fmt, ...); +void bmc_put_u16(struct bmc_mbox_msg *msg, int offset, uint16_t data); +void bmc_put_u32(struct bmc_mbox_msg *msg, int offset, uint32_t data); +u16 bmc_get_u16(struct bmc_mbox_msg *msg, int offset); +u32 bmc_get_u32(struct bmc_mbox_msg *msg, int offset); +void *__zalloc(size_t sz); +void __free(const void *p); +void lock_caller(struct lock *l, const char *caller); +void unlock(struct lock *l); diff --git a/roms/skiboot/libflash/test/test-blocklevel.c b/roms/skiboot/libflash/test/test-blocklevel.c new file mode 100644 index 000000000..7a4fe19ac --- /dev/null +++ b/roms/skiboot/libflash/test/test-blocklevel.c @@ -0,0 +1,664 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* Copyright 2013-2018 IBM Corp. */ + +#include <stdio.h> +#include <stdlib.h> +#include <stdint.h> +#include <string.h> + +#include <libflash/blocklevel.h> + +#include "../ecc.c" +#include "../blocklevel.c" + +#define __unused __attribute__((unused)) + +#define ERR(fmt...) 
fprintf(stderr, fmt) + +bool libflash_debug; + +static int bl_test_bad_read(struct blocklevel_device *bl __unused, uint64_t pos __unused, + void *buf __unused, uint64_t len __unused) +{ + return FLASH_ERR_PARM_ERROR; +} + +static int bl_test_read(struct blocklevel_device *bl, uint64_t pos, void *buf, uint64_t len) +{ + if (pos + len > 0x1000) + return FLASH_ERR_PARM_ERROR; + + memcpy(buf, bl->priv + pos, len); + + return 0; +} + +static int bl_test_bad_write(struct blocklevel_device *bl __unused, uint64_t pos __unused, + const void *buf __unused, uint64_t len __unused) +{ + return FLASH_ERR_PARM_ERROR; +} + +static int bl_test_write(struct blocklevel_device *bl, uint64_t pos, const void *buf, uint64_t len) +{ + if (pos + len > 0x1000) + return FLASH_ERR_PARM_ERROR; + + memcpy(bl->priv + pos, buf, len); + + return 0; +} + +static int bl_test_erase(struct blocklevel_device *bl, uint64_t pos, uint64_t len) +{ + if (pos + len > 0x1000) + return FLASH_ERR_PARM_ERROR; + + memset(bl->priv + pos, 0xff, len); + + return 0; +} + +static void dump_buf(uint8_t *buf, int start, int end, int miss) +{ + int i; + + printf("pos: value\n"); + for (i = start; i < end; i++) + printf("%04x: %c%s\n", i, buf[i] == 0xff ? '-' : buf[i], i == miss ? " <- First missmatch" : ""); +} + +/* + * Returns zero if the buffer is ok. Otherwise returns the position of + * the mismatch. If the mismatch is at zero -1 is returned + */ +static int check_buf(uint8_t *buf, int zero_start, int zero_end) +{ + int i; + + for (i = 0; i < 0x1000; i++) { + if (i >= zero_start && i < zero_end && buf[i] != 0xff) + return i == 0 ? -1 : i; + if ((i < zero_start || i >= zero_end) && buf[i] != (i % 26) + 'a') + return i == 0 ? -1 : i; + } + + return 0; +} + +static void reset_buf(uint8_t *buf) +{ + int i; + + for (i = 0; i < 0x1000; i++) { + /* This gives repeating a - z which will be nice to visualise */ + buf[i] = (i % 26) + 'a'; + } +} + +static void print_ptr(void *ptr, int len) +{ + int i; + char *p = ptr; + + printf("0x"); + for (i = 0; i < len; i++) { + putchar(*p); + if (i && i % 8 == 0) { + putchar('\n'); + if (len - i) + printf("0x"); + } + } + putchar('\n'); +} + +int main(void) +{ + struct blocklevel_device bl_mem = { 0 }; + struct blocklevel_device *bl = &bl_mem; + uint64_t with_ecc[10], without_ecc[10]; + char *buf = NULL, *data = NULL; + int i, rc, miss; + + if (blocklevel_ecc_protect(bl, 0, 0x1000)) { + ERR("Failed to blocklevel_ecc_protect!\n"); + return 1; + } + + /* 0x1000 -> 0x3000 should remain unprotected */ + + if (blocklevel_ecc_protect(bl, 0x3000, 0x1000)) { + ERR("Failed to blocklevel_ecc_protect(0x3000, 0x1000)\n"); + return 1; + } + if (blocklevel_ecc_protect(bl, 0x2f00, 0x1100)) { + ERR("Failed to blocklevel_ecc_protect(0x2f00, 0x1100)\n"); + return 1; + } + + /* Zero length protection */ + if (!blocklevel_ecc_protect(bl, 0x4000, 0)) { + ERR("Shouldn't have succeeded blocklevel_ecc_protect(0x4000, 0)\n"); + return 1; + } + + /* Minimum creatable size */ + if (blocklevel_ecc_protect(bl, 0x4000, BYTES_PER_ECC)) { + ERR("Failed to blocklevel_ecc_protect(0x4000, BYTES_PER_ECC)\n"); + return 1; + } + + /* Deal with overlapping protections */ + if (blocklevel_ecc_protect(bl, 0x100, 0x1000)) { + ERR("Failed to protect overlaping region blocklevel_ecc_protect(0x100, 0x1000)\n"); + return 1; + } + + /* Deal with overflow */ + if (!blocklevel_ecc_protect(bl, 1, 0xFFFFFFFF)) { + ERR("Added an 'overflow' protection blocklevel_ecc_protect(1, 0xFFFFFFFF)\n"); + return 1; + } + + /* Protect everything */ + if 
(blocklevel_ecc_protect(bl, 0, 0xFFFFFFFF)) { + ERR("Couldn't protect everything blocklevel_ecc_protect(0, 0xFFFFFFFF)\n"); + return 1; + } + + if (ecc_protected(bl, 0, 1, NULL) != 1) { + ERR("Invaid result for ecc_protected(0, 1)\n"); + return 1; + } + + if (ecc_protected(bl, 0, 0x1000, NULL) != 1) { + ERR("Invalid result for ecc_protected(0, 0x1000)\n"); + return 1; + } + + if (ecc_protected(bl, 0x100, 0x100, NULL) != 1) { + ERR("Invalid result for ecc_protected(0x0100, 0x100)\n"); + return 1; + } + + /* Clear the protections */ + bl->ecc_prot.n_prot = 0; + /* Reprotect */ + if (blocklevel_ecc_protect(bl, 0x3000, 0x1000)) { + ERR("Failed to blocklevel_ecc_protect(0x3000, 0x1000)\n"); + return 1; + } + /* Deal with overlapping protections */ + if (blocklevel_ecc_protect(bl, 0x100, 0x1000)) { + ERR("Failed to protect overlaping region blocklevel_ecc_protect(0x100, 0x1000)\n"); + return 1; + } + + if (ecc_protected(bl, 0x1000, 0, NULL) != 1) { + ERR("Invalid result for ecc_protected(0x1000, 0)\n"); + return 1; + } + + if (ecc_protected(bl, 0x1000, 0x1000, NULL) != -1) { + ERR("Invalid result for ecc_protected(0x1000, 0x1000)\n"); + return 1; + } + + if (ecc_protected(bl, 0x1000, 0x100, NULL) != 1) { + ERR("Invalid result for ecc_protected(0x1000, 0x100)\n"); + return 1; + } + + if (ecc_protected(bl, 0x2000, 0, NULL) != 0) { + ERR("Invalid result for ecc_protected(0x2000, 0)\n"); + return 1; + } + + if (ecc_protected(bl, 0x4000, 1, NULL) != 0) { + ERR("Invalid result for ecc_protected(0x4000, 1)\n"); + return 1; + } + + /* Check for asking for a region with mixed protection */ + if (ecc_protected(bl, 0x100, 0x2000, NULL) != -1) { + ERR("Invalid result for ecc_protected(0x100, 0x2000)\n"); + return 1; + } + + /* Test the auto extending of regions */ + if (blocklevel_ecc_protect(bl, 0x5000, 0x100)) { + ERR("Failed to blocklevel_ecc_protect(0x5000, 0x100)\n"); + return 1; + } + + if (blocklevel_ecc_protect(bl, 0x5100, 0x100)) { + ERR("Failed to blocklevel_ecc_protect(0x5100, 0x100)\n"); + return 1; + } + + if (blocklevel_ecc_protect(bl, 0x5200, 0x100)) { + ERR("Failed to blocklevel_ecc_protect(0x5200, 0x100)\n"); + return 1; + } + + if (ecc_protected(bl, 0x5120, 0x10, NULL) != 1) { + ERR("Invalid result for ecc_protected(0x5120, 0x10)\n"); + return 1; + } + + if (blocklevel_ecc_protect(bl, 0x4f00, 0x100)) { + ERR("Failed to blocklevel_ecc_protected(0x4900, 0x100)\n"); + return 1; + } + + if (blocklevel_ecc_protect(bl, 0x4900, 0x100)) { + ERR("Failed to blocklevel_ecc_protected(0x4900, 0x100)\n"); + return 1; + } + + if (ecc_protected(bl, 0x4920, 0x10, NULL) != 1) { + ERR("Invalid result for ecc_protected(0x4920, 0x10)\n"); + return 1; + } + + if (blocklevel_ecc_protect(bl, 0x5290, 0x10)) { + ERR("Failed to blocklevel_ecc_protect(0x5290, 0x10)\n"); + return 1; + } + + /* Test the auto extending of regions */ + if (blocklevel_ecc_protect(bl, 0x6000, 0x100)) { + ERR("Failed to blocklevel_ecc_protect(0x6000, 0x100)\n"); + return 1; + } + + if (blocklevel_ecc_protect(bl, 0x6200, 0x100)) { + ERR("Failed to blocklevel_ecc_protect(0x6200, 0x100)\n"); + return 1; + } + + /* Test ECC reading and writing being 100% transparent to the + * caller */ + buf = malloc(0x1000); + data = malloc(0x100); + if (!buf || !data) { + ERR("Malloc failed\n"); + rc = 1; + goto out; + } + memset(bl, 0, sizeof(*bl)); + bl_mem.read = &bl_test_read; + bl_mem.write = &bl_test_write; + bl_mem.erase = &bl_test_erase; + bl_mem.erase_mask = 0xff; + bl_mem.priv = buf; + reset_buf(buf); + + + /* + * Test 1: One full and exact erase 
block, this shouldn't call + * read or write, ensure this fails if it does. + */ + bl_mem.write = &bl_test_bad_write; + bl_mem.read = &bl_test_bad_read; + if (blocklevel_smart_erase(bl, 0x100, 0x100)) { + ERR("Failed to blocklevel_smart_erase(0x100, 0x100)\n"); + goto out; + } + miss = check_buf(buf, 0x100, 0x200); + if (miss) { + ERR("Buffer mismatch after blocklevel_smart_erase(0x100, 0x100) at 0x%0x\n", + miss == -1 ? 0 : miss); + dump_buf(buf, 0xfc, 0x105, miss == -1 ? 0 : miss); + dump_buf(buf, 0x1fc, 0x205, miss == -1 ? 0 : miss); + goto out; + } + bl_mem.read = &bl_test_read; + bl_mem.write = &bl_test_write; + + reset_buf(buf); + /* Test 2: Only touch one erase block */ + if (blocklevel_smart_erase(bl, 0x20, 0x40)) { + ERR("Failed to blocklevel_smart_erase(0x20, 0x40)\n"); + goto out; + } + miss = check_buf(buf, 0x20, 0x60); + if (miss) { + ERR("Buffer mismatch after blocklevel_smart_erase(0x20, 0x40) at 0x%x\n", + miss == -1 ? 0 : miss); + dump_buf(buf, 0x1c, 0x65, miss == -1 ? 0 : miss); + goto out; + } + + reset_buf(buf); + /* Test 3: Start aligned but finish somewhere in it */ + if (blocklevel_smart_erase(bl, 0x100, 0x50)) { + ERR("Failed to blocklevel_smart_erase(0x100, 0x50)\n"); + goto out; + } + miss = check_buf(buf, 0x100, 0x150); + if (miss) { + ERR("Buffer mismatch after blocklevel_smart_erase(0x100, 0x50) at 0x%0x\n", + miss == -1 ? 0 : miss); + dump_buf(buf, 0xfc, 0x105, miss == -1 ? 0 : miss); + dump_buf(buf, 0x14c, 0x155, miss == -1 ? 0 : miss); + goto out; + } + + reset_buf(buf); + /* Test 4: Start somewhere in it, finish aligned */ + if (blocklevel_smart_erase(bl, 0x50, 0xb0)) { + ERR("Failed to blocklevel_smart_erase(0x50, 0xb0)\n"); + goto out; + } + miss = check_buf(buf, 0x50, 0x100); + if (miss) { + ERR("Buffer mismatch after blocklevel_smart_erase(0x50, 0xb0) at 0x%x\n", + miss == -1 ? 0 : miss); + dump_buf(buf, 0x4c, 0x55, miss == -1 ? 0 : miss); + dump_buf(buf, 0x100, 0x105, miss == -1 ? 0 : miss); + goto out; + } + + reset_buf(buf); + /* Test 5: Cover two erase blocks exactly */ + if (blocklevel_smart_erase(bl, 0x100, 0x200)) { + ERR("Failed to blocklevel_smart_erase(0x100, 0x200)\n"); + goto out; + } + miss = check_buf(buf, 0x100, 0x300); + if (miss) { + ERR("Buffer mismatch after blocklevel_smart_erase(0x100, 0x200) at 0x%x\n", + miss == -1 ? 0 : miss); + dump_buf(buf, 0xfc, 0x105, miss == -1 ? 0 : miss); + dump_buf(buf, 0x2fc, 0x305, miss == -1 ? 0 : miss); + goto out; + } + + reset_buf(buf); + /* Test 6: Erase 1.5 blocks (start aligned) */ + if (blocklevel_smart_erase(bl, 0x100, 0x180)) { + ERR("Failed to blocklevel_smart_erase(0x100, 0x180)\n"); + goto out; + } + miss = check_buf(buf, 0x100, 0x280); + if (miss) { + ERR("Buffer mismatch after blocklevel_smart_erase(0x100, 0x180) at 0x%x\n", + miss == -1 ? 0 : miss); + dump_buf(buf, 0xfc, 0x105, miss == -1 ? 0 : miss); + dump_buf(buf, 0x27c, 0x285, miss == -1 ? 0 : miss); + goto out; + } + + reset_buf(buf); + /* Test 7: Erase 1.5 blocks (end aligned) */ + if (blocklevel_smart_erase(bl, 0x80, 0x180)) { + ERR("Failed to blocklevel_smart_erase(0x80, 0x180)\n"); + goto out; + } + miss = check_buf(buf, 0x80, 0x200); + if (miss) { + ERR("Buffer mismatch after blocklevel_smart_erase(0x80, 0x180) at 0x%x\n", + miss == -1 ? 0 : miss); + dump_buf(buf, 0x7c, 0x85, miss == -1 ? 0 : miss); + dump_buf(buf, 0x1fc, 0x205, miss == -1 ? 
0 : miss); + goto out; + } + + reset_buf(buf); + /* Test 8: Erase a big section, not aligned */ + if (blocklevel_smart_erase(bl, 0x120, 0x544)) { + ERR("Failed to blocklevel_smart_erase(0x120, 0x544)\n"); + goto out; + } + miss = check_buf(buf, 0x120, 0x664); + if (miss) { + ERR("Buffer mismatch after blocklevel_smart_erase(0x120, 0x544) at 0x%x\n", + miss == -1 ? 0 : miss); + dump_buf(buf, 0x11c, 0x125, miss == -1 ? 0 : miss); + dump_buf(buf, 0x65f, 0x669, miss == -1 ? 0 : miss); + goto out; + } + + bl_mem.priv = buf; + reset_buf(buf); + + for (i = 0; i < 0x100; i++) + data[i] = i; + + /* This really shouldn't fail */ + rc = blocklevel_ecc_protect(bl, 0, 0x100); + if (rc) { + ERR("Couldn't blocklevel_ecc_protect(0, 0x100)\n"); + goto out; + } + + rc = blocklevel_write(bl, 0, data, 0x100); + if (rc) { + ERR("Couldn't blocklevel_write(0, 0x100)\n"); + goto out; + } + + rc = blocklevel_write(bl, 0x200, data, 0x100); + if (rc) { + ERR("Couldn't blocklevel_write(0x200, 0x100)\n"); + goto out; + } + + /* + * 0x50 once adjusted for the presence of ECC becomes 0x5a which + * is ECC aligned. + */ + rc = blocklevel_read(bl, 0x50, with_ecc, 8); + if (rc) { + ERR("Couldn't blocklevel_read(0x50, 8) with ecc rc=%d\n", rc); + goto out; + } + rc = blocklevel_read(bl, 0x250, without_ecc, 8); + if (rc) { + ERR("Couldn't blocklevel_read(0x250, 8) without ecc rc=%d\n", rc); + goto out; + } + if (memcmp(with_ecc, without_ecc, 8) || memcmp(with_ecc, &data[0x50], 8)) { + ERR("ECC read and non-ECC read don't match or are wrong line: %d\n", __LINE__); + print_ptr(with_ecc, 8); + print_ptr(without_ecc, 8); + print_ptr(&data[50], 8); + rc = 1; + goto out; + } + + /* + * 0x50 once adjusted for the presence of ECC becomes 0x5a which + * is ECC aligned. + * So 0x4f won't be aligned! + */ + rc = blocklevel_read(bl, 0x4f, with_ecc, 8); + if (rc) { + ERR("Couldn't blocklevel_read(0x4f, 8) with ecc %d\n", rc); + goto out; + } + rc = blocklevel_read(bl, 0x24f, without_ecc, 8); + if (rc) { + ERR("Couldn't blocklevel_read(0x24f, 8) without ecc %d\n", rc); + goto out; + } + if (memcmp(with_ecc, without_ecc, 8) || memcmp(with_ecc, &data[0x4f], 8)) { + ERR("ECC read and non-ECC read don't match or are wrong line: %d\n", __LINE__); + print_ptr(with_ecc, 8); + print_ptr(without_ecc, 8); + print_ptr(&data[0x4f], 8); + rc = 1; + goto out; + } + + /* + * 0x50 once adjusted for the presence of ECC becomes 0x5a which + * is ECC aligned. + */ + rc = blocklevel_read(bl, 0x50, with_ecc, 16); + if (rc) { + ERR("Couldn't blocklevel_read(0x50, 16) with ecc %d\n", rc); + goto out; + } + rc = blocklevel_read(bl, 0x250, without_ecc, 16); + if (rc) { + ERR("Couldn't blocklevel_read(0x250, 16) without ecc %d\n", rc); + goto out; + } + if (memcmp(with_ecc, without_ecc, 16)|| memcmp(with_ecc, &data[0x50], 16)) { + ERR("(long read )ECC read and non-ECC read don't match or are wrong line: %d\n", __LINE__); + print_ptr(with_ecc, 16); + print_ptr(without_ecc, 16); + print_ptr(&data[0x50], 16); + rc = 1; + goto out; + } + + /* + * 0x50 once adjusted for the presence of ECC becomes 0x5a which + * is ECC aligned. So 4f won't be. 
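+	 *
+	 * (Worked example, assuming the 8-data-bytes-plus-1-ECC-byte rows used
+	 * by ecc.c: the backing offset is pos + pos / 8.  For pos 0x50 that is
+	 * 0x50 + 0xa = 0x5a, a multiple of 9 and hence on a row boundary; for
+	 * pos 0x4f it is 0x4f + 0x9 = 0x58, which is not.)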
+ */ + rc = blocklevel_read(bl, 0x4f, with_ecc, 24); + if (rc) { + ERR("Couldn't blocklevel_read(0x4f, 24) with ecc %d\n", rc); + goto out; + } + rc = blocklevel_read(bl, 0x24f, without_ecc, 24); + if (rc) { + ERR("Couldn't blocklevel_read(0x24f, 24) without ecc %d\n", rc); + goto out; + } + if (memcmp(with_ecc, without_ecc, 24)|| memcmp(with_ecc, &data[0x4f], 24)) { + ERR("(long read )ECC read and non-ECC read don't match or are wrong: %d\n", __LINE__); + print_ptr(with_ecc, 24); + print_ptr(without_ecc, 24); + print_ptr(&data[0x4f], 24); + rc = 1; + goto out; + } + + /* + * Now lets try to write at non ECC aligned positions + * Go easy first, 0x50 becomes 0x5a which is ECC byte aligned but + * not aligned to the start of the partition + */ + + rc = blocklevel_write(bl, 0x50, data, 0xb0); + if (rc) { + ERR("Couldn't blocklevel_write()\n"); + goto out; + } + /* Read 8 bytes before to make sure we didn't ruin that */ + rc = blocklevel_read(bl, 0x48, with_ecc, 24); + if (rc) { + ERR("Couldn't blocklevel_read() with ecc %d\n", rc); + goto out; + } + if (memcmp(with_ecc, data + 0x48, 8) || memcmp(with_ecc + 1, data, 16)) { + rc = 1; + ERR("Couldn't read back what we thought we wrote line: %d\n", __LINE__); + print_ptr(with_ecc, 24); + print_ptr(&data[0x48], 8); + print_ptr(data, 16); + goto out; + } + + /* Ok lets get tricky */ + rc = blocklevel_write(bl, 0x31, data, 0xcf); + if (rc) { + ERR("Couldn't blocklevel_write(0x31, 0xcf)\n"); + goto out; + } + /* Read 8 bytes before to make sure we didn't ruin that */ + rc = blocklevel_read(bl, 0x29, with_ecc, 24); + if (rc) { + ERR("Couldn't blocklevel_read(0x29, 24) with ecc rc=%d\n", rc); + goto out; + } + if (memcmp(with_ecc, &data[0x29], 8) || memcmp(with_ecc + 1, data, 16)) { + ERR("Couldn't read back what we thought we wrote line: %d\n", __LINE__); + print_ptr(with_ecc, 24); + print_ptr(&data[0x29], 8); + print_ptr(data, 16); + rc = 1; + goto out; + } + + /* + * Rewrite the pattern that we've messed up + */ + rc = blocklevel_write(bl, 0, data, 0x100); + if (rc) { + ERR("Couldn't blocklevel_write(0, 0x100) to reset\n"); + goto out; + } + + /* Be unalignmed as possible from now on, starting somewhat easy */ + rc = blocklevel_read(bl, 0, with_ecc, 5); + if (rc) { + ERR("Couldn't blocklevel_write(0, 5)\n"); + goto out; + } + if (memcmp(with_ecc, data, 5)) { + ERR("blocklevel_read 5, 0) didn't match line: %d\n", __LINE__); + print_ptr(with_ecc, 5); + print_ptr(data, 5); + rc = 1; + goto out; + } + + /* 39 is neither divisible by 8 or by 9 */ + rc = blocklevel_read(bl, 39, with_ecc, 5); + if (rc) { + ERR("Couldn't blocklevel_write(39, 5)\n"); + goto out; + } + if (memcmp(with_ecc, &data[39], 5)) { + ERR("blocklevel_read(5, 39() didn't match line: %d\n", __LINE__); + print_ptr(with_ecc, 5); + print_ptr(&data[39], 5); + rc = 1; + goto out; + } + + rc = blocklevel_read(bl, 0xb, &with_ecc, 39); + if (rc) { + ERR("Couldn't blocklevel_read(0xb, 39)\n"); + goto out; + } + if (memcmp(with_ecc, &data[0xb], 39)) { + ERR("Strange sized and positioned read failed, blocklevel_read(0xb, 39) line: %d\n", __LINE__); + print_ptr(with_ecc, 39); + print_ptr(&data[0xb], 39); + rc = 1; + goto out; + } + + rc = blocklevel_write(bl, 39, data, 50); + if (rc) { + ERR("Couldn't blocklevel_write(39, 50)\n"); + goto out; + } + + rc = blocklevel_read(bl, 32, with_ecc, 39); + if (rc) { + ERR("Couldn't blocklevel_read(32, 39)\n"); + goto out; + } + + if (memcmp(with_ecc, &data[32], 7) || memcmp(((char *)with_ecc) + 7, data, 32)) { + ERR("Read back of odd placed/odd sized write 
failed, blocklevel_read(32, 39) line: %d\n", __LINE__); + print_ptr(with_ecc, 39); + print_ptr(&data[32], 7); + print_ptr(data, 32); + rc = 1; + goto out; + } + +out: + free(buf); + free(data); +return rc; +} diff --git a/roms/skiboot/libflash/test/test-ecc.c b/roms/skiboot/libflash/test/test-ecc.c new file mode 100644 index 000000000..b9489f8ec --- /dev/null +++ b/roms/skiboot/libflash/test/test-ecc.c @@ -0,0 +1,510 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* Copyright 2013-2018 IBM Corp. */ + +#include <stdio.h> +#include <stdlib.h> +#include <stdint.h> +#include <string.h> + +#include <libflash/ecc.h> + +#include "../ecc.c" + +#define __unused __attribute__((unused)) + +#define ERR(fmt...) fprintf(stderr, fmt) + +#define NUM_ECC_ROWS 320 + +/* + * Note this data is big endian as this is what the ecc code expects. + * The ECC code returns IBM bit numbers assuming the word was in CPU + * endian! + */ + +/* 8 data bytes 1 ecc byte per row */ +struct ecc64 ecc_data[] = { + { 0xfeffffffffffffff, 0x00 }, /* This row will have ecc correct bit 63 */ + { 0xfdffffffffffffff, 0x00 }, /* This row will have ecc correct bit 62 */ + { 0xfbffffffffffffff, 0x00 }, /* This row will have ecc correct bit 61 */ + { 0xf7ffffffffffffff, 0x00 }, /* This row will have ecc correct bit 60 */ + { 0xefffffffffffffff, 0x00 }, /* This row will have ecc correct bit 59 */ + { 0xdfffffffffffffff, 0x00 }, /* This row will have ecc correct bit 58 */ + { 0xbfffffffffffffff, 0x00 }, /* This row will have ecc correct bit 57 */ + { 0x7fffffffffffffff, 0x00 }, /* This row will have ecc correct bit 56 */ + { 0xfffeffffffffffff, 0x00 }, /* This row will have ecc correct bit 55 */ + { 0xfffdffffffffffff, 0x00 }, /* This row will have ecc correct bit 54 */ + { 0xfffbffffffffffff, 0x00 }, /* This row will have ecc correct bit 53 */ + { 0xfff7ffffffffffff, 0x00 }, /* This row will have ecc correct bit 52 */ + { 0xffefffffffffffff, 0x00 }, /* This row will have ecc correct bit 51 */ + { 0xffdfffffffffffff, 0x00 }, /* This row will have ecc correct bit 50 */ + { 0xffbfffffffffffff, 0x00 }, /* This row will have ecc correct bit 49 */ + { 0xff7fffffffffffff, 0x00 }, /* This row will have ecc correct bit 48 */ + { 0xfffffeffffffffff, 0x00 }, /* This row will have ecc correct bit 47 */ + { 0xfffffdffffffffff, 0x00 }, /* This row will have ecc correct bit 46 */ + { 0xfffffbffffffffff, 0x00 }, /* This row will have ecc correct bit 45 */ + { 0xfffff7ffffffffff, 0x00 }, /* This row will have ecc correct bit 44 */ + { 0xffffefffffffffff, 0x00 }, /* This row will have ecc correct bit 43 */ + { 0xffffdfffffffffff, 0x00 }, /* This row will have ecc correct bit 42 */ + { 0xffffbfffffffffff, 0x00 }, /* This row will have ecc correct bit 41 */ + { 0xffff7fffffffffff, 0x00 }, /* This row will have ecc correct bit 40 */ + { 0xfffffffeffffffff, 0x00 }, /* This row will have ecc correct bit 39 */ + { 0xfffffffdffffffff, 0x00 }, /* This row will have ecc correct bit 38 */ + { 0xfffffffbffffffff, 0x00 }, /* This row will have ecc correct bit 37 */ + { 0xfffffff7ffffffff, 0x00 }, /* This row will have ecc correct bit 36 */ + { 0xffffffefffffffff, 0x00 }, /* This row will have ecc correct bit 35 */ + { 0xffffffdfffffffff, 0x00 }, /* This row will have ecc correct bit 34 */ + { 0xffffffbfffffffff, 0x00 }, /* This row will have ecc correct bit 33 */ + { 0xffffff7fffffffff, 0x00 }, /* This row will have ecc correct bit 32 */ + { 0xfffffffffeffffff, 0x00 }, /* This row will have ecc correct bit 31 */ + { 0xfffffffffdffffff, 0x00 }, /* 
This row will have ecc correct bit 30 */ + { 0xfffffffffbffffff, 0x00 }, /* This row will have ecc correct bit 29 */ + { 0xfffffffff7ffffff, 0x00 }, /* This row will have ecc correct bit 28 */ + { 0xffffffffefffffff, 0x00 }, /* This row will have ecc correct bit 27 */ + { 0xffffffffdfffffff, 0x00 }, /* This row will have ecc correct bit 26 */ + { 0xffffffffbfffffff, 0x00 }, /* This row will have ecc correct bit 25 */ + { 0xffffffff7fffffff, 0x00 }, /* This row will have ecc correct bit 24 */ + { 0xfffffffffffeffff, 0x00 }, /* This row will have ecc correct bit 23 */ + { 0xfffffffffffdffff, 0x00 }, /* This row will have ecc correct bit 22 */ + { 0xfffffffffffbffff, 0x00 }, /* This row will have ecc correct bit 21 */ + { 0xfffffffffff7ffff, 0x00 }, /* This row will have ecc correct bit 20 */ + { 0xffffffffffefffff, 0x00 }, /* This row will have ecc correct bit 19 */ + { 0xffffffffffdfffff, 0x00 }, /* This row will have ecc correct bit 18 */ + { 0xffffffffffbfffff, 0x00 }, /* This row will have ecc correct bit 17 */ + { 0xffffffffff7fffff, 0x00 }, /* This row will have ecc correct bit 16 */ + { 0xfffffffffffffeff, 0x00 }, /* This row will have ecc correct bit 15 */ + { 0xfffffffffffffdff, 0x00 }, /* This row will have ecc correct bit 14 */ + { 0xfffffffffffffbff, 0x00 }, /* This row will have ecc correct bit 13 */ + { 0xfffffffffffff7ff, 0x00 }, /* This row will have ecc correct bit 12 */ + { 0xffffffffffffefff, 0x00 }, /* This row will have ecc correct bit 11 */ + { 0xffffffffffffdfff, 0x00 }, /* This row will have ecc correct bit 10 */ + { 0xffffffffffffbfff, 0x00 }, /* This row will have ecc correct bit 9 */ + { 0xffffffffffff7fff, 0x00 }, /* This row will have ecc correct bit 8 */ + { 0xfffffffffffffffe, 0x00 }, /* This row will have ecc correct bit 7 */ + { 0xfffffffffffffffd, 0x00 }, /* This row will have ecc correct bit 6 */ + { 0xfffffffffffffffb, 0x00 }, /* This row will have ecc correct bit 5 */ + { 0xfffffffffffffff7, 0x00 }, /* This row will have ecc correct bit 4 */ + { 0xffffffffffffffef, 0x00 }, /* This row will have ecc correct bit 3 */ + { 0xffffffffffffffdf, 0x00 }, /* This row will have ecc correct bit 2 */ + { 0xffffffffffffffbf, 0x00 }, /* This row will have ecc correct bit 1 */ + { 0xffffffffffffff7f, 0x00 }, /* This row will have ecc correct bit 0 */ + /* + * 'Randomised' input into eccgenerate 0x54f7c5d1 was seeded to rand() + * Note: eccgenerate from skiboot commit 6cfaa3ba1015c6ac9cc4a06f878b4289022cff54 + * was used to generate these ecc numbers + */ + { 0x29d87c7c8ab7d46d, 0xb9 }, /* Use this row to check eccgenerate() */ + { 0x9064174098381641, 0x3b }, /* Use this row to check eccgenerate() */ + { 0x77fd7d2fc7d22154, 0xe4 }, /* Use this row to check eccgenerate() */ + { 0x6b02ba39b64a6168, 0xbf }, /* Use this row to check eccgenerate() */ + { 0x68fa9c633eef0544, 0x2a }, /* Use this row to check eccgenerate() */ + { 0xe814b258b3f92e55, 0x35 }, /* Use this row to check eccgenerate() */ + { 0xc3e2bd658db4db6d, 0xda }, /* Use this row to check eccgenerate() */ + { 0xe1dd487b6209876a, 0x45 }, /* Use this row to check eccgenerate() */ + { 0x309f9e6b91831433, 0xe4 }, /* Use this row to check eccgenerate() */ + { 0xd8b77d39f4d66410, 0x6c }, /* Use this row to check eccgenerate() */ + { 0x83ba293cf30a9e6a, 0xc9 }, /* Use this row to check eccgenerate() */ + { 0x3aeaef79af97ec1a, 0x09 }, /* Use this row to check eccgenerate() */ + { 0xa90ef431e4778c43, 0x91 }, /* Use this row to check eccgenerate() */ + { 0xa74bbf1e6b6fda00, 0xc5 }, /* Use this row to check eccgenerate() */ 
+ { 0x67b5a872efa57c30, 0xb9 }, /* Use this row to check eccgenerate() */ + { 0x795d511e3605ff67, 0x03 }, /* Use this row to check eccgenerate() */ + { 0xce3d1529918d256f, 0x36 }, /* Use this row to check eccgenerate() */ + { 0x586047430ac2685e, 0xab }, /* Use this row to check eccgenerate() */ + { 0xc00cca46463b9358, 0x42 }, /* Use this row to check eccgenerate() */ + { 0x842a991cc362017d, 0xb2 }, /* Use this row to check eccgenerate() */ + { 0x765c30522807672a, 0x26 }, /* Use this row to check eccgenerate() */ + { 0xb5bb42186c3f4b75, 0x2b }, /* Use this row to check eccgenerate() */ + { 0xce48d25f393fee37, 0x90 }, /* Use this row to check eccgenerate() */ + { 0xcbc2026b96998b13, 0x40 }, /* Use this row to check eccgenerate() */ + { 0x8b70f023ffe7704b, 0x23 }, /* Use this row to check eccgenerate() */ + { 0xf2f20e36a37a8024, 0x19 }, /* Use this row to check eccgenerate() */ + { 0x52126d3f0e2b1a60, 0xa0 }, /* Use this row to check eccgenerate() */ + { 0xf2a2a6232dddfe2f, 0xc4 }, /* Use this row to check eccgenerate() */ + { 0x984cd930fb206171, 0xa5 }, /* Use this row to check eccgenerate() */ + { 0xeac6dd2199ee6542, 0xea }, /* Use this row to check eccgenerate() */ + { 0xd0f3642aff018223, 0x3b }, /* Use this row to check eccgenerate() */ + { 0x908fa71263242f40, 0x0a }, /* Use this row to check eccgenerate() */ + { 0x6de6971e9e317a53, 0xa6 }, /* Use this row to check eccgenerate() */ + { 0xe46c0d2ce8efee55, 0xa4 }, /* Use this row to check eccgenerate() */ + { 0xab52f0522df36165, 0x06 }, /* Use this row to check eccgenerate() */ + { 0x55fac80f6997a648, 0x9a }, /* Use this row to check eccgenerate() */ + { 0xd5d6f13d21af2025, 0xed }, /* Use this row to check eccgenerate() */ + { 0x5bee0e5d0bb60b28, 0x66 }, /* Use this row to check eccgenerate() */ + { 0xa14f973ba41fc41d, 0xa8 }, /* Use this row to check eccgenerate() */ + { 0xa307356926b11148, 0x5a }, /* Use this row to check eccgenerate() */ + { 0xc92b926c2cc0875f, 0x7e }, /* Use this row to check eccgenerate() */ + { 0x3aeba13f95fa431f, 0x92 }, /* Use this row to check eccgenerate() */ + { 0xc2d7424f1b3eff2b, 0xe6 }, /* Use this row to check eccgenerate() */ + { 0x165f601d2c8e4863, 0x2b }, /* Use this row to check eccgenerate() */ + { 0xc67cae255a241c00, 0x78 }, /* Use this row to check eccgenerate() */ + { 0x5a269e2300263e3f, 0x07 }, /* Use this row to check eccgenerate() */ + { 0x634a6d7f96701350, 0xe9 }, /* Use this row to check eccgenerate() */ + { 0x34a28d23eab54536, 0xd2 }, /* Use this row to check eccgenerate() */ + { 0xd3a5340cd130051e, 0x48 }, /* Use this row to check eccgenerate() */ + { 0xfe236703190f9b4f, 0x7e }, /* Use this row to check eccgenerate() */ + { 0x82a641187ef8245f, 0x20 }, /* Use this row to check eccgenerate() */ + { 0xa0a74504541e3013, 0xc7 }, /* Use this row to check eccgenerate() */ + { 0x5fd43b3b577d3356, 0x85 }, /* Use this row to check eccgenerate() */ + { 0xfb9cf773fb955461, 0x06 }, /* Use this row to check eccgenerate() */ + { 0x214766290024d376, 0x80 }, /* Use this row to check eccgenerate() */ + { 0x2de45a569ea42c5d, 0x22 }, /* Use this row to check eccgenerate() */ + { 0x349f707cea72f815, 0xf3 }, /* Use this row to check eccgenerate() */ + { 0x05b1f74167cffc15, 0xe9 }, /* Use this row to check eccgenerate() */ + { 0x945d4579f676b34b, 0x63 }, /* Use this row to check eccgenerate() */ + { 0x519bcf4b1b10585f, 0x47 }, /* Use this row to check eccgenerate() */ + { 0x1b36961e5adaf31e, 0x25 }, /* Use this row to check eccgenerate() */ + { 0xf04a076fabc16d6f, 0x20 }, /* Use this row to check eccgenerate() */ + 
{ 0x9577b3257e80031e, 0xef }, /* Use this row to check eccgenerate() */ + { 0x4fb1083c24ed9412, 0x97 }, /* Use this row to check eccgenerate() */ + { 0x3dfc2f62681de831, 0x1f }, /* Use this row to check eccgenerate() */ + { 0xe7150d114ed56f3f, 0x10 }, /* Use this row to check eccgenerate() */ + { 0xa2f39f52bfa2717a, 0x40 }, /* Use this row to check eccgenerate() */ + { 0x1720a55087bd5215, 0xb3 }, /* Use this row to check eccgenerate() */ + { 0x8253a77601c8db0d, 0x45 }, /* Use this row to check eccgenerate() */ + { 0x01ecae0412bd9c44, 0x5f }, /* Use this row to check eccgenerate() */ + { 0xb161c921a39a0d20, 0x51 }, /* Use this row to check eccgenerate() */ + { 0x8d0d06362ed0095b, 0x94 }, /* Use this row to check eccgenerate() */ + { 0x969f0671e5003a1e, 0x9b }, /* Use this row to check eccgenerate() */ + { 0xdb77ed6992befd77, 0x63 }, /* Use this row to check eccgenerate() */ + { 0xadce55572afd4b6a, 0x3e }, /* Use this row to check eccgenerate() */ + { 0x84d73f092c13bd35, 0x50 }, /* Use this row to check eccgenerate() */ + { 0xd7d42a25c804ec75, 0x05 }, /* Use this row to check eccgenerate() */ + { 0x4685ef1374224778, 0x72 }, /* Use this row to check eccgenerate() */ + { 0x980fdc0a6d4cde4a, 0x9d }, /* Use this row to check eccgenerate() */ + { 0xd569c67c9636f84f, 0x81 }, /* Use this row to check eccgenerate() */ + { 0xe40b680fd60b0c6d, 0x2c }, /* Use this row to check eccgenerate() */ + { 0x95ae7d67bc7fd30d, 0x72 }, /* Use this row to check eccgenerate() */ + { 0x433d262386ff0762, 0xf4 }, /* Use this row to check eccgenerate() */ + { 0x87c7e36facce2238, 0x5a }, /* Use this row to check eccgenerate() */ + { 0xbf8bbf7cc590cd19, 0xe0 }, /* Use this row to check eccgenerate() */ + { 0x682bdb3988b39274, 0x4f }, /* Use this row to check eccgenerate() */ + { 0xb7839c4f70ed881e, 0x6b }, /* Use this row to check eccgenerate() */ + { 0x55eec23cf538e16f, 0x72 }, /* Use this row to check eccgenerate() */ + { 0x87f7de674d23a340, 0xb4 }, /* Use this row to check eccgenerate() */ + { 0x7720ef2a3066b026, 0x7c }, /* Use this row to check eccgenerate() */ + { 0x5d796d5c34c6343f, 0x5e }, /* Use this row to check eccgenerate() */ + { 0xfcca2035fbf72e34, 0xc6 }, /* Use this row to check eccgenerate() */ + { 0x6f1a762c344e9801, 0x87 }, /* Use this row to check eccgenerate() */ + { 0xa19a764c43501049, 0x35 }, /* Use this row to check eccgenerate() */ + { 0xd9860819072a5237, 0x6a }, /* Use this row to check eccgenerate() */ + { 0xdd355e2477043d49, 0x2d }, /* Use this row to check eccgenerate() */ + { 0x33841057bd927028, 0xaa }, /* Use this row to check eccgenerate() */ + { 0x4392780a73e4db0b, 0xfa }, /* Use this row to check eccgenerate() */ + { 0x1fb3fe4377c1367a, 0x47 }, /* Use this row to check eccgenerate() */ + { 0x3c520414ca595c7a, 0x58 }, /* Use this row to check eccgenerate() */ + { 0x520def6ede3ebe40, 0xac }, /* Use this row to check eccgenerate() */ + { 0x4e2c475fa57ddf4d, 0x5c }, /* Use this row to check eccgenerate() */ + { 0x9ab6c03d09918b3e, 0x95 }, /* Use this row to check eccgenerate() */ + { 0x56b42e7fa31a0a1c, 0x5d }, /* Use this row to check eccgenerate() */ + { 0xd480ba4222ae9f25, 0x87 }, /* Use this row to check eccgenerate() */ + { 0x5674d464cdd41d2a, 0xc7 }, /* Use this row to check eccgenerate() */ + { 0xc8cc4c5e31fa271f, 0x6e }, /* Use this row to check eccgenerate() */ + { 0x6548c020533ff519, 0x00 }, /* Use this row to check eccgenerate() */ + { 0x968f056337e7c20a, 0x0e }, /* Use this row to check eccgenerate() */ + { 0x3f11154207e3366d, 0xbe }, /* Use this row to check eccgenerate() */ + { 
0x7ee773366f160e7c, 0x53 }, /* Use this row to check eccgenerate() */ + { 0x2ca97e241c477366, 0x1c }, /* Use this row to check eccgenerate() */ + { 0x8f2b4f72b16b840d, 0x88 }, /* Use this row to check eccgenerate() */ + { 0x282dbb076f3bf72e, 0xd0 }, /* Use this row to check eccgenerate() */ + { 0x39955329afde4d36, 0xc7 }, /* Use this row to check eccgenerate() */ + { 0x8d1d0c77657fbf1b, 0x22 }, /* Use this row to check eccgenerate() */ + { 0x0afd9e698ba24218, 0x1a }, /* Use this row to check eccgenerate() */ + { 0x9533ce56dc495356, 0x2a }, /* Use this row to check eccgenerate() */ + { 0x7f645d72a4b35f27, 0x80 }, /* Use this row to check eccgenerate() */ + { 0xc661ff4cebe7fc55, 0xe2 }, /* Use this row to check eccgenerate() */ + { 0xb9bc1a0053e51735, 0xff }, /* Use this row to check eccgenerate() */ + { 0x84df3f541dd6d331, 0x54 }, /* Use this row to check eccgenerate() */ + { 0x7015c94b8189675e, 0x02 }, /* Use this row to check eccgenerate() */ + { 0xb9702a69ea270075, 0x1f }, /* Use this row to check eccgenerate() */ + { 0xf10a376206a5ce2e, 0x6f }, /* Use this row to check eccgenerate() */ + { 0x75bbdc2af8813f2b, 0xb1 }, /* Use this row to check eccgenerate() */ + { 0x14c9b2116ff2aa18, 0x7a }, /* Use this row to check eccgenerate() */ + { 0x205e2f26a1645b4f, 0x2b }, /* Use this row to check eccgenerate() */ + { 0x10a0527ea4f40104, 0xf6 }, /* Use this row to check eccgenerate() */ + { 0x53d34f3a498bea2d, 0x93 }, /* Use this row to check eccgenerate() */ + { 0xae0aaa494935a627, 0xbf }, /* Use this row to check eccgenerate() */ + { 0xd4d7e83fe0f05b31, 0x58 }, /* Use this row to check eccgenerate() */ + { 0xbc3aaf07b8074933, 0x74 }, /* Use this row to check eccgenerate() */ + { 0x5cbba85a690bb716, 0xbf }, /* Use this row to check eccgenerate() */ + { 0x55f3b36c3c9f0c7a, 0x3a }, /* Use this row to check eccgenerate() */ + { 0x8f84242f231da827, 0x50 }, /* Use this row to check eccgenerate() */ + { 0x40f37b590eb0ce6c, 0x9c }, /* Use this row to check eccgenerate() */ + { 0x8f39364b14646403, 0x0b }, /* Use this row to check eccgenerate() */ + { 0xfe8b6478b0084525, 0x21 }, /* Use this row to check eccgenerate() */ + { 0xb6ad135448aa6034, 0x1c }, /* Use this row to check eccgenerate() */ + { 0x402ca05fef969b5a, 0x90 }, /* Use this row to check eccgenerate() */ + { 0x5e8946732b69f07e, 0xaa }, /* Use this row to check eccgenerate() */ + { 0xcccd4b4e55f55271, 0xe8 }, /* Use this row to check eccgenerate() */ + { 0xf9e954757ee77519, 0xf8 }, /* Use this row to check eccgenerate() */ + { 0xc7726047dc6d9e4c, 0x67 }, /* Use this row to check eccgenerate() */ + { 0x25a344744cbda42f, 0x77 }, /* Use this row to check eccgenerate() */ + { 0x2cae0061757d0a11, 0xca }, /* Use this row to check eccgenerate() */ + { 0x2d855344f97a2d34, 0x9b }, /* Use this row to check eccgenerate() */ + { 0x6386e44ae9e8af68, 0x6c }, /* Use this row to check eccgenerate() */ + { 0x2588bc628a40fc1e, 0x4c }, /* Use this row to check eccgenerate() */ + { 0xad5da446b8799837, 0x31 }, /* Use this row to check eccgenerate() */ + { 0xc6296724b40ce111, 0xde }, /* Use this row to check eccgenerate() */ + { 0xc8704515ed502020, 0x72 }, /* Use this row to check eccgenerate() */ + { 0x9d59654555639d6f, 0x16 }, /* Use this row to check eccgenerate() */ + { 0x9e0dfe23c6fca90d, 0x37 }, /* Use this row to check eccgenerate() */ + { 0xb593456853077919, 0xee }, /* Use this row to check eccgenerate() */ + { 0x7e706918de399e03, 0xe7 }, /* Use this row to check eccgenerate() */ + { 0x332ff174131d8c5b, 0x34 }, /* Use this row to check eccgenerate() */ + { 
0x920402754a3eb566, 0x2f }, /* Use this row to check eccgenerate() */ + { 0x26ac53332c19466a, 0x0c }, /* Use this row to check eccgenerate() */ + { 0x78d6ea195977623c, 0x6f }, /* Use this row to check eccgenerate() */ + { 0xcff46c4d4b4f9827, 0x20 }, /* Use this row to check eccgenerate() */ + { 0x44cac55ba584eb7a, 0x5f }, /* Use this row to check eccgenerate() */ + { 0x8e6d9b63fc79c011, 0xc8 }, /* Use this row to check eccgenerate() */ + { 0x86babc30a750aa26, 0x20 }, /* Use this row to check eccgenerate() */ + { 0x5fca425eb3f55746, 0x12 }, /* Use this row to check eccgenerate() */ + { 0x6702395833186177, 0xaf }, /* Use this row to check eccgenerate() */ + { 0x2069811725f4a902, 0x87 }, /* Use this row to check eccgenerate() */ + { 0x7b57477230737e6d, 0xd9 }, /* Use this row to check eccgenerate() */ + { 0xf66f287bbdc2e65c, 0xfa }, /* Use this row to check eccgenerate() */ + { 0x10ca5f7619654516, 0x52 }, /* Use this row to check eccgenerate() */ + { 0xf79ee319ac036e63, 0x58 }, /* Use this row to check eccgenerate() */ + { 0xbf20fa3e8e3ac90e, 0x82 }, /* Use this row to check eccgenerate() */ + { 0xd8787e752bced40e, 0x54 }, /* Use this row to check eccgenerate() */ + { 0x57e71a795125fc33, 0xfe }, /* Use this row to check eccgenerate() */ + { 0xab9c5e70fe24d228, 0xfc }, /* Use this row to check eccgenerate() */ + { 0x49746a50d0bd0513, 0x9d }, /* Use this row to check eccgenerate() */ + { 0x7542f10d7a91cb3d, 0xb9 }, /* Use this row to check eccgenerate() */ + { 0x760b8c4f8e3e302c, 0x82 }, /* Use this row to check eccgenerate() */ + { 0x358fda5203b08c71, 0x23 }, /* Use this row to check eccgenerate() */ + { 0xb6a5e437fdc54800, 0xb6 }, /* Use this row to check eccgenerate() */ + { 0x30dea97795591d31, 0x7c }, /* Use this row to check eccgenerate() */ + { 0xba4dc7331da81d10, 0x11 }, /* Use this row to check eccgenerate() */ + { 0x4d1b9c7d51472b0f, 0x37 }, /* Use this row to check eccgenerate() */ + { 0x0e0a126c35a50e26, 0xd6 }, /* Use this row to check eccgenerate() */ + { 0x4e0a543c448bc478, 0x0f }, /* Use this row to check eccgenerate() */ + { 0xf08e325c1fd47162, 0x6b }, /* Use this row to check eccgenerate() */ + { 0xad0e3b7146a93756, 0x86 }, /* Use this row to check eccgenerate() */ + { 0x71770c65afaf2c1b, 0xae }, /* Use this row to check eccgenerate() */ + { 0x01d5284f8687b966, 0x37 }, /* Use this row to check eccgenerate() */ + { 0x84ac8b0fc85e275e, 0x86 }, /* Use this row to check eccgenerate() */ + { 0x981c2d71ac71873f, 0x4e }, /* Use this row to check eccgenerate() */ + { 0x2603537dce20f65f, 0xb5 }, /* Use this row to check eccgenerate() */ + { 0x5c5f260c0d5f1e7f, 0x0b }, /* Use this row to check eccgenerate() */ + { 0x100fab709c0edf4c, 0xc9 }, /* Use this row to check eccgenerate() */ + { 0x99d4274d91ee005f, 0x83 }, /* Use this row to check eccgenerate() */ + { 0x26481e10c6b48f28, 0x16 }, /* Use this row to check eccgenerate() */ + { 0xe45cad38cab2d144, 0x9c }, /* Use this row to check eccgenerate() */ + { 0x1bfafc53e195e543, 0x8e }, /* Use this row to check eccgenerate() */ + { 0x163bf46931784936, 0xdc }, /* Use this row to check eccgenerate() */ + { 0x75030e2f29040f40, 0x48 }, /* Use this row to check eccgenerate() */ + { 0x48d8802265454826, 0x2a }, /* Use this row to check eccgenerate() */ + { 0xabee7f7c6592400b, 0x2b }, /* Use this row to check eccgenerate() */ + { 0x15426d26f6e6bb13, 0x89 }, /* Use this row to check eccgenerate() */ + { 0x7c6e757a1c668c61, 0x6d }, /* Use this row to check eccgenerate() */ + { 0xe4c4b33f16179675, 0x74 }, /* Use this row to check eccgenerate() */ + { 
0xc2881d35001b010a, 0xd4 }, /* Use this row to check eccgenerate() */ + { 0xce3bf7697de1e030, 0x65 }, /* Use this row to check eccgenerate() */ + { 0x8a40ff2fe88b7032, 0x19 }, /* Use this row to check eccgenerate() */ + { 0x849a4f7f2a9b1d76, 0x58 }, /* Use this row to check eccgenerate() */ + { 0xbc891e559b4faa20, 0x4c }, /* Use this row to check eccgenerate() */ + { 0x61043a491e6f774c, 0x28 }, /* Use this row to check eccgenerate() */ + { 0xe8214911e2d13c65, 0x9e }, /* Use this row to check eccgenerate() */ + { 0xc36722294561e701, 0x3d }, /* Use this row to check eccgenerate() */ + { 0x77d93038031c4665, 0x55 }, /* Use this row to check eccgenerate() */ + { 0x2c205525daa21613, 0x85 }, /* Use this row to check eccgenerate() */ + { 0x3fe85e39ecdc3e67, 0x20 }, /* Use this row to check eccgenerate() */ + { 0x526f7f7275f8d547, 0xa4 }, /* Use this row to check eccgenerate() */ + { 0x6bdf915bead6de35, 0xac }, /* Use this row to check eccgenerate() */ + { 0x063d6b1767b1ec18, 0x78 }, /* Use this row to check eccgenerate() */ + { 0x7dc8820ee74d0756, 0x31 }, /* Use this row to check eccgenerate() */ + { 0xe7680860ea011f57, 0x3f }, /* Use this row to check eccgenerate() */ + { 0x67e3ff073f51a043, 0xd6 }, /* Use this row to check eccgenerate() */ + { 0x27dd1076b6a4ff49, 0x10 }, /* Use this row to check eccgenerate() */ + { 0xe03f1d40f223ff37, 0xec }, /* Use this row to check eccgenerate() */ + { 0x8d73a958ab776075, 0x6f }, /* Use this row to check eccgenerate() */ + { 0xc9e6d7419cc93b15, 0x8f }, /* Use this row to check eccgenerate() */ + { 0x7f9b787aee77e321, 0xb7 }, /* Use this row to check eccgenerate() */ + { 0x34d9ca23b1082153, 0xa9 }, /* Use this row to check eccgenerate() */ + { 0xb424673842039b23, 0xe2 }, /* Use this row to check eccgenerate() */ + { 0x1ca6b136abb2fb5b, 0xe1 }, /* Use this row to check eccgenerate() */ + { 0x978f3a43e144bc5d, 0x64 }, /* Use this row to check eccgenerate() */ + { 0x563d92255b8e1070, 0x14 }, /* Use this row to check eccgenerate() */ + { 0x4565ef25e9feb935, 0x2d }, /* Use this row to check eccgenerate() */ + { 0x50b0a64ec11c2401, 0x3c }, /* Use this row to check eccgenerate() */ + { 0xa86a2b574ba25a3d, 0x8b }, /* Use this row to check eccgenerate() */ + { 0x36a47914cd78295d, 0xf1 }, /* Use this row to check eccgenerate() */ + { 0x0ccac9208fd33337, 0xe4 }, /* Use this row to check eccgenerate() */ + { 0x457833019d87791c, 0xc4 }, /* Use this row to check eccgenerate() */ + { 0x8fab785433a7da16, 0x0c }, /* Use this row to check eccgenerate() */ + { 0xdf1e3b0c26b85041, 0x94 }, /* Use this row to check eccgenerate() */ + { 0xc2818c561c1f222d, 0x9a }, /* Use this row to check eccgenerate() */ + { 0x0b97054fa805134e, 0xec }, /* Use this row to check eccgenerate() */ + { 0x5a0e3421411d0551, 0x57 }, /* Use this row to check eccgenerate() */ + { 0x8420a0743f70d072, 0xa8 }, /* Use this row to check eccgenerate() */ + { 0xea22cc4e0e339b59, 0x15 }, /* Use this row to check eccgenerate() */ + { 0xef775737a0c6512b, 0xe7 }, /* Use this row to check eccgenerate() */ + { 0xfc54621b81b20612, 0x9a }, /* Use this row to check eccgenerate() */ + { 0x6bb1c04745b5e95c, 0x1e }, /* Use this row to check eccgenerate() */ + { 0x06d20d5e41ba5141, 0x56 }, /* Use this row to check eccgenerate() */ + { 0x8d5cac7ebb616716, 0x43 }, /* Use this row to check eccgenerate() */ + { 0x89da9073ae3c3935, 0xb1 }, /* Use this row to check eccgenerate() */ + { 0x3e106d6cc3002613, 0xec }, /* Use this row to check eccgenerate() */ + { 0x60889f2f95a45a14, 0x69 }, /* Use this row to check eccgenerate() */ + { 
0xc94b352b8388a06d, 0x53 }, /* Use this row to check eccgenerate() */ + { 0xa940f12ef0331804, 0x7a }, /* Use this row to check eccgenerate() */ + +}; + +int main(void) +{ + int i; + uint8_t ret_memcpy; + uint8_t ret_verify; + uint64_t dst; + uint64_t *buf; + struct ecc64 *ret_buf; + + /* + * Test that eccgenerate() still works, but skip the first 64 because they + * have intentional bitflips + */ + printf("Checking eccgenerate()\n"); + for (i = 64; i < NUM_ECC_ROWS; i++) { + if (eccgenerate(be64toh(ecc_data[i].data)) != ecc_data[i].ecc) { + ERR("ECC did not generate the correct value, expecting 0x%02x, got 0x%02x\n", + ecc_data[i].ecc, eccgenerate(be64toh(ecc_data[i].data))); + } + } + + /* Test that the ecc code can detect and recover bitflips */ + printf("Testing bitflip recovery\n"); + for (i = 0; i < 64; i++) { + ret_memcpy = memcpy_from_ecc(&dst, &ecc_data[i], sizeof(dst)); + if (dst != 0xffffffffffffffff || ret_memcpy) { + ERR("ECC code didn't correct bad bit %d in 0x%016lx\n", 63 - i, be64toh(ecc_data[i].data)); + exit(1); + } + + ret_verify = eccverify(be64toh(ecc_data[i].data), ecc_data[i].ecc); + if (ret_verify != 63 - i) { + ERR("ECC did not catch incorrect bit %d in row 0x%016lx 0x%02x, got 0x%02x\n", + i, ecc_data[i].data, ecc_data[i].ecc, ret_verify); + exit(1); + } + } + + buf = malloc(NUM_ECC_ROWS * sizeof(*buf)); + if (!buf) { + ERR("malloc #1 failed during ecc test\n"); + exit(1); + } + printf("pass\n"); + + /* Test a large memcpy */ + printf("Testing a large(ish) memcpy_from_ecc()\n"); + ret_memcpy = memcpy_from_ecc(buf, ecc_data, NUM_ECC_ROWS * sizeof(*buf)); + if (ret_memcpy) { + ERR("ECC Couldn't memcpy entire buffer\n"); + exit(1); + } + + for (i = 0; i < NUM_ECC_ROWS; i++) { + /* Large memcpy should have fixed the bitflips */ + if (i < 64 && buf[i] != 0xffffffffffffffff) { + ERR("memcpy_from_ecc got it wrong for uint64_t number %d, got 0x%016lx, expecting 0xffffffffffffffff\n", + i, buf[i]); + exit(1); + } + + /* But not changed any of the correct data */ + if (i > 63 && buf[i] != ecc_data[i].data) { + ERR("memcpy_from_ecc got it wrong for uint64_t number %d, got 0x%016lx, expecting 0x%016lx\n", + i, buf[i], ecc_data[i].data); + exit(1); + } + } + printf("pass\n"); + + /* Test a memcpy to add ecc data */ + printf("Testing a large(ish) memcpy_to_ecc()\n"); + ret_buf = malloc(ecc_buffer_size(NUM_ECC_ROWS * sizeof(*buf))); + if (!ret_buf) { + ERR("malloc #2 failed during ecc test\n"); + exit(1); + } + + ret_memcpy = memcpy_to_ecc(ret_buf, buf, NUM_ECC_ROWS * sizeof(*buf)); + if (ret_memcpy) { + ERR("ECC Couldn't memcpy entire buffer\n"); + exit(1); + } + + for (i = 0; i < NUM_ECC_ROWS; i++) { + /* The data should be the same */ + if (ret_buf[i].data != buf[i]) { + ERR("memcpy_to_ecc got it wrong on uint64_t %d, expecting 0x%016lx, got 0x%016lx\n", + i, buf[i], ret_buf[i].data); + exit(1); + } + + /* Check the correctness of ecc bytes */ + if (ret_buf[i].ecc != ecc_data[i].ecc) { + ERR("memcpy_to_ecc got it wrong on the ecc for uint64_t %d, expecting 0x%02x, got 0x%02x\n", + i, ecc_data[i].ecc, ret_buf[i].ecc); + exit(1); + } + } + printf("ECC tests pass\n"); + + printf("ECC test error conditions\n"); + if (memcpy_to_ecc(ret_buf, buf, 7) == 0) { + ERR("memcpy_to_ecc didn't detect bad size 7\n"); + exit(1); + } + + if (memcpy_to_ecc(ret_buf, buf, 15) == 0) { + ERR("memcpy_to_ecc didn't detect bad size 15\n"); + exit(1); + } + if (memcpy_from_ecc(buf, ret_buf, 7) == 0) { + ERR("memcpy_from_ecc didn't detect bad size 7\n"); + exit(1); + } + if (memcpy_from_ecc(buf, ret_buf, 15) == 
0) { + ERR("memcpy_from_ecc didn't detect bad size 15\n"); + exit(1); + } + printf("ECC error conditions pass\n"); + + free(buf); + free(ret_buf); + + /* Check that unaligned address become aligned */ + if (ecc_buffer_align(0, 5) != 0) { + ERR("ecc_buffer_align(0, 5) not 0 -> %ld\n", ecc_buffer_align(0, 5)); + exit(1); + } + + if (ecc_buffer_align(0, 8) != 0) { + ERR("ecc_buffer_align(0, 8) not 0 -> %ld\n", ecc_buffer_align(0, 8)); + exit(1); + } + if (ecc_buffer_align(0, 9) != 9) { + ERR("ecc_buffer_align(0, 9) not 9 -> %ld\n", ecc_buffer_align(0, 9)); + exit(1); + } + if (ecc_buffer_align(0, 15) != 9) { + ERR("ecc_buffer_align(0, 15) not 9 -> %ld\n", ecc_buffer_align(0, 15)); + exit(1); + } + if (ecc_buffer_align(5, 10) != 5) { + ERR("ecc_buffer_align(5, 10) not 5 -> %ld\n", ecc_buffer_align(5, 10)); + exit(1); + } + if (ecc_buffer_align(5, 18) != 14) { + ERR("ecc_buffer_align(5, 18) not 14 -> %ld\n", ecc_buffer_align(5, 18)); + exit(1); + } + if (ecc_buffer_align(0, 50) != 45) { + ERR("ecc_buffer_align(0, 50) not 45 -> %ld\n", ecc_buffer_align(0, 50)); + exit(1); + } + return 0; +} diff --git a/roms/skiboot/libflash/test/test-flash.c b/roms/skiboot/libflash/test/test-flash.c new file mode 100644 index 000000000..3304195f8 --- /dev/null +++ b/roms/skiboot/libflash/test/test-flash.c @@ -0,0 +1,448 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* Copyright 2013-2017 IBM Corp. */ + +#include <stdio.h> +#include <stdlib.h> +#include <stdint.h> +#include <string.h> + +#include <libflash/libflash.h> +#include <libflash/libflash-priv.h> + +#include "../libflash.c" +#include "../ecc.c" + +#define __unused __attribute__((unused)) + +#define ERR(fmt...) fprintf(stderr, fmt) + +/* Flash commands */ +#define CMD_PP 0x02 +#define CMD_READ 0x03 +#define CMD_WRDI 0x04 +#define CMD_RDSR 0x05 +#define CMD_WREN 0x06 +#define CMD_SE 0x20 +#define CMD_RDSCUR 0x2b +#define CMD_BE32K 0x52 +#define CMD_CE 0x60 +#define CMD_RDID 0x9f +#define CMD_EN4B 0xb7 +#define CMD_BE 0xd8 +#define CMD_RDDPB 0xe0 +#define CMD_RDSPB 0xe2 +#define CMD_EX4B 0xe9 + +/* Flash status bits */ +#define STAT_WIP 0x01 +#define STAT_WEN 0x02 + +static uint8_t *sim_image; +static uint32_t sim_image_sz = 0x100000; +static uint32_t sim_index; +static uint32_t sim_addr; +static uint32_t sim_er_size; +static uint8_t sim_sr; +static bool sim_fl_4b; +static bool sim_ct_4b; + +static enum sim_state { + sim_state_idle, + sim_state_rdid, + sim_state_rdsr, + sim_state_read_addr, + sim_state_read_data, + sim_state_write_addr, + sim_state_write_data, + sim_state_erase_addr, + sim_state_erase_done, +} sim_state; + +/* + * Simulated flash & controller + */ +static int sim_start_cmd(uint8_t cmd) +{ + if (sim_state != sim_state_idle) { + ERR("SIM: Command %02x in wrong state %d\n", cmd, sim_state); + return -1; + } + + sim_index = 0; + sim_addr = 0; + + switch(cmd) { + case CMD_RDID: + sim_state = sim_state_rdid; + break; + case CMD_RDSR: + sim_state = sim_state_rdsr; + break; + case CMD_EX4B: + sim_fl_4b = false; + break; + case CMD_EN4B: + sim_fl_4b = true; + break; + case CMD_WREN: + sim_sr |= STAT_WEN; + break; + case CMD_READ: + sim_state = sim_state_read_addr; + if (sim_ct_4b != sim_fl_4b) + ERR("SIM: 4b mode mismatch in READ !\n"); + break; + case CMD_PP: + sim_state = sim_state_write_addr; + if (sim_ct_4b != sim_fl_4b) + ERR("SIM: 4b mode mismatch in PP !\n"); + if (!(sim_sr & STAT_WEN)) + ERR("SIM: PP without WEN, ignoring... 
\n"); + break; + case CMD_SE: + case CMD_BE32K: + case CMD_BE: + if (sim_ct_4b != sim_fl_4b) + ERR("SIM: 4b mode mismatch in SE/BE !\n"); + if (!(sim_sr & STAT_WEN)) + ERR("SIM: SE/BE without WEN, ignoring... \n"); + sim_state = sim_state_erase_addr; + switch(cmd) { + case CMD_SE: sim_er_size = 0x1000; break; + case CMD_BE32K: sim_er_size = 0x8000; break; + case CMD_BE: sim_er_size = 0x10000; break; + } + break; + case CMD_CE: + if (!(sim_sr & STAT_WEN)) { + ERR("SIM: CE without WEN, ignoring... \n"); + break; + } + memset(sim_image, 0xff, sim_image_sz); + sim_sr |= STAT_WIP; + sim_sr &= ~STAT_WEN; + break; + default: + ERR("SIM: Unsupported command %02x\n", cmd); + return -1; + } + return 0; +} + +static void sim_end_cmd(void) +{ + /* For write and sector/block erase, set WIP & clear WEN here */ + if (sim_state == sim_state_write_data) { + sim_sr |= STAT_WIP; + sim_sr &= ~STAT_WEN; + } + sim_state = sim_state_idle; +} + +static bool sim_do_address(const uint8_t **buf, uint32_t *len) +{ + uint8_t asize = sim_fl_4b ? 4 : 3; + const uint8_t *p = *buf; + + while(*len) { + sim_addr = (sim_addr << 8) | *(p++); + *buf = p; + *len = *len - 1; + sim_index++; + if (sim_index >= asize) + return true; + } + return false; +} + +static int sim_wbytes(const void *buf, uint32_t len) +{ + const uint8_t *b = buf; + bool addr_complete; + + again: + switch(sim_state) { + case sim_state_read_addr: + addr_complete = sim_do_address(&b, &len); + if (addr_complete) { + sim_state = sim_state_read_data; + sim_index = 0; + if (len) + goto again; + } + break; + case sim_state_write_addr: + addr_complete = sim_do_address(&b, &len); + if (addr_complete) { + sim_state = sim_state_write_data; + sim_index = 0; + if (len) + goto again; + } + break; + case sim_state_write_data: + if (!(sim_sr & STAT_WEN)) + break; + while(len--) { + uint8_t c = *(b++); + if (sim_addr >= sim_image_sz) { + ERR("SIM: Write past end of flash\n"); + return -1; + } + /* Flash write only clears bits */ + sim_image[sim_addr] &= c; + sim_addr = (sim_addr & 0xffffff00) | + ((sim_addr + 1) & 0xff); + } + break; + case sim_state_erase_addr: + if (!(sim_sr & STAT_WEN)) + break; + addr_complete = sim_do_address(&b, &len); + if (addr_complete) { + memset(sim_image + sim_addr, 0xff, sim_er_size); + sim_sr |= STAT_WIP; + sim_sr &= ~STAT_WEN; + sim_state = sim_state_erase_done; + } + break; + default: + ERR("SIM: Write in wrong state %d\n", sim_state); + return -1; + } + return 0; +} + +static int sim_rbytes(void *buf, uint32_t len) +{ + uint8_t *b = buf; + + switch(sim_state) { + case sim_state_rdid: + while(len--) { + switch(sim_index) { + case 0: + *(b++) = 0x55; + break; + case 1: + *(b++) = 0xaa; + break; + case 2: + *(b++) = 0x55; + break; + default: + ERR("SIM: RDID index %d\n", sim_index); + *(b++) = 0; + break; + } + sim_index++; + } + break; + case sim_state_rdsr: + while(len--) { + *(b++) = sim_sr; + if (sim_index > 0) + ERR("SIM: RDSR index %d\n", sim_index); + sim_index++; + + /* If WIP was 1, clear it, ie, simulate write/erase + * completion + */ + sim_sr &= ~STAT_WIP; + } + break; + case sim_state_read_data: + while(len--) { + if (sim_addr >= sim_image_sz) { + ERR("SIM: Read past end of flash\n"); + return -1; + } + *(b++) = sim_image[sim_addr++]; + } + break; + default: + ERR("SIM: Read in wrong state %d\n", sim_state); + return -1; + } + return 0; +} + +static int sim_send_addr(uint32_t addr) +{ + const void *ap; + + /* Layout address MSB first in memory */ + addr = cpu_to_be32(addr); + + /* Send the right amount of bytes */ + ap = (char 
*)&addr; + + if (sim_ct_4b) + return sim_wbytes(ap, 4); + else + return sim_wbytes(ap + 1, 3); +} + +static int sim_cmd_rd(struct spi_flash_ctrl *ctrl __unused, uint8_t cmd, + bool has_addr, uint32_t addr, void *buffer, + uint32_t size) +{ + int rc; + + rc = sim_start_cmd(cmd); + if (rc) + goto bail; + if (has_addr) { + rc = sim_send_addr(addr); + if (rc) + goto bail; + } + if (buffer && size) + rc = sim_rbytes(buffer, size); + bail: + sim_end_cmd(); + return rc; +} + +static int sim_cmd_wr(struct spi_flash_ctrl *ctrl __unused, uint8_t cmd, + bool has_addr, uint32_t addr, const void *buffer, + uint32_t size) +{ + int rc; + + rc = sim_start_cmd(cmd); + if (rc) + goto bail; + if (has_addr) { + rc = sim_send_addr(addr); + if (rc) + goto bail; + } + if (buffer && size) + rc = sim_wbytes(buffer, size); + bail: + sim_end_cmd(); + return rc; +} + +static int sim_set_4b(struct spi_flash_ctrl *ctrl __unused, bool enable) +{ + sim_ct_4b = enable; + + return 0; +} + +static int sim_read(struct spi_flash_ctrl *ctrl __unused, uint32_t pos, + void *buf, uint32_t len) +{ + if (sim_ct_4b != sim_fl_4b) + ERR("SIM: 4b mode mismatch in autoread !\n"); + if ((pos + len) < pos) + return -1; + if ((pos + len) > sim_image_sz) + return -1; + memcpy(buf, sim_image + pos, len); + return 0; +}; + +struct spi_flash_ctrl sim_ctrl = { + .cmd_wr = sim_cmd_wr, + .cmd_rd = sim_cmd_rd, + .set_4b = sim_set_4b, + .read = sim_read, +}; + +int main(void) +{ + struct blocklevel_device *bl; + uint64_t total_size; + uint32_t erase_granule; + const char *name; + uint16_t *test; + struct ecc64 *ecc_test; + uint64_t *test64; + int i, rc; + + sim_image = malloc(sim_image_sz); + memset(sim_image, 0xff, sim_image_sz); + test = malloc(0x10000 * 2); + + rc = flash_init(&sim_ctrl, &bl, NULL); + if (rc) { + ERR("flash_init failed with err %d\n", rc); + exit(1); + } + rc = flash_get_info(bl, &name, &total_size, &erase_granule); + if (rc) { + ERR("flash_get_info failed with err %d\n", rc); + exit(1); + } + + /* Make up a test pattern */ + for (i=0; i<0x10000;i++) + test[i] = cpu_to_be16(i); + + /* Write 64k of stuff at 0 and at 128k */ + printf("Writing test patterns...\n"); + flash_smart_write(bl, 0, test, 0x10000); + flash_smart_write(bl, 0x20000, test, 0x10000); + + /* Write "Hello world" straddling the 64k boundary */ +#define HW "Hello World" + printf("Writing test string...\n"); + flash_smart_write(bl, 0xfffc, HW, sizeof(HW)); + + /* Check result */ + if (memcmp(sim_image + 0xfffc, HW, sizeof(HW))) { + ERR("Test string mismatch !\n"); + exit(1); + } + printf("Test string pass\n"); + if (memcmp(sim_image, test, 0xfffc)) { + ERR("Test pattern mismatch !\n"); + exit(1); + } + printf("Test pattern pass\n"); + + printf("Test ECC interfaces\n"); + flash_smart_write_corrected(bl, 0, test, 0x10000, 1); + ecc_test = (struct ecc64 *)sim_image; + test64 = (uint64_t *)test; + for (i = 0; i < 0x10000 / sizeof(*ecc_test); i++) { + if (test64[i] != ecc_test[i].data) { + ERR("flash_smart_write_corrected() pattern missmatch at %d: 0x%016lx vs 0x%016lx\n", + i, test64[i], ecc_test[i].data); + exit(1); + } + if (ecc_test[i].ecc != eccgenerate(be64toh(test64[i]))) { + ERR("ECCs don't match 0x%02x vs 0x%02x\n", ecc_test[i].ecc, eccgenerate(test64[i])); + exit(1); + } + } + printf("Test ECC interface pass\n"); + + printf("Test ECC erase\n"); + if (flash_erase(bl, 0, 0x10000) != 0) { + ERR("flash_erase didn't return 0\n"); + exit(1); + } + + for (i = 0; i < 0x10000 / sizeof(*ecc_test); i++) { + uint8_t zero = 0; + if (ecc_test[i].data != 0xFFFFFFFFFFFFFFFF) 
{ + ERR("Data not properly cleared at %d\n", i); + exit(1); + } + rc = flash_write(bl, i * sizeof(*ecc_test) + 8, &zero, 1, 0); + if (rc || ecc_test[i].ecc != 0) { + ERR("Cleared data not correctly ECCed: 0x%02x (0x%016lx) expecting 0 at %d\n", ecc_test[i].ecc, ecc_test[i].data, i); + exit(1); + } + } + printf("Test ECC erase pass\n"); + + flash_exit(bl); + free(test); + + return 0; +} diff --git a/roms/skiboot/libflash/test/test-ipmi-hiomap.c b/roms/skiboot/libflash/test/test-ipmi-hiomap.c new file mode 100644 index 000000000..6117e9dd4 --- /dev/null +++ b/roms/skiboot/libflash/test/test-ipmi-hiomap.c @@ -0,0 +1,3388 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* Copyright 2018-2019 IBM Corp. */ + +#include <assert.h> +#include <ccan/container_of/container_of.h> +#include <libflash/blocklevel.h> +#include <lock.h> +#include <lpc.h> +#include <hiomap.h> +#include <ipmi.h> +#include <opal-api.h> +#include <platform.h> +#include <stdio.h> +#include <stdlib.h> + +#include "../ipmi-hiomap.h" +#include "../errors.h" + +/* Stub for blocklevel debug macros */ +bool libflash_debug; + +const struct bmc_sw_config bmc_sw_hiomap = { + .ipmi_oem_hiomap_cmd = IPMI_CODE(0x3a, 0x5a), +}; + +const struct bmc_platform _bmc_platform = { + .name = "generic:hiomap", + .sw = &bmc_sw_hiomap, +}; + +enum scenario_event_type { + scenario_sentinel = 0, + scenario_event_p, + scenario_cmd, + scenario_sel, + scenario_delay, +}; + +struct scenario_cmd_data { + uint8_t cmd; + uint8_t seq; + uint8_t args[13]; +} __attribute__((packed)); + +struct scenario_cmd { + struct scenario_cmd_data req; + struct scenario_cmd_data resp; + uint8_t cc; + size_t resp_size; +}; + +struct scenario_sel { + uint8_t bmc_state; +}; + +struct scenario_event { + enum scenario_event_type type; + union { + const struct scenario_event *p; + struct scenario_cmd c; + struct scenario_sel s; + }; +}; + +#define SCENARIO_SENTINEL { .type = scenario_sentinel } + +struct ipmi_sel { + void (*fn)(uint8_t data, void *context); + void *context; +}; + +struct ipmi_msg_ctx { + const struct scenario_event *scenario; + const struct scenario_event *cursor; + + struct ipmi_sel sel; + + struct ipmi_msg msg; +}; + +struct ipmi_msg_ctx ipmi_msg_ctx; + +const struct bmc_platform *bmc_platform = &_bmc_platform; + +static void scenario_enter(const struct scenario_event *scenario) +{ + ipmi_msg_ctx.scenario = scenario; + ipmi_msg_ctx.cursor = scenario; +} + +static void scenario_advance(void) +{ + struct ipmi_msg_ctx *ctx = &ipmi_msg_ctx; + + assert(ctx->cursor->type == scenario_delay); + ctx->cursor++; + + /* Deliver all the undelayed, scheduled SELs */ + while (ctx->cursor->type == scenario_sel) { + ctx->sel.fn(ctx->cursor->s.bmc_state, ctx->sel.context); + ctx->cursor++; + } +} + +static void scenario_exit(void) +{ + if (ipmi_msg_ctx.cursor->type != scenario_sentinel) { + ptrdiff_t d = ipmi_msg_ctx.cursor - ipmi_msg_ctx.scenario; + printf("%s: Exiting on event %tu with event type %d \n", + __func__, d, ipmi_msg_ctx.cursor->type); + assert(false); + } +} + +void ipmi_init_msg(struct ipmi_msg *msg, int interface __attribute__((unused)), + uint32_t code, void (*complete)(struct ipmi_msg *), + void *user_data, size_t req_size, size_t resp_size) +{ + msg->backend = NULL; + msg->cmd = IPMI_CMD(code); + msg->netfn = IPMI_NETFN(code) << 2; + msg->req_size = req_size; + msg->resp_size = resp_size; + msg->complete = complete; + msg->user_data = user_data; +} + +struct ipmi_msg *ipmi_mkmsg(int interface __attribute__((unused)), + uint32_t code, void 
(*complete)(struct ipmi_msg *), + void *user_data, void *req_data, size_t req_size, + size_t resp_size) +{ + struct ipmi_msg *msg = &ipmi_msg_ctx.msg; + + ipmi_init_msg(msg, 0 /* some bogus value */, code, complete, user_data, + req_size, resp_size); + + msg->data = malloc(req_size > resp_size ? req_size : resp_size); + if (req_data) + memcpy(msg->data, req_data, req_size); + + return msg; +} + +void ipmi_free_msg(struct ipmi_msg *msg __attribute__((unused))) +{ + if (msg) + free(msg->data); +} + +void ipmi_queue_msg_sync(struct ipmi_msg *msg) +{ + struct ipmi_msg_ctx *ctx = container_of(msg, struct ipmi_msg_ctx, msg); + const struct scenario_cmd *cmd; + + if (ctx->cursor->type == scenario_cmd) { + cmd = &ctx->cursor->c; + } else if (ctx->cursor->type == scenario_event_p) { + assert(ctx->cursor->p->type == scenario_cmd); + cmd = &ctx->cursor->p->c; + } else { + printf("Got unexpected request:\n"); + for (ssize_t i = 0; i < msg->req_size; i++) + printf("msg->data[%zd]: 0x%02x\n", i, msg->data[i]); + assert(false); + } + + assert((msg->netfn >> 2) == 0x3a); + assert(msg->cmd == 0x5a); + assert(msg->req_size >= 2); + + if (memcmp(msg->data, &cmd->req, msg->req_size)) { + printf("Comparing received vs expected message\n"); + for (ssize_t i = 0; i < msg->req_size; i++) { + printf("msg->data[%zd]: 0x%02x, cmd->req[%zd]: 0x%02x\n", + i, msg->data[i], i, ((uint8_t *)(&cmd->req))[i]); + } + assert(false); + } + + msg->cc = cmd->cc; + memcpy(msg->data, &cmd->resp, msg->resp_size); + + if (cmd->resp_size) + msg->resp_size = cmd->resp_size; + + msg->complete(msg); + + ctx->cursor++; + + /* Deliver all the scheduled SELs */ + while (ctx->cursor->type == scenario_sel) { + ctx->sel.fn(ctx->cursor->s.bmc_state, ctx->sel.context); + ctx->cursor++; + } +} + +int ipmi_sel_register(uint8_t oem_cmd __attribute__((unused)), + void (*fn)(uint8_t data, void *context), + void *context) +{ + ipmi_msg_ctx.sel.fn = fn; + ipmi_msg_ctx.sel.context = context; + + return 0; +} + +int64_t lpc_write(enum OpalLPCAddressType addr_type __attribute__((unused)), + uint32_t addr __attribute__((unused)), + uint32_t data __attribute__((unused)), + uint32_t sz) +{ + assert(sz != 0); + return 0; +} + +int64_t lpc_read(enum OpalLPCAddressType addr_type __attribute__((unused)), + uint32_t addr __attribute__((unused)), uint32_t *data, + uint32_t sz) +{ + memset(data, 0xaa, sz); + + return 0; +} + +static bool lpc_read_success(const uint8_t *buf, size_t len) +{ + if (len < 64) { + while (len--) + if (*buf++ != 0xaa) + return false; + return true; + } + + for (int i = 0; i < 64; i++) + if (buf[i] != 0xaa) + return false; + + return !memcmp(buf, buf + 64, len - 64); +} + +/* Commonly used messages */ + +static const struct scenario_event hiomap_ack_call = { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_ACK, + .seq = 1, + .args = { + [0] = HIOMAP_E_ACK_MASK, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_ACK, + .seq = 1, + }, + }, +}; + +static const struct scenario_event hiomap_get_info_call = { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_GET_INFO, + .seq = 2, + .args = { + [0] = HIOMAP_V2, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_GET_INFO, + .seq = 2, + .args = { + [0] = HIOMAP_V2, + [1] = 12, + [2] = 8, [3] = 0, + }, + }, + }, +}; + +static const struct scenario_event hiomap_get_flash_info_call = { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_GET_FLASH_INFO, + .seq = 3, + .args = { + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = 
HIOMAP_C_GET_FLASH_INFO, + .seq = 3, + .args = { + [0] = 0x00, [1] = 0x20, + [2] = 0x01, [3] = 0x00, + }, + }, + }, +}; + +static const struct scenario_event +hiomap_create_read_window_qs0l1_rs0l1_call = { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_CREATE_READ_WINDOW, + .seq = 4, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x01, [3] = 0x00, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_CREATE_READ_WINDOW, + .seq = 4, + .args = { + [0] = 0xff, [1] = 0x0f, + [2] = 0x01, [3] = 0x00, + [4] = 0x00, [5] = 0x00, + }, + }, + }, +}; + +static const struct scenario_event +hiomap_create_read_window_qs0l2_rs0l1_call = { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_CREATE_READ_WINDOW, + .seq = 4, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x02, [3] = 0x00, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_CREATE_READ_WINDOW, + .seq = 4, + .args = { + [0] = 0xff, [1] = 0x0f, + [2] = 0x01, [3] = 0x00, + [4] = 0x00, [5] = 0x00, + }, + }, + }, +}; + +static const struct scenario_event +hiomap_create_write_window_qs0l1_rs0l1_call = { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_CREATE_WRITE_WINDOW, + .seq = 4, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x01, [3] = 0x00, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_CREATE_WRITE_WINDOW, + .seq = 4, + .args = { + [0] = 0xff, [1] = 0x0f, + [2] = 0x01, [3] = 0x00, + [4] = 0x00, [5] = 0x00, + }, + }, + }, +}; + +static const struct scenario_event hiomap_mark_dirty_qs0l1_call = { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_MARK_DIRTY, + .seq = 5, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x01, [3] = 0x00, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_MARK_DIRTY, + .seq = 5, + }, + }, +}; + +static const struct scenario_event +hiomap_create_write_window_qs0l2_rs0l1_call = { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_CREATE_WRITE_WINDOW, + .seq = 4, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x02, [3] = 0x00, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_CREATE_WRITE_WINDOW, + .seq = 4, + .args = { + [0] = 0xff, [1] = 0x0f, + [2] = 0x01, [3] = 0x00, + [4] = 0x00, [5] = 0x00, + }, + }, + }, +}; + +static const struct scenario_event hiomap_flush_call = { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_FLUSH, + .seq = 6, + }, + .resp = { + .cmd = HIOMAP_C_FLUSH, + .seq = 6, + }, + }, +}; + +static const struct scenario_event +hiomap_create_write_window_qs1l1_rs1l1_call = { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_CREATE_WRITE_WINDOW, + .seq = 7, + .args = { + [0] = 0x01, [1] = 0x00, + [2] = 0x01, [3] = 0x00, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_CREATE_WRITE_WINDOW, + .seq = 7, + .args = { + [0] = 0xfe, [1] = 0x0f, + [2] = 0x01, [3] = 0x00, + [4] = 0x01, [5] = 0x00, + }, + }, + }, +}; + +static const struct scenario_event hiomap_erase_qs0l1_call = { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_ERASE, + .seq = 5, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x01, [3] = 0x00, + }, + }, + .resp = { + .cmd = HIOMAP_C_ERASE, + .seq = 5, + }, + }, +}; + +static const struct scenario_event hiomap_reset_call_seq_4 = { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_RESET, + .seq = 4, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_RESET, + .seq = 4, + }, + }, +}; + +static const struct scenario_event hiomap_reset_call_seq_5 = { + .type = scenario_cmd, + .c = { + .req = { + 
.cmd = HIOMAP_C_RESET, + .seq = 5, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_RESET, + .seq = 5, + }, + }, +}; + +static const struct scenario_event hiomap_reset_call_seq_6 = { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_RESET, + .seq = 6, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_RESET, + .seq = 6, + }, + }, +}; + +static const struct scenario_event hiomap_reset_call_seq_7 = { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_RESET, + .seq = 7, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_RESET, + .seq = 7, + }, + }, +}; + +static const struct scenario_event hiomap_reset_call_seq_9 = { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_RESET, + .seq = 9, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_RESET, + .seq = 9, + }, + }, +}; + +static const struct scenario_event hiomap_reset_call_seq_a = { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_RESET, + .seq = 0xa, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_RESET, + .seq = 0xa, + }, + }, +}; + +static const struct scenario_event scenario_hiomap_init[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_4, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_init(void) +{ + struct blocklevel_device *bl; + + scenario_enter(scenario_hiomap_init); + assert(!ipmi_hiomap_init(&bl)); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct scenario_event scenario_hiomap_event_daemon_ready[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { .type = scenario_sel, .s = { .bmc_state = HIOMAP_E_DAEMON_READY } }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_4, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_event_daemon_ready(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + + scenario_enter(scenario_hiomap_event_daemon_ready); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + assert(ctx->bmc_state == HIOMAP_E_DAEMON_READY); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct scenario_event scenario_hiomap_event_daemon_stopped[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { .type = scenario_sel, .s = { .bmc_state = HIOMAP_E_DAEMON_READY } }, + { .type = scenario_sel, .s = { .bmc_state = HIOMAP_E_PROTOCOL_RESET } }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_4, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_event_daemon_stopped(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + + scenario_enter(scenario_hiomap_event_daemon_stopped); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + assert(ctx->bmc_state == HIOMAP_E_PROTOCOL_RESET); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct scenario_event scenario_hiomap_event_daemon_restarted[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { .type = 
scenario_sel, .s = { .bmc_state = HIOMAP_E_DAEMON_READY } }, + { .type = scenario_sel, .s = { .bmc_state = HIOMAP_E_PROTOCOL_RESET } }, + { .type = scenario_sel, .s = { .bmc_state = HIOMAP_E_DAEMON_READY } }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_4, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_event_daemon_restarted(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + + scenario_enter(scenario_hiomap_event_daemon_restarted); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + assert(ctx->bmc_state == (HIOMAP_E_DAEMON_READY | HIOMAP_E_PROTOCOL_RESET)); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct scenario_event +scenario_hiomap_event_daemon_lost_flash_control[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { .type = scenario_sel, .s = { .bmc_state = HIOMAP_E_DAEMON_READY } }, + { + .type = scenario_sel, + .s = { + .bmc_state = (HIOMAP_E_DAEMON_READY + | HIOMAP_E_FLASH_LOST), + } + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_5, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_event_daemon_lost_flash_control(void) +{ + struct blocklevel_device *bl; + size_t len = 2 * (1 << 12); + void *buf; + + buf = malloc(len); + assert(buf); + + scenario_enter(scenario_hiomap_event_daemon_lost_flash_control); + assert(!ipmi_hiomap_init(&bl)); + assert(bl->read(bl, 0, buf, len) == FLASH_ERR_AGAIN); + ipmi_hiomap_exit(bl); + scenario_exit(); + + free(buf); +} + +static const struct scenario_event +scenario_hiomap_event_daemon_regained_flash_control_dirty[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { .type = scenario_sel, .s = { .bmc_state = HIOMAP_E_DAEMON_READY } }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_CREATE_READ_WINDOW, + .seq = 4, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x02, [3] = 0x00, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_CREATE_READ_WINDOW, + .seq = 4, + .args = { + [0] = 0xfe, [1] = 0x0f, + [2] = 0x02, [3] = 0x00, + [4] = 0x00, [5] = 0x00, + }, + }, + }, + }, + { + .type = scenario_delay + }, + { + .type = scenario_sel, + .s = { + .bmc_state = (HIOMAP_E_DAEMON_READY + | HIOMAP_E_FLASH_LOST), + } + }, + { + .type = scenario_sel, + .s = { + .bmc_state = (HIOMAP_E_DAEMON_READY + | HIOMAP_E_WINDOW_RESET), + } + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_ACK, + .seq = 5, + .args = { [0] = HIOMAP_E_WINDOW_RESET }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_ACK, + .seq = 5, + } + } + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_CREATE_READ_WINDOW, + .seq = 6, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x02, [3] = 0x00, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_CREATE_READ_WINDOW, + .seq = 6, + .args = { + [0] = 0xfe, [1] = 0x0f, + [2] = 0x02, [3] = 0x00, + [4] = 0x00, [5] = 0x00, + }, + }, + }, + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_7, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_event_daemon_regained_flash_control_dirty(void) +{ + struct blocklevel_device *bl; + size_t len = 2 * (1 << 12); + void *buf; + + buf = malloc(len); + assert(buf); + + 
scenario_enter(scenario_hiomap_event_daemon_regained_flash_control_dirty); + assert(!ipmi_hiomap_init(&bl)); + assert(!bl->read(bl, 0, buf, len)); + scenario_advance(); + assert(!bl->read(bl, 0, buf, len)); + ipmi_hiomap_exit(bl); + scenario_exit(); + + free(buf); +} + +static const struct scenario_event scenario_hiomap_protocol_reset_recovery[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { .type = scenario_sel, .s = { .bmc_state = HIOMAP_E_DAEMON_READY } }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_CREATE_READ_WINDOW, + .seq = 4, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x02, [3] = 0x00, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_CREATE_READ_WINDOW, + .seq = 4, + .args = { + [0] = 0xfe, [1] = 0x0f, + [2] = 0x02, [3] = 0x00, + [4] = 0x00, [5] = 0x00, + }, + }, + }, + }, + { + .type = scenario_delay + }, + { + .type = scenario_sel, + .s = { .bmc_state = HIOMAP_E_PROTOCOL_RESET, } + }, + { + .type = scenario_sel, + .s = { .bmc_state = HIOMAP_E_DAEMON_READY, } + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_ACK, + .seq = 5, + .args = { [0] = HIOMAP_E_PROTOCOL_RESET }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_ACK, + .seq = 5, + } + } + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_GET_INFO, + .seq = 6, + .args = { + [0] = HIOMAP_V2, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_GET_INFO, + .seq = 6, + .args = { + [0] = HIOMAP_V2, + [1] = 12, + [2] = 8, [3] = 0, + }, + }, + }, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_GET_FLASH_INFO, + .seq = 7, + .args = { + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_GET_FLASH_INFO, + .seq = 7, + .args = { + [0] = 0x00, [1] = 0x20, + [2] = 0x01, [3] = 0x00, + }, + }, + }, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_CREATE_READ_WINDOW, + .seq = 8, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x02, [3] = 0x00, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_CREATE_READ_WINDOW, + .seq = 8, + .args = { + [0] = 0xfe, [1] = 0x0f, + [2] = 0x02, [3] = 0x00, + [4] = 0x00, [5] = 0x00, + }, + }, + }, + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_9, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_protocol_reset_recovery(void) +{ + struct blocklevel_device *bl; + size_t len = 2 * (1 << 12); + void *buf; + + buf = malloc(len); + assert(buf); + + scenario_enter(scenario_hiomap_protocol_reset_recovery); + assert(!ipmi_hiomap_init(&bl)); + assert(!bl->read(bl, 0, buf, len)); + scenario_advance(); + assert(!bl->read(bl, 0, buf, len)); + ipmi_hiomap_exit(bl); + scenario_exit(); + + free(buf); +} + +static const struct scenario_event +scenario_hiomap_protocol_read_one_block[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_event_p, + .p = &hiomap_create_read_window_qs0l1_rs0l1_call, + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_5, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_protocol_read_one_block(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + uint8_t *buf; + size_t len; + + scenario_enter(scenario_hiomap_protocol_read_one_block); + assert(!ipmi_hiomap_init(&bl)); + ctx = 
container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + buf = calloc(1, len); + assert(buf); + assert(!bl->read(bl, 0, buf, len)); + assert(lpc_read_success(buf, len)); + free(buf); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static void test_hiomap_protocol_read_one_byte(void) +{ + struct blocklevel_device *bl; + uint8_t *buf; + size_t len; + + scenario_enter(scenario_hiomap_protocol_read_one_block); + assert(!ipmi_hiomap_init(&bl)); + len = 1; + buf = calloc(1, len); + assert(buf); + assert(!bl->read(bl, 0, buf, len)); + assert(lpc_read_success(buf, len)); + free(buf); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct scenario_event +scenario_hiomap_protocol_read_two_blocks[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_event_p, + .p = &hiomap_create_read_window_qs0l2_rs0l1_call, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_CREATE_READ_WINDOW, + .seq = 5, + .args = { + [0] = 0x01, [1] = 0x00, + [2] = 0x01, [3] = 0x00, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_CREATE_READ_WINDOW, + .seq = 5, + .args = { + [0] = 0xfe, [1] = 0x0f, + [2] = 0x01, [3] = 0x00, + [4] = 0x01, [5] = 0x00, + }, + }, + }, + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_6, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_protocol_read_two_blocks(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + uint8_t *buf; + size_t len; + + scenario_enter(scenario_hiomap_protocol_read_two_blocks); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 2 * (1 << ctx->block_size_shift); + buf = calloc(1, len); + assert(buf); + assert(!bl->read(bl, 0, buf, len)); + assert(lpc_read_success(buf, len)); + free(buf); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static void test_hiomap_protocol_read_1block_1byte(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + uint8_t *buf; + size_t len; + + scenario_enter(scenario_hiomap_protocol_read_two_blocks); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = (1 << ctx->block_size_shift) + 1; + buf = calloc(1, len); + assert(buf); + assert(!bl->read(bl, 0, buf, len)); + assert(lpc_read_success(buf, len)); + free(buf); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct scenario_event +scenario_hiomap_protocol_read_one_block_twice[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_event_p, + .p = &hiomap_create_read_window_qs0l1_rs0l1_call, + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_5, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_protocol_read_one_block_twice(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + uint8_t *buf; + size_t len; + + scenario_enter(scenario_hiomap_protocol_read_one_block_twice); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + buf = calloc(1, len); + assert(buf); + assert(!bl->read(bl, 0, buf, len)); + assert(!bl->read(bl, 0, buf, len)); + free(buf); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct scenario_event +scenario_hiomap_protocol_event_before_action[] = 
{ + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_sel, + .s = { + .bmc_state = HIOMAP_E_DAEMON_READY | + HIOMAP_E_FLASH_LOST, + } + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_5, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_protocol_event_before_read(void) +{ + struct blocklevel_device *bl; + char buf; + int rc; + + scenario_enter(scenario_hiomap_protocol_event_before_action); + assert(!ipmi_hiomap_init(&bl)); + rc = bl->read(bl, 0, &buf, sizeof(buf)); + assert(rc == FLASH_ERR_AGAIN); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct scenario_event +scenario_hiomap_protocol_event_during_read[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_event_p, + .p = &hiomap_create_read_window_qs0l1_rs0l1_call, + }, + { + .type = scenario_sel, + .s = { + .bmc_state = HIOMAP_E_DAEMON_READY | + HIOMAP_E_FLASH_LOST, + } + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_5, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_protocol_event_during_read(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + uint8_t *buf; + size_t len; + int rc; + + scenario_enter(scenario_hiomap_protocol_event_during_read); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + buf = calloc(1, len); + assert(buf); + rc = bl->read(bl, 0, buf, len); + assert(rc == FLASH_ERR_AGAIN); + free(buf); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct scenario_event +scenario_hiomap_protocol_write_one_block[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_event_p, + .p = &hiomap_create_write_window_qs0l1_rs0l1_call, + }, + { .type = scenario_event_p, .p = &hiomap_mark_dirty_qs0l1_call, }, + { .type = scenario_event_p, .p = &hiomap_flush_call, }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_7, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_protocol_write_one_block(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + uint8_t *buf; + size_t len; + + scenario_enter(scenario_hiomap_protocol_write_one_block); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + buf = calloc(1, len); + assert(buf); + assert(!bl->write(bl, 0, buf, len)); + free(buf); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static void test_hiomap_protocol_write_one_byte(void) +{ + struct blocklevel_device *bl; + uint8_t *buf; + size_t len; + + scenario_enter(scenario_hiomap_protocol_write_one_block); + assert(!ipmi_hiomap_init(&bl)); + len = 1; + buf = calloc(1, len); + assert(buf); + assert(!bl->write(bl, 0, buf, len)); + free(buf); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct scenario_event +scenario_hiomap_protocol_write_two_blocks[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_event_p, + .p = 
&hiomap_create_write_window_qs0l2_rs0l1_call, + }, + { .type = scenario_event_p, .p = &hiomap_mark_dirty_qs0l1_call, }, + { .type = scenario_event_p, .p = &hiomap_flush_call, }, + { + .type = scenario_event_p, + .p = &hiomap_create_write_window_qs1l1_rs1l1_call, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_MARK_DIRTY, + .seq = 8, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x01, [3] = 0x00, + }, + }, + .resp = { + .cmd = HIOMAP_C_MARK_DIRTY, + .seq = 8, + }, + }, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_FLUSH, + .seq = 9, + }, + .resp = { + .cmd = HIOMAP_C_FLUSH, + .seq = 9, + }, + }, + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_a, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_protocol_write_two_blocks(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + uint8_t *buf; + size_t len; + + scenario_enter(scenario_hiomap_protocol_write_two_blocks); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 2 * (1 << ctx->block_size_shift); + buf = calloc(1, len); + assert(buf); + assert(!bl->write(bl, 0, buf, len)); + free(buf); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static void test_hiomap_protocol_write_1block_1byte(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + uint8_t *buf; + size_t len; + + scenario_enter(scenario_hiomap_protocol_write_two_blocks); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = (1 << ctx->block_size_shift) + 1; + buf = calloc(1, len); + assert(buf); + assert(!bl->write(bl, 0, buf, len)); + free(buf); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct scenario_event +scenario_hiomap_protocol_write_one_block_twice[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_event_p, + .p = &hiomap_create_write_window_qs0l1_rs0l1_call, + }, + { .type = scenario_event_p, .p = &hiomap_mark_dirty_qs0l1_call, }, + { .type = scenario_event_p, .p = &hiomap_flush_call, }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_MARK_DIRTY, + .seq = 7, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x01, [3] = 0x00, + }, + }, + .resp = { + .cmd = HIOMAP_C_MARK_DIRTY, + .seq = 7, + }, + }, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_FLUSH, + .seq = 8, + }, + .resp = { + .cmd = HIOMAP_C_FLUSH, + .seq = 8, + }, + }, + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_9, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_protocol_write_one_block_twice(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + uint8_t *buf; + size_t len; + + scenario_enter(scenario_hiomap_protocol_write_one_block_twice); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + buf = calloc(1, len); + assert(buf); + assert(!bl->write(bl, 0, buf, len)); + assert(!bl->write(bl, 0, buf, len)); + free(buf); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static void test_hiomap_protocol_event_before_write(void) +{ + struct blocklevel_device *bl; + char buf; + int rc; + + scenario_enter(scenario_hiomap_protocol_event_before_action); + assert(!ipmi_hiomap_init(&bl)); + rc = bl->write(bl, 0, &buf, sizeof(buf)); + assert(rc == FLASH_ERR_AGAIN); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct 
scenario_event +scenario_hiomap_protocol_event_during_write[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_event_p, + .p = &hiomap_create_write_window_qs0l1_rs0l1_call, + }, + { + .type = scenario_sel, + .s = { + .bmc_state = HIOMAP_E_DAEMON_READY | + HIOMAP_E_FLASH_LOST, + } + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_6, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_protocol_event_during_write(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + size_t len; + char *buf; + int rc; + + scenario_enter(scenario_hiomap_protocol_event_during_write); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + buf = calloc(1, len); + assert(buf); + rc = bl->write(bl, 0, buf, len); + free(buf); + assert(rc == FLASH_ERR_AGAIN); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct scenario_event +scenario_hiomap_protocol_erase_one_block[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_event_p, + .p = &hiomap_create_write_window_qs0l1_rs0l1_call, + }, + { + .type = scenario_event_p, + .p = &hiomap_erase_qs0l1_call, + }, + { + .type = scenario_event_p, + .p = &hiomap_flush_call, + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_7, }, + SCENARIO_SENTINEL, +}; + +static const struct scenario_event +scenario_hiomap_protocol_erase_two_blocks[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_event_p, + .p = &hiomap_create_write_window_qs0l2_rs0l1_call, + }, + { .type = scenario_event_p, .p = &hiomap_erase_qs0l1_call, }, + { .type = scenario_event_p, .p = &hiomap_flush_call, }, + { + .type = scenario_event_p, + .p = &hiomap_create_write_window_qs1l1_rs1l1_call, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_ERASE, + .seq = 8, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x01, [3] = 0x00, + }, + }, + .resp = { + .cmd = HIOMAP_C_ERASE, + .seq = 8, + }, + }, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_FLUSH, + .seq = 9, + }, + .resp = { + .cmd = HIOMAP_C_FLUSH, + .seq = 9, + }, + }, + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_a, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_protocol_erase_two_blocks(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + size_t len; + + scenario_enter(scenario_hiomap_protocol_erase_two_blocks); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 2 * (1 << ctx->block_size_shift); + assert(!bl->erase(bl, 0, len)); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct scenario_event +scenario_hiomap_protocol_erase_one_block_twice[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_event_p, + .p = &hiomap_create_write_window_qs0l1_rs0l1_call, + }, + { .type = scenario_event_p, .p = &hiomap_erase_qs0l1_call, }, + { .type = scenario_event_p, .p = 
&hiomap_flush_call, }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_ERASE, + .seq = 7, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x01, [3] = 0x00, + }, + }, + .resp = { + .cmd = HIOMAP_C_ERASE, + .seq = 7, + }, + }, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_FLUSH, + .seq = 8, + }, + .resp = { + .cmd = HIOMAP_C_FLUSH, + .seq = 8, + }, + }, + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_9, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_protocol_erase_one_block_twice(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + size_t len; + + scenario_enter(scenario_hiomap_protocol_erase_one_block_twice); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + assert(!bl->erase(bl, 0, len)); + assert(!bl->erase(bl, 0, len)); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static void test_hiomap_protocol_erase_one_block(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + size_t len; + + scenario_enter(scenario_hiomap_protocol_erase_one_block); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + assert(!bl->erase(bl, 0, len)); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static void test_hiomap_protocol_event_before_erase(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + size_t len; + int rc; + + scenario_enter(scenario_hiomap_protocol_event_before_action); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + rc = bl->erase(bl, 0, len); + assert(rc == FLASH_ERR_AGAIN); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct scenario_event +scenario_hiomap_protocol_event_during_erase[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_event_p, + .p = &hiomap_create_write_window_qs0l1_rs0l1_call, + }, + { + .type = scenario_sel, + .s = { + .bmc_state = HIOMAP_E_DAEMON_READY | + HIOMAP_E_FLASH_LOST, + } + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_6, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_protocol_event_during_erase(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + size_t len; + int rc; + + scenario_enter(scenario_hiomap_protocol_event_during_erase); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + rc = bl->erase(bl, 0, len); + assert(rc == FLASH_ERR_AGAIN); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct scenario_event scenario_hiomap_protocol_bad_sequence[] = { + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_ACK, + .seq = 1, + .args = { + [0] = HIOMAP_E_ACK_MASK, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_ACK, + .seq = 0, + }, + }, + }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_protocol_bad_sequence(void) +{ + struct blocklevel_device *bl; + + scenario_enter(scenario_hiomap_protocol_bad_sequence); + assert(ipmi_hiomap_init(&bl) > 0); + scenario_exit(); +} + +static const struct scenario_event scenario_hiomap_protocol_action_error[] = { + { + .type = scenario_cmd, + .c = { + /* Ack is legitimate, but we'll pretend it's invalid */ + .req = { + .cmd = HIOMAP_C_ACK, + .seq = 1, + .args = { [0] = 0x3 }, + 
}, + .cc = IPMI_INVALID_COMMAND_ERR, + .resp = { + .cmd = HIOMAP_C_ACK, + .seq = 1, + }, + }, + }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_protocol_action_error(void) +{ + struct blocklevel_device *bl; + + scenario_enter(scenario_hiomap_protocol_action_error); + assert(ipmi_hiomap_init(&bl) > 0); + scenario_exit(); +} + +static const struct scenario_event +scenario_hiomap_protocol_get_flash_info[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_GET_FLASH_INFO, + .seq = 4, + .args = { + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_GET_FLASH_INFO, + .seq = 4, + .args = { + [0] = 0x00, [1] = 0x20, + [2] = 0x01, [3] = 0x00, + }, + }, + }, + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_5, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_protocol_get_flash_info(void) +{ + struct blocklevel_device *bl; + const char *name; + uint32_t granule; + uint64_t size; + + scenario_enter(scenario_hiomap_protocol_get_flash_info); + assert(!ipmi_hiomap_init(&bl)); + assert(!bl->get_info(bl, &name, &size, &granule)); + assert(!name); + assert(size == (32 * 1024 * 1024)); + assert(granule == (4 * 1024)); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct scenario_event +scenario_hiomap_protocol_persistent_error[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { .type = scenario_sel, .s = { .bmc_state = HIOMAP_E_PROTOCOL_RESET } }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_6, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_protocol_persistent_error(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + char buf; + int rc; + + scenario_enter(scenario_hiomap_protocol_persistent_error); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + assert(ctx->bmc_state == HIOMAP_E_PROTOCOL_RESET); + rc = bl->read(bl, 0, &buf, sizeof(buf)); + assert(rc == FLASH_ERR_DEVICE_GONE); + rc = bl->read(bl, 0, &buf, sizeof(buf)); + assert(rc == FLASH_ERR_DEVICE_GONE); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct scenario_event +scenario_hiomap_get_info_error[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_GET_INFO, + .seq = 2, + .args = { + [0] = HIOMAP_V2, + }, + }, + .cc = IPMI_INVALID_COMMAND_ERR, + }, + }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_get_info_error(void) +{ + struct blocklevel_device *bl; + + scenario_enter(scenario_hiomap_get_info_error); + assert(ipmi_hiomap_init(&bl) > 0); + scenario_exit(); +} + +static const struct scenario_event +scenario_hiomap_get_flash_info_error[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_GET_FLASH_INFO, + .seq = 3, + .args = { + [0] = HIOMAP_V2, + }, + }, + .cc = IPMI_INVALID_COMMAND_ERR, + }, + }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_get_flash_info_error(void) +{ + struct blocklevel_device *bl; + + scenario_enter(scenario_hiomap_get_flash_info_error); + assert(ipmi_hiomap_init(&bl) > 0); + scenario_exit(); +} + +static const struct 
scenario_event +scenario_hiomap_create_read_window_error[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_CREATE_READ_WINDOW, + .seq = 4, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x01, [3] = 0x00, + }, + }, + .cc = IPMI_INVALID_COMMAND_ERR, + }, + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_5, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_create_read_window_error(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + size_t len; + void *buf; + + scenario_enter(scenario_hiomap_create_read_window_error); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + buf = calloc(1, len); + assert(buf); + assert(bl->read(bl, 0, buf, len) > 0); + free(buf); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct scenario_event +scenario_hiomap_create_write_window_error[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_CREATE_WRITE_WINDOW, + .seq = 4, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x01, [3] = 0x00, + }, + }, + .cc = IPMI_INVALID_COMMAND_ERR, + }, + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_5, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_create_write_window_error(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + size_t len; + void *buf; + + scenario_enter(scenario_hiomap_create_write_window_error); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + buf = calloc(1, len); + assert(buf); + assert(bl->write(bl, 0, buf, len) > 0); + free(buf); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct scenario_event scenario_hiomap_mark_dirty_error[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_event_p, + .p = &hiomap_create_write_window_qs0l1_rs0l1_call, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_MARK_DIRTY, + .seq = 5, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x01, [3] = 0x00, + }, + }, + .cc = IPMI_INVALID_COMMAND_ERR, + }, + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_6, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_mark_dirty_error(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + size_t len; + void *buf; + + scenario_enter(scenario_hiomap_mark_dirty_error); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + buf = calloc(1, len); + assert(buf); + assert(bl->write(bl, 0, buf, len) > 0); + free(buf); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct scenario_event scenario_hiomap_flush_error[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_event_p, + .p = &hiomap_create_write_window_qs0l1_rs0l1_call, + }, + { .type = scenario_event_p, .p = 
&hiomap_mark_dirty_qs0l1_call, }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_FLUSH, + .seq = 6, + }, + .cc = IPMI_INVALID_COMMAND_ERR, + }, + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_7, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_flush_error(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + size_t len; + void *buf; + + scenario_enter(scenario_hiomap_flush_error); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + buf = calloc(1, len); + assert(buf); + assert(bl->write(bl, 0, buf, len) > 0); + free(buf); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static void test_hiomap_ack_error(void) +{ + /* Same thing at the moment */ + test_hiomap_protocol_action_error(); +} + +static const struct scenario_event scenario_hiomap_erase_error[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_event_p, + .p = &hiomap_create_write_window_qs0l1_rs0l1_call, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_ERASE, + .seq = 5, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x01, [3] = 0x00, + }, + }, + .cc = IPMI_INVALID_COMMAND_ERR, + }, + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_6, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_erase_error(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + size_t len; + + scenario_enter(scenario_hiomap_erase_error); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + assert(bl->erase(bl, 0, len) > 0); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct scenario_event scenario_hiomap_ack_malformed_small[] = { + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_ACK, + .seq = 1, + .args = { [0] = 0x3 }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp_size = 1 + }, + }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_ack_malformed_small(void) +{ + struct blocklevel_device *bl; + + scenario_enter(scenario_hiomap_ack_malformed_small); + assert(ipmi_hiomap_init(&bl) > 0); + scenario_exit(); +} + +static const struct scenario_event scenario_hiomap_ack_malformed_large[] = { + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_ACK, + .seq = 1, + .args = { [0] = 0x3 }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp_size = 3, + .resp = { + .cmd = HIOMAP_C_ACK, + .seq = 1, + }, + }, + }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_ack_malformed_large(void) +{ + struct blocklevel_device *bl; + + scenario_enter(scenario_hiomap_ack_malformed_large); + assert(ipmi_hiomap_init(&bl) > 0); + scenario_exit(); +} + +static const struct scenario_event +scenario_hiomap_get_info_malformed_small[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_GET_INFO, + .seq = 2, + .args = { [0] = 0x2 }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp_size = 7, + .resp = { + .cmd = HIOMAP_C_GET_INFO, + .seq = 2, + }, + }, + }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_get_info_malformed_small(void) +{ + struct blocklevel_device *bl; + + scenario_enter(scenario_hiomap_get_info_malformed_small); + assert(ipmi_hiomap_init(&bl) > 0); + scenario_exit(); +} + +static const struct scenario_event +scenario_hiomap_get_info_malformed_large[] = { + { .type = 
scenario_event_p, .p = &hiomap_ack_call, }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_GET_INFO, + .seq = 2, + .args = { [0] = 0x2 }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp_size = 9, + .resp = { + .cmd = HIOMAP_C_GET_INFO, + .seq = 2, + }, + }, + }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_get_info_malformed_large(void) +{ + struct blocklevel_device *bl; + + scenario_enter(scenario_hiomap_get_info_malformed_large); + assert(ipmi_hiomap_init(&bl) > 0); + scenario_exit(); +} + +static const struct scenario_event +scenario_hiomap_get_flash_info_malformed_small[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_GET_FLASH_INFO, + .seq = 3, + }, + .cc = IPMI_CC_NO_ERROR, + .resp_size = 5, + .resp = { + .cmd = HIOMAP_C_GET_FLASH_INFO, + .seq = 3, + }, + }, + }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_get_flash_info_malformed_small(void) +{ + struct blocklevel_device *bl; + + scenario_enter(scenario_hiomap_get_flash_info_malformed_small); + assert(ipmi_hiomap_init(&bl) > 0); + scenario_exit(); +} + +static const struct scenario_event +scenario_hiomap_get_flash_info_malformed_large[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_GET_FLASH_INFO, + .seq = 3, + }, + .cc = IPMI_CC_NO_ERROR, + .resp_size = 7, + .resp = { + .cmd = HIOMAP_C_GET_FLASH_INFO, + .seq = 3, + }, + }, + }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_get_flash_info_malformed_large(void) +{ + struct blocklevel_device *bl; + + scenario_enter(scenario_hiomap_get_flash_info_malformed_large); + assert(ipmi_hiomap_init(&bl) > 0); + scenario_exit(); +} + +static const struct scenario_event +scenario_hiomap_create_read_window_malformed_small[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_CREATE_READ_WINDOW, + .seq = 4, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x01, [3] = 0x00, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp_size = 7, + .resp = { + .cmd = HIOMAP_C_CREATE_READ_WINDOW, + .seq = 4, + }, + }, + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_5, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_create_read_window_malformed_small(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + size_t len; + void *buf; + + scenario_enter(scenario_hiomap_create_read_window_malformed_small); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + buf = calloc(1, len); + assert(buf); + assert(bl->read(bl, 0, buf, len) > 0); + free(buf); + ipmi_hiomap_exit(bl); + scenario_exit(); + +} + +static const struct scenario_event +scenario_hiomap_create_read_window_malformed_large[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_CREATE_READ_WINDOW, + .seq = 4, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x01, [3] = 0x00, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp_size = 9, + .resp = { + .cmd = 
HIOMAP_C_CREATE_READ_WINDOW, + .seq = 4, + }, + }, + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_5, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_create_read_window_malformed_large(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + size_t len; + void *buf; + + scenario_enter(scenario_hiomap_create_read_window_malformed_large); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + buf = calloc(1, len); + assert(buf); + assert(bl->read(bl, 0, buf, len) > 0); + free(buf); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct scenario_event +scenario_hiomap_create_write_window_malformed_small[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_CREATE_WRITE_WINDOW, + .seq = 4, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x01, [3] = 0x00, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp_size = 7, + .resp = { + .cmd = HIOMAP_C_CREATE_WRITE_WINDOW, + .seq = 4, + }, + }, + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_5, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_create_write_window_malformed_small(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + size_t len; + void *buf; + + scenario_enter(scenario_hiomap_create_write_window_malformed_small); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + buf = calloc(1, len); + assert(buf); + assert(bl->write(bl, 0, buf, len) > 0); + free(buf); + ipmi_hiomap_exit(bl); + scenario_exit(); + +} + +static const struct scenario_event +scenario_hiomap_create_write_window_malformed_large[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_CREATE_WRITE_WINDOW, + .seq = 4, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x01, [3] = 0x00, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp_size = 9, + .resp = { + .cmd = HIOMAP_C_CREATE_WRITE_WINDOW, + .seq = 4, + }, + }, + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_5, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_create_write_window_malformed_large(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + size_t len; + void *buf; + + scenario_enter(scenario_hiomap_create_write_window_malformed_large); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + buf = calloc(1, len); + assert(buf); + assert(bl->write(bl, 0, buf, len) > 0); + free(buf); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct scenario_event +scenario_hiomap_mark_dirty_malformed_small[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_event_p, + .p = &hiomap_create_write_window_qs0l1_rs0l1_call, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_MARK_DIRTY, + .seq = 5, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x01, [3] = 0x00, + }, + }, + .resp_size = 1, + .resp = { + .cmd = HIOMAP_C_MARK_DIRTY, + .seq = 5, + }, + }, 
+ }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_6, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_mark_dirty_malformed_small(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + size_t len; + void *buf; + + scenario_enter(scenario_hiomap_mark_dirty_malformed_small); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + buf = calloc(1, len); + assert(buf); + assert(bl->write(bl, 0, buf, len) > 0); + free(buf); + ipmi_hiomap_exit(bl); + scenario_exit(); + +} + +static const struct scenario_event +scenario_hiomap_mark_dirty_malformed_large[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_event_p, + .p = &hiomap_create_write_window_qs0l1_rs0l1_call, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_MARK_DIRTY, + .seq = 5, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x01, [3] = 0x00, + }, + }, + .resp_size = 3, + .resp = { + .cmd = HIOMAP_C_MARK_DIRTY, + .seq = 5, + }, + }, + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_6, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_mark_dirty_malformed_large(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + size_t len; + void *buf; + + scenario_enter(scenario_hiomap_mark_dirty_malformed_large); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + buf = calloc(1, len); + assert(buf); + assert(bl->write(bl, 0, buf, len) > 0); + free(buf); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct scenario_event +scenario_hiomap_flush_malformed_small[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_event_p, + .p = &hiomap_create_write_window_qs0l1_rs0l1_call, + }, + { .type = scenario_event_p, .p = &hiomap_mark_dirty_qs0l1_call, }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_FLUSH, + .seq = 6, + }, + .resp_size = 1, + .resp = { + .cmd = HIOMAP_C_FLUSH, + .seq = 6, + }, + }, + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_7, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_flush_malformed_small(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + size_t len; + void *buf; + + scenario_enter(scenario_hiomap_flush_malformed_small); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + buf = calloc(1, len); + assert(buf); + assert(bl->write(bl, 0, buf, len) > 0); + free(buf); + ipmi_hiomap_exit(bl); + scenario_exit(); + +} + +static const struct scenario_event +scenario_hiomap_flush_malformed_large[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_event_p, + .p = &hiomap_create_write_window_qs0l1_rs0l1_call, + }, + { .type = scenario_event_p, .p = &hiomap_mark_dirty_qs0l1_call, }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_FLUSH, + .seq = 6, + }, + .resp_size = 3, + .resp = { + .cmd = HIOMAP_C_FLUSH, + .seq = 6, + }, + }, + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_7, 
}, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_flush_malformed_large(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + size_t len; + void *buf; + + scenario_enter(scenario_hiomap_flush_malformed_large); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + buf = calloc(1, len); + assert(buf); + assert(bl->write(bl, 0, buf, len) > 0); + free(buf); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct scenario_event +scenario_hiomap_erase_malformed_small[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_event_p, + .p = &hiomap_create_write_window_qs0l1_rs0l1_call, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_ERASE, + .seq = 5, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x01, [3] = 0x00, + }, + }, + .resp_size = 1, + .resp = { + .cmd = HIOMAP_C_ERASE, + .seq = 5, + }, + }, + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_6, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_erase_malformed_small(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + size_t len; + + scenario_enter(scenario_hiomap_erase_malformed_small); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + assert(bl->erase(bl, 0, len) > 0); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct scenario_event +scenario_hiomap_erase_malformed_large[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_event_p, + .p = &hiomap_create_write_window_qs0l1_rs0l1_call, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_ERASE, + .seq = 5, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x01, [3] = 0x00, + }, + }, + .resp_size = 3, + .resp = { + .cmd = HIOMAP_C_ERASE, + .seq = 5, + }, + }, + }, + { .type = scenario_event_p, .p = &hiomap_reset_call_seq_6, }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_erase_malformed_large(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + size_t len; + + scenario_enter(scenario_hiomap_erase_malformed_large); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + assert(bl->erase(bl, 0, len) > 0); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +/* Common recovery calls */ + +static const struct scenario_event hiomap_recovery_ack_call = { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_ACK, + .seq = 7, + .args = { + [0] = HIOMAP_E_PROTOCOL_RESET, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_ACK, + .seq = 7, + }, + }, +}; + +static const struct scenario_event hiomap_recovery_get_info_call = { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_GET_INFO, + .seq = 8, + .args = { + [0] = HIOMAP_V2, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_GET_INFO, + .seq = 8, + .args = { + [0] = HIOMAP_V2, + [1] = 12, + [2] = 8, [3] = 0, + }, + }, + }, +}; + +static const struct scenario_event +scenario_hiomap_protocol_recovery_failure_ack[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = 
scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_event_p, + .p = &hiomap_create_write_window_qs0l1_rs0l1_call, + }, + { .type = scenario_event_p, .p = &hiomap_erase_qs0l1_call, }, + { .type = scenario_event_p, .p = &hiomap_flush_call, }, + { .type = scenario_delay }, + { + .type = scenario_sel, + .s = { + .bmc_state = HIOMAP_E_DAEMON_READY | + HIOMAP_E_PROTOCOL_RESET + } + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_ACK, + .seq = 7, + .args = { + [0] = HIOMAP_E_PROTOCOL_RESET, + }, + }, + .cc = IPMI_ERR_UNSPECIFIED, + }, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_ACK, + .seq = 8, + .args = { + [0] = HIOMAP_E_PROTOCOL_RESET, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_ACK, + .seq = 8, + }, + }, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_GET_INFO, + .seq = 9, + .args = { + [0] = HIOMAP_V2, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_GET_INFO, + .seq = 9, + .args = { + [0] = HIOMAP_V2, + [1] = 12, + [2] = 8, [3] = 0, + }, + }, + }, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_GET_FLASH_INFO, + .seq = 10, + .args = { + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_GET_FLASH_INFO, + .seq = 10, + .args = { + [0] = 0x00, [1] = 0x20, + [2] = 0x01, [3] = 0x00, + }, + }, + }, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_CREATE_WRITE_WINDOW, + .seq = 11, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x01, [3] = 0x00, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_CREATE_WRITE_WINDOW, + .seq = 11, + .args = { + [0] = 0xff, [1] = 0x0f, + [2] = 0x01, [3] = 0x00, + [4] = 0x00, [5] = 0x00, + }, + }, + }, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_ERASE, + .seq = 12, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x01, [3] = 0x00, + }, + }, + .resp = { + .cmd = HIOMAP_C_ERASE, + .seq = 12, + }, + }, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_FLUSH, + .seq = 13, + }, + .resp = { + .cmd = HIOMAP_C_FLUSH, + .seq = 13, + }, + }, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_RESET, + .seq = 14, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_RESET, + .seq = 14, + }, + }, + }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_protocol_recovery_failure_ack(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + size_t len; + + scenario_enter(scenario_hiomap_protocol_recovery_failure_ack); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + /* + * We're erasing the same block 3 times - it's irrelevant, we're just + * trying to manipulate window state + */ + assert(!bl->erase(bl, 0, len)); + scenario_advance(); + assert(bl->erase(bl, 0, len) > 0); + assert(!bl->erase(bl, 0, len)); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct scenario_event +scenario_hiomap_protocol_recovery_failure_get_info[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_event_p, + .p = &hiomap_create_write_window_qs0l1_rs0l1_call, + }, + { .type = scenario_event_p, .p = &hiomap_erase_qs0l1_call, }, + { .type = scenario_event_p, .p = &hiomap_flush_call, }, + { .type = scenario_delay }, + { + .type = scenario_sel, + .s = { + .bmc_state = 
HIOMAP_E_DAEMON_READY | + HIOMAP_E_PROTOCOL_RESET + } + }, + { .type = scenario_event_p, .p = &hiomap_recovery_ack_call, }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_GET_INFO, + .seq = 8, + .args = { + [0] = HIOMAP_V2, + }, + }, + .cc = IPMI_ERR_UNSPECIFIED, + }, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_ACK, + .seq = 9, + .args = { + [0] = HIOMAP_E_PROTOCOL_RESET, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_ACK, + .seq = 9, + }, + }, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_GET_INFO, + .seq = 10, + .args = { + [0] = HIOMAP_V2, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_GET_INFO, + .seq = 10, + .args = { + [0] = HIOMAP_V2, + [1] = 12, + [2] = 8, [3] = 0, + }, + }, + }, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_GET_FLASH_INFO, + .seq = 11, + .args = { + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_GET_FLASH_INFO, + .seq = 11, + .args = { + [0] = 0x00, [1] = 0x20, + [2] = 0x01, [3] = 0x00, + }, + }, + }, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_CREATE_WRITE_WINDOW, + .seq = 12, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x01, [3] = 0x00, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_CREATE_WRITE_WINDOW, + .seq = 12, + .args = { + [0] = 0xff, [1] = 0x0f, + [2] = 0x01, [3] = 0x00, + [4] = 0x00, [5] = 0x00, + }, + }, + }, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_ERASE, + .seq = 13, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x01, [3] = 0x00, + }, + }, + .resp = { + .cmd = HIOMAP_C_ERASE, + .seq = 13, + }, + }, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_FLUSH, + .seq = 14, + }, + .resp = { + .cmd = HIOMAP_C_FLUSH, + .seq = 14, + }, + }, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_RESET, + .seq = 15, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_RESET, + .seq = 15, + }, + }, + }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_protocol_recovery_failure_get_info(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + size_t len; + + scenario_enter(scenario_hiomap_protocol_recovery_failure_get_info); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + /* + * We're erasing the same block 3 times - it's irrelevant, we're just + * trying to manipulate window state + */ + assert(!bl->erase(bl, 0, len)); + scenario_advance(); + assert(bl->erase(bl, 0, len) > 0); + assert(!bl->erase(bl, 0, len)); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +static const struct scenario_event +scenario_hiomap_protocol_recovery_failure_get_flash_info[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, }, + { + .type = scenario_event_p, + .p = &hiomap_create_write_window_qs0l1_rs0l1_call, + }, + { .type = scenario_event_p, .p = &hiomap_erase_qs0l1_call, }, + { .type = scenario_event_p, .p = &hiomap_flush_call, }, + { .type = scenario_delay }, + { + .type = scenario_sel, + .s = { + .bmc_state = HIOMAP_E_DAEMON_READY | + HIOMAP_E_PROTOCOL_RESET + } + }, + { .type = scenario_event_p, .p = &hiomap_recovery_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_recovery_get_info_call}, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_GET_FLASH_INFO, + .seq = 9, + 
}, + .cc = IPMI_ERR_UNSPECIFIED, + }, + + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_ACK, + .seq = 10, + .args = { + [0] = HIOMAP_E_PROTOCOL_RESET, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_ACK, + .seq = 10, + }, + }, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_GET_INFO, + .seq = 11, + .args = { + [0] = HIOMAP_V2, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_GET_INFO, + .seq = 11, + .args = { + [0] = HIOMAP_V2, + [1] = 12, + [2] = 8, [3] = 0, + }, + }, + }, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_GET_FLASH_INFO, + .seq = 12, + .args = { + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_GET_FLASH_INFO, + .seq = 12, + .args = { + [0] = 0x00, [1] = 0x20, + [2] = 0x01, [3] = 0x00, + }, + }, + }, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_CREATE_WRITE_WINDOW, + .seq = 13, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x01, [3] = 0x00, + }, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_CREATE_WRITE_WINDOW, + .seq = 13, + .args = { + [0] = 0xff, [1] = 0x0f, + [2] = 0x01, [3] = 0x00, + [4] = 0x00, [5] = 0x00, + }, + }, + }, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_ERASE, + .seq = 14, + .args = { + [0] = 0x00, [1] = 0x00, + [2] = 0x01, [3] = 0x00, + }, + }, + .resp = { + .cmd = HIOMAP_C_ERASE, + .seq = 14, + }, + }, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_FLUSH, + .seq = 15, + }, + .resp = { + .cmd = HIOMAP_C_FLUSH, + .seq = 15, + }, + }, + }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_RESET, + .seq = 16, + }, + .cc = IPMI_CC_NO_ERROR, + .resp = { + .cmd = HIOMAP_C_RESET, + .seq = 16, + }, + }, + }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_protocol_recovery_failure_get_flash_info(void) +{ + struct blocklevel_device *bl; + struct ipmi_hiomap *ctx; + size_t len; + + scenario_enter(scenario_hiomap_protocol_recovery_failure_get_flash_info); + assert(!ipmi_hiomap_init(&bl)); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + /* + * We're erasing the same block 3 times - it's irrelevant, we're just + * trying to manipulate window state + */ + assert(!bl->erase(bl, 0, len)); + scenario_advance(); + ctx = container_of(bl, struct ipmi_hiomap, bl); + len = 1 << ctx->block_size_shift; + assert(bl->erase(bl, 0, len) > 0); + assert(!bl->erase(bl, 0, len)); + ipmi_hiomap_exit(bl); + scenario_exit(); +} + +struct test_case { + const char *name; + void (*fn)(void); +}; + +#define TEST_CASE(x) { #x, x } + +struct test_case test_cases[] = { + TEST_CASE(test_hiomap_init), + TEST_CASE(test_hiomap_event_daemon_ready), + TEST_CASE(test_hiomap_event_daemon_stopped), + TEST_CASE(test_hiomap_event_daemon_restarted), + TEST_CASE(test_hiomap_event_daemon_lost_flash_control), + TEST_CASE(test_hiomap_event_daemon_regained_flash_control_dirty), + TEST_CASE(test_hiomap_protocol_reset_recovery), + TEST_CASE(test_hiomap_protocol_read_one_block), + TEST_CASE(test_hiomap_protocol_read_one_byte), + TEST_CASE(test_hiomap_protocol_read_two_blocks), + TEST_CASE(test_hiomap_protocol_read_1block_1byte), + TEST_CASE(test_hiomap_protocol_read_one_block_twice), + TEST_CASE(test_hiomap_protocol_event_before_read), + TEST_CASE(test_hiomap_protocol_event_during_read), + TEST_CASE(test_hiomap_protocol_write_one_block), + TEST_CASE(test_hiomap_protocol_write_one_byte), + TEST_CASE(test_hiomap_protocol_write_two_blocks), + 
TEST_CASE(test_hiomap_protocol_write_1block_1byte), + TEST_CASE(test_hiomap_protocol_write_one_block_twice), + TEST_CASE(test_hiomap_protocol_event_before_write), + TEST_CASE(test_hiomap_protocol_event_during_write), + TEST_CASE(test_hiomap_protocol_erase_one_block), + TEST_CASE(test_hiomap_protocol_erase_two_blocks), + TEST_CASE(test_hiomap_protocol_erase_one_block_twice), + TEST_CASE(test_hiomap_protocol_event_before_erase), + TEST_CASE(test_hiomap_protocol_event_during_erase), + TEST_CASE(test_hiomap_protocol_bad_sequence), + TEST_CASE(test_hiomap_protocol_action_error), + TEST_CASE(test_hiomap_protocol_persistent_error), + TEST_CASE(test_hiomap_protocol_get_flash_info), + TEST_CASE(test_hiomap_get_info_error), + TEST_CASE(test_hiomap_get_flash_info_error), + TEST_CASE(test_hiomap_create_read_window_error), + TEST_CASE(test_hiomap_create_write_window_error), + TEST_CASE(test_hiomap_mark_dirty_error), + TEST_CASE(test_hiomap_flush_error), + TEST_CASE(test_hiomap_ack_error), + TEST_CASE(test_hiomap_erase_error), + TEST_CASE(test_hiomap_ack_malformed_small), + TEST_CASE(test_hiomap_ack_malformed_large), + TEST_CASE(test_hiomap_get_info_malformed_small), + TEST_CASE(test_hiomap_get_info_malformed_large), + TEST_CASE(test_hiomap_get_flash_info_malformed_small), + TEST_CASE(test_hiomap_get_flash_info_malformed_large), + TEST_CASE(test_hiomap_create_read_window_malformed_small), + TEST_CASE(test_hiomap_create_read_window_malformed_large), + TEST_CASE(test_hiomap_create_write_window_malformed_small), + TEST_CASE(test_hiomap_create_write_window_malformed_large), + TEST_CASE(test_hiomap_mark_dirty_malformed_small), + TEST_CASE(test_hiomap_mark_dirty_malformed_large), + TEST_CASE(test_hiomap_flush_malformed_small), + TEST_CASE(test_hiomap_flush_malformed_large), + TEST_CASE(test_hiomap_erase_malformed_small), + TEST_CASE(test_hiomap_erase_malformed_large), + TEST_CASE(test_hiomap_protocol_recovery_failure_ack), + TEST_CASE(test_hiomap_protocol_recovery_failure_get_info), + TEST_CASE(test_hiomap_protocol_recovery_failure_get_flash_info), + { NULL, NULL }, +}; + +int main(void) +{ + struct test_case *tc = &test_cases[0]; + + do { + printf("%s\n", tc->name); + tc->fn(); + printf("\n"); + } while ((++tc)->fn); + + return 0; +} diff --git a/roms/skiboot/libflash/test/test-mbox.c b/roms/skiboot/libflash/test/test-mbox.c new file mode 100644 index 000000000..260a3c7d4 --- /dev/null +++ b/roms/skiboot/libflash/test/test-mbox.c @@ -0,0 +1,333 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* Copyright 2017-2018 IBM Corp. */ + +#include <stdio.h> +#include <stdlib.h> +#include <stdint.h> +#include <string.h> +#include <stdarg.h> + +#include <libflash/libflash.h> +#include <libflash/libflash-priv.h> + +#include "stubs.h" +#include "mbox-server.h" + +#define zalloc(n) calloc(1, n) +#define __unused __attribute__((unused)) + +#undef pr_fmt + +void mbox_init(void) +{ +} + +#include "../libflash.c" +#include "../mbox-flash.c" +#include "../ecc.c" +#include "../blocklevel.c" + +#undef pr_fmt +#define pr_fmt(fmt) "MBOX-PROXY: " fmt + +/* client interface */ + +#include "../../include/lpc-mbox.h" + +#define ERR(...) 
FL_DBG(__VA_ARGS__) + +static int run_flash_test(struct blocklevel_device *bl) +{ + struct mbox_flash_data *mbox_flash; + char hello[] = "Hello World"; + uint32_t erase_granule; + uint64_t total_size; + const char *name; + uint16_t *test; + char *tmp; + int i, rc; + + mbox_flash = container_of(bl, struct mbox_flash_data, bl); + + /* + * Do something first so that, if the interface has been reset, the + * reset gets handled before we check versions + */ + rc = blocklevel_get_info(bl, &name, &total_size, &erase_granule); + if (rc) { + ERR("blocklevel_get_info() failed with err %d\n", rc); + return 1; + } + if (total_size != mbox_server_total_size()) { + ERR("Total flash size is incorrect: 0x%08lx vs 0x%08x\n", + total_size, mbox_server_total_size()); + return 1; + } + if (erase_granule != mbox_server_erase_granule()) { + ERR("Erase granule is incorrect 0x%08x vs 0x%08x\n", + erase_granule, mbox_server_erase_granule()); + return 1; + } + + + /* Sanity check that mbox_flash has initialised correctly */ + if (mbox_flash->version != mbox_server_version()) { + ERR("MBOX Flash didn't agree with the server version\n"); + return 1; + } + if (mbox_flash->version == 1 && mbox_flash->shift != 12) { + ERR("MBOX Flash version 1 isn't using a 4K shift\n"); + return 1; + } + + mbox_server_memset(0xff); + + test = calloc(erase_granule * 20, 1); + + /* Make up a test pattern */ + for (i = 0; i < erase_granule * 10; i++) + test[i] = i; + + /* Write the test pattern at 0 and again at erase_granule * 20 */ + printf("Writing test patterns...\n"); + rc = blocklevel_write(bl, 0, test, erase_granule * 10); + if (rc) { + ERR("blocklevel_write(0, erase_granule * 10) failed with err %d\n", rc); + return 1; + } + rc = blocklevel_write(bl, erase_granule * 20, test, erase_granule * 10); + if (rc) { + ERR("blocklevel_write(erase_granule * 20, erase_granule * 10) failed with err %d\n", rc); + return 1; + } + + if (mbox_server_memcmp(0, test, erase_granule * 10)) { + ERR("Test pattern mismatch !\n"); + return 1; + } + + /* Write "Hello World" straddling the block boundary at erase_granule * 10 */ + printf("Writing test string...\n"); + rc = blocklevel_write(bl, (erase_granule * 10) - 8, hello, sizeof(hello)); + if (rc) { + ERR("blocklevel_write((erase_granule * 10) - 8, %s, %lu) failed with err %d\n", + hello, sizeof(hello), rc); + return 1; + } + + /* Check result */ + if (mbox_server_memcmp((erase_granule * 10) - 8, hello, sizeof(hello))) { + ERR("Test string mismatch!\n"); + return 1; + } + + /* The erase granule is never as small as 0x50, so this unaligned erase shouldn't succeed */ + rc = blocklevel_erase(bl, 0, 0x50); + if (!rc) { + ERR("blocklevel_erase(0, 0x50) didn't fail!\n"); + return 1; + } + + /* Check it didn't silently erase */ + if (mbox_server_memcmp(0, test, (erase_granule * 10) - 8)) { + ERR("Test pattern mismatch !\n"); + return 1; + } + + /* + * For v1 protocol this should NOT call MARK_WRITE_ERASED!
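+ * (MARK_WRITE_ERASED was only introduced in version 2 of the protocol)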
+ * The server will call exit(1) if it gets a MARK_WRITE_ERASED + * while version == 1 + */ + rc = blocklevel_erase(bl, 0, erase_granule); + if (rc) { + ERR("blocklevel_erase(0, erase_granule) failed with err %d\n", rc); + return 1; + } + + /* + * Version 1 doesn't specify that the buffer actually becomes 0xff. + * It is up to the daemon to do what it wants; there are + * implementations that do nothing, yet writes to the same region + * still work fine. + */ + + /* This check is important for v2 */ + /* Check stuff got erased */ + tmp = malloc(erase_granule * 2); + if (!tmp) { + ERR("malloc failed\n"); + return 1; + } + if (mbox_server_version() > 1) { + memset(tmp, 0xff, erase_granule); + if (mbox_server_memcmp(0, tmp, erase_granule)) { + ERR("Buffer not erased\n"); + rc = 1; + goto out; + } + } + + /* Read beyond the end of flash */ + rc = blocklevel_read(bl, total_size, tmp, 0x1000); + if (!rc) { + ERR("blocklevel_read(total_size, 0x1000) (read beyond the end) succeeded\n"); + goto out; + } + + /* Test some simple write/read cases, avoid first page */ + rc = blocklevel_write(bl, erase_granule * 2, test, erase_granule / 2); + if (rc) { + ERR("blocklevel_write(erase_granule * 2, erase_granule / 2) failed with err %d\n", rc); + goto out; + } + rc = blocklevel_write(bl, erase_granule * 2 + erase_granule / 2, test, erase_granule / 2); + if (rc) { + ERR("blocklevel_write(erase_granule * 2 + erase_granule / 2, erase_granule / 2) failed with err %d\n", rc); + goto out; + } + + rc = mbox_server_memcmp(erase_granule * 2, test, erase_granule / 2); + if (rc) { + ERR("%s:%d mbox_server_memcmp miscompare\n", __FILE__, __LINE__); + goto out; + } + rc = mbox_server_memcmp(erase_granule * 2 + erase_granule / 2, test, erase_granule / 2); + if (rc) { + ERR("%s:%d mbox_server_memcmp miscompare\n", __FILE__, __LINE__); + goto out; + } + + /* Great, so the writes made it; can we read them back? Do it in + * four small reads */ + for (i = 0; i < 4; i++) { + rc = blocklevel_read(bl, erase_granule * 2 + (i * erase_granule / 4), tmp + (i * erase_granule / 4), erase_granule / 4); + if (rc) { + ERR("blocklevel_read(0x%08x, erase_granule / 4) failed with err %d\n", + 2 * erase_granule + (i * erase_granule / 4), rc); + goto out; + } + } + rc = memcmp(test, tmp, erase_granule / 2); + if (rc) { + ERR("%s:%d read back miscompare\n", __FILE__, __LINE__); + goto out; + } + rc = memcmp(test, tmp + erase_granule / 2, erase_granule / 2); + if (rc) { + ERR("%s:%d read back miscompare\n", __FILE__, __LINE__); + goto out; + } + + /* + * Make sure we didn't corrupt other stuff, and also make sure one + * blocklevel call can read across two windows + */ + for (i = 3; i < 9; i = i + 2) { + printf("i:%d erase: 0x%08x\n", i, erase_granule); + rc = blocklevel_read(bl, i * erase_granule, tmp, 2 * erase_granule); + if (rc) { + ERR("blocklevel_read(0x%08x, 2 * erase_granule) failed with err: %d\n", i * erase_granule, rc); + goto out; + } + rc = memcmp(((char *)test) + (i * erase_granule), tmp, 2 * erase_granule); + if (rc) { + ERR("%s:%d read back miscompare (pos: 0x%08x)\n", __FILE__, __LINE__, i * erase_granule); + goto out; + } + } + + srand(1); + /* + * Try to jump around the place doing a tonne of small reads.
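+ * The bounds check below keeps each read inside the copy of the test + * pattern written at erase_granule * 20.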
+ * TODO: worth doing the same with writes + */ +#ifdef __STRICT_TEST__ +#define TEST_LOOPS 1000 +#else +#define TEST_LOOPS 100 +#endif + for (i = 0; i < TEST_LOOPS; i++) { + int r = rand(); + + printf("Loop %d of %d\n", i, TEST_LOOPS); + /* Avoid reading too far, just skip it */ + if ((r % erase_granule * 10) + (r % erase_granule * 2) > erase_granule * 10) + continue; + + rc = blocklevel_read(bl, erase_granule * 20 + (r % erase_granule * 10), tmp, r % erase_granule * 2); + if (rc) { + ERR("blocklevel_read(0x%08x, 0x%08x) failed with err %d\n", erase_granule * 20 + (r % erase_granule * 10), r % erase_granule * 2, rc); + goto out; + } + rc = memcmp(((char *)test) + (r % erase_granule * 10), tmp, r % erase_granule * 2); + if (rc) { + ERR("%s:%d read back miscompare (pos: 0x%08x)\n", __FILE__, __LINE__, erase_granule * 20 + (r % erase_granule * 10)); + goto out; + } + } +out: + free(tmp); + return rc; +} + +int main(void) +{ + struct blocklevel_device *bl; + int rc; + + libflash_debug = true; + + mbox_server_init(); + +#ifdef __STRICT_TEST__ + printf("Found __STRICT_TEST__, this may take some time.\n"); +#else + printf("__STRICT_TEST__ not found, use make strict-check for a more\n"); + printf("thorough test; it will take significantly longer.\n"); +#endif + + printf("Doing mbox-flash V1 tests\n"); + + /* run test */ + mbox_flash_init(&bl); + rc = run_flash_test(bl); + if (rc) + goto out; + /* + * Trick mbox-flash into thinking there was a reboot so we can + * switch to v2 + */ + + printf("Doing mbox-flash V2 tests\n"); + + mbox_server_reset(2, 12); + + /* Do all the tests again */ + rc = run_flash_test(bl); + if (rc) + goto out; + + mbox_server_reset(2, 17); + + /* Do all the tests again */ + rc = run_flash_test(bl); + if (rc) + goto out; + + + printf("Doing mbox-flash V3 tests\n"); + + mbox_server_reset(3, 20); + + /* Do all the tests again */ + rc = run_flash_test(bl); + + +out: + mbox_flash_exit(bl); + + mbox_server_destroy(); + + return rc; +}