From af1a266670d040d2f4083ff309d732d648afba2a Mon Sep 17 00:00:00 2001
From: Angelos Mouzakitis
Date: Tue, 10 Oct 2023 14:33:42 +0000
Subject: Add submodule dependency files

Change-Id: Iaf8d18082d3991dec7c0ebbea540f092188eb4ec
---
 roms/skiboot/hw/fsp/Makefile.inc | 13 +
 roms/skiboot/hw/fsp/fsp-attn.c | 143 ++
 roms/skiboot/hw/fsp/fsp-chiptod.c | 69 +
 roms/skiboot/hw/fsp/fsp-codeupdate.c | 1315 ++++++++++++++++
 roms/skiboot/hw/fsp/fsp-codeupdate.h | 222 +++
 roms/skiboot/hw/fsp/fsp-console.c | 1062 +++++++++++++
 roms/skiboot/hw/fsp/fsp-diag.c | 46 +
 roms/skiboot/hw/fsp/fsp-dpo.c | 154 ++
 roms/skiboot/hw/fsp/fsp-dump.c | 916 +++++++++++
 roms/skiboot/hw/fsp/fsp-elog-read.c | 608 +++++++
 roms/skiboot/hw/fsp/fsp-elog-write.c | 441 ++++++
 roms/skiboot/hw/fsp/fsp-epow.c | 192 +++
 roms/skiboot/hw/fsp/fsp-epow.h | 21 +
 roms/skiboot/hw/fsp/fsp-ipmi.c | 400 +++++
 roms/skiboot/hw/fsp/fsp-leds.c | 1939 +++++++++++++++++++++++
 roms/skiboot/hw/fsp/fsp-mem-err.c | 401 +++++
 roms/skiboot/hw/fsp/fsp-nvram.c | 424 +++++
 roms/skiboot/hw/fsp/fsp-occ.c | 417 +++++
 roms/skiboot/hw/fsp/fsp-op-panel.c | 266 ++++
 roms/skiboot/hw/fsp/fsp-psi.c | 75 +
 roms/skiboot/hw/fsp/fsp-rtc.c | 567 +++++++
 roms/skiboot/hw/fsp/fsp-sensor.c | 860 ++++++++++
 roms/skiboot/hw/fsp/fsp-surveillance.c | 226 +++
 roms/skiboot/hw/fsp/fsp-sysdump.c | 407 +++++
 roms/skiboot/hw/fsp/fsp-sysparam.c | 508 ++++++
 roms/skiboot/hw/fsp/fsp.c | 2709 ++++++++++++++++++++++++++++++++
 26 files changed, 14401 insertions(+)
 create mode 100644 roms/skiboot/hw/fsp/Makefile.inc
 create mode 100644 roms/skiboot/hw/fsp/fsp-attn.c
 create mode 100644 roms/skiboot/hw/fsp/fsp-chiptod.c
 create mode 100644 roms/skiboot/hw/fsp/fsp-codeupdate.c
 create mode 100644 roms/skiboot/hw/fsp/fsp-codeupdate.h
 create mode 100644 roms/skiboot/hw/fsp/fsp-console.c
 create mode 100644 roms/skiboot/hw/fsp/fsp-diag.c
 create mode 100644 roms/skiboot/hw/fsp/fsp-dpo.c
 create mode 100644 roms/skiboot/hw/fsp/fsp-dump.c
 create mode 100644 roms/skiboot/hw/fsp/fsp-elog-read.c
 create mode 100644 roms/skiboot/hw/fsp/fsp-elog-write.c
 create mode 100644 roms/skiboot/hw/fsp/fsp-epow.c
 create mode 100644 roms/skiboot/hw/fsp/fsp-epow.h
 create mode 100644 roms/skiboot/hw/fsp/fsp-ipmi.c
 create mode 100644 roms/skiboot/hw/fsp/fsp-leds.c
 create mode 100644 roms/skiboot/hw/fsp/fsp-mem-err.c
 create mode 100644 roms/skiboot/hw/fsp/fsp-nvram.c
 create mode 100644 roms/skiboot/hw/fsp/fsp-occ.c
 create mode 100644 roms/skiboot/hw/fsp/fsp-op-panel.c
 create mode 100644 roms/skiboot/hw/fsp/fsp-psi.c
 create mode 100644 roms/skiboot/hw/fsp/fsp-rtc.c
 create mode 100644 roms/skiboot/hw/fsp/fsp-sensor.c
 create mode 100644 roms/skiboot/hw/fsp/fsp-surveillance.c
 create mode 100644 roms/skiboot/hw/fsp/fsp-sysdump.c
 create mode 100644 roms/skiboot/hw/fsp/fsp-sysparam.c
 create mode 100644 roms/skiboot/hw/fsp/fsp.c

diff --git a/roms/skiboot/hw/fsp/Makefile.inc b/roms/skiboot/hw/fsp/Makefile.inc
new file mode 100644
index 000000000..21dc52a9f
--- /dev/null
+++ b/roms/skiboot/hw/fsp/Makefile.inc
@@ -0,0 +1,13 @@
+SUBDIRS += hw/fsp
+
+FSP_OBJS = fsp.o fsp-console.o fsp-rtc.o fsp-nvram.o fsp-sysparam.o
+FSP_OBJS += fsp-surveillance.o fsp-codeupdate.o fsp-sensor.o
+FSP_OBJS += fsp-diag.o fsp-leds.o fsp-mem-err.o fsp-op-panel.o
+FSP_OBJS += fsp-elog-read.o fsp-elog-write.o fsp-epow.o fsp-dpo.o
+FSP_OBJS += fsp-dump.o fsp-sysdump.o fsp-chiptod.o fsp-ipmi.o
+FSP_OBJS += fsp-attn.o fsp-occ.o fsp-psi.o
+FSP = hw/fsp/built-in.a
+
+ifeq ($(CONFIG_FSP),1)
+$(FSP): $(FSP_OBJS:%=hw/fsp/%)
+endif
diff --git
a/roms/skiboot/hw/fsp/fsp-attn.c b/roms/skiboot/hw/fsp/fsp-attn.c new file mode 100644 index 000000000..6e358e0d4 --- /dev/null +++ b/roms/skiboot/hw/fsp/fsp-attn.c @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* + * FSP ATTentioN support + * + * FSP can grab a bunch of things on host firmware dying, + * let's set that up. + * + * Copyright 2013-2019 IBM Corp. +*/ +#include +#include +#include +#include +#include +#include +#include +#include + +#define TI_CMD_VALID 0x1 /* Command valid */ +#define TI_CMD 0xA1 /* Terminate Immediate command */ +#define TI_DATA_LEN 0x0400 /* Data length */ +/* Controls dump actions + * - Non-destructive hardware dump (bit 0) + * - memory dump (bit 1) + * - Destructive hardware dump (bit 2) + */ +#define TI_DMP_CTL 0x6 +/* Dump type + * 0 - Abbreviated hardware dump + * 1 - Complete hardware dump + * 2 - No hardware dump + */ +#define TI_DUMP_TYPE 0x1 +#define TI_FORMAT 0x02 /* SRC format */ +#define TI_SRC_FLAGS 0x0 /* SRC flags */ +#define TI_ASCII_WORDS 0x0 /* Number of ASCII words */ + +/* HEX words: Number of hex words of data added, up to 8 total + * this value is one more. + */ +#define TI_HEX_WORDS 0x02 +/* SRC length : 8 byte header, 8 hex words of data and + * 32 byte ASCII SRC + */ +#define TI_SRC_LEN 0x48 + +static struct ti_attn *ti_attn; + +/* Initialises SP attention area with default values */ +static void init_sp_attn_area(void) +{ + /* Already done */ + if (ti_attn) + return; + + /* We are just enabling attention area 1 */ + ti_attn = (struct ti_attn *)&cpu_ctl_sp_attn_area1; + + /* Attention component checks Attn area 2 first, if its NULL + * it will check for Attn area 1. + */ + memset(&cpu_ctl_sp_attn_area1, 0, sizeof(struct sp_attn_area)); + memset(&cpu_ctl_sp_attn_area2, 0, sizeof(struct sp_attn_area)); + + ti_attn->cmd_valid = TI_CMD_VALID; + ti_attn->attn_cmd = TI_CMD; + ti_attn->data_len = CPU_TO_BE16(TI_DATA_LEN); + /* Dump control byte not used as of now */ + ti_attn->dump_ctrl =TI_DMP_CTL; + ti_attn->dump_type = CPU_TO_BE16(TI_DUMP_TYPE); + + /* SRC format */ + ti_attn->src_fmt = TI_FORMAT; + /* SRC flags */ + ti_attn->src_flags = TI_SRC_FLAGS; + /* #ASCII words */ + ti_attn->ascii_cnt = TI_ASCII_WORDS; + /* #HEX words */ + ti_attn->hex_cnt = TI_HEX_WORDS; + ti_attn->src_len = CPU_TO_BE16(TI_SRC_LEN); + snprintf(ti_attn->src, SRC_LEN, "%X", generate_src_from_comp(OPAL_RC_ATTN)); +} + +/* Updates src in sp attention area + */ +static void update_sp_attn_area(const char *msg) +{ +#define STACK_BUF_ENTRIES 20 + struct bt_entry bt_buf[STACK_BUF_ENTRIES]; + struct bt_metadata metadata; + unsigned int len; + + if (!fsp_present()) + return; + + /* This can be called early */ + if (!ti_attn) + init_sp_attn_area(); + + ti_attn->src_word[0] = + cpu_to_be32((uint32_t)((uint64_t)__builtin_return_address(0) & 0xffffffff)); + + snprintf(ti_attn->msg.version, VERSION_LEN, "%s", version); + backtrace_create(bt_buf, STACK_BUF_ENTRIES, &metadata); + metadata.token = OPAL_LAST + 1; + len = BT_FRAME_LEN; + backtrace_print(bt_buf, &metadata, ti_attn->msg.bt_buf, &len, false); + snprintf(ti_attn->msg.file_info, FILE_INFO_LEN, "%s", msg); + + ti_attn->msg_len = cpu_to_be32(VERSION_LEN + BT_FRAME_LEN + + strlen(ti_attn->msg.file_info)); +} + +void __attribute__((noreturn)) ibm_fsp_terminate(const char *msg) +{ + /* Update SP attention area */ + update_sp_attn_area(msg); + + /* Update op panel op_display */ + op_display(OP_FATAL, OP_MOD_CORE, 0x6666); + + /* Save crashing CPU details */ + opal_mpipl_save_crashing_pir(); 
+ + /* XXX FIXME: We should fsp_poll for a while to ensure any pending + * console writes have made it out, but until we have decent PSI + * link handling we must not do it forever. Polling can prevent the + * FSP from bringing the PSI link up and it can get stuck in a + * reboot loop. + */ + + trigger_attn(); + for (;;) ; +} + +/* Intialises SP attention area */ +void fsp_attn_init(void) +{ + if (!fsp_present()) + return; + + init_sp_attn_area(); +} diff --git a/roms/skiboot/hw/fsp/fsp-chiptod.c b/roms/skiboot/hw/fsp/fsp-chiptod.c new file mode 100644 index 000000000..e4ede3c1c --- /dev/null +++ b/roms/skiboot/hw/fsp/fsp-chiptod.c @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* + * On some chiptod errors, ask the FSP for a new topology + * + * Copyright 2013-2017 IBM Corp. + */ + +#define pr_fmt(fmt) "CHIPTOD: " fmt + +#include +#include +#include + +/* Response status for fsp command 0xE6, s/c 0x06 (Enable/Disable Topology) */ +#define FSP_STATUS_TOPO_IN_USE 0xb8 /* topology is in use */ + +static bool fsp_chiptod_update_topology(uint32_t cmd_sub_mod, + struct fsp_msg *msg) +{ + struct fsp_msg *resp; + enum chiptod_topology topo; + bool action; + uint8_t status = 0; + + switch (cmd_sub_mod) { + case FSP_CMD_TOPO_ENABLE_DISABLE: + /* + * Action Values: 0x00 = Disable, 0x01 = Enable + * Topology Values: 0x00 = Primary, 0x01 = Secondary + */ + action = !!msg->data.bytes[2]; + topo = msg->data.bytes[3]; + prlog(PR_DEBUG, "Topology update event:\n"); + prlog(PR_DEBUG, " Action = %s, Topology = %s\n", + action ? "Enable" : "Disable", + topo ? "Secondary" : "Primary"); + + if (!chiptod_adjust_topology(topo, action)) + status = FSP_STATUS_TOPO_IN_USE; + else + status = 0x00; + + resp = fsp_mkmsg(FSP_RSP_TOPO_ENABLE_DISABLE | status, 0); + if (!resp) { + prerror("Response allocation failed\n"); + return false; + } + if (fsp_queue_msg(resp, fsp_freemsg)) { + fsp_freemsg(resp); + prerror("Failed to queue response msg\n"); + return false; + } + return true; + default: + prlog(PR_DEBUG, "Unhandled sub cmd: %06x\n", cmd_sub_mod); + break; + } + return false; +} + +static struct fsp_client fsp_chiptod_client = { + .message = fsp_chiptod_update_topology, +}; + +void fsp_chiptod_init(void) +{ + /* Register for Class E6 (HW maintanance) */ + fsp_register_client(&fsp_chiptod_client, FSP_MCLASS_HW_MAINT); +} diff --git a/roms/skiboot/hw/fsp/fsp-codeupdate.c b/roms/skiboot/hw/fsp/fsp-codeupdate.c new file mode 100644 index 000000000..3cd5b2bc9 --- /dev/null +++ b/roms/skiboot/hw/fsp/fsp-codeupdate.c @@ -0,0 +1,1315 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* + * Firmware code update for FSP systems + * + * Copyright 2013-2018 IBM Corp. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "fsp-codeupdate.h" + +enum flash_state { + FLASH_STATE_ABSENT, + FLASH_STATE_INVALID, /* IPL side marker lid is invalid */ + FLASH_STATE_READING, + FLASH_STATE_READ, + FLASH_STATE_ABORT, +}; + +enum lid_fetch_side { + FETCH_T_SIDE_ONLY, + FETCH_P_SIDE_ONLY, + FETCH_BOTH_SIDE, +}; + +static enum flash_state flash_state = FLASH_STATE_INVALID; +static enum lid_fetch_side lid_fetch_side = FETCH_BOTH_SIDE; + +/* Image buffers */ +static struct opal_sg_list *image_data; +static uint32_t tce_start; +static void *lid_data; +static char validate_buf[VALIDATE_BUF_SIZE]; + +/* TCE buffer lock */ +static struct lock flash_lock = LOCK_UNLOCKED; + +/* FW VPD data */ +static struct fw_image_vpd fw_vpd[2]; + +/* Code update related sys parameters */ +static uint32_t ipl_side; +static uint32_t hmc_managed; +static uint32_t update_policy; +static uint32_t in_flight_params; + +/* If non-NULL, this gets called just before rebooting */ +int (*fsp_flash_term_hook)(void); + +DEFINE_LOG_ENTRY(OPAL_RC_CU_INIT, OPAL_PLATFORM_ERR_EVT, OPAL_CODEUPDATE, + OPAL_PLATFORM_FIRMWARE, + OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_REBOOT, OPAL_NA); + +DEFINE_LOG_ENTRY(OPAL_RC_CU_FLASH, OPAL_PLATFORM_ERR_EVT, OPAL_CODEUPDATE, + OPAL_PLATFORM_FIRMWARE, + OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_REBOOT, OPAL_NA); + +DEFINE_LOG_ENTRY(OPAL_RC_CU_SG_LIST, OPAL_PLATFORM_ERR_EVT, OPAL_CODEUPDATE, + OPAL_PLATFORM_FIRMWARE, + OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_REBOOT, OPAL_NA); + +DEFINE_LOG_ENTRY(OPAL_RC_CU_COMMIT, OPAL_PLATFORM_ERR_EVT, OPAL_CODEUPDATE, + OPAL_PLATFORM_FIRMWARE, + OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_REBOOT, OPAL_NA); + +DEFINE_LOG_ENTRY(OPAL_RC_CU_MSG, OPAL_PLATFORM_ERR_EVT, OPAL_CODEUPDATE, + OPAL_PLATFORM_FIRMWARE, + OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_REBOOT, OPAL_NA); + +DEFINE_LOG_ENTRY(OPAL_RC_CU_NOTIFY, OPAL_PLATFORM_ERR_EVT, OPAL_CODEUPDATE, + OPAL_PLATFORM_FIRMWARE, + OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_REBOOT, OPAL_NA); + +DEFINE_LOG_ENTRY(OPAL_RC_CU_MARKER_LID, OPAL_PLATFORM_ERR_EVT, OPAL_CODEUPDATE, + OPAL_PLATFORM_FIRMWARE, + OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_REBOOT, OPAL_NA); + +static inline void code_update_tce_map(uint32_t tce_offset, + void *buffer, uint32_t size) +{ + uint32_t tlen = ALIGN_UP(size, TCE_PSIZE); + + fsp_tce_map(PSI_DMA_CODE_UPD + tce_offset, buffer, tlen); +} + +static inline void code_update_tce_unmap(uint32_t size) +{ + fsp_tce_unmap(PSI_DMA_CODE_UPD, size); +} + +static inline void set_def_fw_version(uint32_t side) +{ + strncpy(fw_vpd[side].mi_keyword, FW_VERSION_UNKNOWN, MI_KEYWORD_SIZE); + strncpy(fw_vpd[side].ext_fw_id, FW_VERSION_UNKNOWN, ML_KEYWORD_SIZE); +} + +/* + * Get IPL side + */ +static void get_ipl_side(void) +{ + struct dt_node *iplp; + const char *side = NULL; + + iplp = dt_find_by_path(dt_root, "ipl-params/ipl-params"); + if (iplp) + side = dt_prop_get_def(iplp, "cec-ipl-side", NULL); + prlog(PR_NOTICE, "CUPD: IPL SIDE = %s\n", side); + + if (!side || !strcmp(side, "temp")) + ipl_side = FW_IPL_SIDE_TEMP; + else + ipl_side = FW_IPL_SIDE_PERM; +} + + +/* + * Helper routines to retrieve code update related + * system parameters from FSP. 
+ */ + +static void inc_in_flight_param(void) +{ + lock(&flash_lock); + in_flight_params++; + unlock(&flash_lock); +} + +static void dec_in_flight_param(void) +{ + lock(&flash_lock); + assert(in_flight_params > 0); + in_flight_params--; + unlock(&flash_lock); +} + +static void got_code_update_policy(uint32_t param_id __unused, int err_len, + void *data __unused) +{ + if (err_len != 4) { + log_simple_error(&e_info(OPAL_RC_CU_INIT), "CUPD: Error " + "retrieving code update policy: %d\n", err_len); + } else { + update_policy = be32_to_cpu((__be32)update_policy); + prlog(PR_NOTICE, "CUPD: Code update policy from FSP: %d\n", + update_policy); + } + + dec_in_flight_param(); +} + +static void get_code_update_policy(void) +{ + int rc; + + inc_in_flight_param(); + rc = fsp_get_sys_param(SYS_PARAM_FLASH_POLICY, &update_policy, 4, + got_code_update_policy, NULL); + if (rc) { + log_simple_error(&e_info(OPAL_RC_CU_INIT), + "CUPD: Error %d queueing param request\n", rc); + dec_in_flight_param(); + } +} + +static void got_platform_hmc_managed(uint32_t param_id __unused, int err_len, + void *data __unused) +{ + if (err_len != 4) { + log_simple_error(&e_info(OPAL_RC_CU_INIT), "CUPD: Error " + "retrieving hmc managed status: %d\n", err_len); + } else { + hmc_managed = be32_to_cpu((__be32)hmc_managed); + prlog(PR_NOTICE, "CUPD: HMC managed status from FSP: %d\n", + hmc_managed); + } + + dec_in_flight_param(); +} + +static void get_platform_hmc_managed(void) +{ + int rc; + + inc_in_flight_param(); + rc = fsp_get_sys_param(SYS_PARAM_HMC_MANAGED, &hmc_managed, 4, + got_platform_hmc_managed, NULL); + if (rc) { + log_simple_error(&e_info(OPAL_RC_CU_INIT), + "CUPD: Error %d queueing param request\n", rc); + dec_in_flight_param(); + } +} + +static bool fw_ipl_side_update_notify(struct fsp_msg *msg) +{ + u32 param_id = fsp_msg_get_data_word(msg, 0); + int dlen = fsp_msg_get_data_word(msg, 1) & 0xffff; + uint32_t state = fsp_msg_get_data_word(msg, 2); + + if (param_id != SYS_PARAM_FW_IPL_SIDE) + return false; + + if (dlen != 4) { + prlog(PR_DEBUG, + "CUPD: Invalid sysparams notify len : 0x%x\n", dlen); + return false; + } + + prlog(PR_NOTICE, "CUPD: FW IPL side changed. Disable fast reboot\n"); + prlog(PR_NOTICE, "CUPD: Next IPL side : %s\n", + state == FW_IPL_SIDE_TEMP ? "temp" : "perm"); + + disable_fast_reboot("FSP IPL Side Change"); + return true; +} + +static int64_t code_update_check_state(void) +{ + switch(flash_state) { + case FLASH_STATE_ABSENT: + return OPAL_HARDWARE; + case FLASH_STATE_INVALID: + case FLASH_STATE_ABORT: + return OPAL_INTERNAL_ERROR; + case FLASH_STATE_READING: + return OPAL_BUSY; + default: + break; + } + return OPAL_SUCCESS; +} + +/* + * Get common marker LID additional data section + */ +static void *get_adf_sec_data(struct com_marker_adf_sec *adf_sec, + uint32_t name) +{ + struct com_marker_adf_header *adf_header; + int i; + + adf_header = (void *)adf_sec->adf_data; + for (i = 0; i < be32_to_cpu(adf_sec->adf_cnt); i++) { + if (be32_to_cpu(adf_header->name) == name) + return adf_header; + + adf_header = (void *)adf_header + be32_to_cpu(adf_header->size); + } + return NULL; +} + +/* + * Parse common marker LID to get FW version details + * + * Note: + * At present, we are parsing "Service Pack Nomenclature ADF" + * section only. If we are adding FW IP support, then we have + * to parse "Firmware IP Protection ADF" as well. 
+ */ +static void parse_marker_lid(uint32_t side) +{ + struct com_marker_header *header; + struct com_marker_mi_section *mi_sec; + struct com_marker_adf_sec *adf_sec; + struct com_marker_adf_sp *adf_sp; + + header = (void *)lid_data; + + /* Get MI details */ + mi_sec = (void *)header + be32_to_cpu(header->MI_offset); + /* + * If Marker LID is invalid, then FSP will return a Marker + * LID with ASCII zeros for the entire MI keyword. + */ + if (mi_sec->mi_keyword[0] == '0') + return; + + strncpy(fw_vpd[side].mi_keyword, mi_sec->mi_keyword, MI_KEYWORD_SIZE); + fw_vpd[side].mi_keyword[MI_KEYWORD_SIZE - 1] = '\0'; + prlog(PR_NOTICE, "CUPD: %s side MI Keyword = %s\n", + side == 0x00 ? "P" : "T", fw_vpd[side].mi_keyword); + + /* Get ML details */ + adf_sec = (void *)header + be32_to_cpu(mi_sec->adf_offset); + adf_sp = get_adf_sec_data(adf_sec, ADF_NAME_SP); + if (!adf_sp) + return; + + strncpy(fw_vpd[side].ext_fw_id, + (void *)adf_sp + be32_to_cpu(adf_sp->sp_name_offset), + ML_KEYWORD_SIZE); + fw_vpd[side].ext_fw_id[ML_KEYWORD_SIZE - 1] = '\0'; + prlog(PR_NOTICE, "CUPD: %s side ML Keyword = %s\n", + side == 0x00 ? "P" : "T", fw_vpd[side].ext_fw_id); +} + +static void validate_com_marker_lid(void) +{ + if (!strncmp(fw_vpd[ipl_side].mi_keyword, FW_VERSION_UNKNOWN, + sizeof(FW_VERSION_UNKNOWN))) { + log_simple_error(&e_info(OPAL_RC_CU_MARKER_LID), + "CUPD: IPL side Marker LID is not valid\n"); + flash_state = FLASH_STATE_INVALID; + return; + } + + flash_state = FLASH_STATE_READ; +} + +static void fetch_lid_data_complete(struct fsp_msg *msg) +{ + void *buffer; + size_t length, chunk; + uint32_t lid_id, offset; + uint16_t id; + uint8_t flags, status; + int rc; + + status = (msg->resp->word1 >> 8) & 0xff; + flags = (fsp_msg_get_data_word(msg, 0) >> 16) & 0xff; + id = fsp_msg_get_data_word(msg, 0) & 0xffff; + lid_id = fsp_msg_get_data_word(msg, 1); + offset = fsp_msg_get_data_word(msg->resp, 1); + length = fsp_msg_get_data_word(msg->resp, 2); + + prlog(PR_NOTICE, "CUPD: Marker LID id : size : status = " + "0x%x : 0x%x : 0x%x\n", + fsp_msg_get_data_word(msg, 1), fsp_msg_get_data_word(msg->resp, 2), status); + + fsp_freemsg(msg); + + switch (status) { + case FSP_STATUS_SUCCESS: /* Read complete, parse VPD */ + parse_marker_lid(lid_id == P_COM_MARKER_LID_ID ? 0 : 1); + break; + case FSP_STATUS_MORE_DATA: /* More data left */ + offset += length; + chunk = MARKER_LID_SIZE - offset; + if (chunk > 0) { + buffer = (void *)PSI_DMA_CODE_UPD + offset; + rc = fsp_fetch_data_queue(flags, id, lid_id, + offset, buffer, &chunk, + fetch_lid_data_complete); + + /* If queue msg fails, then continue with marker LID + * validation hoping that we have at least boot side + * information. + */ + if (rc == OPAL_SUCCESS) + return; + } + break; + default: /* Fetch LID call failed */ + break; + } + + /* If required, fetch T side marker LID */ + if (lid_id == P_COM_MARKER_LID_ID && + lid_fetch_side == FETCH_BOTH_SIDE) { + length = MARKER_LID_SIZE; + rc = fsp_fetch_data_queue(flags, id, T_COM_MARKER_LID_ID, + 0, (void *)PSI_DMA_CODE_UPD, + &length, fetch_lid_data_complete); + + /* If queue msg fails, then continue with marker LID + * validation hoping that we have at least boot side + * information. 
+ */ + if (rc == OPAL_SUCCESS) + return; + } + + lock(&flash_lock); + + /* Validate marker LID data */ + validate_com_marker_lid(); + /* TCE unmap */ + code_update_tce_unmap(MARKER_LID_SIZE); + + unlock(&flash_lock); +} + +static void fetch_com_marker_lid(void) +{ + size_t length = MARKER_LID_SIZE; + uint32_t lid_id; + int rc; + + /* Read in progress? */ + rc = code_update_check_state(); + if (rc == OPAL_HARDWARE || rc == OPAL_BUSY) + return; + + if (lid_fetch_side == FETCH_T_SIDE_ONLY) { + lid_id = T_COM_MARKER_LID_ID; + set_def_fw_version(FW_IPL_SIDE_TEMP); + } else if (lid_fetch_side == FETCH_P_SIDE_ONLY) { + lid_id = P_COM_MARKER_LID_ID; + set_def_fw_version(FW_IPL_SIDE_PERM); + } else { + lid_id = P_COM_MARKER_LID_ID; + set_def_fw_version(FW_IPL_SIDE_PERM); + set_def_fw_version(FW_IPL_SIDE_TEMP); + } + + code_update_tce_map(0, lid_data, length); + rc = fsp_fetch_data_queue(0x00, 0x05, lid_id, 0, + (void *)PSI_DMA_CODE_UPD, &length, + fetch_lid_data_complete); + if (!rc) + flash_state = FLASH_STATE_READING; + else + flash_state = FLASH_STATE_INVALID; +} + +/* + * Add MI and ML keyword details into DT + */ +#define FW_VER_SIZE 64 +static void add_opal_firmware_version(void) +{ + struct dt_node *dt_fw; + char buffer[FW_VER_SIZE]; + int offset; + + dt_fw = dt_find_by_path(dt_root, "ibm,opal/firmware"); + if (!dt_fw) + return; + + /* MI version */ + offset = snprintf(buffer, FW_VER_SIZE, "MI %s %s", + fw_vpd[FW_IPL_SIDE_TEMP].mi_keyword, + fw_vpd[FW_IPL_SIDE_PERM].mi_keyword); + if (ipl_side == FW_IPL_SIDE_TEMP) + snprintf(buffer + offset, FW_VER_SIZE - offset, + " %s", fw_vpd[FW_IPL_SIDE_TEMP].mi_keyword); + else + snprintf(buffer + offset, FW_VER_SIZE - offset, + " %s", fw_vpd[FW_IPL_SIDE_PERM].mi_keyword); + + dt_add_property(dt_fw, "mi-version", buffer, strlen(buffer)); + + /* ML version */ + offset = snprintf(buffer, FW_VER_SIZE, "ML %s %s", + fw_vpd[FW_IPL_SIDE_TEMP].ext_fw_id, + fw_vpd[FW_IPL_SIDE_PERM].ext_fw_id); + if (ipl_side == FW_IPL_SIDE_TEMP) + snprintf(buffer + offset, FW_VER_SIZE - offset, + " %s", fw_vpd[FW_IPL_SIDE_TEMP].ext_fw_id); + else + snprintf(buffer + offset, FW_VER_SIZE - offset, + " %s", fw_vpd[FW_IPL_SIDE_PERM].ext_fw_id); + + dt_add_property(dt_fw, "ml-version", buffer, strlen(buffer)); +} + +/* + * This is called right before starting the payload (Linux) to + * ensure the common marker LID read and parsing has happened + * before we transfer control. 
+ */ +void fsp_code_update_wait_vpd(bool is_boot) +{ + int waited = 0; + + if (!fsp_present()) + return; + + prlog(PR_NOTICE, "CUPD: Waiting read marker LID" + " and in flight parsm completion...\n"); + + lock(&flash_lock); + while(true) { + if (!(flash_state == FLASH_STATE_READING || in_flight_params)) + break; + unlock(&flash_lock); + time_wait_ms(5); + waited+=5; + lock(&flash_lock); + } + unlock(&flash_lock); + + if (waited) + prlog(PR_DEBUG, "CUPD: fsp_code_update_wait_vpd %d\n", waited); + + if (is_boot) + add_opal_firmware_version(); +} + +static int code_update_start(void) +{ + struct fsp_msg *msg; + int rc; + uint16_t comp = 0x00; /* All components */ + uint8_t side = OPAL_COMMIT_TMP_SIDE; /* Temporary side */ + + msg = fsp_mkmsg(FSP_CMD_FLASH_START, 1, side << 16 | comp); + if (!msg) { + log_simple_error(&e_info(OPAL_RC_CU_MSG), + "CUPD: CMD_FLASH_START message allocation failed !\n"); + return OPAL_INTERNAL_ERROR; + } + if (fsp_sync_msg(msg, false)) { + fsp_freemsg(msg); + return OPAL_INTERNAL_ERROR; + } + rc = (msg->resp->word1 >> 8) & 0xff; + fsp_freemsg(msg); + return rc; +} + +static int code_update_write_lid(uint32_t lid_id, uint32_t size) +{ + struct fsp_msg *msg; + int rc, n_pairs = 1; + + msg = fsp_mkmsg(FSP_CMD_FLASH_WRITE, 5, lid_id, + n_pairs, 0, tce_start, size); + if (!msg) { + log_simple_error(&e_info(OPAL_RC_CU_MSG), + "CUPD: CMD_FLASH_WRITE message allocation failed !\n"); + return OPAL_INTERNAL_ERROR; + } + if (fsp_sync_msg(msg, false)) { + fsp_freemsg(msg); + return OPAL_INTERNAL_ERROR; + } + rc = (msg->resp->word1 >> 8) & 0xff; + fsp_freemsg(msg); + return rc; +} + +static int code_update_del_lid(uint32_t lid_id) +{ + struct fsp_msg *msg; + int rc; + + msg = fsp_mkmsg(FSP_CMD_FLASH_DEL, 1, lid_id); + if (!msg) { + log_simple_error(&e_info(OPAL_RC_CU_MSG), + "CUPD: CMD_FLASH_DEL message allocation failed !\n"); + return OPAL_INTERNAL_ERROR; + } + if (fsp_sync_msg(msg, false)) { + fsp_freemsg(msg); + return OPAL_INTERNAL_ERROR; + } + rc = (msg->resp->word1 >> 8) & 0xff; + fsp_freemsg(msg); + return rc; +} + +static int code_update_complete(uint32_t cmd) +{ + struct fsp_msg *msg; + int rc; + + msg = fsp_mkmsg(cmd, 0); + if (!msg) { + log_simple_error(&e_info(OPAL_RC_CU_MSG), + "CUPD: CUPD COMPLETE message allocation failed !\n"); + return OPAL_INTERNAL_ERROR; + } + if (fsp_sync_msg(msg, false)) { + fsp_freemsg(msg); + return OPAL_INTERNAL_ERROR; + } + rc = (msg->resp->word1 >> 8) & 0xff; + fsp_freemsg(msg); + return rc; +} + +static int code_update_swap_side(void) +{ + struct fsp_msg *msg; + int rc; + + msg = fsp_mkmsg(FSP_CMD_FLASH_SWAP, 0); + if (!msg) { + log_simple_error(&e_info(OPAL_RC_CU_MSG), + "CUPD: CMD_FLASH_SWAP message allocation failed !\n"); + return OPAL_INTERNAL_ERROR; + } + + if (fsp_sync_msg(msg, false)) { + fsp_freemsg(msg); + return OPAL_INTERNAL_ERROR; + } + rc = (msg->resp->word1 >> 8) & 0xff; + fsp_freemsg(msg); + return rc; +} + +static int code_update_set_ipl_side(void) +{ + struct fsp_msg *msg; + uint8_t side = FW_IPL_SIDE_TEMP; /* Next IPL side */ + int rc; + + msg = fsp_mkmsg(FSP_CMD_SET_IPL_SIDE, 1, side << 16); + if (!msg) { + log_simple_error(&e_info(OPAL_RC_CU_MSG), + "CUPD: CMD_SET_IPL_SIDE message allocation failed!\n"); + return OPAL_INTERNAL_ERROR; + } + if (fsp_sync_msg(msg, false)) { + fsp_freemsg(msg); + log_simple_error(&e_info(OPAL_RC_CU_MSG), + "CUPD: Setting next IPL side failed!\n"); + return OPAL_INTERNAL_ERROR; + } + rc = (msg->resp->word1 >> 8) & 0xff; + fsp_freemsg(msg); + return rc; +} + +static void 
code_update_commit_complete(struct fsp_msg *msg) +{ + int rc; + uint8_t type; + + rc = (msg->resp->word1 >> 8) & 0xff; + type = (msg->word1 >> 8) & 0xff; + fsp_freemsg(msg); + if (rc) { + log_simple_error(&e_info(OPAL_RC_CU_COMMIT), + "CUPD: Code update commit failed, err 0x%x\n", rc); + return; + } + + /* Reset cached VPD data */ + lock(&flash_lock); + + /* Find commit type */ + if (type == 0x01) { + lid_fetch_side = FETCH_P_SIDE_ONLY; + } else if (type == 0x02) + lid_fetch_side = FETCH_T_SIDE_ONLY; + else + lid_fetch_side = FETCH_BOTH_SIDE; + + fetch_com_marker_lid(); + + unlock(&flash_lock); +} + +static int code_update_commit(uint32_t cmd) +{ + struct fsp_msg *msg; + + msg = fsp_mkmsg(cmd, 0); + if (!msg) { + log_simple_error(&e_info(OPAL_RC_CU_MSG), + "CUPD: COMMIT message allocation failed !\n"); + return OPAL_INTERNAL_ERROR; + } + if (fsp_queue_msg(msg, code_update_commit_complete)) { + log_simple_error(&e_info(OPAL_RC_CU_COMMIT), + "CUPD: Failed to queue code update commit message\n"); + fsp_freemsg(msg); + return OPAL_INTERNAL_ERROR; + } + return OPAL_SUCCESS; +} + +/* + * Inband code update is allowed? + */ +static int64_t validate_inband_policy(void) +{ + /* Quirk: + * If the code update policy is out-of-band, but the system + * is not HMC-managed, then inband update is allowed. + */ + if (hmc_managed != PLATFORM_HMC_MANAGED) + return 0; + if (update_policy == INBAND_UPDATE_ALLOWED) + return 0; + + return -1; +} + +/* + * Validate magic Number + */ +static int64_t validate_magic_num(uint16_t magic) +{ + if (magic != IMAGE_MAGIC_NUMBER) + return -1; + return 0; +} + +/* + * Compare MI keyword to make sure candidate image + * is valid for this platform. + */ +static int64_t validate_image_version(struct update_image_header *header, + uint32_t *result) +{ + struct fw_image_vpd vpd; + int t_valid = 0, p_valid = 0, cton_ver = -1, ptot_ver = -1; + + /* Valid flash image level? 
*/ + if (strncmp(fw_vpd[0].mi_keyword, FW_VERSION_UNKNOWN, + sizeof(FW_VERSION_UNKNOWN)) != 0) + p_valid = 1; + + if (strncmp(fw_vpd[1].mi_keyword, FW_VERSION_UNKNOWN, + sizeof(FW_VERSION_UNKNOWN)) != 0) + t_valid = 1; + + /* Validate with IPL side image */ + vpd = fw_vpd[ipl_side]; + + /* Validate platform identifier (first two char of MI keyword) */ + if (strncmp(vpd.mi_keyword, header->mi_keyword_data, 2) != 0) { + *result = VALIDATE_INVALID_IMG; + return OPAL_SUCCESS; + } + + /* Don't flash different FW series (like P7 image on P8) */ + if (vpd.mi_keyword[2] != header->mi_keyword_data[2]) { + *result = VALIDATE_INVALID_IMG; + return OPAL_SUCCESS; + } + + /* Get current to new version difference */ + cton_ver = strncmp(vpd.mi_keyword + 3, header->mi_keyword_data + 3, 6); + + /* Get P to T version difference */ + if (t_valid && p_valid) + ptot_ver = strncmp(fw_vpd[0].mi_keyword + 3, + fw_vpd[1].mi_keyword + 3, 6); + + /* Update validation result */ + if (ipl_side == FW_IPL_SIDE_TEMP) { + if (!ptot_ver && cton_ver > 0) /* downgrade T side */ + *result = VALIDATE_TMP_UPDATE_DL; + else if (!ptot_ver && cton_ver <= 0) /* upgrade T side */ + *result = VALIDATE_TMP_UPDATE; + else if (cton_ver > 0) /* Implied commit & downgrade T side */ + *result = VALIDATE_TMP_COMMIT_DL; + else /* Implied commit & upgrade T side */ + *result = VALIDATE_TMP_COMMIT; + } else { + if (!t_valid) /* Current unknown */ + *result = VALIDATE_CUR_UNKNOWN; + else if (cton_ver > 0) /* downgrade FW version */ + *result = VALIDATE_TMP_UPDATE_DL; + else /* upgrade FW version */ + *result = VALIDATE_TMP_UPDATE; + } + return OPAL_SUCCESS; +} + +/* + * Validate candidate image + */ +static int validate_candidate_image(uint64_t buffer, + uint32_t size, uint32_t *result) +{ + struct update_image_header *header; + int rc = OPAL_PARAMETER; + + if (size < VALIDATE_BUF_SIZE) + goto out; + + rc = code_update_check_state(); + if (rc != OPAL_SUCCESS) + goto out; + + if (validate_inband_policy() != 0) { + *result = VALIDATE_FLASH_AUTH; + rc = OPAL_SUCCESS; + goto out; + } + + memcpy(validate_buf, (void *)buffer, VALIDATE_BUF_SIZE); + header = (struct update_image_header *)validate_buf; + + if (validate_magic_num(be16_to_cpu(header->magic)) != 0) { + *result = VALIDATE_INVALID_IMG; + rc = OPAL_SUCCESS; + goto out; + } + rc = validate_image_version(header, result); +out: + return rc; +} + +static int validate_out_buf_mi_data(void *buffer, int offset, uint32_t result) +{ + struct update_image_header *header = (void *)validate_buf; + + /* Current T & P side MI data */ + offset += snprintf(buffer + offset, VALIDATE_BUF_SIZE - offset, + "MI %s %s\n", + fw_vpd[1].mi_keyword, fw_vpd[0].mi_keyword); + + /* New T & P side MI data */ + offset += snprintf(buffer + offset, VALIDATE_BUF_SIZE - offset, + "MI %s", header->mi_keyword_data); + if (result == VALIDATE_TMP_COMMIT_DL || + result == VALIDATE_TMP_COMMIT) + offset += snprintf(buffer + offset, + VALIDATE_BUF_SIZE - offset, + " %s\n", fw_vpd[1].mi_keyword); + else + offset += snprintf(buffer + offset, + VALIDATE_BUF_SIZE - offset, + " %s\n", fw_vpd[0].mi_keyword); + return offset; +} + +static int validate_out_buf_ml_data(void *buffer, int offset, uint32_t result) +{ + struct update_image_header *header = (void *)validate_buf; + /* Candidate image ML data */ + char *ext_fw_id = (void *)header->data; + + /* Current T & P side ML data */ + offset += snprintf(buffer + offset, VALIDATE_BUF_SIZE - offset, + "ML %s %s\n", + fw_vpd[1].ext_fw_id, fw_vpd[0].ext_fw_id); + + /* New T & P side ML data */ + 
offset += snprintf(buffer + offset, VALIDATE_BUF_SIZE - offset, + "ML %s", ext_fw_id); + if (result == VALIDATE_TMP_COMMIT_DL || + result == VALIDATE_TMP_COMMIT) + offset += snprintf(buffer + offset, + VALIDATE_BUF_SIZE - offset, + " %s\n", fw_vpd[1].ext_fw_id); + else + offset += snprintf(buffer + offset, + VALIDATE_BUF_SIZE - offset, + " %s\n", fw_vpd[0].ext_fw_id); + + return offset; +} + +/* + * Copy LID data to TCE buffer + */ +static int get_lid_data(struct opal_sg_list *list, + int lid_size, int lid_offset) +{ + struct opal_sg_list *sg; + struct opal_sg_entry *entry; + int length, num_entries, i, buf_pos = 0; + int map_act, map_size; + bool last = false; + + /* Reset TCE start address */ + tce_start = 0; + + for (sg = list; sg; sg = (struct opal_sg_list*)be64_to_cpu(sg->next)) { + length = (be64_to_cpu(sg->length) & ~(SG_LIST_VERSION << 56)) - 16; + num_entries = length / sizeof(struct opal_sg_entry); + if (num_entries <= 0) + return -1; + + for (i = 0; i < num_entries; i++) { + entry = &sg->entry[i]; + + /* + * Continue until we get data block which + * contains LID data + */ + if (lid_offset > be64_to_cpu(entry->length)) { + lid_offset -= be64_to_cpu(entry->length); + continue; + } + + /* + * SG list entry size can be more than 4k. + * Map only required pages, instead of + * mapping entire entry. + */ + map_act = be64_to_cpu(entry->length); + map_size = be64_to_cpu(entry->length); + + /* First TCE mapping */ + if (!tce_start) { + tce_start = PSI_DMA_CODE_UPD + + (lid_offset & 0xfff); + map_act = be64_to_cpu(entry->length) - lid_offset; + lid_offset &= ~0xfff; + map_size = be64_to_cpu(entry->length) - lid_offset; + } + + /* Check pending LID size to map */ + if (lid_size <= map_act) { + /* (map_size - map_act) gives page + * start to tce offset difference. + * This is required when LID size + * is <= 4k. + */ + map_size = (map_size - map_act) + lid_size; + last = true; + } + + /* Ajust remaining size to map */ + lid_size -= map_act; + + /* TCE mapping */ + code_update_tce_map(buf_pos, + (void*)(be64_to_cpu(entry->data) + + lid_offset), + map_size); + buf_pos += map_size; + /* Reset LID offset count */ + lid_offset = 0; + + if (last) + return OPAL_SUCCESS; + } + } /* outer loop */ + return -1; +} + +/* + * If IPL side is T, then swap P & T sides to add + * new fix to T side. 
+ */ +static int validate_ipl_side(void) +{ + if (ipl_side == FW_IPL_SIDE_PERM) + return 0; + return code_update_swap_side(); +} + +static int64_t fsp_opal_validate_flash(uint64_t buffer, + __be32 *size, __be32 *result) +{ + int64_t rc = 0; + int offset; + uint32_t r; + + lock(&flash_lock); + + rc = validate_candidate_image(buffer, be32_to_cpu(*size), &r); + /* Fill output buffer + * + * Format: + * MIcurrent-T-imagecurrent-P-image<0x0A> + * MInew-T-imagenew-P-image<0x0A> + * MLcurrent-T-imagecurrent-P-image<0x0A> + * MLnew-T-imagenew-P-image<0x0A> + */ + if (!rc && (r != VALIDATE_FLASH_AUTH && r != VALIDATE_INVALID_IMG)) { + /* Clear output buffer */ + memset((void *)buffer, 0, VALIDATE_BUF_SIZE); + + offset = validate_out_buf_mi_data((void *)buffer, 0, r); + offset += validate_out_buf_ml_data((void *)buffer, offset, r); + *size = cpu_to_be32(offset); + } + *result = cpu_to_be32(r); + + unlock(&flash_lock); + return rc; +} + +/* Commit/Reject T side image */ +static int64_t fsp_opal_manage_flash(uint8_t op) +{ + uint32_t cmd; + int rc; + + lock(&flash_lock); + rc = code_update_check_state(); + unlock(&flash_lock); + + if (rc != OPAL_SUCCESS) + return rc; + + if (op != OPAL_REJECT_TMP_SIDE && op != OPAL_COMMIT_TMP_SIDE) + return OPAL_PARAMETER; + + if ((op == OPAL_COMMIT_TMP_SIDE && ipl_side == FW_IPL_SIDE_PERM) || + (op == OPAL_REJECT_TMP_SIDE && ipl_side == FW_IPL_SIDE_TEMP)) + return OPAL_ACTIVE_SIDE_ERR; + + if (op == OPAL_COMMIT_TMP_SIDE) + cmd = FSP_CMD_FLASH_NORMAL; + else + cmd = FSP_CMD_FLASH_REMOVE; + + return code_update_commit(cmd); +} + +static int fsp_flash_firmware(void) +{ + struct update_image_header *header; + struct lid_index_entry *idx_entry; + struct opal_sg_list *list; + struct opal_sg_entry *entry; + int rc, i; + + /* Make sure no outstanding LID read is in progress */ + rc = code_update_check_state(); + if (rc == OPAL_BUSY) + fsp_code_update_wait_vpd(false); + + /* Get LID Index */ + list = image_data; + if (!list) + goto out; + entry = &list->entry[0]; + header = (struct update_image_header *)be64_to_cpu(entry->data); + idx_entry = (void *)header + be16_to_cpu(header->lid_index_offset); + + /* FIXME: + * At present we depend on FSP to validate CRC for + * individual LIDs. Calculate and validate individual + * LID CRC here. + */ + + if (validate_ipl_side() != 0) { + log_simple_error(&e_info(OPAL_RC_CU_FLASH), "CUPD: " + "Rename (Swap T and P) failed!\n"); + goto out; + } + + /* Set next IPL side */ + if (code_update_set_ipl_side() != 0) { + log_simple_error(&e_info(OPAL_RC_CU_FLASH), "CUPD: " + "Setting next IPL side failed!\n"); + goto out; + } + + /* Start code update process */ + if (code_update_start() != 0) { + log_simple_error(&e_info(OPAL_RC_CU_FLASH), "CUPD: " + "Code update start failed!\n"); + goto out; + } + + /* + * Delete T side LIDs before writing. + * + * Note: + * - Applicable for FWv >= 760. + * - Current Code Update design is to ignore + * any delete lid failure, and continue with + * the update. + */ + rc = code_update_del_lid(DEL_UPD_SIDE_LIDS); + + if (rc) + prlog(PR_TRACE, "CUPD: Failed to delete LIDs (%d). 
This is okay, continuing..", rc); + + for (i = 0; i < be16_to_cpu(header->number_lids); i++) { + if (be32_to_cpu(idx_entry->size) > LID_MAX_SIZE) { + log_simple_error(&e_info(OPAL_RC_CU_FLASH), "CUPD: LID" + " (0x%x) size 0x%x is > max LID size (0x%x).\n", + be32_to_cpu(idx_entry->id), + be32_to_cpu(idx_entry->size), LID_MAX_SIZE); + goto abort_update; + } + + rc = get_lid_data(list, be32_to_cpu(idx_entry->size), + be32_to_cpu(idx_entry->offset)); + if (rc) { + log_simple_error(&e_info(OPAL_RC_CU_FLASH), "CUPD: " + "Failed to parse LID from firmware image." + " (rc : %d).\n", rc); + goto abort_update; + } + + rc = code_update_write_lid(be32_to_cpu(idx_entry->id), + be32_to_cpu(idx_entry->size)); + if (rc) { + log_simple_error(&e_info(OPAL_RC_CU_FLASH), "CUPD: " + "Failed to write LID to FSP. (rc : %d).\n", rc); + goto abort_update; + } + + /* Unmap TCE */ + code_update_tce_unmap(PSI_DMA_CODE_UPD_SIZE); + + /* Next LID index */ + idx_entry = (void *)idx_entry + sizeof(struct lid_index_entry); + } + + /* Code update completed */ + rc = code_update_complete(FSP_CMD_FLASH_COMPLETE); + + return rc; + +abort_update: + rc = code_update_complete(FSP_CMD_FLASH_ABORT); + if (rc) + log_simple_error(&e_info(OPAL_RC_CU_FLASH), "CUPD: " + "Code update abort command failed. (rc : %d).", rc); + +out: + return -1; +} + +static int64_t validate_sglist(struct opal_sg_list *list) +{ + struct opal_sg_list *sg; + struct opal_sg_entry *prev_entry, *entry; + int length, num_entries, i; + + prev_entry = NULL; + for (sg = list; sg; sg = (struct opal_sg_list*)be64_to_cpu(sg->next)) { + length = (be64_to_cpu(sg->length) & ~(SG_LIST_VERSION << 56)) - 16; + num_entries = length / sizeof(struct opal_sg_entry); + if (num_entries <= 0) + return -1; + + for (i = 0; i < num_entries; i++) { + entry = &sg->entry[i]; + + /* All entries must be aligned */ + if (((uint64_t)be64_to_cpu(entry->data)) & 0xfff) + return OPAL_PARAMETER; + + /* All non-terminal entries size must be aligned */ + if (prev_entry && (be64_to_cpu(prev_entry->length) & 0xfff)) + return OPAL_PARAMETER; + + prev_entry = entry; + } + } + return OPAL_SUCCESS; +} + +static int64_t fsp_opal_update_flash(struct opal_sg_list *list) +{ + struct opal_sg_entry *entry; + int length, num_entries, result = 0, rc = OPAL_PARAMETER; + + /* Ensure that the sg list honors our alignment requirements */ + rc = validate_sglist(list); + if (rc) { + log_simple_error(&e_info(OPAL_RC_CU_SG_LIST), + "CUPD: sglist fails alignment requirements\n"); + return rc; + } + + lock(&flash_lock); + if (!list) { /* Cancel update request */ + fsp_flash_term_hook = NULL; + image_data = NULL; + rc = OPAL_SUCCESS; + goto out; + } + + disable_fast_reboot("FSP Code Update"); + + length = (be64_to_cpu(list->length) & ~(SG_LIST_VERSION << 56)) - 16; + num_entries = length / sizeof(struct opal_sg_entry); + if (num_entries <= 0) + goto out; + + /* Validate image header */ + entry = &list->entry[0]; + rc = validate_candidate_image((uint64_t)be64_to_cpu(entry->data), + VALIDATE_BUF_SIZE, &result); + if (!rc && (result != VALIDATE_FLASH_AUTH && + result != VALIDATE_INVALID_IMG)) { + image_data = list; + fsp_flash_term_hook = fsp_flash_firmware; + goto out; + } + + /* Adjust return code */ + if (result == VALIDATE_FLASH_AUTH) + rc = OPAL_FLASH_NO_AUTH; + else if (result == VALIDATE_INVALID_IMG) + rc = OPAL_INVALID_IMAGE; + +out: + unlock(&flash_lock); + return rc; +} + +/* + * Code Update notifications + * + * Note: At present we just ACK these notifications. 
+ * Reset cached VPD data if we are going to support + * concurrent image maint in future. + */ +static bool code_update_notify(uint32_t cmd_sub_mod, struct fsp_msg *msg) +{ + int rc; + uint32_t cmd; + + switch(cmd_sub_mod) { + case FSP_CMD_FLASH_CACHE: + cmd = FSP_CMD_FLASH_CACHE_RSP; + prlog(PR_NOTICE, "CUPD: Update LID cache event [data = 0x%x]\n", + fsp_msg_get_data_word(msg, 0)); + break; + case FSP_CMD_FLASH_OUTC: + case FSP_CMD_FLASH_OUTR: + case FSP_CMD_FLASH_OUTS: + cmd = FSP_CMD_FLASH_OUT_RSP; + prlog(PR_NOTICE, "CUPD: Out of band commit notify " + "[Type = 0x%x]\n", (msg->word1 >> 8) & 0xff); + break; + default: + log_simple_error(&e_info(OPAL_RC_CU_NOTIFY), "CUPD: Unknown " + "notification [cmd = 0x%x]\n", cmd_sub_mod); + return false; + } + + rc = fsp_queue_msg(fsp_mkmsg(cmd, 0), fsp_freemsg); + if (rc) + log_simple_error(&e_info(OPAL_RC_CU_NOTIFY), "CUPD: Failed to " + "queue code update notification response :%d\n", rc); + + return true; +} + +/* + * Handle FSP R/R event. + * + * Note: + * If FSP R/R happens during code update, then entire system reboots + * and comes up with P side image (and T side image will be invalid). + * Hence we don't need to handle R/R during code update. + * + * Also if FSP R/R happens in init path (while retrieving in_flight_params) + * then system fails to continue booting (because we have not yet loaded + * all required data/LID from FSP). Hence we don't need to handle R/R + * for system params. + */ +static bool fsp_code_update_rr(uint32_t cmd_sub_mod, + struct fsp_msg *msg __unused) +{ + switch (cmd_sub_mod) { + case FSP_RESET_START: + lock(&flash_lock); + + if (code_update_check_state() == OPAL_BUSY) + flash_state = FLASH_STATE_ABORT; + + unlock(&flash_lock); + return true; + case FSP_RELOAD_COMPLETE: + lock(&flash_lock); + + /* Lets try to parse marker LID again, if we failed + * to parse marker LID last time. 
+ */ + if (code_update_check_state() == OPAL_INTERNAL_ERROR) + fetch_com_marker_lid(); + + unlock(&flash_lock); + return true; + } + return false; +} + +static struct fsp_client fsp_cupd_client_rr = { + .message = fsp_code_update_rr, +}; + +static struct fsp_client fsp_get_notify = { + .message = code_update_notify, +}; + +void fsp_code_update_init(void) +{ + if (!fsp_present()) { + flash_state = FLASH_STATE_ABSENT; + return; + } + + /* OPAL interface */ + opal_register(OPAL_FLASH_VALIDATE, fsp_opal_validate_flash, 3); + opal_register(OPAL_FLASH_MANAGE, fsp_opal_manage_flash, 1); + opal_register(OPAL_FLASH_UPDATE, fsp_opal_update_flash, 1); + + /* register Code Update Class D3 */ + fsp_register_client(&fsp_get_notify, FSP_MCLASS_CODE_UPDATE); + /* Register for Class AA (FSP R/R) */ + fsp_register_client(&fsp_cupd_client_rr, FSP_MCLASS_RR_EVENT); + + /* Register for firmware IPL side update notification */ + sysparam_add_update_notifier(fw_ipl_side_update_notify); + + /* Flash hook */ + fsp_flash_term_hook = NULL; + + /* Fetch various code update related sys parameters */ + get_ipl_side(); + get_code_update_policy(); + get_platform_hmc_managed(); + + /* Fetch common marker LID */ + lid_data = memalign(TCE_PSIZE, MARKER_LID_SIZE); + if (!lid_data) { + log_simple_error(&e_info(OPAL_RC_CU_INIT), + "CUPD: Failed to allocate memory for marker LID\n"); + flash_state = FLASH_STATE_ABSENT; + return; + } + fetch_com_marker_lid(); +} diff --git a/roms/skiboot/hw/fsp/fsp-codeupdate.h b/roms/skiboot/hw/fsp/fsp-codeupdate.h new file mode 100644 index 000000000..2b86619ef --- /dev/null +++ b/roms/skiboot/hw/fsp/fsp-codeupdate.h @@ -0,0 +1,222 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* Copyright 2013-2015 IBM Corp. */ + +#ifndef __CODEUPDATE_H +#define __CODEUPDATE_H + +/* Flash SG list version */ +#define SG_LIST_VERSION (1UL) + +/* LID size <= 16M */ +#define LID_MAX_SIZE 0x1000000 + +/* Delete all LIDs in */ +#define DEL_UPD_SIDE_LIDS 0xFFFFFFFF + +/* System parameter values used in code update validation */ +#define INBAND_UPDATE_ALLOWED 0x01 +#define PLATFORM_HMC_MANAGED 0x01 +#define FW_LICENSE_ACCEPT 0x01 + +/* Running image side */ +#define FW_IPL_SIDE_TEMP 0x01 +#define FW_IPL_SIDE_PERM 0x00 + +/* Manage operations */ +#define OPAL_REJECT_TMP_SIDE 0 +#define OPAL_COMMIT_TMP_SIDE 1 + +/* Validate image size */ +#define VALIDATE_BUF_SIZE 4096 + +/* Code update operation status */ +#define OPAL_INVALID_IMAGE -1003 /* Unacceptable image */ +#define OPAL_ACTIVE_SIDE_ERR -9001 +#define OPAL_FLASH_NO_AUTH -9002 + +/* Validate image update result tokens */ +#define VALIDATE_TMP_UPDATE 0 /* T side will be updated */ +#define VALIDATE_FLASH_AUTH 1 /* Partition does not have authority */ +#define VALIDATE_INVALID_IMG 2 /* Candidate image is not valid */ +#define VALIDATE_CUR_UNKNOWN 3 /* Current fixpack level is unknown */ +/* + * Current T side will be committed to P side before being replace with new + * image, and the new image is downlevel from current image + */ +#define VALIDATE_TMP_COMMIT_DL 4 +/* + * Current T side will be committed to P side before being replaced with new + * image + */ +#define VALIDATE_TMP_COMMIT 5 +/* + * T side will be updated with a downlevel image + */ +#define VALIDATE_TMP_UPDATE_DL 6 +/* + * The candidate image's release date is later than the system's firmware + * service entitlement date - service warranty period has expired + */ +#define VALIDATE_OUT_OF_WRNTY 7 + +/* default version */ +#define FW_VERSION_UNKNOWN "UNKNOWN" + +/* Actual size of MI 
& ML keyword including NULL */ +#define MI_KEYWORD_SIZE 10 +#define ML_KEYWORD_SIZE 9 + +/* Firmware image VPD data */ +struct fw_image_vpd { + char mi_keyword[MI_KEYWORD_SIZE]; /* NNSSS_FFF */ + char ext_fw_id[ML_KEYWORD_SIZE]; /* FWxxx.yy */ +}; + +/* Master LID header */ +struct master_lid_header { + char key[3]; /* "MLH" */ + uint8_t version; /* 0x02 */ + __be16 header_size; + __be16 entry_size; + uint8_t reserved[56]; +}; + +/* LID index entry */ +struct lid_index_entry { + __be32 id; + __be32 size; + __be32 offset; + __be32 crc; +}; + +/* SP flags */ +#define FW_ONE_OFF_SP 0x80000000 +#define FW_EMERGENCY_SP 0x40000000 + +/* + * SP GA date + * + * sp_flag addr = header->data + header->ext_fw_id_size + */ +struct update_image_ga_date { + __be32 sp_flag; + char sp_ga_date[8]; /* YYYYMMDD */ +}; + +/* Image magic number */ +#define IMAGE_MAGIC_NUMBER 0x5549 + +/* Image header structure */ +struct update_image_header { + __be16 magic; + __be16 version; + __be32 package_size; + __be32 crc; + __be16 lid_index_offset; + __be16 number_lids; + __be16 package_flags; + __be16 mi_keyword_size; + char mi_keyword_data[40]; + __be16 ext_fw_id_size; + /* Rest of the image data including ext fw id, sp flags */ + char data[]; +}; + +/* FipS header */ +struct fips_header { + __be16 magic; + __be16 version; + __be32 lid_id; + __be32 lid_date; /* YYYYMMDD */ + __be16 lid_time; /* HHMM */ + __be16 lid_class; + __be32 crc; + __be32 lid_size; /* Number of bytes below header */ + __be32 header_size; + uint8_t mtd_number; + uint8_t valid; /* 1 = valid, 0 = invalid */ + uint8_t reserved; + uint8_t lid_info_size; + char lid_info[64]; /* code level */ + __be32 update_date; /* YYYYMMDD */ + __be16 update_time; /* HHMM */ + __be16 phylum_len; + uint8_t lid_phylum[]; +}; + +/* Approximate LID size */ +#define MASTER_LID_SIZE 0x5000 +/* + * Note: + * Doc indicates non-SP LIDs size is 0-8MB. However + * in reality marker LID size less than 4k. Allocating + * 8k to give some breathing space. + */ +#define MARKER_LID_SIZE 0x00002000 + +/* Common marker LID no */ +#define P_COM_MARKER_LID_ID 0x80A00001 +#define T_COM_MARKER_LID_ID (P_COM_MARKER_LID_ID | ADJUST_T_SIDE_LID_NO) + +/* + * Common marker LID structure + * + * Note that we are populating only required sections, + * not all ADF sections in common marker LID. + */ +struct com_marker_header { + __be32 version; + __be32 MI_offset; /* Offset to MI section */ + __be32 iseries_offset; +}; + +/* MI Keyword section */ +struct com_marker_mi_section { + __be32 MI_size; + char mi_keyword[40]; /* MI Keyword */ + char lst_disrupt_fix_lvl[3]; + char skip[21]; /* Skip not interested fields */ + __be32 adf_offset; /* Offset to ADF section */ +}; + +/* Additional Data Fields */ +struct com_marker_adf_sec { + __be32 adf_cnt; /* ADF count */ + char adf_data[]; /* ADF data */ +}; + +/* ADF common header */ +struct com_marker_adf_header { + __be32 size; /* Section size */ + __be32 name; /* Section name */ +}; + +/* + * Service Pack Nomenclature ADF + * + * Service pack release name. + */ +#define ADF_NAME_SP 0x53504E4D /* SPNM */ +struct com_marker_adf_sp +{ + struct com_marker_adf_header header; + __be32 sp_name_offset; /* Offset from start of ADF */ + __be32 sp_name_size; + __be32 skip[4]; /* Skip rest of fields */ +}; + +/* + * Firmware IP Protection ADF + * + * Service Pack flags and GA date. 
+ */ +#define ADF_NAME_FW_IP 0x46495050 /* FIPP */ +struct com_marker_fw_ip { + struct com_marker_adf_header header; + __be32 sp_flag_offset; /* Offset from start of ADF */ + __be32 sp_flag_size; + __be32 sp_ga_offset; /* Offset from start of ADF*/ + __be32 sp_ga_size; +}; + +#endif /* __CODEUPDATE_H */ diff --git a/roms/skiboot/hw/fsp/fsp-console.c b/roms/skiboot/hw/fsp/fsp-console.c new file mode 100644 index 000000000..dc23ac46f --- /dev/null +++ b/roms/skiboot/hw/fsp/fsp-console.c @@ -0,0 +1,1062 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* + * Flexible Service Processor (FSP) serial console handling code + * + * Copyright 2013-2018 IBM Corp. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +DEFINE_LOG_ENTRY(OPAL_RC_CONSOLE_HANG, OPAL_PLATFORM_ERR_EVT, OPAL_CONSOLE, + OPAL_PLATFORM_FIRMWARE, + OPAL_PREDICTIVE_ERR_GENERAL, OPAL_NA); + +struct fsp_serbuf_hdr { + __be16 partition_id; + u8 session_id; + u8 hmc_id; + __be16 data_offset; + __be16 last_valid; + __be16 ovf_count; + __be16 next_in; + u8 flags; + u8 reserved; + __be16 next_out; + u8 data[]; +}; +#define SER_BUF_DATA_SIZE (0x10000 - sizeof(struct fsp_serbuf_hdr)) + +struct fsp_serial { + bool available; + bool open; + bool has_part0; + bool has_part1; + bool log_port; + bool out_poke; + char loc_code[LOC_CODE_SIZE]; + u16 rsrc_id; + struct fsp_serbuf_hdr *in_buf; + struct fsp_serbuf_hdr *out_buf; + struct fsp_msg *poke_msg; + u8 waiting; + u64 irq; + u16 out_buf_prev_len; + u64 out_buf_timeout; +}; + +#define SER_BUFFER_SIZE 0x00040000UL +#define MAX_SERIAL 4 + +#define SER_BUFFER_OUT_TIMEOUT 10 + +static struct fsp_serial fsp_serials[MAX_SERIAL]; +static bool got_intf_query; +static struct lock fsp_con_lock = LOCK_UNLOCKED; +static void* ser_buffer = NULL; + +static void fsp_console_reinit(void) +{ + int i; + void *base; + struct fsp_msg *msg; + + /* Initialize out data structure pointers & TCE maps */ + base = ser_buffer; + for (i = 0; i < MAX_SERIAL; i++) { + struct fsp_serial *ser = &fsp_serials[i]; + + ser->in_buf = base; + ser->out_buf = base + SER_BUFFER_SIZE/2; + base += SER_BUFFER_SIZE; + } + fsp_tce_map(PSI_DMA_SER0_BASE, ser_buffer, + 4 * PSI_DMA_SER0_SIZE); + + for (i = 0; i < MAX_SERIAL; i++) { + struct fsp_serial *fs = &fsp_serials[i]; + + if (!fs->available) + continue; + + if (fs->rsrc_id == 0xffff) + continue; + prlog(PR_DEBUG, "FSP: Reassociating HVSI console %d\n", i); + msg = fsp_mkmsg(FSP_CMD_ASSOC_SERIAL, 2, + (fs->rsrc_id << 16) | 1, i); + if (!msg) { + prerror("FSPCON: Failed to allocate associate msg\n"); + return; + } + if (fsp_queue_msg(msg, fsp_freemsg)) { + fsp_freemsg(msg); + prerror("FSPCON: Failed to queue associate msg\n"); + return; + } + } +} + +static void fsp_close_consoles(void) +{ + unsigned int i; + + for (i = 0; i < MAX_SERIAL; i++) { + struct fsp_serial *fs = &fsp_serials[i]; + + if (!fs->available) + continue; + + lock(&fsp_con_lock); + if (fs->open) { + fs->open = false; + fs->out_poke = false; + if (fs->poke_msg->state != fsp_msg_unused) + fsp_cancelmsg(fs->poke_msg); + fsp_freemsg(fs->poke_msg); + fs->poke_msg = NULL; + } + unlock(&fsp_con_lock); + } + prlog(PR_DEBUG, "FSPCON: Closed consoles due to FSP reset/reload\n"); +} + +static void fsp_pokemsg_reclaim(struct fsp_msg *msg) +{ + struct fsp_serial *fs = msg->user_data; + + /* + * The poke_msg might have been "detached" from the console + * in vserial_close, so we need to check whether it's current + * before touching the state, otherwise, 
just free it + */ + lock(&fsp_con_lock); + if (fs->open && fs->poke_msg == msg) { + if (fs->out_poke) { + if (fsp_queue_msg(fs->poke_msg, fsp_pokemsg_reclaim)) { + prerror("FSPCON: failed to queue poke msg\n"); + } else { + fs->out_poke = false; + } + } else + fs->poke_msg->state = fsp_msg_unused; + } else + fsp_freemsg(msg); + unlock(&fsp_con_lock); +} + +/* Called with the fsp_con_lock held */ +static size_t fsp_write_vserial(struct fsp_serial *fs, const char *buf, + size_t len) +{ + struct fsp_serbuf_hdr *sb = fs->out_buf; + u16 old_nin = be16_to_cpu(sb->next_in); + u16 space, chunk; + + if (!fs->open) + return 0; + + space = (be16_to_cpu(sb->next_out) + SER_BUF_DATA_SIZE - old_nin - 1) + % SER_BUF_DATA_SIZE; + if (space < len) + len = space; + if (!len) + return 0; + + chunk = SER_BUF_DATA_SIZE - old_nin; + if (chunk > len) + chunk = len; + memcpy(&sb->data[old_nin], buf, chunk); + if (chunk < len) + memcpy(&sb->data[0], buf + chunk, len - chunk); + lwsync(); + sb->next_in = cpu_to_be16((old_nin + len) % SER_BUF_DATA_SIZE); + sync(); + + if (be16_to_cpu(sb->next_out) == old_nin && fs->poke_msg) { + if (fs->poke_msg->state == fsp_msg_unused) { + if (fsp_queue_msg(fs->poke_msg, fsp_pokemsg_reclaim)) + prerror("FSPCON: poke msg queuing failed\n"); + } else + fs->out_poke = true; + } +#ifndef DISABLE_CON_PENDING_EVT + opal_update_pending_evt(OPAL_EVENT_CONSOLE_OUTPUT, + OPAL_EVENT_CONSOLE_OUTPUT); +#endif + return len; +} + +#ifdef DVS_CONSOLE +static int fsp_con_port = -1; +static bool fsp_con_full; + +/* + * This is called by the code in console.c without the con_lock + * held. However it can be called as the result of any printf + * thus any other lock might be held including possibly the + * FSP lock + */ +static size_t fsp_con_write(const char *buf, size_t len) +{ + size_t written; + + if (fsp_con_port < 0) + return 0; + + lock(&fsp_con_lock); + written = fsp_write_vserial(&fsp_serials[fsp_con_port], buf, len); + fsp_con_full = (written < len); + unlock(&fsp_con_lock); + + return written; +} + +static struct con_ops fsp_con_ops = { + .write = fsp_con_write, +}; +#endif /* DVS_CONSOLE */ + +static void fsp_open_vserial(struct fsp_msg *msg) +{ + struct fsp_msg *resp; + + u16 part_id = fsp_msg_get_data_word(msg, 0) & 0xffff; + u16 sess_id = fsp_msg_get_data_word(msg, 1) & 0xffff; + u8 hmc_sess = msg->data.bytes[0]; + u8 hmc_indx = msg->data.bytes[1]; + u8 authority = msg->data.bytes[4]; + u32 tce_in, tce_out; + struct fsp_serial *fs; + + prlog(PR_INFO, "FSPCON: Got VSerial Open\n"); + prlog(PR_DEBUG, " part_id = 0x%04x\n", part_id); + prlog(PR_DEBUG, " sess_id = 0x%04x\n", sess_id); + prlog(PR_DEBUG, " hmc_sess = 0x%02x\n", hmc_sess); + prlog(PR_DEBUG, " hmc_indx = 0x%02x\n", hmc_indx); + prlog(PR_DEBUG, " authority = 0x%02x\n", authority); + + if (sess_id >= MAX_SERIAL || !fsp_serials[sess_id].available) { + prlog(PR_WARNING, "FSPCON: 0x%04x NOT AVAILABLE!\n", sess_id); + resp = fsp_mkmsg(FSP_RSP_OPEN_VSERIAL | 0x2f, 0); + if (!resp) { + prerror("FSPCON: Response allocation failed\n"); + return; + } + if (fsp_queue_msg(resp, fsp_freemsg)) { + fsp_freemsg(resp); + prerror("FSPCON: Failed to queue response msg\n"); + } + return; + } + + fs = &fsp_serials[sess_id]; + + /* Hack ! On blades, the console opened via the mm has partition 1 + * while the debug DVS generally has partition 0 (though you can + * use what you want really). 
+ * We don't want a DVS open/close to crap on the blademm console + * thus if it's a raw console, gets an open with partID 1, we + * set a flag that ignores the close of partid 0 + */ + if (fs->rsrc_id == 0xffff) { + if (part_id == 0) + fs->has_part0 = true; + if (part_id == 1) + fs->has_part1 = true; + } + + tce_in = PSI_DMA_SER0_BASE + PSI_DMA_SER0_SIZE * sess_id; + tce_out = tce_in + SER_BUFFER_SIZE/2; + + lock(&fsp_con_lock); + if (fs->open) { + prlog(PR_DEBUG, " already open, skipping init !\n"); + unlock(&fsp_con_lock); + goto already_open; + } + + fs->poke_msg = fsp_mkmsg(FSP_CMD_VSERIAL_OUT, 2, + fsp_msg_get_data_word(msg, 0), + fsp_msg_get_data_word(msg, 1) & 0xffff); + if (fs->poke_msg == NULL) { + prerror("FSPCON: Failed to allocate poke_msg\n"); + unlock(&fsp_con_lock); + return; + } + + fs->open = true; + fs->poke_msg->user_data = fs; + + fs->in_buf->partition_id = fs->out_buf->partition_id = cpu_to_be16(part_id); + fs->in_buf->session_id = fs->out_buf->session_id = sess_id; + fs->in_buf->hmc_id = fs->out_buf->hmc_id = hmc_indx; + fs->in_buf->data_offset = fs->out_buf->data_offset = + cpu_to_be16(sizeof(struct fsp_serbuf_hdr)); + fs->in_buf->last_valid = fs->out_buf->last_valid = + cpu_to_be16(SER_BUF_DATA_SIZE - 1); + fs->in_buf->ovf_count = fs->out_buf->ovf_count = 0; + fs->in_buf->next_in = fs->out_buf->next_in = 0; + fs->in_buf->flags = fs->out_buf->flags = 0; + fs->in_buf->reserved = fs->out_buf->reserved = 0; + fs->in_buf->next_out = fs->out_buf->next_out = 0; + fs->out_buf_prev_len = 0; + fs->out_buf_timeout = 0; + unlock(&fsp_con_lock); + + already_open: + resp = fsp_mkmsg(FSP_RSP_OPEN_VSERIAL, 6, fsp_msg_get_data_word(msg, 0), + fsp_msg_get_data_word(msg, 1) & 0xffff, 0, tce_in, 0, tce_out); + if (!resp) { + prerror("FSPCON: Failed to allocate open msg response\n"); + return; + } + if (fsp_queue_msg(resp, fsp_freemsg)) { + fsp_freemsg(resp); + prerror("FSPCON: Failed to queue open msg response\n"); + return; + } + +#ifdef DVS_CONSOLE + prlog(PR_DEBUG, " log_port = %d\n", fs->log_port); + if (fs->log_port) { + fsp_con_port = sess_id; + sync(); + /* + * We mark the FSP lock as being in the console + * path. 
We do that only once, we never unmark it + * (there is really no much point) + */ + fsp_used_by_console(); + fsp_con_lock.in_con_path = true; + /* See comment in fsp_used_by_console */ + lock(&fsp_con_lock); + unlock(&fsp_con_lock); + set_console(&fsp_con_ops); + } +#endif +} + +static void fsp_close_vserial(struct fsp_msg *msg) +{ + u16 part_id = fsp_msg_get_data_word(msg, 0) & 0xffff; + u16 sess_id = fsp_msg_get_data_word(msg, 1) & 0xffff; + u8 hmc_sess = msg->data.bytes[0]; + u8 hmc_indx = msg->data.bytes[1]; + u8 authority = msg->data.bytes[4]; + struct fsp_serial *fs; + struct fsp_msg *resp; + + prlog(PR_INFO, "FSPCON: Got VSerial Close\n"); + prlog(PR_DEBUG, " part_id = 0x%04x\n", part_id); + prlog(PR_DEBUG, " sess_id = 0x%04x\n", sess_id); + prlog(PR_DEBUG, " hmc_sess = 0x%02x\n", hmc_sess); + prlog(PR_DEBUG, " hmc_indx = 0x%02x\n", hmc_indx); + prlog(PR_DEBUG, " authority = 0x%02x\n", authority); + + if (sess_id >= MAX_SERIAL || !fsp_serials[sess_id].available) { + prlog(PR_WARNING, "FSPCON: 0x%04x NOT AVAILABLE!\n", sess_id); + goto skip_close; + } + + fs = &fsp_serials[sess_id]; + + /* See "HACK" comment in open */ + if (fs->rsrc_id == 0xffff) { + if (part_id == 0) + fs->has_part0 = false; + if (part_id == 1) + fs->has_part1 = false; + if (fs->has_part0 || fs->has_part1) { + prlog(PR_DEBUG, " skipping close !\n"); + goto skip_close; + } + } + +#ifdef DVS_CONSOLE + if (fs->log_port) { + fsp_con_port = -1; + set_console(NULL); + } +#endif + + lock(&fsp_con_lock); + if (fs->open) { + fs->open = false; + fs->out_poke = false; + if (fs->poke_msg && fs->poke_msg->state == fsp_msg_unused) { + fsp_freemsg(fs->poke_msg); + fs->poke_msg = NULL; + } + } + unlock(&fsp_con_lock); + skip_close: + resp = fsp_mkmsg(FSP_RSP_CLOSE_VSERIAL, 2, fsp_msg_get_data_word(msg, 0), + fsp_msg_get_data_word(msg, 1) & 0xffff); + if (!resp) { + prerror("FSPCON: Failed to allocate close msg response\n"); + return; + } + if (fsp_queue_msg(resp, fsp_freemsg)) { + fsp_freemsg(resp); + prerror("FSPCON: Failed to queue close msg response\n"); + } +} + +static bool fsp_con_msg_hmc(u32 cmd_sub_mod, struct fsp_msg *msg) +{ + struct fsp_msg *resp; + + /* Associate response */ + if ((cmd_sub_mod >> 8) == 0xe08a) { + prlog(PR_TRACE, "FSPCON: Got associate response, status" + " 0x%02x\n", cmd_sub_mod & 0xff); + return true; + } + if ((cmd_sub_mod >> 8) == 0xe08b) { + prlog(PR_TRACE, "Got unassociate response, status 0x%02x\n", + cmd_sub_mod & 0xff); + return true; + } + switch(cmd_sub_mod) { + case FSP_CMD_OPEN_VSERIAL: + fsp_open_vserial(msg); + return true; + case FSP_CMD_CLOSE_VSERIAL: + fsp_close_vserial(msg); + return true; + case FSP_CMD_HMC_INTF_QUERY: + prlog(PR_DEBUG, "FSPCON: Got HMC interface query\n"); + got_intf_query = true; + resp = fsp_mkmsg(FSP_RSP_HMC_INTF_QUERY, 1, + fsp_msg_get_data_word(msg, 0) & 0x00ffffff); + if (!resp) { + prerror("FSPCON: Failed to allocate hmc intf response\n"); + return true; + } + if (fsp_queue_msg(resp, fsp_freemsg)) { + fsp_freemsg(resp); + prerror("FSPCON: Failed to queue hmc intf response\n"); + } + return true; + } + return false; +} + +static bool fsp_con_msg_vt(u32 cmd_sub_mod, struct fsp_msg *msg) +{ + u16 sess_id = fsp_msg_get_data_word(msg, 1) & 0xffff; + + if (cmd_sub_mod == FSP_CMD_VSERIAL_IN && sess_id < MAX_SERIAL) { + struct fsp_serial *fs = &fsp_serials[sess_id]; + + if (!fs->open) + return true; + + /* FSP is signaling some incoming data. 
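+ * (In practice this just raises OPAL_EVENT_CONSOLE_INPUT plus the
+ * per-console fs->irq event below; the host is then expected to drain
+ * the data through the console read path, which clears both events
+ * again once nothing is left pending.)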
We take the console + * lock to avoid racing with a simultaneous read, though we + * might want to consider to simplify all that locking into + * one single lock that covers the console and the pending + * events. + */ + lock(&fsp_con_lock); + opal_update_pending_evt(OPAL_EVENT_CONSOLE_INPUT, + OPAL_EVENT_CONSOLE_INPUT); + opal_update_pending_evt(fs->irq, fs->irq); + unlock(&fsp_con_lock); + } + return true; +} + +static bool fsp_con_msg_rr(u32 cmd_sub_mod, struct fsp_msg *msg) +{ + assert(msg == NULL); + + switch (cmd_sub_mod) { + case FSP_RESET_START: + fsp_close_consoles(); + return true; + case FSP_RELOAD_COMPLETE: + fsp_console_reinit(); + return true; + } + return false; +} + +static struct fsp_client fsp_con_client_hmc = { + .message = fsp_con_msg_hmc, +}; + +static struct fsp_client fsp_con_client_vt = { + .message = fsp_con_msg_vt, +}; + +static struct fsp_client fsp_con_client_rr = { + .message = fsp_con_msg_rr, +}; + +static void fsp_serial_add(int index, u16 rsrc_id, const char *loc_code, + bool log_port) +{ + struct fsp_serial *ser; + struct fsp_msg *msg; + + lock(&fsp_con_lock); + ser = &fsp_serials[index]; + + if (ser->available) { + unlock(&fsp_con_lock); + return; + } + + ser->rsrc_id = rsrc_id; + memset(ser->loc_code, 0x00, LOC_CODE_SIZE); + strncpy(ser->loc_code, loc_code, LOC_CODE_SIZE - 1); + ser->available = true; + ser->log_port = log_port; + unlock(&fsp_con_lock); + + /* DVS doesn't have that */ + if (rsrc_id != 0xffff) { + msg = fsp_mkmsg(FSP_CMD_ASSOC_SERIAL, 2, + (rsrc_id << 16) | 1, index); + if (!msg) { + prerror("FSPCON: Assoc serial alloc failed\n"); + return; + } + if (fsp_queue_msg(msg, fsp_freemsg)) { + fsp_freemsg(msg); + prerror("FSPCON: Assoc serial queue failed\n"); + return; + } + } +} + +void fsp_console_preinit(void) +{ + int i; + void *base; + + if (!fsp_present()) + return; + + ser_buffer = memalign(TCE_PSIZE, SER_BUFFER_SIZE * MAX_SERIAL); + + /* Initialize out data structure pointers & TCE maps */ + base = ser_buffer; + for (i = 0; i < MAX_SERIAL; i++) { + struct fsp_serial *ser = &fsp_serials[i]; + + ser->in_buf = base; + ser->out_buf = base + SER_BUFFER_SIZE/2; + base += SER_BUFFER_SIZE; + } + fsp_tce_map(PSI_DMA_SER0_BASE, ser_buffer, + 4 * PSI_DMA_SER0_SIZE); + + /* Register for class E0 and E1 */ + fsp_register_client(&fsp_con_client_hmc, FSP_MCLASS_HMC_INTFMSG); + fsp_register_client(&fsp_con_client_vt, FSP_MCLASS_HMC_VT); + fsp_register_client(&fsp_con_client_rr, FSP_MCLASS_RR_EVENT); + + /* Add DVS ports. We currently have session 0 and 3, 0 is for + * OS use. 3 is our debug port. We need to add those before + * we complete the OPL or we'll potentially miss the + * console setup on Firebird blades. 
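+ * (Hence the two fsp_serial_add() calls right below: session 0 is
+ * registered as "DVS_OS" for the operating system, while session 3 is
+ * registered as "DVS_FW" with log_port set, so it carries the
+ * firmware log output.)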
+ */ + fsp_serial_add(0, 0xffff, "DVS_OS", false); + op_display(OP_LOG, OP_MOD_FSPCON, 0x0001); + fsp_serial_add(3, 0xffff, "DVS_FW", true); + op_display(OP_LOG, OP_MOD_FSPCON, 0x0002); + +} + +static int64_t fsp_console_write(int64_t term_number, __be64 *__length, + const uint8_t *buffer) +{ + struct fsp_serial *fs; + size_t written, requested; + + if (term_number < 0 || term_number >= MAX_SERIAL) + return OPAL_PARAMETER; + fs = &fsp_serials[term_number]; + if (!fs->available || fs->log_port) + return OPAL_PARAMETER; + lock(&fsp_con_lock); + if (!fs->open) { + unlock(&fsp_con_lock); + return OPAL_CLOSED; + } + /* Clamp to a reasonable size */ + requested = be64_to_cpu(*__length); + if (requested > 0x1000) + requested = 0x1000; + written = fsp_write_vserial(fs, buffer, requested); + + if (written) { + /* If we wrote anything, reset timeout */ + fs->out_buf_prev_len = 0; + fs->out_buf_timeout = 0; + } + +#ifdef OPAL_DEBUG_CONSOLE_IO + prlog(PR_TRACE, "OPAL: console write req=%ld written=%ld" + " ni=%d no=%d\n", + requested, written, be16_to_cpu(fs->out_buf->next_in), + be16_to_cpu(fs->out_buf->next_out)); + prlog(PR_TRACE, " %02x %02x %02x %02x " + "%02x \'%c\' %02x \'%c\' %02x \'%c\'.%02x \'%c\'..\n", + buffer[0], buffer[1], buffer[2], buffer[3], + buffer[4], buffer[4], buffer[5], buffer[5], + buffer[6], buffer[6], buffer[7], buffer[7]); +#endif /* OPAL_DEBUG_CONSOLE_IO */ + + *__length = cpu_to_be64(written); + unlock(&fsp_con_lock); + + if (written) + return OPAL_SUCCESS; + + return OPAL_HARDWARE; +} + +static int64_t fsp_console_write_buffer_space(int64_t term_number, + __be64 *__length) +{ + static bool elog_generated = false; + struct fsp_serial *fs; + struct fsp_serbuf_hdr *sb; + int64_t length; + + if (term_number < 0 || term_number >= MAX_SERIAL) + return OPAL_PARAMETER; + fs = &fsp_serials[term_number]; + if (!fs->available || fs->log_port) + return OPAL_PARAMETER; + lock(&fsp_con_lock); + if (!fs->open) { + unlock(&fsp_con_lock); + return OPAL_CLOSED; + } + sb = fs->out_buf; + length = (be16_to_cpu(sb->next_out) + SER_BUF_DATA_SIZE + - be16_to_cpu(sb->next_in) - 1) + % SER_BUF_DATA_SIZE; + unlock(&fsp_con_lock); + + /* Console buffer has enough space to write incoming data */ + if (length != fs->out_buf_prev_len) { + fs->out_buf_prev_len = length; + fs->out_buf_timeout = 0; + + *__length = cpu_to_be64(length); + return OPAL_SUCCESS; + } + + /* + * Buffer is full, start internal timer. We will continue returning + * SUCCESS until timeout happens, hoping FSP will consume data within + * timeout period. + */ + if (fs->out_buf_timeout == 0) { + fs->out_buf_timeout = mftb() + + secs_to_tb(SER_BUFFER_OUT_TIMEOUT); + } + + if (tb_compare(mftb(), fs->out_buf_timeout) != TB_AAFTERB) { + *__length = cpu_to_be64(length); + return OPAL_SUCCESS; + } + + /* + * FSP is still active but not reading console data. Hence + * our console buffer became full. Most likely IPMI daemon + * on FSP is buggy. Lets log error and return OPAL_RESOURCE + * to payload (Linux). + */ + if (!elog_generated) { + elog_generated = true; + log_simple_error(&e_info(OPAL_RC_CONSOLE_HANG), "FSPCON: Console " + "buffer is full, dropping console data\n"); + } + + /* Timeout happened. 
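+ * Beyond this point the call keeps returning OPAL_RESOURCE, so the
+ * host is expected to discard the output it wanted to write rather
+ * than retry forever.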
Lets drop incoming data */ + return OPAL_RESOURCE; +} + +static int64_t fsp_console_read(int64_t term_number, __be64 *__length, + uint8_t *buffer) +{ + struct fsp_serial *fs; + struct fsp_serbuf_hdr *sb; + bool pending = false; + uint32_t old_nin, n, i, chunk, req = be64_to_cpu(*__length); + int rc = OPAL_SUCCESS; + + if (term_number < 0 || term_number >= MAX_SERIAL) + return OPAL_PARAMETER; + fs = &fsp_serials[term_number]; + if (!fs->available || fs->log_port) + return OPAL_PARAMETER; + lock(&fsp_con_lock); + if (!fs->open) { + rc = OPAL_CLOSED; + goto clr_flag; + } + if (fs->waiting) + fs->waiting = 0; + sb = fs->in_buf; + old_nin = be16_to_cpu(sb->next_in); + lwsync(); + n = (old_nin + SER_BUF_DATA_SIZE - be16_to_cpu(sb->next_out)) + % SER_BUF_DATA_SIZE; + if (n > req) { + pending = true; + n = req; + } + *__length = cpu_to_be64(n); + + chunk = SER_BUF_DATA_SIZE - be16_to_cpu(sb->next_out); + if (chunk > n) + chunk = n; + memcpy(buffer, &sb->data[be16_to_cpu(sb->next_out)], chunk); + if (chunk < n) + memcpy(buffer + chunk, &sb->data[0], n - chunk); + sb->next_out = cpu_to_be16(((be16_to_cpu(sb->next_out)) + n) % SER_BUF_DATA_SIZE); + +#ifdef OPAL_DEBUG_CONSOLE_IO + prlog(PR_TRACE, "OPAL: console read req=%d read=%d ni=%d no=%d\n", + req, n, be16_to_cpu(sb->next_in), be16_to_cpu(sb->next_out)); + prlog(PR_TRACE, " %02x %02x %02x %02x %02x %02x %02x %02x ...\n", + buffer[0], buffer[1], buffer[2], buffer[3], + buffer[4], buffer[5], buffer[6], buffer[7]); +#endif /* OPAL_DEBUG_CONSOLE_IO */ + +clr_flag: + /* Might clear the input pending flag */ + for (i = 0; i < MAX_SERIAL && !pending; i++) { + struct fsp_serial *fs = &fsp_serials[i]; + struct fsp_serbuf_hdr *sb = fs->in_buf; + + if (fs->log_port || !fs->open) + continue; + if (sb->next_out != sb->next_in) { + /* + * HACK: Some kernels (4.1+) may fail to properly + * register hvc1 and will never read it. This can lead + * to RCU stalls, so if we notice this console is not + * being read, do not set OPAL_EVENT_CONSOLE_INPUT even + * if it has data + */ + if (fs->waiting < 5) { + pending = true; + fs->waiting++; + } + } + } + if (!pending) { + opal_update_pending_evt(fs->irq, 0); + opal_update_pending_evt(OPAL_EVENT_CONSOLE_INPUT, 0); + } + + unlock(&fsp_con_lock); + + return rc; +} + +void fsp_console_poll(void *data __unused) +{ +#ifdef OPAL_DEBUG_CONSOLE_POLL + static int debug; +#endif + + /* + * We don't get messages for out buffer being consumed, so we + * need to poll. We also defer sending of poke messages from + * the sapphire console to avoid a locking nightmare with + * beging called from printf() deep into an existing lock nest + * stack. + */ + if (fsp_con_full || + (opal_pending_events & OPAL_EVENT_CONSOLE_OUTPUT)) { + unsigned int i; + bool pending = false; + + /* We take the console lock. 
This is somewhat inefficient + * but it guarantees we aren't racing with a write, and + * thus clearing an event improperly + */ + lock(&fsp_con_lock); + for (i = 0; i < MAX_SERIAL && !pending; i++) { + struct fsp_serial *fs = &fsp_serials[i]; + struct fsp_serbuf_hdr *sb = fs->out_buf; + + if (!fs->open) + continue; + if (sb->next_out == sb->next_in) { + continue; + } + if (fs->log_port) { + flush_console(); + } else { +#ifdef OPAL_DEBUG_CONSOLE_POLL + if (debug < 5) { + prlog(PR_DEBUG,"OPAL: %d still pending" + " ni=%d no=%d\n", + i, be16_to_cpu(sb->next_in), + be16_to_cpu(sb->next_out)); + debug++; + } +#endif /* OPAL_DEBUG_CONSOLE_POLL */ + pending = true; + } + } + if (!pending) { + opal_update_pending_evt(OPAL_EVENT_CONSOLE_OUTPUT, 0); +#ifdef OPAL_DEBUG_CONSOLE_POLL + debug = 0; +#endif + } + unlock(&fsp_con_lock); + } +} + +void fsp_console_init(void) +{ + struct dt_node *serials, *ser; + int i; + + if (!fsp_present()) + return; + + /* Wait until we got the intf query before moving on */ + while (!got_intf_query) + opal_run_pollers(); + + op_display(OP_LOG, OP_MOD_FSPCON, 0x0000); + + /* Register poller */ + opal_add_poller(fsp_console_poll, NULL); + + /* Register OPAL console backend */ + set_opal_console(&fsp_opal_con); + + /* Parse serial port data */ + serials = dt_find_by_path(dt_root, "ipl-params/fsp-serial"); + if (!serials) { + prerror("FSPCON: No FSP serial ports in device-tree\n"); + return; + } + + i = 1; + dt_for_each_child(serials, ser) { + u32 rsrc_id = dt_prop_get_u32(ser, "reg"); + const void *lc = dt_prop_get(ser, "ibm,loc-code"); + + prlog(PR_NOTICE, "FSPCON: Serial %d rsrc: %04x loc: %s\n", + i, rsrc_id, (const char *)lc); + fsp_serial_add(i++, rsrc_id, lc, false); + op_display(OP_LOG, OP_MOD_FSPCON, 0x0010 + i); + } + + op_display(OP_LOG, OP_MOD_FSPCON, 0x0005); +} + +static int64_t fsp_console_flush(int64_t terminal __unused) +{ + /* FIXME: There's probably something we can do here... */ + return OPAL_PARAMETER; +} + +struct opal_con_ops fsp_opal_con = { + .name = "FSP OPAL console", + .init = NULL, /* all the required setup is done in fsp_console_init() */ + .read = fsp_console_read, + .write = fsp_console_write, + .space = fsp_console_write_buffer_space, + .flush = fsp_console_flush, +}; + +static void flush_all_input(void) +{ + unsigned int i; + + lock(&fsp_con_lock); + for (i = 0; i < MAX_SERIAL; i++) { + struct fsp_serial *fs = &fsp_serials[i]; + struct fsp_serbuf_hdr *sb = fs->in_buf; + + if (fs->log_port) + continue; + + sb->next_out = sb->next_in; + } + unlock(&fsp_con_lock); +} + +static bool send_all_hvsi_close(void) +{ + unsigned int i; + bool has_hvsi = false; + static const uint8_t close_packet[] = { 0xfe, 6, 0, 1, 0, 3 }; + + for (i = 0; i < MAX_SERIAL; i++) { + struct fsp_serial *fs = &fsp_serials[i]; + struct fsp_serbuf_hdr *sb = fs->out_buf; + unsigned int space, timeout = 10; + + if (fs->log_port) + continue; + if (fs->rsrc_id == 0xffff) + continue; + has_hvsi = true; + + /* Do we have room ? 
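+ * (Illustration with made-up numbers: for next_in = 10, next_out = 4
+ * and a hypothetical SER_BUF_DATA_SIZE of 16, the expression below
+ * gives (4 + 16 - 10 - 1) % 16 = 9 free slots; one slot is always
+ * left unused so a full ring can be told apart from an empty one.)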
Wait a bit if not */ + while(timeout--) { + space = (be16_to_cpu(sb->next_out) + SER_BUF_DATA_SIZE - + be16_to_cpu(sb->next_in) - 1) % SER_BUF_DATA_SIZE; + if (space >= 6) + break; + time_wait_ms(500); + } + lock(&fsp_con_lock); + fsp_write_vserial(fs, close_packet, 6); + unlock(&fsp_con_lock); + } + + return has_hvsi; +} + +static void reopen_all_hvsi(void) +{ + unsigned int i; + + for (i = 0; i < MAX_SERIAL; i++) { + struct fsp_serial *fs = &fsp_serials[i]; + + if (!fs->available) + continue; + + if (fs->rsrc_id == 0xffff) + continue; + prlog(PR_NOTICE, "FSP: Deassociating HVSI console %d\n", i); + fsp_sync_msg(fsp_mkmsg(FSP_CMD_UNASSOC_SERIAL, 1, + (i << 16) | 1), true); + } + for (i = 0; i < MAX_SERIAL; i++) { + struct fsp_serial *fs = &fsp_serials[i]; + + if (!fs->available) + continue; + + if (fs->rsrc_id == 0xffff) + continue; + prlog(PR_NOTICE, "FSP: Reassociating HVSI console %d\n", i); + fsp_sync_msg(fsp_mkmsg(FSP_CMD_ASSOC_SERIAL, 2, + (fs->rsrc_id << 16) | 1, i), true); + } +} + +void fsp_console_reset(void) +{ + if (!fsp_present()) + return; + + prlog(PR_NOTICE, "FSP: Console reset !\n"); + + /* This is called on a fast-reset. To work around issues with HVSI + * initial negotiation, before we reboot the kernel, we flush all + * input and send an HVSI close packet. + */ + flush_all_input(); + + /* Returns false if there is no HVSI console */ + if (!send_all_hvsi_close()) + return; + + time_wait_ms(500); + + reopen_all_hvsi(); + +} + +void fsp_console_add_nodes(void) +{ + struct dt_node *opal_event; + unsigned int i; + + opal_event = dt_find_by_name(opal_node, "event"); + + for (i = 0; i < MAX_SERIAL; i++) { + struct fsp_serial *fs = &fsp_serials[i]; + struct dt_node *fs_node; + const char *type; + + if (fs->log_port || !fs->available) + continue; + + if (fs->rsrc_id == 0xffff) + type = "raw"; + else + type = "hvsi"; + + fs_node = add_opal_console_node(i, type, SER_BUF_DATA_SIZE); + + fs->irq = opal_dynamic_event_alloc(); + dt_add_property_cells(fs_node, "interrupts", ilog2(fs->irq)); + + if (opal_event) + dt_add_property_cells(fs_node, "interrupt-parent", + opal_event->phandle); + } +} + +void fsp_console_select_stdout(void) +{ + bool use_serial = false; + int rc; + u8 param; + + if (!fsp_present()) + return; + + rc = fsp_get_sys_param(SYS_PARAM_CONSOLE_SELECT, + ¶m, 1, NULL, NULL); + if (rc != 1) { + prerror("FSPCON: Failed to get console" + " sysparam rc %d\n", rc); + } else { + switch(param) { + case 0: + use_serial = false; + break; + case 1: + use_serial = true; + break; + default: + prerror("FSPCON: Unknown console" + " sysparam %d\n", param); + } + } + + dt_check_del_prop(dt_chosen, "linux,stdout-path"); + + if (fsp_serials[1].open && use_serial) { + dt_add_property_string(dt_chosen, "linux,stdout-path", + "/ibm,opal/consoles/serial@1"); + prlog(PR_NOTICE, "FSPCON: default console set to serial A\n"); + } else { + dt_add_property_string(dt_chosen, "linux,stdout-path", + "/ibm,opal/consoles/serial@0"); + prlog(PR_NOTICE, "FSPCON: default console set to SOL/DVS\n"); + } +} + diff --git a/roms/skiboot/hw/fsp/fsp-diag.c b/roms/skiboot/hw/fsp/fsp-diag.c new file mode 100644 index 000000000..d9101f31b --- /dev/null +++ b/roms/skiboot/hw/fsp/fsp-diag.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* + * Code for handling FSP_MCLASS_DIAG messages (cmd 0xee) + * Receiving a high level ack timeout is likely indicative of a firmware bug + * + * Copyright 2013-2014 IBM Corp. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +static bool fsp_diag_msg(u32 cmd_sub_mod, struct fsp_msg *msg) +{ + + if (cmd_sub_mod == FSP_RSP_DIAG_LINK_ERROR) { + printf("FIXME: Unhandled FSP_MCLASS_DIAG Link Error Report\n"); + return false; + } + + if (cmd_sub_mod != FSP_RSP_DIAG_ACK_TIMEOUT) { + printf("BUG: Unhandled subcommand: 0x%x (New FSP spec?)\n", + cmd_sub_mod); + return false; + } + + printf("BUG: High Level ACK timeout (FSP_MCLASS_DIAG) for 0x%x\n", + fsp_msg_get_data_word(msg, 0) & 0xffff0000); + + return true; +} + +static struct fsp_client fsp_diag = { + .message = fsp_diag_msg, +}; + +/* This is called at boot time */ +void fsp_init_diag(void) +{ + /* Register for the diag event */ + fsp_register_client(&fsp_diag, FSP_MCLASS_DIAG); +} diff --git a/roms/skiboot/hw/fsp/fsp-dpo.c b/roms/skiboot/hw/fsp/fsp-dpo.c new file mode 100644 index 000000000..91919f915 --- /dev/null +++ b/roms/skiboot/hw/fsp/fsp-dpo.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* + * FSP DPO (Delayed Power Off) event support + * + * Copyright 2013-2017 IBM Corp. + */ + +#define pr_fmt(fmt) "FSP-DPO: " fmt + +#include +#include +#include +#include +#include +#include + +#define DPO_CMD_SGN_BYTE0 0xf4 /* Byte[0] signature */ +#define DPO_CMD_SGN_BYTE1 0x20 /* Byte[1] signature */ +#define DPO_TIMEOUT 2700 /* 45 minutes in seconds */ + +bool fsp_dpo_pending; +static unsigned long fsp_dpo_init_tb; + +/* + * OPAL DPO interface + * + * Returns zero if DPO is not active, positive value indicating number + * of seconds remaining for a forced system shutdown. This will enable + * the host to schedule for shutdown voluntarily before timeout occurs. + */ +static int64_t fsp_opal_get_dpo_status(__be64 *dpo_timeout) +{ + if (!fsp_dpo_pending) { + *dpo_timeout = 0; + return OPAL_WRONG_STATE; + } + + *dpo_timeout = cpu_to_be64(DPO_TIMEOUT - tb_to_secs(mftb() - fsp_dpo_init_tb)); + return OPAL_SUCCESS; +} + +/* Process FSP DPO init message */ +static void fsp_process_dpo(struct fsp_msg *msg) +{ + struct fsp_msg *resp; + u32 cmd = FSP_RSP_INIT_DPO; + int rc; + + /* DPO message does not have the correct signatures */ + if ((msg->data.bytes[0] != DPO_CMD_SGN_BYTE0) + || (msg->data.bytes[1] != DPO_CMD_SGN_BYTE1)) { + prerror("Message signatures did not match\n"); + cmd |= FSP_STATUS_INVALID_CMD; + resp = fsp_mkmsg(cmd, 0); + if (resp == NULL) { + prerror("%s : Message allocation failed\n", __func__); + return; + } + if (fsp_queue_msg(resp, fsp_freemsg)) { + fsp_freemsg(resp); + prerror("%s : Failed to queue response " + "message\n", __func__); + } + return; + } + + /* OPAL is already in "DPO pending" state */ + if (fsp_dpo_pending) { + prlog(PR_INFO, "OPAL already in DPO pending state\n"); + cmd |= FSP_STATUS_INVALID_DPOSTATE; + resp = fsp_mkmsg(cmd, 0); + if (resp == NULL) { + prerror("%s : Message allocation failed\n", __func__); + return; + } + if (fsp_queue_msg(resp, fsp_freemsg)) { + fsp_freemsg(resp); + prerror("%s : Failed to queue response " + "message\n", __func__); + } + return; + } + + + /* Inform the host about DPO */ + rc = opal_queue_msg(OPAL_MSG_DPO, NULL, NULL); + if (rc) { + prerror("OPAL message queuing failed\n"); + cmd |= FSP_STATUS_GENERIC_ERROR; + resp = fsp_mkmsg(cmd, 0); + if (resp == NULL) { + prerror("%s : Message allocation failed\n", __func__); + return; + } + if (fsp_queue_msg(resp, fsp_freemsg)) { + fsp_freemsg(resp); + prerror("%s : Failed to queue response " + "message\n", __func__); + } + return; + } else + 
prlog(PR_INFO, "Notified host about DPO event\n"); + + /* Acknowledge the FSP on DPO */ + resp = fsp_mkmsg(cmd, 0); + if (resp == NULL) { + prerror("%s : Message allocation failed\n", __func__); + return; + } + if (fsp_queue_msg(resp, fsp_freemsg)) { + fsp_freemsg(resp); + prerror("%s : Failed to queue response message\n", __func__); + return; + } + + /* Record DPO init time and set DPO pending flag */ + fsp_dpo_init_tb = mftb(); + fsp_dpo_pending = true; + + /* + * OPAL is now in DPO pending state. After first detecting DPO + * condition from OPAL, the host will have 45 minutes to prepare + * the system for shutdown. The host must take all necessary actions + * required in that regard and at the end shutdown itself. The host + * shutdown sequence eventually will make the call OPAL_CEC_POWER_DOWN + * which in turn ask the FSP to shutdown the CEC. If the FSP does not + * receive the cec power down command from OPAL within 45 minutes, + * it will assume that the host and the OPAL has processed the DPO + * sequence successfully and hence force power off the system. + */ +} + +/* Handle DPO sub-command from FSP */ +static bool fsp_dpo_message(u32 cmd_sub_mod, struct fsp_msg *msg) +{ + if (cmd_sub_mod == FSP_CMD_INIT_DPO) { + prlog(PR_INFO, "Delayed Power Off (DPO) notification received\n"); + fsp_process_dpo(msg); + return true; + } + + return false; +} + +static struct fsp_client fsp_dpo_client = { + .message = fsp_dpo_message, +}; + +void fsp_dpo_init(void) +{ + fsp_register_client(&fsp_dpo_client, FSP_MCLASS_SERVICE); + opal_register(OPAL_GET_DPO_STATUS, fsp_opal_get_dpo_status, 1); + prlog(PR_INFO, "FSP DPO support initialized\n"); +} diff --git a/roms/skiboot/hw/fsp/fsp-dump.c b/roms/skiboot/hw/fsp/fsp-dump.c new file mode 100644 index 000000000..96cb45e6f --- /dev/null +++ b/roms/skiboot/hw/fsp/fsp-dump.c @@ -0,0 +1,916 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* + * Dump support: + * We get dump notification from different sources: + * - During system initialization via HDAT + * - During FSP reset/reload (FipS dump) + * - Dump available notification MBOX command (0xCE, 0x78, 0x00) + * + * To avoid complications, we keep list of dumps in a list and fetch + * them serially. + * + * Dump retrieve process: + * - Once we get notification from FSP we enqueue the dump ID and notify + * Linux via OPAL event notification. + * - Linux reads dump info and allocates required memory to fetch the dump + * and makes dump read call. + * - Sapphire fetches dump data from FSP. + * - Linux writes dump to disk and sends acknowledgement. + * - Sapphire acknowledges FSP. + * + * Copyright 2013-2015 IBM Corp. + */ + +#include +#include +#include +#include +#include +#include +#include + +/* + * Max outstanding dumps to retrieve + * + * Note: + * Dumps are serialized. We don't get notification for second + * dump of given type until we acknowledge first one. But we + * may get notification for different dump type. And our dump + * retrieval code is serialized. Hence we use list to keep + * track of outstanding dumps to be retrieved. 
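+ *
+ * As a rough, illustrative sketch (pseudo-code only, not part of this
+ * file), the host side of the handshake described above looks like:
+ *
+ *   OPAL_DUMP_INFO2 -> returns dump id, size and type
+ *   OPAL_DUMP_READ  -> host passes an SG list sized for "size",
+ *                      OPAL fetches the data from the FSP into it
+ *   OPAL_DUMP_ACK   -> record is freed and the next pending dump,
+ *                      if any, is notified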
+ */ +#define MAX_DUMP_RECORD 0x04 + +/* Max retry */ +#define FIPS_DUMP_MAX_RETRY 0x03 + +/* Dump type */ +#define DUMP_TYPE_FSP 0x01 +#define DUMP_TYPE_SYS 0x02 +#define DUMP_TYPE_SMA 0x03 + +/* Dump fetch size */ +#define DUMP_FETCH_SIZE_FSP 0x500000 +#define DUMP_FETCH_SIZE_SYS 0x400000 +#define DUMP_FETCH_SIZE_RES 0x200000 + +/* Params for Fips dump */ +#define FSP_DUMP_TOOL_TYPE "SYS " +#define FSP_DUMP_CLIENT_ID "SAPPHIRE_CLIENT" + +enum dump_state { + DUMP_STATE_ABSENT, /* No FSP dump */ + DUMP_STATE_NONE, /* No dump to retrieve */ + DUMP_STATE_NOTIFY, /* Notified Linux */ + DUMP_STATE_FETCHING, /* Dump retrieval is in progress */ + DUMP_STATE_FETCH, /* Dump retrieve complete */ + DUMP_STATE_PARTIAL, /* Partial read */ + DUMP_STATE_ABORTING, /* Aborting due to kexec */ +}; + +/* Pending dump list */ +struct dump_record { + uint8_t type; + uint32_t id; + uint32_t size; + struct list_node link; +}; + +/* List definations */ +static LIST_HEAD(dump_pending); +static LIST_HEAD(dump_free); + +/* Dump retrieve state */ +static enum dump_state dump_state = DUMP_STATE_NONE; + +/* Dump buffer SG list */ +static struct opal_sg_list *dump_data; +static struct dump_record *dump_entry; +static int64_t dump_offset; +static size_t fetch_remain; + +/* FipS dump retry count */ +static int retry_cnt; + +/* Protect list and dump retrieve state */ +static struct lock dump_lock = LOCK_UNLOCKED; + +/* Forward declaration */ +static int64_t fsp_opal_dump_init(uint8_t dump_type); +static int64_t fsp_dump_read(void); + +DEFINE_LOG_ENTRY(OPAL_RC_DUMP_INIT, OPAL_PLATFORM_ERR_EVT, OPAL_DUMP, + OPAL_PLATFORM_FIRMWARE, + OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_REBOOT, + OPAL_NA); + +DEFINE_LOG_ENTRY(OPAL_RC_DUMP_LIST, OPAL_PLATFORM_ERR_EVT, OPAL_DUMP, + OPAL_PLATFORM_FIRMWARE, + OPAL_INFO, + OPAL_NA); + +DEFINE_LOG_ENTRY(OPAL_RC_DUMP_ACK, OPAL_PLATFORM_ERR_EVT, OPAL_DUMP, + OPAL_PLATFORM_FIRMWARE, OPAL_INFO, + OPAL_NA); + +/* + * Helper functions + */ +static inline void update_dump_state(enum dump_state state) +{ + dump_state = state; +} + +static int64_t check_dump_state(void) +{ + switch (dump_state) { + case DUMP_STATE_ABSENT: + return OPAL_HARDWARE; + case DUMP_STATE_NONE: + case DUMP_STATE_NOTIFY: + /* During dump fetch, notify is wrong state */ + return OPAL_WRONG_STATE; + case DUMP_STATE_FETCHING: + case DUMP_STATE_ABORTING: + return OPAL_BUSY_EVENT; + case DUMP_STATE_FETCH: + return OPAL_SUCCESS; + case DUMP_STATE_PARTIAL: + return OPAL_PARTIAL; + } + return OPAL_SUCCESS; +} + +static inline void dump_tce_map(uint32_t tce_offset, + void *buffer, uint32_t size) +{ + uint32_t tlen = ALIGN_UP(size, TCE_PSIZE); + fsp_tce_map(PSI_DMA_DUMP_DATA + tce_offset, buffer, tlen); +} + +static inline void dump_tce_unmap(uint32_t size) +{ + fsp_tce_unmap(PSI_DMA_DUMP_DATA, size); +} + +/* + * Returns Data set ID for the given dump type + */ +static inline uint16_t get_dump_data_set_id(uint8_t type) +{ + switch (type) { + case DUMP_TYPE_FSP: + return FSP_DATASET_SP_DUMP; + case DUMP_TYPE_SYS: + return FSP_DATASET_HW_DUMP; + default: + break; + } + return OPAL_INTERNAL_ERROR; +} + +/* + * Returns max data we can fetch from FSP fetch data call + */ +static inline int64_t get_dump_fetch_max_size(uint8_t type) +{ + switch (type) { + case DUMP_TYPE_FSP: + return DUMP_FETCH_SIZE_FSP; + case DUMP_TYPE_SYS: + return DUMP_FETCH_SIZE_SYS; + default: + break; + } + return OPAL_INTERNAL_ERROR; +} + +/* + * Get dump record from pending list + */ +static inline struct dump_record *get_dump_rec_from_list(uint32_t id) +{ + struct 
dump_record *record; + + list_for_each(&dump_pending, record, link) { + if (record->id == id) + return record; + } + return NULL; +} + +/* + * New dump available notification to Linux + */ +static void update_opal_dump_notify(void) +{ + /* + * Wait until current dump retrieval to complete + * before notifying again. + */ + if (dump_state != DUMP_STATE_NONE) + return; + + /* More dump's to retrieve */ + if (!list_empty(&dump_pending)) { + update_dump_state(DUMP_STATE_NOTIFY); + opal_update_pending_evt(OPAL_EVENT_DUMP_AVAIL, + OPAL_EVENT_DUMP_AVAIL); + } +} + +static int64_t remove_dump_id_from_list(uint32_t dump_id) +{ + struct dump_record *record, *nxt_record; + int rc = OPAL_SUCCESS; + bool found = false; + + /* Remove record from pending list */ + list_for_each_safe(&dump_pending, record, nxt_record, link) { + if (record->id != dump_id) + continue; + + found = true; + list_del(&record->link); + list_add(&dump_free, &record->link); + break; + } + + /* + * Continue update_opal_dump_notify even if it fails + * to remove ID. So that we can resend notification + * for the same dump ID to Linux. + */ + if (!found) { /* List corrupted? */ + log_simple_error(&e_info(OPAL_RC_DUMP_LIST), + "DUMP: ID 0x%x not found in list!\n", + dump_id); + rc = OPAL_PARAMETER; + } + + /* Update state */ + update_dump_state(DUMP_STATE_NONE); + /* Notify next available dump to retrieve */ + update_opal_dump_notify(); + + return rc; +} + +static int64_t add_dump_id_to_list(uint8_t dump_type, + uint32_t dump_id, uint32_t dump_size) +{ + struct dump_record *record; + int rc = OPAL_SUCCESS; + + lock(&dump_lock); + + rc = check_dump_state(); + if (rc == OPAL_HARDWARE) + goto out; + + /* List is full ? */ + if (list_empty(&dump_free)) { + printf("DUMP: Dump ID 0x%x is not queued.\n", dump_id); + rc = OPAL_RESOURCE; + goto out; + } + + /* Already queued? 
*/ + record = get_dump_rec_from_list(dump_id); + if (record) { + rc = OPAL_SUCCESS; + goto out; + } + + /* Add to list */ + record = list_pop(&dump_free, struct dump_record, link); + record->type = dump_type; + record->id = dump_id; + record->size = dump_size; + list_add_tail(&dump_pending, &record->link); + + /* OPAL notification */ + update_opal_dump_notify(); + rc = OPAL_SUCCESS; + +out: + unlock(&dump_lock); + return rc; +} + +static void dump_init_complete(struct fsp_msg *msg) +{ + uint8_t status = (msg->resp->word1 >> 8) & 0xff; + + printf("DUMP: FipS dump init status = 0x%x\n", status); + fsp_freemsg(msg); + + switch (status) { + case FSP_STATUS_SUCCESS: + printf("DUMP: Initiated FipS dump.\n"); + break; + case FSP_STATUS_BUSY: /* Retry, if FSP is busy */ + if (retry_cnt++ < FIPS_DUMP_MAX_RETRY) + if (fsp_opal_dump_init(DUMP_TYPE_FSP) == OPAL_SUCCESS) + return; + break; + default: + break; + } + /* Reset max retry count */ + retry_cnt = 0; +} + +/* + * Initiate new FipS dump + */ +static int64_t fsp_opal_dump_init(uint8_t dump_type) +{ + struct fsp_msg *msg; + int rc = OPAL_SUCCESS; + uint32_t *tool_type = (void *)FSP_DUMP_TOOL_TYPE; + uint32_t *client_id = (void *)FSP_DUMP_CLIENT_ID; + + /* Only FipS dump generate request is supported */ + if (dump_type != DUMP_TYPE_FSP) + return OPAL_PARAMETER; + + msg = fsp_mkmsg(FSP_CMD_FSP_DUMP_INIT, 6, *tool_type, + sizeof(FSP_DUMP_CLIENT_ID), *client_id, + *(client_id + 1), *(client_id + 2), *(client_id + 3)); + + if (!msg) { + log_simple_error(&e_info(OPAL_RC_DUMP_INIT), + "DUMP: Message allocation failed.\n"); + rc = OPAL_INTERNAL_ERROR; + } else if (fsp_queue_msg(msg, dump_init_complete)) { + log_simple_error(&e_info(OPAL_RC_DUMP_INIT), + "DUMP: Failed to queue FipS dump init request.\n"); + fsp_freemsg(msg); + rc = OPAL_INTERNAL_ERROR; + } + + return rc; +} + +/* + * OPAL interface to send dump information to Linux. + */ +static int64_t fsp_opal_dump_info2(__be32 *dump_id, __be32 *dump_size, + __be32 *dump_type) +{ + struct dump_record *record; + int rc = OPAL_SUCCESS; + + lock(&dump_lock); + + /* Clear notification */ + opal_update_pending_evt(OPAL_EVENT_DUMP_AVAIL, 0); + + record = list_top(&dump_pending, struct dump_record, link); + if (!record) { /* List corrupted? 
*/ + update_dump_state(DUMP_STATE_NONE); + rc = OPAL_INTERNAL_ERROR; + goto out; + } + *dump_id = cpu_to_be32(record->id); + *dump_size = cpu_to_be32(record->size); + *dump_type = cpu_to_be32(record->type); + +out: + unlock(&dump_lock); + return rc; +} + +static int64_t fsp_opal_dump_info(__be32 *dump_id, __be32 *dump_size) +{ + __be32 dump_type; + return fsp_opal_dump_info2(dump_id, dump_size, &dump_type); +} + +static int64_t validate_dump_sglist(struct opal_sg_list *list, + int64_t *size) +{ + struct opal_sg_list *sg; + struct opal_sg_entry *prev_entry, *entry; + int length, num_entries, i; + + prev_entry = NULL; + *size = 0; + for (sg = list; sg; sg = (struct opal_sg_list*)be64_to_cpu(sg->next)) { + length = be64_to_cpu(sg->length) - 16; + num_entries = length / sizeof(struct opal_sg_entry); + if (num_entries <= 0) + return OPAL_PARAMETER; + + for (i = 0; i < num_entries; i++) { + entry = &sg->entry[i]; + *size += be64_to_cpu(entry->length); + + /* All entries must be aligned */ + if (((uint64_t)be64_to_cpu(entry->data)) & 0xfff) + return OPAL_PARAMETER; + + /* All non-terminal entries size must be aligned */ + if (prev_entry && (be64_to_cpu(prev_entry->length) & 0xfff)) + return OPAL_PARAMETER; + + prev_entry = entry; + } + } + return OPAL_SUCCESS; +} + +/* + * Map dump buffer to TCE buffer + */ +static int64_t map_dump_buffer(void) +{ + struct opal_sg_list *sg; + struct opal_sg_entry *entry; + int64_t fetch_max; + int length, num_entries, i; + int buf_off, fetch_off, tce_off, sg_off; + bool last = false; + + /* FSP fetch max size */ + fetch_max = get_dump_fetch_max_size(dump_entry->type); + if (fetch_max > (dump_entry->size - dump_offset)) + fetch_remain = dump_entry->size - dump_offset; + else + fetch_remain = fetch_max; + + /* offsets */ + fetch_off = fetch_remain; + tce_off = sg_off = 0; + + for (sg = dump_data; sg; sg = (struct opal_sg_list*)be64_to_cpu(sg->next)) { + num_entries = (be64_to_cpu(sg->length) - 16) / + sizeof(struct opal_sg_entry); + if (num_entries <= 0) + return OPAL_PARAMETER; + + for (i = 0; i < num_entries; i++) { + entry = &sg->entry[i]; + + /* Continue until we get offset */ + if ((sg_off + be64_to_cpu(entry->length)) < dump_offset) { + sg_off += be64_to_cpu(entry->length); + continue; + } + + /* + * SG list entry size can be more than 4k. + * Map only required pages, instead of + * mapping entire entry. 
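+ * (For instance, with a hypothetical 64kB entry and dump_offset
+ * landing 5kB into it, buf_off below rounds down to 4kB and only the
+ * remaining 60kB of that entry are TCE-mapped.)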
+ */ + if (!tce_off) { + buf_off = (dump_offset - sg_off) & ~0xfff; + length = be64_to_cpu(entry->length) - buf_off; + } else { + buf_off = 0; + length = be64_to_cpu(entry->length); + } + + /* Adjust length for last mapping */ + if (fetch_off <= length) { + length = fetch_off; + last = true; + } + + /* Adjust offset */ + sg_off += be64_to_cpu(entry->length); + fetch_off -= length; + + /* TCE mapping */ + dump_tce_map(tce_off, (void*)(be64_to_cpu(entry->data) + buf_off), length); + tce_off += length; + + /* TCE mapping complete */ + if (last) + return OPAL_SUCCESS; + } + } /* outer loop */ + return OPAL_PARAMETER; +} + +static void dump_read_complete(struct fsp_msg *msg) +{ + void *buffer; + size_t length, offset; + int rc; + uint32_t dump_id; + uint16_t id; + uint8_t flags, status; + bool compl = false; + + status = (msg->resp->word1 >> 8) & 0xff; + flags = (fsp_msg_get_data_word(msg, 0) >> 16) & 0xff; + id = fsp_msg_get_data_word(msg, 0) & 0xffff; + dump_id = fsp_msg_get_data_word(msg, 1); + offset = fsp_msg_get_data_word(msg->resp, 1); + length = fsp_msg_get_data_word(msg->resp, 2); + + fsp_freemsg(msg); + + lock(&dump_lock); + + if (dump_state == DUMP_STATE_ABORTING) { + printf("DUMP: Fetch dump aborted, ID = 0x%x\n", dump_id); + dump_tce_unmap(PSI_DMA_DUMP_DATA_SIZE); + update_dump_state(DUMP_STATE_NONE); + goto bail; + } + + switch (status) { + case FSP_STATUS_SUCCESS: /* Fetch next dump block */ + if (dump_offset < dump_entry->size) { + dump_tce_unmap(PSI_DMA_DUMP_DATA_SIZE); + rc = fsp_dump_read(); + if (rc == OPAL_SUCCESS) + goto bail; + } else { /* Dump read complete */ + compl = true; + } + break; + case FSP_STATUS_MORE_DATA: /* More data to read */ + offset += length; + buffer = (void *)PSI_DMA_DUMP_DATA + offset; + fetch_remain -= length; + + rc = fsp_fetch_data_queue(flags, id, dump_id, offset, buffer, + &fetch_remain, dump_read_complete); + if (rc == OPAL_SUCCESS) + goto bail; + break; + default: + break; + } + + dump_tce_unmap(PSI_DMA_DUMP_DATA_SIZE); + + /* Update state */ + if (compl) { + printf("DUMP: Fetch dump success. ID = 0x%x\n", dump_id); + update_dump_state(DUMP_STATE_FETCH); + } else { + printf("DUMP: Fetch dump partial. ID = 0x%x\n", dump_id); + update_dump_state(DUMP_STATE_PARTIAL); + } + bail: + unlock(&dump_lock); +} + +/* + * Fetch dump data from FSP + */ +static int64_t fsp_dump_read(void) +{ + int64_t rc; + uint16_t data_set; + uint8_t flags = 0x00; + + /* Get data set ID */ + data_set = get_dump_data_set_id(dump_entry->type); + + /* Map TCE buffer */ + rc = map_dump_buffer(); + if (rc != OPAL_SUCCESS) { + printf("DUMP: TCE mapping failed\n"); + return rc; + } + + printf("DUMP: Fetch Dump. ID = %02x, sub ID = %08x, len = %ld\n", + data_set, dump_entry->id, fetch_remain); + + /* Fetch data */ + rc = fsp_fetch_data_queue(flags, data_set, dump_entry->id, + dump_offset, (void *)PSI_DMA_DUMP_DATA, + &fetch_remain, dump_read_complete); + + /* Adjust dump fetch offset */ + dump_offset += fetch_remain; + + return rc; +} + +static int64_t fsp_opal_dump_read(uint32_t dump_id, + struct opal_sg_list *list) +{ + struct dump_record *record; + int64_t rc, size; + + lock(&dump_lock); + + /* Check state */ + if (dump_state != DUMP_STATE_NOTIFY) { + rc = check_dump_state(); + goto out; + } + + /* Validate dump ID */ + record = get_dump_rec_from_list(dump_id); + if (!record) { /* List corrupted? 
*/ + rc = OPAL_INTERNAL_ERROR; + goto out; + } + + /* Validate dump buffer and size */ + rc = validate_dump_sglist(list, &size); + if (rc != OPAL_SUCCESS) { + printf("DUMP: SG list validation failed\n"); + goto out; + } + + if (size < record->size) { /* Insuffient buffer */ + printf("DUMP: Insufficient buffer\n"); + rc = OPAL_PARAMETER; + goto out; + } + + /* Update state */ + update_dump_state(DUMP_STATE_FETCHING); + + /* Fetch dump data */ + dump_entry = record; + dump_data = list; + dump_offset = 0; + rc = fsp_dump_read(); + if (rc != OPAL_SUCCESS) + goto out; + + /* Check status after initiating fetch data */ + rc = check_dump_state(); + +out: + unlock(&dump_lock); + return rc; +} + +static void dump_ack_complete(struct fsp_msg *msg) +{ + uint8_t status = (msg->resp->word1 >> 8) & 0xff; + + if (status) + log_simple_error(&e_info(OPAL_RC_DUMP_ACK), + "DUMP: ACK failed for ID: 0x%x\n", + fsp_msg_get_data_word(msg, 0)); + else + printf("DUMP: ACKed dump ID: 0x%x\n", fsp_msg_get_data_word(msg, 0)); + + fsp_freemsg(msg); +} + +/* + * Acknowledge dump + */ +static int64_t fsp_opal_dump_ack(uint32_t dump_id) +{ + struct dump_record *record; + struct fsp_msg *msg; + int rc; + uint32_t cmd; + uint8_t dump_type = 0; + + /* Get dump type */ + lock(&dump_lock); + record = get_dump_rec_from_list(dump_id); + if (record) + dump_type = record->type; + + /* + * Next available dump in pending list will be of different + * type. Hence we don't need to wait for ack complete. + * + * Note: + * This allows us to proceed even if we fail to ACK. + * In the worst case we may get notification for the + * same dump again, which is probably better than + * looping forever. + */ + rc = remove_dump_id_from_list(dump_id); + if (rc != OPAL_SUCCESS) /* Invalid dump id */ + goto out; + + /* Adjust mod value */ + cmd = FSP_CMD_ACK_DUMP | (dump_type & 0xff); + msg = fsp_mkmsg(cmd, 1, dump_id); + if (!msg) { + log_simple_error(&e_info(OPAL_RC_DUMP_ACK), + "DUMP: Message allocation failed.!\n"); + rc = OPAL_INTERNAL_ERROR; + } else if (fsp_queue_msg(msg, dump_ack_complete)) { + log_simple_error(&e_info(OPAL_RC_DUMP_ACK), + "DUMP: Failed to queue dump ack message.\n"); + fsp_freemsg(msg); + rc = OPAL_INTERNAL_ERROR; + } +out: + unlock(&dump_lock); + return rc; +} + +/* Resend dump available notification */ +static int64_t fsp_opal_dump_resend_notification(void) +{ + lock(&dump_lock); + + if (dump_state != DUMP_STATE_ABSENT) + update_dump_state(DUMP_STATE_NONE); + + update_opal_dump_notify(); + + unlock(&dump_lock); + + return OPAL_SUCCESS; +} + +/* + * Handle FSP R/R event. + */ +static bool fsp_dump_retrieve_rr(uint32_t cmd_sub_mod, + struct fsp_msg *msg __unused) +{ + switch (cmd_sub_mod) { + case FSP_RESET_START: + lock(&dump_lock); + /* Reset dump state */ + if (dump_state == DUMP_STATE_FETCHING) + update_dump_state(DUMP_STATE_ABORTING); + unlock(&dump_lock); + return true; + case FSP_RELOAD_COMPLETE: + lock(&dump_lock); + + /* Reset TCE mapping */ + dump_tce_unmap(PSI_DMA_DUMP_DATA_SIZE); + + /* Reset dump state */ + update_dump_state(DUMP_STATE_NONE); + + /* + * For now keeping R/R handler simple. In the worst case + * we may endup resending dump available notification for + * same dump ID twice to Linux. + */ + update_opal_dump_notify(); + unlock(&dump_lock); + return true; + } + return false; +} + +/* + * Handle host kexec'ing scenarios + */ +static bool opal_kexec_dump_notify(void *data __unused) +{ + bool ready = true; + + lock(&dump_lock); + + /* Dump retrieve is in progress? 
*/ + if (dump_state == DUMP_STATE_FETCHING) + dump_state = DUMP_STATE_ABORTING; + + /* Not yet safe to kexec */ + if (dump_state == DUMP_STATE_ABORTING) + ready = false; + + unlock(&dump_lock); + + return ready; +} + +/* + * FipS dump notification + */ +void fsp_fips_dump_notify(uint32_t dump_id, uint32_t dump_size) +{ + printf("DUMP: FipS dump available. ID = 0x%x [size: %d bytes]\n", + dump_id, dump_size); + add_dump_id_to_list(DUMP_TYPE_FSP, dump_id, dump_size); +} + +/* + * System/Platform dump notification + */ +static bool fsp_sys_dump_notify(uint32_t cmd_sub_mod, struct fsp_msg *msg) +{ + /* + * Though spec says mod 00 is deprecated we still + * seems to get mod 00 notification (at least on + * P7 machine). + */ + if (cmd_sub_mod != FSP_RSP_SYS_DUMP && + cmd_sub_mod != FSP_RSP_SYS_DUMP_OLD) + return false; + + printf("DUMP: Platform dump available. ID = 0x%x [size: %d bytes]\n", + fsp_msg_get_data_word(msg, 0), fsp_msg_get_data_word(msg, 1)); + + add_dump_id_to_list(DUMP_TYPE_SYS, + fsp_msg_get_data_word(msg, 0), + fsp_msg_get_data_word(msg, 1)); + return true; +} + +/* + * If platform dump available during IPL time, then we + * get notification via HDAT. Check for DT for the dump + * presence. + */ +static void check_ipl_sys_dump(void) +{ + struct dt_node *dump_node, *opal_node; + uint32_t dump_id, dump_size; + + if (proc_gen >= proc_gen_p9) { + opal_node = dt_find_by_path(dt_root, "ibm,opal"); + if (!opal_node) + return; + dump_node = dt_find_by_path(opal_node, "dump"); + if (dump_node) { + if (dt_find_property(dump_node, "mpipl-boot")) + return; + } + } + + dump_node = dt_find_by_path(dt_root, "ipl-params/platform-dump"); + if (!dump_node) + return; + + if (!dt_find_property(dump_node, "dump-id")) + return; + + dump_id = dt_prop_get_u32(dump_node, "dump-id"); + dump_size = (uint32_t)dt_prop_get_u64(dump_node, "total-size"); + + printf("DUMP: Platform dump present during IPL.\n"); + printf(" ID = 0x%x [size: %d bytes]\n", dump_id, dump_size); + + add_dump_id_to_list(DUMP_TYPE_SYS, dump_id, dump_size); +} + +/* + * Allocate and initialize dump list + */ +static int init_dump_free_list(void) +{ + struct dump_record *entry; + int i; + + entry = zalloc(sizeof(struct dump_record) * MAX_DUMP_RECORD); + if (!entry) { + log_simple_error(&e_info(OPAL_RC_DUMP_INIT), + "DUMP: Out of memory\n"); + return -ENOMEM; + } + + for (i = 0; i < MAX_DUMP_RECORD; i++) { + list_add_tail(&dump_free, &entry->link); + entry++; + } + return 0; +} + +static struct fsp_client fsp_sys_dump_client = { + .message = fsp_sys_dump_notify, +}; + +static struct fsp_client fsp_dump_client_rr = { + .message = fsp_dump_retrieve_rr, +}; + +void fsp_dump_init(void) +{ + if (!fsp_present()) { + update_dump_state(DUMP_STATE_ABSENT); + return; + } + + /* Initialize list */ + if (init_dump_free_list() != 0) { + update_dump_state(DUMP_STATE_ABSENT); + return; + } + + /* Register for Class CE */ + fsp_register_client(&fsp_sys_dump_client, FSP_MCLASS_SERVICE); + /* Register for Class AA (FSP R/R) */ + fsp_register_client(&fsp_dump_client_rr, FSP_MCLASS_RR_EVENT); + + /* Register for sync on host reboot call */ + opal_add_host_sync_notifier(opal_kexec_dump_notify, NULL); + + /* OPAL interface */ + opal_register(OPAL_DUMP_INIT, fsp_opal_dump_init, 1); + opal_register(OPAL_DUMP_INFO, fsp_opal_dump_info, 2); + opal_register(OPAL_DUMP_INFO2, fsp_opal_dump_info2, 3); + opal_register(OPAL_DUMP_READ, fsp_opal_dump_read, 2); + opal_register(OPAL_DUMP_ACK, fsp_opal_dump_ack, 1); + opal_register(OPAL_DUMP_RESEND, 
fsp_opal_dump_resend_notification, 0); + + /* Check for platform dump presence during IPL time */ + check_ipl_sys_dump(); +} diff --git a/roms/skiboot/hw/fsp/fsp-elog-read.c b/roms/skiboot/hw/fsp/fsp-elog-read.c new file mode 100644 index 000000000..bd23ffbe8 --- /dev/null +++ b/roms/skiboot/hw/fsp/fsp-elog-read.c @@ -0,0 +1,608 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* + * This code will enable retrieving of error log from FSP -> Sapphire in + * sequence. + * Here, FSP would send next log only when Sapphire sends a new log notification + * response to FSP. On Completion of reading the log from FSP, + * OPAL_EVENT_ERROR_LOG_AVAIL is signaled. This will remain raised until a call + * to opal_elog_read() is made and OPAL_SUCCESS is returned. Upon which, the + * operation is complete and the event is cleared. This is READ action from FSP. + * + * Copyright 2013-2017 IBM Corp. + */ + +/* + * Design of READ error log : + * When we receive a new error log entry notification from FSP, we queue it into + * the "pending" list. If the "pending" list is not empty, then we start + * fetching log from FSP. + * + * When Linux reads a log entry, we dequeue it from the "pending" list and + * enqueue it to another "processed" list. At this point, if the "pending" + * list is not empty, we continue to fetch the next log. + * + * When Linux calls opal_resend_pending_logs(), we fetch the log corresponding + * to the head of the pending list and move it to the processed list, and + * continue this process until the pending list is empty. If the pending list + * was empty earlier and is currently non-empty, we initiate an error log fetch. + * + * When Linux acks an error log, we remove it from processed list. + */ + +#include +#include +#include +#include +#include +#include +#include + +/* + * Maximum number of entries that are pre-allocated + * to keep track of pending elogs to be fetched. + */ +#define ELOG_READ_MAX_RECORD 128 + +/* Structure to maintain log-id, log-size, pending and processed list. */ +struct fsp_log_entry { + uint32_t log_id; + size_t log_size; + struct list_node link; +}; + +static LIST_HEAD(elog_read_pending); +static LIST_HEAD(elog_read_processed); +static LIST_HEAD(elog_read_free); +/* + * Lock is used to protect overwriting of processed and pending list + * and also used while updating state of each log. + */ +static struct lock elog_read_lock = LOCK_UNLOCKED; + +#define ELOG_READ_BUFFER_SIZE 0x00004000 +/* Log buffer to copy FSP log for read */ +static void *elog_read_buffer; +static uint32_t elog_head_id; /* FSP entry ID */ +static size_t elog_head_size; /* Actual FSP log size */ +static uint32_t elog_read_retries; /* Bad response status count */ + +/* Initialize the state of the log */ +static enum elog_head_state elog_read_from_fsp_head_state = ELOG_STATE_NONE; + +static bool elog_enabled = false; + +/* Need forward declaration because of circular dependency. */ +static void fsp_elog_queue_fetch(void); + +/* + * Check the response message for mbox acknowledgement + * command send to FSP. + */ +static void fsp_elog_ack_complete(struct fsp_msg *msg) +{ + uint8_t val; + + val = (msg->resp->word1 >> 8) & 0xff; + if (val != 0) + prerror("ELOG: Acknowledgement error\n"); + + fsp_freemsg(msg); +} + +/* Send error log PHYP acknowledgement to FSP with entry ID. 
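+ * The ack is queued asynchronously; fsp_elog_ack_complete() above only
+ * checks the FSP status byte and frees the message.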
*/ +static int64_t fsp_send_elog_ack(uint32_t log_id) +{ + struct fsp_msg *ack_msg; + + ack_msg = fsp_mkmsg(FSP_CMD_ERRLOG_PHYP_ACK, 1, log_id); + if (!ack_msg) { + prerror("ELOG: Failed to allocate ack message\n"); + return OPAL_INTERNAL_ERROR; + } + + if (fsp_queue_msg(ack_msg, fsp_elog_ack_complete)) { + fsp_freemsg(ack_msg); + ack_msg = NULL; + prerror("ELOG: Error queueing elog ack complete\n"); + return OPAL_INTERNAL_ERROR; + } + + return OPAL_SUCCESS; +} + +/* Retrieve error log from FSP with TCE for the data transfer. */ +static void fsp_elog_check_and_fetch_head(void) +{ + lock(&elog_read_lock); + if (elog_read_from_fsp_head_state != ELOG_STATE_NONE || + list_empty(&elog_read_pending)) { + unlock(&elog_read_lock); + return; + } + + elog_read_retries = 0; + /* Start fetching first entry from the pending list */ + fsp_elog_queue_fetch(); + unlock(&elog_read_lock); +} + +void elog_set_head_state(bool opal_logs, enum elog_head_state state) +{ + static enum elog_head_state opal_logs_state = ELOG_STATE_NONE; + static enum elog_head_state fsp_logs_state = ELOG_STATE_NONE; + + /* ELOG disabled */ + if (!elog_enabled) + return; + + if (opal_logs) + opal_logs_state = state; + else + fsp_logs_state = state; + + if (fsp_logs_state == ELOG_STATE_FETCHED_DATA || + opal_logs_state == ELOG_STATE_FETCHED_DATA) + opal_update_pending_evt(OPAL_EVENT_ERROR_LOG_AVAIL, + OPAL_EVENT_ERROR_LOG_AVAIL); + else + opal_update_pending_evt(OPAL_EVENT_ERROR_LOG_AVAIL, 0); +} + +/* This function should be called with the lock held. */ +static inline void fsp_elog_set_head_state(enum elog_head_state state) +{ + elog_set_head_state(false, state); + elog_read_from_fsp_head_state = state; +} + +/* + * When, we try maximum time of fetching log from FSP + * we call following function to delete log from the + * pending list and update the state to fetch next log. + * + * This function should be called with the lock held. + */ +static void fsp_elog_fetch_failure(uint8_t fsp_status) +{ + struct fsp_log_entry *log_data; + + /* Read top list and delete the node */ + log_data = list_top(&elog_read_pending, struct fsp_log_entry, link); + if (!log_data) { + /** + * @fwts-label ElogFetchFailureInconsistent + * @fwts-advice Inconsistent state between OPAL and FSP + * in code path for handling failure of fetching error log + * from FSP. Likely a bug in interaction between FSP and OPAL. + */ + prlog(PR_ERR, "%s: Inconsistent internal list state !\n", + __func__); + } else { + list_del(&log_data->link); + list_add(&elog_read_free, &log_data->link); + prerror("ELOG: received invalid data: %x FSP status: 0x%x\n", + log_data->log_id, fsp_status); + } + + fsp_elog_set_head_state(ELOG_STATE_NONE); +} + +/* Read response value from FSP for fetch sp data mbox command */ +static void fsp_elog_read_complete(struct fsp_msg *read_msg) +{ + uint8_t val; + + lock(&elog_read_lock); + val = (read_msg->resp->word1 >> 8) & 0xff; + fsp_freemsg(read_msg); + if (elog_read_from_fsp_head_state == ELOG_STATE_REJECTED) { + fsp_elog_set_head_state(ELOG_STATE_NONE); + goto elog_read_out; + } + + switch (val) { + case FSP_STATUS_SUCCESS: + fsp_elog_set_head_state(ELOG_STATE_FETCHED_DATA); + break; + + case FSP_STATUS_DMA_ERROR: + if (elog_read_retries++ < MAX_RETRIES) { + /* + * For a error response value from FSP, we try to + * send fetch sp data mbox command again for three + * times if response from FSP is still not valid + * we send generic error response to FSP. 
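+ * (So a bad entry costs at most MAX_RETRIES extra fetch attempts;
+ * after that fsp_elog_fetch_failure() drops it from the pending list
+ * and the state machine moves on to the next log, if any.)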
+ */ + fsp_elog_queue_fetch(); + break; + } + + fsp_elog_fetch_failure(val); + break; + + default: + fsp_elog_fetch_failure(val); + } + +elog_read_out: + unlock(&elog_read_lock); + + /* Check if a new log needs fetching */ + fsp_elog_check_and_fetch_head(); +} + +/* Read error log from FSP through mbox commands */ +static void fsp_elog_queue_fetch(void) +{ + int rc; + uint8_t flags = 0; + struct fsp_log_entry *entry; + + entry = list_top(&elog_read_pending, struct fsp_log_entry, link); + if (!entry) { + /** + * @fwts-label ElogQueueInconsistent + * @fwts-advice Bug in interaction between FSP and OPAL. We + * expected there to be a pending read from FSP but the list + * was empty. + */ + prlog(PR_ERR, "%s: Inconsistent internal list state !\n", + __func__); + fsp_elog_set_head_state(ELOG_STATE_NONE); + return; + } + + fsp_elog_set_head_state(ELOG_STATE_FETCHING); + elog_head_id = entry->log_id; + elog_head_size = entry->log_size; + rc = fsp_fetch_data_queue(flags, FSP_DATASET_ERRLOG, elog_head_id, + 0, (void *)PSI_DMA_ERRLOG_READ_BUF, + &elog_head_size, fsp_elog_read_complete); + if (rc) { + prerror("ELOG: failed to queue read message: %d\n", rc); + fsp_elog_set_head_state(ELOG_STATE_NONE); + } +} + +/* OPAL interface for PowerNV to read log size and log ID from Sapphire. */ +static int64_t fsp_opal_elog_info(__be64 *opal_elog_id, + __be64 *opal_elog_size, __be64 *elog_type) +{ + struct fsp_log_entry *log_data; + + /* Copy type of the error log */ + *elog_type = cpu_to_be64(ELOG_TYPE_PEL); + + /* Check if any OPAL log needs to be reported to the host */ + if (opal_elog_info(opal_elog_id, opal_elog_size)) + return OPAL_SUCCESS; + + lock(&elog_read_lock); + if (elog_read_from_fsp_head_state != ELOG_STATE_FETCHED_DATA) { + unlock(&elog_read_lock); + return OPAL_WRONG_STATE; + } + + log_data = list_top(&elog_read_pending, struct fsp_log_entry, link); + if (!log_data) { + /** + * @fwts-label ElogInfoInconsistentState + * @fwts-advice We expected there to be an entry in the list + * of error logs for the error log we're fetching information + * for. There wasn't. This means there's a bug. + */ + prlog(PR_ERR, "%s: Inconsistent internal list state !\n", + __func__); + fsp_elog_set_head_state(ELOG_STATE_NONE); + unlock(&elog_read_lock); + return OPAL_WRONG_STATE; + } + + *opal_elog_id = cpu_to_be64(log_data->log_id); + *opal_elog_size = cpu_to_be64(log_data->log_size); + fsp_elog_set_head_state(ELOG_STATE_HOST_INFO); + unlock(&elog_read_lock); + return OPAL_SUCCESS; +} + +/* OPAL interface for PowerNV to read log from Sapphire. */ +static int64_t fsp_opal_elog_read(void *buffer, uint64_t opal_elog_size, + uint64_t opal_elog_id) +{ + int size = opal_elog_size; + struct fsp_log_entry *log_data; + + /* Check if any OPAL log needs to be reported to the PowerNV */ + if (opal_elog_read(buffer, opal_elog_size, opal_elog_id)) + return OPAL_SUCCESS; + + /* + * Read top entry from list. + * As we know always top record of the list is fetched from FSP + */ + lock(&elog_read_lock); + if (elog_read_from_fsp_head_state != ELOG_STATE_HOST_INFO) { + unlock(&elog_read_lock); + return OPAL_WRONG_STATE; + } + + log_data = list_top(&elog_read_pending, struct fsp_log_entry, link); + if (!log_data) { + /** + * @fwts-label ElogReadInconsistentState + * @fwts-advice Inconsistent state while reading error log + * from FSP. Bug in OPAL and FSP interaction. 
+ */ + prlog(PR_ERR, "%s: Inconsistent internal list state !\n", + __func__); + fsp_elog_set_head_state(ELOG_STATE_NONE); + unlock(&elog_read_lock); + return OPAL_WRONG_STATE; + } + + /* Check log ID and then read log from buffer */ + if (opal_elog_id != log_data->log_id) { + unlock(&elog_read_lock); + return OPAL_PARAMETER; + } + + /* Do not copy more than actual log size */ + if (opal_elog_size > log_data->log_size) + size = log_data->log_size; + + memset(buffer, 0, opal_elog_size); + memcpy(buffer, elog_read_buffer, size); + + /* + * Once log is read from linux move record from pending + * to processed list and delete record from pending list + * and change state of the log to fetch next record. + */ + list_del(&log_data->link); + list_add(&elog_read_processed, &log_data->link); + fsp_elog_set_head_state(ELOG_STATE_NONE); + unlock(&elog_read_lock); + + /* Read error log from FSP */ + fsp_elog_check_and_fetch_head(); + + return OPAL_SUCCESS; +} + +/* Set state of the log head before fetching the log. */ +static void elog_reject_head(void) +{ + if (elog_read_from_fsp_head_state == ELOG_STATE_FETCHING) + fsp_elog_set_head_state(ELOG_STATE_REJECTED); + else + fsp_elog_set_head_state(ELOG_STATE_NONE); +} + +/* OPAL interface for PowerNV to send ack to FSP with log ID */ +static int64_t fsp_opal_elog_ack(uint64_t ack_id) +{ + int rc = 0; + struct fsp_log_entry *record, *next_record; + + if (opal_elog_ack(ack_id)) + return rc; + + /* Send acknowledgement to FSP */ + rc = fsp_send_elog_ack(ack_id); + if (rc != OPAL_SUCCESS) { + prerror("ELOG: failed to send acknowledgement: %d\n", rc); + return rc; + } + + lock(&elog_read_lock); + list_for_each_safe(&elog_read_processed, record, next_record, link) { + if (record->log_id != ack_id) + continue; + + list_del(&record->link); + list_add(&elog_read_free, &record->link); + unlock(&elog_read_lock); + return rc; + } + + list_for_each_safe(&elog_read_pending, record, next_record, link) { + if (record->log_id != ack_id) + continue; + /* + * It means PowerNV has sent ACK without reading actual data. + * Because of this elog_read_from_fsp_head_state may be + * stuck in wrong state (ELOG_STATE_HOST_INFO) and not able + * to send remaining ELOGs to PowerNV. Hence reset ELOG state + * and start sending remaining ELOGs. + */ + list_del(&record->link); + list_add(&elog_read_free, &record->link); + elog_reject_head(); + unlock(&elog_read_lock); + fsp_elog_check_and_fetch_head(); + return rc; + } + + unlock(&elog_read_lock); + return OPAL_PARAMETER; +} + +/* + * Once Linux kexec's it ask to resend all logs which + * are not acknowledged from Linux. + */ +static void fsp_opal_resend_pending_logs(void) +{ + struct fsp_log_entry *entry; + + lock(&elog_read_lock); + elog_enabled = true; + unlock(&elog_read_lock); + + /* Check if any Sapphire logs are pending. */ + opal_resend_pending_logs(); + + lock(&elog_read_lock); + /* + * If processed list is not empty add all record from + * processed list to pending list at head of the list + * and delete records from processed list. 
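+ * (Illustration: with processed = {A, B} and pending = {C}, the loop
+ * below pops A then B from the head of processed and pushes each onto
+ * the head of pending, leaving processed empty and pending = {B, A, C}.)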
+ */ + while (!list_empty(&elog_read_processed)) { + entry = list_pop(&elog_read_processed, + struct fsp_log_entry, link); + list_add(&elog_read_pending, &entry->link); + } + + unlock(&elog_read_lock); + + /* Read error log from FSP */ + elog_reject_head(); + fsp_elog_check_and_fetch_head(); +} + +/* Disable ELOG event flag until PowerNV is ready to receive event */ +static bool opal_kexec_elog_notify(void *data __unused) +{ + lock(&elog_read_lock); + elog_enabled = false; + opal_update_pending_evt(OPAL_EVENT_ERROR_LOG_AVAIL, 0); + unlock(&elog_read_lock); + + return true; +} + +/* FSP elog notify function */ +static bool fsp_elog_msg(uint32_t cmd_sub_mod, struct fsp_msg *msg) +{ + int rc = 0; + struct fsp_log_entry *record; + uint32_t log_id; + uint32_t log_size; + + if (cmd_sub_mod != FSP_CMD_ERRLOG_NOTIFICATION) + return false; + + log_id = fsp_msg_get_data_word(msg, 0); + log_size = fsp_msg_get_data_word(msg, 1); + + prlog(PR_TRACE, "ELOG: Notified of log 0x%08x (size: %d)\n", + log_id, log_size); + + /* Make sure we don't cross read buffer size */ + if (log_size > ELOG_READ_BUFFER_SIZE) { + log_size = ELOG_READ_BUFFER_SIZE; + printf("ELOG: Truncated log (0x%08x) to 0x%x\n", + log_id, log_size); + } + + /* Take a lock until we take out the node from elog_read_free */ + lock(&elog_read_lock); + if (!list_empty(&elog_read_free)) { + /* Create a new entry in the pending list. */ + record = list_pop(&elog_read_free, struct fsp_log_entry, link); + record->log_id = log_id; + record->log_size = log_size; + list_add_tail(&elog_read_pending, &record->link); + unlock(&elog_read_lock); + + /* Send response back to FSP for a new elog notify message. */ + rc = fsp_queue_msg(fsp_mkmsg(FSP_RSP_ERRLOG_NOTIFICATION, + 1, log_id), fsp_freemsg); + if (rc) + prerror("ELOG: Failed to queue errlog notification" + " response: %d\n", rc); + + /* Read error log from FSP */ + fsp_elog_check_and_fetch_head(); + + } else { + prlog(PR_TRACE, "ELOG: Log entry 0x%08x discarded\n", log_id); + + /* Unlock if elog_read_free is empty. */ + unlock(&elog_read_lock); + + rc = fsp_queue_msg(fsp_mkmsg(FSP_RSP_ERRLOG_NOTIFICATION, + 1, log_id), fsp_freemsg); + if (rc) + prerror("ELOG: Failed to queue errlog notification" + " response: %d\n", rc); + + /* + * If list is full with max record then we send discarded by + * phyp (condition full) ack to FSP. + * + * At some point in the future, we'll get notified again. + * This is largely up to FSP as to when they tell us about + * the log again. 
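+ * Note that the normal notification response has already been queued above; the ack below additionally tells the FSP that this log was discarded.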
+ */ + rc = fsp_queue_msg(fsp_mkmsg(FSP_CMD_ERRLOG_PHYP_ACK | 0x02, + 1, log_id), fsp_freemsg); + if (rc) + prerror("ELOG: Failed to queue errlog ack" + " response: %d\n", rc); + } + + return true; +} + +static struct fsp_client fsp_get_elog_notify = { + .message = fsp_elog_msg, +}; + +/* Pre-allocate memory for reading error log from FSP */ +static int init_elog_read_free_list(uint32_t num_entries) +{ + struct fsp_log_entry *entry; + int i; + + entry = zalloc(sizeof(struct fsp_log_entry) * num_entries); + if (!entry) + goto out_err; + + for (i = 0; i < num_entries; ++i) { + list_add_tail(&elog_read_free, &entry->link); + entry++; + } + + return 0; + +out_err: + return -ENOMEM; +} + +/* FSP elog read init function */ +void fsp_elog_read_init(void) +{ + int val = 0; + + if (!fsp_present()) + return; + + elog_read_buffer = memalign(TCE_PSIZE, ELOG_READ_BUFFER_SIZE); + if (!elog_read_buffer) { + prerror("FSP: could not allocate FSP ELOG_READ_BUFFER!\n"); + return; + } + + /* Map TCEs */ + fsp_tce_map(PSI_DMA_ERRLOG_READ_BUF, elog_read_buffer, + PSI_DMA_ERRLOG_READ_BUF_SZ); + + /* Pre allocate memory for 128 record */ + val = init_elog_read_free_list(ELOG_READ_MAX_RECORD); + if (val != 0) + return; + + /* Register error log class D2 */ + fsp_register_client(&fsp_get_elog_notify, FSP_MCLASS_ERR_LOG); + + /* Register for sync on PowerNV reboot call */ + opal_add_host_sync_notifier(opal_kexec_elog_notify, NULL); + + /* Register OPAL interface */ + opal_register(OPAL_ELOG_READ, fsp_opal_elog_read, 3); + opal_register(OPAL_ELOG_ACK, fsp_opal_elog_ack, 1); + opal_register(OPAL_ELOG_RESEND, fsp_opal_resend_pending_logs, 0); + opal_register(OPAL_ELOG_SIZE, fsp_opal_elog_info, 3); +} diff --git a/roms/skiboot/hw/fsp/fsp-elog-write.c b/roms/skiboot/hw/fsp/fsp-elog-write.c new file mode 100644 index 000000000..7b26a1867 --- /dev/null +++ b/roms/skiboot/hw/fsp/fsp-elog-write.c @@ -0,0 +1,441 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* + * This code will enable generation and pushing of error log from Sapphire + * to FSP. + * Critical events from Sapphire that needs to be reported will be pushed + * on to FSP after converting the error log to Platform Error Log(PEL) format. + * This is termed as write action to FSP. + * + * Copyright 2013-2016 IBM Corp. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static LIST_HEAD(elog_write_to_fsp_pending); +static LIST_HEAD(elog_write_to_host_pending); +static LIST_HEAD(elog_write_to_host_processed); + +static struct lock elog_write_lock = LOCK_UNLOCKED; +static struct lock elog_panic_write_lock = LOCK_UNLOCKED; +static struct lock elog_write_to_host_lock = LOCK_UNLOCKED; + +#define ELOG_WRITE_TO_FSP_BUFFER_SIZE 0x00004000 +/* Log buffer to copy OPAL log for write to FSP. 
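+ * (TCE-mapped as PSI_DMA_ERRLOG_WRITE_BUF in fsp_elog_write_init().)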
*/ +static void *elog_write_to_fsp_buffer; + +#define ELOG_PANIC_WRITE_BUFFER_SIZE 0x00004000 +static void *elog_panic_write_buffer; + +#define ELOG_WRITE_TO_HOST_BUFFER_SIZE 0x00004000 +static void *elog_write_to_host_buffer; + +static uint32_t elog_write_retries; + +/* Manipulate this only with write_lock held */ +static uint32_t elog_plid_fsp_commit = -1; +static enum elog_head_state elog_write_to_host_head_state = ELOG_STATE_NONE; + +/* Need forward declaration because of circular dependency */ +static int opal_send_elog_to_fsp(void); + +static void remove_elog_head_entry(void) +{ + struct errorlog *head, *entry; + + lock(&elog_write_lock); + if (!list_empty(&elog_write_to_fsp_pending)) { + head = list_top(&elog_write_to_fsp_pending, + struct errorlog, link); + if (head->plid == elog_plid_fsp_commit) { + entry = list_pop(&elog_write_to_fsp_pending, + struct errorlog, link); + opal_elog_complete(entry, + elog_write_retries < MAX_RETRIES); + /* Reset the counter */ + elog_plid_fsp_commit = -1; + } + } + + elog_write_retries = 0; + unlock(&elog_write_lock); +} + +static void opal_fsp_write_complete(struct fsp_msg *read_msg) +{ + uint8_t val; + + val = (read_msg->resp->word1 >> 8) & 0xff; + fsp_freemsg(read_msg); + + switch (val) { + case FSP_STATUS_SUCCESS: + remove_elog_head_entry(); + break; + default: + if (elog_write_retries++ >= MAX_RETRIES) { + remove_elog_head_entry(); + prerror("ELOG: Error in writing to FSP (0x%x)!\n", val); + } + + break; + } + + if (opal_send_elog_to_fsp() != OPAL_SUCCESS) + prerror("ELOG: Error sending elog to FSP !\n"); +} + +/* Write PEL format hex dump of the log to FSP */ +static int64_t fsp_opal_elog_write(size_t opal_elog_size) +{ + struct fsp_msg *elog_msg; + + elog_msg = fsp_mkmsg(FSP_CMD_CREATE_ERRLOG, 3, opal_elog_size, + 0, PSI_DMA_ERRLOG_WRITE_BUF); + if (!elog_msg) { + prerror("ELOG: Failed to create message for WRITE to FSP\n"); + return OPAL_INTERNAL_ERROR; + } + + if (fsp_queue_msg(elog_msg, opal_fsp_write_complete)) { + fsp_freemsg(elog_msg); + elog_msg = NULL; + prerror("FSP: Error queueing elog update\n"); + return OPAL_INTERNAL_ERROR; + } + + return OPAL_SUCCESS; +} + +/* This should be called with elog_write_to_host_lock lock */ +static inline void fsp_elog_write_set_head_state(enum elog_head_state state) +{ + elog_set_head_state(true, state); + elog_write_to_host_head_state = state; +} + +bool opal_elog_info(__be64 *opal_elog_id, __be64 *opal_elog_size) +{ + struct errorlog *head; + bool rc = false; + + lock(&elog_write_to_host_lock); + if (elog_write_to_host_head_state == ELOG_STATE_FETCHED_DATA) { + head = list_top(&elog_write_to_host_pending, + struct errorlog, link); + if (!head) { + /** + * @fwts-label ElogListInconsistent + * @fwts-advice Bug in interaction between FSP and + * OPAL. The state maintained by OPAL didn't match + * what the FSP sent. 
+ */ + prlog(PR_ERR, + "%s: Inconsistent internal list state !\n", + __func__); + fsp_elog_write_set_head_state(ELOG_STATE_NONE); + } else { + *opal_elog_id = cpu_to_be64(head->plid); + *opal_elog_size = cpu_to_be64(head->log_size); + fsp_elog_write_set_head_state(ELOG_STATE_HOST_INFO); + rc = true; + } + } + + unlock(&elog_write_to_host_lock); + return rc; +} + +static void opal_commit_elog_in_host(void) +{ + struct errorlog *buf; + + lock(&elog_write_to_host_lock); + if (!list_empty(&elog_write_to_host_pending) && + (elog_write_to_host_head_state == ELOG_STATE_NONE)) { + buf = list_top(&elog_write_to_host_pending, + struct errorlog, link); + buf->log_size = create_pel_log(buf, + (char *)elog_write_to_host_buffer, + ELOG_WRITE_TO_HOST_BUFFER_SIZE); + fsp_elog_write_set_head_state(ELOG_STATE_FETCHED_DATA); + } + + unlock(&elog_write_to_host_lock); +} + +bool opal_elog_read(void *buffer, uint64_t opal_elog_size, + uint64_t opal_elog_id) +{ + struct errorlog *log_data; + bool rc = false; + + lock(&elog_write_to_host_lock); + if (elog_write_to_host_head_state == ELOG_STATE_HOST_INFO) { + log_data = list_top(&elog_write_to_host_pending, + struct errorlog, link); + if (!log_data) { + fsp_elog_write_set_head_state(ELOG_STATE_NONE); + unlock(&elog_write_to_host_lock); + return rc; + } + + if ((opal_elog_id != log_data->plid) && + (opal_elog_size != log_data->log_size)) { + unlock(&elog_write_to_host_lock); + return rc; + } + + memcpy(buffer, elog_write_to_host_buffer, opal_elog_size); + list_del(&log_data->link); + list_add(&elog_write_to_host_processed, &log_data->link); + fsp_elog_write_set_head_state(ELOG_STATE_NONE); + rc = true; + } + + unlock(&elog_write_to_host_lock); + opal_commit_elog_in_host(); + return rc; +} + +bool opal_elog_ack(uint64_t ack_id) +{ + bool rc = false; + struct errorlog *log_data; + struct errorlog *record, *next_record; + + lock(&elog_write_to_host_lock); + if (!list_empty(&elog_write_to_host_processed)) { + list_for_each_safe(&elog_write_to_host_processed, record, + next_record, link) { + if (record->plid != ack_id) + continue; + + list_del(&record->link); + opal_elog_complete(record, true); + rc = true; + } + } + + if ((!rc) && (!list_empty(&elog_write_to_host_pending))) { + log_data = list_top(&elog_write_to_host_pending, + struct errorlog, link); + if (ack_id == log_data->plid) + fsp_elog_write_set_head_state(ELOG_STATE_NONE); + + list_for_each_safe(&elog_write_to_host_pending, record, + next_record, link) { + if (record->plid != ack_id) + continue; + + list_del(&record->link); + opal_elog_complete(record, true); + rc = true; + unlock(&elog_write_to_host_lock); + opal_commit_elog_in_host(); + return rc; + } + } + + unlock(&elog_write_to_host_lock); + return rc; +} + +void opal_resend_pending_logs(void) +{ + struct errorlog *record; + + lock(&elog_write_to_host_lock); + while (!list_empty(&elog_write_to_host_processed)) { + record = list_pop(&elog_write_to_host_processed, + struct errorlog, link); + list_add_tail(&elog_write_to_host_pending, &record->link); + } + + fsp_elog_write_set_head_state(ELOG_STATE_NONE); + unlock(&elog_write_to_host_lock); + opal_commit_elog_in_host(); +} + +static inline u64 get_elog_timeout(void) +{ + return (mftb() + secs_to_tb(ERRORLOG_TIMEOUT_INTERVAL)); +} + +static int opal_send_elog_to_fsp(void) +{ + struct errorlog *head; + int rc = OPAL_SUCCESS; + + /* + * Convert entry to PEL and push it down to FSP. + * Then we wait for the ack from FSP. 
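+ * Only the head of elog_write_to_fsp_pending is sent here; opal_fsp_write_complete() removes it (on success, or after MAX_RETRIES failed attempts) and then calls back into this function for the next pending entry.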
+ */ + lock(&elog_write_lock); + if (!list_empty(&elog_write_to_fsp_pending)) { + head = list_top(&elog_write_to_fsp_pending, + struct errorlog, link); + /* Error needs to be committed, update the time out value */ + head->elog_timeout = get_elog_timeout(); + + elog_plid_fsp_commit = head->plid; + head->log_size = create_pel_log(head, + (char *)elog_write_to_fsp_buffer, + ELOG_WRITE_TO_FSP_BUFFER_SIZE); + rc = fsp_opal_elog_write(head->log_size); + unlock(&elog_write_lock); + return rc; + } + + unlock(&elog_write_lock); + return rc; +} + +static int opal_push_logs_sync_to_fsp(struct errorlog *buf) +{ + struct fsp_msg *elog_msg; + int opal_elog_size = 0; + int rc = OPAL_SUCCESS; + + lock(&elog_panic_write_lock); + + /* Error needs to be committed, update the time out value */ + buf->elog_timeout = get_elog_timeout(); + + opal_elog_size = create_pel_log(buf, + (char *)elog_panic_write_buffer, + ELOG_PANIC_WRITE_BUFFER_SIZE); + + elog_msg = fsp_mkmsg(FSP_CMD_CREATE_ERRLOG, 3, opal_elog_size, + 0, PSI_DMA_ELOG_PANIC_WRITE_BUF); + if (!elog_msg) { + prerror("ELOG: PLID: 0x%x Failed to create message for WRITE " + "to FSP\n", buf->plid); + unlock(&elog_panic_write_lock); + opal_elog_complete(buf, false); + return OPAL_INTERNAL_ERROR; + } + + if (fsp_sync_msg(elog_msg, false)) { + fsp_freemsg(elog_msg); + rc = OPAL_INTERNAL_ERROR; + } else { + rc = (elog_msg->resp->word1 >> 8) & 0xff; + fsp_freemsg(elog_msg); + } + + unlock(&elog_panic_write_lock); + if (rc != OPAL_SUCCESS) + opal_elog_complete(buf, false); + else + opal_elog_complete(buf, true); + + return rc; +} + +int elog_fsp_commit(struct errorlog *buf) +{ + int rc = OPAL_SUCCESS; + + if (buf->event_severity == OPAL_ERROR_PANIC) { + rc = opal_push_logs_sync_to_fsp(buf); + return rc; + } + + lock(&elog_write_lock); + if (list_empty(&elog_write_to_fsp_pending)) { + list_add_tail(&elog_write_to_fsp_pending, &buf->link); + unlock(&elog_write_lock); + rc = opal_send_elog_to_fsp(); + return rc; + } + + list_add_tail(&elog_write_to_fsp_pending, &buf->link); + unlock(&elog_write_lock); + return rc; +} + +static void elog_append_write_to_host(struct errorlog *buf) +{ + lock(&elog_write_to_host_lock); + if (list_empty(&elog_write_to_host_pending)) { + list_add(&elog_write_to_host_pending, &buf->link); + unlock(&elog_write_to_host_lock); + opal_commit_elog_in_host(); + } else { + list_add_tail(&elog_write_to_host_pending, &buf->link); + unlock(&elog_write_to_host_lock); + } +} + +static void elog_timeout_poll(void *data __unused) +{ + uint64_t now; + struct errorlog *head, *entry; + + lock(&elog_write_lock); + if (list_empty(&elog_write_to_fsp_pending)) { + unlock(&elog_write_lock); + return; + } + + head = list_top(&elog_write_to_fsp_pending, struct errorlog, link); + now = mftb(); + if ((tb_compare(now, head->elog_timeout) == TB_AAFTERB) || + (tb_compare(now, head->elog_timeout) == TB_AEQUALB)) { + entry = list_pop(&elog_write_to_fsp_pending, + struct errorlog, link); + unlock(&elog_write_lock); + elog_append_write_to_host(entry); + } else { + unlock(&elog_write_lock); + } +} + +/* FSP elog init function */ +void fsp_elog_write_init(void) +{ + if (!fsp_present()) + return; + + elog_panic_write_buffer = memalign(TCE_PSIZE, + ELOG_PANIC_WRITE_BUFFER_SIZE); + if (!elog_panic_write_buffer) { + prerror("FSP: could not allocate ELOG_PANIC_WRITE_BUFFER!\n"); + return; + } + + elog_write_to_fsp_buffer = memalign(TCE_PSIZE, + ELOG_WRITE_TO_FSP_BUFFER_SIZE); + if (!elog_write_to_fsp_buffer) { + prerror("FSP: could not allocate ELOG_WRITE_BUFFER!\n"); + return; 
+ } + + elog_write_to_host_buffer = memalign(TCE_PSIZE, + ELOG_WRITE_TO_HOST_BUFFER_SIZE); + if (!elog_write_to_host_buffer) { + prerror("FSP: could not allocate ELOG_WRITE_TO_HOST_BUFFER!\n"); + return; + } + + /* Map TCEs */ + fsp_tce_map(PSI_DMA_ELOG_PANIC_WRITE_BUF, elog_panic_write_buffer, + PSI_DMA_ELOG_PANIC_WRITE_BUF_SZ); + + fsp_tce_map(PSI_DMA_ERRLOG_WRITE_BUF, elog_write_to_fsp_buffer, + PSI_DMA_ERRLOG_WRITE_BUF_SZ); + + elog_init(); + + /* Add a poller */ + opal_add_poller(elog_timeout_poll, NULL); +} diff --git a/roms/skiboot/hw/fsp/fsp-epow.c b/roms/skiboot/hw/fsp/fsp-epow.c new file mode 100644 index 000000000..8869e91e6 --- /dev/null +++ b/roms/skiboot/hw/fsp/fsp-epow.c @@ -0,0 +1,192 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* + * FSP Environmental and Power Warnings (EPOW) support + * + * Copyright 2013-2016 IBM Corp. + */ + +#define pr_fmt(fmt) "FSP-EPOW: " fmt + +#include +#include +#include +#include +#include + +#include "fsp-epow.h" + +/* + * System EPOW status + * + * This value is exported to the host. Each individual element in this + * array [0...(OPAL_SYSEPOW_MAX-1)] contains bitwise EPOW event info + * corresponding to a particular defined EPOW sub class. For example, + * epow_status[OPAL_SYSEPOW_POWER] will reflect power related EPOW events. + */ +static int16_t epow_status[OPAL_SYSEPOW_MAX]; + +/* EPOW lock */ +static struct lock epow_lock = LOCK_UNLOCKED; + +/* Process EPOW information sent by the FSP */ +static void epow_process_ex1_event(u8 *epow) +{ + memset(epow_status, 0, sizeof(epow_status)); + + if (epow[4] == EPOW_TMP_INT) { + prlog(PR_INFO, "Internal temp above normal\n"); + epow_status[OPAL_SYSEPOW_TEMP] = OPAL_SYSTEMP_INT; + + } else if (epow[4] == EPOW_TMP_AMB) { + prlog(PR_INFO, "Ambient temp above normal\n"); + epow_status[OPAL_SYSEPOW_TEMP] = OPAL_SYSTEMP_AMB; + + } else if (epow[4] == EPOW_ON_UPS) { + prlog(PR_INFO, "System running on UPS power\n"); + epow_status[OPAL_SYSEPOW_POWER] = OPAL_SYSPOWER_UPS; + + } +} + +/* Process EPOW event */ +static void fsp_process_epow(struct fsp_msg *msg, int epow_type) +{ + int rc; + u8 epow[8]; + bool epow_changed = false; + int16_t old_epow_status[OPAL_SYSEPOW_MAX]; + + /* Basic EPOW signature */ + if (msg->data.bytes[0] != 0xF2) { + /** + * @fwts-label EPOWSignatureMismatch + * @fwts-advice Bug in skiboot/FSP code for EPOW event handling + */ + prlog(PR_ERR, "Signature mismatch\n"); + return; + } + + lock(&epow_lock); + + /* Copy over and clear system EPOW status */ + memcpy(old_epow_status, epow_status, sizeof(old_epow_status)); + + switch(epow_type) { + case EPOW_NORMAL: + case EPOW_EX2: + break; + case EPOW_EX1: + epow[0] = msg->data.bytes[0]; + epow[1] = msg->data.bytes[1]; + epow[2] = msg->data.bytes[2]; + epow[3] = msg->data.bytes[3]; + epow[4] = msg->data.bytes[4]; + + epow_process_ex1_event(epow); + break; + default: + prlog(PR_WARNING, "Unknown EPOW event notification\n"); + break; + } + + if (memcmp(epow_status, old_epow_status, sizeof(epow_status))) + epow_changed = true; + + unlock(&epow_lock); + + /* Send OPAL message notification */ + if (epow_changed) { + rc = opal_queue_msg(OPAL_MSG_EPOW, NULL, NULL); + if (rc) { + /** + * @fwts-label EPOWMessageQueueFailed + * @fwts-advice Queueing a message from OPAL to FSP + * failed. This is likely due to either an OPAL bug + * or the FSP going away.
+ */ + prlog(PR_ERR, "OPAL EPOW message queuing failed\n"); + return; + } + prlog(PR_INFO, "Notified host about EPOW event\n"); + } +} + +/* + * EPOW OPAL interface + * + * The host requests for the system EPOW status through this + * OPAl call, where it passes a buffer with a give length. + * Sapphire fills the buffer with updated system EPOW status + * and then updates the length variable back to reflect the + * number of EPOW sub classes it has updated the buffer with. + */ +static int64_t fsp_opal_get_epow_status(__be16 *out_epow, __be16 *length) +{ + int i; + int n_epow_class; + int l = be16_to_cpu(*length); + + /* + * There can be situations where the host and the Sapphire versions + * don't match with eact other and hence the expected system EPOW status + * details. Newer hosts might be expecting status for more number of EPOW + * sub classes which Sapphire may not know about and older hosts might be + * expecting status for EPOW sub classes which is a subset of what + * Sapphire really knows about. Both these situations are handled here. + * + * (A) Host version >= Sapphire version + * + * Sapphire sends out EPOW status for sub classes it knows about + * and keeps the status. Updates the length variable for the host. + * + * (B) Host version < Sapphire version + * + * Sapphire sends out EPOW status for sub classes host knows about + * and can interpret correctly. + */ + if (l >= OPAL_SYSEPOW_MAX) { + n_epow_class = OPAL_SYSEPOW_MAX; + *length = cpu_to_be16(OPAL_SYSEPOW_MAX); + } else { + n_epow_class = l; + } + + /* Transfer EPOW Status */ + for (i = 0; i < n_epow_class; i++) + out_epow[i] = cpu_to_be16(epow_status[i]); + + return OPAL_SUCCESS; +} + +/* Handle EPOW sub-commands from FSP */ +static bool fsp_epow_message(u32 cmd_sub_mod, struct fsp_msg *msg) +{ + switch(cmd_sub_mod) { + case FSP_CMD_PANELSTATUS: + fsp_process_epow(msg, EPOW_NORMAL); + return true; + case FSP_CMD_PANELSTATUS_EX1: + fsp_process_epow(msg, EPOW_EX1); + return true; + case FSP_CMD_PANELSTATUS_EX2: + fsp_process_epow(msg, EPOW_EX2); + return true; + } + return false; +} + +static struct fsp_client fsp_epow_client = { + .message = fsp_epow_message, +}; + +void fsp_epow_init(void) +{ + struct dt_node *np; + + fsp_register_client(&fsp_epow_client, FSP_MCLASS_SERVICE); + opal_register(OPAL_GET_EPOW_STATUS, fsp_opal_get_epow_status, 2); + np = dt_new(opal_node, "epow"); + dt_add_property_strings(np, "compatible", "ibm,opal-v3-epow"); + dt_add_property_strings(np, "epow-classes", "power", "temperature", "cooling"); + prlog(PR_INFO, "FSP EPOW support initialized\n"); +} diff --git a/roms/skiboot/hw/fsp/fsp-epow.h b/roms/skiboot/hw/fsp/fsp-epow.h new file mode 100644 index 000000000..bc1df258e --- /dev/null +++ b/roms/skiboot/hw/fsp/fsp-epow.h @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* + * Handle FSP EPOW event notifications + * + * Copyright 2013-2015 IBM Corp. 
+ */ + +#ifndef __FSP_EPOW_H +#define __FSP_EPOW_H + +/* FSP based EPOW event notifications */ +#define EPOW_NORMAL 0x00 /* panel status normal */ +#define EPOW_EX1 0x01 /* panel status extended 1 */ +#define EPOW_EX2 0x02 /* Panel status extended 2 */ + +/* EPOW reason code notifications */ +#define EPOW_ON_UPS 1 /* System on UPS */ +#define EPOW_TMP_AMB 2 /* Over ambient temperature */ +#define EPOW_TMP_INT 3 /* Over internal temperature */ + +#endif diff --git a/roms/skiboot/hw/fsp/fsp-ipmi.c b/roms/skiboot/hw/fsp/fsp-ipmi.c new file mode 100644 index 000000000..e368c2828 --- /dev/null +++ b/roms/skiboot/hw/fsp/fsp-ipmi.c @@ -0,0 +1,400 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* + * Conduit for IPMI messages to/from FSP + * + * Copyright 2014-2019 IBM Corp. + */ + +#include +#include +#include +#include +#include + +/* + * Under the hood, FSP IPMI component implements the KCS (Keyboard Controller + * Style) interface + * + * KCS interface request message format + * + * BYTE 1 BYTE 2 BYTE 3:N + * ------------------------------------- + * | NetFn/LUN | Cmd | Data | + * ------------------------------------- + * + * KCS interface response message format + * + * BYTE 1 BYTE 2 BYTE 3 BYTE 4:N + * ------------------------------------------------ + * | NetFn/LUN | Cmd | CompCode | Data | + * ------------------------------------------------ + + */ + +#define FSP_IPMI_REQ_MIN_LEN 2 /* NetFn + Cmd */ +#define FSP_IPMI_RESP_MIN_LEN 3 /* NetFn + Cmd + Completion code */ + +DEFINE_LOG_ENTRY(OPAL_RC_IPMI_REQ, OPAL_PLATFORM_ERR_EVT, OPAL_IPMI, + OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL, + OPAL_NA); +DEFINE_LOG_ENTRY(OPAL_RC_IPMI_RESP, OPAL_PLATFORM_ERR_EVT, OPAL_IPMI, + OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL, + OPAL_NA); + +DEFINE_LOG_ENTRY(OPAL_RC_IPMI_DMA_ERROR_RESP, OPAL_PLATFORM_ERR_EVT, OPAL_IPMI, + OPAL_PLATFORM_FIRMWARE, OPAL_INFO, + OPAL_NA); + +struct fsp_ipmi_msg { + struct list_node link; + struct ipmi_msg ipmi_msg; +}; + +static struct fsp_ipmi { + struct list_head msg_queue; + void *ipmi_req_buf; + void *ipmi_resp_buf; + /* There can only be one outstanding request whose reference is stored + * in 'cur_msg' and the 'lock' protects against the concurrent updates + * of it through request and response. The same 'lock' also protects + * the list manipulation. 
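+ * Requests are therefore serialized: fsp_ipmi_send_request() issues a new FSP_CMD_FETCH_PLAT_DATA to the FSP only when 'cur_msg' is NULL.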
+ */ + struct fsp_ipmi_msg *cur_msg; + struct lock lock; +} fsp_ipmi; + +static int fsp_ipmi_send_request(void); + +static void fsp_ipmi_cmd_done(uint8_t cmd, uint8_t netfn, uint8_t cc) +{ + struct fsp_ipmi_msg *fsp_ipmi_msg = fsp_ipmi.cur_msg; + + lock(&fsp_ipmi.lock); + if (fsp_ipmi.cur_msg == NULL) { + unlock(&fsp_ipmi.lock); + return; + } + list_del(&fsp_ipmi_msg->link); + fsp_ipmi.cur_msg = NULL; + unlock(&fsp_ipmi.lock); + + ipmi_cmd_done(cmd, netfn, cc, &fsp_ipmi_msg->ipmi_msg); +} + + +static void fsp_ipmi_req_complete(struct fsp_msg *msg) +{ + uint8_t status = (msg->resp->word1 >> 8) & 0xff; + uint32_t length = fsp_msg_get_data_word(msg->resp, 0); + struct fsp_ipmi_msg *fsp_ipmi_msg = msg->user_data; + struct ipmi_msg *ipmi_msg; + + fsp_freemsg(msg); + + if (status != FSP_STATUS_SUCCESS) { + assert(fsp_ipmi_msg == fsp_ipmi.cur_msg); + + ipmi_msg = &fsp_ipmi_msg->ipmi_msg; + + if (length != (ipmi_msg->req_size + FSP_IPMI_REQ_MIN_LEN)) + prlog(PR_DEBUG, "IPMI: Length mismatch in req completion " + "(%d, %d)\n", ipmi_msg->req_size, length); + + log_simple_error(&e_info(OPAL_RC_IPMI_REQ), "IPMI: Request " + "failed with status:0x%02x\n", status); + /* FSP will not send the response now, so clear the current + * outstanding request + */ + fsp_ipmi_cmd_done(ipmi_msg->cmd, + IPMI_NETFN_RETURN_CODE(ipmi_msg->netfn), + IPMI_ERR_UNSPECIFIED); + + /* Send the next request in the queue */ + fsp_ipmi_send_request(); + } +} + +static int fsp_ipmi_send_request(void) +{ + uint8_t *req_buf = fsp_ipmi.ipmi_req_buf; + struct ipmi_msg *ipmi_msg; + struct fsp_msg *msg; + int rc; + + if (fsp_in_rr()) + return OPAL_BUSY; + + lock(&fsp_ipmi.lock); + /* An outstanding request is still pending */ + if (fsp_ipmi.cur_msg) { + unlock(&fsp_ipmi.lock); + return OPAL_SUCCESS; + } + + fsp_ipmi.cur_msg = list_top(&fsp_ipmi.msg_queue, struct fsp_ipmi_msg, + link); + unlock(&fsp_ipmi.lock); + + if (!fsp_ipmi.cur_msg) + return OPAL_SUCCESS; + + ipmi_msg = &fsp_ipmi.cur_msg->ipmi_msg; + prlog(PR_TRACE, "IPMI: Send request, netfn:0x%02x, cmd:0x%02x, " + "req_len:%d\n", ipmi_msg->netfn, ipmi_msg->cmd, ipmi_msg->req_size); + + /* KCS request message format */ + *req_buf++ = ipmi_msg->netfn; /* BYTE 1 */ + *req_buf++ = ipmi_msg->cmd; /* BYTE 2 */ + if (ipmi_msg->req_size) + memcpy(req_buf, ipmi_msg->data, ipmi_msg->req_size); + + msg = fsp_mkmsg(FSP_CMD_FETCH_PLAT_DATA, 5, 0, PSI_DMA_PLAT_REQ_BUF, + 0, PSI_DMA_PLAT_RESP_BUF, + ipmi_msg->req_size + FSP_IPMI_REQ_MIN_LEN); + if (!msg) { + log_simple_error(&e_info(OPAL_RC_IPMI_REQ), "IPMI: Failed to " + "allocate request message\n"); + fsp_ipmi_cmd_done(ipmi_msg->cmd, + IPMI_NETFN_RETURN_CODE(ipmi_msg->netfn), + IPMI_ERR_UNSPECIFIED); + return OPAL_NO_MEM; + } + + msg->user_data = fsp_ipmi.cur_msg; + rc = fsp_queue_msg(msg, fsp_ipmi_req_complete); + if (rc) { + log_simple_error(&e_info(OPAL_RC_IPMI_REQ), "IPMI: Failed to " + "queue request message (%d)\n", rc); + fsp_freemsg(msg); + fsp_ipmi_cmd_done(ipmi_msg->cmd, + IPMI_NETFN_RETURN_CODE(ipmi_msg->netfn), + IPMI_ERR_UNSPECIFIED); + return OPAL_INTERNAL_ERROR; + } + + return OPAL_SUCCESS; +} + +static struct ipmi_msg *fsp_ipmi_alloc_msg(size_t req_size, size_t resp_size) +{ + struct fsp_ipmi_msg *fsp_ipmi_msg; + struct ipmi_msg *ipmi_msg; + + fsp_ipmi_msg = zalloc(sizeof(*fsp_ipmi_msg) + MAX(req_size, resp_size)); + if (!fsp_ipmi_msg) + return NULL; + + ipmi_msg = &fsp_ipmi_msg->ipmi_msg; + + ipmi_msg->req_size = req_size; + ipmi_msg->resp_size = resp_size; + ipmi_msg->data = (uint8_t *)(fsp_ipmi_msg + 1); + + return 
ipmi_msg; +} + +static void fsp_ipmi_free_msg(struct ipmi_msg *ipmi_msg) +{ + struct fsp_ipmi_msg *fsp_ipmi_msg = container_of(ipmi_msg, + struct fsp_ipmi_msg, ipmi_msg); + + free(fsp_ipmi_msg); +} + +static int fsp_ipmi_queue_msg(struct ipmi_msg *ipmi_msg) +{ + struct fsp_ipmi_msg *fsp_ipmi_msg = container_of(ipmi_msg, + struct fsp_ipmi_msg, ipmi_msg); + + if (fsp_in_rr()) + return OPAL_BUSY; + + lock(&fsp_ipmi.lock); + list_add_tail(&fsp_ipmi.msg_queue, &fsp_ipmi_msg->link); + unlock(&fsp_ipmi.lock); + + return fsp_ipmi_send_request(); +} + +static int fsp_ipmi_queue_msg_head(struct ipmi_msg *ipmi_msg) +{ + struct fsp_ipmi_msg *fsp_ipmi_msg = container_of(ipmi_msg, + struct fsp_ipmi_msg, ipmi_msg); + + if (fsp_in_rr()) + return OPAL_BUSY; + + lock(&fsp_ipmi.lock); + list_add(&fsp_ipmi.msg_queue, &fsp_ipmi_msg->link); + unlock(&fsp_ipmi.lock); + + return fsp_ipmi_send_request(); +} + +static int fsp_ipmi_dequeue_msg(struct ipmi_msg *ipmi_msg) +{ + struct fsp_ipmi_msg *fsp_ipmi_msg = container_of(ipmi_msg, + struct fsp_ipmi_msg, ipmi_msg); + + lock(&fsp_ipmi.lock); + list_del_from(&fsp_ipmi.msg_queue, &fsp_ipmi_msg->link); + unlock(&fsp_ipmi.lock); + + return 0; +} + +static struct ipmi_backend fsp_ipmi_backend = { + .alloc_msg = fsp_ipmi_alloc_msg, + .free_msg = fsp_ipmi_free_msg, + .queue_msg = fsp_ipmi_queue_msg, + .queue_msg_head = fsp_ipmi_queue_msg_head, + .dequeue_msg = fsp_ipmi_dequeue_msg, + /* FIXME if ever use ipmi_queue_msg_sync on FSP */ + .poll = NULL, +}; + +static bool fsp_ipmi_rr_notify(uint32_t cmd_sub_mod, + struct fsp_msg *msg __unused) +{ + struct ipmi_msg *ipmi_msg; + + switch (cmd_sub_mod) { + case FSP_RESET_START: + return true; + case FSP_RELOAD_COMPLETE: + /* + * We will not get response for outstanding request. Send error + * message to caller and start sending new ipmi messages. 
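+ * The outstanding request, if any, is completed with IPMI_ERR_UNSPECIFIED before the queue is kicked again below.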
+ */ + if (fsp_ipmi.cur_msg) { + ipmi_msg = &fsp_ipmi.cur_msg->ipmi_msg; + fsp_ipmi_cmd_done(ipmi_msg->cmd, + IPMI_NETFN_RETURN_CODE(ipmi_msg->netfn), + IPMI_ERR_UNSPECIFIED); + } + fsp_ipmi_send_request(); + return true; + } + return false; +} + +static struct fsp_client fsp_ipmi_client_rr = { + .message = fsp_ipmi_rr_notify, +}; + +static bool fsp_ipmi_send_response(uint32_t cmd) +{ + struct fsp_msg *resp; + int rc; + + resp = fsp_mkmsg(cmd, 0); + if (!resp) { + log_simple_error(&e_info(OPAL_RC_IPMI_RESP), "IPMI: Failed to " + "allocate response message\n"); + return false; + } + + rc = fsp_queue_msg(resp, fsp_freemsg); + if (rc) { + fsp_freemsg(resp); + log_simple_error(&e_info(OPAL_RC_IPMI_RESP), "IPMI: Failed to " + "queue response message\n"); + return false; + } + + return true; +} + +static bool fsp_ipmi_read_response(struct fsp_msg *msg) +{ + uint8_t *resp_buf = fsp_ipmi.ipmi_resp_buf; + uint32_t status = fsp_msg_get_data_word(msg, 3); + uint32_t length = fsp_msg_get_data_word(msg, 2); + struct ipmi_msg *ipmi_msg; + uint8_t netfn, cmd, cc; + + assert(fsp_ipmi.cur_msg); + ipmi_msg = &fsp_ipmi.cur_msg->ipmi_msg; + + /* Response TCE token */ + assert(fsp_msg_get_data_word(msg, 1) == PSI_DMA_PLAT_RESP_BUF); + + if (status != FSP_STATUS_SUCCESS) { + if(status == FSP_STATUS_DMA_ERROR) + log_simple_error(&e_info(OPAL_RC_IPMI_DMA_ERROR_RESP), "IPMI: Received " + "DMA ERROR response from FSP, this may be due to FSP " + "is in termination state:0x%02x\n", status); + else + log_simple_error(&e_info(OPAL_RC_IPMI_RESP), "IPMI: FSP response " + "received with bad status:0x%02x\n", status); + + fsp_ipmi_cmd_done(ipmi_msg->cmd, + IPMI_NETFN_RETURN_CODE(ipmi_msg->netfn), + IPMI_ERR_UNSPECIFIED); + return fsp_ipmi_send_response(FSP_RSP_PLAT_DATA | + FSP_STATUS_SUCCESS); + } + + /* KCS response message format */ + netfn = *resp_buf++; + cmd = *resp_buf++; + cc = *resp_buf++; + length -= FSP_IPMI_RESP_MIN_LEN; + + prlog(PR_TRACE, "IPMI: fsp response received, netfn:0x%02x, cmd:0x%02x," + " cc:0x%02x, length:%d\n", netfn, cmd, cc, length); + + if (length > ipmi_msg->resp_size) { + prlog(PR_DEBUG, "IPMI: Length mismatch in response (%d, %d)\n", + length, ipmi_msg->resp_size); + length = ipmi_msg->resp_size; /* Truncate */ + cc = IPMI_ERR_MSG_TRUNCATED; + } + + ipmi_msg->resp_size = length; + if (length) + memcpy(ipmi_msg->data, resp_buf, length); + + fsp_ipmi_cmd_done(cmd, netfn, cc); + + return fsp_ipmi_send_response(FSP_RSP_PLAT_DATA); +} + +static bool fsp_ipmi_response(uint32_t cmd_sub_mod, struct fsp_msg *msg) +{ + bool rc; + + switch (cmd_sub_mod) { + case FSP_CMD_SEND_PLAT_DATA: + prlog(PR_TRACE, "FSP_CMD_SEND_PLAT_DATA command received\n"); + rc = fsp_ipmi_read_response(msg); + break; + default: + return false; + }; + + /* If response sent successfully, pick the next request */ + if (rc == true) + fsp_ipmi_send_request(); + + return rc; +} + +static struct fsp_client fsp_ipmi_client = { + .message = fsp_ipmi_response, +}; + +void fsp_ipmi_init(void) +{ + fsp_tce_map(PSI_DMA_PLAT_REQ_BUF, fsp_ipmi.ipmi_req_buf, + PSI_DMA_PLAT_REQ_BUF_SIZE); + fsp_tce_map(PSI_DMA_PLAT_RESP_BUF, fsp_ipmi.ipmi_resp_buf, + PSI_DMA_PLAT_RESP_BUF_SIZE); + + list_head_init(&fsp_ipmi.msg_queue); + init_lock(&fsp_ipmi.lock); + + fsp_register_client(&fsp_ipmi_client, FSP_MCLASS_FETCH_SPDATA); + fsp_register_client(&fsp_ipmi_client_rr, FSP_MCLASS_RR_EVENT); + ipmi_register_backend(&fsp_ipmi_backend); +} diff --git a/roms/skiboot/hw/fsp/fsp-leds.c b/roms/skiboot/hw/fsp/fsp-leds.c new file mode 100644 index 
000000000..5a552ab3e --- /dev/null +++ b/roms/skiboot/hw/fsp/fsp-leds.c @@ -0,0 +1,1939 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* + * LED location code and indicator handling + * + * Copyright 2013-2019 IBM Corp. + */ + +#define pr_fmt(fmt) "FSPLED: " fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define buf_write(p, type, val) do { *(type *)(p) = val;\ + p += sizeof(type); } while(0) +#define buf_read(p, type, addr) do { *addr = *(type *)(p);\ + p += sizeof(type); } while(0) + +/* SPCN replay threshold */ +#define SPCN_REPLAY_THRESHOLD 2 + +/* LED support status */ +enum led_support_state { + LED_STATE_ABSENT, + LED_STATE_READING, + LED_STATE_PRESENT, +}; + +static enum led_support_state led_support = LED_STATE_ABSENT; + +/* + * PSI mapped buffer for LED data + * + * Mapped once and never unmapped. Used for fetching all + * available LED information and creating the list. Also + * used for setting individual LED state. + * + */ +static void *led_buffer; +static u8 *loc_code_list_buffer = NULL; + +/* Maintain list of all LEDs + * + * The contents here will be used to cater requests from FSP + * async commands and HV initiated OPAL calls. + */ +static struct list_head cec_ledq; /* CEC LED list */ +static struct list_head encl_ledq; /* Enclosure LED list */ +static struct list_head spcn_cmdq; /* SPCN command queue */ + +/* LED lock */ +static struct lock led_lock = LOCK_UNLOCKED; +static struct lock spcn_cmd_lock = LOCK_UNLOCKED; +static struct lock sai_lock = LOCK_UNLOCKED; + +static bool spcn_cmd_complete = true; /* SPCN command complete */ + +/* Last SPCN command */ +static u32 last_spcn_cmd; +static int replay = 0; + +/* + * FSP controls System Attention Indicator. But it expects hypervisor + * keep track of the status and serve get LED state request (both from + * Linux and FSP itself)! 
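+ * The cached state below is seeded by fsp_get_sai() ("Read initial SAI state") and kept current by sai_update_notification() and the set/reset paths in fsp_set_sai().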
+ */ +static struct sai_data sai_data; + +/* Forward declaration */ +static void fsp_read_leds_data_complete(struct fsp_msg *msg); +static int process_led_state_change(void); + + +DEFINE_LOG_ENTRY(OPAL_RC_LED_SPCN, OPAL_PLATFORM_ERR_EVT, OPAL_LED, + OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL, + OPAL_NA); + +DEFINE_LOG_ENTRY(OPAL_RC_LED_BUFF, OPAL_PLATFORM_ERR_EVT, OPAL_LED, + OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL, + OPAL_NA); + +DEFINE_LOG_ENTRY(OPAL_RC_LED_LC, OPAL_PLATFORM_ERR_EVT, OPAL_LED, + OPAL_PLATFORM_FIRMWARE, OPAL_INFO, OPAL_NA); + +DEFINE_LOG_ENTRY(OPAL_RC_LED_STATE, OPAL_PLATFORM_ERR_EVT, OPAL_LED, + OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_GENERAL, + OPAL_NA); + +DEFINE_LOG_ENTRY(OPAL_RC_LED_SUPPORT, OPAL_PLATFORM_ERR_EVT, OPAL_LED, + OPAL_PLATFORM_FIRMWARE, OPAL_INFO, OPAL_NA); + + +/* Find descendent LED record with CEC location code in CEC list */ +static struct fsp_led_data *fsp_find_cec_led(char *loc_code) +{ + struct fsp_led_data *led, *next; + + list_for_each_safe(&cec_ledq, led, next, link) { + if (strcmp(led->loc_code, loc_code)) + continue; + return led; + } + return NULL; +} + +/* Find encl LED record with ENCL location code in ENCL list */ +static struct fsp_led_data *fsp_find_encl_led(char *loc_code) +{ + struct fsp_led_data *led, *next; + + list_for_each_safe(&encl_ledq, led, next, link) { + if (strcmp(led->loc_code, loc_code)) + continue; + return led; + } + return NULL; +} + +/* Find encl LED record with CEC location code in CEC list */ +static struct fsp_led_data *fsp_find_encl_cec_led(char *loc_code) +{ + struct fsp_led_data *led, *next; + + list_for_each_safe(&cec_ledq, led, next, link) { + if (strstr(led->loc_code, "-")) + continue; + if (!strstr(loc_code, led->loc_code)) + continue; + return led; + } + return NULL; +} + +/* Find encl LED record with CEC location code in ENCL list */ +static struct fsp_led_data *fsp_find_encl_encl_led(char *loc_code) +{ + struct fsp_led_data *led, *next; + + list_for_each_safe(&encl_ledq, led, next, link) { + if (!strstr(loc_code, led->loc_code)) + continue; + return led; + } + return NULL; +} + +/* Compute the ENCL LED status in CEC list */ +static void compute_encl_status_cec(struct fsp_led_data *encl_led) +{ + struct fsp_led_data *led, *next; + + encl_led->status &= ~SPCN_LED_IDENTIFY_MASK; + encl_led->status &= ~SPCN_LED_FAULT_MASK; + + list_for_each_safe(&cec_ledq, led, next, link) { + if (!strstr(led->loc_code, encl_led->loc_code)) + continue; + + /* Don't count the enclosure LED itself */ + if (!strcmp(led->loc_code, encl_led->loc_code)) + continue; + + if (led->status & SPCN_LED_IDENTIFY_MASK) + encl_led->status |= SPCN_LED_IDENTIFY_MASK; + + if (led->status & SPCN_LED_FAULT_MASK) + encl_led->status |= SPCN_LED_FAULT_MASK; + } +} + +/* Is this an enclosure LED? */ +static bool is_enclosure_led(char *loc_code) +{ + if (strstr(loc_code, "-")) + return false; + if (!fsp_find_cec_led(loc_code) || !fsp_find_encl_led(loc_code)) + return false; + return true; +} + +static inline void opal_led_update_complete(u64 async_token, u64 result) +{ + opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL, + cpu_to_be64(async_token), + cpu_to_be64(result)); +} + +static inline bool is_sai_loc_code(const char *loc_code) +{ + if (!loc_code) + return false; + + if (!strncmp(sai_data.loc_code, loc_code, strlen(sai_data.loc_code))) + return true; + + return false; +} + +/* Set/Reset System attention indicator */ +static void fsp_set_sai_complete(struct fsp_msg *msg) +{ + int ret = OPAL_SUCCESS; + int rc = msg->resp->word1 &
0xff00; + struct led_set_cmd *spcn_cmd = (struct led_set_cmd *)msg->user_data; + + if (rc) { + /** + * @fwts-label FSPSAIFailed + * @fwts-advice Failed to update System Attention Indicator. + * Likely means some bug with OPAL interacting with FSP. + */ + prlog(PR_ERR, "Update SAI cmd failed [rc=%d].\n", rc); + ret = OPAL_INTERNAL_ERROR; + + /* Roll back */ + lock(&sai_lock); + sai_data.state = spcn_cmd->ckpt_status; + unlock(&sai_lock); + } + + if (spcn_cmd->cmd_src == SPCN_SRC_OPAL) + opal_led_update_complete(spcn_cmd->async_token, ret); + + /* free msg and spcn command */ + free(spcn_cmd); + fsp_freemsg(msg); + + /* Process pending LED update request */ + process_led_state_change(); +} + +static int fsp_set_sai(struct led_set_cmd *spcn_cmd) +{ + int rc = -ENOMEM; + uint32_t cmd = FSP_CMD_SA_INDICATOR; + struct fsp_msg *msg; + + /* + * FSP does not allow hypervisor to set real SAI, but we can + * reset real SAI. Also in our case only host can control + * LEDs, not guests. Hence we will set platform virtual SAI + * and reset real SAI. + */ + if (spcn_cmd->state == LED_STATE_ON) + cmd |= FSP_LED_SET_PLAT_SAI; + else + cmd |= FSP_LED_RESET_REAL_SAI; + + prlog(PR_TRACE, "Update SAI Indicator [cur : 0x%x, new : 0x%x].\n", + sai_data.state, spcn_cmd->state); + + msg = fsp_mkmsg(cmd, 0); + if (!msg) { + /** + * @fwts-label SAIMallocFail + * @fwts-advice OPAL ran out of memory while trying to + * allocate an FSP message in SAI code path. This indicates + * an OPAL bug that caused OPAL to run out of memory. + */ + prlog(PR_ERR, "%s: Memory allocation failed.\n", __func__); + goto sai_fail; + } + + spcn_cmd->ckpt_status = sai_data.state; + msg->user_data = spcn_cmd; + rc = fsp_queue_msg(msg, fsp_set_sai_complete); + if (rc) { + fsp_freemsg(msg); + /** + * @fwts-label SAIQueueFail + * @fwts-advice Error in queueing message to FSP in SAI code + * path. Likely an OPAL bug. + */ + prlog(PR_ERR, "%s: Failed to queue the message\n", __func__); + goto sai_fail; + } + + lock(&sai_lock); + sai_data.state = spcn_cmd->state; + unlock(&sai_lock); + + return OPAL_SUCCESS; + +sai_fail: + if (spcn_cmd->cmd_src == SPCN_SRC_OPAL) + opal_led_update_complete(spcn_cmd->async_token, + OPAL_INTERNAL_ERROR); + + return OPAL_INTERNAL_ERROR; +} + +static void fsp_get_sai_complete(struct fsp_msg *msg) +{ + int rc = msg->resp->word1 & 0xff00; + + if (rc) { + /** + * @fwts-label FSPSAIGetFailed + * @fwts-advice Possibly an error on FSP side, OPAL failed + * to read state from FSP. + */ + prlog(PR_ERR, "Read real SAI cmd failed [rc = 0x%x].\n", rc); + } else { /* Update SAI state */ + lock(&sai_lock); + sai_data.state = fsp_msg_get_data_word(msg->resp, 0) & 0xff; + unlock(&sai_lock); + + prlog(PR_TRACE, "SAI initial state = 0x%x\n", sai_data.state); + } + + fsp_freemsg(msg); +} + +/* Read initial SAI state. */ +static void fsp_get_sai(void) +{ + int rc; + uint32_t cmd = FSP_CMD_SA_INDICATOR | FSP_LED_READ_REAL_SAI; + struct fsp_msg *msg; + + msg = fsp_mkmsg(cmd, 0); + if (!msg) { + /** + * @fwts-label FSPGetSAIMallocFail + * @fwts-advice OPAL ran out of memory: OPAL bug. 
+ */ + prlog(PR_ERR, "%s: Memory allocation failed.\n", __func__); + return; + } + rc = fsp_queue_msg(msg, fsp_get_sai_complete); + if (rc) { + fsp_freemsg(msg); + /** + * @fwts-label FSPGetSAIQueueFail + * @fwts-advice Failed to queue message to FSP: OPAL bug + */ + prlog(PR_ERR, "%s: Failed to queue the message\n", __func__); + } +} + +static bool sai_update_notification(struct fsp_msg *msg) +{ + uint32_t state = fsp_msg_get_data_word(msg, 2); + uint32_t param_id = fsp_msg_get_data_word(msg, 0); + int len = fsp_msg_get_data_word(msg, 1) & 0xffff; + + if (param_id != SYS_PARAM_REAL_SAI && param_id != SYS_PARAM_PLAT_SAI) + return false; + + if (len != 4) + return false; + + if (state != LED_STATE_ON && state != LED_STATE_OFF) + return false; + + /* Update SAI state */ + lock(&sai_lock); + sai_data.state = state; + unlock(&sai_lock); + + prlog(PR_TRACE, "SAI updated. New SAI state = 0x%x\n", state); + return true; +} + + +/* + * Update both the local LED lists to reflect upon led state changes + * occurred with the recent SPCN command. Subsequent LED requests will + * be served with these updates changed to the list. + */ +static void update_led_list(char *loc_code, u32 led_state, u32 excl_bit) +{ + struct fsp_led_data *led = NULL, *encl_led = NULL, *encl_cec_led = NULL; + bool is_encl_led = is_enclosure_led(loc_code); + + /* Enclosure LED in CEC list */ + encl_cec_led = fsp_find_encl_cec_led(loc_code); + if (!encl_cec_led) { + log_simple_error(&e_info(OPAL_RC_LED_LC), + "Could not find enclosure LED in CEC LC=%s\n", + loc_code); + return; + } + + /* Update state */ + if (is_encl_led) { + /* Enclosure exclusive bit */ + encl_cec_led->excl_bit = excl_bit; + } else { /* Descendant LED in CEC list */ + led = fsp_find_cec_led(loc_code); + if (!led) { + log_simple_error(&e_info(OPAL_RC_LED_LC), + "Could not find descendent LED in \ + CEC LC=%s\n", loc_code); + return; + } + led->status = led_state; + } + + /* Enclosure LED in ENCL list */ + encl_led = fsp_find_encl_encl_led(loc_code); + if (!encl_led) { + log_simple_error(&e_info(OPAL_RC_LED_LC), + "Could not find enclosure LED in ENCL LC=%s\n", + loc_code); + return; + } + + /* Compute descendent rolled up status */ + compute_encl_status_cec(encl_cec_led); + + /* Check whether exclussive bits set */ + if (encl_cec_led->excl_bit & FSP_LED_EXCL_FAULT) + encl_cec_led->status |= SPCN_LED_FAULT_MASK; + + if (encl_cec_led->excl_bit & FSP_LED_EXCL_IDENTIFY) + encl_cec_led->status |= SPCN_LED_IDENTIFY_MASK; + + /* Copy over */ + encl_led->status = encl_cec_led->status; + encl_led->excl_bit = encl_cec_led->excl_bit; +} + +static int fsp_set_led_response(uint32_t cmd) +{ + struct fsp_msg *msg; + int rc = -1; + + msg = fsp_mkmsg(cmd, 0); + if (!msg) { + prerror("Failed to allocate FSP_RSP_SET_LED_STATE [cmd=%x])\n", + cmd); + } else { + rc = fsp_queue_msg(msg, fsp_freemsg); + if (rc != OPAL_SUCCESS) { + fsp_freemsg(msg); + prerror("Failed to queue FSP_RSP_SET_LED_STATE" + " [cmd=%x]\n", cmd); + } + } + return rc; +} + +static void fsp_spcn_set_led_completion(struct fsp_msg *msg) +{ + struct fsp_msg *resp = msg->resp; + u32 cmd = FSP_RSP_SET_LED_STATE; + u8 status = resp->word1 & 0xff00; + struct led_set_cmd *spcn_cmd = (struct led_set_cmd *)msg->user_data; + + lock(&led_lock); + + /* + * LED state update request came as part of FSP async message + * FSP_CMD_SET_LED_STATE, we need to send response message. + * + * Also if SPCN command failed, then roll back changes. 
+ */ + if (status != FSP_STATUS_SUCCESS) { + log_simple_error(&e_info(OPAL_RC_LED_SPCN), + "Last SPCN command failed, status=%02x\n", + status); + cmd |= FSP_STATUS_GENERIC_ERROR; + + /* Rollback the changes */ + update_led_list(spcn_cmd->loc_code, + spcn_cmd->ckpt_status, spcn_cmd->ckpt_excl_bit); + } + + /* FSP initiated SPCN command */ + if (spcn_cmd->cmd_src == SPCN_SRC_FSP) + fsp_set_led_response(cmd); + + /* OPAL initiated SPCN command */ + if (spcn_cmd->cmd_src == SPCN_SRC_OPAL) { + if (status != FSP_STATUS_SUCCESS) + opal_led_update_complete(spcn_cmd->async_token, + OPAL_INTERNAL_ERROR); + else + opal_led_update_complete(spcn_cmd->async_token, + OPAL_SUCCESS); + } + + unlock(&led_lock); + + /* free msg and spcn command */ + free(spcn_cmd); + fsp_freemsg(msg); + + /* Process pending LED update request */ + process_led_state_change(); +} + +/* + * Set the state of the LED pointed by the location code + * + * LED command: FAULT state or IDENTIFY state + * LED state : OFF (reset) or ON (set) + * + * SPCN TCE mapped buffer entries for setting LED state + * + * struct spcn_led_data { + * u8 lc_len; + * u16 state; + * char lc_code[LOC_CODE_SIZE]; + *}; + */ +static int fsp_msg_set_led_state(struct led_set_cmd *spcn_cmd) +{ + struct spcn_led_data sled; + struct fsp_msg *msg = NULL; + struct fsp_led_data *led = NULL; + void *buf = led_buffer; + u16 data_len = 0; + u32 cmd_hdr = 0; + u32 cmd = FSP_RSP_SET_LED_STATE; + int rc = -1; + + memset(sled.lc_code, 0, LOC_CODE_SIZE); + sled.lc_len = strlen(spcn_cmd->loc_code); + if (sled.lc_len >= LOC_CODE_SIZE) + sled.lc_len = LOC_CODE_SIZE - 1; + strncpy(sled.lc_code, spcn_cmd->loc_code, LOC_CODE_SIZE - 1); + + lock(&led_lock); + + /* Location code length + Location code + LED control */ + data_len = LOC_CODE_LEN + sled.lc_len + LED_CONTROL_LEN; + cmd_hdr = SPCN_MOD_SET_LED_CTL_LOC_CODE << 24 | SPCN_CMD_SET << 16 | + data_len; + + /* Fetch the current state of LED */ + led = fsp_find_cec_led(spcn_cmd->loc_code); + + /* LED not present */ + if (led == NULL) { + if (spcn_cmd->cmd_src == SPCN_SRC_FSP) { + cmd |= FSP_STATUS_INVALID_LC; + fsp_set_led_response(cmd); + } + + if (spcn_cmd->cmd_src == SPCN_SRC_OPAL) + opal_led_update_complete(spcn_cmd->async_token, + OPAL_INTERNAL_ERROR); + + unlock(&led_lock); + return rc; + } + + /* + * Checkpoint the status here, will use it if the SPCN + * command eventually fails. 
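+ * (The rollback itself happens in fsp_spcn_set_led_completion() and in the queueing-failure path below.)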
+ */ + spcn_cmd->ckpt_status = led->status; + spcn_cmd->ckpt_excl_bit = led->excl_bit; + sled.state = cpu_to_be16(led->status); + + /* Update the exclusive LED bits */ + if (is_enclosure_led(spcn_cmd->loc_code)) { + if (spcn_cmd->command == LED_COMMAND_FAULT) { + if (spcn_cmd->state == LED_STATE_ON) + led->excl_bit |= FSP_LED_EXCL_FAULT; + if (spcn_cmd->state == LED_STATE_OFF) + led->excl_bit &= ~FSP_LED_EXCL_FAULT; + } + + if (spcn_cmd->command == LED_COMMAND_IDENTIFY) { + if (spcn_cmd->state == LED_STATE_ON) + led->excl_bit |= FSP_LED_EXCL_IDENTIFY; + if (spcn_cmd->state == LED_STATE_OFF) + led->excl_bit &= ~FSP_LED_EXCL_IDENTIFY; + } + } + + /* LED FAULT command */ + if (spcn_cmd->command == LED_COMMAND_FAULT) { + if (spcn_cmd->state == LED_STATE_ON) + sled.state |= cpu_to_be16(SPCN_LED_FAULT_MASK); + if (spcn_cmd->state == LED_STATE_OFF) + sled.state &= cpu_to_be16(~SPCN_LED_FAULT_MASK); + } + + /* LED IDENTIFY command */ + if (spcn_cmd->command == LED_COMMAND_IDENTIFY) { + if (spcn_cmd->state == LED_STATE_ON) + sled.state |= cpu_to_be16(SPCN_LED_IDENTIFY_MASK); + if (spcn_cmd->state == LED_STATE_OFF) + sled.state &= cpu_to_be16(~SPCN_LED_IDENTIFY_MASK); + } + + /* Write into SPCN TCE buffer */ + buf_write(buf, u8, sled.lc_len); /* Location code length */ + memcpy(buf, sled.lc_code, sled.lc_len); /* Location code */ + buf += sled.lc_len; + buf_write(buf, __be16, sled.state); /* LED state */ + + msg = fsp_mkmsg(FSP_CMD_SPCN_PASSTHRU, 4, + SPCN_ADDR_MODE_CEC_NODE, cmd_hdr, 0, PSI_DMA_LED_BUF); + if (!msg) { + cmd |= FSP_STATUS_GENERIC_ERROR; + rc = -1; + goto update_fail; + } + + /* + * Update the local lists based on the attempted SPCN command to + * set/reset an individual led (CEC or ENCL). + */ + update_led_list(spcn_cmd->loc_code, be16_to_cpu(sled.state), led->excl_bit); + msg->user_data = spcn_cmd; + + rc = fsp_queue_msg(msg, fsp_spcn_set_led_completion); + if (rc != OPAL_SUCCESS) { + cmd |= FSP_STATUS_GENERIC_ERROR; + fsp_freemsg(msg); + /* Revert LED state update */ + update_led_list(spcn_cmd->loc_code, spcn_cmd->ckpt_status, + spcn_cmd->ckpt_excl_bit); + } + +update_fail: + if (rc) { + log_simple_error(&e_info(OPAL_RC_LED_STATE), + "Set led state failed at LC=%s\n", + spcn_cmd->loc_code); + + if (spcn_cmd->cmd_src == SPCN_SRC_FSP) + fsp_set_led_response(cmd); + + if (spcn_cmd->cmd_src == SPCN_SRC_OPAL) + opal_led_update_complete(spcn_cmd->async_token, + OPAL_INTERNAL_ERROR); + } + + unlock(&led_lock); + return rc; +} + +/* + * process_led_state_change + * + * If the command queue is empty, it sets 'spcn_cmd_complete' to true + * and just returns. Else it pops one element from the command queue + * and processes the command for the requested LED state change. + */ +static int process_led_state_change(void) +{ + struct led_set_cmd *spcn_cmd; + int rc = 0; + + /* + * The command queue is empty. This will only + * happen during the SPCN command callback path + * in which case we set 'spcn_cmd_complete' to true. + */ + lock(&spcn_cmd_lock); + if (list_empty(&spcn_cmdq)) { + spcn_cmd_complete = true; + unlock(&spcn_cmd_lock); + return rc; + } + + spcn_cmd = list_pop(&spcn_cmdq, struct led_set_cmd, link); + unlock(&spcn_cmd_lock); + + if (is_sai_loc_code(spcn_cmd->loc_code)) + rc = fsp_set_sai(spcn_cmd); + else + rc = fsp_msg_set_led_state(spcn_cmd); + + if (rc) { + free(spcn_cmd); + process_led_state_change(); + } + + return rc; +} + +/* + * queue_led_state_change + * + * FSP async command or OPAL based request for LED state change gets queued
If no previous SPCN command is pending, then it + * immediately pops up one element from the list and processes it. If previous + * SPCN commands are still pending then it just queues up and return. When the + * SPCN command callback gets to execute, it processes one element from the + * list and keeps the chain execution going. At last when there are no elements + * in the command queue it sets 'spcn_cmd_complete' as true again. + */ +static int queue_led_state_change(char *loc_code, u8 command, + u8 state, int cmd_src, uint64_t async_token) +{ + struct led_set_cmd *cmd; + int rc = 0; + + /* New request node */ + cmd = zalloc(sizeof(struct led_set_cmd)); + if (!cmd) { + /** + * @fwts-label FSPLEDRequestMallocFail + * @fwts-advice OPAL failed to allocate memory for FSP LED + * command. Likely an OPAL bug led to out of memory. + */ + prlog(PR_ERR, "SPCN set command node allocation failed\n"); + return -1; + } + + /* Save the request */ + strncpy(cmd->loc_code, loc_code, LOC_CODE_SIZE - 1); + cmd->command = command; + cmd->state = state; + cmd->cmd_src = cmd_src; + cmd->async_token = async_token; + + /* Add to the queue */ + lock(&spcn_cmd_lock); + list_add_tail(&spcn_cmdq, &cmd->link); + + /* No previous SPCN command pending */ + if (spcn_cmd_complete) { + spcn_cmd_complete = false; + unlock(&spcn_cmd_lock); + rc = process_led_state_change(); + return rc; + } + + unlock(&spcn_cmd_lock); + return rc; +} + +/* + * Write single location code information into the TCE outbound buffer + * + * Data layout + * + * 2 bytes - Length of location code structure + * 4 bytes - CCIN in ASCII + * 1 byte - Resource status flag + * 1 byte - Indicator state + * 1 byte - Raw loc code length + * 1 byte - Loc code field size + * Field size byte - Null terminated ASCII string padded to 4 byte boundary + * + */ +static u32 fsp_push_data_to_tce(struct fsp_led_data *led, u8 *out_data, + u32 total_size) +{ + struct fsp_loc_code_data lcode; + + /* CCIN value is irrelevant */ + lcode.ccin = 0x0; + + lcode.status = FSP_IND_NOT_IMPLMNTD; + + if (led->parms & SPCN_LED_IDENTIFY_MASK) + lcode.status = FSP_IND_IMPLMNTD; + + /* LED indicator status */ + lcode.ind_state = FSP_IND_INACTIVE; + if (led->status & SPCN_LED_IDENTIFY_MASK) + lcode.ind_state |= FSP_IND_IDENTIFY_ACTV; + if (led->status & SPCN_LED_FAULT_MASK) + lcode.ind_state |= FSP_IND_FAULT_ACTV; + + /* Location code */ + memset(lcode.loc_code, 0, LOC_CODE_SIZE); + lcode.raw_len = strlen(led->loc_code); + strncpy(lcode.loc_code, led->loc_code, LOC_CODE_SIZE - 1); + lcode.fld_sz = sizeof(lcode.loc_code); + + /* Rest of the structure */ + lcode.size = cpu_to_be16(sizeof(lcode)); + lcode.status &= 0x0f; + + /* + * Check for outbound buffer overflow. If there are still + * more LEDs to be sent across to FSP, don't send, ignore. + */ + if ((total_size + be16_to_cpu(lcode.size)) > PSI_DMA_LOC_COD_BUF_SZ) + return 0; + + /* Copy over to the buffer */ + memcpy(out_data, &lcode, sizeof(lcode)); + + return be16_to_cpu(lcode.size); +} + +/* + * Send out LED information structure pointed by "loc_code" + * to FSP through the PSI DMA mapping. Buffer layout structure + * must be followed. 
+ */ +static void fsp_ret_loc_code_list(u16 req_type, char *loc_code) +{ + struct fsp_led_data *led, *next; + struct fsp_msg *msg; + + u8 *data; /* Start of TCE mapped buffer */ + u8 *out_data; /* Start of location code data */ + u32 bytes_sent = 0, total_size = 0; + u16 header_size = 0, flags = 0; + + if (loc_code_list_buffer == NULL) { + prerror("No loc_code_list_buffer\n"); + return; + } + + /* Init the addresses */ + data = loc_code_list_buffer; + out_data = NULL; + + /* Unmapping through FSP_CMD_RET_LOC_BUFFER command */ + fsp_tce_map(PSI_DMA_LOC_COD_BUF, (void *)data, PSI_DMA_LOC_COD_BUF_SZ); + out_data = data + 8; + + /* CEC LED list */ + list_for_each_safe(&cec_ledq, led, next, link) { + /* + * When the request type is system wide led list + * i.e GET_LC_CMPLT_SYS, send the entire contents + * of the CEC list including both all descendents + * and all of their enclosures. + */ + + if (req_type == GET_LC_ENCLOSURES) + break; + + if (req_type == GET_LC_ENCL_DESCENDANTS) { + if (strstr(led->loc_code, loc_code) == NULL) + continue; + } + + if (req_type == GET_LC_SINGLE_LOC_CODE) { + if (strcmp(led->loc_code, loc_code)) + continue; + } + + /* Push the data into TCE buffer */ + bytes_sent = fsp_push_data_to_tce(led, out_data, total_size); + + /* Advance the TCE pointer */ + out_data += bytes_sent; + total_size += bytes_sent; + } + + /* Enclosure LED list */ + if (req_type == GET_LC_ENCLOSURES) { + list_for_each_safe(&encl_ledq, led, next, link) { + + /* Push the data into TCE buffer */ + bytes_sent = fsp_push_data_to_tce(led, + out_data, total_size); + + /* Advance the TCE pointer */ + out_data += bytes_sent; + total_size += bytes_sent; + } + } + + /* Count from 'data' instead of 'data_out' */ + total_size += 8; + memcpy(data, &total_size, sizeof(total_size)); + + header_size = OUTBUF_HEADER_SIZE; + memcpy(data + sizeof(total_size), &header_size, sizeof(header_size)); + + if (req_type == GET_LC_ENCL_DESCENDANTS) + flags = 0x8000; + + memcpy(data + sizeof(total_size) + sizeof(header_size), &flags, + sizeof(flags)); + msg = fsp_mkmsg(FSP_RSP_GET_LED_LIST, 3, 0, + PSI_DMA_LOC_COD_BUF, total_size); + if (!msg) { + prerror("Failed to allocate FSP_RSP_GET_LED_LIST.\n"); + } else { + if (fsp_queue_msg(msg, fsp_freemsg)) { + fsp_freemsg(msg); + prerror("Failed to queue FSP_RSP_GET_LED_LIST\n"); + } + } +} + +/* + * FSP async command: FSP_CMD_GET_LED_LIST + * + * (1) FSP sends the list of location codes through inbound buffer + * (2) HV sends the status of those location codes through outbound buffer + * + * Inbound buffer data layout (loc code request structure) + * + * 2 bytes - Length of entire structure + * 2 bytes - Request type + * 1 byte - Raw length of location code + * 1 byte - Location code field size + * `Field size` bytes - NULL terminated ASCII location code string + */ +static void fsp_get_led_list(struct fsp_msg *msg) +{ + struct fsp_loc_code_req req; + u32 tce_token = fsp_msg_get_data_word(msg, 1); + void *buf; + + /* Parse inbound buffer */ + buf = fsp_inbound_buf_from_tce(tce_token); + if (!buf) { + struct fsp_msg *msg; + msg = fsp_mkmsg(FSP_RSP_GET_LED_LIST | FSP_STATUS_INVALID_DATA, + 0); + if (!msg) { + prerror("Failed to allocate FSP_RSP_GET_LED_LIST" + " | FSP_STATUS_INVALID_DATA\n"); + } else { + if (fsp_queue_msg(msg, fsp_freemsg)) { + fsp_freemsg(msg); + prerror("Failed to queue " + "FSP_RSP_GET_LED_LIST |" + " FSP_STATUS_INVALID_DATA\n"); + } + } + return; + } + memcpy(&req, buf, sizeof(req)); + + prlog(PR_TRACE, "Request for loc code list type 0x%04x LC=%s\n", + 
be16_to_cpu(req.req_type), req.loc_code); + + fsp_ret_loc_code_list(be16_to_cpu(req.req_type), req.loc_code); +} + +/* + * FSP async command: FSP_CMD_RET_LOC_BUFFER + * + * With this command FSP returns ownership of the outbound buffer + * used by Sapphire to pass the indicator list previous time. That + * way FSP tells Sapphire that it has consumed all the data present + * on the outbound buffer and Sapphire can reuse it for next request. + */ +static void fsp_free_led_list_buf(struct fsp_msg *msg) +{ + u32 tce_token = fsp_msg_get_data_word(msg, 1); + u32 cmd = FSP_RSP_RET_LED_BUFFER; + struct fsp_msg *resp; + + /* Token does not point to outbound buffer */ + if (tce_token != PSI_DMA_LOC_COD_BUF) { + log_simple_error(&e_info(OPAL_RC_LED_BUFF), + "Invalid tce token from FSP\n"); + cmd |= FSP_STATUS_GENERIC_ERROR; + resp = fsp_mkmsg(cmd, 0); + if (!resp) { + prerror("Failed to allocate FSP_RSP_RET_LED_BUFFER" + "| FSP_STATUS_GENERIC_ERROR\n"); + return; + } + + if (fsp_queue_msg(resp, fsp_freemsg)) { + fsp_freemsg(resp); + prerror("Failed to queue " + "RET_LED_BUFFER|ERROR\n"); + } + return; + } + + /* Unmap the location code DMA buffer */ + fsp_tce_unmap(PSI_DMA_LOC_COD_BUF, PSI_DMA_LOC_COD_BUF_SZ); + + resp = fsp_mkmsg(cmd, 0); + if (!resp) { + prerror("Failed to allocate FSP_RSP_RET_LED_BUFFER\n"); + return; + } + if (fsp_queue_msg(resp, fsp_freemsg)) { + fsp_freemsg(resp); + prerror("Failed to queue FSP_RSP_RET_LED_BUFFER\n"); + } +} + +static void fsp_ret_led_state(char *loc_code) +{ + bool found = false; + u8 ind_state = 0; + u32 cmd = FSP_RSP_GET_LED_STATE; + struct fsp_led_data *led, *next; + struct fsp_msg *msg; + + if (is_sai_loc_code(loc_code)) { + if (sai_data.state & OPAL_SLOT_LED_STATE_ON) + ind_state = FSP_IND_FAULT_ACTV; + found = true; + } else { + list_for_each_safe(&cec_ledq, led, next, link) { + if (strcmp(loc_code, led->loc_code)) + continue; + + /* Found the location code */ + if (led->status & SPCN_LED_IDENTIFY_MASK) + ind_state |= FSP_IND_IDENTIFY_ACTV; + if (led->status & SPCN_LED_FAULT_MASK) + ind_state |= FSP_IND_FAULT_ACTV; + + found = true; + break; + } + } + + /* Location code not found */ + if (!found) { + log_simple_error(&e_info(OPAL_RC_LED_LC), + "Could not find the location code LC=%s\n", + loc_code); + cmd |= FSP_STATUS_INVALID_LC; + ind_state = 0xff; + } + + msg = fsp_mkmsg(cmd, 1, ind_state); + if (!msg) { + prerror("Couldn't alloc FSP_RSP_GET_LED_STATE\n"); + return; + } + + if (fsp_queue_msg(msg, fsp_freemsg)) { + fsp_freemsg(msg); + prerror("Couldn't queue FSP_RSP_GET_LED_STATE\n"); + } +} + +/* + * FSP async command: FSP_CMD_GET_LED_STATE + * + * With this command FSP query the state for any given LED + */ +static void fsp_get_led_state(struct fsp_msg *msg) +{ + struct fsp_get_ind_state_req req; + u32 tce_token = fsp_msg_get_data_word(msg, 1); + void *buf; + + /* Parse the inbound buffer */ + buf = fsp_inbound_buf_from_tce(tce_token); + if (!buf) { + struct fsp_msg *msg; + msg = fsp_mkmsg(FSP_RSP_GET_LED_STATE | + FSP_STATUS_INVALID_DATA, 0); + if (!msg) { + prerror("Failed to allocate FSP_RSP_GET_LED_STATE" + " | FSP_STATUS_INVALID_DATA\n"); + return; + } + if (fsp_queue_msg(msg, fsp_freemsg)) { + fsp_freemsg(msg); + prerror("Failed to queue FSP_RSP_GET_LED_STATE" + " | FSP_STATUS_INVALID_DATA\n"); + } + return; + } + memcpy(&req, buf, sizeof(req)); + + prlog(PR_TRACE, "%s: tce=0x%08x buf=%p rq.sz=%d rq.lc_len=%d" + " rq.fld_sz=%d LC: %02x %02x %02x %02x....\n", __func__, + tce_token, buf, req.size, req.lc_len, req.fld_sz, + req.loc_code[0], 
req.loc_code[1], + req.loc_code[2], req.loc_code[3]); + + /* Bound check */ + if (req.lc_len >= LOC_CODE_SIZE) { + log_simple_error(&e_info(OPAL_RC_LED_LC), + "Loc code too large in %s: %d bytes\n", + __func__, req.lc_len); + req.lc_len = LOC_CODE_SIZE - 1; + } + /* Ensure NULL termination */ + req.loc_code[req.lc_len] = 0; + + /* Do the deed */ + fsp_ret_led_state(req.loc_code); +} + +/* + * FSP async command: FSP_CMD_SET_LED_STATE + * + * With this command FSP sets/resets the state for any given LED + */ +static void fsp_set_led_state(struct fsp_msg *msg) +{ + struct fsp_set_ind_state_req req; + struct fsp_led_data *led, *next; + u32 tce_token = fsp_msg_get_data_word(msg, 1); + bool command, state; + void *buf; + int rc; + + /* Parse the inbound buffer */ + buf = fsp_inbound_buf_from_tce(tce_token); + if (!buf) { + fsp_set_led_response(FSP_RSP_SET_LED_STATE | + FSP_STATUS_INVALID_DATA); + return; + } + memcpy(&req, buf, sizeof(req)); + + prlog(PR_TRACE, "%s: tce=0x%08x buf=%p rq.sz=%d rq.typ=0x%04x" + " rq.lc_len=%d rq.fld_sz=%d LC: %02x %02x %02x %02x....\n", + __func__, tce_token, buf, be16_to_cpu(req.size), req.lc_len, req.fld_sz, + be16_to_cpu(req.req_type), + req.loc_code[0], req.loc_code[1], + req.loc_code[2], req.loc_code[3]); + + /* Bound check */ + if (req.lc_len >= LOC_CODE_SIZE) { + log_simple_error(&e_info(OPAL_RC_LED_LC), + "Loc code too large in %s: %d bytes\n", + __func__, req.lc_len); + req.lc_len = LOC_CODE_SIZE - 1; + } + /* Ensure NULL termination */ + req.loc_code[req.lc_len] = 0; + + /* Decode command */ + command = (req.ind_state & LOGICAL_IND_STATE_MASK) ? + LED_COMMAND_FAULT : LED_COMMAND_IDENTIFY; + state = (req.ind_state & ACTIVE_LED_STATE_MASK) ? + LED_STATE_ON : LED_STATE_OFF; + + /* Handle requests */ + switch (be16_to_cpu(req.req_type)) { + case SET_IND_ENCLOSURE: + list_for_each_safe(&cec_ledq, led, next, link) { + /* Only descendants of the same enclosure */ + if (!strstr(led->loc_code, req.loc_code)) + continue; + + /* Skip the enclosure */ + if (!strcmp(led->loc_code, req.loc_code)) + continue; + + rc = queue_led_state_change(led->loc_code, command, + state, SPCN_SRC_FSP, 0); + if (rc != 0) + fsp_set_led_response(FSP_RSP_SET_LED_STATE | + FSP_STATUS_GENERIC_ERROR); + } + break; + case SET_IND_SINGLE_LOC_CODE: + /* Set led state for single descendent led */ + rc = queue_led_state_change(req.loc_code, + command, state, SPCN_SRC_FSP, 0); + if (rc != 0) + fsp_set_led_response(FSP_RSP_SET_LED_STATE | + FSP_STATUS_GENERIC_ERROR); + break; + default: + fsp_set_led_response(FSP_RSP_SET_LED_STATE | + FSP_STATUS_NOT_SUPPORTED); + break; + } +} + +/* Handle received indicator message from FSP */ +static bool fsp_indicator_message(u32 cmd_sub_mod, struct fsp_msg *msg) +{ + u32 cmd; + struct fsp_msg *resp; + + /* LED support not available yet */ + if (led_support != LED_STATE_PRESENT) { + log_simple_error(&e_info(OPAL_RC_LED_SUPPORT), + "Indicator message while LED support not" + " available yet\n"); + return false; + } + + switch (cmd_sub_mod) { + case FSP_CMD_GET_LED_LIST: + prlog(PR_TRACE, "FSP_CMD_GET_LED_LIST command received\n"); + fsp_get_led_list(msg); + return true; + case FSP_CMD_RET_LED_BUFFER: + prlog(PR_TRACE, "FSP_CMD_RET_LED_BUFFER command received\n"); + fsp_free_led_list_buf(msg); + return true; + case FSP_CMD_GET_LED_STATE: + prlog(PR_TRACE, "FSP_CMD_GET_LED_STATE command received\n"); + fsp_get_led_state(msg); + return true; + case FSP_CMD_SET_LED_STATE: + prlog(PR_TRACE, "FSP_CMD_SET_LED_STATE command received\n"); + fsp_set_led_state(msg); + 
return true; + /* + * FSP async sub commands which have not been implemented. + * For these async sub commands, print for the log and ack + * the field service processor with a generic error. + */ + case FSP_CMD_GET_MTMS_LIST: + prlog(PR_TRACE, "FSP_CMD_GET_MTMS_LIST command received\n"); + cmd = FSP_RSP_GET_MTMS_LIST; + break; + case FSP_CMD_RET_MTMS_BUFFER: + prlog(PR_TRACE, "FSP_CMD_RET_MTMS_BUFFER command received\n"); + cmd = FSP_RSP_RET_MTMS_BUFFER; + break; + case FSP_CMD_SET_ENCL_MTMS: + prlog(PR_TRACE, "FSP_CMD_SET_MTMS command received\n"); + cmd = FSP_RSP_SET_ENCL_MTMS; + break; + case FSP_CMD_CLR_INCT_ENCL: + prlog(PR_TRACE, "FSP_CMD_CLR_INCT_ENCL command received\n"); + cmd = FSP_RSP_CLR_INCT_ENCL; + break; + case FSP_CMD_ENCL_MCODE_INIT: + prlog(PR_TRACE, "FSP_CMD_ENCL_MCODE_INIT command received\n"); + cmd = FSP_RSP_ENCL_MCODE_INIT; + break; + case FSP_CMD_ENCL_MCODE_INTR: + prlog(PR_TRACE, "FSP_CMD_ENCL_MCODE_INTR command received\n"); + cmd = FSP_RSP_ENCL_MCODE_INTR; + break; + case FSP_CMD_ENCL_POWR_TRACE: + prlog(PR_TRACE, "FSP_CMD_ENCL_POWR_TRACE command received\n"); + cmd = FSP_RSP_ENCL_POWR_TRACE; + break; + case FSP_CMD_RET_ENCL_TRACE_BUFFER: + prlog(PR_TRACE, "FSP_CMD_RET_ENCL_TRACE_BUFFER command received\n"); + cmd = FSP_RSP_RET_ENCL_TRACE_BUFFER; + break; + case FSP_CMD_GET_SPCN_LOOP_STATUS: + prlog(PR_TRACE, "FSP_CMD_GET_SPCN_LOOP_STATUS command received\n"); + cmd = FSP_RSP_GET_SPCN_LOOP_STATUS; + break; + case FSP_CMD_INITIATE_LAMP_TEST: + /* XXX: FSP ACK not required for this sub command */ + prlog(PR_TRACE, "FSP_CMD_INITIATE_LAMP_TEST command received\n"); + return true; + default: + return false; + } + cmd |= FSP_STATUS_GENERIC_ERROR; + resp = fsp_mkmsg(cmd, 0); + if (!resp) { + prerror("Failed to allocate FSP_STATUS_GENERIC_ERROR\n"); + return false; + } + if (fsp_queue_msg(resp, fsp_freemsg)) { + fsp_freemsg(resp); + prerror("Failed to queue FSP_STATUS_GENERIC_ERROR\n"); + return false; + } + return true; +} + +/* Indicator class client */ +static struct fsp_client fsp_indicator_client = { + .message = fsp_indicator_message, +}; + + +static int fsp_opal_get_sai(__be64 *led_mask, __be64 *led_value) +{ + *led_mask |= cpu_to_be64(OPAL_SLOT_LED_STATE_ON << OPAL_SLOT_LED_TYPE_ATTN); + if (sai_data.state & OPAL_SLOT_LED_STATE_ON) + *led_value |= cpu_to_be64(OPAL_SLOT_LED_STATE_ON << OPAL_SLOT_LED_TYPE_ATTN); + + return OPAL_SUCCESS; +} + +static int fsp_opal_set_sai(uint64_t async_token, char *loc_code, + const u64 led_mask, const u64 led_value) +{ + int state = LED_STATE_OFF; + + if (!((led_mask >> OPAL_SLOT_LED_TYPE_ATTN) & OPAL_SLOT_LED_STATE_ON)) + return OPAL_PARAMETER; + + if ((led_value >> OPAL_SLOT_LED_TYPE_ATTN) & OPAL_SLOT_LED_STATE_ON) + state = LED_STATE_ON; + + return queue_led_state_change(loc_code, 0, + state, SPCN_SRC_OPAL, async_token); +} + +/* + * fsp_opal_leds_get_ind (OPAL_LEDS_GET_INDICATOR) + * + * Argument Description Updated By + * -------- ----------- ---------- + * loc_code Location code of the LEDs (Host) + * led_mask LED types whose status is available (OPAL) + * led_value Status of the available LED types (OPAL) + * max_led_type Maximum number of supported LED types (Host/OPAL) + * + * The host will pass the location code of the LED types (loc_code) and + * maximum number of LED types it understands (max_led_type). OPAL will + * update the 'led_mask' with set bits pointing to LED types whose status + * is available and updates the 'led_value' with actual status. 
OPAL checks + * the 'max_led_type' to understand whether the host is newer or older + * compared to itself. In the case where the OPAL is newer compared + * to host (OPAL's max_led_type > host's max_led_type), it will update + * led_mask and led_value according to max_led_type requested by the host. + * When the host is newer compared to the OPAL (host's max_led_type > + * OPAL's max_led_type), OPAL updates 'max_led_type' to the maximum + * number of LED type it understands and updates 'led_mask', 'led_value' + * based on that maximum value of LED types. + */ +static int64_t fsp_opal_leds_get_ind(char *loc_code, __be64 *led_mask, + __be64 *led_value, __be64 *max_led_type) +{ + bool supported = true; + int64_t max; + int rc; + struct fsp_led_data *led; + + /* FSP not present */ + if (!fsp_present()) + return OPAL_HARDWARE; + + /* LED support not available */ + if (led_support != LED_STATE_PRESENT) + return OPAL_HARDWARE; + + max = be64_to_cpu(*max_led_type); + + /* Adjust max LED type */ + if (max > OPAL_SLOT_LED_TYPE_MAX) { + supported = false; + max = OPAL_SLOT_LED_TYPE_MAX; + *max_led_type = cpu_to_be64(max); + } + + /* Invalid parameter */ + if (max <= 0) + return OPAL_PARAMETER; + + /* Get System attention indicator state */ + if (is_sai_loc_code(loc_code)) { + rc = fsp_opal_get_sai(led_mask, led_value); + return rc; + } + + /* LED not found */ + led = fsp_find_cec_led(loc_code); + if (!led) + return OPAL_PARAMETER; + + *led_mask = 0; + *led_value = 0; + + /* Identify LED */ + --max; + *led_mask |= cpu_to_be64(OPAL_SLOT_LED_STATE_ON << OPAL_SLOT_LED_TYPE_ID); + if (led->status & SPCN_LED_IDENTIFY_MASK) + *led_value |= cpu_to_be64(OPAL_SLOT_LED_STATE_ON << OPAL_SLOT_LED_TYPE_ID); + + /* Fault LED */ + if (!max) + return OPAL_SUCCESS; + + --max; + *led_mask |= cpu_to_be64(OPAL_SLOT_LED_STATE_ON << OPAL_SLOT_LED_TYPE_FAULT); + if (led->status & SPCN_LED_FAULT_MASK) + *led_value |= cpu_to_be64(OPAL_SLOT_LED_STATE_ON << OPAL_SLOT_LED_TYPE_FAULT); + + /* OPAL doesn't support all the LED type requested by payload */ + if (!supported) + return OPAL_PARTIAL; + + return OPAL_SUCCESS; +} + +/* + * fsp_opal_leds_set_ind (OPAL_LEDS_SET_INDICATOR) + * + * Argument Description Updated By + * -------- ----------- ---------- + * loc_code Location code of the LEDs (Host) + * led_mask LED types whose status will be updated (Host) + * led_value Requested status of various LED types (Host) + * max_led_type Maximum number of supported LED types (Host/OPAL) + * + * The host will pass the location code of the LED types, mask, value + * and maximum number of LED types it understands. OPAL will update + * LED status for all the LED types mentioned in the mask with their + * value mentioned. OPAL checks the 'max_led_type' to understand + * whether the host is newer or older compared to itself. In case where + * the OPAL is newer compared to the host (OPAL's max_led_type > + * host's max_led_type), it updates LED status based on max_led_type + * requested from the host. When the host is newer compared to the OPAL + * (host's max_led_type > OPAL's max_led_type), OPAL updates + * 'max_led_type' to the maximum number of LED type it understands and + * then it updates LED status based on that updated maximum value of LED + * types. Host needs to check the returned updated value of max_led_type + * to figure out which part of it's request got served and which ones got + * ignored. 
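+ *
+ * Example (editor's sketch, based on the mask handling below): to turn
+ * the fault LED on while leaving the identify LED untouched, the host
+ * would pass
+ *
+ *   led_mask  = OPAL_SLOT_LED_STATE_ON << OPAL_SLOT_LED_TYPE_FAULT;
+ *   led_value = OPAL_SLOT_LED_STATE_ON << OPAL_SLOT_LED_TYPE_FAULT;
+ *
+ * and the same led_mask with led_value = 0 to turn it back off. On
+ * success the call returns OPAL_ASYNC_COMPLETION and completes later via
+ * the async_token.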
+ */ +static int64_t fsp_opal_leds_set_ind(uint64_t async_token, + char *loc_code, const u64 led_mask, + const u64 led_value, __be64 *max_led_type) +{ + bool supported = true; + int command, state, rc = OPAL_SUCCESS; + int64_t max; + struct fsp_led_data *led; + + /* FSP not present */ + if (!fsp_present()) + return OPAL_HARDWARE; + + /* LED support not available */ + if (led_support != LED_STATE_PRESENT) + return OPAL_HARDWARE; + + max = be64_to_cpu(*max_led_type); + + /* Adjust max LED type */ + if (max > OPAL_SLOT_LED_TYPE_MAX) { + supported = false; + max = OPAL_SLOT_LED_TYPE_MAX; + *max_led_type = cpu_to_be64(max); + } + + /* Invalid parameter */ + if (max <= 0) + return OPAL_PARAMETER; + + /* Set System attention indicator state */ + if (is_sai_loc_code(loc_code)) { + supported = true; + rc = fsp_opal_set_sai(async_token, + loc_code, led_mask, led_value); + goto success; + } + + /* LED not found */ + led = fsp_find_cec_led(loc_code); + if (!led) + return OPAL_PARAMETER; + + /* Indentify LED mask */ + --max; + + if ((led_mask >> OPAL_SLOT_LED_TYPE_ID) & OPAL_SLOT_LED_STATE_ON) { + supported = true; + + command = LED_COMMAND_IDENTIFY; + state = LED_STATE_OFF; + if ((led_value >> OPAL_SLOT_LED_TYPE_ID) + & OPAL_SLOT_LED_STATE_ON) + state = LED_STATE_ON; + + rc = queue_led_state_change(loc_code, command, + state, SPCN_SRC_OPAL, async_token); + } + + if (!max) + goto success; + + /* Fault LED mask */ + --max; + if ((led_mask >> OPAL_SLOT_LED_TYPE_FAULT) & OPAL_SLOT_LED_STATE_ON) { + supported = true; + + command = LED_COMMAND_FAULT; + state = LED_STATE_OFF; + if ((led_value >> OPAL_SLOT_LED_TYPE_FAULT) + & OPAL_SLOT_LED_STATE_ON) + state = LED_STATE_ON; + + rc = queue_led_state_change(loc_code, command, + state, SPCN_SRC_OPAL, async_token); + } + +success: + /* Unsupported LED type */ + if (!supported) + return OPAL_UNSUPPORTED; + + if (rc == OPAL_SUCCESS) + rc = OPAL_ASYNC_COMPLETION; + else + rc = OPAL_INTERNAL_ERROR; + + return rc; +} + +/* Get LED node from device tree */ +static struct dt_node *dt_get_led_node(void) +{ + struct dt_node *pled; + + if (!opal_node) { + prlog(PR_WARNING, "OPAL parent device node not available\n"); + return NULL; + } + + pled = dt_find_by_path(opal_node, DT_PROPERTY_LED_NODE); + if (!pled) + prlog(PR_WARNING, "Parent device node not available\n"); + + return pled; +} + +/* Get System attention indicator location code from device tree */ +static void dt_get_sai_loc_code(void) +{ + struct dt_node *pled, *child; + const char *led_type = NULL; + + memset(sai_data.loc_code, 0, LOC_CODE_SIZE); + + pled = dt_get_led_node(); + if (!pled) + return; + + list_for_each(&pled->children, child, list) { + led_type = dt_prop_get(child, DT_PROPERTY_LED_TYPES); + if (!led_type) + continue; + + if (strcmp(led_type, LED_TYPE_ATTENTION)) + continue; + + memcpy(sai_data.loc_code, child->name, LOC_CODE_SIZE - 1); + + prlog(PR_TRACE, "SAI Location code = %s\n", sai_data.loc_code); + return; + } +} + +/* + * create_led_device_node + * + * Creates the system parent LED device node and all individual + * child LED device nodes under it. This is called right before + * starting the payload (Linux) to ensure that the SPCN command + * sequence to fetch the LED location code list has been finished + * and to have a better chance of creating the deviced nodes. 
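+ *
+ * Editor's sketch of the resulting subtree (written with the macro names
+ * used below; the actual strings depend on how those macros are defined):
+ *
+ *   DT_PROPERTY_LED_NODE {
+ *           compatible = DT_PROPERTY_LED_COMPATIBLE;
+ *           loc-code-A { DT_PROPERTY_LED_TYPES = LED_TYPE_IDENTIFY,
+ *                                                LED_TYPE_FAULT; };
+ *   };
+ *
+ * In LED_MODE_LIGHT_PATH mode each child advertises both identify and
+ * fault types; otherwise only identify is exposed.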
+ */ +void create_led_device_nodes(void) +{ + const char *led_mode = NULL; + struct fsp_led_data *led, *next; + struct dt_node *pled, *cled; + + if (!fsp_present()) + return; + + /* Make sure LED list read is completed */ + while (led_support == LED_STATE_READING) + opal_run_pollers(); + + if (led_support == LED_STATE_ABSENT) { + prlog(PR_WARNING, "LED support not available, \ + hence device tree nodes will not be created\n"); + return; + } + + /* Get LED node */ + pled = dt_get_led_node(); + if (!pled) + return; + + /* Check if already populated (fast-reboot) */ + if (dt_has_node_property(pled, "compatible", NULL)) + return; + dt_add_property_strings(pled, "compatible", DT_PROPERTY_LED_COMPATIBLE); + + led_mode = dt_prop_get(pled, DT_PROPERTY_LED_MODE); + if (!led_mode) { + prlog(PR_WARNING, "Unknown LED operating mode\n"); + return; + } + + /* LED child nodes */ + list_for_each_safe(&cec_ledq, led, next, link) { + /* Duplicate LED location code */ + if (dt_find_by_path(pled, led->loc_code)) { + prlog(PR_WARNING, "duplicate location code %s\n", + led->loc_code); + continue; + } + + cled = dt_new(pled, led->loc_code); + if (!cled) { + prlog(PR_WARNING, "Child device node creation " + "failed\n"); + continue; + } + + if (!strcmp(led_mode, LED_MODE_LIGHT_PATH)) + dt_add_property_strings(cled, DT_PROPERTY_LED_TYPES, + LED_TYPE_IDENTIFY, + LED_TYPE_FAULT); + else + dt_add_property_strings(cled, DT_PROPERTY_LED_TYPES, + LED_TYPE_IDENTIFY); + } +} + +/* + * Process the received LED data from SPCN + * + * Every LED state data is added into the CEC list. If the location + * code is a enclosure type, its added into the enclosure list as well. + * + */ +static void fsp_process_leds_data(u16 len) +{ + struct fsp_led_data *led_data = NULL; + void *buf = NULL; + + /* + * Process the entire captured data from the last command + * + * TCE mapped 'led_buffer' contains the fsp_led_data structure + * one after the other till the total length 'len'. + * + */ + buf = led_buffer; + while (len) { + size_t lc_len; + __be16 tmp; + + /* Prepare */ + led_data = zalloc(sizeof(struct fsp_led_data)); + assert(led_data); + + /* Resource ID */ + buf_read(buf, __be16, &tmp); + led_data->rid = be16_to_cpu(tmp); + len -= sizeof(led_data->rid); + + /* Location code length */ + buf_read(buf, u8, &led_data->lc_len); + len -= sizeof(led_data->lc_len); + + lc_len = led_data->lc_len; + if (lc_len == 0) { + free(led_data); + break; + } + + if (lc_len >= LOC_CODE_SIZE) + lc_len = LOC_CODE_SIZE - 1; + + /* Location code */ + strncpy(led_data->loc_code, buf, lc_len); + led_data->loc_code[lc_len] = '\0'; + + buf += led_data->lc_len; + len -= led_data->lc_len; + + /* Parameters */ + buf_read(buf, __be16, &tmp); + led_data->parms = be16_to_cpu(tmp); + len -= sizeof(led_data->parms); + + /* Status */ + buf_read(buf, __be16, &tmp); + led_data->status = be16_to_cpu(tmp); + len -= sizeof(led_data->status); + + /* + * This is Enclosure LED's location code, need to go + * inside the enclosure LED list as well. 
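+ *
+ * (Editor's note: an enclosure is recognised purely by the absence of a
+ * '-' in its location code. As a hypothetical example, "U78C9.001.ABC1234"
+ * would also be added to the enclosure list, while
+ * "U78C9.001.ABC1234-P1-C1" would only go on the CEC list.)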
+ */ + if (!strstr(led_data->loc_code, "-")) { + struct fsp_led_data *encl_led_data = NULL; + encl_led_data = zalloc(sizeof(struct fsp_led_data)); + assert(encl_led_data); + + /* copy over the original */ + memcpy(encl_led_data, led_data, sizeof(struct fsp_led_data)); + + /* Add to the list of enclosure LEDs */ + list_add_tail(&encl_ledq, &encl_led_data->link); + } + + /* Push this onto the list */ + list_add_tail(&cec_ledq, &led_data->link); + } +} + +/* Replay the SPCN command */ +static void replay_spcn_cmd(u32 last_spcn_cmd) +{ + u32 cmd_hdr = 0; + int rc = -1; + + /* Reached threshold */ + if (replay == SPCN_REPLAY_THRESHOLD) { + replay = 0; + led_support = LED_STATE_ABSENT; + return; + } + + replay++; + if (last_spcn_cmd == SPCN_MOD_PRS_LED_DATA_FIRST) { + cmd_hdr = SPCN_MOD_PRS_LED_DATA_FIRST << 24 | + SPCN_CMD_PRS << 16; + rc = fsp_queue_msg(fsp_mkmsg(FSP_CMD_SPCN_PASSTHRU, 4, + SPCN_ADDR_MODE_CEC_NODE, + cmd_hdr, 0, + PSI_DMA_LED_BUF), + fsp_read_leds_data_complete); + if (rc) + prlog(PR_ERR, "Replay SPCN_MOD_PRS_LED_DATA_FIRST" + " command could not be queued\n"); + } + + if (last_spcn_cmd == SPCN_MOD_PRS_LED_DATA_SUB) { + cmd_hdr = SPCN_MOD_PRS_LED_DATA_SUB << 24 | SPCN_CMD_PRS << 16; + rc = fsp_queue_msg(fsp_mkmsg(FSP_CMD_SPCN_PASSTHRU, 4, + SPCN_ADDR_MODE_CEC_NODE, cmd_hdr, + 0, PSI_DMA_LED_BUF), + fsp_read_leds_data_complete); + if (rc) + prlog(PR_ERR, "Replay SPCN_MOD_PRS_LED_DATA_SUB" + " command could not be queued\n"); + } + + /* Failed to queue MBOX message */ + if (rc) + led_support = LED_STATE_ABSENT; +} + +/* + * FSP message response handler for following SPCN LED commands + * which are used to fetch all of the LED data from SPCN + * + * 1. SPCN_MOD_PRS_LED_DATA_FIRST --> First 1KB of LED data + * 2. SPCN_MOD_PRS_LED_DATA_SUB --> Subsequent 1KB of LED data + * + * Once the SPCN_RSP_STATUS_SUCCESS response code has been received + * indicating the last batch of 1KB LED data is here, the list addition + * process is now complete and we enable LED support for FSP async commands + * and for OPAL interface. 
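+ *
+ * (Editor's note on the response layout, as decoded below: the FSP status
+ * sits in bits 8-15 of word1, the SPCN status in the top byte of data
+ * word 1, and the length of the LED data returned in this batch in its
+ * low 16 bits.)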
+ */ +static void fsp_read_leds_data_complete(struct fsp_msg *msg) +{ + struct fsp_led_data *led, *next; + struct fsp_msg *resp = msg->resp; + u32 cmd_hdr = 0; + int rc = 0; + + u32 msg_status = resp->word1 & 0xff00; + u32 led_status = (fsp_msg_get_data_word(resp, 1) >> 24) & 0xff; + u16 data_len = (u16)(fsp_msg_get_data_word(resp, 1) & 0xffff); + + if (msg_status != FSP_STATUS_SUCCESS) { + log_simple_error(&e_info(OPAL_RC_LED_SUPPORT), + "FSP returned error %x LED not supported\n", + msg_status); + /* LED support not available */ + led_support = LED_STATE_ABSENT; + + fsp_freemsg(msg); + return; + } + + /* SPCN command status */ + switch (led_status) { + /* Last 1KB of LED data */ + case SPCN_RSP_STATUS_SUCCESS: + prlog(PR_DEBUG, "SPCN_RSP_STATUS_SUCCESS: %d bytes received\n", + data_len); + + led_support = LED_STATE_PRESENT; + + /* Copy data to the local list */ + fsp_process_leds_data(data_len); + + /* LEDs captured on the system */ + prlog(PR_DEBUG, "CEC LEDs captured on the system:\n"); + list_for_each_safe(&cec_ledq, led, next, link) { + prlog(PR_DEBUG, + "rid: %x\t" + "len: %x " + "lcode: %-30s\t" + "parms: %04x\t" + "status: %04x\n", + led->rid, + led->lc_len, + led->loc_code, + led->parms, + led->status); + } + + prlog(PR_DEBUG, "ENCL LEDs captured on the system:\n"); + list_for_each_safe(&encl_ledq, led, next, link) { + prlog(PR_DEBUG, + "rid: %x\t" + "len: %x " + "lcode: %-30s\t" + "parms: %04x\t" + "status: %04x\n", + led->rid, + led->lc_len, + led->loc_code, + led->parms, + led->status); + } + + break; + + /* If more 1KB of LED data present */ + case SPCN_RSP_STATUS_COND_SUCCESS: + prlog(PR_DEBUG, "SPCN_RSP_STATUS_COND_SUCCESS: %d bytes " + " received\n", data_len); + + /* Copy data to the local list */ + fsp_process_leds_data(data_len); + + /* Fetch the remaining data from SPCN */ + last_spcn_cmd = SPCN_MOD_PRS_LED_DATA_SUB; + cmd_hdr = SPCN_MOD_PRS_LED_DATA_SUB << 24 | SPCN_CMD_PRS << 16; + rc = fsp_queue_msg(fsp_mkmsg(FSP_CMD_SPCN_PASSTHRU, 4, + SPCN_ADDR_MODE_CEC_NODE, + cmd_hdr, 0, PSI_DMA_LED_BUF), + fsp_read_leds_data_complete); + if (rc) { + prlog(PR_ERR, "SPCN_MOD_PRS_LED_DATA_SUB command" + " could not be queued\n"); + + led_support = LED_STATE_ABSENT; + } + break; + + /* Other expected error codes*/ + case SPCN_RSP_STATUS_INVALID_RACK: + case SPCN_RSP_STATUS_INVALID_SLAVE: + case SPCN_RSP_STATUS_INVALID_MOD: + case SPCN_RSP_STATUS_STATE_PROHIBIT: + case SPCN_RSP_STATUS_UNKNOWN: + default: + /* Replay the previous SPCN command */ + replay_spcn_cmd(last_spcn_cmd); + } + fsp_freemsg(msg); +} + +/* + * Init the LED state + * + * This is called during the host boot process. This is the place where + * we figure out all the LEDs present on the system, their state and then + * create structure out of those information and popullate two master lists. + * One for all the LEDs on the CEC and one for all the LEDs on the enclosure. + * The LED information contained in the lists will cater either to various + * FSP initiated async commands or POWERNV initiated OPAL calls. Need to make + * sure that this initialization process is complete before allowing any requets + * on LED. Also need to be called to re-fetch data from SPCN after any LED state + * have been updated. 
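+ *
+ * The fetch itself is an SPCN passthrough command, roughly (see the code
+ * below):
+ *
+ *   cmd_hdr = SPCN_MOD_PRS_LED_DATA_FIRST << 24 | SPCN_CMD_PRS << 16;
+ *   fsp_queue_msg(fsp_mkmsg(FSP_CMD_SPCN_PASSTHRU, 4,
+ *                           SPCN_ADDR_MODE_CEC_NODE, cmd_hdr, 0,
+ *                           PSI_DMA_LED_BUF),
+ *                 fsp_read_leds_data_complete);
+ *
+ * with SPCN_MOD_PRS_LED_DATA_SUB used for each subsequent 1KB batch.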
+ */ +static void fsp_leds_query_spcn(void) +{ + struct fsp_led_data *led = NULL; + int rc = 0; + + u32 cmd_hdr = SPCN_MOD_PRS_LED_DATA_FIRST << 24 | SPCN_CMD_PRS << 16; + + /* Till the last batch of LED data */ + last_spcn_cmd = 0; + + /* Empty the lists */ + while (!list_empty(&cec_ledq)) { + led = list_pop(&cec_ledq, struct fsp_led_data, link); + free(led); + } + + while (!list_empty(&encl_ledq)) { + led = list_pop(&encl_ledq, struct fsp_led_data, link); + free(led); + } + + /* Allocate buffer with alignment requirements */ + if (led_buffer == NULL) { + led_buffer = memalign(TCE_PSIZE, PSI_DMA_LED_BUF_SZ); + if (!led_buffer) + return; + } + + /* TCE mapping - will not unmap */ + fsp_tce_map(PSI_DMA_LED_BUF, led_buffer, PSI_DMA_LED_BUF_SZ); + + /* Request the first 1KB of LED data */ + last_spcn_cmd = SPCN_MOD_PRS_LED_DATA_FIRST; + rc = fsp_queue_msg(fsp_mkmsg(FSP_CMD_SPCN_PASSTHRU, 4, + SPCN_ADDR_MODE_CEC_NODE, cmd_hdr, 0, + PSI_DMA_LED_BUF), fsp_read_leds_data_complete); + if (rc) + prlog(PR_ERR, + "SPCN_MOD_PRS_LED_DATA_FIRST command could" + " not be queued\n"); + else /* Initiated LED list fetch MBOX command */ + led_support = LED_STATE_READING; +} + +/* Init the LED subsystem at boot time */ +void fsp_led_init(void) +{ + led_buffer = NULL; + + if (!fsp_present()) + return; + + /* Init the master lists */ + list_head_init(&cec_ledq); + list_head_init(&encl_ledq); + list_head_init(&spcn_cmdq); + + fsp_leds_query_spcn(); + + loc_code_list_buffer = memalign(TCE_PSIZE, PSI_DMA_LOC_COD_BUF_SZ); + if (loc_code_list_buffer == NULL) + prerror("ERROR: Unable to allocate loc_code_list_buffer!\n"); + + prlog(PR_TRACE, "Init completed\n"); + + /* Get System attention indicator state */ + dt_get_sai_loc_code(); + fsp_get_sai(); + + /* Handle FSP initiated async LED commands */ + fsp_register_client(&fsp_indicator_client, FSP_MCLASS_INDICATOR); + prlog(PR_TRACE, "FSP async command client registered\n"); + + /* Register for SAI update notification */ + sysparam_add_update_notifier(sai_update_notification); + + opal_register(OPAL_LEDS_GET_INDICATOR, fsp_opal_leds_get_ind, 4); + opal_register(OPAL_LEDS_SET_INDICATOR, fsp_opal_leds_set_ind, 5); + prlog(PR_TRACE, "LED OPAL interface registered\n"); +} diff --git a/roms/skiboot/hw/fsp/fsp-mem-err.c b/roms/skiboot/hw/fsp/fsp-mem-err.c new file mode 100644 index 000000000..2e3e65401 --- /dev/null +++ b/roms/skiboot/hw/fsp/fsp-mem-err.c @@ -0,0 +1,401 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* + * Sometimes some memory needs to go and sit in the naughty corner + * + * Copyright 2013-2019 IBM Corp. + */ + +#define pr_fmt(fmt) "FSPMEMERR: " fmt +#include +#include +#include +#include +#include +#include + +/* FSP sends real address of 4K memory page. */ +#define MEM_ERR_PAGE_SIZE_4K (1UL << 12) + +/* maximum number of error event to hold until linux consumes it. */ +#define MERR_MAX_RECORD 1024 + +struct fsp_mem_err_node { + struct list_node list; + struct OpalMemoryErrorData data; +}; + +static LIST_HEAD(merr_free_list); +static LIST_HEAD(mem_error_list); +/* + * lock is used to protect overwriting of merr_free_list and mem_error_list + * list. 
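+ *
+ * (Editor's note: nodes are preallocated onto merr_free_list at init time.
+ * A new FSP event moves one node onto mem_error_list, and once that event
+ * has been queued to the OS the node is returned to merr_free_list; both
+ * moves happen under this lock.)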
+ */ +static struct lock mem_err_lock = LOCK_UNLOCKED; + +DEFINE_LOG_ENTRY(OPAL_RC_MEM_ERR_RES, OPAL_PLATFORM_ERR_EVT, OPAL_MEM_ERR, + OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL, + OPAL_NA); + +DEFINE_LOG_ENTRY(OPAL_RC_MEM_ERR_DEALLOC, OPAL_PLATFORM_ERR_EVT, OPAL_MEM_ERR, + OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL, + OPAL_NA); + +static bool send_response_to_fsp(u32 cmd_sub_mod) +{ + struct fsp_msg *rsp; + int rc = -ENOMEM; + + rsp = fsp_mkmsg(cmd_sub_mod, 0); + if (rsp) + rc = fsp_queue_msg(rsp, fsp_freemsg); + if (rc) { + fsp_freemsg(rsp); + /* XXX Generate error logs */ + prerror("Error %d queueing FSP memory error reply\n", rc); + return false; + } + return true; +} + +/* + * Queue up the memory error message for delivery. + * + * queue_event_for_delivery get called from two places. + * 1) from queue_mem_err_node when new fsp mem error is available and + * 2) from completion callback indicating that linux has consumed an message. + * + * TODO: + * There is a chance that, we may not get a free slot to queue our event + * for delivery to linux during both the above invocations. In that case + * we end up holding events with us until next fsp memory error comes in. + * We need to address this case either here OR fix up messaging infrastructure + * to make sure at least one slot will always be available per message type. + * + * XXX: BenH: I changed the msg infrastructure to attempt an allocation + * in that case, at least until we clarify a bit better how + * we want to handle things. + */ +static void queue_event_for_delivery(void *data __unused, int staus __unused) +{ + struct fsp_mem_err_node *entry; + uint64_t *merr_data; + int rc; + + lock(&mem_err_lock); + entry = list_pop(&mem_error_list, struct fsp_mem_err_node, list); + unlock(&mem_err_lock); + + if (!entry) + return; + + /* + * struct OpalMemoryErrorData is of (4 * 64 bits) size and well packed + * structure. Hence use uint64_t pointer to pass entire structure + * using 4 params in generic message format. + */ + merr_data = (uint64_t *)&entry->data; + + /* queue up for delivery */ + rc = opal_queue_msg(OPAL_MSG_MEM_ERR, NULL, queue_event_for_delivery, + cpu_to_be64(merr_data[0]), + cpu_to_be64(merr_data[1]), + cpu_to_be64(merr_data[2]), + cpu_to_be64(merr_data[3])); + lock(&mem_err_lock); + if (rc) { + /* + * Failed to queue up the event for delivery. No free slot + * available. There is a chance that we are trying to queue + * up multiple event at the same time. We may already have + * at least one event queued up, in that case we will be + * called again through completion callback and we should + * be able to grab empty slot then. + * + * For now, put this node back on mem_error_list. + */ + list_add(&mem_error_list, &entry->list); + } else + list_add(&merr_free_list, &entry->list); + unlock(&mem_err_lock); +} + +static int queue_mem_err_node(struct OpalMemoryErrorData *merr_evt) +{ + struct fsp_mem_err_node *entry; + + lock(&mem_err_lock); + entry = list_pop(&merr_free_list, struct fsp_mem_err_node, list); + if (!entry) { + printf("Failed to queue up memory error event.\n"); + unlock(&mem_err_lock); + return -ENOMEM; + } + + entry->data = *merr_evt; + list_add(&mem_error_list, &entry->list); + unlock(&mem_err_lock); + + /* Queue up the event for delivery to OS. */ + queue_event_for_delivery(NULL, OPAL_SUCCESS); + return 0; +} + +/* Check if memory resilience event for same address already exists. 
*/ +static bool is_resilience_event_exist(u64 paddr) +{ + struct fsp_mem_err_node *entry; + struct OpalMemoryErrorData *merr_evt; + int found = 0; + + lock(&mem_err_lock); + list_for_each(&mem_error_list, entry, list) { + merr_evt = &entry->data; + if ((merr_evt->type == OPAL_MEM_ERR_TYPE_RESILIENCE) && + (be64_to_cpu(merr_evt->u.resilience.physical_address_start) + == paddr)) { + found = 1; + break; + } + } + unlock(&mem_err_lock); + return !!found; +} + +/* + * handle Memory Resilience error message. + * Section 28.2 of Hypervisor to FSP Mailbox Interface Specification. + * + * The flow for Memory Resilence Event is: + * 1. PRD component in FSP gets a recoverable attention from hardware when + * there is a corretable/uncorrectable memory error to free up a page. + * 2. PRD sends Memory Resilence Command to hypervisor with the real address of + * the 4K memory page in which the error occurred. + * 3. The hypervisor acknowledges with a status immediately. Immediate + * acknowledgment doesn’t require the freeing of the page to be completed. + */ +static bool handle_memory_resilience(u32 cmd_sub_mod, u64 paddr) +{ + int rc = 0; + struct OpalMemoryErrorData mem_err_evt; + struct errorlog *buf; + + memset(&mem_err_evt, 0, sizeof(struct OpalMemoryErrorData)); + /* Check arguments */ + if (paddr == 0) { + prerror("memory resilience: Invalid real address.\n"); + return send_response_to_fsp(FSP_RSP_MEM_RES | + FSP_STATUS_GENERIC_ERROR); + } + + /* Check if event already exist for same address. */ + if (is_resilience_event_exist(paddr)) + goto send_response; + + /* Populate an event. */ + mem_err_evt.version = OpalMemErr_V1; + mem_err_evt.type = OPAL_MEM_ERR_TYPE_RESILIENCE; + + switch (cmd_sub_mod) { + case FSP_CMD_MEM_RES_CE: + /* + * Should we keep counter for corrected errors in + * sapphire OR let linux (PowerNV) handle it? + * + * For now, send corrected errors to linux and let + * linux handle corrected errors thresholding. + */ + mem_err_evt.flags |= cpu_to_be16(OPAL_MEM_CORRECTED_ERROR); + mem_err_evt.u.resilience.resil_err_type = + OPAL_MEM_RESILIENCE_CE; + break; + case FSP_CMD_MEM_RES_UE: + mem_err_evt.u.resilience.resil_err_type = + OPAL_MEM_RESILIENCE_UE; + break; + case FSP_CMD_MEM_RES_UE_SCRB: + mem_err_evt.u.resilience.resil_err_type = + OPAL_MEM_RESILIENCE_UE_SCRUB; + break; + } + mem_err_evt.u.resilience.physical_address_start = cpu_to_be64(paddr); + mem_err_evt.u.resilience.physical_address_end = + cpu_to_be64(paddr + MEM_ERR_PAGE_SIZE_4K); + + /* Queue up the event and inform OS about it. */ + rc = queue_mem_err_node(&mem_err_evt); + +send_response: + /* Queue up an OK response to the resilience message itself */ + if (!rc) + return send_response_to_fsp(FSP_RSP_MEM_RES); + else { + buf = opal_elog_create(&e_info(OPAL_RC_MEM_ERR_RES), 0); + log_append_msg(buf, + "OPAL_MEM_ERR: Cannot queue up memory " + "resilience error event to the OS"); + log_add_section(buf, OPAL_ELOG_SEC_DESC); + log_append_data(buf, (char *) &mem_err_evt, + sizeof(struct OpalMemoryErrorData)); + log_commit(buf); + return false; + } +} + +/* update existing event entry if match is found. 
*/ +static bool update_memory_deallocation_event(u64 paddr_start, u64 paddr_end) +{ + struct fsp_mem_err_node *entry; + struct OpalMemoryErrorData *merr_evt; + int found = 0; + + lock(&mem_err_lock); + list_for_each(&mem_error_list, entry, list) { + merr_evt = &entry->data; + if ((merr_evt->type == OPAL_MEM_ERR_TYPE_DYN_DALLOC) && + (be64_to_cpu(merr_evt->u.dyn_dealloc.physical_address_start) + == paddr_start)) { + found = 1; + if (be64_to_cpu(merr_evt->u.dyn_dealloc.physical_address_end) + < paddr_end) + merr_evt->u.dyn_dealloc.physical_address_end = + cpu_to_be64(paddr_end); + break; + } + } + unlock(&mem_err_lock); + return !!found; +} + +/* + * Handle dynamic memory deallocation message. + * + * When a condition occurs in which we need to do a large scale memory + * deallocation, PRD will send a starting and ending address of an area of + * memory to Hypervisor. Hypervisor then need to use this to deallocate all + * pages between and including the addresses. + * + */ +static bool handle_memory_deallocation(u64 paddr_start, u64 paddr_end) +{ + int rc = 0; + u8 err = 0; + struct OpalMemoryErrorData mem_err_evt; + struct errorlog *buf; + + memset(&mem_err_evt, 0, sizeof(struct OpalMemoryErrorData)); + /* Check arguments */ + if ((paddr_start == 0) || (paddr_end == 0)) { + prerror("memory deallocation: Invalid " + "starting/ending real address.\n"); + err = FSP_STATUS_GENERIC_ERROR; + } + + /* If we had an error, send response to fsp and return */ + if (err) + return send_response_to_fsp(FSP_RSP_MEM_DYN_DEALLOC | err); + + /* + * FSP can send dynamic memory deallocation multiple times for the + * same address/address ranges. Hence check and update if we already + * have sam event queued. + */ + if (update_memory_deallocation_event(paddr_start, paddr_end)) + goto send_response; + + /* Populate an new event. */ + mem_err_evt.version = OpalMemErr_V1; + mem_err_evt.type = OPAL_MEM_ERR_TYPE_DYN_DALLOC; + mem_err_evt.u.dyn_dealloc.dyn_err_type = + OPAL_MEM_DYNAMIC_DEALLOC; + mem_err_evt.u.dyn_dealloc.physical_address_start = cpu_to_be64(paddr_start); + mem_err_evt.u.dyn_dealloc.physical_address_end = cpu_to_be64(paddr_end); + + /* Queue up the event and inform OS about it. */ + rc = queue_mem_err_node(&mem_err_evt); + +send_response: + /* Queue up an OK response to the memory deallocation message itself */ + if (!rc) + return send_response_to_fsp(FSP_RSP_MEM_DYN_DEALLOC); + else { + buf = opal_elog_create(&e_info(OPAL_RC_MEM_ERR_DEALLOC), 0); + log_append_msg(buf, + "OPAL_MEM_ERR: Cannot queue up memory " + "deallocation error event to the OS"); + log_add_section(buf, OPAL_ELOG_SEC_DESC); + log_append_data(buf, (char *)&mem_err_evt, + sizeof(struct OpalMemoryErrorData)); + log_commit(buf); + return false; + } +} + +/* Receive a memory error mesages and handle it. */ +static bool fsp_mem_err_msg(u32 cmd_sub_mod, struct fsp_msg *msg) +{ + u64 paddr_start, paddr_end; + + printf("Received 0x%08ux command\n", cmd_sub_mod); + switch (cmd_sub_mod) { + case FSP_CMD_MEM_RES_CE: + case FSP_CMD_MEM_RES_UE: + case FSP_CMD_MEM_RES_UE_SCRB: + /* + * We get the memory relilence command from FSP for + * correctable/Uncorrectable/scrub UE errors with real + * address of 4K memory page in which the error occurred. 
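+ *
+ * (Editor's note: as the code below shows, the page address arrives as a
+ * big-endian 64-bit value in the first 8 data bytes of the message; the
+ * dynamic deallocation command carries a second address, the end of the
+ * range, in the following 8 bytes.)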
+ */ + paddr_start = be64_to_cpu(*((__be64 *)&msg->data.bytes[0])); + printf("Got memory resilience error message for " + "paddr=0x%016llux\n", paddr_start); + return handle_memory_resilience(cmd_sub_mod, paddr_start); + case FSP_CMD_MEM_DYN_DEALLOC: + paddr_start = be64_to_cpu(*((__be64 *)&msg->data.bytes[0])); + paddr_end = be64_to_cpu(*((__be64 *)&msg->data.bytes[8])); + printf("Got dynamic memory deallocation message: " + "paddr_start=0x%016llux, paddr_end=0x%016llux\n", + paddr_start, paddr_end); + return handle_memory_deallocation(paddr_start, paddr_end); + } + return false; +} + +/* + * pre allocate memory to hold maximum of 128 memory error event until linux + * consumes it. + */ +static int init_merr_free_list(uint32_t num_entries) +{ + struct fsp_mem_err_node *entry; + int i; + + entry = zalloc(sizeof(struct fsp_mem_err_node) * num_entries); + if (!entry) + return -ENOMEM; + + for (i = 0; i < num_entries; ++i, entry++) + list_add_tail(&merr_free_list, &entry->list); + + return 0; +} + +static struct fsp_client fsp_mem_err_client = { + .message = fsp_mem_err_msg, +}; + +void fsp_memory_err_init(void) +{ + int rc; + + printf("Intializing fsp memory handling.\n"); + /* If we have an FSP, register for notifications */ + if (!fsp_present()) + return; + + /* pre allocate memory for 128 record */ + rc = init_merr_free_list(MERR_MAX_RECORD); + if (rc < 0) + return; + + fsp_register_client(&fsp_mem_err_client, FSP_MCLASS_MEMORY_ERR); +} diff --git a/roms/skiboot/hw/fsp/fsp-nvram.c b/roms/skiboot/hw/fsp/fsp-nvram.c new file mode 100644 index 000000000..aa17cb5e7 --- /dev/null +++ b/roms/skiboot/hw/fsp/fsp-nvram.c @@ -0,0 +1,424 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* + * Read/Write NVRAM from/to FSP + * + * Copyright 2013-2017 IBM Corp. + */ + +#include +#include +#include +#include +#include +#include + +/* + * The FSP NVRAM API operates in "blocks" of 4K. It is entirely exposed + * to the OS via the OPAL APIs. + * + * In order to avoid dealing with complicated read/modify/write state + * machines (and added issues related to FSP failover in the middle) + * we keep a memory copy of the entire nvram which we load at boot + * time. We save only modified blocks. + * + * To limit the amount of memory used by the nvram image, we limit + * how much nvram we support to NVRAM_SIZE. Additionally, this limit + * of 1M is the maximum that the CHRP/PAPR nvram partition format + * supports for a partition entry. + * + * (Q: should we save the whole thing in case of FSP failover ?) + * + * The nvram is expected to comply with the CHRP/PAPR defined format, + * and specifically contain a System partition (ID 0x70) named "common" + * with configuration variables for the bootloader and a FW private + * partition for future use by skiboot. + * + * If the partition layout appears broken or lacks one of the above + * partitions, we reformat the entire nvram at boot time. + * + * We do not exploit the ability of the FSP to store a checksum. This + * is documented as possibly going away. 
The CHRP format for nvram + * that Linux uses has its own (though weak) checksum mechanism already + * + */ + +#define NVRAM_BLKSIZE 0x1000 + +struct nvram_triplet { + __be64 dma_addr; + __be32 blk_offset; + __be32 blk_count; +} __packed; + +#define NVRAM_FLAG_CLEAR_WPEND 0x80000000 + +enum nvram_state { + NVRAM_STATE_CLOSED, + NVRAM_STATE_OPENING, + NVRAM_STATE_BROKEN, + NVRAM_STATE_OPEN, + NVRAM_STATE_ABSENT, +}; + +static void *fsp_nvram_image; +static uint32_t fsp_nvram_size; +static struct lock fsp_nvram_lock = LOCK_UNLOCKED; +static struct fsp_msg *fsp_nvram_msg; +static uint32_t fsp_nvram_dirty_start; +static uint32_t fsp_nvram_dirty_end; +static bool fsp_nvram_was_read; +static struct nvram_triplet fsp_nvram_triplet __align(0x1000); +static enum nvram_state fsp_nvram_state = NVRAM_STATE_CLOSED; + +DEFINE_LOG_ENTRY(OPAL_RC_NVRAM_INIT, OPAL_PLATFORM_ERR_EVT , OPAL_NVRAM, + OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL, + OPAL_NA); + +DEFINE_LOG_ENTRY(OPAL_RC_NVRAM_OPEN, OPAL_PLATFORM_ERR_EVT, OPAL_NVRAM, + OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL, + OPAL_NA); + +DEFINE_LOG_ENTRY(OPAL_RC_NVRAM_SIZE, OPAL_PLATFORM_ERR_EVT, OPAL_NVRAM, + OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL, + OPAL_NA); + +DEFINE_LOG_ENTRY(OPAL_RC_NVRAM_READ, OPAL_PLATFORM_ERR_EVT, OPAL_NVRAM, + OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL, + OPAL_NA); + +DEFINE_LOG_ENTRY(OPAL_RC_NVRAM_WRITE, OPAL_PLATFORM_ERR_EVT, OPAL_NVRAM, + OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL, + OPAL_NA); + +static void fsp_nvram_send_write(void); + +static void fsp_nvram_wr_complete(struct fsp_msg *msg) +{ + struct fsp_msg *resp = msg->resp; + uint8_t rc; + + lock(&fsp_nvram_lock); + fsp_nvram_msg = NULL; + + /* Check for various errors. If an error occurred, + * we generally assume the nvram is completely dirty + * but we won't trigger a new write until we get + * either a new attempt at writing, or an FSP reset + * reload (TODO) + */ + if (!resp || resp->state != fsp_msg_response) + goto fail_dirty; + rc = (msg->word1 >> 8) & 0xff; + switch(rc) { + case 0: + case 0x44: + /* Sync to secondary required... XXX */ + case 0x45: + break; + case 0xef: + /* Sync to secondary failed, let's ignore that for now, + * maybe when (if) we handle redundant FSPs ... 
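+ * The failure is only logged; the write itself is still treated as
+ * successful, so we do not take the fail_dirty path that would mark the
+ * whole image dirty again (editor's reading of the code below).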
+ */ + prerror("FSP: NVRAM sync to secondary failed\n"); + break; + default: + log_simple_error(&e_info(OPAL_RC_NVRAM_WRITE), + "FSP: NVRAM write return error 0x%02x\n", rc); + goto fail_dirty; + } + fsp_freemsg(msg); + if (fsp_nvram_dirty_start <= fsp_nvram_dirty_end) + fsp_nvram_send_write(); + unlock(&fsp_nvram_lock); + return; + fail_dirty: + fsp_nvram_dirty_start = 0; + fsp_nvram_dirty_end = fsp_nvram_size - 1; + fsp_freemsg(msg); + unlock(&fsp_nvram_lock); +} + +static void fsp_nvram_send_write(void) +{ + uint32_t start = fsp_nvram_dirty_start; + uint32_t end = fsp_nvram_dirty_end; + uint32_t count; + + if (start > end || fsp_nvram_state != NVRAM_STATE_OPEN) + return; + count = (end - start) / NVRAM_BLKSIZE + 1; + fsp_nvram_triplet.dma_addr = cpu_to_be64(PSI_DMA_NVRAM_BODY + start); + fsp_nvram_triplet.blk_offset = cpu_to_be32(start / NVRAM_BLKSIZE); + fsp_nvram_triplet.blk_count = cpu_to_be32(count); + fsp_nvram_msg = fsp_mkmsg(FSP_CMD_WRITE_VNVRAM, 6, + 0, PSI_DMA_NVRAM_TRIPL, 1, + NVRAM_FLAG_CLEAR_WPEND, 0, 0); + if (fsp_queue_msg(fsp_nvram_msg, fsp_nvram_wr_complete)) { + fsp_freemsg(fsp_nvram_msg); + fsp_nvram_msg = NULL; + log_simple_error(&e_info(OPAL_RC_NVRAM_WRITE), + "FSP: Error queueing nvram update\n"); + return; + } + fsp_nvram_dirty_start = fsp_nvram_size; + fsp_nvram_dirty_end = 0; +} + +static void fsp_nvram_rd_complete(struct fsp_msg *msg) +{ + int64_t rc; + + lock(&fsp_nvram_lock); + + /* Read complete, check status. What to do if the read fails ? + * + * Well, there could be various reasons such as an FSP reboot + * at the wrong time, but there is really not much we can do + * so for now I'll just mark the nvram as closed, and we'll + * attempt a re-open and re-read whenever the OS tries to + * access it + */ + rc = (msg->resp->word1 >> 8) & 0xff; + fsp_nvram_msg = NULL; + fsp_freemsg(msg); + if (rc) { + prerror("FSP: NVRAM read failed, will try again later\n"); + fsp_nvram_state = NVRAM_STATE_CLOSED; + } else { + /* nvram was read once, no need to do it ever again */ + fsp_nvram_was_read = true; + fsp_nvram_state = NVRAM_STATE_OPEN; + + /* XXX Here we should look for nvram settings that concern + * us such as guest kernel arguments etc... + */ + } + unlock(&fsp_nvram_lock); + nvram_read_complete(fsp_nvram_state == NVRAM_STATE_OPEN); + if (fsp_nvram_state != NVRAM_STATE_OPEN) + log_simple_error(&e_info(OPAL_RC_NVRAM_INIT), + "FSP: NVRAM not read, skipping init\n"); +} + +static void fsp_nvram_send_read(void) +{ + fsp_nvram_msg = fsp_mkmsg(FSP_CMD_READ_VNVRAM, 4, + 0, PSI_DMA_NVRAM_BODY, 0, + fsp_nvram_size / NVRAM_BLKSIZE); + if (fsp_queue_msg(fsp_nvram_msg, fsp_nvram_rd_complete)) { + /* If the nvram read fails to queue, we mark ourselves + * closed. Shouldn't have happened anyway. Not much else + * we can do. 
+ */ + fsp_nvram_state = NVRAM_STATE_CLOSED; + fsp_freemsg(fsp_nvram_msg); + fsp_nvram_msg = NULL; + log_simple_error(&e_info(OPAL_RC_NVRAM_READ), + "FSP: Error queueing nvram read\n"); + return; + } +} + +static void fsp_nvram_open_complete(struct fsp_msg *msg) +{ + int8_t rc; + + lock(&fsp_nvram_lock); + + /* Open complete, check status */ + rc = (msg->resp->word1 >> 8) & 0xff; + fsp_nvram_msg = NULL; + fsp_freemsg(msg); + if (rc) { + log_simple_error(&e_info(OPAL_RC_NVRAM_OPEN), + "FSP: NVRAM open failed, FSP error 0x%02x\n", rc); + goto failed; + } + if (fsp_nvram_was_read) + fsp_nvram_state = NVRAM_STATE_OPEN; + else + fsp_nvram_send_read(); + unlock(&fsp_nvram_lock); + return; + failed: + fsp_nvram_state = NVRAM_STATE_CLOSED; + unlock(&fsp_nvram_lock); +} + +static void fsp_nvram_send_open(void) +{ + printf("FSP NVRAM: Opening nvram...\n"); + fsp_nvram_msg = fsp_mkmsg(FSP_CMD_OPEN_VNVRAM, 1, fsp_nvram_size); + assert(fsp_nvram_msg); + fsp_nvram_state = NVRAM_STATE_OPENING; + if (!fsp_queue_msg(fsp_nvram_msg, fsp_nvram_open_complete)) + return; + + prerror("FSP NVRAM: Failed to queue nvram open message\n"); + fsp_freemsg(fsp_nvram_msg); + fsp_nvram_msg = NULL; + fsp_nvram_state = NVRAM_STATE_CLOSED; +} + +static bool fsp_nvram_get_size(uint32_t *out_size) +{ + struct fsp_msg *msg; + int rc, size; + + msg = fsp_mkmsg(FSP_CMD_GET_VNVRAM_SIZE, 0); + assert(msg); + + rc = fsp_sync_msg(msg, false); + size = msg->resp ? fsp_msg_get_data_word(msg->resp, 0) : 0; + fsp_freemsg(msg); + if (rc || size == 0) { + log_simple_error(&e_info(OPAL_RC_NVRAM_SIZE), + "FSP: Error %d nvram size reported is %d\n", rc, size); + fsp_nvram_state = NVRAM_STATE_BROKEN; + return false; + } + printf("FSP: NVRAM file size from FSP is %d bytes\n", size); + *out_size = size; + return true; +} + +static bool fsp_nvram_msg_rr(u32 cmd_sub_mod, struct fsp_msg *msg) +{ + assert(msg == NULL); + + switch (cmd_sub_mod) { + case FSP_RESET_START: + printf("FSP: Closing NVRAM on account of FSP Reset\n"); + fsp_nvram_state = NVRAM_STATE_CLOSED; + return true; + case FSP_RELOAD_COMPLETE: + printf("FSP: Reopening NVRAM of FSP Reload complete\n"); + lock(&fsp_nvram_lock); + fsp_nvram_send_open(); + unlock(&fsp_nvram_lock); + return true; + } + return false; +} + +static struct fsp_client fsp_nvram_client_rr = { + .message = fsp_nvram_msg_rr, +}; + +static bool fsp_vnvram_msg(u32 cmd_sub_mod, struct fsp_msg *msg __unused) +{ + u32 cmd; + struct fsp_msg *resp; + + switch (cmd_sub_mod) { + case FSP_CMD_GET_VNV_STATS: + prlog(PR_DEBUG, + "FSP NVRAM: Get vNVRAM statistics not supported\n"); + cmd = FSP_RSP_GET_VNV_STATS | FSP_STATUS_INVALID_SUBCMD; + break; + case FSP_CMD_FREE_VNV_STATS: + prlog(PR_DEBUG, + "FSP NVRAM: Free vNVRAM statistics buffer not supported\n"); + cmd = FSP_RSP_FREE_VNV_STATS | FSP_STATUS_INVALID_SUBCMD; + break; + default: + return false; + } + + resp = fsp_mkmsg(cmd, 0); + if (!resp) { + prerror("FSP NVRAM: Failed to allocate resp message\n"); + return false; + } + if (fsp_queue_msg(resp, fsp_freemsg)) { + prerror("FSP NVRAM: Failed to queue resp message\n"); + fsp_freemsg(resp); + return false; + } + return true; +} + +static struct fsp_client fsp_vnvram_client = { + .message = fsp_vnvram_msg, +}; + +int fsp_nvram_info(uint32_t *total_size) +{ + if (!fsp_present()) { + fsp_nvram_state = NVRAM_STATE_ABSENT; + return OPAL_HARDWARE; + } + + if (!fsp_nvram_get_size(total_size)) + return OPAL_HARDWARE; + return OPAL_SUCCESS; +} + +int fsp_nvram_start_read(void *dst, uint32_t src, uint32_t len) +{ + /* We are 
currently limited to fully aligned transfers */ + assert((((uint64_t)dst) & 0xfff) == 0); + assert(dst); + + /* Currently don't support src!=0 */ + assert(src == 0); + + if (!fsp_present()) + return -ENODEV; + + op_display(OP_LOG, OP_MOD_INIT, 0x0007); + + lock(&fsp_nvram_lock); + + /* Store image info */ + fsp_nvram_image = dst; + fsp_nvram_size = len; + + /* Mark nvram as not dirty */ + fsp_nvram_dirty_start = len; + fsp_nvram_dirty_end = 0; + + /* Map TCEs */ + fsp_tce_map(PSI_DMA_NVRAM_TRIPL, &fsp_nvram_triplet, + PSI_DMA_NVRAM_TRIPL_SZ); + fsp_tce_map(PSI_DMA_NVRAM_BODY, dst, PSI_DMA_NVRAM_BODY_SZ); + + /* Register for the reset/reload event */ + fsp_register_client(&fsp_nvram_client_rr, FSP_MCLASS_RR_EVENT); + + /* Register for virtual NVRAM interface events */ + fsp_register_client(&fsp_vnvram_client, FSP_MCLASS_VIRTUAL_NVRAM); + + /* Open and load the nvram from the FSP */ + fsp_nvram_send_open(); + + unlock(&fsp_nvram_lock); + + return 0; +} + +int fsp_nvram_write(uint32_t offset, void *src, uint32_t size) +{ + uint64_t end = offset + size - 1; + + /* We only support writing from the original image */ + if (src != fsp_nvram_image + offset) + return OPAL_HARDWARE; + + offset &= ~(NVRAM_BLKSIZE - 1); + end &= ~(NVRAM_BLKSIZE - 1); + + lock(&fsp_nvram_lock); + /* If the nvram is closed, try re-opening */ + if (fsp_nvram_state == NVRAM_STATE_CLOSED) + fsp_nvram_send_open(); + if (fsp_nvram_dirty_start > offset) + fsp_nvram_dirty_start = offset; + if (fsp_nvram_dirty_end < end) + fsp_nvram_dirty_end = end; + if (!fsp_nvram_msg && fsp_nvram_state == NVRAM_STATE_OPEN) + fsp_nvram_send_write(); + unlock(&fsp_nvram_lock); + + return 0; +} diff --git a/roms/skiboot/hw/fsp/fsp-occ.c b/roms/skiboot/hw/fsp/fsp-occ.c new file mode 100644 index 000000000..58926f408 --- /dev/null +++ b/roms/skiboot/hw/fsp/fsp-occ.c @@ -0,0 +1,417 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* + * FSP/OCC interactions + * + * Unlike OpenPOWER machines, FSP machines are much more tightly coupled + * between FSP, host, and OCC. On P8 we have to do a dance to start the + * OCC, but on P9 Hostboot does that, consistent with what we do on + * OpenPOWER. + * + * Copyright 2013-2019 IBM Corp. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +DEFINE_LOG_ENTRY(OPAL_RC_OCC_LOAD, OPAL_PLATFORM_ERR_EVT, OPAL_OCC, + OPAL_CEC_HARDWARE, OPAL_PREDICTIVE_ERR_GENERAL, + OPAL_NA); + +DEFINE_LOG_ENTRY(OPAL_RC_OCC_RESET, OPAL_PLATFORM_ERR_EVT, OPAL_OCC, + OPAL_CEC_HARDWARE, OPAL_PREDICTIVE_ERR_GENERAL, + OPAL_NA); + +struct occ_load_req { + u8 scope; + u32 dbob_id; + u32 seq_id; + struct list_node link; +}; +static LIST_HEAD(occ_load_req_list); + + +static void occ_queue_load(u8 scope, u32 dbob_id, u32 seq_id) +{ + struct occ_load_req *occ_req; + + occ_req = zalloc(sizeof(struct occ_load_req)); + if (!occ_req) { + /** + * @fwts-label OCCload_reqENOMEM + * @fwts-advice ENOMEM while allocating OCC load message. + * OCCs not started, consequently no power/frequency scaling + * will be functional. 
+ */ + prlog(PR_ERR, "OCC: Could not allocate occ_load_req\n"); + return; + } + + occ_req->scope = scope; + occ_req->dbob_id = dbob_id; + occ_req->seq_id = seq_id; + list_add_tail(&occ_load_req_list, &occ_req->link); +} + +static void __occ_do_load(u8 scope, u32 dbob_id __unused, u32 seq_id) +{ + struct fsp_msg *stat; + int rc = -ENOMEM; + int status_word = 0; + struct proc_chip *chip = next_chip(NULL); + + /* Call HBRT... */ + rc = host_services_occ_load(); + + /* Handle fallback to preload */ + if (rc == -ENOENT && chip->homer_base) { + prlog(PR_INFO, "OCC: Load: Fallback to preloaded image\n"); + rc = 0; + } else if (!rc) { + struct opal_occ_msg occ_msg = { CPU_TO_BE64(OCC_LOAD), 0, 0 }; + + rc = _opal_queue_msg(OPAL_MSG_OCC, NULL, NULL, + sizeof(struct opal_occ_msg), &occ_msg); + if (rc) + prlog(PR_INFO, "OCC: Failed to queue message %d\n", + OCC_LOAD); + + /* Success, start OCC */ + rc = host_services_occ_start(); + } + if (rc) { + /* If either of hostservices call fail, send fail to FSP */ + /* Find a chip ID to send failure */ + for_each_chip(chip) { + if (scope == 0x01 && dbob_id != chip->dbob_id) + continue; + status_word = 0xB500 | (chip->pcid & 0xff); + break; + } + log_simple_error(&e_info(OPAL_RC_OCC_LOAD), + "OCC: Error %d in load/start OCC\n", rc); + } + + /* Send a single response for all chips */ + stat = fsp_mkmsg(FSP_CMD_LOAD_OCC_STAT, 2, status_word, seq_id); + if (stat) + rc = fsp_queue_msg(stat, fsp_freemsg); + if (rc) { + log_simple_error(&e_info(OPAL_RC_OCC_LOAD), + "OCC: Error %d queueing FSP OCC LOAD STATUS msg", rc); + fsp_freemsg(stat); + } +} + +void occ_poke_load_queue(void) +{ + struct occ_load_req *occ_req, *next; + + if (list_empty(&occ_load_req_list)) + return; + + list_for_each_safe(&occ_load_req_list, occ_req, next, link) { + __occ_do_load(occ_req->scope, occ_req->dbob_id, + occ_req->seq_id); + list_del(&occ_req->link); + free(occ_req); + } +} + +static u32 last_seq_id; +static bool in_ipl = true; +static void occ_do_load(u8 scope, u32 dbob_id __unused, u32 seq_id) +{ + struct fsp_msg *rsp; + int rc = -ENOMEM; + u8 err = 0; + + if (scope != 0x01 && scope != 0x02) { + /** + * @fwts-label OCCLoadInvalidScope + * @fwts-advice Invalid request for loading OCCs. Power and + * frequency management not functional + */ + prlog(PR_ERR, "OCC: Load message with invalid scope 0x%x\n", + scope); + err = 0x22; + } + + /* First queue up an OK response to the load message itself */ + rsp = fsp_mkmsg(FSP_RSP_LOAD_OCC | err, 0); + if (rsp) + rc = fsp_queue_msg(rsp, fsp_freemsg); + if (rc) { + log_simple_error(&e_info(OPAL_RC_OCC_LOAD), + "OCC: Error %d queueing FSP OCC LOAD reply\n", rc); + fsp_freemsg(rsp); + return; + } + + if (err) + return; + + if (proc_gen >= proc_gen_p9) { + if (in_ipl) { + /* OCC is pre-loaded in P9, so send SUCCESS to FSP */ + rsp = fsp_mkmsg(FSP_CMD_LOAD_OCC_STAT, 2, 0, seq_id); + if (!rsp) + return; + + rc = fsp_queue_msg(rsp, fsp_freemsg); + if (rc) { + log_simple_error(&e_info(OPAL_RC_OCC_LOAD), + "OCC: Error %d queueing OCC LOAD STATUS msg", + rc); + fsp_freemsg(rsp); + } + in_ipl = false; + } else { + struct proc_chip *chip = next_chip(NULL); + + last_seq_id = seq_id; + prd_fsp_occ_load_start(chip->id); + } + return; + } + + /* + * Check if hostservices lid caching is complete. If not, queue + * the load request. 
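+ * Requests parked this way are drained later by occ_poke_load_queue(),
+ * presumably once the LID preload has completed.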
+ */ + if (!hservices_lid_preload_complete()) { + occ_queue_load(scope, dbob_id, seq_id); + return; + } + + __occ_do_load(scope, dbob_id, seq_id); +} + +int fsp_occ_reset_status(u64 chipid, s64 status) +{ + struct fsp_msg *stat; + int rc = OPAL_NO_MEM; + int status_word = 0; + + prlog(PR_INFO, "HBRT: OCC stop() completed with %lld\n", status); + + if (status) { + struct proc_chip *chip = get_chip(chipid); + + if (!chip) + return OPAL_PARAMETER; + + status_word = 0xfe00 | (chip->pcid & 0xff); + log_simple_error(&e_info(OPAL_RC_OCC_RESET), + "OCC: Error %lld in OCC reset of chip %lld\n", + status, chipid); + } else { + occ_msg_queue_occ_reset(); + } + + stat = fsp_mkmsg(FSP_CMD_RESET_OCC_STAT, 2, status_word, last_seq_id); + if (!stat) + return rc; + + rc = fsp_queue_msg(stat, fsp_freemsg); + if (rc) { + fsp_freemsg(stat); + log_simple_error(&e_info(OPAL_RC_OCC_RESET), + "OCC: Error %d queueing FSP OCC RESET STATUS message\n", + rc); + } + return rc; +} + +int fsp_occ_load_start_status(u64 chipid, s64 status) +{ + struct fsp_msg *stat; + int rc = OPAL_NO_MEM; + int status_word = 0; + + if (status) { + struct proc_chip *chip = get_chip(chipid); + + if (!chip) + return OPAL_PARAMETER; + + status_word = 0xB500 | (chip->pcid & 0xff); + log_simple_error(&e_info(OPAL_RC_OCC_LOAD), + "OCC: Error %d in load/start OCC %lld\n", rc, + chipid); + } + + stat = fsp_mkmsg(FSP_CMD_LOAD_OCC_STAT, 2, status_word, last_seq_id); + if (!stat) + return rc; + + rc = fsp_queue_msg(stat, fsp_freemsg); + if (rc) { + fsp_freemsg(stat); + log_simple_error(&e_info(OPAL_RC_OCC_LOAD), + "OCC: Error %d queueing FSP OCC LOAD STATUS msg", rc); + } + + return rc; +} + +static void occ_do_reset(u8 scope, u32 dbob_id, u32 seq_id) +{ + struct fsp_msg *rsp, *stat; + struct proc_chip *chip = next_chip(NULL); + int rc = -ENOMEM; + u8 err = 0; + + /* Check arguments */ + if (scope != 0x01 && scope != 0x02) { + /** + * @fwts-label OCCResetInvalidScope + * @fwts-advice Invalid request for resetting OCCs. Power and + * frequency management not functional + */ + prlog(PR_ERR, "OCC: Reset message with invalid scope 0x%x\n", + scope); + err = 0x22; + } + + /* First queue up an OK response to the reset message itself */ + rsp = fsp_mkmsg(FSP_RSP_RESET_OCC | err, 0); + if (rsp) + rc = fsp_queue_msg(rsp, fsp_freemsg); + if (rc) { + fsp_freemsg(rsp); + log_simple_error(&e_info(OPAL_RC_OCC_RESET), + "OCC: Error %d queueing FSP OCC RESET reply\n", rc); + return; + } + + /* If we had an error, return */ + if (err) + return; + + /* + * Call HBRT to stop OCC and leave it stopped. FSP will send load/start + * request subsequently. Also after few runtime restarts (currently 3), + * FSP will request OCC to left in stopped state. 
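+ *
+ * (Editor's note: on P8 this is done synchronously via
+ * host_services_occ_stop() below; on P9/P10 the reset is handed off to
+ * PRD via prd_fsp_occ_reset() and the status is reported back later
+ * through fsp_occ_reset_status() using the saved last_seq_id.)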
+ */ + + switch (proc_gen) { + case proc_gen_p8: + rc = host_services_occ_stop(); + break; + case proc_gen_p9: + case proc_gen_p10: + last_seq_id = seq_id; + chip = next_chip(NULL); + prd_fsp_occ_reset(chip->id); + return; + default: + return; + } + + /* Handle fallback to preload */ + if (rc == -ENOENT && chip->homer_base) { + prlog(PR_INFO, "OCC: Reset: Fallback to preloaded image\n"); + rc = 0; + } + if (!rc) { + /* Send a single success response for all chips */ + stat = fsp_mkmsg(FSP_CMD_RESET_OCC_STAT, 2, 0, seq_id); + if (stat) + rc = fsp_queue_msg(stat, fsp_freemsg); + if (rc) { + fsp_freemsg(stat); + log_simple_error(&e_info(OPAL_RC_OCC_RESET), + "OCC: Error %d queueing FSP OCC RESET" + " STATUS message\n", rc); + } + occ_msg_queue_occ_reset(); + } else { + + /* + * Then send a matching OCC Reset Status message with an 0xFE + * (fail) response code as well to the first matching chip + */ + for_each_chip(chip) { + if (scope == 0x01 && dbob_id != chip->dbob_id) + continue; + rc = -ENOMEM; + stat = fsp_mkmsg(FSP_CMD_RESET_OCC_STAT, 2, + 0xfe00 | (chip->pcid & 0xff), seq_id); + if (stat) + rc = fsp_queue_msg(stat, fsp_freemsg); + if (rc) { + fsp_freemsg(stat); + log_simple_error(&e_info(OPAL_RC_OCC_RESET), + "OCC: Error %d queueing FSP OCC RESET" + " STATUS message\n", rc); + } + break; + } + } +} + +static bool fsp_occ_msg(u32 cmd_sub_mod, struct fsp_msg *msg) +{ + u32 dbob_id, seq_id; + u8 scope; + + switch (cmd_sub_mod) { + case FSP_CMD_LOAD_OCC: + /* + * We get the "Load OCC" command at boot. We don't currently + * support loading it ourselves (we don't have the procedures, + * they will come with Host Services). For now HostBoot will + * have loaded a OCC firmware for us, but we still need to + * be nice and respond to OCC. + */ + scope = msg->data.bytes[3]; + dbob_id = fsp_msg_get_data_word(msg, 1); + seq_id = fsp_msg_get_data_word(msg, 2); + prlog(PR_INFO, "OCC: Got OCC Load message, scope=0x%x" + " dbob=0x%x seq=0x%x\n", scope, dbob_id, seq_id); + occ_do_load(scope, dbob_id, seq_id); + return true; + + case FSP_CMD_RESET_OCC: + /* + * We shouldn't be getting this one, but if we do, we have + * to reply something sensible or the FSP will get upset + */ + scope = msg->data.bytes[3]; + dbob_id = fsp_msg_get_data_word(msg, 1); + seq_id = fsp_msg_get_data_word(msg, 2); + prlog(PR_INFO, "OCC: Got OCC Reset message, scope=0x%x" + " dbob=0x%x seq=0x%x\n", scope, dbob_id, seq_id); + occ_do_reset(scope, dbob_id, seq_id); + return true; + } + return false; +} + +static struct fsp_client fsp_occ_client = { + .message = fsp_occ_msg, +}; + +void occ_fsp_init(void) +{ + /* If we have an FSP, register for notifications */ + if (fsp_present()) + fsp_register_client(&fsp_occ_client, FSP_MCLASS_OCC); +} diff --git a/roms/skiboot/hw/fsp/fsp-op-panel.c b/roms/skiboot/hw/fsp/fsp-op-panel.c new file mode 100644 index 000000000..a8ac00b7a --- /dev/null +++ b/roms/skiboot/hw/fsp/fsp-op-panel.c @@ -0,0 +1,266 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* + * Small LCD screen on the front of FSP machines + * + * Copyright 2013-2019 IBM Corp. 
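+ *
+ * Implements fsp_op_display() (direct SRC display via FSP_CMD_DISP_SRC_DIRECT)
+ * and the OPAL_WRITE_OPPANEL_ASYNC call, which writes panel lines through a
+ * TCE-mapped buffer using FSP_CMD_DISP_SRC_INDIR.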
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +DEFINE_LOG_ENTRY(OPAL_RC_PANEL_WRITE, OPAL_PLATFORM_ERR_EVT, OPAL_OP_PANEL, + OPAL_MISC_SUBSYSTEM, OPAL_PREDICTIVE_ERR_GENERAL, OPAL_NA); + +/* For OPAL OP_PANEL API we can only have one in flight due to TCEs */ +static struct fsp_msg *op_req; +static uint64_t op_async_token; +static struct lock op_lock = LOCK_UNLOCKED; + +static void fsp_op_display_fatal(uint32_t w0, uint32_t w1) +{ + static struct fsp_msg op_msg_resp; + static struct fsp_msg op_msg = { + .resp = &op_msg_resp, + }; + + fsp_fillmsg(&op_msg, FSP_CMD_DISP_SRC_DIRECT, 3, 1, w0, w1); + + /* + * A special way to send a message: it doesn't run pollers. + * This means we can call it while in a poller, which we may + * well be in when we're terminating (and thus displaying a *fatal* + * message on the op-panel). + */ + fsp_fatal_msg(&op_msg); +} + +void fsp_op_display(enum op_severity sev, enum op_module mod, uint16_t code) +{ + struct fsp_msg *op_msg; + uint32_t w0; + uint32_t w1; + + if (!fsp_present()) + return; + + w0 = sev << 16 | mod; + + w1 = tohex((code >> 12) & 0xf) << 24; + w1 |= tohex((code >> 8) & 0xf) << 16; + w1 |= tohex((code >> 4) & 0xf) << 8; + w1 |= tohex((code ) & 0xf); + + if (sev == OP_FATAL) { + fsp_op_display_fatal(w0, w1); + } else { + op_msg = fsp_allocmsg(true); + if (!op_msg) { + prerror("Failed to allocate FSP message for PANEL\n"); + return; + } + + fsp_fillmsg(op_msg, FSP_CMD_DISP_SRC_DIRECT, 3, 1, w0, w1); + + if(fsp_queue_msg(op_msg, fsp_freemsg)) + prerror("Failed to queue FSP message for OP PANEL\n"); + } +} + +void op_panel_disable_src_echo(void) +{ + struct fsp_msg op_msg_resp; + struct fsp_msg op_msg = { + .resp = &op_msg_resp, + }; + + if (!fsp_present()) + return; + + fsp_fillmsg(&op_msg, FSP_CMD_DIS_SRC_ECHO, 0); + fsp_sync_msg(&op_msg, false); +} + +void op_panel_clear_src(void) +{ + struct fsp_msg op_msg_resp; + struct fsp_msg op_msg = { + .resp = &op_msg_resp, + }; + + if (!fsp_present()) + return; + + fsp_fillmsg(&op_msg, FSP_CMD_CLEAR_SRC, 0); + fsp_sync_msg(&op_msg, false); +} + +/* opal_write_oppanel - Write to the physical op panel. + * + * Pass in an array of oppanel_line_t structs defining the ASCII characters + * to display on each line of the oppanel. If there are two lines on the + * physical panel, and you only want to write to the first line, you only + * need to pass in one line. If you only want to write to the second line, + * you need to pass in both lines, and set the line_len of the first line + * to zero. + * + * This command is asynchronous. If OPAL_SUCCESS is returned, then the + * operation was initiated successfully. Subsequent calls will return + * OPAL_BUSY until the current operation is complete. 
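+ *
+ * Hypothetical caller sketch (illustrative only; buffer address and length
+ * are big-endian per oppanel_line_t, and the values shown are examples):
+ *
+ *	char buf[] = "OPAL booting";
+ *	oppanel_line_t lines[1];
+ *	lines[0].line     = cpu_to_be64((uint64_t)(unsigned long)buf);
+ *	lines[0].line_len = cpu_to_be64(strlen(buf));
+ *	opal_write_oppanel_async(token, lines, 1);
+ *
+ * Completion is signalled with an OPAL_MSG_ASYNC_COMP message carrying the
+ * async token (see op_panel_write_complete() below).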
+ */ +struct op_src { + uint8_t version; +#define OP_SRC_VERSION 2 + uint8_t flags; + uint8_t reserved; + uint8_t hex_word_cnt; + __be16 reserved2; + __be16 total_size; + __be32 word2; /* SRC format in low byte */ + __be32 word3; + __be32 word4; + __be32 word5; + __be32 word6; + __be32 word7; + __be32 word8; + __be32 word9; + uint8_t ascii[OP_PANEL_NUM_LINES * OP_PANEL_LINE_LEN]; /* Word 11 */ +} __packed __align(4); + +/* Page align for the sake of TCE mapping */ +static struct op_src op_src __align(0x1000); + +static void __op_panel_write_complete(struct fsp_msg *msg) +{ + fsp_tce_unmap(PSI_DMA_OP_PANEL_MISC, 0x1000); + + lock(&op_lock); + op_req = NULL; + unlock(&op_lock); + + fsp_freemsg(msg); +} + +static void op_panel_write_complete(struct fsp_msg *msg) +{ + uint8_t rc = (msg->resp->word1 >> 8) & 0xff; + + if (rc) + prerror("OPPANEL: Error 0x%02x in display command\n", rc); + + __op_panel_write_complete(msg); + + opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL, + cpu_to_be64(1), + cpu_to_be64(op_async_token)); +} + +static int64_t __opal_write_oppanel(oppanel_line_t *lines, uint64_t num_lines, + uint64_t async_token) +{ + int64_t rc = OPAL_ASYNC_COMPLETION; + int len; + int i; + + if (num_lines < 1 || num_lines > OP_PANEL_NUM_LINES) + return OPAL_PARAMETER; + + /* Only one in flight */ + lock(&op_lock); + if (op_req) { + rc = OPAL_BUSY_EVENT; + unlock(&op_lock); + goto bail; + } + + op_req = fsp_allocmsg(true); + if (!op_req) { + rc = OPAL_NO_MEM; + unlock(&op_lock); + goto bail; + } + unlock(&op_lock); + + op_async_token = async_token; + + memset(&op_src, 0, sizeof(op_src)); + + op_src.version = OP_SRC_VERSION; + op_src.flags = 0; + op_src.reserved = 0; + op_src.hex_word_cnt = 1; /* header word only */ + op_src.reserved2 = 0; + op_src.total_size = cpu_to_be16(sizeof(op_src)); + op_src.word2 = 0; /* should be unneeded */ + + for (i = 0; i < num_lines; i++) { + uint8_t *current_line = op_src.ascii + (i * OP_PANEL_LINE_LEN); + + len = be64_to_cpu(lines[i].line_len); + if (len < OP_PANEL_LINE_LEN) + memset(current_line + len, ' ', OP_PANEL_LINE_LEN-len); + else + len = OP_PANEL_LINE_LEN; + memcpy(current_line, (void *) be64_to_cpu(lines[i].line), len); + } + + for (i = 0; i < sizeof(op_src.ascii); i++) { + /* + * So, there's this interesting thing if you send + * HTML/Javascript through the Operator Panel. + * You get to inject it into the ASM web ui! + * So we filter out anything suspect here, + * at least for the time being. + * + * Allowed characters: + * . / 0-9 : a-z A-Z SPACE + */ + if (! ((op_src.ascii[i] >= '.' 
&& op_src.ascii[i] <= ':') || + (op_src.ascii[i] >= 'a' && op_src.ascii[i] <= 'z') || + (op_src.ascii[i] >= 'A' && op_src.ascii[i] <= 'Z') || + op_src.ascii[i] == ' ')) { + op_src.ascii[i] = '.'; + } + } + + fsp_tce_map(PSI_DMA_OP_PANEL_MISC, &op_src, 0x1000); + + fsp_fillmsg(op_req, FSP_CMD_DISP_SRC_INDIR, 3, 0, + PSI_DMA_OP_PANEL_MISC, sizeof(struct op_src)); + rc = fsp_queue_msg(op_req, op_panel_write_complete); + if (rc) { + __op_panel_write_complete(op_req); + rc = OPAL_INTERNAL_ERROR; + } + bail: + log_simple_error(&e_info(OPAL_RC_PANEL_WRITE), + "FSP: Error updating Op Panel: %lld\n", rc); + return rc; +} + +static int64_t opal_write_oppanel_async(uint64_t async_token, + oppanel_line_t *lines, + uint64_t num_lines) +{ + return __opal_write_oppanel(lines, num_lines, async_token); +} + +void fsp_oppanel_init(void) +{ + struct dt_node *oppanel; + + if (!fsp_present()) + return; + + opal_register(OPAL_WRITE_OPPANEL_ASYNC, opal_write_oppanel_async, 3); + + oppanel = dt_new(opal_node, "oppanel"); + dt_add_property_cells(oppanel, "#length", OP_PANEL_LINE_LEN); + dt_add_property_cells(oppanel, "#lines", OP_PANEL_NUM_LINES); + dt_add_property_string(oppanel, "compatible", "ibm,opal-oppanel"); +} diff --git a/roms/skiboot/hw/fsp/fsp-psi.c b/roms/skiboot/hw/fsp/fsp-psi.c new file mode 100644 index 000000000..38f130dd7 --- /dev/null +++ b/roms/skiboot/hw/fsp/fsp-psi.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* Copyright 2013-2019 IBM Corp. */ + +#include +#include +#include +#include + +static void psi_tce_enable(struct psi *psi, bool enable) +{ + void *addr = psi->regs + PSIHB_PHBSCR; + u64 val; + + val = in_be64(addr); + if (enable) + val |= PSIHB_PHBSCR_TCE_ENABLE; + else + val &= ~PSIHB_PHBSCR_TCE_ENABLE; + out_be64(addr, val); +} + +/* + * Configure the PSI interface for communicating with + * an FSP, such as enabling the TCEs, FSP commands, + * etc... + */ +void psi_init_for_fsp(struct psi *psi) +{ + uint64_t reg; + bool enable_tce = true; + + lock(&psi_lock); + + /* Disable and setup TCE base address */ + psi_tce_enable(psi, false); + + switch (proc_gen) { + case proc_gen_p8: + case proc_gen_p9: + case proc_gen_p10: + out_be64(psi->regs + PSIHB_TAR, PSI_TCE_TABLE_BASE | + PSIHB_TAR_256K_ENTRIES); + break; + default: + enable_tce = false; + }; + + /* Enable various other configuration register bits based + * on what pHyp does. We keep interrupts disabled until + * after the mailbox has been properly configured. We assume + * basic stuff such as PSI link enable is already there. + * + * - FSP CMD Enable + * - FSP MMIO Enable + * - TCE Enable + * - Error response enable + * + * Clear all other error bits + */ + if (!psi->active) { + prerror("PSI: psi_init_for_fsp() called on inactive link!\n"); + unlock(&psi_lock); + return; + } + + reg = in_be64(psi->regs + PSIHB_CR); + reg |= PSIHB_CR_FSP_CMD_ENABLE; + reg |= PSIHB_CR_FSP_MMIO_ENABLE; + reg |= PSIHB_CR_FSP_ERR_RSP_ENABLE; + reg &= ~0x00000000ffffffffull; + out_be64(psi->regs + PSIHB_CR, reg); + psi_tce_enable(psi, enable_tce); + + unlock(&psi_lock); +} diff --git a/roms/skiboot/hw/fsp/fsp-rtc.c b/roms/skiboot/hw/fsp/fsp-rtc.c new file mode 100644 index 000000000..237560a8d --- /dev/null +++ b/roms/skiboot/hw/fsp/fsp-rtc.c @@ -0,0 +1,567 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* + * Real Time Clock (RTC) attached to FSP + * + * Copyright 2013-2017 IBM Corp. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Note on how those operate: + * + * Because the RTC calls can be pretty slow, these functions will shoot + * an asynchronous request to the FSP (if none is already pending) + * + * The requests will return OPAL_BUSY_EVENT as long as the event has + * not been completed. + * + * WARNING: An attempt at doing an RTC write while one is already pending + * will simply ignore the new arguments and continue returning + * OPAL_BUSY_EVENT. This is to be compatible with existing Linux code. + * + * Completion of the request will result in an event OPAL_EVENT_RTC + * being signaled, which will remain raised until a corresponding call + * to opal_rtc_read() or opal_rtc_write() finally returns OPAL_SUCCESS, + * at which point the operation is complete and the event cleared. + * + * If we end up taking longer than rtc_read_timeout_ms millieconds waiting + * for the response from a read request, we simply return a cached value (plus + * an offset calculated from the timebase. When the read request finally + * returns, we update our cache value accordingly. + * + * There is two separate set of state for reads and writes. If both are + * attempted at the same time, the event bit will remain set as long as either + * of the two has a pending event to signal. + */ + +#include + +/* All of the below state is protected by rtc_lock. + * It should be held for the shortest amount of time possible. + * Certainly not across calls to FSP. + */ +static struct lock rtc_lock; + +static enum { + RTC_TOD_VALID, + RTC_TOD_INVALID, + RTC_TOD_PERMANENT_ERROR, +} rtc_tod_state = RTC_TOD_INVALID; + +/* State machine for getting an RTC request. + * RTC_{READ/WRITE}_NO_REQUEST -> RTC_{READ/WRITE}_PENDING_REQUEST (one in flight) + * RTC_{READ/WRITE}_PENDING_REQUEST -> RTC_{READ/WRITE}_REQUEST_AVAILABLE, + * when FSP responds + * RTC_{READ/WRITE}_REQUEST_AVAILABLE -> RTC_{READ/WRITE}_NO_REQUEST, + * when OS retrieves it + */ +static enum { + RTC_READ_NO_REQUEST, + RTC_READ_PENDING_REQUEST, + RTC_READ_REQUEST_AVAILABLE, +} rtc_read_request_state = RTC_READ_NO_REQUEST; + +static enum { + RTC_WRITE_NO_REQUEST, + RTC_WRITE_PENDING_REQUEST, + RTC_WRITE_REQUEST_AVAILABLE, +} rtc_write_request_state = RTC_WRITE_NO_REQUEST; + +static bool rtc_tod_cache_dirty = false; + +struct opal_tpo_data { + uint64_t tpo_async_token; + __be32 *year_month_day; + __be32 *hour_min; +}; + +/* Timebase value when we last initiated a RTC read request */ +static unsigned long read_req_tb; + +/* If a RTC read takes longer than this, we return a value generated + * from the cache + timebase */ +static const int rtc_read_timeout_ms = 1500; + +DEFINE_LOG_ENTRY(OPAL_RC_RTC_TOD, OPAL_PLATFORM_ERR_EVT, OPAL_RTC, + OPAL_PLATFORM_FIRMWARE, OPAL_INFO, OPAL_NA); + +DEFINE_LOG_ENTRY(OPAL_RC_RTC_READ, OPAL_PLATFORM_ERR_EVT, OPAL_RTC, + OPAL_PLATFORM_FIRMWARE, OPAL_INFO, OPAL_NA); + +static void fsp_tpo_req_complete(struct fsp_msg *read_resp) +{ + struct opal_tpo_data *attr = read_resp->user_data; + int val; + int rc; + + val = (read_resp->resp->word1 >> 8) & 0xff; + switch (val) { + case FSP_STATUS_TOD_RESET: + log_simple_error(&e_info(OPAL_RC_RTC_TOD), + "RTC TPO in invalid state\n"); + rc = OPAL_INTERNAL_ERROR; + break; + + case FSP_STATUS_TOD_PERMANENT_ERROR: + log_simple_error(&e_info(OPAL_RC_RTC_TOD), + "RTC TPO in permanent error state\n"); + rc = OPAL_INTERNAL_ERROR; + break; + case FSP_STATUS_INVALID_DATA: + 
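+		/*
+		 * The FSP rejected the date/time data in the TPO request;
+		 * surface this to the caller as OPAL_PARAMETER rather than
+		 * an internal error.
+		 */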
log_simple_error(&e_info(OPAL_RC_RTC_TOD), + "RTC TPO: Invalid data\n"); + rc = OPAL_PARAMETER; + break; + case FSP_STATUS_SUCCESS: + /* Save the read TPO value in our cache */ + if (attr->year_month_day) + *attr->year_month_day = cpu_to_be32(fsp_msg_get_data_word(read_resp->resp, 0)); + if (attr->hour_min) + *attr->hour_min = cpu_to_be32(fsp_msg_get_data_word(read_resp->resp, 1)); + rc = OPAL_SUCCESS; + break; + + default: + log_simple_error(&e_info(OPAL_RC_RTC_TOD), + "TPO read failed: %d\n", val); + rc = OPAL_INTERNAL_ERROR; + break; + } + opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL, + cpu_to_be64(attr->tpo_async_token), + cpu_to_be64(rc)); + free(attr); + fsp_freemsg(read_resp); +} + +static void fsp_rtc_process_read(struct fsp_msg *read_resp) +{ + int val = (read_resp->word1 >> 8) & 0xff; + struct tm tm; + + assert(lock_held_by_me(&rtc_lock)); + + assert(rtc_read_request_state == RTC_READ_PENDING_REQUEST); + + switch (val) { + case FSP_STATUS_TOD_RESET: + log_simple_error(&e_info(OPAL_RC_RTC_TOD), + "RTC TOD in invalid state\n"); + rtc_tod_state = RTC_TOD_INVALID; + break; + + case FSP_STATUS_TOD_PERMANENT_ERROR: + log_simple_error(&e_info(OPAL_RC_RTC_TOD), + "RTC TOD in permanent error state\n"); + rtc_tod_state = RTC_TOD_PERMANENT_ERROR; + break; + + case FSP_STATUS_SUCCESS: + /* Save the read RTC value in our cache */ + rtc_tod_state = RTC_TOD_VALID; + datetime_to_tm(fsp_msg_get_data_word(read_resp, 0), + (u64)fsp_msg_get_data_word(read_resp, 1) << 32, &tm); + rtc_cache_update(&tm); + prlog(PR_TRACE, "FSP-RTC Got time: %d-%d-%d %d:%d:%d\n", + tm.tm_year, tm.tm_mon, tm.tm_mday, + tm.tm_hour, tm.tm_min, tm.tm_sec); + break; + + default: + log_simple_error(&e_info(OPAL_RC_RTC_TOD), + "RTC TOD read failed: %d\n", val); + rtc_tod_state = RTC_TOD_INVALID; + } + rtc_read_request_state = RTC_READ_REQUEST_AVAILABLE; +} + +static void opal_rtc_eval_events(bool read_write) +{ + bool request_available; + + if (read_write) + request_available = (rtc_read_request_state == + RTC_READ_REQUEST_AVAILABLE); + else + request_available = (rtc_write_request_state == + RTC_WRITE_REQUEST_AVAILABLE); + + assert(lock_held_by_me(&rtc_lock)); + opal_update_pending_evt(OPAL_EVENT_RTC, + request_available ? 
OPAL_EVENT_RTC : 0); +} + +static void fsp_rtc_req_complete(struct fsp_msg *msg) +{ + lock(&rtc_lock); + prlog(PR_TRACE, "RTC completion %p\n", msg); + + if (fsp_msg_cmd(msg) == (FSP_CMD_READ_TOD & 0xffffff)) { + fsp_rtc_process_read(msg->resp); + opal_rtc_eval_events(true); + } else { + assert(rtc_write_request_state == RTC_WRITE_PENDING_REQUEST); + rtc_write_request_state = RTC_WRITE_REQUEST_AVAILABLE; + opal_rtc_eval_events(false); + } + + unlock(&rtc_lock); + fsp_freemsg(msg); +} + +static int64_t fsp_rtc_send_read_request(void) +{ + struct fsp_msg *msg; + int rc; + + assert(lock_held_by_me(&rtc_lock)); + assert(rtc_read_request_state == RTC_READ_NO_REQUEST); + + msg = fsp_mkmsg(FSP_CMD_READ_TOD, 0); + if (!msg) { + log_simple_error(&e_info(OPAL_RC_RTC_READ), + "RTC: failed to allocate read message\n"); + return OPAL_INTERNAL_ERROR; + } + + rc = fsp_queue_msg(msg, fsp_rtc_req_complete); + if (rc) { + fsp_freemsg(msg); + log_simple_error(&e_info(OPAL_RC_RTC_READ), + "RTC: failed to queue read message: %d\n", rc); + return OPAL_INTERNAL_ERROR; + } + + rtc_read_request_state = RTC_READ_PENDING_REQUEST; + + read_req_tb = mftb(); + + return OPAL_BUSY_EVENT; +} + +static int64_t fsp_opal_rtc_read(__be32 *__ymd, __be64 *__hmsm) +{ + int64_t rc; + uint32_t ymd; + uint64_t hmsm; + + if (!__ymd || !__hmsm) + return OPAL_PARAMETER; + + lock(&rtc_lock); + + if (rtc_tod_state == RTC_TOD_PERMANENT_ERROR) { + rc = OPAL_HARDWARE; + goto out; + } + + /* During R/R of FSP, read cached TOD */ + if (fsp_in_rr()) { + if (rtc_tod_state == RTC_TOD_VALID) { + rtc_cache_get_datetime(&ymd, &hmsm); + rc = OPAL_SUCCESS; + } else { + rc = OPAL_INTERNAL_ERROR; + } + goto out; + } + + /* If we don't have a read pending already, fire off a request and + * return */ + if (rtc_read_request_state == RTC_READ_NO_REQUEST) { + prlog(PR_TRACE, "Sending new RTC read request\n"); + rc = fsp_rtc_send_read_request(); + /* If our pending read is done, clear events and return the time + * from the cache */ + } else if (rtc_read_request_state == RTC_READ_REQUEST_AVAILABLE) { + prlog(PR_TRACE, "RTC read complete, state %d\n", rtc_tod_state); + rtc_read_request_state = RTC_READ_NO_REQUEST; + + opal_rtc_eval_events(true); + + if (rtc_tod_state == RTC_TOD_VALID) { + rtc_cache_get_datetime(&ymd, &hmsm); + prlog(PR_TRACE,"FSP-RTC Cached datetime: %x %llx\n", + ymd, hmsm); + rc = OPAL_SUCCESS; + } else { + rc = OPAL_INTERNAL_ERROR; + } + + /* Timeout: return our cached value (updated from tb), but leave the + * read request pending so it will update the cache later */ + } else if (mftb() > read_req_tb + msecs_to_tb(rtc_read_timeout_ms)) { + prlog(PR_TRACE, "RTC read timed out\n"); + + if (rtc_tod_state == RTC_TOD_VALID) { + rtc_cache_get_datetime(&ymd, &hmsm); + rc = OPAL_SUCCESS; + } else { + rc = OPAL_INTERNAL_ERROR; + } + /* Otherwise, we're still waiting on the read to complete */ + } else { + assert(rtc_read_request_state == RTC_READ_PENDING_REQUEST); + rc = OPAL_BUSY_EVENT; + } +out: + unlock(&rtc_lock); + + if (rc == OPAL_SUCCESS) { + *__ymd = cpu_to_be32(ymd); + *__hmsm = cpu_to_be64(hmsm); + } + + return rc; +} + +static int64_t fsp_rtc_send_write_request(uint32_t year_month_day, + uint64_t hour_minute_second_millisecond) +{ + struct fsp_msg *msg; + uint32_t w0, w1, w2; + + assert(lock_held_by_me(&rtc_lock)); + assert(rtc_write_request_state == RTC_WRITE_NO_REQUEST); + + /* Create a request and send it. 
Just like for read, we ignore + * the "millisecond" field which is probably supposed to be + * microseconds and which Linux ignores as well anyway + */ + w0 = year_month_day; + w1 = (hour_minute_second_millisecond >> 32) & 0xffffff00; + w2 = 0; + + msg = fsp_mkmsg(FSP_CMD_WRITE_TOD, 3, w0, w1, w2); + if (!msg) { + prlog(PR_TRACE, " -> allocation failed !\n"); + return OPAL_INTERNAL_ERROR; + } + prlog(PR_TRACE, " -> req at %p\n", msg); + + if (fsp_queue_msg(msg, fsp_rtc_req_complete)) { + prlog(PR_TRACE, " -> queueing failed !\n"); + fsp_freemsg(msg); + return OPAL_INTERNAL_ERROR; + } + + rtc_write_request_state = RTC_WRITE_PENDING_REQUEST; + + return OPAL_BUSY_EVENT; +} + +static int64_t fsp_opal_rtc_write(uint32_t year_month_day, + uint64_t hour_minute_second_millisecond) +{ + int rc; + struct tm tm; + + lock(&rtc_lock); + if (rtc_tod_state == RTC_TOD_PERMANENT_ERROR) { + rc = OPAL_HARDWARE; + goto out; + } + + if (fsp_in_rr()) { + datetime_to_tm(year_month_day, + hour_minute_second_millisecond, &tm); + rtc_cache_update(&tm); + rtc_tod_cache_dirty = true; + rc = OPAL_SUCCESS; + goto out; + } + + if (rtc_write_request_state == RTC_WRITE_NO_REQUEST) { + prlog(PR_TRACE, "Sending new RTC write request\n"); + rc = fsp_rtc_send_write_request(year_month_day, + hour_minute_second_millisecond); + } else if (rtc_write_request_state == RTC_WRITE_PENDING_REQUEST) { + rc = OPAL_BUSY_EVENT; + } else { + assert(rtc_write_request_state == RTC_WRITE_REQUEST_AVAILABLE); + rtc_write_request_state = RTC_WRITE_NO_REQUEST; + + opal_rtc_eval_events(false); + rc = OPAL_SUCCESS; + } + +out: + unlock(&rtc_lock); + return rc; +} + +/* Set timed power on values to fsp */ +static int64_t fsp_opal_tpo_write(uint64_t async_token, uint32_t y_m_d, + uint32_t hr_min) +{ + static struct opal_tpo_data *attr; + struct fsp_msg *msg; + + if (!fsp_present()) + return OPAL_HARDWARE; + + attr = zalloc(sizeof(struct opal_tpo_data)); + if (!attr) + return OPAL_NO_MEM; + + /* Create a request and send it.*/ + attr->tpo_async_token = async_token; + + /* check if this is a disable tpo request */ + if (y_m_d == 0 && hr_min == 0) { + prlog(PR_TRACE, "Sending TPO disable request...\n"); + msg = fsp_mkmsg(FSP_CMD_TPO_DISABLE, 0); + } else { + prlog(PR_TRACE, "Sending TPO write request...\n"); + msg = fsp_mkmsg(FSP_CMD_TPO_WRITE, 2, y_m_d, hr_min); + } + + if (!msg) { + prerror("TPO: Failed to create message for WRITE to FSP\n"); + free(attr); + return OPAL_INTERNAL_ERROR; + } + msg->user_data = attr; + if (fsp_queue_msg(msg, fsp_tpo_req_complete)) { + free(attr); + fsp_freemsg(msg); + return OPAL_INTERNAL_ERROR; + } + return OPAL_ASYNC_COMPLETION; +} + +/* Read Timed power on (TPO) from FSP */ +static int64_t fsp_opal_tpo_read(uint64_t async_token, __be32 *y_m_d, + __be32 *hr_min) +{ + static struct opal_tpo_data *attr; + struct fsp_msg *msg; + int64_t rc; + + if (!fsp_present()) + return OPAL_HARDWARE; + + if (!y_m_d || !hr_min) + return OPAL_PARAMETER; + + attr = zalloc(sizeof(*attr)); + if (!attr) + return OPAL_NO_MEM; + + /* Send read requet to FSP */ + attr->tpo_async_token = async_token; + attr->year_month_day = y_m_d; + attr->hour_min = hr_min; + + prlog(PR_TRACE, "Sending new TPO read request\n"); + msg = fsp_mkmsg(FSP_CMD_TPO_READ, 0); + if (!msg) { + log_simple_error(&e_info(OPAL_RC_RTC_READ), + "TPO: failed to allocate read message\n"); + free(attr); + return OPAL_INTERNAL_ERROR; + } + msg->user_data = attr; + rc = fsp_queue_msg(msg, fsp_tpo_req_complete); + if (rc) { + free(attr); + fsp_freemsg(msg); + 
log_simple_error(&e_info(OPAL_RC_RTC_READ), + "TPO: failed to queue read message: %lld\n", rc); + return OPAL_INTERNAL_ERROR; + } + return OPAL_ASYNC_COMPLETION; +} + +static void rtc_flush_cached_tod(void) +{ + struct fsp_msg *msg; + uint64_t h_m_s_m; + uint32_t y_m_d; + + if (rtc_cache_get_datetime(&y_m_d, &h_m_s_m)) + return; + msg = fsp_mkmsg(FSP_CMD_WRITE_TOD, 3, y_m_d, + (h_m_s_m >> 32) & 0xffffff00, 0); + if (!msg) { + prerror("TPO: %s : Failed to allocate write TOD message\n", + __func__); + return; + } + if (fsp_queue_msg(msg, fsp_freemsg)) { + fsp_freemsg(msg); + prerror("TPO: %s : Failed to queue WRITE_TOD command\n", + __func__); + return; + } +} + +static bool fsp_rtc_msg_rr(u32 cmd_sub_mod, struct fsp_msg *msg) +{ + + int rc = false; + assert(msg == NULL); + + switch (cmd_sub_mod) { + case FSP_RESET_START: + rc = true; + break; + case FSP_RELOAD_COMPLETE: + lock(&rtc_lock); + if (rtc_tod_cache_dirty) { + rtc_flush_cached_tod(); + rtc_tod_cache_dirty = false; + } + unlock(&rtc_lock); + rc = true; + break; + } + + return rc; +} + +static struct fsp_client fsp_rtc_client_rr = { + .message = fsp_rtc_msg_rr, +}; + +void fsp_rtc_init(void) +{ + struct dt_node *np; + + if (!fsp_present()) { + rtc_tod_state = RTC_TOD_PERMANENT_ERROR; + return; + } + + opal_register(OPAL_RTC_READ, fsp_opal_rtc_read, 2); + opal_register(OPAL_RTC_WRITE, fsp_opal_rtc_write, 2); + opal_register(OPAL_WRITE_TPO, fsp_opal_tpo_write, 3); + opal_register(OPAL_READ_TPO, fsp_opal_tpo_read, 3); + + np = dt_new(opal_node, "rtc"); + dt_add_property_strings(np, "compatible", "ibm,opal-rtc"); + dt_add_property(np, "has-tpo", NULL, 0); + + /* Register for the reset/reload event */ + fsp_register_client(&fsp_rtc_client_rr, FSP_MCLASS_RR_EVENT); + + prlog(PR_TRACE, "Getting initial RTC TOD\n"); + + /* We don't wait for RTC response and this is actually okay as + * any OPAL callers will wait correctly and if we ever have + * internal users then they should check the state properly + */ + lock(&rtc_lock); + fsp_rtc_send_read_request(); + unlock(&rtc_lock); +} diff --git a/roms/skiboot/hw/fsp/fsp-sensor.c b/roms/skiboot/hw/fsp/fsp-sensor.c new file mode 100644 index 000000000..ffcd004f3 --- /dev/null +++ b/roms/skiboot/hw/fsp/fsp-sensor.c @@ -0,0 +1,860 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* + * This code will enable the 'powernv' to retrieve sensor related data from FSP + * using SPCN passthru mailbox commands. + * + * The OPAL read sensor API in Sapphire is implemented as an 'asynchronous' read + * call that returns after queuing the read request. A unique sensor-id is + * expected as an argument for OPAL read call which has already been exported + * to the device tree during fsp init. The sapphire code decodes this Id to + * determine requested attribute and sensor. + * + * Copyright 2013-2017 IBM Corp. 
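+ *
+ * The sensor handle passed by the OS packs the frame resource class, the
+ * resource id and the requested attribute; parse_sensor_id() below recovers
+ * them with sensor_get_frc()/sensor_get_rid()/sensor_get_attr() and selects
+ * the SPCN PRS command modifier used to fetch the data from the FSP.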
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define INVALID_DATA ((uint32_t)-1) + +/* Entry size of PRS command modifiers */ +#define PRS_STATUS_ENTRY_SZ 0x08 +#define SENSOR_PARAM_ENTRY_SZ 0x10 +#define SENSOR_DATA_ENTRY_SZ 0x08 +#define PROC_JUNC_ENTRY_SZ 0x04 + +DEFINE_LOG_ENTRY(OPAL_RC_SENSOR_INIT, OPAL_PLATFORM_ERR_EVT, OPAL_SENSOR, + OPAL_MISC_SUBSYSTEM, + OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_REBOOT, + OPAL_NA); + +DEFINE_LOG_ENTRY(OPAL_RC_SENSOR_READ, OPAL_PLATFORM_ERR_EVT, OPAL_SENSOR, + OPAL_MISC_SUBSYSTEM, OPAL_INFO, + OPAL_NA); + +DEFINE_LOG_ENTRY(OPAL_RC_SENSOR_ASYNC_COMPLETE, OPAL_PLATFORM_ERR_EVT, + OPAL_SENSOR, OPAL_MISC_SUBSYSTEM, OPAL_INFO, + OPAL_NA); + +/* FSP response status codes */ +enum { + SP_RSP_STATUS_VALID_DATA = 0x00, + SP_RSP_STATUS_INVALID_DATA = 0x22, + SP_RSP_STATUS_SPCN_ERR = 0xA8, + SP_RSP_STATUS_DMA_ERR = 0x24, +}; + +enum sensor_state { + SENSOR_VALID_DATA, + SENSOR_INVALID_DATA, + SENSOR_SPCN_ERROR, + SENSOR_DMA_ERROR, + SENSOR_PERMANENT_ERROR, + SENSOR_OPAL_ERROR, +}; + +enum spcn_attr { + SENSOR_STATUS, + SENSOR_THRS, + SENSOR_DATA, + SENSOR_MAX, +}; + +/* Parsed sensor attributes, passed through OPAL */ +struct opal_sensor_data { + uint64_t async_token; /* Asynchronous token */ + __be64 *sensor_data; /* Kernel pointer to copy data */ + enum spcn_attr spcn_attr; /* Modifier attribute */ + uint16_t rid; /* Sensor RID */ + uint8_t frc; /* Sensor resource class */ + uint32_t mod_index; /* Modifier index*/ + uint32_t offset; /* Offset in sensor buffer */ +}; + +struct spcn_mod { + uint8_t mod; /* Modifier code */ + uint8_t entry_size; /* Size of each entry in response buffer */ + uint16_t entry_count; /* Number of entries */ +}; + +static struct spcn_mod spcn_mod_data[] = { + {SPCN_MOD_PRS_STATUS_FIRST, PRS_STATUS_ENTRY_SZ, 0 }, + {SPCN_MOD_PRS_STATUS_SUBS, PRS_STATUS_ENTRY_SZ, 0 }, + {SPCN_MOD_SENSOR_PARAM_FIRST, SENSOR_PARAM_ENTRY_SZ, 0 }, + {SPCN_MOD_SENSOR_PARAM_SUBS, SENSOR_PARAM_ENTRY_SZ, 0 }, + {SPCN_MOD_SENSOR_DATA_FIRST, SENSOR_DATA_ENTRY_SZ, 0 }, + {SPCN_MOD_SENSOR_DATA_SUBS, SENSOR_DATA_ENTRY_SZ, 0 }, + /* TODO Support this modifier '0x14', if required */ + /* {SPCN_MOD_PROC_JUNC_TEMP, PROC_JUNC_ENTRY_SZ, 0, NULL}, */ + {SPCN_MOD_SENSOR_POWER, SENSOR_DATA_ENTRY_SZ, 0 }, + {SPCN_MOD_LAST, 0xff, 0xffff} +}; + +/* Frame resource class (FRC) names */ +static const char *frc_names[] = { + /* 0x00 and 0x01 are reserved */ + NULL, + NULL, + "power-controller", + "power", + "regulator", + "cooling-fan", + "cooling-controller", + "battery-charger", + "battery-pack", + "amb-temp", + "temp", + "vrm", + "riser-card", + "io-backplane" +}; + +#define SENSOR_MAX_SIZE 0x00100000 +static void *sensor_buffer = NULL; +static enum sensor_state sensor_state; +static bool prev_msg_consumed = true; +static struct lock sensor_lock; + +/* Function prototypes */ +static int64_t fsp_sensor_send_read_request(struct opal_sensor_data *attr); +static void queue_msg_for_delivery(int rc, struct opal_sensor_data *attr); + + +/* + * Power Resource Status (PRS) + * Command: 0x42 + * + * Modifier: 0x01 + * -------------------------------------------------------------------------- + * | 0 1 2 3 4 5 6 7 | + * -------------------------------------------------------------------------- + * |Frame resrc class| PRID | SRC | Status | + * -------------------------------------------------------------------------- + * + * + * Modifier: 0x10 + * -------------------------------------------------------------------------- + * | 0 1 2 3 4 5 6 7 
| + * -------------------------------------------------------------------------- + * |Frame resrc class| PRID | Sensor location | + * -------------------------------------------------------------------------- + * -------------------------------------------------------------------------- + * | 8 9 10 11 12 13 14 15 | + * -------------------------------------------------------------------------- + * | Reserved | Reserved | Threshold | Status | + * -------------------------------------------------------------------------- + * + * + * Modifier: 0x12 + * -------------------------------------------------------------------------- + * | 0 1 2 3 4 5 6 7 | + * -------------------------------------------------------------------------- + * |Frame resrc class| PRID | Sensor data | Status | + * -------------------------------------------------------------------------- + * + * + * Modifier: 0x14 + * -------------------------------------------------------------------------- + * | 0 1 2 3 | + * -------------------------------------------------------------------------- + * |Enclosure Tj Avg | Chip Tj Avg | Reserved | Reserved | + * -------------------------------------------------------------------------- + */ + + +/* + * When coming from a SENSOR_POWER modifier command, the resource id + * of a power supply is on one byte and misses a "subclass" byte + * (0x10). This routine adds it to be consistent with the PRS_STATUS + * modifier command. + */ +#define normalize_power_rid(rid) (0x1000|(rid)) + +static uint32_t sensor_power_process_data(uint16_t rid, + struct sensor_power *power) +{ + int i; + + if (!sensor_power_is_valid(power)) { + prlog(PR_TRACE, "Power Sensor data not valid\n"); + return INVALID_DATA; + } + + for (i = 0; i < sensor_power_count(power); i++) { + prlog(PR_TRACE, "Power[%d]: %d mW\n", i, + power->supplies[i].milliwatts); + if (rid == normalize_power_rid(power->supplies[i].rid)) + return be32_to_cpu(power->supplies[i].milliwatts) / 1000; + } + + return 0; +} + +static inline uint16_t convert_status_to_fault(uint16_t status) +{ + return status & 0x06; +} + +static void fsp_sensor_process_data(struct opal_sensor_data *attr) +{ + uint8_t *sensor_buf_ptr = (uint8_t *)sensor_buffer; + uint32_t sensor_data = INVALID_DATA; + __be16 sensor_mod_data[8]; + int count; + + for (count = 0; count < spcn_mod_data[attr->mod_index].entry_count; + count++) { + memcpy((void *)sensor_mod_data, sensor_buf_ptr, + spcn_mod_data[attr->mod_index].entry_size); + if (spcn_mod_data[attr->mod_index].mod == SPCN_MOD_PROC_JUNC_TEMP) { + /* TODO Support this modifier '0x14', if required */ + + } else if (spcn_mod_data[attr->mod_index].mod == SPCN_MOD_SENSOR_POWER) { + sensor_data = sensor_power_process_data(attr->rid, + (struct sensor_power *) sensor_buf_ptr); + break; + } else if (be16_to_cpu(sensor_mod_data[0]) == attr->frc && + be16_to_cpu(sensor_mod_data[1]) == attr->rid) { + switch (attr->spcn_attr) { + case SENSOR_STATUS: + sensor_data = + convert_status_to_fault(be16_to_cpu(sensor_mod_data[3])); + break; + case SENSOR_THRS: + sensor_data = be16_to_cpu(sensor_mod_data[6]); + break; + case SENSOR_DATA: + sensor_data = be16_to_cpu(sensor_mod_data[2]); + break; + default: + break; + } + + break; + } + + sensor_buf_ptr += spcn_mod_data[attr->mod_index].entry_size; + } + + *attr->sensor_data = cpu_to_be64(sensor_data); + if (sensor_data == INVALID_DATA) + queue_msg_for_delivery(OPAL_PARTIAL, attr); + else + queue_msg_for_delivery(OPAL_SUCCESS, attr); +} + +static int fsp_sensor_process_read(struct fsp_msg *resp_msg) +{ 
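+	/*
+	 * Decode the SPCN passthru response: the status byte is taken from
+	 * word1; on success the payload size comes from the low 16 bits of
+	 * response data word 1, otherwise sensor_state records the failure
+	 * class and a size of 0 is returned.
+	 */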
+ uint8_t mbx_rsp_status; + uint32_t size = 0; + + mbx_rsp_status = (resp_msg->word1 >> 8) & 0xff; + switch (mbx_rsp_status) { + case SP_RSP_STATUS_VALID_DATA: + sensor_state = SENSOR_VALID_DATA; + size = fsp_msg_get_data_word(resp_msg, 1) & 0xffff; + break; + case SP_RSP_STATUS_INVALID_DATA: + log_simple_error(&e_info(OPAL_RC_SENSOR_READ), + "SENSOR: %s: Received invalid data\n", __func__); + sensor_state = SENSOR_INVALID_DATA; + break; + case SP_RSP_STATUS_SPCN_ERR: + log_simple_error(&e_info(OPAL_RC_SENSOR_READ), + "SENSOR: %s: Failure due to SPCN error\n", __func__); + sensor_state = SENSOR_SPCN_ERROR; + break; + case SP_RSP_STATUS_DMA_ERR: + log_simple_error(&e_info(OPAL_RC_SENSOR_READ), + "SENSOR: %s: Failure due to DMA error\n", __func__); + sensor_state = SENSOR_DMA_ERROR; + break; + default: + log_simple_error(&e_info(OPAL_RC_SENSOR_READ), + "SENSOR %s: Read failed, status:0x%02X\n", + __func__, mbx_rsp_status); + sensor_state = SENSOR_INVALID_DATA; + break; + } + + return size; +} + +static void queue_msg_for_delivery(int rc, struct opal_sensor_data *attr) +{ + prlog(PR_INSANE, "%s: rc:%d, data:%lld\n", + __func__, rc, *(attr->sensor_data)); + check_sensor_read(attr->async_token); + opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL, + cpu_to_be64(attr->async_token), + cpu_to_be64(rc)); + spcn_mod_data[attr->mod_index].entry_count = 0; + free(attr); + prev_msg_consumed = true; +} + +static void fsp_sensor_read_complete(struct fsp_msg *msg) +{ + struct opal_sensor_data *attr = msg->user_data; + enum spcn_rsp_status status; + int rc, size; + + prlog(PR_INSANE, "%s()\n", __func__); + + status = (fsp_msg_get_data_word(msg->resp, 1) >> 24) & 0xff; + size = fsp_sensor_process_read(msg->resp); + fsp_freemsg(msg); + + lock(&sensor_lock); + if (sensor_state == SENSOR_VALID_DATA) { + spcn_mod_data[attr->mod_index].entry_count += (size / + spcn_mod_data[attr->mod_index].entry_size); + attr->offset += size; + /* Fetch the subsequent entries of the same modifier type */ + if (status == SPCN_RSP_STATUS_COND_SUCCESS) { + switch (spcn_mod_data[attr->mod_index].mod) { + case SPCN_MOD_PRS_STATUS_FIRST: + case SPCN_MOD_SENSOR_PARAM_FIRST: + case SPCN_MOD_SENSOR_DATA_FIRST: + attr->mod_index++; + spcn_mod_data[attr->mod_index].entry_count = + spcn_mod_data[attr->mod_index - 1]. 
+ entry_count; + spcn_mod_data[attr->mod_index - 1].entry_count = 0; + break; + default: + break; + } + + rc = fsp_sensor_send_read_request(attr); + if (rc != OPAL_ASYNC_COMPLETION) + goto err; + } else { /* Notify 'powernv' of read completion */ + fsp_sensor_process_data(attr); + } + } else { + rc = OPAL_INTERNAL_ERROR; + goto err; + } + unlock(&sensor_lock); + return; +err: + *attr->sensor_data = cpu_to_be64(INVALID_DATA); + queue_msg_for_delivery(rc, attr); + unlock(&sensor_lock); + log_simple_error(&e_info(OPAL_RC_SENSOR_ASYNC_COMPLETE), + "SENSOR: %s: Failed to queue the " + "read request to fsp\n", __func__); +} + +static int64_t fsp_sensor_send_read_request(struct opal_sensor_data *attr) +{ + int rc; + struct fsp_msg *msg; + uint32_t align; + uint32_t cmd_header; + + if (fsp_in_rr()) + return OPAL_BUSY; + + prlog(PR_INSANE, "Get the data for modifier [%x]\n", + spcn_mod_data[attr->mod_index].mod); + + if (spcn_mod_data[attr->mod_index].mod == SPCN_MOD_PROC_JUNC_TEMP) { + /* TODO Support this modifier '0x14', if required */ + align = attr->offset % sizeof(uint32_t); + if (align) + attr->offset += (sizeof(uint32_t) - align); + + /* TODO Add 8 byte command data required for mod 0x14 */ + + attr->offset += 8; + + cmd_header = spcn_mod_data[attr->mod_index].mod << 24 | + SPCN_CMD_PRS << 16 | 0x0008; + } else { + cmd_header = spcn_mod_data[attr->mod_index].mod << 24 | + SPCN_CMD_PRS << 16; + } + + msg = fsp_mkmsg(FSP_CMD_SPCN_PASSTHRU, 4, + SPCN_ADDR_MODE_CEC_NODE, cmd_header, 0, + PSI_DMA_SENSOR_BUF + attr->offset); + + if (!msg) { + log_simple_error(&e_info(OPAL_RC_SENSOR_READ), "SENSOR: Failed " + "to allocate read message\n"); + return OPAL_INTERNAL_ERROR; + } + + msg->user_data = attr; + rc = fsp_queue_msg(msg, fsp_sensor_read_complete); + if (rc) { + fsp_freemsg(msg); + msg = NULL; + log_simple_error(&e_info(OPAL_RC_SENSOR_READ), "SENSOR: Failed " + "to queue read message (%d)\n", rc); + return OPAL_INTERNAL_ERROR; + } + + return OPAL_ASYNC_COMPLETION; +} + +/* + * These are the resources we know about and for which we provide a + * mapping in the device tree to capture data from the OS. Just + * discard the other ones for the moment. + */ +static inline bool sensor_frc_is_valid(uint16_t frc) +{ + switch (frc) { + case SENSOR_FRC_POWER_SUPPLY: + case SENSOR_FRC_COOLING_FAN: + case SENSOR_FRC_AMB_TEMP: + return true; + default: + return false; + } +} + +/* + * Each attribute of a resource needs a request to the FSP to capture + * its data. The routine below provides the mapping between the + * attribute and the PRS command modifier to use. 
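+ * For example, (amb-temp, SENSOR_DATA) maps to SPCN_MOD_SENSOR_DATA_FIRST,
+ * while a power supply's SENSOR_DATA uses the dedicated SPCN_MOD_SENSOR_POWER
+ * modifier; the full mapping is summarised below: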
+ * + * resource | data | thrs | status | + * ----------------+--------+--------+-----------+ + * power_supply | POWER | | | + * | | | PRS | + * ----------------+--------+--------+-----------+ + * amb-temp | DATA | | DATA | + * | | PARAM | PARAM (*) | + * ----------------+--------+--------+-----------+ + * fan | DATA | | DATA (*) | + * | | PARAM | PARAM (*) | + * | | | PRS | + * + * (*) don't use the attribute given by this command modifier + */ +static int64_t parse_sensor_id(uint32_t handler, struct opal_sensor_data *attr) +{ + uint32_t mod, index; + + attr->frc = sensor_get_frc(handler); + attr->rid = sensor_get_rid(handler); + attr->spcn_attr = sensor_get_attr(handler); + + if (!sensor_frc_is_valid(attr->frc)) + return OPAL_PARAMETER; + + /* now compute the PRS command modifier which will be used to + * request a resource attribute from the FSP */ + switch (attr->spcn_attr) { + case SENSOR_DATA: + if (attr->frc == SENSOR_FRC_POWER_SUPPLY) + mod = SPCN_MOD_SENSOR_POWER; + else + mod = SPCN_MOD_SENSOR_DATA_FIRST; + break; + + case SENSOR_THRS: + mod = SPCN_MOD_SENSOR_PARAM_FIRST; + break; + + case SENSOR_STATUS: + switch (attr->frc) { + case SENSOR_FRC_AMB_TEMP: + mod = SPCN_MOD_SENSOR_DATA_FIRST; + break; + case SENSOR_FRC_POWER_SUPPLY: + case SENSOR_FRC_COOLING_FAN: + mod = SPCN_MOD_PRS_STATUS_FIRST; + break; + default: + return OPAL_PARAMETER; + } + break; + + default: + return OPAL_PARAMETER; + } + + for (index = 0; spcn_mod_data[index].mod != SPCN_MOD_LAST; index++) { + if (spcn_mod_data[index].mod == mod) + break; + } + + attr->mod_index = index; + return 0; +} + + +int64_t fsp_opal_read_sensor(uint32_t sensor_hndl, int token, + __be64 *sensor_data) +{ + struct opal_sensor_data *attr; + int64_t rc; + + prlog(PR_INSANE, "fsp_opal_read_sensor [%08x]\n", sensor_hndl); + + if (fsp_in_rr()) + return OPAL_BUSY; + + if (sensor_state == SENSOR_PERMANENT_ERROR) { + rc = OPAL_HARDWARE; + goto out; + } + + if (!sensor_hndl) { + rc = OPAL_PARAMETER; + goto out; + } + + lock(&sensor_lock); + if (prev_msg_consumed) { + attr = zalloc(sizeof(*attr)); + if (!attr) { + log_simple_error(&e_info(OPAL_RC_SENSOR_READ), + "SENSOR: Failed to allocate memory\n"); + rc = OPAL_NO_MEM; + goto out_lock; + } + + /* Parse the sensor id and store them to the local structure */ + rc = parse_sensor_id(sensor_hndl, attr); + if (rc) { + log_simple_error(&e_info(OPAL_RC_SENSOR_READ), + "SENSOR: %s: Failed to parse the sensor " + "handle[0x%08x]\n", __func__, sensor_hndl); + goto out_free; + } + /* Kernel buffer pointer to copy the data later when ready */ + attr->sensor_data = sensor_data; + attr->async_token = token; + + rc = fsp_sensor_send_read_request(attr); + if (rc != OPAL_ASYNC_COMPLETION) { + log_simple_error(&e_info(OPAL_RC_SENSOR_READ), + "SENSOR: %s: Failed to queue the read " + "request to fsp\n", __func__); + goto out_free; + } + + prev_msg_consumed = false; + } else { + rc = OPAL_BUSY_EVENT; + } + + unlock(&sensor_lock); + return rc; + +out_free: + free(attr); +out_lock: + unlock(&sensor_lock); +out: + return rc; +} + + +#define MAX_NAME 64 + +static struct dt_node *sensor_get_node(struct dt_node *sensors, + struct sensor_header *header, const char* attrname) +{ + char name[MAX_NAME]; + struct dt_node *node; + + /* + * Just use the resource class name and resource id. This + * should be obvious enough for a node name. 
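+ * e.g. "cooling-fan#4-faulted" or "amb-temp#1-data" (the numeric values
+ * here are only examples).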
+ */ + snprintf(name, sizeof(name), "%s#%d-%s", frc_names[be16_to_cpu(header->frc)], be16_to_cpu(header->rid), attrname); + + /* + * The same resources are reported by the different PRS + * subcommands (PRS_STATUS, SENSOR_PARAM, SENSOR_DATA). So we + * need to check that we did not already create the device + * node. + */ + node = dt_find_by_path(sensors, name); + if (!node) { + prlog(PR_INFO, "SENSOR: creating node %s\n", name); + + node = dt_new(sensors, name); + + snprintf(name, sizeof(name), "ibm,opal-sensor-%s", + frc_names[be16_to_cpu(header->frc)]); + dt_add_property_string(node, "compatible", name); + } else { + /** + * @fwts-label OPALSensorNodeExists + * @fwts-advice OPAL had trouble creating the sensor + * nodes in the device tree as there was already one there. + * This indicates either the device tree from Hostboot + * already filled in sensors or an OPAL bug. + */ + prlog(PR_ERR, "SENSOR: node %s exists\n", name); + } + return node; +} + +#define sensor_handler(header, attr_num) \ + sensor_make_handler(SENSOR_FSP, be16_to_cpu((header).frc), be16_to_cpu((header).rid), attr_num) + +static int add_sensor_prs(struct dt_node *sensors, struct sensor_prs *prs) +{ + struct dt_node *node; + + node = sensor_get_node(sensors, &prs->header, "faulted"); + if (!node) + return -1; + + dt_add_property_cells(node, "sensor-id", + sensor_handler(prs->header, SENSOR_STATUS)); + return 0; +} + +static int add_sensor_param(struct dt_node *sensors, struct sensor_param *param) +{ + struct dt_node *node; + + node = sensor_get_node(sensors, ¶m->header, "thrs"); + if (!node) + return -1; + + dt_add_property_string(node, "ibm,loc-code", param->location); + dt_add_property_cells(node, "sensor-id", + sensor_handler(param->header, SENSOR_THRS)); + /* don't use the status coming from the response of the + * SENSOR_PARAM subcommand */ + return 0; +} + +static int add_sensor_data(struct dt_node *sensors, + struct sensor_data *data) +{ + struct dt_node *node; + + node = sensor_get_node(sensors, &data->header, "data"); + if (!node) + return -1; + + dt_add_property_cells(node, "sensor-id", + sensor_handler(data->header, SENSOR_DATA)); + + /* Let's make sure we are not adding a duplicate device node. + * Some resource, like fans, get their status attribute from + * three different commands ... + */ + if (be16_to_cpu(data->header.frc) == SENSOR_FRC_AMB_TEMP) { + node = sensor_get_node(sensors, &data->header, "faulted"); + if (!node) + return -1; + + dt_add_property_cells(node, "sensor-id", + sensor_handler(data->header, SENSOR_STATUS)); + } + + return 0; +} + +static int add_sensor_power(struct dt_node *sensors, struct sensor_power *power) +{ + int i; + struct dt_node *node; + + if (!sensor_power_is_valid(power)) + return -1; + + for (i = 0; i < sensor_power_count(power); i++) { + struct sensor_header header = { + cpu_to_be16(SENSOR_FRC_POWER_SUPPLY), + cpu_to_be16(normalize_power_rid(power->supplies[i].rid)) + }; + + node = sensor_get_node(sensors, &header, "data"); + + prlog(PR_TRACE, "SENSOR: Power[%d] : %d mW\n", + power->supplies[i].rid, + be32_to_cpu(power->supplies[i].milliwatts)); + + dt_add_property_cells(node, "sensor-id", + sensor_handler(header, SENSOR_DATA)); + } + return 0; +} + +static void add_sensor_ids(struct dt_node *sensors) +{ + uint8_t *sensor_buf_ptr = (uint8_t *)sensor_buffer; + struct spcn_mod *smod; + int i; + + for (smod = spcn_mod_data; smod->mod != SPCN_MOD_LAST; smod++) { + /* + * SPCN_MOD_SENSOR_POWER (0x1C) has a different layout. 
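+ * Rather than fixed-size records each starting with a sensor_header, it
+ * returns a single sensor_power block with one (rid, milliwatts) entry per
+ * supply, which is why it is parsed by the dedicated add_sensor_power()
+ * helper below.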
+ */ + if (smod->mod == SPCN_MOD_SENSOR_POWER) { + add_sensor_power(sensors, + (struct sensor_power *) sensor_buf_ptr); + + sensor_buf_ptr += smod->entry_size * smod->entry_count; + continue; + } + + for (i = 0; i < smod->entry_count; i++) { + struct sensor_header *header = + (struct sensor_header *) sensor_buf_ptr; + + if (!sensor_frc_is_valid(be16_to_cpu(header->frc))) + goto out_sensor; + + switch (smod->mod) { + case SPCN_MOD_PROC_JUNC_TEMP: + /* TODO Support this modifier '0x14', + if required */ + break; + + case SPCN_MOD_PRS_STATUS_FIRST: + case SPCN_MOD_PRS_STATUS_SUBS: + add_sensor_prs(sensors, + (struct sensor_prs *) header); + break; + + case SPCN_MOD_SENSOR_PARAM_FIRST: + case SPCN_MOD_SENSOR_PARAM_SUBS: + add_sensor_param(sensors, + (struct sensor_param *) header); + break; + + case SPCN_MOD_SENSOR_DATA_FIRST: + case SPCN_MOD_SENSOR_DATA_SUBS: + add_sensor_data(sensors, + (struct sensor_data *) header); + + break; + + default: + prerror("SENSOR: unknown modifier : %x\n", + smod->mod); + } + +out_sensor: + sensor_buf_ptr += smod->entry_size; + } + } +} + +static void add_opal_sensor_node(void) +{ + int index; + + if (!fsp_present()) + return; + + add_sensor_ids(sensor_node); + + /* Reset the entry count of each modifier */ + for (index = 0; spcn_mod_data[index].mod != SPCN_MOD_LAST; + index++) + spcn_mod_data[index].entry_count = 0; +} + +void fsp_init_sensor(void) +{ + uint32_t cmd_header, align, size, psi_dma_offset = 0; + enum spcn_rsp_status status; + struct fsp_msg msg, resp; + int index, rc; + + if (!fsp_present()) { + sensor_state = SENSOR_PERMANENT_ERROR; + return; + } + + sensor_buffer = memalign(TCE_PSIZE, SENSOR_MAX_SIZE); + if (!sensor_buffer) { + log_simple_error(&e_info(OPAL_RC_SENSOR_INIT), "SENSOR: could " + "not allocate sensor_buffer!\n"); + return; + } + + /* Map TCE */ + fsp_tce_map(PSI_DMA_SENSOR_BUF, sensor_buffer, PSI_DMA_SENSOR_BUF_SZ); + + msg.resp = &resp; + + /* Traverse using all the modifiers to know all the sensors available + * in the system */ + for (index = 0; spcn_mod_data[index].mod != SPCN_MOD_LAST && + sensor_state == SENSOR_VALID_DATA;) { + prlog(PR_TRACE, "Get the data for modifier [%d]\n", + spcn_mod_data[index].mod); + if (spcn_mod_data[index].mod == SPCN_MOD_PROC_JUNC_TEMP) { + /* TODO Support this modifier 0x14, if required */ + align = psi_dma_offset % sizeof(uint32_t); + if (align) + psi_dma_offset += (sizeof(uint32_t) - align); + + /* TODO Add 8 byte command data required for mod 0x14 */ + psi_dma_offset += 8; + + cmd_header = spcn_mod_data[index].mod << 24 | + SPCN_CMD_PRS << 16 | 0x0008; + } else { + cmd_header = spcn_mod_data[index].mod << 24 | + SPCN_CMD_PRS << 16; + } + + fsp_fillmsg(&msg, FSP_CMD_SPCN_PASSTHRU, 4, + SPCN_ADDR_MODE_CEC_NODE, cmd_header, 0, + PSI_DMA_SENSOR_BUF + psi_dma_offset); + + rc = fsp_sync_msg(&msg, false); + if (rc >= 0) { + status = (fsp_msg_get_data_word(&resp, 1) >> 24) & 0xff; + size = fsp_sensor_process_read(&resp); + psi_dma_offset += size; + spcn_mod_data[index].entry_count += (size / + spcn_mod_data[index].entry_size); + } else { + sensor_state = SENSOR_PERMANENT_ERROR; + break; + } + + switch (spcn_mod_data[index].mod) { + case SPCN_MOD_PRS_STATUS_FIRST: + case SPCN_MOD_SENSOR_PARAM_FIRST: + case SPCN_MOD_SENSOR_DATA_FIRST: + if (status == SPCN_RSP_STATUS_COND_SUCCESS) + index++; + else + index += 2; + + break; + case SPCN_MOD_PRS_STATUS_SUBS: + case SPCN_MOD_SENSOR_PARAM_SUBS: + case SPCN_MOD_SENSOR_DATA_SUBS: + if (status != SPCN_RSP_STATUS_COND_SUCCESS) + index++; + break; + case 
SPCN_MOD_SENSOR_POWER: + index++; + default: + break; + } + } + + if (sensor_state != SENSOR_VALID_DATA) + sensor_state = SENSOR_PERMANENT_ERROR; + else + add_opal_sensor_node(); +} diff --git a/roms/skiboot/hw/fsp/fsp-surveillance.c b/roms/skiboot/hw/fsp/fsp-surveillance.c new file mode 100644 index 000000000..84e6878f3 --- /dev/null +++ b/roms/skiboot/hw/fsp/fsp-surveillance.c @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* + * We don't want to go on the cart! + * + * Copyright 2013-2018 IBM Corp. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +static bool fsp_surv_state = false; +static bool fsp_surv_ack_pending = false; +static u64 surv_timer; +static u64 surv_ack_timer; +static u32 surv_state_param; +static struct lock surv_lock = LOCK_UNLOCKED; + +#define FSP_SURV_ACK_TIMEOUT 120 /* surv ack timeout in seconds */ + +DEFINE_LOG_ENTRY(OPAL_RC_SURVE_INIT, OPAL_MISC_ERR_EVT, OPAL_SURVEILLANCE, + OPAL_SURVEILLANCE_ERR, OPAL_PREDICTIVE_ERR_GENERAL, + OPAL_MISCELLANEOUS_INFO_ONLY); + +DEFINE_LOG_ENTRY(OPAL_RC_SURVE_STATUS, OPAL_MISC_ERR_EVT, OPAL_SURVEILLANCE, + OPAL_SURVEILLANCE_ERR, OPAL_PREDICTIVE_ERR_GENERAL, + OPAL_MISCELLANEOUS_INFO_ONLY); + +DEFINE_LOG_ENTRY(OPAL_RC_SURVE_ACK, OPAL_MISC_ERR_EVT, OPAL_SURVEILLANCE, + OPAL_SURVEILLANCE_ERR, OPAL_PREDICTIVE_ERR_GENERAL, + OPAL_MISCELLANEOUS_INFO_ONLY); + +static void fsp_surv_ack(struct fsp_msg *msg) +{ + uint8_t val; + + if (!msg->resp) + return; + + val = (msg->resp->word1 >> 8) & 0xff; + if (val == 0) { + /* reset the pending flag */ + prlog(PR_TRACE, + "SURV: Received heartbeat acknowledge from FSP\n"); + lock(&surv_lock); + fsp_surv_ack_pending = false; + unlock(&surv_lock); + } else { + /** + * @fwts-label FSPHeartbeatAckError + * @fwts-advice Error in acknowledging heartbeat to FSP. + * This could mean the FSP has gone away or it may mean + * the FSP may kill us for missing too many heartbeats. + */ + prlog(PR_ERR, + "SURV: Heartbeat Acknowledgment error from FSP\n"); + } + + fsp_freemsg(msg); +} + +static void fsp_surv_check_timeout(void) +{ + u64 now = mftb(); + + /* + * We just checked fsp_surv_ack_pending to be true in fsp_surv_hbeat + * and we haven't dropped the surv_lock between then and now. So, we + * just go ahead and check timeouts. + */ + if (tb_compare(now, surv_ack_timer) == TB_AAFTERB) { + uint32_t plid = log_simple_error(&e_info(OPAL_RC_SURVE_ACK), + "SURV: Surv ACK timed out; initiating R/R\n"); + + /* Reset the pending trigger too */ + fsp_surv_ack_pending = false; + fsp_trigger_reset(plid); + } + + return; +} + +/* Send surveillance heartbeat based on a timebase trigger */ +static void fsp_surv_hbeat(void) +{ + u64 now = mftb(); + struct fsp_msg *msg; + + /* Check if an ack is pending... if so, don't send the ping just yet */ + if (fsp_surv_ack_pending) { + fsp_surv_check_timeout(); + return; + } + + /* add timebase callbacks */ + /* + * XXX This packet needs to be pushed to FSP in an interval + * less than 120s that's advertised to FSP. + * + * Verify if the command building format and call is fine. 
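+ * As implemented below: each heartbeat advertises a 120 second window to
+ * the FSP (FSP_CMD_SURV_HBEAT with data word 120), the next heartbeat is
+ * armed 60 seconds out, and fsp_surv_check_timeout() initiates an FSP
+ * reset/reload if no acknowledgement arrives within FSP_SURV_ACK_TIMEOUT
+ * (120 seconds).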
+ */ + if (surv_timer == 0 || + (tb_compare(now, surv_timer) == TB_AAFTERB) || + (tb_compare(now, surv_timer) == TB_AEQUALB)) { + prlog(PR_TRACE, + "SURV: Sending the heartbeat command to FSP\n"); + msg = fsp_mkmsg(FSP_CMD_SURV_HBEAT, 1, 120); + if (!msg) { + prerror("SURV: Failed to allocate heartbeat msg\n"); + return; + } + if (fsp_queue_msg(msg, fsp_surv_ack)) { + fsp_freemsg(msg); + prerror("SURV: Failed to queue heartbeat msg\n"); + } else { + fsp_surv_ack_pending = true; + surv_timer = now + secs_to_tb(60); + surv_ack_timer = now + secs_to_tb(FSP_SURV_ACK_TIMEOUT); + } + } +} + +static void fsp_surv_poll(void *data __unused) +{ + if (!fsp_surv_state) + return; + lock(&surv_lock); + fsp_surv_hbeat(); + unlock(&surv_lock); +} + +static void fsp_surv_got_param(uint32_t param_id __unused, int err_len, + void *data __unused) +{ + if (err_len != 4) { + uint32_t plid = log_simple_error(&e_info(OPAL_RC_SURVE_STATUS), + "SURV: Error (%d) retrieving surv status; initiating R/R\n", + err_len); + fsp_trigger_reset(plid); + return; + } + + surv_state_param = be32_to_cpu((__be32)surv_state_param); + if (!(surv_state_param & 0x01)) { + prlog(PR_NOTICE, "SURV: Status from FSP: disabled\n"); + return; + } + prlog(PR_NOTICE, "SURV: Status from FSP: enabled\n"); + + lock(&surv_lock); + fsp_surv_state = true; + + /* Also send one heartbeat now. The next one will not happen + * until we hit the OS. + */ + fsp_surv_hbeat(); + unlock(&surv_lock); +} + +void fsp_surv_query(void) +{ + int rc; + + printf("SURV: Querying FSP's surveillance status\n"); + + /* Reset surveillance settings */ + lock(&surv_lock); + fsp_surv_state = false; + surv_timer = 0; + surv_ack_timer = 0; + unlock(&surv_lock); + + /* Query FPS for surveillance state */ + rc = fsp_get_sys_param(SYS_PARAM_SURV, &surv_state_param, 4, + fsp_surv_got_param, NULL); + if (rc) { + log_simple_error(&e_info(OPAL_RC_SURVE_INIT), + "SURV: Error %d queueing param request\n", rc); + } +} + +static bool fsp_surv_msg_rr(u32 cmd_sub_mod, struct fsp_msg *msg) +{ + assert(msg == NULL); + + switch (cmd_sub_mod) { + case FSP_RESET_START: + printf("SURV: Disabling surveillance\n"); + lock(&surv_lock); + fsp_surv_state = false; + fsp_surv_ack_pending = false; + unlock(&surv_lock); + return true; + case FSP_RELOAD_COMPLETE: + fsp_surv_query(); + return true; + } + return false; +} + +static struct fsp_client fsp_surv_client_rr = { + .message = fsp_surv_msg_rr, +}; + +/* This is called at boot time */ +void fsp_init_surveillance(void) +{ + /* Always register the poller, so we don't have to add/remove + * it on reset-reload or change of surveillance state. Also the + * poller list has no locking so we don't want to play with it + * at runtime. + */ + opal_add_poller(fsp_surv_poll, NULL); + + /* Register for the reset/reload event */ + fsp_register_client(&fsp_surv_client_rr, FSP_MCLASS_RR_EVENT); + + /* Send query to FSP */ + fsp_surv_query(); +} + diff --git a/roms/skiboot/hw/fsp/fsp-sysdump.c b/roms/skiboot/hw/fsp/fsp-sysdump.c new file mode 100644 index 000000000..cd8744062 --- /dev/null +++ b/roms/skiboot/hw/fsp/fsp-sysdump.c @@ -0,0 +1,407 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* + * Sapphire dump design: + * - During initialization we setup Memory Dump Source Table (MDST) table + * which contains address, size pair. + * - We send MDST table update notification to FSP via MBOX command. + * - During Sapphire checkstop: + * - FSP retrieves HWDUMP. + * - FSP retrieves CEC memory based on MDST table. 
+ * - Once Sapphire reboot FSP sends new dump avialable notification via HDAT + * + * Copyright 2013-2016 IBM Corp. + */ + +#include +#include +#include +#include +#include +#include +#include + +/* + * Sapphire dump size + * This is the maximum memory that FSP can retrieve during checkstop. + * + * Note: + * Presently we are hardcoding this parameter. Eventually we need + * new System parameter so that we can get max size dynamically. + */ +#define MAX_SAPPHIRE_DUMP_SIZE 0x1000000 + +DEFINE_LOG_ENTRY(OPAL_RC_DUMP_MDST_INIT, OPAL_PLATFORM_ERR_EVT, OPAL_DUMP, + OPAL_PLATFORM_FIRMWARE, OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_REBOOT, + OPAL_NA); + +DEFINE_LOG_ENTRY(OPAL_RC_DUMP_MDST_UPDATE, OPAL_PLATFORM_ERR_EVT, OPAL_DUMP, + OPAL_PLATFORM_FIRMWARE, + OPAL_PREDICTIVE_ERR_FAULT_RECTIFY_REBOOT, + OPAL_NA); + +DEFINE_LOG_ENTRY(OPAL_RC_DUMP_MDST_ADD, OPAL_PLATFORM_ERR_EVT, OPAL_DUMP, + OPAL_PLATFORM_FIRMWARE, OPAL_INFO, OPAL_NA); + +DEFINE_LOG_ENTRY(OPAL_RC_DUMP_MDST_REMOVE, OPAL_PLATFORM_ERR_EVT, OPAL_DUMP, + OPAL_PLATFORM_FIRMWARE, OPAL_INFO, OPAL_NA); + + +static struct mdst_table *mdst_table; +static struct mdst_table *dump_mem_region; + +static int cur_mdst_entry; +static int max_mdst_entry; +static int cur_dump_size; +/* + * Presently both sizes are same.. But if someday FSP gives more space + * than our TCE mapping then we need this validation.. + * + * Also once FSP implements MAX_SAPPHIRE_DUMP_SIZE system param, we can + * move this validation to separate function. + */ +static int max_dump_size = MIN(MAX_SAPPHIRE_DUMP_SIZE, PSI_DMA_HYP_DUMP_SIZE); + +/* Protect MDST table entries */ +static struct lock mdst_lock = LOCK_UNLOCKED; + +static inline uint32_t get_dump_region_map_size(uint64_t addr, uint32_t size) +{ + uint64_t start, end; + + start = addr & ~TCE_MASK; + end = addr + size; + end = ALIGN_UP(end, TCE_PSIZE); + + return (end - start); +} + +static int dump_region_tce_map(void) +{ + int i; + uint32_t t_size = 0, size; + uint64_t addr; + + for (i = 0; i < cur_mdst_entry; i++) { + + addr = be64_to_cpu(dump_mem_region[i].addr) & ~TCE_MASK; + size = get_dump_region_map_size(be64_to_cpu(dump_mem_region[i].addr), + be32_to_cpu(dump_mem_region[i].size)); + + if (t_size + size > max_dump_size) + break; + + /* TCE mapping */ + fsp_tce_map(PSI_DMA_HYP_DUMP + t_size, (void *)addr, size); + + /* Add entry to MDST table */ + mdst_table[i].data_region = dump_mem_region[i].data_region; + mdst_table[i].size = dump_mem_region[i].size; + mdst_table[i].addr = cpu_to_be64(PSI_DMA_HYP_DUMP + t_size); + + /* TCE alignment adjustment */ + mdst_table[i].addr = cpu_to_be64(be64_to_cpu(mdst_table[i].addr) + + (be64_to_cpu(dump_mem_region[i].addr) & 0xfff)); + + t_size += size; + } + + return i; +} + +static inline void dump_region_tce_unmap(void) +{ + fsp_tce_unmap(PSI_DMA_HYP_DUMP, PSI_DMA_HYP_DUMP_SIZE); +} + +static void update_mdst_table_complete(struct fsp_msg *msg) +{ + uint8_t status = (msg->resp->word1 >> 8) & 0xff; + + if (status) + log_simple_error(&e_info(OPAL_RC_DUMP_MDST_UPDATE), + "MDST: Update table MBOX command failed: " + "0x%x\n", status); + else + printf("MDST: Table updated.\n"); + + fsp_freemsg(msg); +} + +/* Send MDST table to FSP */ +static int64_t fsp_update_mdst_table(void) +{ + struct fsp_msg *msg; + int count; + int rc = OPAL_SUCCESS; + + if (cur_mdst_entry <= 0) { + printf("MDST: Table is empty\n"); + return OPAL_INTERNAL_ERROR; + } + + lock(&mdst_lock); + + /* Unmap previous mapping */ + dump_region_tce_unmap(); + count = dump_region_tce_map(); + + msg = 
fsp_mkmsg(FSP_CMD_HYP_MDST_TABLE, 4, 0, + PSI_DMA_MDST_TABLE, + sizeof(*mdst_table) * count, + sizeof(*mdst_table)); + unlock(&mdst_lock); + + if (!msg) { + log_simple_error(&e_info(OPAL_RC_DUMP_MDST_UPDATE), + "MDST: Message allocation failed.!\n"); + rc = OPAL_INTERNAL_ERROR; + } else if (fsp_queue_msg(msg, update_mdst_table_complete)) { + log_simple_error(&e_info(OPAL_RC_DUMP_MDST_UPDATE), + "MDST: Failed to queue MDST table message.\n"); + fsp_freemsg(msg); + rc = OPAL_INTERNAL_ERROR; + } + return rc; +} + +static int dump_region_del_entry(uint32_t id) +{ + int i; + uint32_t size; + bool found = false; + int rc = OPAL_SUCCESS; + + lock(&mdst_lock); + + for (i = 0; i < cur_mdst_entry; i++) { + if (dump_mem_region[i].data_region != id) + continue; + + found = true; + break; + } + + if (!found) { + rc = OPAL_PARAMETER; + goto del_out; + } + + /* Adjust current dump size */ + size = get_dump_region_map_size(be64_to_cpu(dump_mem_region[i].addr), + be32_to_cpu(dump_mem_region[i].size)); + cur_dump_size -= size; + + for ( ; i < cur_mdst_entry - 1; i++) + dump_mem_region[i] = dump_mem_region[i + 1]; + + dump_mem_region[i].data_region = 0; + cur_mdst_entry--; + +del_out: + unlock(&mdst_lock); + return rc; +} + +/* Add entry to MDST table */ +static int __dump_region_add_entry(uint32_t id, uint64_t addr, uint32_t size) +{ + int rc = OPAL_INTERNAL_ERROR; + uint32_t act_size; + + /* Delete function takes lock before modifying table */ + dump_region_del_entry(id); + + lock(&mdst_lock); + + if (cur_mdst_entry >= max_mdst_entry) { + log_simple_error(&e_info(OPAL_RC_DUMP_MDST_ADD), + "MDST: Table is full.\n"); + goto out; + } + + /* TCE alignment adjustment */ + act_size = get_dump_region_map_size(addr, size); + + /* Make sure we don't cross dump size limit */ + if (cur_dump_size + act_size > max_dump_size) { + log_simple_error(&e_info(OPAL_RC_DUMP_MDST_ADD), + "MDST: 0x%x is crossing max dump size (0x%x) limit.\n", + cur_dump_size + act_size, max_dump_size); + goto out; + } + + /* Add entry to dump memory region table */ + dump_mem_region[cur_mdst_entry].data_region = (u8)id; + dump_mem_region[cur_mdst_entry].addr = cpu_to_be64(addr); + dump_mem_region[cur_mdst_entry].size = cpu_to_be32(size); + + /* Update dump region count and dump size */ + cur_mdst_entry++; + cur_dump_size += act_size; + + printf("MDST: Addr = 0x%llx [size : 0x%x bytes] added to MDST table.\n", + (uint64_t)addr, size); + + rc = OPAL_SUCCESS; + +out: + unlock(&mdst_lock); + return rc; +} + +static int dump_region_add_entries(void) +{ + int rc; + + /* Add console buffer */ + rc = __dump_region_add_entry(DUMP_REGION_CONSOLE, + INMEM_CON_START, INMEM_CON_LEN); + if (rc) + return rc; + + /* Add HBRT buffer */ + rc = __dump_region_add_entry(DUMP_REGION_HBRT_LOG, + HBRT_CON_START, HBRT_CON_LEN); + + return rc; +} + +static int64_t fsp_opal_register_dump_region(uint32_t id, + uint64_t addr, uint64_t size) +{ + int rc = OPAL_SUCCESS; + + if (!fsp_present()) + return OPAL_UNSUPPORTED; + + /* Validate memory region id */ + if (id < DUMP_REGION_HOST_START || id > DUMP_REGION_HOST_END) { + log_simple_error(&e_info(OPAL_RC_DUMP_MDST_ADD), + "MDST: Invalid dump region id : 0x%x\n", id); + return OPAL_PARAMETER; + } + + if (size <= 0) { + log_simple_error(&e_info(OPAL_RC_DUMP_MDST_ADD), + "MDST: Invalid size : 0x%llx\n", size); + return OPAL_PARAMETER; + } + + rc = __dump_region_add_entry(id, addr, size); + if (rc) + return rc; + + /* Send updated MDST to FSP */ + rc = fsp_update_mdst_table(); + + return rc; +} + +static int64_t 
fsp_opal_unregister_dump_region(uint32_t id) +{ + int rc = OPAL_SUCCESS; + + if (!fsp_present()) + return OPAL_UNSUPPORTED; + + /* Validate memory region id */ + if (id < DUMP_REGION_HOST_START || id > DUMP_REGION_HOST_END) { + log_simple_error(&e_info(OPAL_RC_DUMP_MDST_REMOVE), + "MDST: Invalid dump region id : 0x%x\n", id); + return OPAL_PARAMETER; + } + + rc = dump_region_del_entry(id); + if (rc) { + log_simple_error(&e_info(OPAL_RC_DUMP_MDST_REMOVE), + "MDST: dump region id : 0x%x not found\n", id); + return OPAL_PARAMETER; + } + + /* Send updated MDST to FSP */ + rc = fsp_update_mdst_table(); + + return rc; +} + +/* TCE mapping */ +static inline void mdst_table_tce_map(void) +{ + fsp_tce_map(PSI_DMA_MDST_TABLE, mdst_table, PSI_DMA_MDST_TABLE_SIZE); +} + +/* Initialize MDST table */ +static int mdst_table_init(void) +{ + dump_mem_region = memalign(TCE_PSIZE, PSI_DMA_MDST_TABLE_SIZE); + if (!dump_mem_region) { + log_simple_error(&e_info(OPAL_RC_DUMP_MDST_INIT), + "MDST: Failed to allocate memory for dump " + "memory region table.\n"); + return -ENOMEM; + } + + memset(dump_mem_region, 0, PSI_DMA_MDST_TABLE_SIZE); + + mdst_table = memalign(TCE_PSIZE, PSI_DMA_MDST_TABLE_SIZE); + if (!mdst_table) { + log_simple_error(&e_info(OPAL_RC_DUMP_MDST_INIT), + "MDST: Failed to allocate memory for MDST table.\n"); + return -ENOMEM; + } + + memset(mdst_table, 0, PSI_DMA_MDST_TABLE_SIZE); + mdst_table_tce_map(); + + max_mdst_entry = PSI_DMA_MDST_TABLE_SIZE / sizeof(*mdst_table); + printf("MDST: Max entries in MDST table : %d\n", max_mdst_entry); + + return OPAL_SUCCESS; +} + +/* + * Handle FSP R/R event. + */ +static bool fsp_mdst_update_rr(uint32_t cmd_sub_mod, + struct fsp_msg *msg __unused) +{ + switch (cmd_sub_mod) { + case FSP_RESET_START: + return true; + case FSP_RELOAD_COMPLETE: /* Send MDST to FSP */ + fsp_update_mdst_table(); + return true; + } + return false; +} + +static struct fsp_client fsp_mdst_client_rr = { + .message = fsp_mdst_update_rr, +}; + +/* Initialize MDST table and send notification to FSP */ +void fsp_mdst_table_init(void) +{ + if (!fsp_present()) + return; + + /* OPAL interface */ + opal_register(OPAL_REGISTER_DUMP_REGION, + fsp_opal_register_dump_region, 3); + opal_register(OPAL_UNREGISTER_DUMP_REGION, + fsp_opal_unregister_dump_region, 1); + + /* Initiate MDST */ + if (mdst_table_init() != OPAL_SUCCESS) + return; + + /* + * Ignore the return code from dump_region_add_entries() so that + * we can at least capture a partial dump. + */ + dump_region_add_entries(); + fsp_update_mdst_table(); + + /* Register for Class AA (FSP R/R) */ + fsp_register_client(&fsp_mdst_client_rr, FSP_MCLASS_RR_EVENT); +} diff --git a/roms/skiboot/hw/fsp/fsp-sysparam.c b/roms/skiboot/hw/fsp/fsp-sysparam.c new file mode 100644 index 000000000..adb424e5e --- /dev/null +++ b/roms/skiboot/hw/fsp/fsp-sysparam.c @@ -0,0 +1,508 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* + * There are some system-level parameters that aren't available over IPMI + * or NVRAM but that the FSP exposes through this interface. + * + * We expose these through an OPAL API as there really isn't any other/better + * way of doing so. + * + * Copyright 2013-2017 IBM Corp.
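+ *
+ * Typical read-side usage (a sketch based on the surveillance caller in
+ * fsp-surveillance.c, not a new interface): pass a buffer, its length and
+ * a completion callback, and the callback runs once the FSP answers the
+ * mailbox query:
+ *
+ *    rc = fsp_get_sys_param(SYS_PARAM_SURV, &surv_state_param, 4,
+ *                           fsp_surv_got_param, NULL);
+ *
+ * Passing a NULL completion instead makes fsp_get_sys_param() spin on
+ * opal_run_pollers() and return the result synchronously.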
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct sysparam_comp_data { + uint32_t param_len; + uint64_t async_token; +}; + +struct sysparam_req { + sysparam_compl_t completion; + void *comp_data; + void *ubuf; + uint32_t ulen; + struct fsp_msg msg; + struct fsp_msg resp; + bool done; +}; + +static struct sysparam_attr { + const char *name; + uint32_t id; + uint32_t length; + uint8_t perm; +} sysparam_attrs[] = { +#define _R OPAL_SYSPARAM_READ +#define _W OPAL_SYSPARAM_WRITE +#define _RW OPAL_SYSPARAM_RW + {"surveillance", SYS_PARAM_SURV, 4, _RW}, + {"hmc-management", SYS_PARAM_HMC_MANAGED, 4, _R}, + {"cupd-policy", SYS_PARAM_FLASH_POLICY, 4, _RW}, + {"plat-hmc-managed", SYS_PARAM_NEED_HMC, 4, _RW}, + {"fw-license-policy", SYS_PARAM_FW_LICENSE, 4, _RW}, + {"world-wide-port-num", SYS_PARAM_WWPN, 12, _W}, + {"default-boot-device", SYS_PARAM_DEF_BOOT_DEV, 1, _RW}, + {"next-boot-device", SYS_PARAM_NEXT_BOOT_DEV,1, _RW}, + {"console-select", SYS_PARAM_CONSOLE_SELECT,1, _RW}, + {"boot-device-path", SYS_PARAM_BOOT_DEV_PATH,48, _RW} +#undef _R +#undef _W +#undef _RW +}; + +static int fsp_sysparam_process(struct sysparam_req *r) +{ + u32 param_id, len; + int stlen = 0; + u8 fstat; + /* Snapshot completion before we set the "done" flag */ + sysparam_compl_t comp = r->completion; + void *cdata = r->comp_data; + + if (r->msg.state != fsp_msg_done) { + prerror("FSP: Request for sysparam 0x%x got FSP failure!\n", + fsp_msg_get_data_word(&r->msg, 0)); + stlen = -1; /* XXX Find saner error codes */ + goto complete; + } + + param_id = fsp_msg_get_data_word(&r->resp, 0); + len = fsp_msg_get_data_word(&r->resp, 1) & 0xffff; + + /* Check params validity */ + if (param_id != fsp_msg_get_data_word(&r->msg, 0)) { + prerror("FSP: Request for sysparam 0x%x got resp. for 0x%x!\n", + fsp_msg_get_data_word(&r->msg, 0), param_id); + stlen = -2; /* XXX Sane error codes */ + goto complete; + } + if (len > r->ulen) { + prerror("FSP: Request for sysparam 0x%x truncated!\n", + param_id); + len = r->ulen; + } + + /* Decode the request status */ + fstat = (r->msg.resp->word1 >> 8) & 0xff; + switch(fstat) { + case 0x00: /* XXX Is that even possible ? 
*/ + case 0x11: /* Data in request */ + memcpy(r->ubuf, &r->resp.data.bytes[8], len); + /* fallthrough */ + case 0x12: /* Data in TCE */ + stlen = len; + break; + default: + stlen = -fstat; + } + complete: + /* Call completion if any */ + if (comp) + comp(fsp_msg_get_data_word(&r->msg, 0), stlen, cdata); + + free(r); + + return stlen; +} + +static void fsp_sysparam_get_complete(struct fsp_msg *msg) +{ + struct sysparam_req *r = container_of(msg, struct sysparam_req, msg); + + /* If it's an asynchronous request, process it now */ + if (r->completion) { + fsp_sysparam_process(r); + return; + } + + /* Else just set the done flag */ + + /* Another CPU can be polling on the "done" flag without the + * lock held, so let's order the udpates to the structure + */ + lwsync(); + r->done = true; +} + +int fsp_get_sys_param(uint32_t param_id, void *buffer, uint32_t length, + sysparam_compl_t async_complete, void *comp_data) +{ + struct sysparam_req *r; + uint64_t baddr, tce_token; + int rc; + + if (!fsp_present()) + return -ENODEV; + /* + * XXX FIXME: We currently always allocate the sysparam_req here + * however, we want to avoid runtime allocations as much as + * possible, so if this is going to be used a lot at runtime, + * we probably want to pre-allocate a pool of these + */ + if (length > 4096) + return -EINVAL; + r = zalloc(sizeof(struct sysparam_req)); + if (!r) + return -ENOMEM; + r->completion = async_complete; + r->comp_data = comp_data; + r->done = false; + r->ubuf = buffer; + r->ulen = length; + r->msg.resp = &r->resp; + + /* Map always 1 page ... easier that way and none of that + * is performance critical + */ + baddr = (uint64_t)buffer; + fsp_tce_map(PSI_DMA_GET_SYSPARAM, (void *)(baddr & ~0xffful), 0x1000); + tce_token = PSI_DMA_GET_SYSPARAM | (baddr & 0xfff); + fsp_fillmsg(&r->msg, FSP_CMD_QUERY_SPARM, 3, + param_id, length, tce_token); + rc = fsp_queue_msg(&r->msg, fsp_sysparam_get_complete); + + if (rc) + free(r); + + /* Asynchronous operation or queueing failure, return */ + if (rc || async_complete) + return rc; + + /* Synchronous operation requested, spin and process */ + while(!r->done) + opal_run_pollers(); + + /* Will free the request */ + return fsp_sysparam_process(r); +} + +static void fsp_opal_getparam_complete(uint32_t param_id __unused, int err_len, + void *data) +{ + struct sysparam_comp_data *comp_data = data; + int rc = OPAL_SUCCESS; + + if (comp_data->param_len != err_len) + rc = OPAL_INTERNAL_ERROR; + + opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL, + cpu_to_be64(comp_data->async_token), + cpu_to_be64(rc)); + free(comp_data); +} + +static void fsp_opal_setparam_complete(struct fsp_msg *msg) +{ + struct sysparam_comp_data *comp_data = msg->user_data; + u8 fstat; + uint32_t param_id; + int rc = OPAL_SUCCESS; + + if (msg->state != fsp_msg_done) { + prerror("FSP: Request for set sysparam 0x%x got FSP failure!\n", + fsp_msg_get_data_word(msg, 0)); + rc = OPAL_INTERNAL_ERROR; + goto out; + } + + param_id = fsp_msg_get_data_word(msg->resp, 0); + if (param_id != fsp_msg_get_data_word(msg, 0)) { + prerror("FSP: Request for set sysparam 0x%x got resp. for 0x%x!" 
+ "\n", fsp_msg_get_data_word(msg, 0), param_id); + rc = OPAL_INTERNAL_ERROR; + goto out; + } + + fstat = (msg->resp->word1 >> 8) & 0xff; + switch (fstat) { + case 0x00: + rc = OPAL_SUCCESS; + break; + case 0x22: + prerror("%s: Response status 0x%x, invalid data\n", __func__, + fstat); + rc = OPAL_INTERNAL_ERROR; + break; + case 0x24: + prerror("%s: Response status 0x%x, DMA error\n", __func__, + fstat); + rc = OPAL_INTERNAL_ERROR; + break; + default: + rc = OPAL_INTERNAL_ERROR; + break; + } + +out: + opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL, + cpu_to_be64(comp_data->async_token), + cpu_to_be64(rc)); + free(comp_data); + fsp_freemsg(msg); +} + +/* OPAL interface for PowerNV to read the system parameter from FSP */ +static int64_t fsp_opal_get_param(uint64_t async_token, uint32_t param_id, + uint64_t buffer, uint64_t length) +{ + struct sysparam_comp_data *comp_data; + int count, rc, i; + + if (!fsp_present()) + return OPAL_HARDWARE; + + count = ARRAY_SIZE(sysparam_attrs); + for (i = 0; i < count; i++) + if (sysparam_attrs[i].id == param_id) + break; + if (i == count) + return OPAL_PARAMETER; + + if (length < sysparam_attrs[i].length) + return OPAL_PARAMETER; + if (!(sysparam_attrs[i].perm & OPAL_SYSPARAM_READ)) + return OPAL_PERMISSION; + + comp_data = zalloc(sizeof(struct sysparam_comp_data)); + if (!comp_data) + return OPAL_NO_MEM; + + comp_data->param_len = sysparam_attrs[i].length; + comp_data->async_token = async_token; + rc = fsp_get_sys_param(param_id, (void *)buffer, + sysparam_attrs[i].length, fsp_opal_getparam_complete, + comp_data); + if (rc) { + free(comp_data); + prerror("%s: Error %d queuing param request\n", __func__, rc); + return OPAL_INTERNAL_ERROR; + } + + return OPAL_ASYNC_COMPLETION; +} + +/* OPAL interface for PowerNV to update the system parameter to FSP */ +static int64_t fsp_opal_set_param(uint64_t async_token, uint32_t param_id, + uint64_t buffer, uint64_t length) +{ + struct sysparam_comp_data *comp_data; + struct fsp_msg *msg; + uint64_t tce_token; + int count, rc, i; + + if (!fsp_present()) + return OPAL_HARDWARE; + + count = ARRAY_SIZE(sysparam_attrs); + for (i = 0; i < count; i++) + if (sysparam_attrs[i].id == param_id) + break; + if (i == count) + return OPAL_PARAMETER; + + if (length < sysparam_attrs[i].length) + return OPAL_PARAMETER; + if (!(sysparam_attrs[i].perm & OPAL_SYSPARAM_WRITE)) + return OPAL_PERMISSION; + + fsp_tce_map(PSI_DMA_SET_SYSPARAM, (void *)(buffer & ~0xffful), 0x1000); + tce_token = PSI_DMA_SET_SYSPARAM | (buffer & 0xfff); + + msg = fsp_mkmsg(FSP_CMD_SET_SPARM_2, 4, param_id, length, + tce_token >> 32, tce_token); + if (!msg) { + prerror("%s: Failed to allocate the message\n", __func__); + return OPAL_INTERNAL_ERROR; + } + + comp_data = zalloc(sizeof(struct sysparam_comp_data)); + if (!comp_data) { + fsp_freemsg(msg); + return OPAL_NO_MEM; + } + + comp_data->param_len = length; + comp_data->async_token = async_token; + msg->user_data = comp_data; + + rc = fsp_queue_msg(msg, fsp_opal_setparam_complete); + if (rc) { + free(comp_data); + fsp_freemsg(msg); + prerror("%s: Failed to queue the message\n", __func__); + return OPAL_INTERNAL_ERROR; + } + + return OPAL_ASYNC_COMPLETION; +} + +struct sysparam_notify_entry { + struct list_node link; + sysparam_update_notify notify; +}; + +static LIST_HEAD(sysparam_update_notifiers); + +/* Add client to notifier chain */ +void sysparam_add_update_notifier(sysparam_update_notify notify) +{ + struct sysparam_notify_entry *entry; + + entry = zalloc(sizeof(struct sysparam_notify_entry)); + 
assert(entry); + + entry->notify = notify; + list_add_tail(&sysparam_update_notifiers, &entry->link); +} + +/* Remove client from notifier chain */ +void sysparam_del_update_notifier(sysparam_update_notify notify) +{ + struct sysparam_notify_entry *entry; + + list_for_each(&sysparam_update_notifiers, entry, link) { + if (entry->notify == notify) { + list_del(&entry->link); + free(entry); + return; + } + } +} + +/* Update notification chain */ +static void sysparam_run_update_notifier(struct fsp_msg *msg) +{ + bool ret; + struct sysparam_notify_entry *entry; + + list_for_each(&sysparam_update_notifiers, entry, link) { + ret = entry->notify(msg); + if (ret == true) + break; + } +} + +static bool fsp_sysparam_msg(u32 cmd_sub_mod, struct fsp_msg *msg) +{ + struct fsp_msg *rsp; + int rc = -ENOMEM; + + switch(cmd_sub_mod) { + case FSP_CMD_SP_SPARM_UPD_0: + case FSP_CMD_SP_SPARM_UPD_1: + printf("FSP: Got sysparam update, param ID 0x%x\n", + fsp_msg_get_data_word(msg, 0)); + + sysparam_run_update_notifier(msg); + + rsp = fsp_mkmsg((cmd_sub_mod & 0xffff00) | 0x008000, 0); + if (rsp) + rc = fsp_queue_msg(rsp, fsp_freemsg); + if (rc) { + prerror("FSP: Error %d queuing sysparam reply\n", rc); + /* What to do here ? R/R ? */ + fsp_freemsg(rsp); + } + return true; + } + return false; +} + +static struct fsp_client fsp_sysparam_client = { + .message = fsp_sysparam_msg, +}; + +static void add_opal_sysparam_node(void) +{ + struct dt_node *sysparams; + char *names, *s; + __be32 *ids, *lens; + uint8_t *perms; + unsigned int i, count, size = 0; + + if (!fsp_present()) + return; + + sysparams = dt_new(opal_node, "sysparams"); + dt_add_property_string(sysparams, "compatible", "ibm,opal-sysparams"); + + count = ARRAY_SIZE(sysparam_attrs); + for (i = 0; i < count; i++) + size = size + strlen(sysparam_attrs[i].name) + 1; + + names = zalloc(size); + if (!names) { + prerror("%s: Failed to allocate memory for parameter names\n", + __func__); + return; + } + + ids = zalloc(count * sizeof(*ids)); + if (!ids) { + prerror("%s: Failed to allocate memory for parameter ids\n", + __func__); + goto out_free_name; + } + + lens = zalloc(count * sizeof(*lens)); + if (!lens) { + prerror("%s: Failed to allocate memory for parameter length\n", + __func__); + goto out_free_id; + } + + perms = zalloc(count * sizeof(*perms)); + if (!perms) { + prerror("%s: Failed to allocate memory for parameter length\n", + __func__); + goto out_free_len; + } + + s = names; + for (i = 0; i < count; i++) { + strcpy(s, sysparam_attrs[i].name); + s = s + strlen(sysparam_attrs[i].name) + 1; + + ids[i] = cpu_to_be32(sysparam_attrs[i].id); + lens[i] = cpu_to_be32(sysparam_attrs[i].length); + perms[i] = sysparam_attrs[i].perm; + } + + dt_add_property(sysparams, "param-name", names, size); + dt_add_property(sysparams, "param-id", ids, count * sizeof(*ids)); + dt_add_property(sysparams, "param-len", lens, count * sizeof(*lens)); + dt_add_property(sysparams, "param-perm", perms, count * sizeof(*perms)); + + free(perms); + +out_free_len: + free(lens); +out_free_id: + free(ids); +out_free_name: + free(names); +} + +void fsp_sysparam_init(void) +{ + if (!fsp_present()) + return; + + /* Register change notifications */ + fsp_register_client(&fsp_sysparam_client, FSP_MCLASS_SERVICE); + + /* Register OPAL interfaces */ + opal_register(OPAL_GET_PARAM, fsp_opal_get_param, 4); + opal_register(OPAL_SET_PARAM, fsp_opal_set_param, 4); + + /* Add device-tree nodes */ + add_opal_sysparam_node(); +} diff --git a/roms/skiboot/hw/fsp/fsp.c b/roms/skiboot/hw/fsp/fsp.c new file 
mode 100644 index 000000000..2c5f9d71b --- /dev/null +++ b/roms/skiboot/hw/fsp/fsp.c @@ -0,0 +1,2709 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +/* + * Base FSP (Flexible Service Processor) Support + * + * FSP is the BMC-like thing in some IBM POWER servers + * + * Copyright 2013-2019 IBM Corp. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern uint32_t hir_trigger; + +DEFINE_LOG_ENTRY(OPAL_RC_FSP_POLL_TIMEOUT, OPAL_PLATFORM_ERR_EVT, OPAL_FSP, + OPAL_PLATFORM_FIRMWARE, OPAL_RECOVERED_ERR_GENERAL, OPAL_NA); + +DEFINE_LOG_ENTRY(OPAL_RC_FSP_MBOX_ERR, OPAL_PLATFORM_ERR_EVT, OPAL_FSP, + OPAL_PLATFORM_FIRMWARE, OPAL_RECOVERED_ERR_GENERAL, OPAL_NA); + +DEFINE_LOG_ENTRY(OPAL_RC_FSP_DISR_HIR_MASK, OPAL_PLATFORM_ERR_EVT, OPAL_FSP, + OPAL_PLATFORM_FIRMWARE, OPAL_RECOVERED_ERR_GENERAL, OPAL_NA); + +/* We make this look like a Surveillance error, even though it really + * isn't one. + */ +DEFINE_LOG_ENTRY(OPAL_INJECTED_HIR, OPAL_MISC_ERR_EVT, OPAL_SURVEILLANCE, + OPAL_SURVEILLANCE_ERR, OPAL_PREDICTIVE_ERR_GENERAL, + OPAL_MISCELLANEOUS_INFO_ONLY); + +#define FSP_TRACE_MSG +#define FSP_TRACE_EVENT + +#define FSP_MAX_IOPATH 4 + +enum fsp_path_state { + fsp_path_bad, + fsp_path_backup, + fsp_path_active, +}; + +struct fsp_iopath { + enum fsp_path_state state; + void *fsp_regs; + struct psi *psi; +}; + +enum fsp_mbx_state { + fsp_mbx_idle, /* Mailbox ready to send */ + fsp_mbx_send, /* Mailbox sent, waiting for ack */ + fsp_mbx_crit_op, /* Critical operation in progress */ + fsp_mbx_prep_for_reset, /* Prepare for reset sent */ + fsp_mbx_hir_seq_done, /* HIR sequence done, link forced down */ + fsp_mbx_err, /* Mailbox in error state, waiting for r&r */ + fsp_mbx_rr, /* Mailbox in r&r */ +}; + +struct fsp { + struct fsp *link; + unsigned int index; + enum fsp_mbx_state state; + struct fsp_msg *pending; + + unsigned int iopath_count; + int active_iopath; /* -1: no active IO path */ + struct fsp_iopath iopath[FSP_MAX_IOPATH]; +}; + +enum ipl_state { + ipl_initial = 0x00000000, + ipl_opl_sent = 0x00000001, + ipl_got_continue = 0x00000002, + ipl_got_new_role = 0x00000004, + ipl_got_caps = 0x00000008, + ipl_got_fsp_functional = 0x00000010 +}; +static enum ipl_state ipl_state = ipl_initial; + +static struct fsp *first_fsp; +static struct fsp *active_fsp; +static u16 fsp_curseq = 0x8000; +static __be64 *fsp_tce_table; + +#define FSP_INBOUND_SIZE 0x00100000UL +static void *fsp_inbound_buf = NULL; +static u32 fsp_inbound_off; + +static struct lock fsp_lock = LOCK_UNLOCKED; +static struct lock fsp_poll_lock = LOCK_UNLOCKED; + +static u64 fsp_cmdclass_resp_bitmask; +static u64 timeout_timer; + +static u64 fsp_hir_timeout; + +#define FSP_CRITICAL_OP_TIMEOUT 128 +#define FSP_DRCR_CLEAR_TIMEOUT 128 + +/* LID numbers. 
For now we hijack some of pHyp's own until i figure + * out the whole business with the MasterLID + */ +#define KERNEL_LID_PHYP 0x80a00701 +#define KERNEL_LID_OPAL 0x80f00101 +#define INITRAMFS_LID_OPAL 0x80f00102 + +/* + * We keep track on last logged values for some things to print only on + * value changes, but also to relieve pressure on the tracer which + * doesn't do a very good job at detecting repeats when called from + * many different CPUs + */ +static u32 disr_last_print; +static u32 drcr_last_print; +static u32 hstate_last_print; + +void fsp_handle_resp(struct fsp_msg *msg); + +struct fsp_cmdclass { + int timeout; + bool busy; + struct list_head msgq; + struct list_head clientq; + struct list_head rr_queue; /* To queue up msgs during R/R */ + u64 timesent; +}; + +static struct fsp_cmdclass fsp_cmdclass_rr; + +static struct fsp_cmdclass fsp_cmdclass[FSP_MCLASS_LAST - FSP_MCLASS_FIRST + 1] += { +#define DEF_CLASS(_cl, _to) [_cl - FSP_MCLASS_FIRST] = { .timeout = _to } + DEF_CLASS(FSP_MCLASS_SERVICE, 16), + DEF_CLASS(FSP_MCLASS_PCTRL_MSG, 16), + DEF_CLASS(FSP_MCLASS_PCTRL_ABORTS, 16), + DEF_CLASS(FSP_MCLASS_ERR_LOG, 16), + DEF_CLASS(FSP_MCLASS_CODE_UPDATE, 40), + DEF_CLASS(FSP_MCLASS_FETCH_SPDATA, 16), + DEF_CLASS(FSP_MCLASS_FETCH_HVDATA, 16), + DEF_CLASS(FSP_MCLASS_NVRAM, 16), + DEF_CLASS(FSP_MCLASS_MBOX_SURV, 2), + DEF_CLASS(FSP_MCLASS_RTC, 16), + DEF_CLASS(FSP_MCLASS_SMART_CHIP, 20), + DEF_CLASS(FSP_MCLASS_INDICATOR, 180), + DEF_CLASS(FSP_MCLASS_HMC_INTFMSG, 16), + DEF_CLASS(FSP_MCLASS_HMC_VT, 16), + DEF_CLASS(FSP_MCLASS_HMC_BUFFERS, 16), + DEF_CLASS(FSP_MCLASS_SHARK, 16), + DEF_CLASS(FSP_MCLASS_MEMORY_ERR, 16), + DEF_CLASS(FSP_MCLASS_CUOD_EVENT, 16), + DEF_CLASS(FSP_MCLASS_HW_MAINT, 16), + DEF_CLASS(FSP_MCLASS_VIO, 16), + DEF_CLASS(FSP_MCLASS_SRC_MSG, 16), + DEF_CLASS(FSP_MCLASS_DATA_COPY, 16), + DEF_CLASS(FSP_MCLASS_TONE, 16), + DEF_CLASS(FSP_MCLASS_VIRTUAL_NVRAM, 16), + DEF_CLASS(FSP_MCLASS_TORRENT, 16), + DEF_CLASS(FSP_MCLASS_NODE_PDOWN, 16), + DEF_CLASS(FSP_MCLASS_DIAG, 16), + DEF_CLASS(FSP_MCLASS_PCIE_LINK_TOPO, 16), + DEF_CLASS(FSP_MCLASS_OCC, 16), + DEF_CLASS(FSP_MCLASS_TRUSTED_BOOT, 2), + DEF_CLASS(FSP_MCLASS_HBRT, 2), +}; + +static void fsp_trace_msg(struct fsp_msg *msg, u8 dir __unused) +{ + union trace fsp __unused; +#ifdef FSP_TRACE_MSG + size_t len = offsetof(struct trace_fsp_msg, data[msg->dlen]); + + fsp.fsp_msg.dlen = msg->dlen; + fsp.fsp_msg.word0 = cpu_to_be32(msg->word0); + fsp.fsp_msg.word1 = cpu_to_be32(msg->word1); + fsp.fsp_msg.dir = dir; + memcpy(fsp.fsp_msg.data, msg->data.bytes, msg->dlen); + trace_add(&fsp, TRACE_FSP_MSG, len); +#endif /* FSP_TRACE_MSG */ + assert(msg->dlen <= sizeof(fsp.fsp_msg.data)); +} + +static struct fsp *fsp_get_active(void) +{ + /* XXX Handle transition between FSPs */ + return active_fsp; +} + +static u64 fsp_get_class_bit(u8 class) +{ + /* Alias classes CE and CF as the FSP has a single queue */ + if (class == FSP_MCLASS_IPL) + class = FSP_MCLASS_SERVICE; + + return 1ul << (class - FSP_MCLASS_FIRST); +} + +static struct fsp_cmdclass *__fsp_get_cmdclass(u8 class) +{ + struct fsp_cmdclass *ret; + + /* RR class is special */ + if (class == FSP_MCLASS_RR_EVENT) + return &fsp_cmdclass_rr; + + /* Bound check */ + if (class < FSP_MCLASS_FIRST || class > FSP_MCLASS_LAST) + return NULL; + + /* Alias classes CE and CF as the FSP has a single queue */ + if (class == FSP_MCLASS_IPL) + class = FSP_MCLASS_SERVICE; + + ret = &fsp_cmdclass[class - FSP_MCLASS_FIRST]; + + /* Unknown class */ + if (ret->timeout == 0) + return NULL; + + return 
ret; +} + +static struct fsp_cmdclass *fsp_get_cmdclass(struct fsp_msg *msg) +{ + u8 c = msg->word0 & 0xff; + + return __fsp_get_cmdclass(c); +} + +static struct fsp_msg *__fsp_allocmsg(void) +{ + return zalloc(sizeof(struct fsp_msg)); +} + +struct fsp_msg *fsp_allocmsg(bool alloc_response) +{ + struct fsp_msg *msg; + + msg = __fsp_allocmsg(); + if (!msg) + return NULL; + if (alloc_response) { + msg->resp = __fsp_allocmsg(); + if (!msg->resp) { + free(msg); + return NULL; + } + } + + return msg; +} + +void __fsp_freemsg(struct fsp_msg *msg) +{ + free(msg); +} + +void fsp_freemsg(struct fsp_msg *msg) +{ + if (msg && msg->resp) + __fsp_freemsg(msg->resp); + __fsp_freemsg(msg); +} + +void fsp_cancelmsg(struct fsp_msg *msg) +{ + bool need_unlock = false; + struct fsp_cmdclass* cmdclass = fsp_get_cmdclass(msg); + + if (!fsp_in_rr()) { + prerror("FSP: Message cancel allowed only when" + "FSP is in reset\n"); + return; + } + + if (!cmdclass) + return; + + /* Recursive locking */ + need_unlock = lock_recursive(&fsp_lock); + + list_del(&msg->link); + msg->state = fsp_msg_cancelled; + + if (need_unlock) + unlock(&fsp_lock); +} + +static void fsp_wreg(struct fsp *fsp, u32 reg, u32 val) +{ + struct fsp_iopath *iop; + + if (fsp->active_iopath < 0) + return; + iop = &fsp->iopath[fsp->active_iopath]; + if (iop->state == fsp_path_bad) + return; + out_be32(iop->fsp_regs + reg, val); +} + +static u32 fsp_rreg(struct fsp *fsp, u32 reg) +{ + struct fsp_iopath *iop; + + if (fsp->active_iopath < 0) + return 0xffffffff; + iop = &fsp->iopath[fsp->active_iopath]; + if (iop->state == fsp_path_bad) + return 0xffffffff; + return in_be32(iop->fsp_regs + reg); +} + +static void fsp_reg_dump(void) +{ +#define FSP_DUMP_ONE(x) \ + prlog(PR_DEBUG, " %20s: %x\n", #x, fsp_rreg(fsp, x)); + + struct fsp *fsp = fsp_get_active(); + + if (!fsp) + return; + + prlog(PR_DEBUG, "FSP #%d: Register dump (state=%d)\n", + fsp->index, fsp->state); + FSP_DUMP_ONE(FSP_DRCR_REG); + FSP_DUMP_ONE(FSP_DISR_REG); + FSP_DUMP_ONE(FSP_MBX1_HCTL_REG); + FSP_DUMP_ONE(FSP_MBX1_FCTL_REG); + FSP_DUMP_ONE(FSP_MBX2_HCTL_REG); + FSP_DUMP_ONE(FSP_MBX2_FCTL_REG); + FSP_DUMP_ONE(FSP_SDES_REG); + FSP_DUMP_ONE(FSP_HDES_REG); + FSP_DUMP_ONE(FSP_HDIR_REG); + FSP_DUMP_ONE(FSP_HDIM_SET_REG); + FSP_DUMP_ONE(FSP_PDIR_REG); + FSP_DUMP_ONE(FSP_PDIM_SET_REG); + FSP_DUMP_ONE(FSP_SCRATCH0_REG); + FSP_DUMP_ONE(FSP_SCRATCH1_REG); + FSP_DUMP_ONE(FSP_SCRATCH2_REG); + FSP_DUMP_ONE(FSP_SCRATCH3_REG); +} + +static void fsp_notify_rr_state(u32 state) +{ + struct fsp_client *client, *next; + struct fsp_cmdclass *cmdclass = __fsp_get_cmdclass(FSP_MCLASS_RR_EVENT); + + assert(cmdclass); + list_for_each_safe(&cmdclass->clientq, client, next, link) + client->message(state, NULL); +} + +static void fsp_reset_cmdclass(void) +{ + int i; + struct fsp_msg *msg; + + /* + * The FSP is in reset and hence we can't expect any response + * to outstanding messages that we've already sent. Clear the + * bitmap to reflect that. 
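+ *
+ * The stashed messages are not lost: when the FSP later reports
+ * FSP_CMD_SP_RELOAD_COMP, fsp_repost_queued_msgs_post_rr() (further down
+ * in this file) moves them back and pokes the queues, roughly:
+ *
+ *    msg = list_pop(&cmdclass->rr_queue, struct fsp_msg, link);
+ *    list_add_tail(&cmdclass->msgq, &msg->link);
+ *    fsp_poke_queue(cmdclass);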
+ */ + fsp_cmdclass_resp_bitmask = 0; + for (i = 0; i <= (FSP_MCLASS_LAST - FSP_MCLASS_FIRST); i++) { + struct fsp_cmdclass *cmdclass = &fsp_cmdclass[i]; + cmdclass->busy = false; + cmdclass->timesent = 0; + + /* Make sure the message queue is empty */ + while(!list_empty(&cmdclass->msgq)) { + msg = list_pop(&cmdclass->msgq, struct fsp_msg, + link); + list_add_tail(&cmdclass->rr_queue, &msg->link); + } + } +} + +static bool fsp_in_hir(struct fsp *fsp) +{ + switch (fsp->state) { + case fsp_mbx_crit_op: + case fsp_mbx_prep_for_reset: + return true; + default: + return false; + } +} + +static bool fsp_in_reset(struct fsp *fsp) +{ + switch (fsp->state) { + case fsp_mbx_hir_seq_done: /* FSP reset triggered */ + case fsp_mbx_err: /* Will be reset soon */ + case fsp_mbx_rr: /* Mbx activity stopped pending reset */ + return true; + default: + return false; + } +} + +bool fsp_in_rr(void) +{ + struct fsp *fsp = fsp_get_active(); + struct fsp_iopath *iop; + + if (fsp->active_iopath < 0) + return true; + + iop = &fsp->iopath[fsp->active_iopath]; + + if (fsp_in_reset(fsp) || fsp_in_hir(fsp) || !(psi_check_link_active(iop->psi))) + return true; + + return false; +} + +static bool fsp_hir_state_timeout(void) +{ + u64 now = mftb(); + + if (tb_compare(now, fsp_hir_timeout) == TB_AAFTERB) + return true; + + return false; +} + +static void fsp_set_hir_timeout(u32 seconds) +{ + u64 now = mftb(); + fsp_hir_timeout = now + secs_to_tb(seconds); +} + +static bool fsp_crit_op_in_progress(struct fsp *fsp) +{ + u32 disr = fsp_rreg(fsp, FSP_DISR_REG); + + if (disr & FSP_DISR_CRIT_OP_IN_PROGRESS) + return true; + + return false; +} + +/* Notify the FSP that it will be reset soon by writing to the DRCR */ +static void fsp_prep_for_reset(struct fsp *fsp) +{ + u32 drcr; + + /* + * Its possible that the FSP went into reset by itself between the + * time the HIR is triggered and we get here. Check and bail out if so. + */ + if (fsp_in_rr()) + return; + + drcr = fsp_rreg(fsp, FSP_DRCR_REG); + + prlog(PR_TRACE, "FSP: Writing reset to DRCR\n"); + drcr_last_print = drcr; + fsp_wreg(fsp, FSP_DRCR_REG, (drcr | FSP_PREP_FOR_RESET_CMD)); + fsp->state = fsp_mbx_prep_for_reset; + fsp_set_hir_timeout(FSP_DRCR_CLEAR_TIMEOUT); +} + +static void fsp_hir_poll(struct fsp *fsp, struct psi *psi) +{ + u32 drcr; + + if (fsp_in_reset(fsp) || !(psi_check_link_active(psi))) + return; + + switch (fsp->state) { + case fsp_mbx_crit_op: + if (fsp_crit_op_in_progress(fsp)) { + if (fsp_hir_state_timeout()) + prerror("FSP: Critical operation timeout\n"); + /* XXX What do do next? Check with FSP folks */ + } else { + fsp_prep_for_reset(fsp); + } + break; + case fsp_mbx_prep_for_reset: + drcr = fsp_rreg(fsp, FSP_DRCR_REG); + + if (drcr != drcr_last_print) { + prlog(PR_TRACE, "FSP: DRCR changed, old = %x," + " new = %x\n", + drcr_last_print, drcr); + drcr_last_print = drcr; + } + + if (drcr & FSP_DRCR_ACK_MASK) { + if (fsp_hir_state_timeout()) { + prerror("FSP: Ack timeout. Triggering reset\n"); + psi_reset_fsp(psi); + fsp->state = fsp_mbx_hir_seq_done; + } + } else { + prlog(PR_TRACE, "FSP: DRCR ack received." + " Triggering reset\n"); + psi_reset_fsp(psi); + fsp->state = fsp_mbx_hir_seq_done; + } + break; + default: + break; + } +} + +/* + * This is the main entry for the host initiated reset case. + * This gets called when: + * a. Surveillance ack is not received in 120 seconds + * b. A mailbox command doesn't get a response within the stipulated time. 
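+ *
+ * Callers do not invoke this directly; they go through fsp_trigger_reset()
+ * below, passing a platform log ID that explains the reason. A sketch of
+ * the usual pattern (borrowed from the surveillance status handler in
+ * fsp-surveillance.c, nothing new):
+ *
+ *    plid = log_simple_error(&e_info(OPAL_RC_SURVE_STATUS),
+ *        "SURV: Error (%d) retrieving surv status; initiating R/R\n",
+ *        err_len);
+ *    fsp_trigger_reset(plid);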
+ */ +static void __fsp_trigger_reset(void) +{ + struct fsp *fsp = fsp_get_active(); + u32 disr; + + /* Already in one of the error processing states */ + if (fsp_in_hir(fsp) || fsp_in_reset(fsp)) + return; + + prerror("FSP: fsp_trigger_reset() entry\n"); + + drcr_last_print = 0; + /* + * Check if we are allowed to reset the FSP. We aren't allowed to + * reset the FSP if the FSP_DISR_DBG_IN_PROGRESS is set. + */ + disr = fsp_rreg(fsp, FSP_DISR_REG); + if (disr & FSP_DISR_DBG_IN_PROGRESS) { + prerror("FSP: Host initiated reset disabled\n"); + return; + } + + /* + * Check if some critical operation is in progress as indicated + * by FSP_DISR_CRIT_OP_IN_PROGRESS. Timeout is 128 seconds + */ + if (fsp_crit_op_in_progress(fsp)) { + prlog(PR_NOTICE, "FSP: Critical operation in progress\n"); + fsp->state = fsp_mbx_crit_op; + fsp_set_hir_timeout(FSP_CRITICAL_OP_TIMEOUT); + } else + fsp_prep_for_reset(fsp); +} + +static uint32_t fsp_hir_reason_plid; + +void fsp_trigger_reset(uint32_t plid) +{ + lock(&fsp_lock); + fsp_hir_reason_plid = plid; + __fsp_trigger_reset(); + unlock(&fsp_lock); +} + +/* + * Called when we trigger a HIR or when the FSP tells us via the DISR's + * RR bit that one is impending. We should therefore stop all mbox activity. + */ +static void fsp_start_rr(struct fsp *fsp) +{ + struct fsp_iopath *iop; + + if (fsp->state == fsp_mbx_rr) + return; + + /* We no longer have an active path on that FSP */ + if (fsp->active_iopath >= 0) { + iop = &fsp->iopath[fsp->active_iopath]; + iop->state = fsp_path_bad; + fsp->active_iopath = -1; + } + fsp->state = fsp_mbx_rr; + disr_last_print = 0; + hstate_last_print = 0; + + /* + * Mark all command classes as non-busy and clear their + * timeout, then flush all messages in our staging queue + */ + fsp_reset_cmdclass(); + + /* Notify clients. We have to drop the lock here */ + unlock(&fsp_lock); + fsp_notify_rr_state(FSP_RESET_START); + lock(&fsp_lock); + + /* + * Unlike earlier, we don't trigger the PSI link polling + * from this point. We wait for the PSI interrupt to tell + * us the FSP is really down and then start the polling there. + */ +} + +/* + * Called on normal/quick shutdown to give up the PSI link + */ +void fsp_reset_links(void) +{ + struct fsp *fsp = fsp_get_active(); + struct fsp_iopath *iop; + + if (!fsp) + return; + + /* Already in one of the error states? */ + if (fsp_in_hir(fsp) || fsp_in_reset(fsp)) + return; + + iop = &fsp->iopath[fsp->active_iopath]; + prlog(PR_NOTICE, "FSP #%d: Host initiated shutdown." 
+ " Giving up the PSI link\n", fsp->index); + psi_disable_link(iop->psi); + return; +} + +static void fsp_trace_event(struct fsp *fsp, u32 evt, + u32 data0, u32 data1, u32 data2, u32 data3) +{ + union trace tfsp __unused; +#ifdef FSP_TRACE_EVENT + size_t len = sizeof(struct trace_fsp_event); + + tfsp.fsp_evt.event = cpu_to_be16(evt); + tfsp.fsp_evt.fsp_state = cpu_to_be16(fsp->state); + tfsp.fsp_evt.data[0] = cpu_to_be32(data0); + tfsp.fsp_evt.data[1] = cpu_to_be32(data1); + tfsp.fsp_evt.data[2] = cpu_to_be32(data2); + tfsp.fsp_evt.data[3] = cpu_to_be32(data3); + trace_add(&tfsp, TRACE_FSP_EVENT, len); +#endif /* FSP_TRACE_EVENT */ +} + +static void fsp_handle_errors(struct fsp *fsp) +{ + u32 hstate; + struct fsp_iopath *iop; + struct psi *psi; + u32 disr; + + if (fsp->active_iopath < 0) { + prerror("FSP #%d: fsp_handle_errors() with no active IOP\n", + fsp->index); + return; + } + + iop = &fsp->iopath[fsp->active_iopath]; + if (!iop->psi) { + prerror("FSP: Active IOP with no PSI link !\n"); + return; + } + psi = iop->psi; + + /* + * If the link is not up, start R&R immediately, we do call + * psi_disable_link() in this case as while the link might + * not be up, it might still be enabled and the PSI layer + * "active" bit still set + */ + if (!psi_check_link_active(psi)) { + /* Start R&R process */ + fsp_trace_event(fsp, TRACE_FSP_EVT_LINK_DOWN, 0, 0, 0, 0); + prerror("FSP #%d: Link down, starting R&R\n", fsp->index); + + fsp_start_rr(fsp); + return; + } + + /* Link is up, check for other conditions */ + disr = fsp_rreg(fsp, FSP_DISR_REG); + + /* If in R&R, log values */ + if (disr != disr_last_print) { + fsp_trace_event(fsp, TRACE_FSP_EVT_DISR_CHG, disr, 0, 0, 0); + + prlog(PR_TRACE, "FSP #%d: DISR stat change = 0x%08x\n", + fsp->index, disr); + disr_last_print = disr; + } + + /* On a deferred mbox error, trigger a HIR + * Note: We may never get here since the link inactive case is handled + * above and the other case is when the iop->psi is NULL, which is + * quite rare. + */ + if (fsp->state == fsp_mbx_err) { + uint32_t plid; + plid = log_simple_error(&e_info(OPAL_RC_FSP_MBOX_ERR), + "FSP #%d: Triggering HIR on mbx_err\n", + fsp->index); + fsp_trigger_reset(plid); + return; + } + + /* + * If we get here as part of normal flow, the FSP is telling + * us that there will be an impending R&R, so we stop all mbox + * activity. The actual link down trigger is via a PSI + * interrupt that may arrive in due course. + */ + if (disr & FSP_DISR_FSP_IN_RR) { + /* + * If we get here with DEBUG_IN_PROGRESS also set, the + * FSP is in debug and we should *not* reset it now + */ + if (disr & FSP_DISR_DBG_IN_PROGRESS) + return; + + /* + * When the linux comes back up, we still see that bit + * set for a bit, so just move on, nothing to see here + */ + if (fsp->state == fsp_mbx_rr) + return; + + if (fsp_dpo_pending) { + /* + * If we are about to process a reset when DPO + * is pending, its possible that the host has + * gone down, and OPAL is on its way down and + * hence will not see the subsequent PSI interrupt. + * So, just give up the link here. + */ + prlog(PR_NOTICE, "FSP #%d: FSP reset with DPO pending." + " Giving up PSI link\n", + fsp->index); + psi_disable_link(psi); + } else { + prlog(PR_NOTICE, "FSP #%d: FSP in Reset." + " Waiting for PSI interrupt\n", + fsp->index); + } + fsp_start_rr(fsp); + } + + /* + * However, if any of Unit Check or Runtime Termintated or + * Flash Terminated bits is also set, the FSP is asking us + * to trigger a HIR so it can try to recover via the DRCR route. 
+ */ + if (disr & FSP_DISR_HIR_TRIGGER_MASK) { + const char *reason = "Unknown FSP_DISR_HIR_TRIGGER"; + uint32_t plid; + fsp_trace_event(fsp, TRACE_FSP_EVT_SOFT_RR, disr, 0, 0, 0); + + if (disr & FSP_DISR_FSP_UNIT_CHECK) + reason = "DISR Unit Check set"; + else if (disr & FSP_DISR_FSP_RUNTIME_TERM) + reason = "DISR Runtime Terminate set"; + else if (disr & FSP_DISR_FSP_FLASH_TERM) + reason = "DISR Flash Terminate set"; + + plid = log_simple_error(&e_info(OPAL_RC_FSP_DISR_HIR_MASK), + "FSP: %s. Triggering host initiated " + "reset.", reason); + + /* Clear all interrupt conditions */ + fsp_wreg(fsp, FSP_HDIR_REG, FSP_DBIRQ_ALL); + + /* Make sure this happened */ + fsp_rreg(fsp, FSP_HDIR_REG); + + fsp_trigger_reset(plid); + return; + } + + /* + * We detect an R&R complete indication, acknowledge it + */ + if (disr & FSP_DISR_FSP_RR_COMPLETE) { + /* + * Acking this bit doesn't make it go away immediately, so + * only do it while still in R&R state + */ + if (fsp->state == fsp_mbx_rr) { + fsp_trace_event(fsp, TRACE_FSP_EVT_RR_COMPL, 0,0,0,0); + + prlog(PR_NOTICE, "FSP #%d: Detected R&R complete," + " acking\n", fsp->index); + + /* Clear HDATA area */ + fsp_wreg(fsp, FSP_MBX1_HDATA_AREA, 0xff); + + /* Ack it (XDN) and clear HPEND & counts */ + fsp_wreg(fsp, FSP_MBX1_HCTL_REG, + FSP_MBX_CTL_PTS | + FSP_MBX_CTL_XDN | + FSP_MBX_CTL_HPEND | + FSP_MBX_CTL_HCSP_MASK | + FSP_MBX_CTL_DCSP_MASK); + + /* + * Mark the mbox as usable again so we can process + * incoming messages + */ + fsp->state = fsp_mbx_idle; + + /* Also clear R&R complete bit in DISR */ + fsp_wreg(fsp, FSP_DISR_REG, FSP_DISR_FSP_RR_COMPLETE); + + psi_enable_fsp_interrupt(psi); + } + } + + /* + * XXX + * + * Here we detect a number of errors, should we initiate + * an R&R ? + */ + + hstate = fsp_rreg(fsp, FSP_HDES_REG); + if (hstate != hstate_last_print) { + fsp_trace_event(fsp, TRACE_FSP_EVT_HDES_CHG, hstate, 0, 0, 0); + + prlog(PR_DEBUG, "FSP #%d: HDES stat change = 0x%08x\n", + fsp->index, hstate); + hstate_last_print = hstate; + } + + if (hstate == 0xffffffff) + return; + + /* Clear errors */ + fsp_wreg(fsp, FSP_HDES_REG, FSP_DBERRSTAT_CLR1); + + /* + * Most of those errors shouldn't have happened, we just clear + * the error state and return. In the long run, we might want + * to start retrying commands, switching FSPs or links, etc... + * + * We currently don't set our mailbox to a permanent error state. + */ + if (hstate & FSP_DBERRSTAT_ILLEGAL1) + prerror("FSP #%d: Illegal command error !\n", fsp->index); + + if (hstate & FSP_DBERRSTAT_WFULL1) + prerror("FSP #%d: Write to a full mbox !\n", fsp->index); + + if (hstate & FSP_DBERRSTAT_REMPTY1) + prerror("FSP #%d: Read from an empty mbox !\n", fsp->index); + + if (hstate & FSP_DBERRSTAT_PAR1) + prerror("FSP #%d: Parity error !\n", fsp->index); +} + +/* + * This is called just before fsp_post_msg() to check if the mbox + * is in a state that allows sending of a message + * + * Due to the various "interesting" contexts fsp_post_msg() + * can be called from, including recursive locks from lock + * error messages or console code, this should avoid doing + * anything more complex than checking a bit of state. + * + * Specifically, we cannot initiate an R&R and call back into + * clients etc... from this function. + * + * The best we can do is to set the mbox in error state and + * handle it later during a poll or interrupts.
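+ *
+ * For reference, the send path that reaches this check looks roughly like
+ * this (derived from the functions below, not a new interface):
+ *
+ *    fsp_queue_msg(msg, comp)
+ *      -> fsp_poke_queue(cmdclass)
+ *           -> fsp_check_can_send(fsp)    <- may flag fsp_mbx_err
+ *           -> fsp_post_msg(fsp, msg)     <- only if the check passed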
+ */ +static bool fsp_check_can_send(struct fsp *fsp) +{ + struct fsp_iopath *iop; + struct psi *psi; + + /* Look for FSP in non-idle state */ + if (fsp->state != fsp_mbx_idle) + return false; + + /* Look for an active IO path */ + if (fsp->active_iopath < 0) + goto mbox_error; + iop = &fsp->iopath[fsp->active_iopath]; + if (!iop->psi) { + prerror("FSP: Active IOP with no PSI link !\n"); + goto mbox_error; + } + psi = iop->psi; + + /* Check if link has gone down. This will be handled later */ + if (!psi_check_link_active(psi)) { + prerror("FSP #%d: Link seems to be down on send\n", fsp->index); + goto mbox_error; + } + + /* XXX Do we want to check for other error conditions ? */ + return true; + + /* + * An error of some case occurred, we'll handle it later + * from a more normal "poll" context + */ + mbox_error: + fsp->state = fsp_mbx_err; + return false; +} + +static bool fsp_post_msg(struct fsp *fsp, struct fsp_msg *msg) +{ + u32 ctl, reg; + int i, wlen; + + prlog(PR_INSANE, "FSP #%d: fsp_post_msg (w0: 0x%08x w1: 0x%08x)\n", + fsp->index, msg->word0, msg->word1); + + /* Note: We used to read HCTL here and only modify some of + * the bits in it. This was bogus, because we would write back + * the incoming bits as '1' and clear them, causing fsp_poll() + * to then miss them. Let's just start with 0, which is how + * I suppose the HW intends us to do. + */ + + /* Set ourselves as busy */ + fsp->pending = msg; + fsp->state = fsp_mbx_send; + msg->state = fsp_msg_sent; + + /* We trace after setting the mailbox state so that if the + * tracing recurses, it ends up just queuing the message up + */ + fsp_trace_msg(msg, TRACE_FSP_MSG_OUT); + + /* Build the message in the mailbox */ + reg = FSP_MBX1_HDATA_AREA; + fsp_wreg(fsp, reg, msg->word0); reg += 4; + fsp_wreg(fsp, reg, msg->word1); reg += 4; + wlen = (msg->dlen + 3) >> 2; + for (i = 0; i < wlen; i++) { + fsp_wreg(fsp, reg, fsp_msg_get_data_word(msg, i)); + reg += 4; + } + + /* Write the header */ + fsp_wreg(fsp, FSP_MBX1_HHDR0_REG, (msg->dlen + 8) << 16); + + /* Write the control register */ + ctl = 4 << FSP_MBX_CTL_HCHOST_SHIFT; + ctl |= (msg->dlen + 8) << FSP_MBX_CTL_DCHOST_SHIFT; + ctl |= FSP_MBX_CTL_PTS | FSP_MBX_CTL_SPPEND; + prlog(PR_INSANE, " new ctl: %08x\n", ctl); + fsp_wreg(fsp, FSP_MBX1_HCTL_REG, ctl); + + return true; +} + +static void fsp_poke_queue(struct fsp_cmdclass *cmdclass) +{ + struct fsp *fsp = fsp_get_active(); + struct fsp_msg *msg; + + if (!fsp) + return; + if (!fsp_check_can_send(fsp)) + return; + + /* From here to the point where fsp_post_msg() sets fsp->state + * to !idle we must not cause any re-entrancy (no debug or trace) + * in a code path that may hit fsp_post_msg() (it's ok to do so + * if we are going to bail out), as we are committed to calling + * fsp_post_msg() and so a re-entrancy could cause us to do a + * double-send into the mailbox. 
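+ *
+ * A hypothetical way to break this rule, for illustration only: a debug
+ * print added in this window that reaches the FSP console driver, which
+ * itself queues a message and pokes another command class while
+ * fsp->state still reads fsp_mbx_idle:
+ *
+ *    fsp_poke_queue()
+ *      -> prlog(...)                      <- hypothetical print here
+ *           -> fsp console output path
+ *                -> fsp_queue_msg()
+ *                     -> fsp_poke_queue() <- second post, double send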
+ */ + if (cmdclass->busy || list_empty(&cmdclass->msgq)) + return; + + msg = list_top(&cmdclass->msgq, struct fsp_msg, link); + assert(msg); + cmdclass->busy = true; + + if (!fsp_post_msg(fsp, msg)) { + prerror("FSP #%d: Failed to send message\n", fsp->index); + cmdclass->busy = false; + return; + } +} + +static void __fsp_fillmsg(struct fsp_msg *msg, u32 cmd_sub_mod, + u8 add_words, va_list list) +{ + bool response = !!(cmd_sub_mod & 0x1000000); + u8 cmd = (cmd_sub_mod >> 16) & 0xff; + u8 sub = (cmd_sub_mod >> 8) & 0xff; + u8 mod = cmd_sub_mod & 0xff; + int i; + + msg->word0 = cmd & 0xff; + msg->word1 = mod << 8 | sub; + msg->response = response; + msg->dlen = add_words << 2; + + for (i = 0; i < add_words; i++) + fsp_msg_set_data_word(msg, i, va_arg(list, unsigned int)); +} + +void fsp_fillmsg(struct fsp_msg *msg, u32 cmd_sub_mod, u32 add_words, ...) +{ + va_list list; + + va_start(list, add_words); + __fsp_fillmsg(msg, cmd_sub_mod, add_words, list); + va_end(list); +} + +struct fsp_msg *fsp_mkmsg(u32 cmd_sub_mod, u32 add_words, ...) +{ + struct fsp_msg *msg = fsp_allocmsg(!!(cmd_sub_mod & 0x1000000)); + va_list list; + + if (!msg) { + prerror("FSP: Failed to allocate struct fsp_msg\n"); + return NULL; + } + + va_start(list, add_words); + __fsp_fillmsg(msg, cmd_sub_mod, add_words, list); + va_end(list); + + return msg; +} + +/* + * IMPORTANT NOTE: This is *guaranteed* to not call the completion + * routine recusrively for *any* fsp message, either the + * queued one or a previous one. Thus it is *ok* to call + * this function with a lock held which will itself be + * taken by the completion function. + * + * Any change to this implementation must respect this + * rule. This will be especially true of things like + * reset/reload and error handling, if we fail to queue + * we must just return an error, not call any completion + * from the scope of fsp_queue_msg(). + */ +int fsp_queue_msg(struct fsp_msg *msg, void (*comp)(struct fsp_msg *msg)) +{ + struct fsp_cmdclass *cmdclass; + struct fsp *fsp = fsp_get_active(); + bool need_unlock; + u16 seq; + int rc = 0; + + if (!fsp || !msg) + return -1; + + /* Recursive locking */ + need_unlock = lock_recursive(&fsp_lock); + + /* Grab a new sequence number */ + seq = fsp_curseq; + fsp_curseq = fsp_curseq + 1; + if (fsp_curseq == 0) + fsp_curseq = 0x8000; + msg->word0 = (msg->word0 & 0xffff) | seq << 16; + + /* Set completion */ + msg->complete = comp; + + /* Clear response state */ + if (msg->resp) + msg->resp->state = fsp_msg_unused; + + /* Queue the message in the appropriate queue */ + cmdclass = fsp_get_cmdclass(msg); + if (!cmdclass) { + prerror("FSP: Invalid msg in fsp_queue_msg w0/1=0x%08x/%08x\n", + msg->word0, msg->word1); + rc = -1; + goto unlock; + } + + msg->state = fsp_msg_queued; + + /* + * If we have initiated or about to initiate a reset/reload operation, + * we stash the message on the R&R backup queue. Otherwise, queue it + * normally and poke the HW + */ + if (fsp_in_hir(fsp) || fsp_in_reset(fsp)) + list_add_tail(&cmdclass->rr_queue, &msg->link); + else { + list_add_tail(&cmdclass->msgq, &msg->link); + fsp_poke_queue(cmdclass); + } + + unlock: + if (need_unlock) + unlock(&fsp_lock); + + return rc; +} + +/* WARNING: This will drop the FSP lock !!! 
*/ +static void fsp_complete_msg(struct fsp_msg *msg) +{ + struct fsp_cmdclass *cmdclass = fsp_get_cmdclass(msg); + void (*comp)(struct fsp_msg *msg); + + assert(cmdclass); + + prlog(PR_INSANE, " completing msg, word0: 0x%08x\n", msg->word0); + + comp = msg->complete; + list_del_from(&cmdclass->msgq, &msg->link); + cmdclass->busy = false; + msg->state = fsp_msg_done; + + unlock(&fsp_lock); + if (comp) + (*comp)(msg); + lock(&fsp_lock); +} + +/* WARNING: This will drop the FSP lock !!! */ +static void fsp_complete_send(struct fsp *fsp) +{ + struct fsp_msg *msg = fsp->pending; + struct fsp_cmdclass *cmdclass = fsp_get_cmdclass(msg); + + assert(msg); + assert(cmdclass); + + fsp->pending = NULL; + + prlog(PR_INSANE, " completing send, word0: 0x%08x, resp: %d\n", + msg->word0, msg->response); + + if (msg->response) { + u64 setbit = fsp_get_class_bit(msg->word0 & 0xff); + msg->state = fsp_msg_wresp; + fsp_cmdclass_resp_bitmask |= setbit; + cmdclass->timesent = mftb(); + } else + fsp_complete_msg(msg); +} + +static void fsp_alloc_inbound(struct fsp_msg *msg) +{ + u16 func_id = fsp_msg_get_data_word(msg, 0) & 0xffff; + u32 len = fsp_msg_get_data_word(msg, 1); + u32 tce_token = 0, act_len = 0; + u8 rc = 0; + void *buf; + struct fsp_msg *resp; + + prlog(PR_DEBUG, "FSP: Allocate inbound buffer func: %04x len: %d\n", + func_id, len); + + lock(&fsp_lock); + if ((fsp_inbound_off + len) > FSP_INBOUND_SIZE) { + prerror("FSP: Out of space in buffer area !\n"); + rc = 0xeb; + goto reply; + } + + if (!fsp_inbound_buf) { + fsp_inbound_buf = memalign(TCE_PSIZE, FSP_INBOUND_SIZE); + if (!fsp_inbound_buf) { + prerror("FSP: could not allocate fsp_inbound_buf!\n"); + rc = 0xeb; + goto reply; + } + } + + buf = fsp_inbound_buf + fsp_inbound_off; + tce_token = PSI_DMA_INBOUND_BUF + fsp_inbound_off; + len = (len + TCE_MASK) & ~TCE_MASK; + fsp_inbound_off += len; + fsp_tce_map(tce_token, buf, len); + prlog(PR_DEBUG, "FSP: -> buffer at 0x%p, TCE: 0x%08x, alen: 0x%x\n", + buf, tce_token, len); + act_len = len; + + reply: + unlock(&fsp_lock); + + resp = fsp_mkmsg(FSP_RSP_ALLOC_INBOUND | rc, 3, 0, tce_token, act_len); + if (!resp) { + prerror("FSP: response message allocation failed\n"); + return; + } + if (fsp_queue_msg(resp, fsp_freemsg)) { + fsp_freemsg(resp); + prerror("FSP: Failed to queue response message\n"); + return; + } +} + +void *fsp_inbound_buf_from_tce(u32 tce_token) +{ + u32 offset = tce_token - PSI_DMA_INBOUND_BUF; + + if (tce_token < PSI_DMA_INBOUND_BUF || offset >= fsp_inbound_off) { + prerror("FSP: TCE token 0x%x out of bounds\n", tce_token); + return NULL; + } + return fsp_inbound_buf + offset; +} + +static void fsp_repost_queued_msgs_post_rr(void) +{ + struct fsp_msg *msg; + int i; + + for (i = 0; i <= (FSP_MCLASS_LAST - FSP_MCLASS_FIRST); i++) { + struct fsp_cmdclass *cmdclass = &fsp_cmdclass[i]; + bool poke = false; + + while(!list_empty(&cmdclass->rr_queue)) { + msg = list_pop(&cmdclass->rr_queue, + struct fsp_msg, link); + list_add_tail(&cmdclass->msgq, &msg->link); + poke = true; + } + if (poke) + fsp_poke_queue(cmdclass); + } +} + +static bool fsp_local_command(u32 cmd_sub_mod, struct fsp_msg *msg) +{ + u32 cmd = 0; + u32 rsp_data = 0; + struct fsp_msg *resp; + + switch(cmd_sub_mod) { + case FSP_CMD_CONTINUE_IPL: + /* We get a CONTINUE_IPL as a response to OPL */ + prlog(PR_NOTICE, "FSP: Got CONTINUE_IPL !\n"); + ipl_state |= ipl_got_continue; + return true; + + case FSP_CMD_HV_STATE_CHG: + prlog(PR_NOTICE, "FSP: Got HV state change request to %d\n", + msg->data.bytes[0]); + + /* Send response 
synchronously for now, we might want to + * deal with that sort of stuff asynchronously if/when + * we add support for auto-freeing of messages + */ + resp = fsp_mkmsg(FSP_RSP_HV_STATE_CHG, 0); + if (!resp) + prerror("FSP: Failed to allocate HV state response\n"); + else { + if (fsp_queue_msg(resp, fsp_freemsg)) { + fsp_freemsg(resp); + prerror("FSP: Failed to queue HV state resp\n"); + } + } + return true; + + case FSP_CMD_SP_NEW_ROLE: + /* FSP is assuming a new role */ + prlog(PR_INFO, "FSP: FSP assuming new role\n"); + resp = fsp_mkmsg(FSP_RSP_SP_NEW_ROLE, 0); + if (!resp) + prerror("FSP: Failed to allocate SP role response\n"); + else { + if (fsp_queue_msg(resp, fsp_freemsg)) { + fsp_freemsg(resp); + prerror("FSP: Failed to queue SP role resp\n"); + } + } + ipl_state |= ipl_got_new_role; + return true; + + case FSP_CMD_SP_QUERY_CAPS: + prlog(PR_INFO, "FSP: FSP query capabilities\n"); + /* XXX Do something saner. For now do a synchronous + * response and hard code our capabilities + */ + resp = fsp_mkmsg(FSP_RSP_SP_QUERY_CAPS, 4, 0x3ff80000, 0, 0, 0); + if (!resp) + prerror("FSP: Failed to allocate CAPS response\n"); + else { + if (fsp_queue_msg(resp, fsp_freemsg)) { + fsp_freemsg(resp); + prerror("FSP: Failed to queue CAPS resp\n"); + } + } + ipl_state |= ipl_got_caps; + return true; + case FSP_CMD_FSP_FUNCTNAL: + prlog(PR_INFO, "FSP: Got FSP Functional\n"); + ipl_state |= ipl_got_fsp_functional; + return true; + case FSP_CMD_ALLOC_INBOUND: + fsp_alloc_inbound(msg); + return true; + case FSP_CMD_SP_RELOAD_COMP: + if (msg->data.bytes[3] & PPC_BIT8(0)) { + fsp_fips_dump_notify(fsp_msg_get_data_word(msg, 1), + fsp_msg_get_data_word(msg, 2)); + + if (msg->data.bytes[3] & PPC_BIT8(1)) + prlog(PR_DEBUG, " PLID is %x\n", + fsp_msg_get_data_word(msg, 3)); + } + if (msg->data.bytes[3] & PPC_BIT8(2)) { + prlog(PR_INFO, "FSP: SP Reset/Reload was NOT done\n"); + } else { + prlog(PR_INFO, "FSP: SP says Reset/Reload complete\n"); + /* Notify clients that the FSP is back up */ + fsp_notify_rr_state(FSP_RELOAD_COMPLETE); + fsp_repost_queued_msgs_post_rr(); + } + return true; + case FSP_CMD_CLOSE_HMC_INTF: + /* Close the HMC interface */ + /* Though Sapphire does not support a HMC connection, the FSP + * sends this message when it is trying to open any new + * hypervisor session. So returning an error 0x51. 
+ */ + cmd = FSP_RSP_CLOSE_HMC_INTF | FSP_STAUS_INVALID_HMC_ID; + rsp_data = msg->data.bytes[0] << 24 | msg->data.bytes[1] << 16; + rsp_data &= 0xffff0000; + resp = fsp_mkmsg(cmd, 1, rsp_data); + if (!resp) + prerror("FSP: Failed to allocate HMC close response\n"); + else { + if (fsp_queue_msg(resp, fsp_freemsg)) { + fsp_freemsg(resp); + prerror("FSP: Failed to queue HMC close resp\n"); + } + } + return true; + case FSP_CMD_GET_HIR_PLID: + /* Get Platform Log Id with reason for Host Initiated Reset */ + prlog(PR_DEBUG, "FSP: Sending PLID 0x%x as HIR reason\n", + fsp_hir_reason_plid); + resp = fsp_mkmsg(FSP_RSP_GET_HIR_PLID, 1, fsp_hir_reason_plid); + if (!resp) + prerror("FSP: Failed to allocate GET_HIR_PLID response\n"); + else { + if (fsp_queue_msg(resp, fsp_freemsg)) { + fsp_freemsg(resp); + prerror("FSP: Failed to queue GET_HIR_PLID resp\n"); + } + } + fsp_hir_reason_plid = 0; + return true; + } + return false; +} + + +/* This is called without the FSP lock */ +static void fsp_handle_command(struct fsp_msg *msg) +{ + struct fsp_cmdclass *cmdclass = fsp_get_cmdclass(msg); + struct fsp_client *client, *next; + struct fsp_msg *resp; + u32 cmd_sub_mod; + + if (!cmdclass) { + prerror("FSP: Got message for unknown class %x\n", + msg->word0 & 0xff); + goto free; + } + + cmd_sub_mod = (msg->word0 & 0xff) << 16; + cmd_sub_mod |= (msg->word1 & 0xff) << 8; + cmd_sub_mod |= (msg->word1 >> 8) & 0xff; + + /* Some commands are handled locally */ + if (fsp_local_command(cmd_sub_mod, msg)) + goto free; + + /* The rest go to clients */ + list_for_each_safe(&cmdclass->clientq, client, next, link) { + if (client->message(cmd_sub_mod, msg)) + goto free; + } + + prerror("FSP: Unhandled message %06x\n", cmd_sub_mod); + + /* We don't know whether the message expected some kind of + * response, so we send one anyway + */ + resp = fsp_mkmsg((cmd_sub_mod & 0xffff00) | 0x008020, 0); + if (!resp) + prerror("FSP: Failed to allocate default response\n"); + else { + if (fsp_queue_msg(resp, fsp_freemsg)) { + fsp_freemsg(resp); + prerror("FSP: Failed to queue default response\n"); + } + } + + free: + fsp_freemsg(msg); +} + +static void __fsp_fill_incoming(struct fsp *fsp, struct fsp_msg *msg, + int dlen, u32 w0, u32 w1) +{ + unsigned int wlen, i, reg; + + msg->dlen = dlen - 8; + msg->word0 = w0; + msg->word1 = w1; + wlen = (dlen + 3) >> 2; + reg = FSP_MBX1_FDATA_AREA + 8; + for (i = 0; i < wlen; i++) { + fsp_msg_set_data_word(msg, i, fsp_rreg(fsp, reg)); + reg += 4; + } + + /* Ack it (XDN) and clear HPEND & counts */ + fsp_wreg(fsp, FSP_MBX1_HCTL_REG, + FSP_MBX_CTL_PTS | + FSP_MBX_CTL_XDN | + FSP_MBX_CTL_HPEND | + FSP_MBX_CTL_HCSP_MASK | + FSP_MBX_CTL_DCSP_MASK); + + fsp_trace_msg(msg, TRACE_FSP_MSG_IN); +} + +static void __fsp_drop_incoming(struct fsp *fsp) +{ + /* Ack it (XDN) and clear HPEND & counts */ + fsp_wreg(fsp, FSP_MBX1_HCTL_REG, + FSP_MBX_CTL_PTS | + FSP_MBX_CTL_XDN | + FSP_MBX_CTL_HPEND | + FSP_MBX_CTL_HCSP_MASK | + FSP_MBX_CTL_DCSP_MASK); +} + +/* WARNING: This will drop the FSP lock */ +static void fsp_handle_incoming(struct fsp *fsp) +{ + struct fsp_msg *msg; + u32 h0, w0, w1; + unsigned int dlen; + bool special_response = false; + + h0 = fsp_rreg(fsp, FSP_MBX1_FHDR0_REG); + dlen = (h0 >> 16) & 0xff; + + w0 = fsp_rreg(fsp, FSP_MBX1_FDATA_AREA); + w1 = fsp_rreg(fsp, FSP_MBX1_FDATA_AREA + 4); + + prlog(PR_INSANE, " Incoming: w0: 0x%08x, w1: 0x%08x, dlen: %d\n", + w0, w1, dlen); + + /* Some responses are expected out of band */ + if ((w0 & 0xff) == FSP_MCLASS_HMC_INTFMSG && + ((w1 & 0xff) == 0x8a || ((w1 & 
0xff) == 0x8b))) + special_response = true; + + /* Check for response bit */ + if (w1 & 0x80 && !special_response) { + struct fsp_cmdclass *cmdclass = __fsp_get_cmdclass(w0 & 0xff); + struct fsp_msg *req; + + if (!cmdclass) { + prerror("FSP: Got response for unknown class %x\n", + w0 & 0xff); + __fsp_drop_incoming(fsp); + return; + } + + if (!cmdclass->busy || list_empty(&cmdclass->msgq)) { + prerror("FSP #%d: Got orphan response! w0 = 0x%08x w1 = 0x%08x\n", + fsp->index, w0, w1); + __fsp_drop_incoming(fsp); + return; + } + req = list_top(&cmdclass->msgq, struct fsp_msg, link); + + /* Check if the response seems to match the message */ + if (req->state != fsp_msg_wresp || + (req->word0 & 0xff) != (w0 & 0xff) || + (req->word1 & 0xff) != (w1 & 0x7f)) { + __fsp_drop_incoming(fsp); + prerror("FSP #%d: Response doesn't match pending msg. w0 = 0x%08x w1 = 0x%08x\n", + fsp->index, w0, w1); + return; + } else { + u64 resetbit = ~fsp_get_class_bit(req->word0 & 0xff); + fsp_cmdclass_resp_bitmask &= resetbit; + cmdclass->timesent = 0; + } + + /* Allocate response if needed XXX We need to complete + * the original message with some kind of error here ? + */ + if (!req->resp) { + req->resp = __fsp_allocmsg(); + if (!req->resp) { + __fsp_drop_incoming(fsp); + prerror("FSP #%d: Failed to allocate response\n", + fsp->index); + return; + } + } + + /* Populate and complete (will drop the lock) */ + req->resp->state = fsp_msg_response; + __fsp_fill_incoming(fsp, req->resp, dlen, w0, w1); + fsp_complete_msg(req); + return; + } + + /* Allocate an incoming message */ + msg = __fsp_allocmsg(); + if (!msg) { + __fsp_drop_incoming(fsp); + prerror("FSP #%d: Failed to allocate incoming msg\n", + fsp->index); + return; + } + msg->state = fsp_msg_incoming; + __fsp_fill_incoming(fsp, msg, dlen, w0, w1); + + /* Handle FSP commands. This can recurse into fsp_queue_msg etc.. */ + unlock(&fsp_lock); + fsp_handle_command(msg); + lock(&fsp_lock); +} + +static void fsp_check_queues(struct fsp *fsp) +{ + int i; + + /* XXX In the long run, we might want to have a queue of + * classes waiting to be serviced to speed this up, either + * that or a bitmap. + */ + for (i = 0; i <= (FSP_MCLASS_LAST - FSP_MCLASS_FIRST); i++) { + struct fsp_cmdclass *cmdclass = &fsp_cmdclass[i]; + + if (fsp->state != fsp_mbx_idle) + break; + if (cmdclass->busy || list_empty(&cmdclass->msgq)) + continue; + fsp_poke_queue(cmdclass); + } +} + +static void __fsp_poll(bool interrupt) +{ + struct fsp_iopath *iop; + struct fsp *fsp = fsp_get_active(); + u32 ctl, hdir = 0; + bool psi_irq; + + /* + * The tracer isn't terribly efficient at detecting dups + * especially when coming from multiple CPUs so we do our + * own change-detection locally + */ + static u32 hdir_last_trace; + static u32 ctl_last_trace; + static bool psi_irq_last_trace; + static bool irq_last_trace; + + if (!fsp) + return; + + /* Crazy interrupt handling scheme: + * + * In order to avoid "losing" interrupts when polling the mbox + * we only clear interrupt conditions when called as a result of + * an interrupt. + * + * That way, if a poll clears, for example, the HPEND condition, + * the interrupt remains, causing a dummy interrupt later on + * thus allowing the OS to be notified of a state change (ie it + * doesn't need every poll site to monitor every state change). 
+ * + * However, this scheme is complicated by the fact that we need + * to clear the interrupt condition after we have cleared the + * original condition in HCTL, and we might have long stale + * interrupts which we do need to eventually get rid of. However + * clearing interrupts in such a way is racy, so we need to loop + * and re-poll HCTL after having done so or we might miss an + * event. It's a latency risk, but unlikely and probably worth it. + */ + + again: + if (fsp->active_iopath < 0) { + /* That should never happen */ + if (interrupt && (fsp->state != fsp_mbx_rr)) + prerror("FSP: Interrupt with no working IO path\n"); + return; + } + iop = &fsp->iopath[fsp->active_iopath]; + + /* Check for error state and handle R&R completion */ + fsp_handle_errors(fsp); + + /* Handle host initiated resets */ + if (fsp_in_hir(fsp)) { + fsp_hir_poll(fsp, iop->psi); + return; + } + + /* + * The above might have triggered an R&R, check that we + * are still functional + */ + if ((fsp->active_iopath < 0) || fsp_in_hir(fsp)) + return; + iop = &fsp->iopath[fsp->active_iopath]; + + /* Read interrupt status (we may or may not use it) */ + hdir = fsp_rreg(fsp, FSP_HDIR_REG); + + /* Read control now as well so we can trace them */ + ctl = fsp_rreg(fsp, FSP_MBX1_HCTL_REG); + + /* Ditto with PSI irq state */ + psi_irq = psi_poll_fsp_interrupt(iop->psi); + + /* Trace it if anything changes */ + if (hdir != hdir_last_trace || ctl != ctl_last_trace || + interrupt != irq_last_trace || psi_irq != psi_irq_last_trace) { + fsp_trace_event(fsp, TRACE_FSP_EVT_POLL_IRQ, + interrupt, hdir, ctl, psi_irq); + + hdir_last_trace = hdir; + ctl_last_trace = ctl; + irq_last_trace = interrupt; + psi_irq_last_trace = psi_irq; + } + + /* + * We *MUST* ignore the MBOX2 bits here. While MBOX2 cannot generate + * interrupt, it might still latch some bits here (and we found cases + * where the MBOX2 XUP would be set). If that happens, clearing HDIR + * never works (the bit gets set again immediately) because we don't + * clear the condition in HCTL2 and thus we loop forever. + */ + hdir &= FSP_DBIRQ_MBOX1; + + /* + * Sanity check: If an interrupt is pending and we are in polling + * mode, check that the PSI side is also pending. If some bit is + * set, just clear and move on. + */ + if (hdir && !interrupt && !psi_irq) { + prerror("FSP: WARNING ! HDIR 0x%08x but no PSI irq !\n", hdir); + fsp_wreg(fsp, FSP_HDIR_REG, hdir); + } + + /* + * We should never have the mbox in error state here unless it + * was fine until some printf inside fsp_handle_errors() caused + * the console to poke the FSP which detected a brand new error + * in the process. Let's be safe rather than sorry and handle that + * here + */ + if (fsp_in_hir(fsp) || fsp->state == fsp_mbx_err) { + prerror("FSP: Late error state detection\n"); + goto again; + } + + /* + * If we are in an R&R state with an active IO path, we + * shouldn't be getting interrupts. If we do, just clear + * the condition and print a message + */ + if (fsp->state == fsp_mbx_rr) { + if (interrupt) { + prerror("FSP: Interrupt in RR state [HDIR=0x%08x]\n", + hdir); + fsp_wreg(fsp, FSP_HDIR_REG, hdir); + } + return; + } + + /* Poll FSP CTL */ + if (ctl & (FSP_MBX_CTL_XUP | FSP_MBX_CTL_HPEND)) + prlog(PR_INSANE, "FSP #%d: poll, ctl: %x\n", fsp->index, ctl); + + /* Do we have a pending message waiting to complete ?
*/ + if (ctl & FSP_MBX_CTL_XUP) { + fsp_wreg(fsp, FSP_MBX1_HCTL_REG, FSP_MBX_CTL_XUP); + if (fsp->state == fsp_mbx_send) { + /* mbox is free */ + fsp->state = fsp_mbx_idle; + + /* Complete message (will break the lock) */ + fsp_complete_send(fsp); + + /* Lock can have been broken, so ctl is now + * potentially invalid, let's recheck + */ + goto again; + } else { + prerror("FSP #%d: Got XUP with no pending message !\n", + fsp->index); + } + } + + if (fsp->state == fsp_mbx_send) { + /* XXX Handle send timeouts!!! */ + } + + /* Is there an incoming message ? This will break the lock as well */ + if (ctl & FSP_MBX_CTL_HPEND) + fsp_handle_incoming(fsp); + + /* Note: Lock may have been broken above, thus ctl might be invalid + * now, don't use it any further. + */ + + /* Check for something else to send */ + if (fsp->state == fsp_mbx_idle) + fsp_check_queues(fsp); + + /* Clear interrupts, and recheck HCTL if any occurred */ + if (interrupt && hdir) { + fsp_wreg(fsp, FSP_HDIR_REG, hdir); + goto again; + } +} + +void fsp_interrupt(void) +{ + lock(&fsp_lock); + __fsp_poll(true); + unlock(&fsp_lock); +} + + +int fsp_sync_msg(struct fsp_msg *msg, bool autofree) +{ + int rc; + + rc = fsp_queue_msg(msg, NULL); + if (rc) + goto bail; + + while(fsp_msg_busy(msg)) { + if (fsp_in_rr()) { + fsp_cancelmsg(msg); + rc = -1; + goto bail; + } + cpu_relax(); + opal_run_pollers(); + } + + switch(msg->state) { + case fsp_msg_done: + rc = 0; + break; + case fsp_msg_timeout: + rc = -1; /* XXX to improve */ + break; + default: + rc = -1; /* Should not happen... (assert ?) */ + } + + if (msg->resp) + rc = (msg->resp->word1 >> 8) & 0xff; + bail: + if (autofree) + fsp_freemsg(msg); + return rc; +} + +void fsp_register_client(struct fsp_client *client, u8 msgclass) +{ + struct fsp_cmdclass *cmdclass = __fsp_get_cmdclass(msgclass); + + if (!fsp_present()) + return; + assert(cmdclass); + list_add_tail(&cmdclass->clientq, &client->link); +} + +void fsp_unregister_client(struct fsp_client *client, u8 msgclass) +{ + struct fsp_cmdclass *cmdclass = __fsp_get_cmdclass(msgclass); + + if (!fsp_present()) + return; + assert(cmdclass); + list_del_from(&cmdclass->clientq, &client->link); +} + +static int fsp_init_mbox(struct fsp *fsp) +{ + unsigned int i; + u32 reg; + + /* + * Note: The documentation contradicts itself as to + * whether the HDIM bits should be set or cleared to + * enable interrupts + * + * This seems to work... + */ + + /* Mask all interrupts */ + fsp_wreg(fsp, FSP_HDIM_CLR_REG, FSP_DBIRQ_ALL); + + /* Clear all errors */ + fsp_wreg(fsp, FSP_HDES_REG, FSP_DBERRSTAT_CLR1 | FSP_DBERRSTAT_CLR2); + + /* Initialize data area as the doco says */ + for (i = 0; i < 0x40; i += 4) + fsp_wreg(fsp, FSP_MBX1_HDATA_AREA + i, 0); + + /* + * Clear whatever crap may remain in HDCR. Do not write XDN as that + * would be interpreted incorrectly as an R&R completion which + * we aren't ready to send yet ! 
+ */ + fsp_wreg(fsp, FSP_MBX1_HCTL_REG, FSP_MBX_CTL_XUP | FSP_MBX_CTL_HPEND | + FSP_MBX_CTL_HCSP_MASK | FSP_MBX_CTL_DCSP_MASK | + FSP_MBX_CTL_PTS); + + /* Clear all pending interrupts */ + fsp_wreg(fsp, FSP_HDIR_REG, FSP_DBIRQ_ALL); + + /* Enable all mbox1 interrupts */ + fsp_wreg(fsp, FSP_HDIM_SET_REG, FSP_DBIRQ_MBOX1); + + /* Decode what FSP we are connected to */ + reg = fsp_rreg(fsp, FSP_SCRATCH0_REG); + if (reg & PPC_BIT32(0)) { /* Is it a valid connection */ + if (reg & PPC_BIT32(3)) + prlog(PR_INFO, "FSP: Connected to FSP-B\n"); + else + prlog(PR_INFO, "FSP: Connected to FSP-A\n"); + } + + return 0; +} + +/* We use a single fixed TCE table for all PSI interfaces */ +static void fsp_init_tce_table(void) +{ + fsp_tce_table = (__be64 *)PSI_TCE_TABLE_BASE; + + memset(fsp_tce_table, 0, PSI_TCE_TABLE_SIZE); +} + +void fsp_tce_map(u32 offset, void *addr, u32 size) +{ + u64 raddr = (u64)addr; + + assert(!(offset & TCE_MASK)); + assert(!(raddr & TCE_MASK)); + assert(!(size & TCE_MASK)); + + size >>= TCE_SHIFT; + offset >>= TCE_SHIFT; + + while(size--) { + fsp_tce_table[offset++] = cpu_to_be64(raddr | 0x3); + raddr += TCE_PSIZE; + } +} + +void fsp_tce_unmap(u32 offset, u32 size) +{ + assert(!(offset & TCE_MASK)); + assert(!(size & TCE_MASK)); + + size >>= TCE_SHIFT; + offset >>= TCE_SHIFT; + + while(size--) + fsp_tce_table[offset++] = 0; +} + +static struct fsp *fsp_find_by_index(int index) +{ + struct fsp *fsp = first_fsp; + + do { + if (fsp->index == index) + return fsp; + } while (fsp->link != first_fsp); + + return NULL; +} + +static void fsp_init_links(struct dt_node *fsp_node) +{ + const struct dt_property *linksprop; + int i, index; + struct fsp *fsp; + struct fsp_iopath *fiop; + + linksprop = dt_find_property(fsp_node, "ibm,psi-links"); + assert(linksprop); + + index = dt_prop_get_u32(fsp_node, "reg"); + fsp = fsp_find_by_index(index); + if (!fsp) { + prerror("FSP: FSP with index %d not found\n", index); + return; + } + + fsp->state = fsp_mbx_idle; + + /* Iterate all links */ + for (i = 0; i < fsp->iopath_count; i++) { + u64 reg; + u32 link; + + link = dt_property_get_cell(linksprop, i); + fiop = &fsp->iopath[i]; + fiop->psi = psi_find_link(link); + if (fiop->psi == NULL) { + prerror("FSP #%d: Couldn't find PSI link\n", + fsp->index); + continue; + } + + prlog(PR_DEBUG, "FSP #%d: Found PSI HB link to chip %d\n", + fsp->index, link); + + psi_fsp_link_in_use(fiop->psi); + + /* Get the FSP register window */ + reg = in_be64(fiop->psi->regs + PSIHB_FSPBAR); + fiop->fsp_regs = (void *)(reg | (1ULL << 63) | + dt_prop_get_u32(fsp_node, "reg-offset")); + } +} + +static void fsp_update_links_states(struct fsp *fsp) +{ + struct fsp_iopath *fiop; + unsigned int i; + + /* Iterate all links */ + for (i = 0; i < fsp->iopath_count; i++) { + fiop = &fsp->iopath[i]; + if (!fiop->psi) + fiop->state = fsp_path_bad; + else if (fiop->psi->active) { + fsp->active_iopath = i; + fiop->state = fsp_path_active; + } else + fiop->state = fsp_path_backup; + } + + if (fsp->active_iopath >= 0) { + if (!active_fsp || (active_fsp != fsp)) + active_fsp = fsp; + + fsp_inbound_off = 0; + fiop = &fsp->iopath[fsp->active_iopath]; + psi_init_for_fsp(fiop->psi); + fsp_init_mbox(fsp); + } +} + +void fsp_reinit_fsp(void) +{ + struct fsp *fsp; + + /* Notify all FSPs to check for an updated link state */ + for (fsp = first_fsp; fsp; fsp = fsp->link) + fsp_update_links_states(fsp); +} + +static void fsp_create_fsp(struct dt_node *fsp_node) +{ + const struct dt_property *linksprop; + struct fsp *fsp; + int count, index; + + index 
= dt_prop_get_u32(fsp_node, "reg"); + prlog(PR_INFO, "FSP #%d: Found in device-tree, setting up...\n", + index); + + linksprop = dt_find_property(fsp_node, "ibm,psi-links"); + if (!linksprop || linksprop->len < 4) { + prerror("FSP #%d: No links !\n", index); + return; + } + + fsp = zalloc(sizeof(struct fsp)); + if (!fsp) { + prerror("FSP #%d: Can't allocate memory !\n", index); + return; + } + + fsp->index = index; + fsp->active_iopath = -1; + + count = linksprop->len / 4; + prlog(PR_DEBUG, "FSP #%d: Found %d IO PATH\n", index, count); + if (count > FSP_MAX_IOPATH) { + prerror("FSP #%d: WARNING, limited to %d IO PATH\n", + index, FSP_MAX_IOPATH); + count = FSP_MAX_IOPATH; + } + fsp->iopath_count = count; + + fsp->link = first_fsp; + first_fsp = fsp; + + fsp_init_links(fsp_node); + fsp_update_links_states(fsp); + + if (fsp->active_iopath >= 0) + psi_enable_fsp_interrupt(fsp->iopath[fsp->active_iopath].psi); +} + +static void fsp_opal_poll(void *data __unused) +{ + /* Test the host initiated reset */ + if (hir_trigger == 0xdeadbeef) { + uint32_t plid = log_simple_error(&e_info(OPAL_INJECTED_HIR), + "SURV: Injected HIR, initiating FSP R/R\n"); + fsp_trigger_reset(plid); + hir_trigger = 0; + } + + if (try_lock(&fsp_lock)) { + __fsp_poll(false); + unlock(&fsp_lock); + } +} + +int fsp_fatal_msg(struct fsp_msg *msg) +{ + int rc = 0; + + rc = fsp_queue_msg(msg, NULL); + if (rc) + return rc; + + while(fsp_msg_busy(msg)) { + if (fsp_in_rr()) { + fsp_cancelmsg(msg); + return -1; + } + + cpu_relax(); + fsp_opal_poll(NULL); + } + + switch(msg->state) { + case fsp_msg_done: + rc = 0; + break; + case fsp_msg_timeout: + rc = -1; /* XXX to improve */ + break; + default: + rc = -1; /* Should not happen... (assert ?) */ + } + + if (msg->resp) + rc = (msg->resp->word1 >> 8) & 0xff; + + return rc; +} + +static bool fsp_init_one(const char *compat) +{ + struct dt_node *fsp_node; + bool inited = false; + + dt_for_each_compatible(dt_root, fsp_node, compat) { + if (!inited) { + int i; + + /* Initialize the per-class msg queues */ + for (i = 0; + i <= (FSP_MCLASS_LAST - FSP_MCLASS_FIRST); i++) { + list_head_init(&fsp_cmdclass[i].msgq); + list_head_init(&fsp_cmdclass[i].clientq); + list_head_init(&fsp_cmdclass[i].rr_queue); + } + + /* Init the queues for RR notifier cmdclass */ + list_head_init(&fsp_cmdclass_rr.msgq); + list_head_init(&fsp_cmdclass_rr.clientq); + list_head_init(&fsp_cmdclass_rr.rr_queue); + + /* Register poller */ + opal_add_poller(fsp_opal_poll, NULL); + + inited = true; + } + + /* Create the FSP data structure */ + fsp_create_fsp(fsp_node); + } + + return inited; +} + +void fsp_init(void) +{ + prlog(PR_DEBUG, "FSP: Looking for FSP...\n"); + + fsp_init_tce_table(); + + if (!fsp_init_one("ibm,fsp1") && !fsp_init_one("ibm,fsp2")) { + prlog(PR_DEBUG, "FSP: No FSP on this machine\n"); + return; + } +} + +bool fsp_present(void) +{ + return first_fsp != NULL; +} + +static void fsp_timeout_poll(void *data __unused) +{ + u64 now = mftb(); + u64 timeout_val = 0; + u64 cmdclass_resp_bitmask = fsp_cmdclass_resp_bitmask; + struct fsp_cmdclass *cmdclass = NULL; + struct fsp_msg *req = NULL; + u32 index = 0; + + if (timeout_timer == 0) + timeout_timer = now + secs_to_tb(30); + + /* The lowest granularity for a message timeout is 30 secs. 
+ * So every 30secs, check if there is any message + * waiting for a response from the FSP + */ + if (tb_compare(now, timeout_timer) == TB_ABEFOREB) + return; + if (!try_lock(&fsp_poll_lock)) + return; + if (tb_compare(now, timeout_timer) == TB_ABEFOREB) { + unlock(&fsp_poll_lock); + return; + } + + while (cmdclass_resp_bitmask) { + u64 time_sent = 0; + u64 time_to_comp = 0; + + if (!(cmdclass_resp_bitmask & 0x1)) + goto next_bit; + + cmdclass = &fsp_cmdclass[index]; + timeout_val = secs_to_tb((cmdclass->timeout) * 60); + time_sent = cmdclass->timesent; + time_to_comp = now - cmdclass->timesent; + + /* Now check if the response has timed out */ + if (tb_compare(time_to_comp, timeout_val) == TB_AAFTERB) { + u32 w0, w1; + enum fsp_msg_state mstate; + + /* Take the FSP lock now and re-check */ + lock(&fsp_lock); + if (!(fsp_cmdclass_resp_bitmask & (1ull << index)) || + time_sent != cmdclass->timesent) { + unlock(&fsp_lock); + goto next_bit; + } + req = list_top(&cmdclass->msgq, struct fsp_msg, link); + if (!req) { + printf("FSP: Timeout state mismatch on class %d\n", + index); + fsp_cmdclass_resp_bitmask &= ~(1ull << index); + cmdclass->timesent = 0; + unlock(&fsp_lock); + goto next_bit; + } + w0 = req->word0; + w1 = req->word1; + mstate = req->state; + prlog(PR_WARNING, "FSP: Response from FSP timed out," + " cmd = %x subcmd = %x mod = %x state: %d\n", + w0 & 0xff, w1 & 0xff, (w1 >> 8) & 0xff, mstate); + fsp_reg_dump(); + fsp_cmdclass_resp_bitmask &= ~(1ull << index); + cmdclass->timesent = 0; + if (req->resp) { + req->resp->state = fsp_msg_timeout; + req->resp->word1 = (FSP_STATUS_BUSY << 8) | + (req->resp->word1 & 0xff); + } + fsp_complete_msg(req); + __fsp_trigger_reset(); + unlock(&fsp_lock); + fsp_hir_reason_plid = log_simple_error( + &e_info(OPAL_RC_FSP_POLL_TIMEOUT), + "FSP: Response from FSP timed out," + " cmd = %x subcmd = %x mod = %x state: %d\n", + w0 & 0xff, w1 & 0xff, (w1 >> 8) & 0xff, mstate); + } + next_bit: + cmdclass_resp_bitmask = cmdclass_resp_bitmask >> 1; + index++; + } + unlock(&fsp_poll_lock); +} + +void fsp_opl(void) +{ + struct dt_node *iplp; + + if (!fsp_present()) + return; + + /* Send OPL */ + ipl_state |= ipl_opl_sent; + fsp_sync_msg(fsp_mkmsg(FSP_CMD_OPL, 0), true); + while(!(ipl_state & ipl_got_continue)) { + opal_run_pollers(); + cpu_relax(); + } + + /* Send continue ACK */ + fsp_sync_msg(fsp_mkmsg(FSP_CMD_CONTINUE_ACK, 0), true); + + /* Wait for various FSP messages */ + prlog(PR_INFO, "INIT: Waiting for FSP to advertise new role...\n"); + while(!(ipl_state & ipl_got_new_role)) { + cpu_relax(); + opal_run_pollers(); + } + prlog(PR_INFO, "INIT: Waiting for FSP to request capabilities...\n"); + while(!(ipl_state & ipl_got_caps)) { + cpu_relax(); + opal_run_pollers(); + } + + /* Initiate the timeout poller */ + opal_add_poller(fsp_timeout_poll, NULL); + + /* Tell FSP we are in standby */ + prlog(PR_INFO, "INIT: Sending HV Functional: Standby...\n"); + fsp_sync_msg(fsp_mkmsg(FSP_CMD_HV_FUNCTNAL, 1, 0x01000000), true); + + /* Wait for FSP functional */ + prlog(PR_INFO, "INIT: Waiting for FSP functional\n"); + while(!(ipl_state & ipl_got_fsp_functional)) { + cpu_relax(); + opal_run_pollers(); + } + + /* Tell FSP we are in running state */ + prlog(PR_INFO, "INIT: Sending HV Functional: Runtime...\n"); + fsp_sync_msg(fsp_mkmsg(FSP_CMD_HV_FUNCTNAL, 1, 0x02000000), true); + + /* + * For the factory reset case, FSP sends us the PCI Bus + * Reset request. 
We don't have to do anything special with + * PCI bus numbers here; just send the Power Down message + * with modifier 0x02 to FSP. + */ + iplp = dt_find_by_path(dt_root, "ipl-params/ipl-params"); + if (iplp && dt_find_property(iplp, "pci-busno-reset-ipl")) { + prlog(PR_DEBUG, "INIT: PCI Bus Reset requested." + " Sending Power Down\n"); + fsp_sync_msg(fsp_mkmsg(FSP_CMD_POWERDOWN_PCIRS, 0), true); + } + + /* + * Tell FSP we are in running state with all partitions. + * + * This is needed, otherwise the FSP will not reset its reboot count + * on failures. Ideally we should send that when we know the + * OS is up but we don't currently have a very good way to do + * that, so this will do as a stop-gap. + */ + prlog(PR_NOTICE, "INIT: Sending HV Functional: Runtime all partitions\n"); + fsp_sync_msg(fsp_mkmsg(FSP_CMD_HV_FUNCTNAL, 1, 0x04000000), true); +} + +uint32_t fsp_adjust_lid_side(uint32_t lid_no) +{ + struct dt_node *iplp; + const char *side = NULL; + + iplp = dt_find_by_path(dt_root, "ipl-params/ipl-params"); + if (iplp) + side = dt_prop_get_def(iplp, "cec-ipl-side", NULL); + if (!side || !strcmp(side, "temp")) + lid_no |= ADJUST_T_SIDE_LID_NO; + return lid_no; +} + +struct fsp_fetch_lid_item { + enum resource_id id; + uint32_t idx; + + uint32_t lid; + uint32_t lid_no; + uint64_t bsize; + uint32_t offset; + void *buffer; + size_t *length; + size_t remaining; + size_t chunk_requested; + struct list_node link; + int result; +}; + +/* + * We have a queue of things to fetch; + * when fetched, it moves to fsp_fetched_lid until we're asked if it + * has been fetched, in which case it's free()d. + * + * Everything is protected with fsp_fetch_lock. + * + * We use PSI_DMA_FETCH TCE entry for this fetching queue. If something + * is in the fsp_fetch_lid_queue, it means we're using this TCE entry! + * + * If we add the first entry to fsp_fetch_lid_queue, we trigger fetching!
+ */ +static LIST_HEAD(fsp_fetch_lid_queue); +static LIST_HEAD(fsp_fetched_lid); +static struct lock fsp_fetch_lock = LOCK_UNLOCKED; + +/* + * Asynchronous fsp fetch data call + * + * Note: + * buffer = PSI DMA address space + */ +int fsp_fetch_data_queue(uint8_t flags, uint16_t id, uint32_t sub_id, + uint32_t offset, void *buffer, size_t *length, + void (*comp)(struct fsp_msg *msg)) +{ + struct fsp_msg *msg; + uint32_t chunk = *length; + + if (!comp) + return OPAL_PARAMETER; + + msg = fsp_mkmsg(FSP_CMD_FETCH_SP_DATA, 0x6, flags << 16 | id, + sub_id, offset, 0, buffer, chunk); + if (!msg) { + prerror("FSP: allocation failed!\n"); + return OPAL_INTERNAL_ERROR; + } + if (fsp_queue_msg(msg, comp)) { + fsp_freemsg(msg); + prerror("FSP: Failed to queue fetch data message\n"); + return OPAL_INTERNAL_ERROR; + } + return OPAL_SUCCESS; +} + +#define CAPP_IDX_VENICE_DD10 0x100ea +#define CAPP_IDX_VENICE_DD20 0x200ea +#define CAPP_IDX_MURANO_DD20 0x200ef +#define CAPP_IDX_MURANO_DD21 0x201ef +#define CAPP_IDX_NAPLES_DD10 0x100d3 +#define CAPP_IDX_NIMBUS_DD10 0x100d1 +#define CAPP_IDX_NIMBUS_DD20 0x200d1 +#define CAPP_IDX_NIMBUS_DD21 0x201d1 +#define CAPP_IDX_NIMBUS_DD22 0x202d1 +#define CAPP_IDX_NIMBUS_DD23 0x203d1 + +#define IMA_CATALOG_NIMBUS 0x4e0200 +#define IMA_CATALOG_P10_DD1 0x800100 +#define IMA_CATALOG_P10_DD2 0x800200 + + +static struct { + enum resource_id id; + uint32_t idx; + uint32_t lid_no; +} fsp_lid_map[] = { + { RESOURCE_ID_KERNEL, RESOURCE_SUBID_NONE, KERNEL_LID_OPAL }, + { RESOURCE_ID_INITRAMFS,RESOURCE_SUBID_NONE, INITRAMFS_LID_OPAL }, + { RESOURCE_ID_IMA_CATALOG,IMA_CATALOG_NIMBUS, 0x80f00103 }, + { RESOURCE_ID_CAPP, CAPP_IDX_MURANO_DD20, 0x80a02002 }, + { RESOURCE_ID_CAPP, CAPP_IDX_MURANO_DD21, 0x80a02001 }, + { RESOURCE_ID_CAPP, CAPP_IDX_VENICE_DD10, 0x80a02003 }, + { RESOURCE_ID_CAPP, CAPP_IDX_VENICE_DD20, 0x80a02004 }, + { RESOURCE_ID_CAPP, CAPP_IDX_NAPLES_DD10, 0x80a02005 }, + { RESOURCE_ID_CAPP, CAPP_IDX_NIMBUS_DD10, 0x80a02006 }, + { RESOURCE_ID_CAPP, CAPP_IDX_NIMBUS_DD20, 0x80a02007 }, + { RESOURCE_ID_CAPP, CAPP_IDX_NIMBUS_DD21, 0x80a02007 }, + { RESOURCE_ID_CAPP, CAPP_IDX_NIMBUS_DD22, 0x80a02007 }, + { RESOURCE_ID_CAPP, CAPP_IDX_NIMBUS_DD23, 0x80a02007 }, + { RESOURCE_ID_IMA_CATALOG,IMA_CATALOG_P10_DD1, 0x80f00103 }, + { RESOURCE_ID_IMA_CATALOG,IMA_CATALOG_P10_DD2, 0x80f00103 }, +}; + +static void fsp_start_fetching_next_lid(void); +static void fsp_fetch_lid_next_chunk(struct fsp_fetch_lid_item *last); + +static void fsp_fetch_lid_complete(struct fsp_msg *msg) +{ + struct fsp_fetch_lid_item *last; + uint32_t woffset, wlen; + uint8_t rc; + + lock(&fsp_fetch_lock); + last = list_top(&fsp_fetch_lid_queue, struct fsp_fetch_lid_item, link); + fsp_tce_unmap(PSI_DMA_FETCH, last->bsize); + + woffset = fsp_msg_get_data_word(msg->resp, 1); + wlen = fsp_msg_get_data_word(msg->resp, 2); + rc = (msg->resp->word1 >> 8) & 0xff; + + /* Fall back to a PHYP LID for kernel loads */ + if (rc && last->lid_no == KERNEL_LID_OPAL) { + const char *ltype = dt_prop_get_def(dt_root, "lid-type", NULL); + if (!ltype || strcmp(ltype, "opal")) { + prerror("Failed to load in OPAL mode...\n"); + last->result = OPAL_PARAMETER; + last = list_pop(&fsp_fetch_lid_queue, + struct fsp_fetch_lid_item, link); + list_add_tail(&fsp_fetched_lid, &last->link); + fsp_start_fetching_next_lid(); + unlock(&fsp_fetch_lock); + return; + } + printf("Trying to load as PHYP LID...\n"); + last->lid = KERNEL_LID_PHYP; + /* Retry with different LID */ + fsp_fetch_lid_next_chunk(last); + } + + if (rc !=0 && rc != 2) { + 
last->result = -EIO; + last = list_pop(&fsp_fetch_lid_queue, struct fsp_fetch_lid_item, link); + prerror("FSP LID %08x load ERROR %d\n", last->lid_no, rc); + list_add_tail(&fsp_fetched_lid, &last->link); + fsp_start_fetching_next_lid(); + unlock(&fsp_fetch_lock); + return; + } + + /* + * As per documentation, rc=2 means end of file not reached and + * rc=1 means we reached end of file. But it looks like we always + * get rc=0 irrespective of whether end of file is reached or not. + * The old implementation (fsp_sync_msg) used to rely on + * (wlen < chunk) to decide whether we reached end of file. + * + * Ideally FSP folks should fix their code as per documentation, + * but until they do, adding the old check (hack) here again. + * + * Without this hack some systems would load a partial LID and won't + * be able to boot into the petitboot kernel. + */ + if (rc == 0 && (wlen < last->chunk_requested)) + last->result = OPAL_SUCCESS; + + fsp_freemsg(msg); + + last->remaining -= wlen; + *(last->length) += wlen; + last->buffer += wlen; + last->offset += wlen; + + prlog(PR_DEBUG, "FSP: LID %x Chunk read -> rc=0x%02x off: %08x" + " twritten: %08x\n", last->lid, rc, woffset, wlen); + + fsp_fetch_lid_next_chunk(last); + + unlock(&fsp_fetch_lock); +} + +static void fsp_fetch_lid_next_chunk(struct fsp_fetch_lid_item *last) +{ + uint64_t baddr; + uint64_t balign, boff; + uint32_t chunk; + uint32_t taddr; + struct fsp_msg *msg; + uint8_t flags = 0; + uint16_t id = FSP_DATASET_NONSP_LID; + uint32_t sub_id; + + assert(lock_held_by_me(&fsp_fetch_lock)); + + if (last->remaining == 0 || last->result == OPAL_SUCCESS) { + last->result = OPAL_SUCCESS; + last = list_pop(&fsp_fetch_lid_queue, + struct fsp_fetch_lid_item, link); + list_add_tail(&fsp_fetched_lid, &last->link); + fsp_start_fetching_next_lid(); + return; + } + + baddr = (uint64_t)last->buffer; + balign = baddr & ~TCE_MASK; + boff = baddr & TCE_MASK; + + chunk = last->remaining; + if (chunk > (PSI_DMA_FETCH_SIZE - boff)) + chunk = PSI_DMA_FETCH_SIZE - boff; + last->bsize = ((boff + chunk) + TCE_MASK) & ~TCE_MASK; + last->chunk_requested = chunk; + + prlog(PR_DEBUG, "FSP: LID %08x chunk 0x%08x bytes balign=%llx" + " boff=%llx bsize=%llx\n", + last->lid_no, chunk, balign, boff, last->bsize); + + fsp_tce_map(PSI_DMA_FETCH, (void *)balign, last->bsize); + taddr = PSI_DMA_FETCH + boff; + + sub_id = last->lid; + + msg = fsp_mkmsg(FSP_CMD_FETCH_SP_DATA, 6, + flags << 16 | id, sub_id, last->offset, + 0, taddr, chunk); + + if (fsp_queue_msg(msg, fsp_fetch_lid_complete)) { + fsp_freemsg(msg); + prerror("FSP: Failed to queue fetch data message\n"); + last->result = OPAL_INTERNAL_ERROR; + last = list_pop(&fsp_fetch_lid_queue, + struct fsp_fetch_lid_item, link); + list_add_tail(&fsp_fetched_lid, &last->link); + } + last->result = OPAL_BUSY; +} + +static void fsp_start_fetching_next_lid(void) +{ + struct fsp_fetch_lid_item *last; + + assert(lock_held_by_me(&fsp_fetch_lock)); + + last = list_top(&fsp_fetch_lid_queue, struct fsp_fetch_lid_item, link); + + if (last == NULL) + return; + + /* If we're not already fetching */ + if (last->result == OPAL_EMPTY) + fsp_fetch_lid_next_chunk(last); +} + +int fsp_start_preload_resource(enum resource_id id, uint32_t idx, + void *buf, size_t *size) +{ + struct fsp_fetch_lid_item *resource; + uint32_t lid_no = 0; + int i; + + resource = malloc(sizeof(struct fsp_fetch_lid_item)); + assert(resource != NULL); + + resource->id = id; + resource->idx = idx; + + resource->offset = 0; + resource->buffer = buf; + resource->remaining = *size; +
*size = 0; + resource->length = size; + resource->result = OPAL_EMPTY; + + for (i = 0; i < ARRAY_SIZE(fsp_lid_map); i++) { + if (id != fsp_lid_map[i].id) + continue; + + if (fsp_lid_map[i].idx == idx) { + lid_no = fsp_lid_map[i].lid_no; + break; + } + } + if (lid_no == 0) + return OPAL_PARAMETER; + + printf("Trying to load OPAL LID %08x...\n", lid_no); + resource->lid_no = lid_no; + resource->lid = fsp_adjust_lid_side(lid_no); + + lock(&fsp_fetch_lock); + list_add_tail(&fsp_fetch_lid_queue, &resource->link); + fsp_start_fetching_next_lid(); + unlock(&fsp_fetch_lock); + + return OPAL_SUCCESS; +} + +int fsp_resource_loaded(enum resource_id id, uint32_t idx) +{ + struct fsp_fetch_lid_item *resource = NULL; + struct fsp_fetch_lid_item *r; + int rc = OPAL_BUSY; + + lock(&fsp_fetch_lock); + list_for_each(&fsp_fetched_lid, r, link) { + if (r->id == id && r->idx == idx) { + resource = r; + break; + } + } + + if (resource) { + rc = resource->result; + list_del(&resource->link); + free(resource); + } + unlock(&fsp_fetch_lock); + + return rc; +} + +static int fsp_lid_loaded(uint32_t lid_no) +{ + struct fsp_fetch_lid_item *resource = NULL; + struct fsp_fetch_lid_item *r; + int rc = OPAL_BUSY; + + lock(&fsp_fetch_lock); + list_for_each(&fsp_fetched_lid, r, link) { + if (r->lid_no == lid_no) { + resource = r; + break; + } + } + + if (resource) { + rc = resource->result; + if (rc == OPAL_SUCCESS) { + list_del(&resource->link); + free(resource); + } + } + unlock(&fsp_fetch_lock); + + return rc; +} + +int fsp_preload_lid(uint32_t lid_no, char *buf, size_t *size) +{ + struct fsp_fetch_lid_item *resource; + int r = OPAL_SUCCESS; + + resource = malloc(sizeof(struct fsp_fetch_lid_item)); + assert(resource != NULL); + + resource->id = -1; + resource->idx = -1; + + resource->offset = 0; + resource->buffer = buf; + resource->remaining = *size; + *size = 0; + resource->length = size; + resource->result = OPAL_EMPTY; + + if (lid_no == 0) + return OPAL_PARAMETER; + + printf("Trying to load LID %08x from FSP\n", lid_no); + resource->lid_no = lid_no; + resource->lid = fsp_adjust_lid_side(lid_no); + + lock(&fsp_fetch_lock); + list_add_tail(&fsp_fetch_lid_queue, &resource->link); + fsp_start_fetching_next_lid(); + unlock(&fsp_fetch_lock); + + return r; +} + +int fsp_wait_lid_loaded(uint32_t lid_no) +{ + int r; + int waited = 0; + + r = fsp_lid_loaded(lid_no); + + while(r == OPAL_BUSY) { + opal_run_pollers(); + time_wait_nopoll(msecs_to_tb(5)); + waited+=5; + cpu_relax(); + r = fsp_lid_loaded(lid_no); + } + + prlog(PR_DEBUG, "FSP: fsp_wait_lid_loaded %x %u ms\n", lid_no, waited); + + return r; +} + +void fsp_used_by_console(void) +{ + fsp_lock.in_con_path = true; + + /* + * Some other processor might hold it without having + * disabled the console locally so let's make sure that + * is over by taking/releasing the lock ourselves + */ + lock(&fsp_lock); + unlock(&fsp_lock); +} -- cgit 1.2.3-korg
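For reference, the LID preload interface added above (fsp_preload_lid() together with fsp_wait_lid_loaded()) is driven by a caller that owns the destination buffer: the size argument passes the buffer capacity in and, on completion, holds the number of bytes actually fetched. The sketch below is illustrative only and is not part of the patch; the wrapper name, the 1MB buffer size and the error handling are assumptions, and it presumes the usual skiboot headers (skiboot.h, fsp.h, opal-api.h) are in scope.

/*
 * Illustrative caller sketch (not part of the patch): preload one LID
 * into a caller-owned buffer and poll until the transfer finishes.
 */
static int load_one_lid_example(uint32_t lid_no)
{
	size_t size = 0x100000;	/* in: buffer capacity, out: bytes fetched */
	char *buf = malloc(size);
	int rc;

	if (!buf)
		return OPAL_NO_MEM;

	/* Queue the asynchronous fetch; data is DMA'd in chunks via PSI_DMA_FETCH */
	rc = fsp_preload_lid(lid_no, buf, &size);
	if (rc == OPAL_SUCCESS)
		rc = fsp_wait_lid_loaded(lid_no);	/* runs pollers until done or error */

	prlog(PR_DEBUG, "LID 0x%08x: rc=%d, %llu bytes fetched\n",
	      lid_no, rc, (unsigned long long)size);
	free(buf);
	return rc;
}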