Diffstat (limited to 'roms/u-boot/include/linux/soc/ti')
-rw-r--r--   roms/u-boot/include/linux/soc/ti/cppi5.h              996
-rw-r--r--   roms/u-boot/include/linux/soc/ti/k3-navss-ringacc.h   251
-rw-r--r--   roms/u-boot/include/linux/soc/ti/k3-sec-proxy.h        25
-rw-r--r--   roms/u-boot/include/linux/soc/ti/ti-udma.h             43
-rw-r--r--   roms/u-boot/include/linux/soc/ti/ti_sci_protocol.h    702
5 files changed, 2017 insertions, 0 deletions
diff --git a/roms/u-boot/include/linux/soc/ti/cppi5.h b/roms/u-boot/include/linux/soc/ti/cppi5.h
new file mode 100644
index 000000000..cfdf7ea29
--- /dev/null
+++ b/roms/u-boot/include/linux/soc/ti/cppi5.h
@@ -0,0 +1,996 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CPPI5 descriptors interface
+ *
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef __TI_CPPI5_H__
+#define __TI_CPPI5_H__
+
+#include <hexdump.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+
+/**
+ * Descriptor header, present in all types of descriptors
+ */
+struct cppi5_desc_hdr_t {
+ u32 pkt_info0; /* Packet info word 0 (n/a in Buffer desc) */
+ u32 pkt_info1; /* Packet info word 1 (n/a in Buffer desc) */
+ u32 pkt_info2; /* Packet info word 2 Buffer reclamation info */
+ u32 src_dst_tag; /* Packet info word 3 (n/a in Buffer desc) */
+} __packed;
+
+/**
+ * Host-mode packet and buffer descriptor definition
+ */
+struct cppi5_host_desc_t {
+ struct cppi5_desc_hdr_t hdr;
+ u64 next_desc; /* w4/5: Linking word */
+ u64 buf_ptr; /* w6/7: Buffer pointer */
+ u32 buf_info1; /* w8: Buffer valid data length */
+ u32 org_buf_len; /* w9: Original buffer length */
+ u64 org_buf_ptr; /* w10/11: Original buffer pointer */
+ u32 epib[0]; /* Extended Packet Info Data (optional, 4 words) */
+ /*
+ * Protocol Specific Data (optional, 0-128 bytes in multiples of 4),
+ * and/or Other Software Data (0-N bytes, optional)
+ */
+} __packed;
+
+#define CPPI5_DESC_MIN_ALIGN (16U)
+
+#define CPPI5_INFO0_HDESC_EPIB_SIZE (16U)
+#define CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE (128U)
+
+#define CPPI5_INFO0_HDESC_TYPE_SHIFT (30U)
+#define CPPI5_INFO0_HDESC_TYPE_MASK GENMASK(31, 30)
+#define CPPI5_INFO0_DESC_TYPE_VAL_HOST (1U)
+#define CPPI5_INFO0_DESC_TYPE_VAL_MONO (2U)
+#define CPPI5_INFO0_DESC_TYPE_VAL_TR (3U)
+#define CPPI5_INFO0_HDESC_EPIB_PRESENT BIT(29)
+/*
+ * Protocol Specific Words location:
+ * 0 - located in the descriptor,
+ * 1 = located in the SOP Buffer immediately prior to the data.
+ */
+#define CPPI5_INFO0_HDESC_PSINFO_LOCATION BIT(28)
+#define CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT (22U)
+#define CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK GENMASK(27, 22)
+#define CPPI5_INFO0_HDESC_PKTLEN_SHIFT (0)
+#define CPPI5_INFO0_HDESC_PKTLEN_MASK GENMASK(21, 0)
+
+#define CPPI5_INFO1_DESC_PKTERROR_SHIFT (28U)
+#define CPPI5_INFO1_DESC_PKTERROR_MASK GENMASK(31, 28)
+#define CPPI5_INFO1_HDESC_PSFLGS_SHIFT (24U)
+#define CPPI5_INFO1_HDESC_PSFLGS_MASK GENMASK(27, 24)
+#define CPPI5_INFO1_DESC_PKTID_SHIFT (14U)
+#define CPPI5_INFO1_DESC_PKTID_MASK GENMASK(23, 14)
+#define CPPI5_INFO1_DESC_FLOWID_SHIFT (0)
+#define CPPI5_INFO1_DESC_FLOWID_MASK GENMASK(13, 0)
+
+#define CPPI5_INFO2_HDESC_PKTTYPE_SHIFT (27U)
+#define CPPI5_INFO2_HDESC_PKTTYPE_MASK GENMASK(31, 27)
+/* Return Policy: 0 - Entire packet 1 - Each buffer */
+#define CPPI5_INFO2_HDESC_RETPOLICY BIT(18)
+/*
+ * Early Return:
+ * 0 = desc pointers should be returned after all reads have been completed
+ * 1 = desc pointers should be returned immediately upon fetching
+ * the descriptor and beginning to transfer data.
+ */
+#define CPPI5_INFO2_HDESC_EARLYRET BIT(17)
+/*
+ * Return Push Policy:
+ * 0 = Descriptor must be returned to tail of queue
+ * 1 = Descriptor must be returned to head of queue
+ */
+#define CPPI5_INFO2_DESC_RETPUSHPOLICY BIT(16)
+#define CPPI5_INFO2_DESC_RETQ_SHIFT (0)
+#define CPPI5_INFO2_DESC_RETQ_MASK GENMASK(15, 0)
+
+#define CPPI5_INFO3_DESC_SRCTAG_SHIFT (16U)
+#define CPPI5_INFO3_DESC_SRCTAG_MASK GENMASK(31, 16)
+#define CPPI5_INFO3_DESC_DSTTAG_SHIFT (0)
+#define CPPI5_INFO3_DESC_DSTTAG_MASK GENMASK(15, 0)
+
+#define CPPI5_BUFINFO1_HDESC_DATA_LEN_SHIFT (0)
+#define CPPI5_BUFINFO1_HDESC_DATA_LEN_MASK GENMASK(27, 0)
+
+#define CPPI5_OBUFINFO0_HDESC_BUF_LEN_SHIFT (0)
+#define CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK GENMASK(27, 0)
+
+/*
+ * Host Packet Descriptor Extended Packet Info Block
+ */
+struct cppi5_desc_epib_t {
+ u32 timestamp; /* w0: application specific timestamp */
+ u32 sw_info0; /* w1: Software Info 0 */
+ u32 sw_info1; /* w2: Software Info 1 */
+ u32 sw_info2; /* w3: Software Info 2 */
+};
+
+/**
+ * Monolithic-mode packet descriptor
+ */
+struct cppi5_monolithic_desc_t {
+ struct cppi5_desc_hdr_t hdr;
+ u32 epib[0]; /* Extended Packet Info Data (optional, 4 words) */
+ /*
+ * Protocol Specific Data (optional, 0-128 bytes in multiples of 4),
+ * and/or Other Software Data (0-N bytes, optional)
+ */
+};
+
+#define CPPI5_INFO2_MDESC_DATA_OFFSET_SHIFT (18U)
+#define CPPI5_INFO2_MDESC_DATA_OFFSET_MASK GENMASK(26, 18)
+
+/*
+ * Reload Enable:
+ * 0 = Finish the packet and place the descriptor back on the return queue
+ * 1 = Vector to the Reload Index and resume processing
+ */
+#define CPPI5_INFO0_TRDESC_RLDCNT_SHIFT (20U)
+#define CPPI5_INFO0_TRDESC_RLDCNT_MASK GENMASK(28, 20)
+#define CPPI5_INFO0_TRDESC_RLDCNT_MAX (0x1ff)
+#define CPPI5_INFO0_TRDESC_RLDCNT_INFINITE CPPI5_INFO0_TRDESC_RLDCNT_MAX
+#define CPPI5_INFO0_TRDESC_RLDIDX_SHIFT (14U)
+#define CPPI5_INFO0_TRDESC_RLDIDX_MASK GENMASK(19, 14)
+#define CPPI5_INFO0_TRDESC_RLDIDX_MAX (0x3f)
+#define CPPI5_INFO0_TRDESC_LASTIDX_SHIFT (0)
+#define CPPI5_INFO0_TRDESC_LASTIDX_MASK GENMASK(13, 0)
+
+#define CPPI5_INFO1_TRDESC_RECSIZE_SHIFT (24U)
+#define CPPI5_INFO1_TRDESC_RECSIZE_MASK GENMASK(26, 24)
+#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_16B (0)
+#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_32B (1U)
+#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_64B (2U)
+#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_128B (3U)
+
+static inline void cppi5_desc_dump(void *desc, u32 size)
+{
+ print_hex_dump(KERN_ERR "dump udmap_desc: ", DUMP_PREFIX_NONE,
+ 32, 4, desc, size, false);
+}
+
+/**
+ * cppi5_desc_get_type - get descriptor type
+ * @desc_hdr: packet descriptor/TR header
+ *
+ * Returns descriptor type:
+ * CPPI5_INFO0_DESC_TYPE_VAL_HOST
+ * CPPI5_INFO0_DESC_TYPE_VAL_MONO
+ * CPPI5_INFO0_DESC_TYPE_VAL_TR
+ */
+static inline u32 cppi5_desc_get_type(struct cppi5_desc_hdr_t *desc_hdr)
+{
+ WARN_ON(!desc_hdr);
+
+ return (desc_hdr->pkt_info0 & CPPI5_INFO0_HDESC_TYPE_MASK) >>
+ CPPI5_INFO0_HDESC_TYPE_SHIFT;
+}
+
+/**
+ * cppi5_desc_get_errflags - get Error Flags from Desc
+ * @desc_hdr: packet/TR descriptor header
+ *
+ * Returns Error Flags from Packet/TR Descriptor
+ */
+static inline u32 cppi5_desc_get_errflags(struct cppi5_desc_hdr_t *desc_hdr)
+{
+ WARN_ON(!desc_hdr);
+
+ return (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_PKTERROR_MASK) >>
+ CPPI5_INFO1_DESC_PKTERROR_SHIFT;
+}
+
+/**
+ * cppi5_desc_get_pktids - get Packet and Flow ids from Desc
+ * @desc_hdr: packet/TR descriptor header
+ * @pkt_id: Packet ID
+ * @flow_id: Flow ID
+ *
+ * Returns Packet and Flow ids from packet/TR descriptor
+ */
+static inline void cppi5_desc_get_pktids(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 *pkt_id, u32 *flow_id)
+{
+ WARN_ON(!desc_hdr);
+
+ *pkt_id = (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_PKTID_MASK) >>
+ CPPI5_INFO1_DESC_PKTID_SHIFT;
+ *flow_id = (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_FLOWID_MASK) >>
+ CPPI5_INFO1_DESC_FLOWID_SHIFT;
+}
+
+/**
+ * cppi5_desc_set_pktids - set Packet and Flow ids in Desc
+ * @desc_hdr: packet/TR descriptor header
+ * @pkt_id: Packet ID
+ * @flow_id: Flow ID
+ */
+static inline void cppi5_desc_set_pktids(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 pkt_id, u32 flow_id)
+{
+ WARN_ON(!desc_hdr);
+
+ desc_hdr->pkt_info1 |= (pkt_id << CPPI5_INFO1_DESC_PKTID_SHIFT) &
+ CPPI5_INFO1_DESC_PKTID_MASK;
+ desc_hdr->pkt_info1 |= (flow_id << CPPI5_INFO1_DESC_FLOWID_SHIFT) &
+ CPPI5_INFO1_DESC_FLOWID_MASK;
+}
+
+/**
+ * cppi5_desc_set_retpolicy - set Packet Return Policy in Desc
+ * @desc_hdr: packet/TR descriptor header
+ * @flags: flags, supported values
+ * CPPI5_INFO2_HDESC_RETPOLICY
+ * CPPI5_INFO2_HDESC_EARLYRET
+ * CPPI5_INFO2_DESC_RETPUSHPOLICY
+ * @return_ring_id: Packet Return Queue/Ring id, value 0xFFFF reserved
+ */
+static inline void cppi5_desc_set_retpolicy(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 flags, u32 return_ring_id)
+{
+ WARN_ON(!desc_hdr);
+
+ desc_hdr->pkt_info2 |= flags;
+ desc_hdr->pkt_info2 |= return_ring_id & CPPI5_INFO2_DESC_RETQ_MASK;
+}
+
+/**
+ * cppi5_desc_get_tags_ids - get Packet Src/Dst Tags from Desc
+ * @desc_hdr: packet/TR descriptor header
+ * @src_tag_id: Source Tag
+ * @dst_tag_id: Dest Tag
+ *
+ * Returns Packet Src/Dst Tags from packet/TR descriptor
+ */
+static inline void cppi5_desc_get_tags_ids(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 *src_tag_id, u32 *dst_tag_id)
+{
+ WARN_ON(!desc_hdr);
+
+ if (src_tag_id)
+ *src_tag_id = (desc_hdr->src_dst_tag &
+ CPPI5_INFO3_DESC_SRCTAG_MASK) >>
+ CPPI5_INFO3_DESC_SRCTAG_SHIFT;
+ if (dst_tag_id)
+ *dst_tag_id = desc_hdr->src_dst_tag &
+ CPPI5_INFO3_DESC_DSTTAG_MASK;
+}
+
+/**
+ * cppi5_desc_set_tags_ids - set Packet Src/Dst Tags in HDesc
+ * @desc_hdr: packet/TR descriptor header
+ * @src_tag_id: Source Tag
+ * @dst_tag_id: Dest Tag
+ *
+ * Sets the Packet Src/Dst Tags in the packet/TR descriptor
+ */
+static inline void cppi5_desc_set_tags_ids(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 src_tag_id, u32 dst_tag_id)
+{
+ WARN_ON(!desc_hdr);
+
+ desc_hdr->src_dst_tag = (src_tag_id << CPPI5_INFO3_DESC_SRCTAG_SHIFT) &
+ CPPI5_INFO3_DESC_SRCTAG_MASK;
+ desc_hdr->src_dst_tag |= dst_tag_id & CPPI5_INFO3_DESC_DSTTAG_MASK;
+}
+
+/**
+ * cppi5_hdesc_calc_size - Calculate Host Packet Descriptor size
+ * @epib: is EPIB present
+ * @psdata_size: PSDATA size
+ * @sw_data_size: SWDATA size
+ *
+ * Returns required Host Packet Descriptor size
+ * 0 - if PSDATA > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE
+ */
+static inline u32 cppi5_hdesc_calc_size(bool epib, u32 psdata_size,
+ u32 sw_data_size)
+{
+ u32 desc_size;
+
+ if (psdata_size > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE)
+ return 0;
+ //TODO_GS: align
+ desc_size = sizeof(struct cppi5_host_desc_t) + psdata_size +
+ sw_data_size;
+
+ if (epib)
+ desc_size += CPPI5_INFO0_HDESC_EPIB_SIZE;
+
+ return ALIGN(desc_size, CPPI5_DESC_MIN_ALIGN);
+}
+
+/**
+ * cppi5_hdesc_init - Init Host Packet Descriptor
+ * @desc: Host packet descriptor
+ * @flags: supported values
+ * CPPI5_INFO0_HDESC_EPIB_PRESENT
+ * CPPI5_INFO0_HDESC_PSINFO_LOCATION
+ * @psdata_size: PSDATA size
+ *
+ * Initializes the Host Packet Descriptor header; PSDATA size must not
+ * exceed CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE.
+ */
+static inline void cppi5_hdesc_init(struct cppi5_host_desc_t *desc, u32 flags,
+ u32 psdata_size)
+{
+ WARN_ON(!desc);
+ WARN_ON(psdata_size > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE);
+ WARN_ON(flags & ~(CPPI5_INFO0_HDESC_EPIB_PRESENT |
+ CPPI5_INFO0_HDESC_PSINFO_LOCATION));
+
+ desc->hdr.pkt_info0 = (CPPI5_INFO0_DESC_TYPE_VAL_HOST <<
+ CPPI5_INFO0_HDESC_TYPE_SHIFT) | (flags);
+ desc->hdr.pkt_info0 |= ((psdata_size >> 2) <<
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT) &
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK;
+ desc->next_desc = 0;
+}
+
+/**
+ * cppi5_hdesc_update_flags - Replace descriptor flags
+ * @desc: Host packet descriptor
+ * @flags: supported values
+ * CPPI5_INFO0_HDESC_EPIB_PRESENT
+ * CPPI5_INFO0_HDESC_PSINFO_LOCATION
+ */
+static inline void cppi5_hdesc_update_flags(struct cppi5_host_desc_t *desc,
+ u32 flags)
+{
+ WARN_ON(!desc);
+ WARN_ON(flags & ~(CPPI5_INFO0_HDESC_EPIB_PRESENT |
+ CPPI5_INFO0_HDESC_PSINFO_LOCATION));
+
+ desc->hdr.pkt_info0 &= ~(CPPI5_INFO0_HDESC_EPIB_PRESENT |
+ CPPI5_INFO0_HDESC_PSINFO_LOCATION);
+ desc->hdr.pkt_info0 |= flags;
+}
+
+/**
+ * cppi5_hdesc_update_psdata_size - Replace PSdata size
+ * @desc: Host packet descriptor
+ * @psdata_size: PSDATA size
+ */
+static inline void cppi5_hdesc_update_psdata_size(
+ struct cppi5_host_desc_t *desc, u32 psdata_size)
+{
+ WARN_ON(!desc);
+ WARN_ON(psdata_size > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE);
+
+ desc->hdr.pkt_info0 &= ~CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK;
+ desc->hdr.pkt_info0 |= ((psdata_size >> 2) <<
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT) &
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK;
+}
+
+/**
+ * cppi5_hdesc_get_psdata_size - get PSdata size in bytes
+ * @desc: Host packet descriptor
+ */
+static inline u32 cppi5_hdesc_get_psdata_size(struct cppi5_host_desc_t *desc)
+{
+ u32 psdata_size = 0;
+
+ WARN_ON(!desc);
+
+ if (!(desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION))
+ psdata_size = (desc->hdr.pkt_info0 &
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >>
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT;
+
+ return (psdata_size << 2);
+}
+
+/**
+ * cppi5_hdesc_get_pktlen - get Packet Length from HDesc
+ * @desc: Host packet descriptor
+ *
+ * Returns Packet Length from Host Packet Descriptor
+ */
+static inline u32 cppi5_hdesc_get_pktlen(struct cppi5_host_desc_t *desc)
+{
+ WARN_ON(!desc);
+
+ return (desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PKTLEN_MASK);
+}
+
+/**
+ * cppi5_hdesc_set_pktlen - set Packet Length in HDesc
+ * @desc: Host packet descriptor
+ * @pkt_len: Packet Length
+ */
+static inline void cppi5_hdesc_set_pktlen(struct cppi5_host_desc_t *desc,
+ u32 pkt_len)
+{
+ WARN_ON(!desc);
+
+ desc->hdr.pkt_info0 |= (pkt_len & CPPI5_INFO0_HDESC_PKTLEN_MASK);
+}
+
+/**
+ * cppi5_hdesc_get_psflags - get Protocol Specific Flags from HDesc
+ * @desc: Host packet descriptor
+ *
+ * Returns Protocol Specific Flags from Host Packet Descriptor
+ */
+static inline u32 cppi5_hdesc_get_psflags(struct cppi5_host_desc_t *desc)
+{
+ WARN_ON(!desc);
+
+ return (desc->hdr.pkt_info1 & CPPI5_INFO1_HDESC_PSFLGS_MASK) >>
+ CPPI5_INFO1_HDESC_PSFLGS_SHIFT;
+}
+
+/**
+ * cppi5_hdesc_set_psflags - set Protocol Specific Flags in HDesc
+ * @desc: Host packet descriptor
+ * @ps_flags: Protocol Specific Flags
+ */
+static inline void cppi5_hdesc_set_psflags(struct cppi5_host_desc_t *desc,
+ u32 ps_flags)
+{
+ WARN_ON(!desc);
+
+ desc->hdr.pkt_info1 |= (ps_flags <<
+ CPPI5_INFO1_HDESC_PSFLGS_SHIFT) &
+ CPPI5_INFO1_HDESC_PSFLGS_MASK;
+}
+
+/**
+ * cppi5_hdesc_get_pkttype - get Packet Type from HDesc
+ * @desc: Host packet descriptor
+ */
+static inline u32 cppi5_hdesc_get_pkttype(struct cppi5_host_desc_t *desc)
+{
+ WARN_ON(!desc);
+
+ return (desc->hdr.pkt_info2 & CPPI5_INFO2_HDESC_PKTTYPE_MASK) >>
+ CPPI5_INFO2_HDESC_PKTTYPE_SHIFT;
+}
+
+/**
+ * cppi5_hdesc_set_pkttype - set Packet Type in HDesc
+ * @desc: Host packet descriptor
+ * @pkt_type: Packet Type
+ */
+static inline void cppi5_hdesc_set_pkttype(struct cppi5_host_desc_t *desc,
+ u32 pkt_type)
+{
+ WARN_ON(!desc);
+ desc->hdr.pkt_info2 |=
+ (pkt_type << CPPI5_INFO2_HDESC_PKTTYPE_SHIFT) &
+ CPPI5_INFO2_HDESC_PKTTYPE_MASK;
+}
+
+/**
+ * cppi5_hdesc_attach_buf - attach buffer to HDesc
+ * @desc: Host packet descriptor
+ * @buf: Buffer physical address
+ * @buf_data_len: Buffer length
+ * @obuf: Original Buffer physical address
+ * @obuf_len: Original Buffer length
+ *
+ * Attaches buffer to Host Packet Descriptor
+ */
+static inline void cppi5_hdesc_attach_buf(struct cppi5_host_desc_t *desc,
+ dma_addr_t buf, u32 buf_data_len,
+ dma_addr_t obuf, u32 obuf_len)
+{
+ WARN_ON(!desc);
+ WARN_ON(!buf && !obuf);
+
+ desc->buf_ptr = buf;
+ desc->buf_info1 = buf_data_len & CPPI5_BUFINFO1_HDESC_DATA_LEN_MASK;
+ desc->org_buf_ptr = obuf;
+ desc->org_buf_len = obuf_len & CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK;
+}
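+
+/*
+ * Example (illustrative sketch, not part of the original API): typical
+ * preparation of a single-buffer host TX packet descriptor. The descriptor
+ * memory (sized via cppi5_hdesc_calc_size()) and the DMA-mapped buffer are
+ * assumed to be provided by the caller.
+ */
+static inline void cppi5_hdesc_example_prep_tx(struct cppi5_host_desc_t *desc,
+					       dma_addr_t buf_dma, u32 len)
+{
+	/* No EPIB, no PSDATA words carried in the descriptor */
+	cppi5_hdesc_init(desc, 0, 0);
+	cppi5_hdesc_set_pktlen(desc, len);
+	/* Single-buffer packet: buffer and original buffer are the same */
+	cppi5_hdesc_attach_buf(desc, buf_dma, len, buf_dma, len);
+}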
+
+static inline void cppi5_hdesc_get_obuf(struct cppi5_host_desc_t *desc,
+ dma_addr_t *obuf, u32 *obuf_len)
+{
+ WARN_ON(!desc);
+ WARN_ON(!obuf);
+ WARN_ON(!obuf_len);
+
+ *obuf = desc->org_buf_ptr;
+ *obuf_len = desc->org_buf_len & CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK;
+}
+
+static inline void cppi5_hdesc_reset_to_original(struct cppi5_host_desc_t *desc)
+{
+ WARN_ON(!desc);
+
+ desc->buf_ptr = desc->org_buf_ptr;
+ desc->buf_info1 = desc->org_buf_len;
+}
+
+/**
+ * cppi5_hdesc_link_hbdesc - link Host Buffer Descriptor to HDesc
+ * @desc: Host Packet Descriptor
+ * @hbuf_desc: Host Buffer Descriptor physical address
+ *
+ * Adds and links a Host Buffer Descriptor to the HDesc.
+ */
+static inline void cppi5_hdesc_link_hbdesc(struct cppi5_host_desc_t *desc,
+ dma_addr_t hbuf_desc)
+{
+ WARN_ON(!desc);
+ WARN_ON(!hbuf_desc);
+
+ desc->next_desc = hbuf_desc;
+}
+
+static inline dma_addr_t cppi5_hdesc_get_next_hbdesc(
+ struct cppi5_host_desc_t *desc)
+{
+ WARN_ON(!desc);
+
+ return (dma_addr_t)desc->next_desc;
+}
+
+static inline void cppi5_hdesc_reset_hbdesc(struct cppi5_host_desc_t *desc)
+{
+ WARN_ON(!desc);
+
+ desc->hdr = (struct cppi5_desc_hdr_t) { 0 };
+ desc->next_desc = 0;
+}
+
+/**
+ * cppi5_hdesc_epib_present - check if EPIB present
+ * @desc_hdr: packet descriptor/TR header
+ *
+ * Returns true if EPIB present in the packet
+ */
+static inline bool cppi5_hdesc_epib_present(struct cppi5_desc_hdr_t *desc_hdr)
+{
+ WARN_ON(!desc_hdr);
+ return !!(desc_hdr->pkt_info0 & CPPI5_INFO0_HDESC_EPIB_PRESENT);
+}
+
+/**
+ * cppi5_hdesc_get_psdata - Get pointer on PSDATA
+ * @desc: Host packet descriptor
+ *
+ * Returns a pointer to the PSDATA in the HDesc, or NULL if the PS data is
+ * placed at the start of the data buffer.
+ */
+static inline void *cppi5_hdesc_get_psdata(struct cppi5_host_desc_t *desc)
+{
+ u32 psdata_size;
+ void *psdata;
+
+ WARN_ON(!desc);
+
+ if (desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION)
+ return NULL;
+
+ psdata_size = (desc->hdr.pkt_info0 &
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >>
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT;
+
+ if (!psdata_size)
+ return NULL;
+
+ psdata = &desc->epib;
+
+ if (cppi5_hdesc_epib_present(&desc->hdr))
+ psdata += CPPI5_INFO0_HDESC_EPIB_SIZE;
+
+ return psdata;
+}
+
+static inline u32 *cppi5_hdesc_get_psdata32(struct cppi5_host_desc_t *desc)
+{
+ return (u32 *)cppi5_hdesc_get_psdata(desc);
+}
+
+/**
+ * cppi5_hdesc_get_swdata - Get pointer on swdata
+ * @desc: Host packet descriptor
+ *
+ * Returns a pointer to the SWDATA in the HDesc.
+ * NOTE: it is the caller's responsibility to make sure the hdesc actually
+ * has swdata.
+ */
+static inline void *cppi5_hdesc_get_swdata(struct cppi5_host_desc_t *desc)
+{
+ u32 psdata_size = 0;
+ void *swdata;
+
+ WARN_ON(!desc);
+
+ if (!(desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION))
+ psdata_size = (desc->hdr.pkt_info0 &
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >>
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT;
+
+ swdata = &desc->epib;
+
+ if (cppi5_hdesc_epib_present(&desc->hdr))
+ swdata += CPPI5_INFO0_HDESC_EPIB_SIZE;
+
+ swdata += (psdata_size << 2);
+
+ return swdata;
+}
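+
+/*
+ * Example (illustrative sketch): SW data is commonly used to stash a driver
+ * cookie (for example the virtual address of the descriptor) so it can be
+ * recovered when the descriptor comes back on a completion ring. The cookie
+ * layout is entirely driver defined.
+ */
+static inline void cppi5_hdesc_example_set_cookie(struct cppi5_host_desc_t *desc,
+						  void *cookie)
+{
+	void **swdata = cppi5_hdesc_get_swdata(desc);
+
+	*swdata = cookie;
+}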
+
+/* ================================== TR ================================== */
+
+#define CPPI5_TR_TYPE_SHIFT (0U)
+#define CPPI5_TR_TYPE_MASK GENMASK(3, 0)
+#define CPPI5_TR_STATIC BIT(4)
+#define CPPI5_TR_WAIT BIT(5)
+#define CPPI5_TR_EVENT_SIZE_SHIFT (6U)
+#define CPPI5_TR_EVENT_SIZE_MASK GENMASK(7, 6)
+#define CPPI5_TR_TRIGGER0_SHIFT (8U)
+#define CPPI5_TR_TRIGGER0_MASK GENMASK(9, 8)
+#define CPPI5_TR_TRIGGER0_TYPE_SHIFT (10U)
+#define CPPI5_TR_TRIGGER0_TYPE_MASK GENMASK(11, 10)
+#define CPPI5_TR_TRIGGER1_SHIFT (12U)
+#define CPPI5_TR_TRIGGER1_MASK GENMASK(13, 12)
+#define CPPI5_TR_TRIGGER1_TYPE_SHIFT (14U)
+#define CPPI5_TR_TRIGGER1_TYPE_MASK GENMASK(15, 14)
+#define CPPI5_TR_CMD_ID_SHIFT (16U)
+#define CPPI5_TR_CMD_ID_MASK GENMASK(23, 16)
+#define CPPI5_TR_CSF_FLAGS_SHIFT (24U)
+#define CPPI5_TR_CSF_FLAGS_MASK GENMASK(31, 24)
+#define CPPI5_TR_CSF_SA_INDIRECT BIT(0)
+#define CPPI5_TR_CSF_DA_INDIRECT BIT(1)
+#define CPPI5_TR_CSF_SUPR_EVT BIT(2)
+#define CPPI5_TR_CSF_EOL_ADV_SHIFT (4U)
+#define CPPI5_TR_CSF_EOL_ADV_MASK GENMASK(6, 4)
+#define CPPI5_TR_CSF_EOP BIT(7)
+
+/* Udmap TR flags Type field specifies the type of TR. */
+enum cppi5_tr_types {
+ /* type0: One dimensional data move */
+ CPPI5_TR_TYPE0 = 0,
+ /* type1: Two dimensional data move */
+ CPPI5_TR_TYPE1,
+ /* type2: Three dimensional data move */
+ CPPI5_TR_TYPE2,
+ /* type3: Four dimensional data move */
+ CPPI5_TR_TYPE3,
+ /* type4: Four dimensional data move with data formatting */
+ CPPI5_TR_TYPE4,
+ /* type5: Four dimensional Cache Warm */
+ CPPI5_TR_TYPE5,
+ /* type6-7: Reserved */
+ /* type8: Four Dimensional Block Move */
+ CPPI5_TR_TYPE8 = 8,
+ /* type9: Four Dimensional Block Move with Repacking */
+ CPPI5_TR_TYPE9,
+ /* type10: Two Dimensional Block Move */
+ CPPI5_TR_TYPE10,
+ /* type11: Two Dimensional Block Move with Repacking */
+ CPPI5_TR_TYPE11,
+ /* type12-14: Reserved */
+ /* type15 Four Dimensional Block Move with Repacking and Indirection */
+ CPPI5_TR_TYPE15 = 15,
+ CPPI5_TR_TYPE_MAX
+};
+
+/*
+ * Udmap TR Flags EVENT_SIZE field specifies when an event is generated
+ * for each TR.
+ */
+enum cppi5_tr_event_size {
+ /* When TR is complete and all status for the TR has been received */
+ CPPI5_TR_EVENT_SIZE_COMPLETION,
+ /*
+ * Type 0: when the last data transaction is sent for the TR;
+ * Type 1-11: when ICNT1 is decremented
+ */
+ CPPI5_TR_EVENT_SIZE_ICNT1_DEC,
+ /*
+ * Type 0-1,10-11: when the last data transaction is sent for the TR;
+ * All other types: when ICNT2 is decremented
+ */
+ CPPI5_TR_EVENT_SIZE_ICNT2_DEC,
+ /*
+ * Type 0-2,10-11: when the last data transaction is sent for the TR;
+ * All other types: when ICNT3 is decremented
+ */
+ CPPI5_TR_EVENT_SIZE_ICNT3_DEC,
+ CPPI5_TR_EVENT_SIZE_MAX
+};
+
+/*
+ * Udmap TR Flags TRIGGERx field specifies the type of trigger used to
+ * enable the TR to transfer data as specified by TRIGGERx_TYPE field.
+ */
+enum cppi5_tr_trigger {
+ CPPI5_TR_TRIGGER_NONE, /* No Trigger */
+ CPPI5_TR_TRIGGER_GLOBAL0, /* Global Trigger 0 */
+ CPPI5_TR_TRIGGER_GLOBAL1, /* Global Trigger 1 */
+ CPPI5_TR_TRIGGER_LOCAL_EVENT, /* Local Event */
+ CPPI5_TR_TRIGGER_MAX
+};
+
+/*
+ * Udmap TR Flags TRIGGERx_TYPE field specifies the type of data transfer
+ * that will be enabled by receiving a trigger as specified by TRIGGERx.
+ */
+enum cppi5_tr_trigger_type {
+ /* The second inner most loop (ICNT1) will be decremented by 1 */
+ CPPI5_TR_TRIGGER_TYPE_ICNT1_DEC,
+ /* The third inner most loop (ICNT2) will be decremented by 1 */
+ CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
+ /* The outer most loop (ICNT3) will be decremented by 1 */
+ CPPI5_TR_TRIGGER_TYPE_ICNT3_DEC,
+ /* The entire TR will be allowed to complete */
+ CPPI5_TR_TRIGGER_TYPE_ALL,
+ CPPI5_TR_TRIGGER_TYPE_MAX
+};
+
+typedef u32 cppi5_tr_flags_t;
+
+/* Type 0 (One dimensional data move) TR (16 byte) */
+struct cppi5_tr_type0_t {
+ cppi5_tr_flags_t flags;
+ u16 icnt0;
+ u16 unused;
+ u64 addr;
+} __aligned(16) __packed;
+
+/* Type 1 (Two dimensional data move) TR (32 byte) */
+struct cppi5_tr_type1_t {
+ cppi5_tr_flags_t flags;
+ u16 icnt0;
+ u16 icnt1;
+ u64 addr;
+ s32 dim1;
+} __aligned(32) __packed;
+
+/* Type 2 (Three dimensional data move) TR (32 byte) */
+struct cppi5_tr_type2_t {
+ cppi5_tr_flags_t flags;
+ u16 icnt0;
+ u16 icnt1;
+ u64 addr;
+ s32 dim1;
+ u16 icnt2;
+ u16 unused;
+ s32 dim2;
+} __aligned(32) __packed;
+
+/* Type 3 (Four dimensional data move) TR (32 byte) */
+struct cppi5_tr_type3_t {
+ cppi5_tr_flags_t flags;
+ u16 icnt0;
+ u16 icnt1;
+ u64 addr;
+ s32 dim1;
+ u16 icnt2;
+ u16 icnt3;
+ s32 dim2;
+ s32 dim3;
+} __aligned(32) __packed;
+
+/*
+ * Type 15 (Four Dimensional Block Copy with Repacking and
+ * Indirection Support) TR (64 byte).
+ */
+struct cppi5_tr_type15_t {
+ cppi5_tr_flags_t flags;
+ u16 icnt0;
+ u16 icnt1;
+ u64 addr;
+ s32 dim1;
+ u16 icnt2;
+ u16 icnt3;
+ s32 dim2;
+ s32 dim3;
+ u32 _reserved;
+ s32 ddim1;
+ u64 daddr;
+ s32 ddim2;
+ s32 ddim3;
+ u16 dicnt0;
+ u16 dicnt1;
+ u16 dicnt2;
+ u16 dicnt3;
+} __aligned(64) __packed;
+
+struct cppi5_tr_resp_t {
+ u8 status;
+ u8 reserved;
+ u8 cmd_id;
+ u8 flags;
+} __packed;
+
+#define CPPI5_TR_RESPONSE_STATUS_TYPE_SHIFT (0U)
+#define CPPI5_TR_RESPONSE_STATUS_TYPE_MASK GENMASK(3, 0)
+#define CPPI5_TR_RESPONSE_STATUS_INFO_SHIFT (4U)
+#define CPPI5_TR_RESPONSE_STATUS_INFO_MASK GENMASK(7, 4)
+#define CPPI5_TR_RESPONSE_CMDID_SHIFT (16U)
+#define CPPI5_TR_RESPONSE_CMDID_MASK GENMASK(23, 16)
+#define CPPI5_TR_RESPONSE_CFG_SPECIFIC_SHIFT (24U)
+#define CPPI5_TR_RESPONSE_CFG_SPECIFIC_MASK GENMASK(31, 24)
+
+/*
+ * Udmap TR Response Status Type field is used to determine
+ * what type of status is being returned.
+ */
+enum cppi5_tr_resp_status_type {
+ CPPI5_TR_RESPONSE_STATUS_COMPLETE, /* None */
+ CPPI5_TR_RESPONSE_STATUS_TRANSFER_ERR, /* Transfer Error */
+ CPPI5_TR_RESPONSE_STATUS_ABORTED_ERR, /* Aborted Error */
+ CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ERR, /* Submission Error */
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ERR, /* Unsup. Feature */
+ CPPI5_TR_RESPONSE_STATUS_MAX
+};
+
+/*
+ * Udmap TR Response Status field values which correspond to
+ * CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ERR
+ */
+enum cppi5_tr_resp_status_submission {
+ /* ICNT0 was 0 */
+ CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ICNT0,
+ /* Channel FIFO was full when TR received */
+ CPPI5_TR_RESPONSE_STATUS_SUBMISSION_FIFO_FULL,
+ /* Channel is not owned by the submitter */
+ CPPI5_TR_RESPONSE_STATUS_SUBMISSION_OWN,
+ CPPI5_TR_RESPONSE_STATUS_SUBMISSION_MAX
+};
+
+/*
+ * Udmap TR Response Status field values which correspond to
+ * CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ERR
+ */
+enum cppi5_tr_resp_status_unsupported {
+ /* TR Type not supported */
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_TR_TYPE,
+ /* STATIC not supported */
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_STATIC,
+ /* EOL not supported */
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_EOL,
+ /* CONFIGURATION SPECIFIC not supported */
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_CFG_SPECIFIC,
+ /* AMODE not supported */
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE,
+ /* ELTYPE not supported */
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ELTYPE,
+ /* DFMT not supported */
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_DFMT,
+ /* SECTR not supported */
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_SECTR,
+ /* AMODE SPECIFIC field not supported */
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE_SPECIFIC,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_MAX
+};
+
+/**
+ * cppi5_trdesc_calc_size - Calculate TR Descriptor size
+ * @tr_count: number of TR records
+ * @tr_size: Nominal size of TR record (max) [16, 32, 64, 128]
+ *
+ * Returns required TR Descriptor size
+ */
+static inline size_t cppi5_trdesc_calc_size(u32 tr_count, u32 tr_size)
+{
+ /*
+ * The Size of a TR descriptor is:
+ * 1 x tr_size : the first 16 bytes is used by the packet info block +
+ * tr_count x tr_size : Transfer Request Records +
+ * tr_count x sizeof(struct cppi5_tr_resp_t) : Transfer Response Records
+ */
+ return tr_size * (tr_count + 1) +
+ sizeof(struct cppi5_tr_resp_t) * tr_count;
+}
+
+/**
+ * cppi5_trdesc_init - Init TR Descriptor
+ * @desc_hdr: TR Descriptor header
+ * @tr_count: number of TR records
+ * @tr_size: Nominal size of TR record (max) [16, 32, 64, 128]
+ * @reload_idx: Absolute index to jump to on the 2nd and following passes
+ * through the TR packet.
+ * @reload_count: Number of times to jump from last entry to reload_idx. 0x1ff
+ * indicates infinite looping.
+ *
+ * Init TR Descriptor
+ */
+static inline void cppi5_trdesc_init(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 tr_count, u32 tr_size, u32 reload_idx,
+ u32 reload_count)
+{
+ WARN_ON(!desc_hdr);
+ WARN_ON(tr_count & ~CPPI5_INFO0_TRDESC_LASTIDX_MASK);
+ WARN_ON(reload_idx > CPPI5_INFO0_TRDESC_RLDIDX_MAX);
+ WARN_ON(reload_count > CPPI5_INFO0_TRDESC_RLDCNT_MAX);
+
+ desc_hdr->pkt_info0 = CPPI5_INFO0_DESC_TYPE_VAL_TR <<
+ CPPI5_INFO0_HDESC_TYPE_SHIFT;
+ desc_hdr->pkt_info0 |= (reload_count << CPPI5_INFO0_TRDESC_RLDCNT_SHIFT) &
+ CPPI5_INFO0_TRDESC_RLDCNT_MASK;
+ desc_hdr->pkt_info0 |= (reload_idx << CPPI5_INFO0_TRDESC_RLDIDX_SHIFT) &
+ CPPI5_INFO0_TRDESC_RLDIDX_MASK;
+ desc_hdr->pkt_info0 |= (tr_count - 1) & CPPI5_INFO0_TRDESC_LASTIDX_MASK;
+
+ desc_hdr->pkt_info1 |= ((ffs(tr_size >> 4) - 1) <<
+ CPPI5_INFO1_TRDESC_RECSIZE_SHIFT) &
+ CPPI5_INFO1_TRDESC_RECSIZE_MASK;
+}
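+
+/*
+ * Example (illustrative sketch): laying out a TR descriptor that carries a
+ * single 64 byte TR record. As cppi5_trdesc_calc_size() documents, the first
+ * tr_size bytes hold the descriptor header, the TR records follow, and the
+ * response records come last. The memory is assumed to be zero-initialized
+ * and at least cppi5_trdesc_calc_size(1, 64) bytes long.
+ */
+static inline struct cppi5_tr_type15_t *
+cppi5_trdesc_example_layout(void *tr_desc_mem)
+{
+	const u32 tr_size = 64;
+
+	cppi5_trdesc_init(tr_desc_mem, 1, tr_size, 0, 0);
+
+	/* The first TR record starts right after the tr_size byte header area */
+	return tr_desc_mem + tr_size;
+}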
+
+/**
+ * cppi5_tr_init - Init TR record
+ * @flags: Pointer to the TR's flags
+ * @type: TR type
+ * @static_tr: TR is static
+ * @wait: Wait for TR completion before allowing the next TR to start
+ * @event_size: output event generation cfg
+ * @cmd_id: TR identifier (application specifics)
+ *
+ * Init TR record
+ */
+static inline void cppi5_tr_init(cppi5_tr_flags_t *flags,
+ enum cppi5_tr_types type, bool static_tr,
+ bool wait, enum cppi5_tr_event_size event_size,
+ u32 cmd_id)
+{
+ WARN_ON(!flags);
+
+ *flags = type;
+ *flags |= (event_size << CPPI5_TR_EVENT_SIZE_SHIFT) &
+ CPPI5_TR_EVENT_SIZE_MASK;
+
+ *flags |= (cmd_id << CPPI5_TR_CMD_ID_SHIFT) &
+ CPPI5_TR_CMD_ID_MASK;
+
+ if (static_tr && (type == CPPI5_TR_TYPE8 || type == CPPI5_TR_TYPE9))
+ *flags |= CPPI5_TR_STATIC;
+
+ if (wait)
+ *flags |= CPPI5_TR_WAIT;
+}
+
+/**
+ * cppi5_tr_set_trigger - Configure trigger0/1 and trigger0/1_type
+ * @flags: Pointer to the TR's flags
+ * @trigger0: trigger0 selection
+ * @trigger0_type: type of data transfer that will be enabled by trigger0
+ * @trigger1: trigger1 selection
+ * @trigger1_type: type of data transfer that will be enabled by trigger1
+ *
+ * Configure the triggers for the TR
+ */
+static inline void cppi5_tr_set_trigger(cppi5_tr_flags_t *flags,
+ enum cppi5_tr_trigger trigger0,
+ enum cppi5_tr_trigger_type trigger0_type,
+ enum cppi5_tr_trigger trigger1,
+ enum cppi5_tr_trigger_type trigger1_type)
+{
+ WARN_ON(!flags);
+
+ *flags |= (trigger0 << CPPI5_TR_TRIGGER0_SHIFT) &
+ CPPI5_TR_TRIGGER0_MASK;
+ *flags |= (trigger0_type << CPPI5_TR_TRIGGER0_TYPE_SHIFT) &
+ CPPI5_TR_TRIGGER0_TYPE_MASK;
+
+ *flags |= (trigger1 << CPPI5_TR_TRIGGER1_SHIFT) &
+ CPPI5_TR_TRIGGER1_MASK;
+ *flags |= (trigger1_type << CPPI5_TR_TRIGGER1_TYPE_SHIFT) &
+ CPPI5_TR_TRIGGER1_TYPE_MASK;
+}
+
+/**
+ * cppi5_tr_csf_set - Update the Configuration specific flags
+ * @flags: Pointer to the TR's flags
+ * @csf: Configuration specific flags
+ *
+ * Set a bit in Configuration Specific Flags section of the TR flags.
+ */
+static inline void cppi5_tr_csf_set(cppi5_tr_flags_t *flags, u32 csf)
+{
+ WARN_ON(!flags);
+
+ *flags |= (csf << CPPI5_TR_CSF_FLAGS_SHIFT) &
+ CPPI5_TR_CSF_FLAGS_MASK;
+}
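+
+/*
+ * Example (illustrative sketch): filling one type15 TR for a linear
+ * (one dimensional) copy, roughly the way a mem-to-mem UDMA transfer is
+ * programmed. 'len' is assumed to fit into ICNT0 (16 bits); larger
+ * transfers must be split across the icnt/dim fields.
+ */
+static inline void cppi5_tr_example_fill_copy(struct cppi5_tr_type15_t *tr,
+					      dma_addr_t src, dma_addr_t dst,
+					      u16 len)
+{
+	cppi5_tr_init(&tr->flags, CPPI5_TR_TYPE15, false, true,
+		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+	/* Mark the only TR as end of packet */
+	cppi5_tr_csf_set(&tr->flags, CPPI5_TR_CSF_EOP);
+
+	tr->icnt0 = len;
+	tr->icnt1 = 1;
+	tr->icnt2 = 1;
+	tr->icnt3 = 1;
+	tr->dim1 = tr->icnt0;
+	tr->dim2 = 0;
+	tr->dim3 = 0;
+	tr->addr = src;
+
+	tr->dicnt0 = len;
+	tr->dicnt1 = 1;
+	tr->dicnt2 = 1;
+	tr->dicnt3 = 1;
+	tr->ddim1 = tr->dicnt0;
+	tr->ddim2 = 0;
+	tr->ddim3 = 0;
+	tr->daddr = dst;
+}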
+
+#endif /* __TI_CPPI5_H__ */
diff --git a/roms/u-boot/include/linux/soc/ti/k3-navss-ringacc.h b/roms/u-boot/include/linux/soc/ti/k3-navss-ringacc.h
new file mode 100644
index 000000000..0ad8f203d
--- /dev/null
+++ b/roms/u-boot/include/linux/soc/ti/k3-navss-ringacc.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * TI K3 AM65x NAVSS Ring accelerator Manager (RA) subsystem driver
+ *
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef __SOC_TI_K3_NAVSS_RINGACC_API_H_
+#define __SOC_TI_K3_NAVSS_RINGACC_API_H_
+
+#include <dm/ofnode.h>
+#include <linux/bitops.h>
+
+/**
+ * enum k3_nav_ring_mode - &struct k3_nav_ring_cfg mode
+ *
+ * RA ring operational modes
+ *
+ * @K3_NAV_RINGACC_RING_MODE_RING: Exposed Ring mode for SW direct access
+ * @K3_NAV_RINGACC_RING_MODE_MESSAGE: Messaging mode. Messaging mode requires
+ * that all accesses to the queue must go through this IP so that all
+ * accesses to the memory are controlled and ordered. This IP then
+ * controls the entire state of the queue, and SW has no direct control,
+ * such as through doorbells, and cannot access the storage memory directly.
+ * This is particularly useful when more than one SW or HW entity can be
+ * the producer and/or consumer at the same time
+ * @K3_NAV_RINGACC_RING_MODE_CREDENTIALS: Credentials mode is message mode plus
+ * stores credentials with each message, requiring the element size to be
+ * doubled to fit the credentials. Any exposed memory should be protected
+ * by a firewall from unwanted access
+ * @K3_NAV_RINGACC_RING_MODE_QM: Queue manager mode. This takes the credentials
+ * mode and adds packet length per element, along with additional read only
+ * fields for element count and accumulated queue length. The QM mode only
+ * operates with an 8 byte element size (any other element size is
+ * illegal), and like in credentials mode each operation uses 2 element
+ * slots to store the credentials and length fields
+ */
+enum k3_nav_ring_mode {
+ K3_NAV_RINGACC_RING_MODE_RING = 0,
+ K3_NAV_RINGACC_RING_MODE_MESSAGE,
+ K3_NAV_RINGACC_RING_MODE_CREDENTIALS,
+ K3_NAV_RINGACC_RING_MODE_QM,
+ k3_NAV_RINGACC_RING_MODE_INVALID
+};
+
+/**
+ * enum k3_nav_ring_size - &struct k3_nav_ring_cfg elm_size
+ *
+ * RA ring element's sizes in bytes.
+ */
+enum k3_nav_ring_size {
+ K3_NAV_RINGACC_RING_ELSIZE_4 = 0,
+ K3_NAV_RINGACC_RING_ELSIZE_8,
+ K3_NAV_RINGACC_RING_ELSIZE_16,
+ K3_NAV_RINGACC_RING_ELSIZE_32,
+ K3_NAV_RINGACC_RING_ELSIZE_64,
+ K3_NAV_RINGACC_RING_ELSIZE_128,
+ K3_NAV_RINGACC_RING_ELSIZE_256,
+ K3_NAV_RINGACC_RING_ELSIZE_INVALID
+};
+
+struct k3_nav_ringacc;
+struct k3_nav_ring;
+
+/**
+ * struct k3_nav_ring_cfg - RA ring configuration structure
+ *
+ * @size: Ring size, number of elements
+ * @elm_size: Ring element size
+ * @mode: Ring operational mode
+ * @flags: Ring configuration flags. Possible values:
+ * @K3_NAV_RINGACC_RING_SHARED: when set, the same ring can be requested
+ * several times. This is useful when the same ring is used as a Free Host
+ * PD ring for different flows, for example.
+ * Note: Locking should be done by consumer if required
+ */
+struct k3_nav_ring_cfg {
+ u32 size;
+ enum k3_nav_ring_size elm_size;
+ enum k3_nav_ring_mode mode;
+#define K3_NAV_RINGACC_RING_SHARED BIT(1)
+ u32 flags;
+};
+
+#define K3_NAV_RINGACC_RING_ID_ANY (-1)
+
+/**
+ * k3_nav_ringacc_request_ring - request ring from ringacc
+ * @ringacc: pointer on ringacc
+ * @id: ring id or K3_NAV_RINGACC_RING_ID_ANY for any general purpose ring
+ *
+ * Returns a pointer to the ring (struct k3_nav_ring),
+ * or NULL in case of failure.
+ */
+struct k3_nav_ring *k3_nav_ringacc_request_ring(struct k3_nav_ringacc *ringacc,
+ int id);
+
+int k3_nav_ringacc_request_rings_pair(struct k3_nav_ringacc *ringacc,
+ int fwd_id, int compl_id,
+ struct k3_nav_ring **fwd_ring,
+ struct k3_nav_ring **compl_ring);
+/**
+ * k3_nav_ringacc_get_dev - get pointer on RA device
+ * @ringacc: pointer on RA
+ *
+ * Returns device pointer
+ */
+struct udevice *k3_nav_ringacc_get_dev(struct k3_nav_ringacc *ringacc);
+
+/**
+ * k3_nav_ringacc_ring_reset - ring reset
+ * @ring: pointer on Ring
+ *
+ * Resets ring internal state ((hw)occ, (hw)idx).
+ * TODO_GS: ? Ring can be reused without reconfiguration
+ */
+void k3_nav_ringacc_ring_reset(struct k3_nav_ring *ring);
+/**
+ * k3_nav_ringacc_ring_reset_dma - ring reset for DMA rings
+ * @ring: pointer on Ring
+ *
+ * Resets ring internal state ((hw)occ, (hw)idx). Should be used for rings
+ * which are read by K3 UDMA, like TX or Free Host PD rings.
+ */
+void k3_nav_ringacc_ring_reset_dma(struct k3_nav_ring *ring, u32 occ);
+
+/**
+ * k3_nav_ringacc_ring_free - ring free
+ * @ring: pointer on Ring
+ *
+ * Resets the ring and frees all allocated resources.
+ */
+int k3_nav_ringacc_ring_free(struct k3_nav_ring *ring);
+
+/**
+ * k3_nav_ringacc_get_ring_id - Get the Ring ID
+ * @ring: pointer on ring
+ *
+ * Returns the Ring ID
+ */
+u32 k3_nav_ringacc_get_ring_id(struct k3_nav_ring *ring);
+
+/**
+ * k3_nav_ringacc_ring_cfg - ring configure
+ * @ring: pointer on ring
+ * @cfg: Ring configuration parameters (see &struct k3_nav_ring_cfg)
+ *
+ * Configures ring, including ring memory allocation.
+ * Returns 0 on success, errno otherwise.
+ */
+int k3_nav_ringacc_ring_cfg(struct k3_nav_ring *ring,
+ struct k3_nav_ring_cfg *cfg);
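+
+/*
+ * Example (illustrative sketch): requesting a forward/completion ring pair
+ * and configuring both with the same geometry. The element size and ring
+ * depth here are hypothetical; real drivers derive them from DT and the
+ * TISCI resource ranges.
+ */
+static inline int k3_nav_ringacc_example_setup_pair(struct k3_nav_ringacc *ringacc,
+						    int fwd_id, int compl_id,
+						    struct k3_nav_ring **fwd_ring,
+						    struct k3_nav_ring **compl_ring)
+{
+	struct k3_nav_ring_cfg ring_cfg = {
+		.size = 16,
+		.elm_size = K3_NAV_RINGACC_RING_ELSIZE_8,
+		.mode = K3_NAV_RINGACC_RING_MODE_RING,
+	};
+	int ret;
+
+	ret = k3_nav_ringacc_request_rings_pair(ringacc, fwd_id, compl_id,
+						fwd_ring, compl_ring);
+	if (ret)
+		return ret;
+
+	ret = k3_nav_ringacc_ring_cfg(*fwd_ring, &ring_cfg);
+	if (ret)
+		return ret;
+
+	return k3_nav_ringacc_ring_cfg(*compl_ring, &ring_cfg);
+}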
+
+/**
+ * k3_nav_ringacc_ring_get_size - get ring size
+ * @ring: pointer on ring
+ *
+ * Returns ring size in number of elements.
+ */
+u32 k3_nav_ringacc_ring_get_size(struct k3_nav_ring *ring);
+
+/**
+ * k3_nav_ringacc_ring_get_free - get free elements
+ * @ring: pointer on ring
+ *
+ * Returns number of free elements in the ring.
+ */
+u32 k3_nav_ringacc_ring_get_free(struct k3_nav_ring *ring);
+
+/**
+ * k3_nav_ringacc_ring_get_occ - get ring occupancy
+ * @ring: pointer on ring
+ *
+ * Returns total number of valid entries on the ring
+ */
+u32 k3_nav_ringacc_ring_get_occ(struct k3_nav_ring *ring);
+
+/**
+ * k3_nav_ringacc_ring_is_full - checks if ring is full
+ * @ring: pointer on ring
+ *
+ * Returns true if the ring is full
+ */
+u32 k3_nav_ringacc_ring_is_full(struct k3_nav_ring *ring);
+
+/**
+ * k3_nav_ringacc_ring_push - push element to the ring tail
+ * @ring: pointer on ring
+ * @elem: pointer on ring element buffer
+ *
+ * Push one ring element to the ring tail. Size of the ring element is
+ * determined by ring configuration &struct k3_nav_ring_cfg elm_size.
+ *
+ * Returns 0 on success, errno otherwise.
+ */
+int k3_nav_ringacc_ring_push(struct k3_nav_ring *ring, void *elem);
+
+/**
+ * k3_nav_ringacc_ring_pop - pop element from the ring head
+ * @ring: pointer on ring
+ * @elem: pointer on ring element buffer
+ *
+ * Pop one ring element from the ring head. Size of the ring element is
+ * determined by ring configuration &struct k3_nav_ring_cfg elm_size.
+ *
+ * Returns 0 on success, errno otherwise.
+ */
+int k3_nav_ringacc_ring_pop(struct k3_nav_ring *ring, void *elem);
+
+/**
+ * k3_nav_ringacc_ring_push_head - push element to the ring head
+ * @ring: pointer on ring
+ * @elem: pointer on ring element buffer
+ *
+ * Push one ring element to the ring head. Size of the ring element is
+ * determined by ring configuration &struct k3_nav_ring_cfg elm_size.
+ *
+ * Returns 0 on success, errno otherwise.
+ * Not Supported by ring modes: K3_NAV_RINGACC_RING_MODE_RING
+ */
+int k3_nav_ringacc_ring_push_head(struct k3_nav_ring *ring, void *elem);
+
+/**
+ * k3_nav_ringacc_ring_pop_tail - pop element from the ring tail
+ * @ring: pointer on ring
+ * @elem: pointer on ring element buffer
+ *
+ * Pop one ring element from the ring tail. Size of the ring element is
+ * determined by ring configuration &struct k3_nav_ring_cfg elm_size.
+ *
+ * Returns 0 on success, errno otherwise.
+ * Not Supported by ring modes: K3_NAV_RINGACC_RING_MODE_RING
+ */
+int k3_nav_ringacc_ring_pop_tail(struct k3_nav_ring *ring, void *elem);
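+
+/*
+ * Example (illustrative sketch): pushing one 8 byte element (typically a
+ * descriptor DMA address) to a forward ring and busy-polling the paired
+ * completion ring for the result. Real drivers add timeouts and cache
+ * maintenance around this.
+ */
+static inline int k3_nav_ringacc_example_xfer(struct k3_nav_ring *fwd_ring,
+					      struct k3_nav_ring *compl_ring,
+					      u64 desc_dma, u64 *compl_dma)
+{
+	int ret;
+
+	ret = k3_nav_ringacc_ring_push(fwd_ring, &desc_dma);
+	if (ret)
+		return ret;
+
+	while (!k3_nav_ringacc_ring_get_occ(compl_ring))
+		;
+
+	return k3_nav_ringacc_ring_pop(compl_ring, compl_dma);
+}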
+
+/* DMA ring support */
+struct ti_sci_handle;
+
+/**
+ * struct k3_ringacc_init_data - Initialization data for DMA rings
+ */
+struct k3_ringacc_init_data {
+ const struct ti_sci_handle *tisci;
+ u32 tisci_dev_id;
+ u32 num_rings;
+};
+
+struct k3_nav_ringacc *k3_ringacc_dmarings_init(struct udevice *dev,
+ struct k3_ringacc_init_data *data);
+
+#endif /* __SOC_TI_K3_NAVSS_RINGACC_API_H_ */
diff --git a/roms/u-boot/include/linux/soc/ti/k3-sec-proxy.h b/roms/u-boot/include/linux/soc/ti/k3-sec-proxy.h
new file mode 100644
index 000000000..f34854cee
--- /dev/null
+++ b/roms/u-boot/include/linux/soc/ti/k3-sec-proxy.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Texas Instruments' K3 Secure proxy
+ *
+ * Copyright (C) 2017-2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Lokesh Vutla <lokeshvutla@ti.com>
+ *
+ */
+
+#ifndef K3_SEC_PROXY_H
+#define K3_SEC_PROXY_H
+
+/**
+ * struct k3_sec_proxy_msg - Secure proxy message structure
+ * @len: Length of data in the Buffer
+ * @buf: Buffer pointer
+ *
+ * This is the structure for data used in mbox_send() and mbox_recv().
+ */
+struct k3_sec_proxy_msg {
+ size_t len;
+ u32 *buf;
+};
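+
+/*
+ * Example (illustrative sketch): wrapping a raw TISCI message buffer in a
+ * k3_sec_proxy_msg before handing it to the mailbox framework (mbox_send()/
+ * mbox_recv()). The helper name is hypothetical.
+ */
+static inline void k3_sec_proxy_msg_init(struct k3_sec_proxy_msg *msg,
+					 u32 *buf, size_t len)
+{
+	msg->len = len;
+	msg->buf = buf;
+}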
+
+#endif /* K3_SEC_PROXY_H */
diff --git a/roms/u-boot/include/linux/soc/ti/ti-udma.h b/roms/u-boot/include/linux/soc/ti/ti-udma.h
new file mode 100644
index 000000000..04e354fb2
--- /dev/null
+++ b/roms/u-boot/include/linux/soc/ti/ti-udma.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#ifndef __TI_UDMA_H
+#define __TI_UDMA_H
+
+/**
+ * struct ti_udma_drv_packet_data - TI UDMA transfer specific data
+ *
+ * @pkt_type: Packet Type - specific for each DMA client HW
+ * @dest_tag: Destination tag
+ *
+ * TI UDMA transfer specific data passed as part of DMA transfer to
+ * the DMA client HW in UDMA descriptors.
+ */
+struct ti_udma_drv_packet_data {
+ u32 pkt_type;
+ u32 dest_tag;
+};
+
+/**
+ * struct ti_udma_drv_chan_cfg_data - TI UDMA per channel specific
+ * configuration data
+ *
+ * @flow_id_base: Start index of flow ID allocated to this channel
+ * @flow_id_cnt: Number of flows allocated for this channel starting at
+ * flow_id_base
+ *
+ * TI UDMA channel specific data returned as part of dma_get_cfg() call
+ * from the DMA client driver.
+ */
+struct ti_udma_drv_chan_cfg_data {
+ u32 flow_id_base;
+ u32 flow_id_cnt;
+};
+
+/* TI UDMA specific flag IDs for dma_get_cfg() call */
+#define TI_UDMA_CHAN_PRIV_INFO 0
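+
+/*
+ * Example (illustrative sketch): a DMA client normally obtains this
+ * structure through dma_get_cfg() with TI_UDMA_CHAN_PRIV_INFO and then
+ * programs its RX flow registers from the returned range. The helper below
+ * only shows how the returned flow id range is interpreted.
+ */
+static inline u32
+ti_udma_example_last_flow_id(const struct ti_udma_drv_chan_cfg_data *cfg)
+{
+	/* Flow ids allocated to the channel are contiguous from flow_id_base */
+	return cfg->flow_id_base + cfg->flow_id_cnt - 1;
+}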
+
+#endif /* __TI_UDMA_H */
diff --git a/roms/u-boot/include/linux/soc/ti/ti_sci_protocol.h b/roms/u-boot/include/linux/soc/ti/ti_sci_protocol.h
new file mode 100644
index 000000000..794737923
--- /dev/null
+++ b/roms/u-boot/include/linux/soc/ti/ti_sci_protocol.h
@@ -0,0 +1,702 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Texas Instruments System Control Interface Protocol
+ * Based on include/linux/soc/ti/ti_sci_protocol.h from Linux.
+ *
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Nishanth Menon
+ * Lokesh Vutla <lokeshvutla@ti.com>
+ */
+
+#ifndef __TISCI_PROTOCOL_H
+#define __TISCI_PROTOCOL_H
+
+/**
+ * struct ti_sci_version_info - version information structure
+ * @abi_major: Major ABI version. Change here implies risk of backward
+ * compatibility break.
+ * @abi_minor: Minor ABI version. Change here implies new feature addition,
+ * or compatible change in ABI.
+ * @firmware_revision: Firmware revision (not usually used).
+ * @firmware_description: Firmware description (not usually used).
+ */
+#include <linux/bitops.h>
+struct ti_sci_version_info {
+ u8 abi_major;
+ u8 abi_minor;
+ u16 firmware_revision;
+ char firmware_description[32];
+};
+
+struct ti_sci_handle;
+
+/**
+ * struct ti_sci_board_ops - Board config operations
+ * @board_config: Command to set the board configuration
+ * Returns 0 for successful exclusive request, else returns
+ * corresponding error message.
+ * @board_config_rm: Command to set the board resource management
+ * configuration
+ * Returns 0 for successful exclusive request, else returns
+ * corresponding error message.
+ * @board_config_security: Command to set the board security configuration
+ * Returns 0 for successful exclusive request, else returns
+ * corresponding error message.
+ * @board_config_pm: Command to trigger and set the board power and clock
+ * management related configuration
+ * Returns 0 for successful exclusive request, else returns
+ * corresponding error message.
+ */
+struct ti_sci_board_ops {
+ int (*board_config)(const struct ti_sci_handle *handle,
+ u64 addr, u32 size);
+ int (*board_config_rm)(const struct ti_sci_handle *handle,
+ u64 addr, u32 size);
+ int (*board_config_security)(const struct ti_sci_handle *handle,
+ u64 addr, u32 size);
+ int (*board_config_pm)(const struct ti_sci_handle *handle,
+ u64 addr, u32 size);
+};
+
+/**
+ * struct ti_sci_dev_ops - Device control operations
+ * @get_device: Command to request for device managed by TISCI
+ * Returns 0 for successful exclusive request, else returns
+ * corresponding error message.
+ * @idle_device: Command to idle a device managed by TISCI
+ * Returns 0 for successful exclusive request, else returns
+ * corresponding error message.
+ * @put_device: Command to release a device managed by TISCI
+ * Returns 0 for successful release, else returns corresponding
+ * error message.
+ * @is_valid: Check if the device ID is a valid ID.
+ * Returns 0 if the ID is valid, else returns corresponding error.
+ * @get_context_loss_count: Command to retrieve context loss counter - this
+ * increments every time the device loses context. Overflow
+ * is possible.
+ * - count: pointer to u32 which will retrieve counter
+ * Returns 0 for successful information request and count has
+ * proper data, else returns corresponding error message.
+ * @is_idle: Reports back about device idle state
+ * - req_state: Returns requested idle state
+ * Returns 0 for successful information request and req_state has
+ * proper data, else returns corresponding error message.
+ * @is_stop: Reports back about device stop state
+ * - req_state: Returns requested stop state
+ * - current_state: Returns current stop state
+ * Returns 0 for successful information request and req_state and
+ * current_state has proper data, else returns corresponding error
+ * message.
+ * @is_on: Reports back about device ON(or active) state
+ * - req_state: Returns requested ON state
+ * - current_state: Returns current ON state
+ * Returns 0 for successful information request and req_state and
+ * current_state has proper data, else returns corresponding error
+ * message.
+ * @is_transitioning: Reports back if the device is in the middle of transition
+ * of state.
+ * -current_state: Returns 'true' if currently transitioning.
+ * @set_device_resets: Command to configure resets for device managed by TISCI.
+ * -reset_state: Device specific reset bit field
+ * Returns 0 for successful request, else returns
+ * corresponding error message.
+ * @get_device_resets: Command to read state of resets for device managed
+ * by TISCI.
+ * -reset_state: pointer to u32 which will retrieve resets
+ * Returns 0 for successful request, else returns
+ * corresponding error message.
+ * @release_exclusive_devices: Command to release all the exclusive devices
+ * attached to this host. This should be used very carefully
+ * and only at the end of execution of your software.
+ *
+ * NOTE: for all these functions, the following parameters are generic in
+ * nature:
+ * -handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * -id: Device Identifier
+ *
+ * Request for the device - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_device with put_device. No refcounting is
+ * managed by driver for that purpose.
+ */
+struct ti_sci_dev_ops {
+ int (*get_device)(const struct ti_sci_handle *handle, u32 id);
+ int (*get_device_exclusive)(const struct ti_sci_handle *handle, u32 id);
+ int (*idle_device)(const struct ti_sci_handle *handle, u32 id);
+ int (*idle_device_exclusive)(const struct ti_sci_handle *handle,
+ u32 id);
+ int (*put_device)(const struct ti_sci_handle *handle, u32 id);
+ int (*is_valid)(const struct ti_sci_handle *handle, u32 id);
+ int (*get_context_loss_count)(const struct ti_sci_handle *handle,
+ u32 id, u32 *count);
+ int (*is_idle)(const struct ti_sci_handle *handle, u32 id,
+ bool *requested_state);
+ int (*is_stop)(const struct ti_sci_handle *handle, u32 id,
+ bool *req_state, bool *current_state);
+ int (*is_on)(const struct ti_sci_handle *handle, u32 id,
+ bool *req_state, bool *current_state);
+ int (*is_transitioning)(const struct ti_sci_handle *handle, u32 id,
+ bool *current_state);
+ int (*set_device_resets)(const struct ti_sci_handle *handle, u32 id,
+ u32 reset_state);
+ int (*get_device_resets)(const struct ti_sci_handle *handle, u32 id,
+ u32 *reset_state);
+ int (*release_exclusive_devices)(const struct ti_sci_handle *handle);
+};
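+
+/*
+ * Example (illustrative sketch): powering a device up and releasing it again
+ * through these ops. Callers normally reach this table via the TISCI handle
+ * returned by ti_sci_get_handle(); the ops pointer is passed in directly
+ * here to keep the sketch self-contained.
+ */
+static inline int ti_sci_example_device_cycle(const struct ti_sci_handle *handle,
+					      const struct ti_sci_dev_ops *dops,
+					      u32 dev_id)
+{
+	int ret;
+
+	ret = dops->get_device(handle, dev_id);
+	if (ret)
+		return ret;
+
+	/* ... use the device ... */
+
+	return dops->put_device(handle, dev_id);
+}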
+
+/**
+ * struct ti_sci_clk_ops - Clock control operations
+ * @get_clock: Request for activation of clock and manage by processor
+ * - needs_ssc: 'true' if Spread Spectrum clock is desired.
+ * - can_change_freq: 'true' if frequency change is desired.
+ * - enable_input_term: 'true' if input termination is desired.
+ * @idle_clock: Request for Idling a clock managed by processor
+ * @put_clock: Release the clock to be auto managed by TISCI
+ * @is_auto: Is the clock being auto managed
+ * - req_state: state indicating if the clock is auto managed
+ * @is_on: Is the clock ON
+ * - req_state: if the clock is requested to be forced ON
+ * - current_state: if the clock is currently ON
+ * @is_off: Is the clock OFF
+ * - req_state: if the clock is requested to be forced OFF
+ * - current_state: if the clock is currently Gated
+ * @set_parent: Set the clock source of a specific device clock
+ * - parent_id: Parent clock identifier to set.
+ * @get_parent: Get the current clock source of a specific device clock
+ * - parent_id: Parent clock identifier which is the parent.
+ * @get_num_parents: Get the number of parents of the current clock source
+ * - num_parents: returns the number of parent clocks.
+ * @get_best_match_freq: Find a best matching frequency for a frequency
+ * range.
+ * - match_freq: Best matching frequency in Hz.
+ * @set_freq: Set the Clock frequency
+ * @get_freq: Get the Clock frequency
+ * - current_freq: Frequency in Hz that the clock is at.
+ *
+ * NOTE: for all these functions, the following parameters are generic in
+ * nature:
+ * -handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * -did: Device identifier this request is for
+ * -cid: Clock identifier for the device for this request.
+ * Each device has its own set of clock inputs. This indexes
+ * which clock input to modify.
+ * -min_freq: The minimum allowable frequency in Hz. This is the minimum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ * -target_freq: The target clock frequency in Hz. A frequency will be
+ * processed as close to this target frequency as possible.
+ * -max_freq: The maximum allowable frequency in Hz. This is the maximum
+ * allowable programmed frequency and does not account for clock
+ * tolerances and jitter.
+ *
+ * Request for the clock - NOTE: the client MUST maintain integrity of
+ * usage count by balancing get_clock with put_clock. No refcounting is
+ * managed by driver for that purpose.
+ */
+struct ti_sci_clk_ops {
+ int (*get_clock)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+ bool needs_ssc, bool can_change_freq,
+ bool enable_input_term);
+ int (*idle_clock)(const struct ti_sci_handle *handle, u32 did, u8 cid);
+ int (*put_clock)(const struct ti_sci_handle *handle, u32 did, u8 cid);
+ int (*is_auto)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+ bool *req_state);
+ int (*is_on)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+ bool *req_state, bool *current_state);
+ int (*is_off)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+ bool *req_state, bool *current_state);
+ int (*set_parent)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+ u8 parent_id);
+ int (*get_parent)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+ u8 *parent_id);
+ int (*get_num_parents)(const struct ti_sci_handle *handle, u32 did,
+ u8 cid, u8 *num_parents);
+ int (*get_best_match_freq)(const struct ti_sci_handle *handle, u32 did,
+ u8 cid, u64 min_freq, u64 target_freq,
+ u64 max_freq, u64 *match_freq);
+ int (*set_freq)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+ u64 min_freq, u64 target_freq, u64 max_freq);
+ int (*get_freq)(const struct ti_sci_handle *handle, u32 did, u8 cid,
+ u64 *current_freq);
+};
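+
+/*
+ * Example (illustrative sketch): requesting a device clock and programming
+ * an exact rate (min == target == max, i.e. no tolerance). As above, the ops
+ * table is passed in directly for the sake of the sketch.
+ */
+static inline int ti_sci_example_clk_set_exact(const struct ti_sci_handle *handle,
+					       const struct ti_sci_clk_ops *cops,
+					       u32 dev_id, u8 clk_id, u64 freq_hz)
+{
+	int ret;
+
+	ret = cops->get_clock(handle, dev_id, clk_id, false, true, false);
+	if (ret)
+		return ret;
+
+	return cops->set_freq(handle, dev_id, clk_id, freq_hz, freq_hz, freq_hz);
+}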
+
+/**
+ * struct ti_sci_rm_core_ops - Resource management core operations
+ * @get_range: Get a range of resources belonging to ti sci host.
+ * @get_range_from_shost: Get a range of resources belonging to
+ * specified host id.
+ * - s_host: Host processing entity to which the
+ * resources are allocated
+ *
+ * NOTE: for these functions, all the parameters are consolidated and defined
+ * as below:
+ * - handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * - dev_id: TISCI device ID.
+ * - subtype: Resource assignment subtype that is being requested
+ * from the given device.
+ * - range_start: Start index of the resource range
+ * - range_num: Number of resources in the range
+ */
+struct ti_sci_rm_core_ops {
+ int (*get_range)(const struct ti_sci_handle *handle, u32 dev_id,
+ u8 subtype, u16 *range_start, u16 *range_num);
+ int (*get_range_from_shost)(const struct ti_sci_handle *handle,
+ u32 dev_id, u8 subtype, u8 s_host,
+ u16 *range_start, u16 *range_num);
+};
+
+/**
+ * struct ti_sci_core_ops - SoC Core Operations
+ * @reboot_device: Reboot the SoC
+ * Returns 0 for successful request (ideally should never return),
+ * else returns corresponding error value.
+ * @query_msmc: Query the size of available msmc
+ * Return 0 for successful query else appropriate error value.
+ */
+struct ti_sci_core_ops {
+ int (*reboot_device)(const struct ti_sci_handle *handle);
+ int (*query_msmc)(const struct ti_sci_handle *handle,
+ u64 *msmc_start, u64 *msmc_end);
+};
+
+/**
+ * struct ti_sci_proc_ops - Processor specific operations.
+ *
+ * @proc_request: Request for controlling a physical processor.
+ * The requesting host should be in the processor access list.
+ * @proc_release: Relinquish a physical processor control
+ * @proc_handover: Handover a physical processor control to another host
+ * in the permitted list.
+ * @set_proc_boot_cfg: Base configuration of the processor
+ * @set_proc_boot_ctrl: Setup limited control flags in specific cases.
+ * @proc_auth_boot_image:
+ * @get_proc_boot_status: Get the state of physical processor
+ * @proc_shutdown_no_wait: Shutdown a core without requesting or waiting for a
+ * response.
+ *
+ * NOTE: for all these functions, the following parameters are generic in
+ * nature:
+ * -handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
+ * -pid: Processor ID
+ *
+ */
+struct ti_sci_proc_ops {
+ int (*proc_request)(const struct ti_sci_handle *handle, u8 pid);
+ int (*proc_release)(const struct ti_sci_handle *handle, u8 pid);
+ int (*proc_handover)(const struct ti_sci_handle *handle, u8 pid,
+ u8 hid);
+ int (*set_proc_boot_cfg)(const struct ti_sci_handle *handle, u8 pid,
+ u64 bv, u32 cfg_set, u32 cfg_clr);
+ int (*set_proc_boot_ctrl)(const struct ti_sci_handle *handle, u8 pid,
+ u32 ctrl_set, u32 ctrl_clr);
+ int (*proc_auth_boot_image)(const struct ti_sci_handle *handle,
+ u64 *image_addr, u32 *image_size);
+ int (*get_proc_boot_status)(const struct ti_sci_handle *handle, u8 pid,
+ u64 *bv, u32 *cfg_flags, u32 *ctrl_flags,
+ u32 *sts_flags);
+ int (*proc_shutdown_no_wait)(const struct ti_sci_handle *handle,
+ u8 pid);
+};
+
+#define TI_SCI_RING_MODE_RING (0)
+#define TI_SCI_RING_MODE_MESSAGE (1)
+#define TI_SCI_RING_MODE_CREDENTIALS (2)
+#define TI_SCI_RING_MODE_QM (3)
+
+#define TI_SCI_MSG_UNUSED_SECONDARY_HOST TI_SCI_RM_NULL_U8
+
+/* RA config.addr_lo parameter is valid for RM ring configure TI_SCI message */
+#define TI_SCI_MSG_VALUE_RM_RING_ADDR_LO_VALID BIT(0)
+/* RA config.addr_hi parameter is valid for RM ring configure TI_SCI message */
+#define TI_SCI_MSG_VALUE_RM_RING_ADDR_HI_VALID BIT(1)
+ /* RA config.count parameter is valid for RM ring configure TI_SCI message */
+#define TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID BIT(2)
+/* RA config.mode parameter is valid for RM ring configure TI_SCI message */
+#define TI_SCI_MSG_VALUE_RM_RING_MODE_VALID BIT(3)
+/* RA config.size parameter is valid for RM ring configure TI_SCI message */
+#define TI_SCI_MSG_VALUE_RM_RING_SIZE_VALID BIT(4)
+/* RA config.order_id parameter is valid for RM ring configure TISCI message */
+#define TI_SCI_MSG_VALUE_RM_RING_ORDER_ID_VALID BIT(5)
+
+#define TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER \
+ (TI_SCI_MSG_VALUE_RM_RING_ADDR_LO_VALID | \
+ TI_SCI_MSG_VALUE_RM_RING_ADDR_HI_VALID | \
+ TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID | \
+ TI_SCI_MSG_VALUE_RM_RING_MODE_VALID | \
+ TI_SCI_MSG_VALUE_RM_RING_SIZE_VALID)
+
+/**
+ * struct ti_sci_rm_ringacc_ops - Ring Accelerator Management operations
+ * @config: configure the SoC Navigator Subsystem Ring Accelerator ring
+ */
+struct ti_sci_rm_ringacc_ops {
+ int (*config)(const struct ti_sci_handle *handle,
+ u32 valid_params, u16 nav_id, u16 index,
+ u32 addr_lo, u32 addr_hi, u32 count, u8 mode,
+ u8 size, u8 order_id
+ );
+};
+
+/**
+ * struct ti_sci_rm_psil_ops - PSI-L thread operations
+ * @pair: pair PSI-L source thread to a destination thread.
+ * If the src_thread is mapped to UDMA tchan, the corresponding channel's
+ * TCHAN_THRD_ID register is updated.
+ * If the dst_thread is mapped to UDMA rchan, the corresponding channel's
+ * RCHAN_THRD_ID register is updated.
+ * @unpair: unpair PSI-L source thread from a destination thread.
+ * If the src_thread is mapped to UDMA tchan, the corresponding channel's
+ * TCHAN_THRD_ID register is cleared.
+ * If the dst_thread is mapped to UDMA rchan, the corresponding channel's
+ * RCHAN_THRD_ID register is cleared.
+ */
+struct ti_sci_rm_psil_ops {
+ int (*pair)(const struct ti_sci_handle *handle, u32 nav_id,
+ u32 src_thread, u32 dst_thread);
+ int (*unpair)(const struct ti_sci_handle *handle, u32 nav_id,
+ u32 src_thread, u32 dst_thread);
+};
+
+/* UDMAP channel types */
+#define TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR 2
+#define TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR_SB 3 /* RX only */
+#define TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR 10
+#define TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBVR 11
+#define TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR 12
+#define TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBVR 13
+
+/* UDMAP channel atypes */
+#define TI_SCI_RM_UDMAP_ATYPE_PHYS 0
+#define TI_SCI_RM_UDMAP_ATYPE_INTERMEDIATE 1
+#define TI_SCI_RM_UDMAP_ATYPE_VIRTUAL 2
+
+/* UDMAP channel scheduling priorities */
+#define TI_SCI_RM_UDMAP_SCHED_PRIOR_HIGH 0
+#define TI_SCI_RM_UDMAP_SCHED_PRIOR_MEDHIGH 1
+#define TI_SCI_RM_UDMAP_SCHED_PRIOR_MEDLOW 2
+#define TI_SCI_RM_UDMAP_SCHED_PRIOR_LOW 3
+
+#define TI_SCI_RM_UDMAP_RX_FLOW_DESC_HOST 0
+#define TI_SCI_RM_UDMAP_RX_FLOW_DESC_MONO 2
+
+#define TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES 1
+#define TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES 2
+#define TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES 3
+
+#define TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_TCHAN 0
+#define TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN 1
+
+/* UDMAP TX/RX channel valid_params common declarations */
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID BIT(0)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID BIT(1)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID BIT(2)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID BIT(3)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID BIT(4)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_PRIORITY_VALID BIT(5)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_QOS_VALID BIT(6)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_ORDER_ID_VALID BIT(7)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_SCHED_PRIORITY_VALID BIT(8)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID BIT(14)
+
+/**
+ * struct ti_sci_msg_rm_udmap_tx_ch_cfg - UDMAP transmit channel configuration
+ *
+ * Configures the registers of a Navigator Subsystem UDMAP transmit channel.
+ * See @ti_sci_msg_rm_udmap_tx_ch_cfg_req
+ */
+struct ti_sci_msg_rm_udmap_tx_ch_cfg {
+ u32 valid_params;
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID BIT(9)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID BIT(10)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID BIT(11)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_CREDIT_COUNT_VALID BIT(12)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FDEPTH_VALID BIT(13)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID BIT(15)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID BIT(16)
+ u16 nav_id;
+ u16 index;
+ u8 tx_pause_on_err;
+ u8 tx_filt_einfo;
+ u8 tx_filt_pswords;
+ u8 tx_atype;
+ u8 tx_chan_type;
+ u8 tx_supr_tdpkt;
+ u16 tx_fetch_size;
+ u8 tx_credit_count;
+ u16 txcq_qnum;
+ u8 tx_priority;
+ u8 tx_qos;
+ u8 tx_orderid;
+ u16 fdepth;
+ u8 tx_sched_priority;
+ u8 tx_burst_size;
+ u8 tx_tdtype;
+ u8 extended_ch_type;
+};
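+
+/*
+ * Example usage (illustrative sketch): a minimal transmit channel
+ * configuration. Only the fields flagged in valid_params are consumed by the
+ * firmware; nav_id, tchan_id and tcq_ring are placeholders owned by the
+ * caller.
+ *
+ *    struct ti_sci_msg_rm_udmap_tx_ch_cfg cfg = {
+ *        .valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
+ *                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
+ *                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID,
+ *        .nav_id = nav_id,
+ *        .index = tchan_id,
+ *        .tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR,
+ *        .tx_fetch_size = 16,
+ *        .txcq_qnum = tcq_ring,
+ *    };
+ *
+ *    ret = sci->ops.rm_udmap_ops.tx_ch_cfg(sci, &cfg);
+ */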
+
+/**
+ * struct ti_sci_msg_rm_udmap_rx_ch_cfg - UDMAP receive channel configuration
+ *
+ * Configures the registers of a Navigator Subsystem UDMAP receive channel.
+ * See @ti_sci_msg_rm_udmap_rx_ch_cfg_req
+ */
+struct ti_sci_msg_rm_udmap_rx_ch_cfg {
+ u32 valid_params;
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID BIT(9)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID BIT(10)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID BIT(11)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID BIT(12)
+ u16 nav_id;
+ u16 index;
+ u16 rx_fetch_size;
+ u16 rxcq_qnum;
+ u8 rx_priority;
+ u8 rx_qos;
+ u8 rx_orderid;
+ u8 rx_sched_priority;
+ u16 flowid_start;
+ u16 flowid_cnt;
+ u8 rx_pause_on_err;
+ u8 rx_atype;
+ u8 rx_chan_type;
+ u8 rx_ignore_short;
+ u8 rx_ignore_long;
+ u8 rx_burst_size;
+};
+
+/**
+ * struct ti_sci_msg_rm_udmap_flow_cfg - UDMAP receive flow configuration
+ *
+ * Configures the registers of a Navigator Subsystem UDMAP receive flow.
+ * See @ti_sci_msg_rm_udmap_flow_cfg_req
+ */
+struct ti_sci_msg_rm_udmap_flow_cfg {
+ u32 valid_params;
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID BIT(0)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID BIT(1)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID BIT(2)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID BIT(3)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SOP_OFFSET_VALID BIT(4)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID BIT(5)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_VALID BIT(6)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_VALID BIT(7)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_VALID BIT(8)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_VALID BIT(9)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID BIT(10)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID BIT(11)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID BIT(12)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID BIT(13)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID BIT(14)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID BIT(15)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID BIT(16)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID BIT(17)
+#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID BIT(18)
+ u16 nav_id;
+ u16 flow_index;
+ u8 rx_einfo_present;
+ u8 rx_psinfo_present;
+ u8 rx_error_handling;
+ u8 rx_desc_type;
+ u16 rx_sop_offset;
+ u16 rx_dest_qnum;
+ u8 rx_src_tag_hi;
+ u8 rx_src_tag_lo;
+ u8 rx_dest_tag_hi;
+ u8 rx_dest_tag_lo;
+ u8 rx_src_tag_hi_sel;
+ u8 rx_src_tag_lo_sel;
+ u8 rx_dest_tag_hi_sel;
+ u8 rx_dest_tag_lo_sel;
+ u16 rx_fdq0_sz0_qnum;
+ u16 rx_fdq1_qnum;
+ u16 rx_fdq2_qnum;
+ u16 rx_fdq3_qnum;
+ u8 rx_ps_location;
+};
+
+/**
+ * struct ti_sci_rm_udmap_ops - UDMA Management operations
+ * @tx_ch_cfg: configure SoC Navigator Subsystem UDMA transmit channel.
+ * @rx_ch_cfg: configure SoC Navigator Subsystem UDMA receive channel.
+ * @rx_flow_cfg: configure SoC Navigator Subsystem UDMA receive flow.
+ */
+struct ti_sci_rm_udmap_ops {
+ int (*tx_ch_cfg)(const struct ti_sci_handle *handle,
+ const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params);
+ int (*rx_ch_cfg)(const struct ti_sci_handle *handle,
+ const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params);
+ int (*rx_flow_cfg)(const struct ti_sci_handle *handle,
+ const struct ti_sci_msg_rm_udmap_flow_cfg *params);
+};
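+
+/*
+ * Example usage (illustrative sketch): configuring a receive channel and one
+ * receive flow through the ops above. The nav_id, channel/flow indices and
+ * completion ring number are placeholders resolved by the caller.
+ *
+ *    struct ti_sci_msg_rm_udmap_rx_ch_cfg ch = {
+ *        .valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
+ *                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
+ *                        TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID,
+ *        .nav_id = nav_id,
+ *        .index = rchan_id,
+ *        .rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR,
+ *        .rx_fetch_size = 16,
+ *        .rxcq_qnum = rcq_ring,
+ *    };
+ *    struct ti_sci_msg_rm_udmap_flow_cfg flow = {
+ *        .valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
+ *                        TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID,
+ *        .nav_id = nav_id,
+ *        .flow_index = flow_id,
+ *        .rx_desc_type = TI_SCI_RM_UDMAP_RX_FLOW_DESC_HOST,
+ *        .rx_dest_qnum = rcq_ring,
+ *    };
+ *
+ *    ret = sci->ops.rm_udmap_ops.rx_ch_cfg(sci, &ch);
+ *    if (!ret)
+ *        ret = sci->ops.rm_udmap_ops.rx_flow_cfg(sci, &flow);
+ */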
+
+/**
+ * struct ti_sci_msg_fwl_region - Request and response for firewall settings
+ *
+ * @fwl_id: Firewall ID in question
+ * @region: Region or channel number for which to set config info.
+ * This field is unused in case of a simple firewall and must be initialized
+ * to zero. In case of a region-based firewall, it indicates the region in
+ * question (index starting from 0). In case of a channel-based firewall, it
+ * indicates the channel in question (index starting from 0).
+ * @n_permission_regs: Number of permission registers to set
+ * @control: Contents of the firewall CONTROL register to set
+ * @permissions: Contents of the firewall PERMISSION registers to set
+ * @start_address: Contents of the firewall START_ADDRESS register to set
+ * @end_address: Contents of the firewall END_ADDRESS register to set
+ */
+struct ti_sci_msg_fwl_region {
+ u16 fwl_id;
+ u16 region;
+ u32 n_permission_regs;
+ u32 control;
+ u32 permissions[3];
+ u64 start_address;
+ u64 end_address;
+} __packed;
+
+/**
+ * struct ti_sci_msg_fwl_owner - Request and response for firewall owner change
+ *
+ * @fwl_id: Firewall ID in question
+ * @region: Region or channel number for which to change the owner.
+ * This field is unused in case of a simple firewall and must be initialized
+ * to zero. In case of a region-based firewall, it indicates the region in
+ * question (index starting from 0). In case of a channel-based firewall, it
+ * indicates the channel in question (index starting from 0).
+ * @owner_index: New owner index to change to. Owner indexes are set up in the
+ * DMSC firmware boot configuration data.
+ * @owner_privid: New owner priv-id, used to look up the owner_index if it is
+ * not known; must be set to zero otherwise.
+ * @owner_permission_bits: New owner permission bits
+ */
+struct ti_sci_msg_fwl_owner {
+ u16 fwl_id;
+ u16 region;
+ u8 owner_index;
+ u8 owner_privid;
+ u16 owner_permission_bits;
+} __packed;
+
+/**
+ * struct ti_sci_fwl_ops - Firewall specific operations
+ * @set_fwl_region: Request for configuring the firewall permissions.
+ * @get_fwl_region: Request for retrieving the firewall permissions.
+ * @change_fwl_owner: Request for a change of firewall owner.
+ */
+struct ti_sci_fwl_ops {
+ int (*set_fwl_region)(const struct ti_sci_handle *handle, const struct ti_sci_msg_fwl_region *region);
+ int (*get_fwl_region)(const struct ti_sci_handle *handle, struct ti_sci_msg_fwl_region *region);
+ int (*change_fwl_owner)(const struct ti_sci_handle *handle, struct ti_sci_msg_fwl_owner *owner);
+};
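+
+/*
+ * Example usage (illustrative sketch): granting access to one firewall
+ * region. The firewall ID, CONTROL and PERMISSION register values and the
+ * address window are placeholders; the real values are SoC and use-case
+ * specific and come from the device's firewall documentation.
+ *
+ *    struct ti_sci_msg_fwl_region region = {
+ *        .fwl_id = fwl_id,
+ *        .region = 0,
+ *        .n_permission_regs = 1,
+ *        .control = fwl_control_val,
+ *        .permissions = { fwl_perm_val, },
+ *        .start_address = win_start,
+ *        .end_address = win_end,
+ *    };
+ *
+ *    ret = sci->ops.fwl_ops.set_fwl_region(sci, &region);
+ */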
+
+/**
+ * struct ti_sci_ops - Function support for TI SCI
+ * @board_ops: Miscellaneous operations
+ * @dev_ops: Device specific operations
+ * @clk_ops: Clock specific operations
+ * @core_ops: Core specific operations
+ * @proc_ops: Processor specific operations
+ * @rm_core_ops: Resource Management core operations
+ * @rm_ring_ops: Ring Accelerator Management operations
+ * @rm_psil_ops: PSI-L thread pairing operations
+ * @rm_udmap_ops: UDMA Management operations
+ * @fwl_ops: Firewall specific operations
+ */
+struct ti_sci_ops {
+ struct ti_sci_board_ops board_ops;
+ struct ti_sci_dev_ops dev_ops;
+ struct ti_sci_clk_ops clk_ops;
+ struct ti_sci_core_ops core_ops;
+ struct ti_sci_proc_ops proc_ops;
+ struct ti_sci_rm_core_ops rm_core_ops;
+ struct ti_sci_rm_ringacc_ops rm_ring_ops;
+ struct ti_sci_rm_psil_ops rm_psil_ops;
+ struct ti_sci_rm_udmap_ops rm_udmap_ops;
+ struct ti_sci_fwl_ops fwl_ops;
+};
+
+/**
+ * struct ti_sci_handle - Handle returned to TI SCI clients for usage.
+ * @ops: operations that are made available to TI SCI clients
+ * @version: structure containing version information
+ */
+struct ti_sci_handle {
+ struct ti_sci_ops ops;
+ struct ti_sci_version_info version;
+};
+
+#define TI_SCI_RESOURCE_NULL 0xffff
+
+/**
+ * struct ti_sci_resource_desc - Description of TI SCI resource instance range.
+ * @start: Start index of the resource.
+ * @num: Number of resources.
+ * @res_map: Bitmap to manage the allocation of these resources.
+ */
+struct ti_sci_resource_desc {
+ u16 start;
+ u16 num;
+ unsigned long *res_map;
+};
+
+/**
+ * struct ti_sci_resource - Structure representing a resource assigned
+ * to a device.
+ * @sets: Number of sets available from this resource type
+ * @desc: Array of resource descriptors.
+ */
+struct ti_sci_resource {
+ u16 sets;
+ struct ti_sci_resource_desc *desc;
+};
+
+#if IS_ENABLED(CONFIG_TI_SCI_PROTOCOL)
+
+const struct ti_sci_handle *ti_sci_get_handle_from_sysfw(struct udevice *dev);
+const struct ti_sci_handle *ti_sci_get_handle(struct udevice *dev);
+const struct ti_sci_handle *ti_sci_get_by_phandle(struct udevice *dev,
+ const char *property);
+u16 ti_sci_get_free_resource(struct ti_sci_resource *res);
+void ti_sci_release_resource(struct ti_sci_resource *res, u16 id);
+struct ti_sci_resource *
+devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
+ struct udevice *dev, u32 dev_id, char *of_prop);
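+
+/*
+ * Example usage (illustrative sketch): allocating and releasing one resource
+ * index from a range described in the device tree. The property name
+ * "ti,sci-rm-range-rflow" and dev_id are placeholders chosen by the caller.
+ *
+ *    struct ti_sci_resource *res;
+ *    u16 id;
+ *
+ *    res = devm_ti_sci_get_of_resource(sci, dev, dev_id,
+ *                                      "ti,sci-rm-range-rflow");
+ *    if (IS_ERR(res))
+ *        return PTR_ERR(res);
+ *
+ *    id = ti_sci_get_free_resource(res);
+ *    if (id == TI_SCI_RESOURCE_NULL)
+ *        return -ENOENT;
+ *    ...
+ *    ti_sci_release_resource(res, id);
+ */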
+
+#else /* CONFIG_TI_SCI_PROTOCOL */
+
+static inline
+const struct ti_sci_handle *ti_sci_get_handle_from_sysfw(struct udevice *dev)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline const struct ti_sci_handle *ti_sci_get_handle(struct udevice *dev)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline
+const struct ti_sci_handle *ti_sci_get_by_phandle(struct udevice *dev,
+ const char *property)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
+{
+ return TI_SCI_RESOURCE_NULL;
+}
+
+static inline void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
+{
+}
+
+static inline struct ti_sci_resource *
+devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
+ struct udevice *dev, u32 dev_id, char *of_prop)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif /* CONFIG_TI_SCI_PROTOCOL */
+
+#endif /* __TISCI_PROTOCOL_H */