path: root/meta-agl-bsp/meta-renesas/recipes-bsp/u-boot/u-boot/hibernation/0002-Enable-swsusp-DMA-support.patch
From 33dfe19185b35fc61613070032836beee0f48c45 Mon Sep 17 00:00:00 2001
From: Yuichi Kusakabe <yuichi.kusakabe@jp.fujitsu.com>
Date: Fri, 9 Jun 2017 20:45:39 +0900
Subject: [PATCH 2/3] Enable swsusp DMA support

When CONFIG_SH_DMA is enabled, read the compressed LZO blocks of the
hibernation image with multi-page block_read() transfers: get_block()
coalesces runs of consecutive swap pages from the metadata map into a
single read instead of fetching one page at a time through
raw_page_get_next().

Signed-off-by: Yuichi Kusakabe <yuichi.kusakabe@jp.fujitsu.com>
---
 common/cmd_swsusp.c | 58 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 58 insertions(+)

diff --git a/common/cmd_swsusp.c b/common/cmd_swsusp.c
index ba05aa4..b1d6c22 100644
--- a/common/cmd_swsusp.c
+++ b/common/cmd_swsusp.c
@@ -226,6 +226,53 @@ static inline void *malloc_aligned(u32 size, u32 align)
 	return (void *)(((u32)malloc(size + align) + align - 1) & ~(align - 1));
 }
 
+static int block_read(u32 page, void *addr, u32 count)
+{
+	__u32 cnt;
+	int blk_per_page;
+
+	blk_per_page = PAGE_SIZE / swap_dev->blksz;
+	cnt = swap_dev->block_read(swap_dev->dev,
+				swap_info.start + (page * blk_per_page),
+				count * blk_per_page, addr);
+
+	return cnt != count * blk_per_page;
+}
+
+static int get_block(unsigned char *buffer, u32 size)
+{
+	int need_num_pages = size / PAGE_SIZE;
+	int read_pages = 0;
+	int count;
+	u64 start;
+
+	do {
+		u64 prev;
+		count = 0;
+
+		if (!get_meta())
+			goto exit;
+
+		prev = start = meta_map->entries[meta_idx];
+		do {
+			count++;
+			meta_idx++;
+			if (meta_map->entries[meta_idx] - prev > 1)
+				break;
+			prev = meta_map->entries[meta_idx];
+		} while (read_pages + count < need_num_pages &&
+			meta_idx < ARRAY_SIZE(meta_map->entries));
+
+		if (block_read(start, buffer, count))
+			return -1;
+		read_pages += count;
+		buffer += count * PAGE_SIZE;
+	} while (read_pages < need_num_pages);
+
+exit:
+	return read_pages * PAGE_SIZE;
+}
+
 #endif
 
 static int page_read(u32 page, void *addr)
@@ -465,12 +512,23 @@ static int image_page_get_next(void *buffer)
 			cmp_len = *(size_t *) cmp_buf;
 			cmp_avail = PAGE_SIZE;
 
+#ifdef CONFIG_SH_DMA
+			while (cmp_avail < cmp_len + LZO_HEADER) {
+				/* try to DMA-read whole block */
+				ret = get_block(cmp_buf + cmp_avail,
+						cmp_len + LZO_HEADER);
+				if (unlikely(ret <= 0))
+					return ret;
+				cmp_avail += ret;
+			}
+#else
 			while (cmp_avail < cmp_len + LZO_HEADER) {
 				ret = raw_page_get_next(cmp_buf + cmp_avail);
 				if (unlikely(ret <= 0))
 					return ret;
 				cmp_avail += PAGE_SIZE;
 			}
+#endif
 			unc_len = LZO_UNC_SIZE;
 			ret = lzo1x_decompress_safe(cmp_buf + LZO_HEADER,
 						cmp_len, unc_buf, &unc_len);
-- 
1.8.3.1
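
The heart of the CONFIG_SH_DMA path above is the run-coalescing loop in
get_block(): consecutive swap-page indices taken from meta_map->entries are
merged into one multi-page block_read() transfer instead of one read per
page. The following is a minimal standalone sketch of just that coalescing
step, not code from the patch; read_run(), read_coalesced(), page_index and
num_pages are hypothetical stand-ins for block_read(), get_block(),
meta_map->entries and the surrounding meta bookkeeping.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for one multi-page transfer (block_read() in the patch). */
static int read_run(uint64_t first_page, size_t count)
{
	printf("read pages %llu..%llu in one transfer\n",
	       (unsigned long long)first_page,
	       (unsigned long long)(first_page + count - 1));
	return 0;
}

/*
 * Walk a sorted list of page indices and issue one read per run of
 * consecutive pages, mirroring the inner do/while loop of get_block().
 */
static int read_coalesced(const uint64_t *page_index, size_t num_pages)
{
	size_t i = 0;

	while (i < num_pages) {
		uint64_t start = page_index[i];
		size_t count = 1;

		/* Extend the run while the next index is adjacent to the previous one. */
		while (i + count < num_pages &&
		       page_index[i + count] == page_index[i + count - 1] + 1)
			count++;

		if (read_run(start, count))
			return -1;
		i += count;
	}
	return 0;
}

int main(void)
{
	/* Example map: two contiguous runs of swap pages. */
	const uint64_t pages[] = { 100, 101, 102, 200, 201 };

	return read_coalesced(pages, sizeof(pages) / sizeof(pages[0]));
}

For this example map the sketch issues two transfers (pages 100..102 and
200..201) where a strictly per-page reader, like the non-DMA
raw_page_get_next() path, would issue five.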