path: root/roms/u-boot/tools/patman
author     Angelos Mouzakitis <a.mouzakitis@virtualopensystems.com>  2023-10-10 14:33:42 +0000
committer  Angelos Mouzakitis <a.mouzakitis@virtualopensystems.com>  2023-10-10 14:33:42 +0000
commit     af1a266670d040d2f4083ff309d732d648afba2a (patch)
tree       2fc46203448ddcc6f81546d379abfaeb323575e9 /roms/u-boot/tools/patman
parent     e02cda008591317b1625707ff8e115a4841aa889 (diff)
Add submodule dependency files  (HEAD, master)
Change-Id: Iaf8d18082d3991dec7c0ebbea540f092188eb4ec
Diffstat (limited to 'roms/u-boot/tools/patman')
-rw-r--r--  roms/u-boot/tools/patman/.gitignore            1
-rw-r--r--  roms/u-boot/tools/patman/README              644
-rw-r--r--  roms/u-boot/tools/patman/__init__.py           3
-rw-r--r--  roms/u-boot/tools/patman/checkpatch.py       271
-rw-r--r--  roms/u-boot/tools/patman/command.py          142
-rw-r--r--  roms/u-boot/tools/patman/commit.py           109
-rw-r--r--  roms/u-boot/tools/patman/control.py          237
-rw-r--r--  roms/u-boot/tools/patman/cros_subprocess.py  404
-rw-r--r--  roms/u-boot/tools/patman/func_test.py       1284
-rw-r--r--  roms/u-boot/tools/patman/get_maintainer.py    48
-rw-r--r--  roms/u-boot/tools/patman/gitutil.py          675
-rwxr-xr-x  roms/u-boot/tools/patman/main.py             202
-rw-r--r--  roms/u-boot/tools/patman/patchstream.py      841
l---------  roms/u-boot/tools/patman/patman                1
-rw-r--r--  roms/u-boot/tools/patman/project.py           26
-rw-r--r--  roms/u-boot/tools/patman/series.py           325
-rw-r--r--  roms/u-boot/tools/patman/settings.py         362
-rw-r--r--  roms/u-boot/tools/patman/setup.py             12
-rw-r--r--  roms/u-boot/tools/patman/status.py           487
-rw-r--r--  roms/u-boot/tools/patman/terminal.py         270
-rw-r--r--  roms/u-boot/tools/patman/test/test01.txt      69
-rw-r--r--  roms/u-boot/tools/patman/test_checkpatch.py  457
-rw-r--r--  roms/u-boot/tools/patman/test_util.py        193
-rw-r--r--  roms/u-boot/tools/patman/tools.py            580
-rw-r--r--  roms/u-boot/tools/patman/tout.py             179
25 files changed, 7822 insertions, 0 deletions
diff --git a/roms/u-boot/tools/patman/.gitignore b/roms/u-boot/tools/patman/.gitignore
new file mode 100644
index 000000000..0d20b6487
--- /dev/null
+++ b/roms/u-boot/tools/patman/.gitignore
@@ -0,0 +1 @@
+*.pyc
diff --git a/roms/u-boot/tools/patman/README b/roms/u-boot/tools/patman/README
new file mode 100644
index 000000000..53f55ce95
--- /dev/null
+++ b/roms/u-boot/tools/patman/README
@@ -0,0 +1,644 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2011 The Chromium OS Authors.
+
+What is this?
+=============
+
+This tool is a Python script which:
+- Creates patches directly from your branch
+- Cleans them up by removing unwanted tags
+- Inserts a cover letter with change lists
+- Runs the patches through checkpatch.pl and its own checks
+- Optionally emails them out to selected people
+
+It also has some Patchwork features:
+- shows review tags from Patchwork so you can update your local patches
+- pulls these down into a new branch on request
+- lists comments received on a series
+
+It is intended to automate patch creation and make it a less
+error-prone process. It is useful for U-Boot and Linux work so far,
+since they use the checkpatch.pl script.
+
+It is configured almost entirely by tags it finds in your commits.
+This means that you can work on a number of different branches at
+once, and keep the settings with each branch rather than having to
+git format-patch, git send-email, etc. with the correct parameters
+each time. So for example if you put:
+
+Series-to: fred.blogs@napier.co.nz
+
+in one of your commits, the series will be sent there.
+
+In Linux and U-Boot this will also call get_maintainer.pl on each of your
+patches automatically (unless you use -m to disable this).
+
+
+How to use this tool
+====================
+
+This tool requires a certain way of working:
+
+- Maintain a number of branches, one for each patch series you are
+working on
+- Add tags into the commits within each branch to indicate where the
+series should be sent, cover letter, version, etc. Most of these are
+normally in the top commit so it is easy to change them with 'git
+commit --amend'
+- Each branch tracks the upstream branch, so that this script can
+automatically determine the number of commits in it (optional; see the
+example just after this list)
+- Check out a branch, and run this script to create and send out your
+patches. Weeks later, change the patches and repeat, knowing that you
+will get a consistent result each time.
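+
+For the optional upstream tracking, you can point your branch at its
+upstream once with something like this (the remote and branch names are
+just examples):
+
+ git branch --set-upstream-to=origin/master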
+
+
+How to configure it
+===================
+
+For most cases of using patman for U-Boot development, patman can use the
+file 'doc/git-mailrc' in your U-Boot directory to supply the email aliases
+you need. To make this work, tell git where to find the file by typing
+this once:
+
+ git config sendemail.aliasesfile doc/git-mailrc
+
+For both Linux and U-Boot the 'scripts/get_maintainer.pl' handles figuring
+out where to send patches pretty well.
+
+During the first run patman creates a config file for you by taking the default
+user name and email address from the global .gitconfig file.
+
+To add your own, create a file ~/.patman like this:
+
+>>>>
+# patman alias file
+
+[alias]
+me: Simon Glass <sjg@chromium.org>
+
+u-boot: U-Boot Mailing List <u-boot@lists.denx.de>
+wolfgang: Wolfgang Denk <wd@denx.de>
+others: Mike Frysinger <vapier@gentoo.org>, Fred Bloggs <f.bloggs@napier.net>
+
+<<<<
+
+Aliases are recursive.
+
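+For example, one alias can refer to other aliases and patman expands them
+all (the names here are purely illustrative):
+
+>>>>
+[alias]
+fred: Fred Bloggs <f.bloggs@napier.net>
+joe: Joe Soap <joe@example.com>
+reviewers: fred, joe
+<<<<
+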
+The checkpatch.pl in the U-Boot tools/ subdirectory will be located and
+used. Failing that you can put it into your path or ~/bin/checkpatch.pl
+
+If you want to avoid sending patches to email addresses that are picked up
+by patman but are known to bounce, you can add a [bounces] section to your
+.patman file. Unlike the [alias] section these are simple key: value pairs
+that are not recursive.
+
+>>>
+
+[bounces]
+gonefishing: Fred Bloggs <f.bloggs@napier.net>
+
+<<<
+
+
+If you want to change the defaults for patman's command-line arguments,
+you can add a [settings] section to your .patman file. This can be used
+for any command line option by referring to the "dest" for the option in
+patman.py. For reference, the useful ones (at the moment) are shown below
+(all with the non-default setting):
+
+>>>
+
+[settings]
+ignore_errors: True
+process_tags: False
+verbose: True
+smtp_server: /path/to/sendmail
+patchwork_server: https://patchwork.ozlabs.org
+
+<<<
+
+
+If you want to adjust settings (or aliases) that affect just a single
+project you can add a section that looks like [project_settings] or
+[project_alias]. If you want to use tags for your linux work, you could
+do:
+
+>>>
+
+[linux_settings]
+process_tags: True
+
+<<<
+
+
+How to run it
+=============
+
+First do a dry run:
+
+$ ./tools/patman/patman send -n
+
+If it can't detect the upstream branch, try telling it how many patches
+there are in your series:
+
+$ ./tools/patman/patman -c5 send -n
+
+This will create patch files in your current directory and tell you who
+it is thinking of sending them to. Take a look at the patch files.
+
+$ ./tools/patman/patman -c5 -s1 send -n
+
+Similar to the above, but skip the first commit and take the next 5. This
+is useful if your top commit is for setting up testing.
+
+
+How to install it
+=================
+
+The most up to date version of patman can be found in the U-Boot sources.
+However to use it on other projects it may be more convenient to install it as
+a standalone application. A distutils installer is included; this can be used
+to install patman:
+
+$ cd tools/patman && python setup.py install
+
+
+How to add tags
+===============
+
+To make this script useful you must add tags like the following into any
+commit. Most can only appear once in the whole series.
+
+Series-to: email / alias
+ Email address / alias to send patch series to (you can add this
+ multiple times)
+
+Series-cc: email / alias, ...
+ Email address / alias to Cc patch series to (you can add this
+ multiple times)
+
+Series-version: n
+ Sets the version number of this patch series
+
+Series-prefix: prefix
+ Sets the subject prefix. Normally empty but it can be RFC for
+ RFC patches, or RESEND if you are being ignored. The patch subject
+ is like [RFC PATCH] or [RESEND PATCH].
+ The git format.subjectprefix option is also applied. If your
+ format.subjectprefix is set to InternalProject, then the patch
+ subject looks like: [InternalProject][RFC/RESEND PATCH]
+
+Series-name: name
+ Sets the name of the series. You don't need to have a name, and
+ patman does not yet use it, but it is convenient to put the branch
+ name here to help you keep track of multiple upstreaming efforts.
+
+Series-links: [id | version:id]...
+ Set the ID of the series in patchwork. You can set this after you send
+ out the series and look in patchwork for the resulting series. The
+ URL you want is the one for the series itself, not any particular patch.
+ E.g. for http://patchwork.ozlabs.org/project/uboot/list/?series=187331
+ the series ID is 187331. This property can have a list of series IDs,
+ one for each version of the series, e.g.
+
+ Series-links: 1:187331 2:188434 189372
+
+ Patman always uses the one without a version, since it assumes this is
+ the latest one. When this tag is provided, patman can compare your local
+ branch against patchwork to see what new reviews your series has
+ collected ('patman status').
+
+Series-patchwork-url: url
+ This allows specifying the Patchwork URL for a branch. This overrides
+ both the setting files and the command-line argument. The URL should
+ include the protocol and web site, with no trailing slash, for example
+ 'https://patchwork.ozlabs.org/project'
+
+Cover-letter:
+This is the patch set title
+blah blah
+more blah blah
+END
+ Sets the cover letter contents for the series. The first line
+ will become the subject of the cover letter
+
+Cover-letter-cc: email / alias
+ Additional email addresses / aliases to send cover letter to (you
+ can add this multiple times)
+
+Series-notes:
+blah blah
+blah blah
+more blah blah
+END
+ Sets some notes for the patch series, which you don't want in
+ the commit messages, but do want to send. The notes are joined
+ together and put after the cover letter. Can appear multiple
+ times.
+
+Commit-notes:
+blah blah
+blah blah
+more blah blah
+END
+ Similar, but for a single commit (patch). These notes will appear
+ immediately below the --- cut in the patch file.
+
+ Signed-off-by: Their Name <email>
+ A sign-off is added automatically to your patches (this is
+ probably a bug). If you put this tag in your patches, it will
+ override the default signoff that patman automatically adds.
+ Multiple duplicate signoffs will be removed.
+
+ Tested-by: Their Name <email>
+ Reviewed-by: Their Name <email>
+ Acked-by: Their Name <email>
+ These indicate that someone has tested/reviewed/acked your patch.
+ When you get this reply on the mailing list, you can add this
+ tag to the relevant commit and the script will include it when
+ you send out the next version. If 'Tested-by:' is set to
+ yourself, it will be removed. No one will believe you.
+
+Series-changes: n
+- Guinea pig moved into its cage
+- Other changes ending with a blank line
+<blank line>
+ This can appear in any commit. It lists the changes for a
+ particular version n of that commit. The change list is
+ created based on this information. Each commit gets its own
+ change list and also the whole thing is repeated in the cover
+ letter (where duplicate change lines are merged).
+
+ By adding your change lists into your commits it is easier to
+ keep track of what happened. When you amend a commit, remember
+ to update the log there and then, knowing that the script will
+ do the rest.
+
+Commit-changes: n
+- This line will not appear in the cover-letter changelog
+<blank line>
+ This tag is like Series-changes, except changes in this changelog will
+ only appear in the changelog of the commit this tag is in. This is
+ useful when you want to add notes which may not make sense in the cover
+ letter. For example, you can have short changes such as "New" or
+ "Lint".
+
+Cover-changes: n
+- This line will only appear in the cover letter
+<blank line>
+ This tag is like Series-changes, except changes in this changelog will
+ only appear in the cover-letter changelog. This is useful to summarize
+ changes made with Commit-changes, or to add additional context to
+ changes.
+
+Patch-cc: Their Name <email>
+ This copies a single patch to another email address. Note that the
+ Cc: used by git send-email is ignored by patman, but will be
+ interpreted by git send-email if you use it.
+
+Series-process-log: sort, uniq
+ This tells patman to sort and/or uniq the change logs. Changes may be
+ multiple lines long, as long as each subsequent line of a change begins
+ with a whitespace character. For example,
+
+- This change
+ continues onto the next line
+- But this change is separate
+
+ Use 'sort' to sort the entries, and 'uniq' to include only
+ unique entries. If omitted, no change log processing is done.
+ Separate each tag with a comma.
+
+Change-Id:
+ This tag is stripped out but is used to generate the Message-Id
+ of the emails that will be sent. When you keep the Change-Id the
+ same you are asserting that this is a slightly different version
+ (but logically the same patch) as other patches that have been
+ sent out with the same Change-Id.
+
+Various other tags are silently removed, like these Chrome OS and
+Gerrit tags:
+
+BUG=...
+TEST=...
+Review URL:
+Reviewed-on:
+Commit-xxxx: (except Commit-notes)
+
+Exercise for the reader: Try adding some tags to one of your current
+patch series and see how the patches turn out.
+
+
+Where Patches Are Sent
+======================
+
+Once the patches are created, patman sends them using git send-email. The
+whole series is sent to the recipients in Series-to: and Series-cc.
+You can Cc individual patches to other people with the Patch-cc: tag. Tags
+in the subject are also picked up to Cc patches. For example, a commit like
+this:
+
+>>>>
+commit 10212537b85ff9b6e09c82045127522c0f0db981
+Author: Mike Frysinger <vapier@gentoo.org>
+Date: Mon Nov 7 23:18:44 2011 -0500
+
+ x86: arm: add a git mailrc file for maintainers
+
+ This should make sending out e-mails to the right people easier.
+
+ Patch-cc: sandbox, mikef, ag
+ Patch-cc: afleming
+<<<<
+
+will create a patch which is copied to x86, arm, sandbox, mikef, ag and
+afleming.
+
+If you have a cover letter it will get sent to the union of the Patch-cc
+lists of all of the other patches. If you want to send it to additional
+people you can add a tag:
+
+Cover-letter-cc: <list of addresses>
+
+These people will get the cover letter even if they are not on the To/Cc
+list for any of the patches.
+
+
+Patchwork Integration
+=====================
+
+Patman has a very basic integration with Patchwork. If you point patman to
+your series on patchwork it can show you what new reviews have appeared since
+you sent your series.
+
+To set this up, add a Series-links tag to one of the commits in your series
+(see above).
+
+Then you can type
+
+ patman status
+
+and patman will show you each patch and what review tags have been collected,
+for example:
+
+...
+ 21 x86: mtrr: Update the command to use the new mtrr
+ Reviewed-by: Wolfgang Wallner <wolfgang.wallner@br-automation.com>
+ + Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
+ 22 x86: mtrr: Restructure so command execution is in
+ Reviewed-by: Wolfgang Wallner <wolfgang.wallner@br-automation.com>
+ + Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
+...
+
+This shows that patches 21 and 22 were sent out with one review but have since
+attracted another review each. If the series needs changes, you can update
+these commits with the new review tag before sending the next version of the
+series.
+
+To automatically pull these tags into a new branch, use the -d option:
+
+ patman status -d mtrr4
+
+This will create a new 'mtrr4' branch which is the same as your current branch
+but has the new review tags in it. The tags are added in alphabetic order and
+are placed immediately after any existing ack/review/test/fixes tags, or at the
+end. You can check that this worked with:
+
+ patman -b mtrr4 status
+
+which should show that there are no new responses compared to this new branch.
+
+There is also a -C option to list the comments received for each patch.
+
+
+Example Work Flow
+=================
+
+The basic workflow is to create your commits, add some tags to the top
+commit, and type 'patman' to check and send them.
+
+Here is an example workflow for a series of 4 patches. Let's say you have
+these rather contrived patches in the following order in branch us-cmd in
+your tree where 'us' means your upstreaming activity (newest to oldest as
+output by git log --oneline):
+
+ 7c7909c wip
+ 89234f5 Don't include standard parser if hush is used
+ 8d640a7 mmc: sparc: Stop using builtin_run_command()
+ 0c859a9 Rename run_command2() to run_command()
+ a74443f sandbox: Rename run_command() to builtin_run_command()
+
+The first patch contains some test changes that enable your code to be compiled,
+but that you don't want to submit because there is an existing patch for it
+on the list. So you can tell patman to create and check some patches
+(skipping the first patch) with:
+
+ patman -s1 send -n
+
+If you want to do all of them including the work-in-progress one, then
+(if you are tracking an upstream branch):
+
+ patman send -n
+
+Let's say that patman reports an error in the second patch. Then:
+
+ git rebase -i HEAD~6
+ <change 'pick' to 'edit' in 89234f5>
+ <use editor to make code changes>
+ git add -u
+ git rebase --continue
+
+Now you have an updated patch series. To check it:
+
+ patman -s1 send -n
+
+Let's say it is now clean and you want to send it. Now you need to set up
+the destination. So amend the top commit with:
+
+ git commit --amend
+
+Use your editor to add some tags, so that the whole commit message is:
+
+ The current run_command() is really only one of the options, with
+ hush providing the other. It really shouldn't be called directly
+ in case the hush parser is being used, so rename this function to
+ better explain its purpose.
+
+ Series-to: u-boot
+ Series-cc: bfin, marex
+ Series-prefix: RFC
+ Cover-letter:
+ Unified command execution in one place
+
+ At present two parsers have similar code to execute commands. Also
+ cmd_usage() is called all over the place. This series adds a single
+ function which processes commands called cmd_process().
+ END
+
+ Change-Id: Ica71a14c1f0ecb5650f771a32fecb8d2eb9d8a17
+
+
+You want this to be an RFC and Cc the whole series to the bfin alias and
+to Marek. Two of the patches have tags (those are the bits at the front of
+the subject that say mmc: sparc: and sandbox:), so 8d640a7 will be Cc'd to
+mmc and sparc, and the last one to sandbox.
+
+Now to send the patches, take off the -n flag:
+
+ patman -s1 send
+
+The patches will be created, shown in your editor, and then sent along with
+the cover letter. Note that patman's tags are automatically removed so that
+people on the list don't see your secret info.
+
+Of course patches often attract comments and you need to make some updates.
+Let's say one person sent comments and you get an Acked-by: on one patch.
+Also, the patch on the list that you were waiting for has been merged,
+so you can drop your wip commit.
+
+Take a look on patchwork and find out the URL of the series. This will be
+something like http://patchwork.ozlabs.org/project/uboot/list/?series=187331
+Add the series ID (the number at the end, 187331 here) to a tag in your top
+commit:
+
+ Series-links: 187331
+
+You can then use patman to collect the Acked-by tag into the correct commit,
+creating a new 'version 2' branch for us-cmd:
+
+ patman status -d us-cmd2
+ git checkout us-cmd2
+
+You can look at the comments in Patchwork or with:
+
+ patman status -C
+
+Then you can resync with upstream:
+
+ git fetch origin (or whatever upstream is called)
+ git rebase origin/master
+
+and use git rebase -i to edit the commits, dropping the wip one.
+
+Then update the Series-cc: in the top commit to add the person who reviewed
+the v1 series:
+
+ Series-cc: bfin, marex, Heiko Schocher <hs@denx.de>
+
+and remove the Series-prefix: tag since it isn't an RFC any more. The
+series is now version two, so the series info in the top commit looks like
+this:
+
+ Series-to: u-boot
+ Series-cc: bfin, marex, Heiko Schocher <hs@denx.de>
+ Series-version: 2
+ Cover-letter:
+ ...
+
+Finally, you need to add a change log to the two commits you changed. You
+add change logs to each individual commit where the changes happened, like
+this:
+
+ Series-changes: 2
+ - Updated the command decoder to reduce code size
+ - Wound the torque propounder up a little more
+
+(note the blank line at the end of the list)
+
+When you run patman it will collect all the change logs from the different
+commits and combine them into the cover letter, if you have one. So finally
+you have a new series of commits:
+
+ faeb973 Don't include standard parser if hush is used
+ 1b2f2fe mmc: sparc: Stop using builtin_run_command()
+ cfbe330 Rename run_command2() to run_command()
+ 0682677 sandbox: Rename run_command() to builtin_run_command()
+
+so to send them:
+
+ patman
+
+and it will create and send the version 2 series.
+
+
+General points
+==============
+
+1. When you change back to the us-cmd branch days or weeks later all your
+information is still there, safely stored in the commits. You don't need
+to remember what version you are up to, who you sent the last lot of patches
+to, or anything about the change logs.
+
+2. If you put tags in the subject, patman will Cc the maintainers
+automatically in many cases.
+
+3. If you want to keep the commits from each series you sent so that you can
+compare changes and see what you did, you can either create a new branch for
+each version, or just tag the branch before you start changing it:
+
+ git tag sent/us-cmd-rfc
+ ...later...
+ git tag sent/us-cmd-v2
+
+4. If you want to modify the patches a little before sending, you can do
+this in your editor, but be careful!
+
+5. If you want to run git send-email yourself, use the -n flag which will
+print out the command line patman would have used.
+
+6. It is a good idea to add the change log info as you change the commit,
+not later when you can't remember which patch you changed. You can always
+go back and change or remove logs from commits.
+
+7. Some mailing lists have size limits and when we add binary contents to
+our patches it's easy to exceed the size limits. Use "--no-binary" to
+generate patches without any binary contents. You are supposed to include
+a link to a git repository in your "Commit-notes", "Series-notes" or
+"Cover-letter" for maintainers to fetch the original commit.
+
+8. Patches will have no changelog entries for revisions where they did not
+change. For clarity, if there are no changes for this patch in the most
+recent revision of the series, a note will be added. For example, a patch
+with the following tags in the commit
+
+ Series-version: 5
+ Series-changes: 2
+ - Some change
+
+ Series-changes: 4
+ - Another change
+
+would have a changelog of
+
+ (no changes since v4)
+
+ Changes in v4:
+ - Another change
+
+ Changes in v2:
+ - Some change
+
+Other thoughts
+==============
+
+This script has been split into sensible files but still needs work.
+Most of these are indicated by a TODO in the code.
+
+It would be nice if this could handle the In-reply-to side of things.
+
+The tests are incomplete, as is customary. Use the 'test' subcommand to run
+them:
+
+ $ tools/patman/patman test
+
+Error handling doesn't always produce friendly error messages - e.g.
+putting an incorrect tag in a commit may provide a confusing message.
+
+There might be a few other features not mentioned in this README. They
+might be bugs. In particular, tags are case sensitive which is probably
+a bad thing.
+
+
+Simon Glass <sjg@chromium.org>
+v1, v2, 19-Oct-11
+revised v3 24-Nov-11
+revised v4 Independence Day 2020, with Patchwork integration
diff --git a/roms/u-boot/tools/patman/__init__.py b/roms/u-boot/tools/patman/__init__.py
new file mode 100644
index 000000000..7cbe5fa4b
--- /dev/null
+++ b/roms/u-boot/tools/patman/__init__.py
@@ -0,0 +1,3 @@
+__all__ = ['checkpatch', 'command', 'commit', 'cros_subprocess',
+ 'get_maintainer', 'gitutil', 'patchstream', 'project',
+ 'series', 'settings', 'terminal', 'test']
diff --git a/roms/u-boot/tools/patman/checkpatch.py b/roms/u-boot/tools/patman/checkpatch.py
new file mode 100644
index 000000000..8978df25c
--- /dev/null
+++ b/roms/u-boot/tools/patman/checkpatch.py
@@ -0,0 +1,271 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+
+import collections
+import os
+import re
+import sys
+
+from patman import command
+from patman import gitutil
+from patman import terminal
+
+EMACS_PREFIX = r'(?:[0-9]{4}.*\.patch:[0-9]+: )?'
+TYPE_NAME = r'([A-Z_]+:)?'
+RE_ERROR = re.compile(r'ERROR:%s (.*)' % TYPE_NAME)
+RE_WARNING = re.compile(EMACS_PREFIX + r'WARNING:%s (.*)' % TYPE_NAME)
+RE_CHECK = re.compile(r'CHECK:%s (.*)' % TYPE_NAME)
+RE_FILE = re.compile(r'#(\d+): (FILE: ([^:]*):(\d+):)?')
+RE_NOTE = re.compile(r'NOTE: (.*)')
+
+
+def FindCheckPatch():
+ top_level = gitutil.GetTopLevel()
+ try_list = [
+ os.getcwd(),
+ os.path.join(os.getcwd(), '..', '..'),
+ os.path.join(top_level, 'tools'),
+ os.path.join(top_level, 'scripts'),
+ '%s/bin' % os.getenv('HOME'),
+ ]
+ # Try each of the possible locations in turn
+ for path in try_list:
+ fname = os.path.join(path, 'checkpatch.pl')
+ if os.path.isfile(fname):
+ return fname
+
+ # Look upwards for a Chrome OS tree
+ while not os.path.ismount(path):
+ fname = os.path.join(path, 'src', 'third_party', 'kernel', 'files',
+ 'scripts', 'checkpatch.pl')
+ if os.path.isfile(fname):
+ return fname
+ path = os.path.dirname(path)
+
+ sys.exit('Cannot find checkpatch.pl - please put it in your ' +
+ '~/bin directory or use --no-check')
+
+
+def CheckPatchParseOneMessage(message):
+ """Parse one checkpatch message
+
+ Args:
+ message: string to parse
+
+ Returns:
+ dict:
+ 'type': error, warning or check
+ 'msg': text message
+ 'file' : filename
+ 'line': line number
+ """
+
+ if RE_NOTE.match(message):
+ return {}
+
+ item = {}
+
+ err_match = RE_ERROR.match(message)
+ warn_match = RE_WARNING.match(message)
+ check_match = RE_CHECK.match(message)
+ if err_match:
+ item['cptype'] = err_match.group(1)
+ item['msg'] = err_match.group(2)
+ item['type'] = 'error'
+ elif warn_match:
+ item['cptype'] = warn_match.group(1)
+ item['msg'] = warn_match.group(2)
+ item['type'] = 'warning'
+ elif check_match:
+ item['cptype'] = check_match.group(1)
+ item['msg'] = check_match.group(2)
+ item['type'] = 'check'
+ else:
+ message_indent = ' '
+ print('patman: failed to parse checkpatch message:\n%s' %
+ (message_indent + message.replace('\n', '\n' + message_indent)),
+ file=sys.stderr)
+ return {}
+
+ file_match = RE_FILE.search(message)
+ # some messages have no file, catch those here
+ no_file_match = any(s in message for s in [
+ '\nSubject:', 'Missing Signed-off-by: line(s)',
+ 'does MAINTAINERS need updating'
+ ])
+
+ if file_match:
+ err_fname = file_match.group(3)
+ if err_fname:
+ item['file'] = err_fname
+ item['line'] = int(file_match.group(4))
+ else:
+ item['file'] = '<patch>'
+ item['line'] = int(file_match.group(1))
+ elif no_file_match:
+ item['file'] = '<patch>'
+ else:
+ message_indent = ' '
+ print('patman: failed to find file / line information:\n%s' %
+ (message_indent + message.replace('\n', '\n' + message_indent)),
+ file=sys.stderr)
+
+ return item
+
+
+def CheckPatchParse(checkpatch_output, verbose=False):
+ """Parse checkpatch.pl output
+
+ Args:
+ checkpatch_output: string to parse
+ verbose: True to print out every line of the checkpatch output as it is
+ parsed
+
+ Returns:
+ namedtuple containing:
+ ok: False=failure, True=ok
+ problems: List of problems, each a dict:
+ 'type': error, warning or check
+ 'msg': text message
+ 'file' : filename
+ 'line': line number
+ errors: Number of errors
+ warnings: Number of warnings
+ checks: Number of checks
+ lines: Number of lines
+ stdout: checkpatch_output
+ """
+ fields = ['ok', 'problems', 'errors', 'warnings', 'checks', 'lines',
+ 'stdout']
+ result = collections.namedtuple('CheckPatchResult', fields)
+ result.stdout = checkpatch_output
+ result.ok = False
+ result.errors, result.warnings, result.checks = 0, 0, 0
+ result.lines = 0
+ result.problems = []
+
+ # total: 0 errors, 0 warnings, 159 lines checked
+ # or:
+ # total: 0 errors, 2 warnings, 7 checks, 473 lines checked
+ emacs_stats = r'(?:[0-9]{4}.*\.patch )?'
+ re_stats = re.compile(emacs_stats +
+ r'total: (\d+) errors, (\d+) warnings, (\d+)')
+ re_stats_full = re.compile(emacs_stats +
+ r'total: (\d+) errors, (\d+) warnings, (\d+)'
+ r' checks, (\d+)')
+ re_ok = re.compile(r'.*has no obvious style problems')
+ re_bad = re.compile(r'.*has style problems, please review')
+
+ # A blank line indicates the end of a message
+ for message in result.stdout.split('\n\n'):
+ if verbose:
+ print(message)
+
+ # either find stats, the verdict, or delegate
+ match = re_stats_full.match(message)
+ if not match:
+ match = re_stats.match(message)
+ if match:
+ result.errors = int(match.group(1))
+ result.warnings = int(match.group(2))
+ if len(match.groups()) == 4:
+ result.checks = int(match.group(3))
+ result.lines = int(match.group(4))
+ else:
+ result.lines = int(match.group(3))
+ elif re_ok.match(message):
+ result.ok = True
+ elif re_bad.match(message):
+ result.ok = False
+ else:
+ problem = CheckPatchParseOneMessage(message)
+ if problem:
+ result.problems.append(problem)
+
+ return result
+
+
+def CheckPatch(fname, verbose=False, show_types=False):
+ """Run checkpatch.pl on a file and parse the results.
+
+ Args:
+ fname: Filename to check
+ verbose: True to print out every line of the checkpatch output as it is
+ parsed
+ show_types: Tell checkpatch to show the type (number) of each message
+
+ Returns:
+ namedtuple containing:
+ ok: False=failure, True=ok
+ problems: List of problems, each a dict:
+ 'type': error, warning or check
+ 'msg': text message
+ 'file' : filename
+ 'line': line number
+ errors: Number of errors
+ warnings: Number of warnings
+ checks: Number of checks
+ lines: Number of lines
+ stdout: Full output of checkpatch
+ """
+ chk = FindCheckPatch()
+ args = [chk, '--no-tree']
+ if show_types:
+ args.append('--show-types')
+ output = command.Output(*args, fname, raise_on_error=False)
+
+ return CheckPatchParse(output, verbose)
+
+
+def GetWarningMsg(col, msg_type, fname, line, msg):
+ '''Create a message for a given file/line
+
+ Args:
+ msg_type: Message type ('error' or 'warning')
+ fname: Filename which reports the problem
+ line: Line number where it was noticed
+ msg: Message to report
+ '''
+ if msg_type == 'warning':
+ msg_type = col.Color(col.YELLOW, msg_type)
+ elif msg_type == 'error':
+ msg_type = col.Color(col.RED, msg_type)
+ elif msg_type == 'check':
+ msg_type = col.Color(col.MAGENTA, msg_type)
+ line_str = '' if line is None else '%d' % line
+ return '%s:%s: %s: %s\n' % (fname, line_str, msg_type, msg)
+
+def CheckPatches(verbose, args):
+ '''Run the checkpatch.pl script on each patch'''
+ error_count, warning_count, check_count = 0, 0, 0
+ col = terminal.Color()
+
+ for fname in args:
+ result = CheckPatch(fname, verbose)
+ if not result.ok:
+ error_count += result.errors
+ warning_count += result.warnings
+ check_count += result.checks
+ print('%d errors, %d warnings, %d checks for %s:' % (result.errors,
+ result.warnings, result.checks, col.Color(col.BLUE, fname)))
+ if (len(result.problems) != result.errors + result.warnings +
+ result.checks):
+ print("Internal error: some problems lost")
+ for item in result.problems:
+ sys.stderr.write(
+ GetWarningMsg(col, item.get('type', '<unknown>'),
+ item.get('file', '<unknown>'),
+ item.get('line', 0), item.get('msg', 'message')))
+ print()
+ #print(stdout)
+ if error_count or warning_count or check_count:
+ str = 'checkpatch.pl found %d error(s), %d warning(s), %d check(s)'
+ color = col.GREEN
+ if warning_count:
+ color = col.YELLOW
+ if error_count:
+ color = col.RED
+ print(col.Color(color, str % (error_count, warning_count, check_count)))
+ return False
+ return True
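
A minimal usage sketch for the module above (illustrative only; it assumes
the patman package is importable, that checkpatch.pl can be located, and that
'0001-example.patch' is an existing patch file - the name is made up):

    from patman import checkpatch

    # Run checkpatch.pl on one patch file and inspect the parsed result
    result = checkpatch.CheckPatch('0001-example.patch', verbose=False)
    print('ok=%s errors=%d warnings=%d checks=%d' %
          (result.ok, result.errors, result.warnings, result.checks))
    for item in result.problems:
        # Each problem is a dict with 'type', 'msg' and usually 'file'/'line'
        print(item.get('type'), item.get('file'), item.get('line'),
              item.get('msg'))
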
diff --git a/roms/u-boot/tools/patman/command.py b/roms/u-boot/tools/patman/command.py
new file mode 100644
index 000000000..bf8ea6c8c
--- /dev/null
+++ b/roms/u-boot/tools/patman/command.py
@@ -0,0 +1,142 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+
+import os
+
+from patman import cros_subprocess
+
+"""Shell command ease-ups for Python."""
+
+class CommandResult:
+ """A class which captures the result of executing a command.
+
+ Members:
+ stdout: stdout obtained from command, as a string
+ stderr: stderr obtained from command, as a string
+ return_code: Return code from command
+ exception: Exception received, or None if all ok
+ """
+ def __init__(self, stdout='', stderr='', combined='', return_code=0,
+ exception=None):
+ self.stdout = stdout
+ self.stderr = stderr
+ self.combined = combined
+ self.return_code = return_code
+ self.exception = exception
+
+ def ToOutput(self, binary):
+ if not binary:
+ self.stdout = self.stdout.decode('utf-8')
+ self.stderr = self.stderr.decode('utf-8')
+ self.combined = self.combined.decode('utf-8')
+ return self
+
+
+# This permits interception of RunPipe for test purposes. If it is set to
+# a function, then that function is called with the pipe list being
+# executed. Otherwise, it is assumed to be a CommandResult object, and is
+# returned as the result for every RunPipe() call.
+# When this value is None, commands are executed as normal.
+test_result = None
+
+def RunPipe(pipe_list, infile=None, outfile=None,
+ capture=False, capture_stderr=False, oneline=False,
+ raise_on_error=True, cwd=None, binary=False, **kwargs):
+ """
+ Perform a command pipeline, with optional input/output filenames.
+
+ Args:
+ pipe_list: List of command lines to execute. Each command line is
+ piped into the next, and is itself a list of strings. For
+ example [ ['ls', '.git'], ['wc'] ] will pipe the output of
+ 'ls .git' into 'wc'.
+ infile: File to provide stdin to the pipeline
+ outfile: File to store stdout
+ capture: True to capture output
+ capture_stderr: True to capture stderr
+ oneline: True to strip newline chars from output
+ kwargs: Additional keyword arguments to cros_subprocess.Popen()
+ Returns:
+ CommandResult object
+ """
+ if test_result:
+ if hasattr(test_result, '__call__'):
+ result = test_result(pipe_list=pipe_list)
+ if result:
+ return result
+ else:
+ return test_result
+ # No result: fall through to normal processing
+ result = CommandResult(b'', b'', b'')
+ last_pipe = None
+ pipeline = list(pipe_list)
+ user_pipestr = '|'.join([' '.join(pipe) for pipe in pipe_list])
+ kwargs['stdout'] = None
+ kwargs['stderr'] = None
+ while pipeline:
+ cmd = pipeline.pop(0)
+ if last_pipe is not None:
+ kwargs['stdin'] = last_pipe.stdout
+ elif infile:
+ kwargs['stdin'] = open(infile, 'rb')
+ if pipeline or capture:
+ kwargs['stdout'] = cros_subprocess.PIPE
+ elif outfile:
+ kwargs['stdout'] = open(outfile, 'wb')
+ if capture_stderr:
+ kwargs['stderr'] = cros_subprocess.PIPE
+
+ try:
+ last_pipe = cros_subprocess.Popen(cmd, cwd=cwd, **kwargs)
+ except Exception as err:
+ result.exception = err
+ if raise_on_error:
+ raise Exception("Error running '%s': %s" % (user_pipestr, str))
+ result.return_code = 255
+ return result.ToOutput(binary)
+
+ if capture:
+ result.stdout, result.stderr, result.combined = (
+ last_pipe.CommunicateFilter(None))
+ if result.stdout and oneline:
+ result.output = result.stdout.rstrip(b'\r\n')
+ result.return_code = last_pipe.wait()
+ else:
+ result.return_code = os.waitpid(last_pipe.pid, 0)[1]
+ if raise_on_error and result.return_code:
+ raise Exception("Error running '%s'" % user_pipestr)
+ return result.ToOutput(binary)
+
+def Output(*cmd, **kwargs):
+ kwargs['raise_on_error'] = kwargs.get('raise_on_error', True)
+ return RunPipe([cmd], capture=True, **kwargs).stdout
+
+def OutputOneLine(*cmd, **kwargs):
+ """Run a command and output it as a single-line string
+
+ The command is expected to produce a single line of output
+
+ Returns:
+ String containing output of command
+ """
+ raise_on_error = kwargs.pop('raise_on_error', True)
+ result = RunPipe([cmd], capture=True, oneline=True,
+ raise_on_error=raise_on_error, **kwargs).stdout.strip()
+ return result
+
+def Run(*cmd, **kwargs):
+ return RunPipe([cmd], **kwargs).stdout
+
+def RunList(cmd):
+ return RunPipe([cmd], capture=True).stdout
+
+def StopAll():
+ cros_subprocess.stay_alive = False
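
A minimal usage sketch for the helpers above (illustrative only; it assumes a
Unix-like system, since the underlying cros_subprocess module uses ptys, and
that the patman package is importable):

    from patman import command

    # Capture the output of a single command (raises an exception on a
    # non-zero exit status by default)
    version = command.Output('git', '--version')

    # Run a two-stage pipeline, capturing stdout and stderr
    result = command.RunPipe([['git', 'log', '--oneline', '-5'], ['wc', '-l']],
                             capture=True, capture_stderr=True)
    print(version.strip(), result.return_code, result.stdout.strip())
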
diff --git a/roms/u-boot/tools/patman/commit.py b/roms/u-boot/tools/patman/commit.py
new file mode 100644
index 000000000..5bf2b9402
--- /dev/null
+++ b/roms/u-boot/tools/patman/commit.py
@@ -0,0 +1,109 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+
+import collections
+import re
+
+# Separates a tag: at the beginning of the subject from the rest of it
+re_subject_tag = re.compile(r'([^:\s]*):\s*(.*)')
+
+class Commit:
+ """Holds information about a single commit/patch in the series.
+
+ Args:
+ hash: Commit hash (as a string)
+
+ Variables:
+ hash: Commit hash
+ subject: Subject line
+ tags: List of maintainer tag strings
+ changes: Dict containing a list of changes (single line strings).
+ The dict is indexed by change version (an integer)
+ cc_list: List of people to aliases/emails to cc on this commit
+ notes: List of lines in the commit (not series) notes
+ change_id: the Change-Id: tag that was stripped from this commit
+ and can be used to generate the Message-Id.
+ rtags: Response tags (e.g. Reviewed-by) collected by the commit, dict:
+ key: rtag type (e.g. 'Reviewed-by')
+ value: Set of people who gave that rtag, each a name/email string
+ warn: List of warnings for this commit, each a str
+ """
+ def __init__(self, hash):
+ self.hash = hash
+ self.subject = None
+ self.tags = []
+ self.changes = {}
+ self.cc_list = []
+ self.signoff_set = set()
+ self.notes = []
+ self.change_id = None
+ self.rtags = collections.defaultdict(set)
+ self.warn = []
+
+ def __str__(self):
+ return self.subject
+
+ def AddChange(self, version, info):
+ """Add a new change line to the change list for a version.
+
+ Args:
+ version: Patch set version (integer: 1, 2, 3)
+ info: Description of change in this version
+ """
+ if not self.changes.get(version):
+ self.changes[version] = []
+ self.changes[version].append(info)
+
+ def CheckTags(self):
+ """Create a list of subject tags in the commit
+
+ Subject tags look like this:
+
+ propounder: fort: Change the widget to propound correctly
+
+ Here the tags are propounder and fort. Multiple tags are supported.
+ The list is updated in self.tags.
+
+ Returns:
+ None if ok, else the name of a tag with no email alias
+ """
+ str = self.subject
+ m = True
+ while m:
+ m = re_subject_tag.match(str)
+ if m:
+ tag = m.group(1)
+ self.tags.append(tag)
+ str = m.group(2)
+ return None
+
+ def AddCc(self, cc_list):
+ """Add a list of people to Cc when we send this patch.
+
+ Args:
+ cc_list: List of aliases or email addresses
+ """
+ self.cc_list += cc_list
+
+ def CheckDuplicateSignoff(self, signoff):
+ """Check a list of signoffs we have send for this patch
+
+ Args:
+ signoff: Signoff line
+ Returns:
+ True if this signoff is new, False if we have already seen it.
+ """
+ if signoff in self.signoff_set:
+ return False
+ self.signoff_set.add(signoff)
+ return True
+
+ def AddRtag(self, rtag_type, who):
+ """Add a response tag to a commit
+
+ Args:
+ rtag_type: rtag type (e.g. 'Reviewed-by')
+ who: Person who gave that rtag, e.g. 'Fred Bloggs <fred@bloggs.org>'
+ """
+ self.rtags[rtag_type].add(who)
diff --git a/roms/u-boot/tools/patman/control.py b/roms/u-boot/tools/patman/control.py
new file mode 100644
index 000000000..ee9717cbf
--- /dev/null
+++ b/roms/u-boot/tools/patman/control.py
@@ -0,0 +1,237 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright 2020 Google LLC
+#
+"""Handles the main control logic of patman
+
+This module provides various functions called by the main program to implement
+the features of patman.
+"""
+
+import os
+import sys
+
+from patman import checkpatch
+from patman import gitutil
+from patman import patchstream
+from patman import terminal
+
+def setup():
+ """Do required setup before doing anything"""
+ gitutil.Setup()
+
+def prepare_patches(col, branch, count, start, end, ignore_binary, signoff):
+ """Figure out what patches to generate, then generate them
+
+ The patch files are written to the current directory, e.g. 0001_xxx.patch
+ 0002_yyy.patch
+
+ Args:
+ col (terminal.Color): Colour output object
+ branch (str): Branch to create patches from (None = current)
+ count (int): Number of patches to produce, or -1 to produce patches for
+ the current branch back to the upstream commit
+ start (int): Start patch to use (0=first / top of branch)
+ end (int): End patch to use (0=last one in series, 1=one before that,
+ etc.)
+ ignore_binary (bool): Don't generate patches for binary files
+
+ Returns:
+ Tuple:
+ Series object for this series (set of patches)
+ Filename of the cover letter as a string (None if none)
+ patch_files: List of patch filenames, each a string, e.g.
+ ['0001_xxx.patch', '0002_yyy.patch']
+ """
+ if count == -1:
+ # Work out how many patches to send if we can
+ count = (gitutil.CountCommitsToBranch(branch) - start)
+
+ if not count:
+ str = 'No commits found to process - please use -c flag, or run:\n' \
+ ' git branch --set-upstream-to remote/branch'
+ sys.exit(col.Color(col.RED, str))
+
+ # Read the metadata from the commits
+ to_do = count - end
+ series = patchstream.get_metadata(branch, start, to_do)
+ cover_fname, patch_files = gitutil.CreatePatches(
+ branch, start, to_do, ignore_binary, series, signoff)
+
+ # Fix up the patch files to our liking, and insert the cover letter
+ patchstream.fix_patches(series, patch_files)
+ if cover_fname and series.get('cover'):
+ patchstream.insert_cover_letter(cover_fname, series, to_do)
+ return series, cover_fname, patch_files
+
+def check_patches(series, patch_files, run_checkpatch, verbose):
+ """Run some checks on a set of patches
+
+ This sanity-checks the patman tags like Series-version and runs the patches
+ through checkpatch
+
+ Args:
+ series (Series): Series object for this series (set of patches)
+ patch_files (list): List of patch filenames, each a string, e.g.
+ ['0001_xxx.patch', '0002_yyy.patch']
+ run_checkpatch (bool): True to run checkpatch.pl
+ verbose (bool): True to print out every line of the checkpatch output as
+ it is parsed
+
+ Returns:
+ bool: True if the patches had no errors, False if they did
+ """
+ # Do a few checks on the series
+ series.DoChecks()
+
+ # Check the patches, and run them through 'git am' just to be sure
+ if run_checkpatch:
+ ok = checkpatch.CheckPatches(verbose, patch_files)
+ else:
+ ok = True
+ return ok
+
+
+def email_patches(col, series, cover_fname, patch_files, process_tags, its_a_go,
+ ignore_bad_tags, add_maintainers, limit, dry_run, in_reply_to,
+ thread, smtp_server):
+ """Email patches to the recipients
+
+ This emails out the patches and cover letter using 'git send-email'. Each
+ patch is copied to recipients identified by the patch tag and output from
+ the get_maintainer.pl script. The cover letter is copied to all recipients
+ of any patch.
+
+ To make this work a CC file is created holding the recipients for each patch
+ and the cover letter. See the main program 'cc_cmd' for this logic.
+
+ Args:
+ col (terminal.Color): Colour output object
+ series (Series): Series object for this series (set of patches)
+ cover_fname (str): Filename of the cover letter as a string (None if
+ none)
+ patch_files (list): List of patch filenames, each a string, e.g.
+ ['0001_xxx.patch', '0002_yyy.patch']
+ process_tags (bool): True to process subject tags in each patch, e.g.
+ for 'dm: spi: Add SPI support' this would be 'dm' and 'spi'. The
+ tags are looked up in the configured sendemail.aliasesfile and also
+ in ~/.patman (see README)
+ its_a_go (bool): True if we are going to actually send the patches,
+ False if the patches have errors and will not be sent unless
+ @ignore_errors
+ ignore_bad_tags (bool): True to just print a warning for unknown tags,
+ False to halt with an error
+ add_maintainers (bool): Run the get_maintainer.pl script for each patch
+ limit (int): Limit on the number of people that can be cc'd on a single
+ patch or the cover letter (None if no limit)
+ dry_run (bool): Don't actually email the patches, just print out what
+ would be sent
+ in_reply_to (str): If not None we'll pass this to git as --in-reply-to.
+ Should be a message ID that this is in reply to.
+ thread (bool): True to add --thread to git send-email (make all patches
+ reply to cover-letter or first patch in series)
+ smtp_server (str): SMTP server to use to send patches (None for default)
+ """
+ cc_file = series.MakeCcFile(process_tags, cover_fname, not ignore_bad_tags,
+ add_maintainers, limit)
+
+ # Email the patches out (giving the user time to check / cancel)
+ cmd = ''
+ if its_a_go:
+ cmd = gitutil.EmailPatches(
+ series, cover_fname, patch_files, dry_run, not ignore_bad_tags,
+ cc_file, in_reply_to=in_reply_to, thread=thread,
+ smtp_server=smtp_server)
+ else:
+ print(col.Color(col.RED, "Not sending emails due to errors/warnings"))
+
+ # For a dry run, just show our actions as a sanity check
+ if dry_run:
+ series.ShowActions(patch_files, cmd, process_tags)
+ if not its_a_go:
+ print(col.Color(col.RED, "Email would not be sent"))
+
+ os.remove(cc_file)
+
+def send(args):
+ """Create, check and send patches by email
+
+ Args:
+ args (argparse.Namespace): Arguments to patman
+ """
+ setup()
+ col = terminal.Color()
+ series, cover_fname, patch_files = prepare_patches(
+ col, args.branch, args.count, args.start, args.end,
+ args.ignore_binary, args.add_signoff)
+ ok = check_patches(series, patch_files, args.check_patch,
+ args.verbose)
+
+ ok = ok and gitutil.CheckSuppressCCConfig()
+
+ its_a_go = ok or args.ignore_errors
+ email_patches(
+ col, series, cover_fname, patch_files, args.process_tags,
+ its_a_go, args.ignore_bad_tags, args.add_maintainers,
+ args.limit, args.dry_run, args.in_reply_to, args.thread,
+ args.smtp_server)
+
+def patchwork_status(branch, count, start, end, dest_branch, force,
+ show_comments, url):
+ """Check the status of patches in patchwork
+
+ This finds the series in patchwork using the Series-link tag, checks for new
+ comments and review tags, displays then and creates a new branch with the
+ review tags.
+
+ Args:
+ branch (str): Branch to create patches from (None = current)
+ count (int): Number of patches to produce, or -1 to produce patches for
+ the current branch back to the upstream commit
+ start (int): Start patch to use (0=first / top of branch)
+ end (int): End patch to use (0=last one in series, 1=one before that,
+ etc.)
+ dest_branch (str): Name of new branch to create with the updated tags
+ (None to not create a branch)
+ force (bool): With dest_branch, force overwriting an existing branch
+ show_comments (bool): True to display snippets from the comments
+ provided by reviewers
+ url (str): URL of patchwork server, e.g. 'https://patchwork.ozlabs.org'.
+ This is ignored if the series provides a Series-patchwork-url tag.
+
+ Raises:
+ ValueError: if the branch has no Series-links value
+ """
+ if count == -1:
+ # Work out how many patches to send if we can
+ count = (gitutil.CountCommitsToBranch(branch) - start)
+
+ series = patchstream.get_metadata(branch, start, count - end)
+ warnings = 0
+ for cmt in series.commits:
+ if cmt.warn:
+ print('%d warnings for %s:' % (len(cmt.warn), cmt.hash))
+ for warn in cmt.warn:
+ print('\t', warn)
+ warnings += 1
+ print()
+ if warnings:
+ raise ValueError('Please fix warnings before running status')
+ links = series.get('links')
+ if not links:
+ raise ValueError("Branch has no Series-links value")
+
+ # Find the link without a version number (we don't support versions yet)
+ found = [link for link in links.split() if not ':' in link]
+ if not found:
+ raise ValueError('Series-links has no current version (without :)')
+
+ # Allow the series to override the URL
+ if 'patchwork_url' in series:
+ url = series.patchwork_url
+
+ # Import this here to avoid failing on other commands if the dependencies
+ # are not present
+ from patman import status
+ status.check_patchwork_status(series, found[0], branch, dest_branch, force,
+ show_comments, url)
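
A minimal sketch of driving control.send() directly (illustrative only; in
normal use the command-line front end builds the argparse.Namespace, and the
values below are example defaults for a dry run inside a git checkout with the
patman package importable):

    import argparse

    from patman import control

    # Field names mirror those read by control.send() above; values are
    # illustrative defaults for a dry run
    args = argparse.Namespace(
        branch=None, count=-1, start=0, end=0,
        ignore_binary=False, add_signoff=True,
        check_patch=True, verbose=False,
        ignore_errors=False, process_tags=True, ignore_bad_tags=False,
        add_maintainers=True, limit=None, dry_run=True,
        in_reply_to=None, thread=False, smtp_server=None)
    control.send(args)
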
diff --git a/roms/u-boot/tools/patman/cros_subprocess.py b/roms/u-boot/tools/patman/cros_subprocess.py
new file mode 100644
index 000000000..efd0a5aaf
--- /dev/null
+++ b/roms/u-boot/tools/patman/cros_subprocess.py
@@ -0,0 +1,404 @@
+# Copyright (c) 2012 The Chromium OS Authors.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
+# Licensed to PSF under a Contributor Agreement.
+# See http://www.python.org/2.4/license for licensing details.
+
+"""Subprocess execution
+
+This module holds a subclass of subprocess.Popen with our own required
+features, mainly that we get access to the subprocess output while it
+is running rather than just at the end. This makes it easier to show
+progress information and filter output in real time.
+"""
+
+import errno
+import os
+import pty
+import select
+import subprocess
+import sys
+import unittest
+
+
+# Import these here so the caller does not need to import subprocess also.
+PIPE = subprocess.PIPE
+STDOUT = subprocess.STDOUT
+PIPE_PTY = -3 # Pipe output through a pty
+stay_alive = True
+
+
+class Popen(subprocess.Popen):
+ """Like subprocess.Popen with ptys and incremental output
+
+ This class deals with running a child process and filtering its output on
+ both stdout and stderr while it is running. We do this so we can monitor
+ progress, and possibly relay the output to the user if requested.
+
+ The class is similar to subprocess.Popen, the equivalent is something like:
+
+ Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ But this class has many fewer features, and two enhancements:
+
+ 1. Rather than getting the output data only at the end, this class sends it
+ to a provided operation as it arrives.
+ 2. We use pseudo terminals so that the child will hopefully flush its output
+ to us as soon as it is produced, rather than waiting for the end of a
+ line.
+
+ Use CommunicateFilter() to handle output from the subprocess.
+
+ """
+
+ def __init__(self, args, stdin=None, stdout=PIPE_PTY, stderr=PIPE_PTY,
+ shell=False, cwd=None, env=None, **kwargs):
+ """Cut-down constructor
+
+ Args:
+ args: Program and arguments for subprocess to execute.
+ stdin: See subprocess.Popen()
+ stdout: See subprocess.Popen(), except that we support the sentinel
+ value of cros_subprocess.PIPE_PTY.
+ stderr: See subprocess.Popen(), except that we support the sentinel
+ value of cros_subprocess.PIPE_PTY.
+ shell: See subprocess.Popen()
+ cwd: Working directory to change to for subprocess, or None if none.
+ env: Environment to use for this subprocess, or None to inherit parent.
+ kwargs: No other arguments are supported at the moment. Passing other
+ arguments will cause a ValueError to be raised.
+ """
+ stdout_pty = None
+ stderr_pty = None
+
+ if stdout == PIPE_PTY:
+ stdout_pty = pty.openpty()
+ stdout = os.fdopen(stdout_pty[1])
+ if stderr == PIPE_PTY:
+ stderr_pty = pty.openpty()
+ stderr = os.fdopen(stderr_pty[1])
+
+ super(Popen, self).__init__(args, stdin=stdin,
+ stdout=stdout, stderr=stderr, shell=shell, cwd=cwd, env=env,
+ **kwargs)
+
+ # If we're on a PTY, we passed the slave half of the PTY to the subprocess.
+ # We want to use the master half on our end from now on. Setting this here
+ # does make some assumptions about the implementation of subprocess, but
+ # those assumptions are pretty minor.
+
+ # Note that if stderr is STDOUT, then self.stderr will be set to None by
+ # this constructor.
+ if stdout_pty is not None:
+ self.stdout = os.fdopen(stdout_pty[0])
+ if stderr_pty is not None:
+ self.stderr = os.fdopen(stderr_pty[0])
+
+ # Insist that unit tests exist for other arguments we don't support.
+ if kwargs:
+ raise ValueError("Unit tests do not test extra args - please add tests")
+
+ def ConvertData(self, data):
+ """Convert stdout/stderr data to the correct format for output
+
+ Args:
+ data: Data to convert, or None for ''
+
+ Returns:
+ Converted data, as bytes
+ """
+ if data is None:
+ return b''
+ return data
+
+ def CommunicateFilter(self, output):
+ """Interact with process: Read data from stdout and stderr.
+
+ This method runs until end-of-file is reached, then waits for the
+ subprocess to terminate.
+
+ The output function is sent all output from the subprocess and must be
+ defined like this:
+
+ def Output([self,] stream, data)
+ Args:
+ stream: the stream the output was received on, which will be
+ sys.stdout or sys.stderr.
+ data: a string containing the data
+
+ Note: The data read is buffered in memory, so do not use this
+ method if the data size is large or unlimited.
+
+ Args:
+ output: Function to call with each fragment of output.
+
+ Returns:
+ A tuple (stdout, stderr, combined) which is the data received on
+ stdout, stderr and the combined data (interleaved stdout and stderr).
+
+ Note that the interleaved output will only be sensible if you have
+ set both stdout and stderr to PIPE or PIPE_PTY. Even then it depends on
+ the timing of the output in the subprocess. If a subprocess flips
+ between stdout and stderr quickly in succession, by the time we come to
+ read the output from each we may see several lines in each, and will read
+ all the stdout lines, then all the stderr lines. So the interleaving
+ may not be correct. In this case you might want to pass
+ stderr=cros_subprocess.STDOUT to the constructor.
+
+ This feature is still useful for subprocesses where stderr is
+ rarely used and indicates an error.
+
+ Note also that if you set stderr to STDOUT, then stderr will be empty
+ and the combined output will just be the same as stdout.
+ """
+
+ read_set = []
+ write_set = []
+ stdout = None # Return
+ stderr = None # Return
+
+ if self.stdin:
+ # Flush stdio buffer. This might block, if the user has
+ # been writing to .stdin in an uncontrolled fashion.
+ self.stdin.flush()
+ if input:
+ write_set.append(self.stdin)
+ else:
+ self.stdin.close()
+ if self.stdout:
+ read_set.append(self.stdout)
+ stdout = b''
+ if self.stderr and self.stderr != self.stdout:
+ read_set.append(self.stderr)
+ stderr = b''
+ combined = b''
+
+ input_offset = 0
+ while read_set or write_set:
+ try:
+ rlist, wlist, _ = select.select(read_set, write_set, [], 0.2)
+ except select.error as e:
+ if e.args[0] == errno.EINTR:
+ continue
+ raise
+
+ if not stay_alive:
+ self.terminate()
+
+ if self.stdin in wlist:
+ # When select has indicated that the file is writable,
+ # we can write up to PIPE_BUF bytes without risk
+ # blocking. POSIX defines PIPE_BUF >= 512
+ chunk = input[input_offset : input_offset + 512]
+ bytes_written = os.write(self.stdin.fileno(), chunk)
+ input_offset += bytes_written
+ if input_offset >= len(input):
+ self.stdin.close()
+ write_set.remove(self.stdin)
+
+ if self.stdout in rlist:
+ data = b''
+ # We will get an error on read if the pty is closed
+ try:
+ data = os.read(self.stdout.fileno(), 1024)
+ except OSError:
+ pass
+ if not len(data):
+ self.stdout.close()
+ read_set.remove(self.stdout)
+ else:
+ stdout += data
+ combined += data
+ if output:
+ output(sys.stdout, data)
+ if self.stderr in rlist:
+ data = b''
+ # We will get an error on read if the pty is closed
+ try:
+ data = os.read(self.stderr.fileno(), 1024)
+ except OSError:
+ pass
+ if not len(data):
+ self.stderr.close()
+ read_set.remove(self.stderr)
+ else:
+ stderr += data
+ combined += data
+ if output:
+ output(sys.stderr, data)
+
+ # All data exchanged. Translate lists into strings.
+ stdout = self.ConvertData(stdout)
+ stderr = self.ConvertData(stderr)
+ combined = self.ConvertData(combined)
+
+ # Translate newlines, if requested. We cannot let the file
+ # object do the translation: It is based on stdio, which is
+ # impossible to combine with select (unless forcing no
+ # buffering).
+ if self.universal_newlines and hasattr(file, 'newlines'):
+ if stdout:
+ stdout = self._translate_newlines(stdout)
+ if stderr:
+ stderr = self._translate_newlines(stderr)
+
+ self.wait()
+ return (stdout, stderr, combined)
+
+
+# Just being a unittest.TestCase gives us 14 public methods. Unless we
+# disable this, we can only have 6 tests in a TestCase. That's not enough.
+#
+# pylint: disable=R0904
+
+class TestSubprocess(unittest.TestCase):
+ """Our simple unit test for this module"""
+
+ class MyOperation:
+ """Provides a operation that we can pass to Popen"""
+ def __init__(self, input_to_send=None):
+ """Constructor to set up the operation and possible input.
+
+ Args:
+ input_to_send: a text string to send when we first get input. We will
+ add \r\n to the string.
+ """
+ self.stdout_data = ''
+ self.stderr_data = ''
+ self.combined_data = ''
+ self.stdin_pipe = None
+ self._input_to_send = input_to_send
+ if input_to_send:
+ pipe = os.pipe()
+ self.stdin_read_pipe = pipe[0]
+ self._stdin_write_pipe = os.fdopen(pipe[1], 'w')
+
+ def Output(self, stream, data):
+ """Output handler for Popen. Stores the data for later comparison"""
+ if stream == sys.stdout:
+ self.stdout_data += data
+ if stream == sys.stderr:
+ self.stderr_data += data
+ self.combined_data += data
+
+ # Output the input string if we have one.
+ if self._input_to_send:
+ self._stdin_write_pipe.write(self._input_to_send + '\r\n')
+ self._stdin_write_pipe.flush()
+
+ def _BasicCheck(self, plist, oper):
+ """Basic checks that the output looks sane."""
+ self.assertEqual(plist[0], oper.stdout_data)
+ self.assertEqual(plist[1], oper.stderr_data)
+ self.assertEqual(plist[2], oper.combined_data)
+
+ # The total length of stdout and stderr should equal the combined length
+ self.assertEqual(len(plist[0]) + len(plist[1]), len(plist[2]))
+
+ def test_simple(self):
+ """Simple redirection: Get process list"""
+ oper = TestSubprocess.MyOperation()
+ plist = Popen(['ps']).CommunicateFilter(oper.Output)
+ self._BasicCheck(plist, oper)
+
+ def test_stderr(self):
+ """Check stdout and stderr"""
+ oper = TestSubprocess.MyOperation()
+ cmd = 'echo fred >/dev/stderr && false || echo bad'
+ plist = Popen([cmd], shell=True).CommunicateFilter(oper.Output)
+ self._BasicCheck(plist, oper)
+ self.assertEqual(plist [0], 'bad\r\n')
+ self.assertEqual(plist [1], 'fred\r\n')
+
+ def test_shell(self):
+ """Check with and without shell works"""
+ oper = TestSubprocess.MyOperation()
+ cmd = 'echo test >/dev/stderr'
+ self.assertRaises(OSError, Popen, [cmd], shell=False)
+ plist = Popen([cmd], shell=True).CommunicateFilter(oper.Output)
+ self._BasicCheck(plist, oper)
+ self.assertEqual(len(plist [0]), 0)
+ self.assertEqual(plist [1], 'test\r\n')
+
+ def test_list_args(self):
+ """Check with and without shell works using list arguments"""
+ oper = TestSubprocess.MyOperation()
+ cmd = ['echo', 'test', '>/dev/stderr']
+ plist = Popen(cmd, shell=False).CommunicateFilter(oper.Output)
+ self._BasicCheck(plist, oper)
+ self.assertEqual(plist [0], ' '.join(cmd[1:]) + '\r\n')
+ self.assertEqual(len(plist [1]), 0)
+
+ oper = TestSubprocess.MyOperation()
+
+ # this should be interpreted as 'echo' with the other args dropped
+ cmd = ['echo', 'test', '>/dev/stderr']
+ plist = Popen(cmd, shell=True).CommunicateFilter(oper.Output)
+ self._BasicCheck(plist, oper)
+ self.assertEqual(plist [0], '\r\n')
+
+ def test_cwd(self):
+ """Check we can change directory"""
+ for shell in (False, True):
+ oper = TestSubprocess.MyOperation()
+ plist = Popen('pwd', shell=shell, cwd='/tmp').CommunicateFilter(oper.Output)
+ self._BasicCheck(plist, oper)
+ self.assertEqual(plist [0], '/tmp\r\n')
+
+ def test_env(self):
+ """Check we can change environment"""
+ for add in (False, True):
+ oper = TestSubprocess.MyOperation()
+ env = os.environ
+ if add:
+ env ['FRED'] = 'fred'
+ cmd = 'echo $FRED'
+ plist = Popen(cmd, shell=True, env=env).CommunicateFilter(oper.Output)
+ self._BasicCheck(plist, oper)
+ self.assertEqual(plist [0], add and 'fred\r\n' or '\r\n')
+
+ def test_extra_args(self):
+ """Check we can't add extra arguments"""
+ self.assertRaises(ValueError, Popen, 'true', close_fds=False)
+
+ def test_basic_input(self):
+ """Check that incremental input works
+
+ We set up a subprocess which will prompt for name. When we see this prompt
+ we send the name as input to the process. It should then print the name
+ properly to stdout.
+ """
+ oper = TestSubprocess.MyOperation('Flash')
+ prompt = 'What is your name?: '
+ cmd = 'echo -n "%s"; read name; echo Hello $name' % prompt
+ plist = Popen([cmd], stdin=oper.stdin_read_pipe,
+ shell=True).CommunicateFilter(oper.Output)
+ self._BasicCheck(plist, oper)
+ self.assertEqual(len(plist [1]), 0)
+ self.assertEqual(plist [0], prompt + 'Hello Flash\r\r\n')
+
+ def test_isatty(self):
+ """Check that ptys appear as terminals to the subprocess"""
+ oper = TestSubprocess.MyOperation()
+ cmd = ('if [ -t %d ]; then echo "terminal %d" >&%d; '
+ 'else echo "not %d" >&%d; fi;')
+ both_cmds = ''
+ for fd in (1, 2):
+ both_cmds += cmd % (fd, fd, fd, fd, fd)
+ plist = Popen(both_cmds, shell=True).CommunicateFilter(oper.Output)
+ self._BasicCheck(plist, oper)
+ self.assertEqual(plist [0], 'terminal 1\r\n')
+ self.assertEqual(plist [1], 'terminal 2\r\n')
+
+ # Now try with PIPE and make sure it is not a terminal
+ oper = TestSubprocess.MyOperation()
+ plist = Popen(both_cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+ shell=True).CommunicateFilter(oper.Output)
+ self._BasicCheck(plist, oper)
+ self.assertEqual(plist [0], 'not 1\n')
+ self.assertEqual(plist [1], 'not 2\n')
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/roms/u-boot/tools/patman/func_test.py b/roms/u-boot/tools/patman/func_test.py
new file mode 100644
index 000000000..1ce6448d0
--- /dev/null
+++ b/roms/u-boot/tools/patman/func_test.py
@@ -0,0 +1,1284 @@
+# -*- coding: utf-8 -*-
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright 2017 Google, Inc
+#
+
+"""Functional tests for checking that patman behaves correctly"""
+
+import os
+import re
+import shutil
+import sys
+import tempfile
+import unittest
+
+
+from patman.commit import Commit
+from patman import control
+from patman import gitutil
+from patman import patchstream
+from patman.patchstream import PatchStream
+from patman.series import Series
+from patman import settings
+from patman import terminal
+from patman import tools
+from patman.test_util import capture_sys_output
+
+import pygit2
+from patman import status
+
+class TestFunctional(unittest.TestCase):
+ """Functional tests for checking that patman behaves correctly"""
+ leb = (b'Lord Edmund Blackadd\xc3\xabr <weasel@blackadder.org>'.
+ decode('utf-8'))
+ fred = 'Fred Bloggs <f.bloggs@napier.net>'
+ joe = 'Joe Bloggs <joe@napierwallies.co.nz>'
+ mary = 'Mary Bloggs <mary@napierwallies.co.nz>'
+ commits = None
+ patches = None
+
+ def setUp(self):
+ self.tmpdir = tempfile.mkdtemp(prefix='patman.')
+ self.gitdir = os.path.join(self.tmpdir, 'git')
+ self.repo = None
+
+ def tearDown(self):
+ shutil.rmtree(self.tmpdir)
+ terminal.SetPrintTestMode(False)
+
+ @staticmethod
+ def _get_path(fname):
+ """Get the path to a test file
+
+ Args:
+ fname (str): Filename to obtain
+
+ Returns:
+ str: Full path to file in the test directory
+ """
+ return os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])),
+ 'test', fname)
+
+ @classmethod
+ def _get_text(cls, fname):
+ """Read a file as text
+
+ Args:
+ fname (str): Filename to read
+
+ Returns:
+ str: Contents of file
+ """
+ return open(cls._get_path(fname), encoding='utf-8').read()
+
+ @classmethod
+ def _get_patch_name(cls, subject):
+ """Get the filename of a patch given its subject
+
+ Args:
+ subject (str): Patch subject
+
+ Returns:
+ str: Filename for that patch
+ """
+ fname = re.sub('[ :]', '-', subject)
+ return fname.replace('--', '-')
+
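+ # Example (illustrative): _get_patch_name('pci: Correct cast for sandbox')
+ # returns 'pci-Correct-cast-for-sandbox', so _create_patches_for_test()
+ # below looks for '0001-pci-Correct-cast-for-sandbox.patch', mirroring the
+ # names produced by 'git format-patch'.
+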
+ def _create_patches_for_test(self, series):
+ """Create patch files for use by tests
+
+ This copies patch files from the test directory as needed by the series
+
+ Args:
+ series (Series): Series containing commits to convert
+
+ Returns:
+ tuple:
+ str: Cover-letter filename, or None if none
+ fname_list: list of str, each a patch filename
+ """
+ cover_fname = None
+ fname_list = []
+ for i, commit in enumerate(series.commits):
+ clean_subject = self._get_patch_name(commit.subject)
+ src_fname = '%04d-%s.patch' % (i + 1, clean_subject[:52])
+ fname = os.path.join(self.tmpdir, src_fname)
+ shutil.copy(self._get_path(src_fname), fname)
+ fname_list.append(fname)
+ if series.get('cover'):
+ src_fname = '0000-cover-letter.patch'
+ cover_fname = os.path.join(self.tmpdir, src_fname)
+ fname = os.path.join(self.tmpdir, src_fname)
+ shutil.copy(self._get_path(src_fname), fname)
+
+ return cover_fname, fname_list
+
+ def testBasic(self):
+ """Tests the basic flow of patman
+
+ This creates a series from some hard-coded patches built from a simple
+ tree with the following metadata in the top commit:
+
+ Series-to: u-boot
+ Series-prefix: RFC
+ Series-cc: Stefan Brüns <stefan.bruens@rwth-aachen.de>
+ Cover-letter-cc: Lord Mëlchett <clergy@palace.gov>
+ Series-version: 3
+ Patch-cc: fred
+ Series-process-log: sort, uniq
+ Series-changes: 4
+ - Some changes
+ - Multi
+ line
+ change
+
+ Commit-changes: 2
+ - Changes only for this commit
+
+ Cover-changes: 4
+ - Some notes for the cover letter
+
+ Cover-letter:
+ test: A test patch series
+ This is a test of how the cover
+ letter
+ works
+ END
+
+ and this in the first commit:
+
+ Commit-changes: 2
+ - second revision change
+
+ Series-notes:
+ some notes
+ about some things
+ from the first commit
+ END
+
+ Commit-notes:
+ Some notes about
+ the first commit
+ END
+
+ with the following commands:
+
+ git log -n2 --reverse >/path/to/tools/patman/test/test01.txt
+ git format-patch --subject-prefix RFC --cover-letter HEAD~2
+ mv 00* /path/to/tools/patman/test
+
+ It checks these aspects:
+ - git log can be processed by patchstream
+ - emailing patches uses the correct command
+ - CC file has information on each commit
+ - cover letter has the expected text and subject
+ - each patch has the correct subject
+ - dry-run information prints out correctly
+ - unicode is handled correctly
+ - Series-to, Series-cc, Series-prefix, Cover-letter
+ - Cover-letter-cc, Series-version, Series-changes, Series-notes
+ - Commit-notes
+ """
+ process_tags = True
+ ignore_bad_tags = False
+ stefan = b'Stefan Br\xc3\xbcns <stefan.bruens@rwth-aachen.de>'.decode('utf-8')
+ rick = 'Richard III <richard@palace.gov>'
+ mel = b'Lord M\xc3\xablchett <clergy@palace.gov>'.decode('utf-8')
+ add_maintainers = [stefan, rick]
+ dry_run = True
+ in_reply_to = mel
+ count = 2
+ settings.alias = {
+ 'fdt': ['simon'],
+ 'u-boot': ['u-boot@lists.denx.de'],
+ 'simon': [self.leb],
+ 'fred': [self.fred],
+ }
+
+ text = self._get_text('test01.txt')
+ series = patchstream.get_metadata_for_test(text)
+ cover_fname, args = self._create_patches_for_test(series)
+ with capture_sys_output() as out:
+ patchstream.fix_patches(series, args)
+ if cover_fname and series.get('cover'):
+ patchstream.insert_cover_letter(cover_fname, series, count)
+ series.DoChecks()
+ cc_file = series.MakeCcFile(process_tags, cover_fname,
+ not ignore_bad_tags, add_maintainers,
+ None)
+ cmd = gitutil.EmailPatches(
+ series, cover_fname, args, dry_run, not ignore_bad_tags,
+ cc_file, in_reply_to=in_reply_to, thread=None)
+ series.ShowActions(args, cmd, process_tags)
+ cc_lines = open(cc_file, encoding='utf-8').read().splitlines()
+ os.remove(cc_file)
+
+ lines = iter(out[0].getvalue().splitlines())
+ self.assertEqual('Cleaned %s patches' % len(series.commits),
+ next(lines))
+ self.assertEqual('Change log missing for v2', next(lines))
+ self.assertEqual('Change log missing for v3', next(lines))
+ self.assertEqual('Change log for unknown version v4', next(lines))
+ self.assertEqual("Alias 'pci' not found", next(lines))
+ self.assertIn('Dry run', next(lines))
+ self.assertEqual('', next(lines))
+ self.assertIn('Send a total of %d patches' % count, next(lines))
+ prev = next(lines)
+ for i, commit in enumerate(series.commits):
+ self.assertEqual(' %s' % args[i], prev)
+ while True:
+ prev = next(lines)
+ if 'Cc:' not in prev:
+ break
+ self.assertEqual('To: u-boot@lists.denx.de', prev)
+ self.assertEqual('Cc: %s' % stefan, next(lines))
+ self.assertEqual('Version: 3', next(lines))
+ self.assertEqual('Prefix:\t RFC', next(lines))
+ self.assertEqual('Cover: 4 lines', next(lines))
+ self.assertEqual(' Cc: %s' % self.fred, next(lines))
+ self.assertEqual(' Cc: %s' % self.leb,
+ next(lines))
+ self.assertEqual(' Cc: %s' % mel, next(lines))
+ self.assertEqual(' Cc: %s' % rick, next(lines))
+ expected = ('Git command: git send-email --annotate '
+ '--in-reply-to="%s" --to "u-boot@lists.denx.de" '
+ '--cc "%s" --cc-cmd "%s send --cc-cmd %s" %s %s'
+ % (in_reply_to, stefan, sys.argv[0], cc_file, cover_fname,
+ ' '.join(args)))
+ self.assertEqual(expected, next(lines))
+
+ self.assertEqual(('%s %s\0%s' % (args[0], rick, stefan)), cc_lines[0])
+ self.assertEqual(
+ '%s %s\0%s\0%s\0%s' % (args[1], self.fred, self.leb, rick, stefan),
+ cc_lines[1])
+
+ expected = '''
+This is a test of how the cover
+letter
+works
+
+some notes
+about some things
+from the first commit
+
+Changes in v4:
+- Multi
+ line
+ change
+- Some changes
+- Some notes for the cover letter
+
+Simon Glass (2):
+ pci: Correct cast for sandbox
+ fdt: Correct cast for sandbox in fdtdec_setup_mem_size_base()
+
+ cmd/pci.c | 3 ++-
+ fs/fat/fat.c | 1 +
+ lib/efi_loader/efi_memory.c | 1 +
+ lib/fdtdec.c | 3 ++-
+ 4 files changed, 6 insertions(+), 2 deletions(-)
+
+--\x20
+2.7.4
+
+'''
+ lines = open(cover_fname, encoding='utf-8').read().splitlines()
+ self.assertEqual(
+ 'Subject: [RFC PATCH v3 0/2] test: A test patch series',
+ lines[3])
+ self.assertEqual(expected.splitlines(), lines[7:])
+
+ for i, fname in enumerate(args):
+ lines = open(fname, encoding='utf-8').read().splitlines()
+ subject = [line for line in lines if line.startswith('Subject')]
+ self.assertEqual('Subject: [RFC %d/%d]' % (i + 1, count),
+ subject[0][:18])
+
+ # Check that we got our commit notes
+ start = 0
+ expected = ''
+
+ if i == 0:
+ start = 17
+ expected = '''---
+Some notes about
+the first commit
+
+(no changes since v2)
+
+Changes in v2:
+- second revision change'''
+ elif i == 1:
+ start = 17
+ expected = '''---
+
+Changes in v4:
+- Multi
+ line
+ change
+- Some changes
+
+Changes in v2:
+- Changes only for this commit'''
+
+ if expected:
+ expected = expected.splitlines()
+ self.assertEqual(expected, lines[start:(start+len(expected))])
+
+ def make_commit_with_file(self, subject, body, fname, text):
+ """Create a file and add it to the git repo with a new commit
+
+ Args:
+ subject (str): Subject for the commit
+ body (str): Body text of the commit
+ fname (str): Filename of file to create
+ text (str): Text to put into the file
+ """
+ path = os.path.join(self.gitdir, fname)
+ tools.WriteFile(path, text, binary=False)
+ index = self.repo.index
+ index.add(fname)
+ author = pygit2.Signature('Test user', 'test@email.com')
+ committer = author
+ tree = index.write_tree()
+ message = subject + '\n' + body
+ self.repo.create_commit('HEAD', author, committer, message, tree,
+ [self.repo.head.target])
+
+ def make_git_tree(self):
+ """Make a simple git tree suitable for testing
+
+ It has three branches:
+ 'base' has two commits: PCI, main
+ 'first' has base as upstream and two more commits: I2C, SPI
+ 'second' has base as upstream and three more: video, serial, bootm
+
+ Returns:
+ pygit2.Repository: repository
+ """
+ repo = pygit2.init_repository(self.gitdir)
+ self.repo = repo
+ new_tree = repo.TreeBuilder().write()
+
+ author = pygit2.Signature('Test user', 'test@email.com')
+ committer = author
+ _ = repo.create_commit('HEAD', author, committer, 'Created master',
+ new_tree, [])
+
+ self.make_commit_with_file('Initial commit', '''
+Add a README
+
+''', 'README', '''This is the README file
+describing this project
+in very little detail''')
+
+ self.make_commit_with_file('pci: PCI implementation', '''
+Here is a basic PCI implementation
+
+''', 'pci.c', '''This is a file
+it has some contents
+and some more things''')
+ self.make_commit_with_file('main: Main program', '''
+Hello here is the second commit.
+''', 'main.c', '''This is the main file
+there is very little here
+but we can always add more later
+if we want to
+
+Series-to: u-boot
+Series-cc: Barry Crump <bcrump@whataroa.nz>
+''')
+ base_target = repo.revparse_single('HEAD')
+ self.make_commit_with_file('i2c: I2C things', '''
+This has some stuff to do with I2C
+''', 'i2c.c', '''And this is the file contents
+with some I2C-related things in it''')
+ self.make_commit_with_file('spi: SPI fixes', '''
+SPI needs some fixes
+and here they are
+
+Signed-off-by: %s
+
+Series-to: u-boot
+Commit-notes:
+title of the series
+This is the cover letter for the series
+with various details
+END
+''' % self.leb, 'spi.c', '''Some fixes for SPI in this
+file to make SPI work
+better than before''')
+ first_target = repo.revparse_single('HEAD')
+
+ target = repo.revparse_single('HEAD~2')
+ repo.reset(target.oid, pygit2.GIT_CHECKOUT_FORCE)
+ self.make_commit_with_file('video: Some video improvements', '''
+Fix up the video so that
+it looks more purple. Purple is
+a very nice colour.
+''', 'video.c', '''More purple here
+Purple and purple
+Even more purple
+Could not be any more purple''')
+ self.make_commit_with_file('serial: Add a serial driver', '''
+Here is the serial driver
+for my chip.
+
+Cover-letter:
+Series for my board
+This series implements support
+for my glorious board.
+END
+Series-links: 183237
+''', 'serial.c', '''The code for the
+serial driver is here''')
+ self.make_commit_with_file('bootm: Make it boot', '''
+This makes my board boot
+with a fix to the bootm
+command
+''', 'bootm.c', '''Fix up the bootm
+command to make the code as
+complicated as possible''')
+ second_target = repo.revparse_single('HEAD')
+
+ repo.branches.local.create('first', first_target)
+ repo.config.set_multivar('branch.first.remote', '', '.')
+ repo.config.set_multivar('branch.first.merge', '', 'refs/heads/base')
+
+ repo.branches.local.create('second', second_target)
+ repo.config.set_multivar('branch.second.remote', '', '.')
+ repo.config.set_multivar('branch.second.merge', '', 'refs/heads/base')
+
+ repo.branches.local.create('base', base_target)
+ return repo
+
+ def testBranch(self):
+ """Test creating patches from a branch"""
+ repo = self.make_git_tree()
+ target = repo.lookup_reference('refs/heads/first')
+ self.repo.checkout(target, strategy=pygit2.GIT_CHECKOUT_FORCE)
+ control.setup()
+ try:
+ orig_dir = os.getcwd()
+ os.chdir(self.gitdir)
+
+ # Check that it can detect the current branch
+ self.assertEqual(2, gitutil.CountCommitsToBranch(None))
+ col = terminal.Color()
+ with capture_sys_output() as _:
+ _, cover_fname, patch_files = control.prepare_patches(
+ col, branch=None, count=-1, start=0, end=0,
+ ignore_binary=False, signoff=True)
+ self.assertIsNone(cover_fname)
+ self.assertEqual(2, len(patch_files))
+
+ # Check that it can detect a different branch
+ self.assertEqual(3, gitutil.CountCommitsToBranch('second'))
+ with capture_sys_output() as _:
+ _, cover_fname, patch_files = control.prepare_patches(
+ col, branch='second', count=-1, start=0, end=0,
+ ignore_binary=False, signoff=True)
+ self.assertIsNotNone(cover_fname)
+ self.assertEqual(3, len(patch_files))
+
+ # Check that it can skip patches at the end
+ with capture_sys_output() as _:
+ _, cover_fname, patch_files = control.prepare_patches(
+ col, branch='second', count=-1, start=0, end=1,
+ ignore_binary=False, signoff=True)
+ self.assertIsNotNone(cover_fname)
+ self.assertEqual(2, len(patch_files))
+ finally:
+ os.chdir(orig_dir)
+
+ def testTags(self):
+ """Test collection of tags in a patchstream"""
+ text = '''This is a patch
+
+Signed-off-by: Terminator
+Reviewed-by: %s
+Reviewed-by: %s
+Tested-by: %s
+''' % (self.joe, self.mary, self.leb)
+ pstrm = PatchStream.process_text(text)
+ self.assertEqual(pstrm.commit.rtags, {
+ 'Reviewed-by': {self.joe, self.mary},
+ 'Tested-by': {self.leb}})
+
+ def testMissingEnd(self):
+ """Test a missing END tag"""
+ text = '''This is a patch
+
+Cover-letter:
+This is the title
+missing END after this line
+Signed-off-by: Fred
+'''
+ pstrm = PatchStream.process_text(text)
+ self.assertEqual(["Missing 'END' in section 'cover'"],
+ pstrm.commit.warn)
+
+ def testMissingBlankLine(self):
+ """Test a missing blank line after a tag"""
+ text = '''This is a patch
+
+Series-changes: 2
+- First line of changes
+- Missing blank line after this line
+Signed-off-by: Fred
+'''
+ pstrm = PatchStream.process_text(text)
+ self.assertEqual(["Missing 'blank line' in section 'Series-changes'"],
+ pstrm.commit.warn)
+
+ def testInvalidCommitTag(self):
+ """Test an invalid Commit-xxx tag"""
+ text = '''This is a patch
+
+Commit-fred: testing
+'''
+ pstrm = PatchStream.process_text(text)
+ self.assertEqual(["Line 3: Ignoring Commit-fred"], pstrm.commit.warn)
+
+ def testSelfTest(self):
+ """Test a tested by tag by this user"""
+ test_line = 'Tested-by: %s@napier.com' % os.getenv('USER')
+ text = '''This is a patch
+
+%s
+''' % test_line
+ pstrm = PatchStream.process_text(text)
+ self.assertEqual(["Ignoring '%s'" % test_line], pstrm.commit.warn)
+
+ def testSpaceBeforeTab(self):
+ """Test a space before a tab"""
+ text = '''This is a patch
+
++ \tSomething
+'''
+ pstrm = PatchStream.process_text(text)
+ self.assertEqual(["Line 3/0 has space before tab"], pstrm.commit.warn)
+
+ def testLinesAfterTest(self):
+ """Test detecting lines after TEST= line"""
+ text = '''This is a patch
+
+TEST=sometest
+more lines
+here
+'''
+ pstrm = PatchStream.process_text(text)
+ self.assertEqual(["Found 2 lines after TEST="], pstrm.commit.warn)
+
+ def testBlankLineAtEnd(self):
+ """Test detecting a blank line at the end of a file"""
+ text = '''This is a patch
+
+diff --git a/lib/fdtdec.c b/lib/fdtdec.c
+index c072e54..942244f 100644
+--- a/lib/fdtdec.c
++++ b/lib/fdtdec.c
+@@ -1200,7 +1200,8 @@ int fdtdec_setup_mem_size_base(void)
+ }
+
+ gd->ram_size = (phys_size_t)(res.end - res.start + 1);
+- debug("%s: Initial DRAM size %llx\n", __func__, (u64)gd->ram_size);
++ debug("%s: Initial DRAM size %llx\n", __func__,
++ (unsigned long long)gd->ram_size);
++
+diff --git a/lib/efi_loader/efi_memory.c b/lib/efi_loader/efi_memory.c
+
+--
+2.7.4
+
+ '''
+ pstrm = PatchStream.process_text(text)
+ self.assertEqual(
+ ["Found possible blank line(s) at end of file 'lib/fdtdec.c'"],
+ pstrm.commit.warn)
+
+ def testNoUpstream(self):
+ """Test CountCommitsToBranch when there is no upstream"""
+ repo = self.make_git_tree()
+ target = repo.lookup_reference('refs/heads/base')
+ self.repo.checkout(target, strategy=pygit2.GIT_CHECKOUT_FORCE)
+
+ # Check that it can detect the current branch
+ try:
+ orig_dir = os.getcwd()
+ os.chdir(self.gitdir)
+ with self.assertRaises(ValueError) as exc:
+ gitutil.CountCommitsToBranch(None)
+ self.assertIn(
+ "Failed to determine upstream: fatal: no upstream configured for branch 'base'",
+ str(exc.exception))
+ finally:
+ os.chdir(orig_dir)
+
+ @staticmethod
+ def _fake_patchwork(url, subpath):
+ """Fake Patchwork server for the function below
+
+ This handles accessing a series, providing a list consisting of a
+ single patch
+
+ Args:
+ url (str): URL of patchwork server
+ subpath (str): URL subpath to use
+ """
+ re_series = re.match(r'series/(\d*)/$', subpath)
+ if re_series:
+ series_num = re_series.group(1)
+ if series_num == '1234':
+ return {'patches': [
+ {'id': '1', 'name': 'Some patch'}]}
+ raise ValueError('Fake Patchwork does not understand: %s' % subpath)
+
+ def testStatusMismatch(self):
+ """Test Patchwork patches not matching the series"""
+ series = Series()
+
+ with capture_sys_output() as (_, err):
+ status.collect_patches(series, 1234, None, self._fake_patchwork)
+ self.assertIn('Warning: Patchwork reports 1 patches, series has 0',
+ err.getvalue())
+
+ def testStatusReadPatch(self):
+ """Test handling a single patch in Patchwork"""
+ series = Series()
+ series.commits = [Commit('abcd')]
+
+ patches = status.collect_patches(series, 1234, None,
+ self._fake_patchwork)
+ self.assertEqual(1, len(patches))
+ patch = patches[0]
+ self.assertEqual('1', patch.id)
+ self.assertEqual('Some patch', patch.raw_subject)
+
+ def testParseSubject(self):
+ """Test parsing of the patch subject"""
+ patch = status.Patch('1')
+
+ # Simple patch not in a series
+ patch.parse_subject('Testing')
+ self.assertEqual('Testing', patch.raw_subject)
+ self.assertEqual('Testing', patch.subject)
+ self.assertEqual(1, patch.seq)
+ self.assertEqual(1, patch.count)
+ self.assertEqual(None, patch.prefix)
+ self.assertEqual(None, patch.version)
+
+ # First patch in a series
+ patch.parse_subject('[1/2] Testing')
+ self.assertEqual('[1/2] Testing', patch.raw_subject)
+ self.assertEqual('Testing', patch.subject)
+ self.assertEqual(1, patch.seq)
+ self.assertEqual(2, patch.count)
+ self.assertEqual(None, patch.prefix)
+ self.assertEqual(None, patch.version)
+
+ # Second patch in a series
+ patch.parse_subject('[2/2] Testing')
+ self.assertEqual('Testing', patch.subject)
+ self.assertEqual(2, patch.seq)
+ self.assertEqual(2, patch.count)
+ self.assertEqual(None, patch.prefix)
+ self.assertEqual(None, patch.version)
+
+ # RFC patch
+ patch.parse_subject('[RFC,3/7] Testing')
+ self.assertEqual('Testing', patch.subject)
+ self.assertEqual(3, patch.seq)
+ self.assertEqual(7, patch.count)
+ self.assertEqual('RFC', patch.prefix)
+ self.assertEqual(None, patch.version)
+
+ # Version patch
+ patch.parse_subject('[v2,3/7] Testing')
+ self.assertEqual('Testing', patch.subject)
+ self.assertEqual(3, patch.seq)
+ self.assertEqual(7, patch.count)
+ self.assertEqual(None, patch.prefix)
+ self.assertEqual('v2', patch.version)
+
+ # All fields
+ patch.parse_subject('[RESEND,v2,3/7] Testing')
+ self.assertEqual('Testing', patch.subject)
+ self.assertEqual(3, patch.seq)
+ self.assertEqual(7, patch.count)
+ self.assertEqual('RESEND', patch.prefix)
+ self.assertEqual('v2', patch.version)
+
+ # RFC only
+ patch.parse_subject('[RESEND] Testing')
+ self.assertEqual('Testing', patch.subject)
+ self.assertEqual(1, patch.seq)
+ self.assertEqual(1, patch.count)
+ self.assertEqual('RESEND', patch.prefix)
+ self.assertEqual(None, patch.version)
+
+ def testCompareSeries(self):
+ """Test operation of compare_with_series()"""
+ commit1 = Commit('abcd')
+ commit1.subject = 'Subject 1'
+ commit2 = Commit('ef12')
+ commit2.subject = 'Subject 2'
+ commit3 = Commit('3456')
+ commit3.subject = 'Subject 2'
+
+ patch1 = status.Patch('1')
+ patch1.subject = 'Subject 1'
+ patch2 = status.Patch('2')
+ patch2.subject = 'Subject 2'
+ patch3 = status.Patch('3')
+ patch3.subject = 'Subject 2'
+
+ series = Series()
+ series.commits = [commit1]
+ patches = [patch1]
+ patch_for_commit, commit_for_patch, warnings = (
+ status.compare_with_series(series, patches))
+ self.assertEqual(1, len(patch_for_commit))
+ self.assertEqual(patch1, patch_for_commit[0])
+ self.assertEqual(1, len(commit_for_patch))
+ self.assertEqual(commit1, commit_for_patch[0])
+
+ series.commits = [commit1]
+ patches = [patch1, patch2]
+ patch_for_commit, commit_for_patch, warnings = (
+ status.compare_with_series(series, patches))
+ self.assertEqual(1, len(patch_for_commit))
+ self.assertEqual(patch1, patch_for_commit[0])
+ self.assertEqual(1, len(commit_for_patch))
+ self.assertEqual(commit1, commit_for_patch[0])
+ self.assertEqual(["Cannot find commit for patch 2 ('Subject 2')"],
+ warnings)
+
+ series.commits = [commit1, commit2]
+ patches = [patch1]
+ patch_for_commit, commit_for_patch, warnings = (
+ status.compare_with_series(series, patches))
+ self.assertEqual(1, len(patch_for_commit))
+ self.assertEqual(patch1, patch_for_commit[0])
+ self.assertEqual(1, len(commit_for_patch))
+ self.assertEqual(commit1, commit_for_patch[0])
+ self.assertEqual(["Cannot find patch for commit 2 ('Subject 2')"],
+ warnings)
+
+ series.commits = [commit1, commit2, commit3]
+ patches = [patch1, patch2]
+ patch_for_commit, commit_for_patch, warnings = (
+ status.compare_with_series(series, patches))
+ self.assertEqual(2, len(patch_for_commit))
+ self.assertEqual(patch1, patch_for_commit[0])
+ self.assertEqual(patch2, patch_for_commit[1])
+ self.assertEqual(1, len(commit_for_patch))
+ self.assertEqual(commit1, commit_for_patch[0])
+ self.assertEqual(["Cannot find patch for commit 3 ('Subject 2')",
+ "Multiple commits match patch 2 ('Subject 2'):\n"
+ ' Subject 2\n Subject 2'],
+ warnings)
+
+ series.commits = [commit1, commit2]
+ patches = [patch1, patch2, patch3]
+ patch_for_commit, commit_for_patch, warnings = (
+ status.compare_with_series(series, patches))
+ self.assertEqual(1, len(patch_for_commit))
+ self.assertEqual(patch1, patch_for_commit[0])
+ self.assertEqual(2, len(commit_for_patch))
+ self.assertEqual(commit1, commit_for_patch[0])
+ self.assertEqual(["Multiple patches match commit 2 ('Subject 2'):\n"
+ ' Subject 2\n Subject 2',
+ "Cannot find commit for patch 3 ('Subject 2')"],
+ warnings)
+
+ def _fake_patchwork2(self, url, subpath):
+ """Fake Patchwork server for the function below
+
+ This handles accessing series, patches and comments, providing the data
+ in self.patches to the caller
+
+ Args:
+ url (str): URL of patchwork server
+ subpath (str): URL subpath to use
+ """
+ re_series = re.match(r'series/(\d*)/$', subpath)
+ re_patch = re.match(r'patches/(\d*)/$', subpath)
+ re_comments = re.match(r'patches/(\d*)/comments/$', subpath)
+ if re_series:
+ series_num = re_series.group(1)
+ if series_num == '1234':
+ return {'patches': self.patches}
+ elif re_patch:
+ patch_num = int(re_patch.group(1))
+ patch = self.patches[patch_num - 1]
+ return patch
+ elif re_comments:
+ patch_num = int(re_comments.group(1))
+ patch = self.patches[patch_num - 1]
+ return patch.comments
+ raise ValueError('Fake Patchwork does not understand: %s' % subpath)
+
+ def testFindNewResponses(self):
+ """Test operation of find_new_responses()"""
+ commit1 = Commit('abcd')
+ commit1.subject = 'Subject 1'
+ commit2 = Commit('ef12')
+ commit2.subject = 'Subject 2'
+
+ patch1 = status.Patch('1')
+ patch1.parse_subject('[1/2] Subject 1')
+ patch1.name = patch1.raw_subject
+ patch1.content = 'This is my patch content'
+ comment1a = {'content': 'Reviewed-by: %s\n' % self.joe}
+
+ patch1.comments = [comment1a]
+
+ patch2 = status.Patch('2')
+ patch2.parse_subject('[2/2] Subject 2')
+ patch2.name = patch2.raw_subject
+ patch2.content = 'Some other patch content'
+ comment2a = {
+ 'content': 'Reviewed-by: %s\nTested-by: %s\n' %
+ (self.mary, self.leb)}
+ comment2b = {'content': 'Reviewed-by: %s' % self.fred}
+ patch2.comments = [comment2a, comment2b]
+
+ # This test works by setting up commits and patches for use by the
+ # fake Rest API function _fake_patchwork2(). It calls various functions
+ # in the status module after setting up tags in the commits, checking
+ # that things behave as expected.
+ self.commits = [commit1, commit2]
+ self.patches = [patch1, patch2]
+ count = 2
+ new_rtag_list = [None] * count
+ review_list = [None, None]
+
+ # Check that the tags are picked up on the first patch
+ status.find_new_responses(new_rtag_list, review_list, 0, commit1,
+ patch1, None, self._fake_patchwork2)
+ self.assertEqual(new_rtag_list[0], {'Reviewed-by': {self.joe}})
+
+ # Now the second patch
+ status.find_new_responses(new_rtag_list, review_list, 1, commit2,
+ patch2, None, self._fake_patchwork2)
+ self.assertEqual(new_rtag_list[1], {
+ 'Reviewed-by': {self.mary, self.fred},
+ 'Tested-by': {self.leb}})
+
+ # Now add some tags to the commit, which means they should not appear as
+ # 'new' tags when scanning comments
+ new_rtag_list = [None] * count
+ commit1.rtags = {'Reviewed-by': {self.joe}}
+ status.find_new_responses(new_rtag_list, review_list, 0, commit1,
+ patch1, None, self._fake_patchwork2)
+ self.assertEqual(new_rtag_list[0], {})
+
+ # For the second commit, add Ed and Fred, so only Mary should be left
+ commit2.rtags = {
+ 'Tested-by': {self.leb},
+ 'Reviewed-by': {self.fred}}
+ status.find_new_responses(new_rtag_list, review_list, 1, commit2,
+ patch2, None, self._fake_patchwork2)
+ self.assertEqual(new_rtag_list[1], {'Reviewed-by': {self.mary}})
+
+ # Check that the output matches expectations:
+ # 1 Subject 1
+ # Reviewed-by: Joe Bloggs <joe@napierwallies.co.nz>
+ # 2 Subject 2
+ # Tested-by: Lord Edmund Blackaddër <weasel@blackadder.org>
+ # Reviewed-by: Fred Bloggs <f.bloggs@napier.net>
+ # + Reviewed-by: Mary Bloggs <mary@napierwallies.co.nz>
+ # 1 new response available in patchwork
+
+ series = Series()
+ series.commits = [commit1, commit2]
+ terminal.SetPrintTestMode()
+ status.check_patchwork_status(series, '1234', None, None, False, False,
+ None, self._fake_patchwork2)
+ lines = iter(terminal.GetPrintTestLines())
+ col = terminal.Color()
+ self.assertEqual(terminal.PrintLine(' 1 Subject 1', col.BLUE),
+ next(lines))
+ self.assertEqual(
+ terminal.PrintLine(' Reviewed-by: ', col.GREEN, newline=False,
+ bright=False),
+ next(lines))
+ self.assertEqual(terminal.PrintLine(self.joe, col.WHITE, bright=False),
+ next(lines))
+
+ self.assertEqual(terminal.PrintLine(' 2 Subject 2', col.BLUE),
+ next(lines))
+ self.assertEqual(
+ terminal.PrintLine(' Reviewed-by: ', col.GREEN, newline=False,
+ bright=False),
+ next(lines))
+ self.assertEqual(terminal.PrintLine(self.fred, col.WHITE, bright=False),
+ next(lines))
+ self.assertEqual(
+ terminal.PrintLine(' Tested-by: ', col.GREEN, newline=False,
+ bright=False),
+ next(lines))
+ self.assertEqual(terminal.PrintLine(self.leb, col.WHITE, bright=False),
+ next(lines))
+ self.assertEqual(
+ terminal.PrintLine(' + Reviewed-by: ', col.GREEN, newline=False),
+ next(lines))
+ self.assertEqual(terminal.PrintLine(self.mary, col.WHITE),
+ next(lines))
+ self.assertEqual(terminal.PrintLine(
+ '1 new response available in patchwork (use -d to write them to a new branch)',
+ None), next(lines))
+
+ def _fake_patchwork3(self, url, subpath):
+ """Fake Patchwork server for the function below
+
+ This handles accessing series, patches and comments, providing the data
+ in self.patches to the caller
+
+ Args:
+ url (str): URL of patchwork server
+ subpath (str): URL subpath to use
+ """
+ re_series = re.match(r'series/(\d*)/$', subpath)
+ re_patch = re.match(r'patches/(\d*)/$', subpath)
+ re_comments = re.match(r'patches/(\d*)/comments/$', subpath)
+ if re_series:
+ series_num = re_series.group(1)
+ if series_num == '1234':
+ return {'patches': self.patches}
+ elif re_patch:
+ patch_num = int(re_patch.group(1))
+ patch = self.patches[patch_num - 1]
+ return patch
+ elif re_comments:
+ patch_num = int(re_comments.group(1))
+ patch = self.patches[patch_num - 1]
+ return patch.comments
+ raise ValueError('Fake Patchwork does not understand: %s' % subpath)
+
+ def testCreateBranch(self):
+ """Test operation of create_branch()"""
+ repo = self.make_git_tree()
+ branch = 'first'
+ dest_branch = 'first2'
+ count = 2
+ gitdir = os.path.join(self.gitdir, '.git')
+
+ # Set up the test git tree. We use branch 'first' which has two commits
+ # in it
+ series = patchstream.get_metadata_for_list(branch, gitdir, count)
+ self.assertEqual(2, len(series.commits))
+
+ patch1 = status.Patch('1')
+ patch1.parse_subject('[1/2] %s' % series.commits[0].subject)
+ patch1.name = patch1.raw_subject
+ patch1.content = 'This is my patch content'
+ comment1a = {'content': 'Reviewed-by: %s\n' % self.joe}
+
+ patch1.comments = [comment1a]
+
+ patch2 = status.Patch('2')
+ patch2.parse_subject('[2/2] %s' % series.commits[1].subject)
+ patch2.name = patch2.raw_subject
+ patch2.content = 'Some other patch content'
+ comment2a = {
+ 'content': 'Reviewed-by: %s\nTested-by: %s\n' %
+ (self.mary, self.leb)}
+ comment2b = {
+ 'content': 'Reviewed-by: %s' % self.fred}
+ patch2.comments = [comment2a, comment2b]
+
+ # This test works by setting up patches for use by the fake Rest API
+ # function _fake_patchwork3(). The fake patch comments above should
+ # result in new review tags that are collected and added to the commits
+ # created in the destination branch.
+ self.patches = [patch1, patch2]
+ count = 2
+
+ # Expected output:
+ # 1 i2c: I2C things
+ # + Reviewed-by: Joe Bloggs <joe@napierwallies.co.nz>
+ # 2 spi: SPI fixes
+ # + Reviewed-by: Fred Bloggs <f.bloggs@napier.net>
+ # + Reviewed-by: Mary Bloggs <mary@napierwallies.co.nz>
+ # + Tested-by: Lord Edmund Blackaddër <weasel@blackadder.org>
+ # 4 new responses available in patchwork
+ # 4 responses added from patchwork into new branch 'first2'
+ # <unittest.result.TestResult run=8 errors=0 failures=0>
+
+ terminal.SetPrintTestMode()
+ status.check_patchwork_status(series, '1234', branch, dest_branch,
+ False, False, None, self._fake_patchwork3,
+ repo)
+ lines = terminal.GetPrintTestLines()
+ self.assertEqual(12, len(lines))
+ self.assertEqual(
+ "4 responses added from patchwork into new branch 'first2'",
+ lines[11].text)
+
+ # Check that the destination branch has the new tags
+ new_series = patchstream.get_metadata_for_list(dest_branch, gitdir,
+ count)
+ self.assertEqual(
+ {'Reviewed-by': {self.joe}},
+ new_series.commits[0].rtags)
+ self.assertEqual(
+ {'Tested-by': {self.leb},
+ 'Reviewed-by': {self.fred, self.mary}},
+ new_series.commits[1].rtags)
+
+ # Now check the actual text of the first commit message. We expect to
+ # see the new tags immediately below the old ones.
+ stdout = patchstream.get_list(dest_branch, count=count, git_dir=gitdir)
+ lines = iter([line.strip() for line in stdout.splitlines()
+ if '-by:' in line])
+
+ # First patch should have the review tag
+ self.assertEqual('Reviewed-by: %s' % self.joe, next(lines))
+
+ # Second patch should have the sign-off then the tested-by and two
+ # reviewed-by tags
+ self.assertEqual('Signed-off-by: %s' % self.leb, next(lines))
+ self.assertEqual('Reviewed-by: %s' % self.fred, next(lines))
+ self.assertEqual('Reviewed-by: %s' % self.mary, next(lines))
+ self.assertEqual('Tested-by: %s' % self.leb, next(lines))
+
+ def testParseSnippets(self):
+ """Test parsing of review snippets"""
+ text = '''Hi Fred,
+
+This is a comment from someone.
+
+Something else
+
+On some recent date, Fred wrote:
+> This is why I wrote the patch
+> so here it is
+
+Now a comment about the commit message
+A little more to say
+
+Even more
+
+> diff --git a/file.c b/file.c
+> Some more code
+> Code line 2
+> Code line 3
+> Code line 4
+> Code line 5
+> Code line 6
+> Code line 7
+> Code line 8
+> Code line 9
+
+And another comment
+
+> @@ -153,8 +143,13 @@ def CheckPatch(fname, show_types=False):
+> further down on the file
+> and more code
+> +Addition here
+> +Another addition here
+> codey
+> more codey
+
+and another thing in same file
+
+> @@ -253,8 +243,13 @@
+> with no function context
+
+one more thing
+
+> diff --git a/tools/patman/main.py b/tools/patman/main.py
+> +line of code
+now a very long comment in a different file
+line2
+line3
+line4
+line5
+line6
+line7
+line8
+'''
+ pstrm = PatchStream.process_text(text, True)
+ self.assertEqual([], pstrm.commit.warn)
+
+ # We expect to see the filename and up to 5 lines of code context
+ # before each comment. The 'On xxx wrote:' bit should be removed.
+ self.assertEqual(
+ [['Hi Fred,',
+ 'This is a comment from someone.',
+ 'Something else'],
+ ['> This is why I wrote the patch',
+ '> so here it is',
+ 'Now a comment about the commit message',
+ 'A little more to say', 'Even more'],
+ ['> File: file.c', '> Code line 5', '> Code line 6',
+ '> Code line 7', '> Code line 8', '> Code line 9',
+ 'And another comment'],
+ ['> File: file.c',
+ '> Line: 153 / 143: def CheckPatch(fname, show_types=False):',
+ '> and more code', '> +Addition here', '> +Another addition here',
+ '> codey', '> more codey', 'and another thing in same file'],
+ ['> File: file.c', '> Line: 253 / 243',
+ '> with no function context', 'one more thing'],
+ ['> File: tools/patman/main.py', '> +line of code',
+ 'now a very long comment in a different file',
+ 'line2', 'line3', 'line4', 'line5', 'line6', 'line7', 'line8']],
+ pstrm.snippets)
+
+ def testReviewSnippets(self):
+ """Test showing of review snippets"""
+ def _to_submitter(who):
+ m_who = re.match('(.*) <(.*)>', who)
+ return {
+ 'name': m_who.group(1),
+ 'email': m_who.group(2)
+ }
+
+ commit1 = Commit('abcd')
+ commit1.subject = 'Subject 1'
+ commit2 = Commit('ef12')
+ commit2.subject = 'Subject 2'
+
+ patch1 = status.Patch('1')
+ patch1.parse_subject('[1/2] Subject 1')
+ patch1.name = patch1.raw_subject
+ patch1.content = 'This is my patch content'
+ comment1a = {'submitter': _to_submitter(self.joe),
+ 'content': '''Hi Fred,
+
+On some date Fred wrote:
+
+> diff --git a/file.c b/file.c
+> Some code
+> and more code
+
+Here is my comment above the above...
+
+
+Reviewed-by: %s
+''' % self.joe}
+
+ patch1.comments = [comment1a]
+
+ patch2 = status.Patch('2')
+ patch2.parse_subject('[2/2] Subject 2')
+ patch2.name = patch2.raw_subject
+ patch2.content = 'Some other patch content'
+ comment2a = {
+ 'content': 'Reviewed-by: %s\nTested-by: %s\n' %
+ (self.mary, self.leb)}
+ comment2b = {'submitter': _to_submitter(self.fred),
+ 'content': '''Hi Fred,
+
+On some date Fred wrote:
+
+> diff --git a/tools/patman/commit.py b/tools/patman/commit.py
+> @@ -41,6 +41,9 @@ class Commit:
+> self.rtags = collections.defaultdict(set)
+> self.warn = []
+>
+> + def __str__(self):
+> + return self.subject
+> +
+> def AddChange(self, version, info):
+> """Add a new change line to the change list for a version.
+>
+A comment
+
+Reviewed-by: %s
+''' % self.fred}
+ patch2.comments = [comment2a, comment2b]
+
+ # This test works by setting up commits and patches for use by the
+ # fake Rest API function _fake_patchwork2(). It calls various functions
+ # in the status module after setting up tags in the commits, checking
+ # that things behave as expected.
+ self.commits = [commit1, commit2]
+ self.patches = [patch1, patch2]
+
+ # Check that the output matches expectations:
+ # 1 Subject 1
+ # Reviewed-by: Joe Bloggs <joe@napierwallies.co.nz>
+ # 2 Subject 2
+ # Tested-by: Lord Edmund Blackaddër <weasel@blackadder.org>
+ # Reviewed-by: Fred Bloggs <f.bloggs@napier.net>
+ # + Reviewed-by: Mary Bloggs <mary@napierwallies.co.nz>
+ # 1 new response available in patchwork
+
+ series = Series()
+ series.commits = [commit1, commit2]
+ terminal.SetPrintTestMode()
+ status.check_patchwork_status(series, '1234', None, None, False, True,
+ None, self._fake_patchwork2)
+ lines = iter(terminal.GetPrintTestLines())
+ col = terminal.Color()
+ self.assertEqual(terminal.PrintLine(' 1 Subject 1', col.BLUE),
+ next(lines))
+ self.assertEqual(
+ terminal.PrintLine(' + Reviewed-by: ', col.GREEN, newline=False),
+ next(lines))
+ self.assertEqual(terminal.PrintLine(self.joe, col.WHITE), next(lines))
+
+ self.assertEqual(terminal.PrintLine('Review: %s' % self.joe, col.RED),
+ next(lines))
+ self.assertEqual(terminal.PrintLine(' Hi Fred,', None), next(lines))
+ self.assertEqual(terminal.PrintLine('', None), next(lines))
+ self.assertEqual(terminal.PrintLine(' > File: file.c', col.MAGENTA),
+ next(lines))
+ self.assertEqual(terminal.PrintLine(' > Some code', col.MAGENTA),
+ next(lines))
+ self.assertEqual(terminal.PrintLine(' > and more code', col.MAGENTA),
+ next(lines))
+ self.assertEqual(terminal.PrintLine(
+ ' Here is my comment above the above...', None), next(lines))
+ self.assertEqual(terminal.PrintLine('', None), next(lines))
+
+ self.assertEqual(terminal.PrintLine(' 2 Subject 2', col.BLUE),
+ next(lines))
+ self.assertEqual(
+ terminal.PrintLine(' + Reviewed-by: ', col.GREEN, newline=False),
+ next(lines))
+ self.assertEqual(terminal.PrintLine(self.fred, col.WHITE),
+ next(lines))
+ self.assertEqual(
+ terminal.PrintLine(' + Reviewed-by: ', col.GREEN, newline=False),
+ next(lines))
+ self.assertEqual(terminal.PrintLine(self.mary, col.WHITE),
+ next(lines))
+ self.assertEqual(
+ terminal.PrintLine(' + Tested-by: ', col.GREEN, newline=False),
+ next(lines))
+ self.assertEqual(terminal.PrintLine(self.leb, col.WHITE),
+ next(lines))
+
+ self.assertEqual(terminal.PrintLine('Review: %s' % self.fred, col.RED),
+ next(lines))
+ self.assertEqual(terminal.PrintLine(' Hi Fred,', None), next(lines))
+ self.assertEqual(terminal.PrintLine('', None), next(lines))
+ self.assertEqual(terminal.PrintLine(
+ ' > File: tools/patman/commit.py', col.MAGENTA), next(lines))
+ self.assertEqual(terminal.PrintLine(
+ ' > Line: 41 / 41: class Commit:', col.MAGENTA), next(lines))
+ self.assertEqual(terminal.PrintLine(
+ ' > + return self.subject', col.MAGENTA), next(lines))
+ self.assertEqual(terminal.PrintLine(
+ ' > +', col.MAGENTA), next(lines))
+ self.assertEqual(
+ terminal.PrintLine(' > def AddChange(self, version, info):',
+ col.MAGENTA),
+ next(lines))
+ self.assertEqual(terminal.PrintLine(
+ ' > """Add a new change line to the change list for a version.',
+ col.MAGENTA), next(lines))
+ self.assertEqual(terminal.PrintLine(
+ ' >', col.MAGENTA), next(lines))
+ self.assertEqual(terminal.PrintLine(
+ ' A comment', None), next(lines))
+ self.assertEqual(terminal.PrintLine('', None), next(lines))
+
+ self.assertEqual(terminal.PrintLine(
+ '4 new responses available in patchwork (use -d to write them to a new branch)',
+ None), next(lines))
diff --git a/roms/u-boot/tools/patman/get_maintainer.py b/roms/u-boot/tools/patman/get_maintainer.py
new file mode 100644
index 000000000..af4ba15bc
--- /dev/null
+++ b/roms/u-boot/tools/patman/get_maintainer.py
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2012 The Chromium OS Authors.
+#
+
+import os
+
+from patman import command
+
+def FindGetMaintainer(try_list):
+ """Look for the get_maintainer.pl script.
+
+ Args:
+ try_list: List of directories to try for the get_maintainer.pl script
+
+ Returns:
+ If the script is found we'll return a path to it; else None.
+ """
+ # Look in the list
+ for path in try_list:
+ fname = os.path.join(path, 'get_maintainer.pl')
+ if os.path.isfile(fname):
+ return fname
+
+ return None
+
+def GetMaintainer(dir_list, fname, verbose=False):
+ """Run get_maintainer.pl on a file if we find it.
+
+ We look for get_maintainer.pl in each of the directories in dir_list.
+ If we find it we run it on the given patch file. If we don't find it
+ we return an empty list, printing a warning if verbose is set.
+
+ Args:
+ dir_list: List of directories to try for the get_maintainer.pl script
+ fname: Path to the patch file to run get_maintainer.pl on.
+ verbose: True to print a warning if the script is not found
+
+ Returns:
+ A list of email addresses to CC to.
+ """
+ get_maintainer = FindGetMaintainer(dir_list)
+ if not get_maintainer:
+ if verbose:
+ print("WARNING: Couldn't find get_maintainer.pl")
+ return []
+
+ stdout = command.Output(get_maintainer, '--norolestats', fname)
+ lines = stdout.splitlines()
+ return [ x.replace('"', '') for x in lines ]
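+
+# Illustrative usage (a sketch; the directory and file names here are just
+# examples): the caller passes the candidate script directories and a patch
+# file, and gets back a list of addresses to CC:
+#
+#     cc_list = GetMaintainer(['scripts'], '0001-add-foo-driver.patch')
+#     for addr in cc_list:
+#         print(addr)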
diff --git a/roms/u-boot/tools/patman/gitutil.py b/roms/u-boot/tools/patman/gitutil.py
new file mode 100644
index 000000000..5e4c1128d
--- /dev/null
+++ b/roms/u-boot/tools/patman/gitutil.py
@@ -0,0 +1,675 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+
+import re
+import os
+import subprocess
+import sys
+
+from patman import command
+from patman import settings
+from patman import terminal
+from patman import tools
+
+# True to use --no-decorate - we check this in Setup()
+use_no_decorate = True
+
+def LogCmd(commit_range, git_dir=None, oneline=False, reverse=False,
+ count=None):
+ """Create a command to perform a 'git log'
+
+ Args:
+ commit_range: Range expression to use for log, None for none
+ git_dir: Path to git repository (None to use default)
+ oneline: True to use --oneline, else False
+ reverse: True to reverse the log (--reverse)
+ count: Number of commits to list, or None for no limit
+ Return:
+ List containing command and arguments to run
+ """
+ cmd = ['git']
+ if git_dir:
+ cmd += ['--git-dir', git_dir]
+ cmd += ['--no-pager', 'log', '--no-color']
+ if oneline:
+ cmd.append('--oneline')
+ if use_no_decorate:
+ cmd.append('--no-decorate')
+ if reverse:
+ cmd.append('--reverse')
+ if count is not None:
+ cmd.append('-n%d' % count)
+ if commit_range:
+ cmd.append(commit_range)
+
+ # Add this in case we have a branch with the same name as a directory.
+ # This avoids messages like this, for example:
+ # fatal: ambiguous argument 'test': both revision and filename
+ cmd.append('--')
+ return cmd
+
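+# For illustration (not executed): LogCmd('HEAD~3..', oneline=True, count=5)
+# builds a command list roughly like
+#
+#     ['git', '--no-pager', 'log', '--no-color', '--oneline',
+#      '--no-decorate', '-n5', 'HEAD~3..', '--']
+#
+# with '--no-decorate' present only while use_no_decorate is True.
+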
+def CountCommitsToBranch(branch):
+ """Returns number of commits between HEAD and the tracking branch.
+
+ This looks back to the tracking branch and works out the number of commits
+ since then.
+
+ Args:
+ branch: Branch to count from (None for current branch)
+
+ Return:
+ Number of patches that exist on top of the branch
+ """
+ if branch:
+ us, msg = GetUpstream('.git', branch)
+ rev_range = '%s..%s' % (us, branch)
+ else:
+ rev_range = '@{upstream}..'
+ pipe = [LogCmd(rev_range, oneline=True)]
+ result = command.RunPipe(pipe, capture=True, capture_stderr=True,
+ oneline=True, raise_on_error=False)
+ if result.return_code:
+ raise ValueError('Failed to determine upstream: %s' %
+ result.stderr.strip())
+ patch_count = len(result.stdout.splitlines())
+ return patch_count
+
+def NameRevision(commit_hash):
+ """Gets the revision name for a commit
+
+ Args:
+ commit_hash: Commit hash to look up
+
+ Return:
+ Name of revision, if any, else None
+ """
+ pipe = ['git', 'name-rev', commit_hash]
+ stdout = command.RunPipe([pipe], capture=True, oneline=True).stdout
+
+ # We expect a commit, a space, then a revision name
+ name = stdout.split(' ')[1].strip()
+ return name
+
+def GuessUpstream(git_dir, branch):
+ """Tries to guess the upstream for a branch
+
+ This lists out top commits on a branch and tries to find a suitable
+ upstream. It does this by looking for the first commit where
+ 'git name-rev' returns a plain branch name, with no ! or ^ modifiers.
+
+ Args:
+ git_dir: Git directory containing repo
+ branch: Name of branch
+
+ Returns:
+ Tuple:
+ Name of upstream branch (e.g. 'upstream/master') or None if none
+ Warning/error message, or None if none
+ """
+ pipe = [LogCmd(branch, git_dir=git_dir, oneline=True, count=100)]
+ result = command.RunPipe(pipe, capture=True, capture_stderr=True,
+ raise_on_error=False)
+ if result.return_code:
+ return None, "Branch '%s' not found" % branch
+ for line in result.stdout.splitlines()[1:]:
+ commit_hash = line.split(' ')[0]
+ name = NameRevision(commit_hash)
+ if '~' not in name and '^' not in name:
+ if name.startswith('remotes/'):
+ name = name[8:]
+ return name, "Guessing upstream as '%s'" % name
+ return None, "Cannot find a suitable upstream for branch '%s'" % branch
+
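+# Example of the heuristic above (illustrative): if 'git name-rev' names a
+# commit 'remotes/origin/master~5' it is skipped because of the '~' modifier;
+# the first commit named plainly, say 'remotes/origin/master', gives the
+# result ('origin/master', "Guessing upstream as 'origin/master'").
+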
+def GetUpstream(git_dir, branch):
+ """Returns the name of the upstream for a branch
+
+ Args:
+ git_dir: Git directory containing repo
+ branch: Name of branch
+
+ Returns:
+ Tuple:
+ Name of upstream branch (e.g. 'upstream/master') or None if none
+ Warning/error message, or None if none
+ """
+ try:
+ remote = command.OutputOneLine('git', '--git-dir', git_dir, 'config',
+ 'branch.%s.remote' % branch)
+ merge = command.OutputOneLine('git', '--git-dir', git_dir, 'config',
+ 'branch.%s.merge' % branch)
+ except:
+ upstream, msg = GuessUpstream(git_dir, branch)
+ return upstream, msg
+
+ if remote == '.':
+ return merge, None
+ elif remote and merge:
+ leaf = merge.split('/')[-1]
+ return '%s/%s' % (remote, leaf), None
+ else:
+ raise ValueError("Cannot determine upstream branch for branch "
+ "'%s' remote='%s', merge='%s'" % (branch, remote, merge))
+
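+# Example (illustrative): for a branch set up with
+#     git branch --track topic origin/master
+# the config gives remote='origin' and merge='refs/heads/master', so this
+# returns ('origin/master', None). For a local branch with remote='.', the
+# merge ref itself (e.g. 'refs/heads/base') is returned unchanged.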
+
+def GetRangeInBranch(git_dir, branch, include_upstream=False):
+ """Returns an expression for the commits in the given branch.
+
+ Args:
+ git_dir: Directory containing git repo
+ branch: Name of branch
+ include_upstream: True to include the upstream commit in the range
+ Return:
+ Tuple of (range expression, message). The expression has the form
+ 'upstream..branch' and can be used to access the commits; it is None
+ (with an error message) if the branch does not exist.
+ """
+ upstream, msg = GetUpstream(git_dir, branch)
+ if not upstream:
+ return None, msg
+ rstr = '%s%s..%s' % (upstream, '~' if include_upstream else '', branch)
+ return rstr, msg
+
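+# Example (illustrative): if branch 'first' tracks local branch 'base',
+# GetRangeInBranch('.git', 'first') returns ('refs/heads/base..first', None);
+# with include_upstream=True the range is 'refs/heads/base~..first' so the
+# upstream commit itself is included.
+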
+def CountCommitsInRange(git_dir, range_expr):
+ """Returns the number of commits in the given range.
+
+ Args:
+ git_dir: Directory containing git repo
+ range_expr: Range to check
+ Return:
+ Tuple of (number of commits in the supplied range, None), or
+ (None, error message) if the range was not found or is invalid
+ """
+ pipe = [LogCmd(range_expr, git_dir=git_dir, oneline=True)]
+ result = command.RunPipe(pipe, capture=True, capture_stderr=True,
+ raise_on_error=False)
+ if result.return_code:
+ return None, "Range '%s' not found or is invalid" % range_expr
+ patch_count = len(result.stdout.splitlines())
+ return patch_count, None
+
+def CountCommitsInBranch(git_dir, branch, include_upstream=False):
+ """Returns the number of commits in the given branch.
+
+ Args:
+ git_dir: Directory containing git repo
+ branch: Name of branch
+ Return:
+ Tuple of (number of patches on top of the branch, None), or
+ (None, error message) if the branch does not exist
+ """
+ range_expr, msg = GetRangeInBranch(git_dir, branch, include_upstream)
+ if not range_expr:
+ return None, msg
+ return CountCommitsInRange(git_dir, range_expr)
+
+def CountCommits(commit_range):
+ """Returns the number of commits in the given range.
+
+ Args:
+ commit_range: Range of commits to count (e.g. 'HEAD..base')
+ Return:
+ Number of patches that exist on top of the branch
+ """
+ pipe = [LogCmd(commit_range, oneline=True),
+ ['wc', '-l']]
+ stdout = command.RunPipe(pipe, capture=True, oneline=True).stdout
+ patch_count = int(stdout)
+ return patch_count
+
+def Checkout(commit_hash, git_dir=None, work_tree=None, force=False):
+ """Checkout the selected commit for this build
+
+ Args:
+ commit_hash: Commit hash to check out
+ git_dir: Path to git repository (None to use default)
+ work_tree: Path to work tree (None to use default)
+ force: True to force the checkout (-f)
+ """
+ pipe = ['git']
+ if git_dir:
+ pipe.extend(['--git-dir', git_dir])
+ if work_tree:
+ pipe.extend(['--work-tree', work_tree])
+ pipe.append('checkout')
+ if force:
+ pipe.append('-f')
+ pipe.append(commit_hash)
+ result = command.RunPipe([pipe], capture=True, raise_on_error=False,
+ capture_stderr=True)
+ if result.return_code != 0:
+ raise OSError('git checkout (%s): %s' % (pipe, result.stderr))
+
+def Clone(git_dir, output_dir):
+ """Checkout the selected commit for this build
+
+ Args:
+ commit_hash: Commit hash to check out
+ """
+ pipe = ['git', 'clone', git_dir, '.']
+ result = command.RunPipe([pipe], capture=True, cwd=output_dir,
+ capture_stderr=True)
+ if result.return_code != 0:
+ raise OSError('git clone: %s' % result.stderr)
+
+def Fetch(git_dir=None, work_tree=None):
+ """Fetch from the origin repo
+
+ Args:
+ git_dir: Path to git repository (None to use default)
+ work_tree: Path to work tree (None to use default)
+ """
+ pipe = ['git']
+ if git_dir:
+ pipe.extend(['--git-dir', git_dir])
+ if work_tree:
+ pipe.extend(['--work-tree', work_tree])
+ pipe.append('fetch')
+ result = command.RunPipe([pipe], capture=True, capture_stderr=True)
+ if result.return_code != 0:
+ raise OSError('git fetch: %s' % result.stderr)
+
+def CheckWorktreeIsAvailable(git_dir):
+ """Check if git-worktree functionality is available
+
+ Args:
+ git_dir: The repository to test in
+
+ Returns:
+ True if git-worktree commands will work, False otherwise.
+ """
+ pipe = ['git', '--git-dir', git_dir, 'worktree', 'list']
+ result = command.RunPipe([pipe], capture=True, capture_stderr=True,
+ raise_on_error=False)
+ return result.return_code == 0
+
+def AddWorktree(git_dir, output_dir, commit_hash=None):
+ """Create and checkout a new git worktree for this build
+
+ Args:
+ git_dir: The repository to checkout the worktree from
+ output_dir: Path for the new worktree
+ commit_hash: Commit hash to checkout
+ """
+ # We need to pass --detach to avoid creating a new branch
+ pipe = ['git', '--git-dir', git_dir, 'worktree', 'add', '.', '--detach']
+ if commit_hash:
+ pipe.append(commit_hash)
+ result = command.RunPipe([pipe], capture=True, cwd=output_dir,
+ capture_stderr=True)
+ if result.return_code != 0:
+ raise OSError('git worktree add: %s' % result.stderr)
+
+def PruneWorktrees(git_dir):
+ """Remove administrative files for deleted worktrees
+
+ Args:
+ git_dir: The repository whose deleted worktrees should be pruned
+ """
+ pipe = ['git', '--git-dir', git_dir, 'worktree', 'prune']
+ result = command.RunPipe([pipe], capture=True, capture_stderr=True)
+ if result.return_code != 0:
+ raise OSError('git worktree prune: %s' % result.stderr)
+
+def CreatePatches(branch, start, count, ignore_binary, series, signoff = True):
+ """Create a series of patches from the top of the current branch.
+
+ The patch files are written to the current directory using
+ git format-patch.
+
+ Args:
+ branch: Branch to create patches from (None for current branch)
+ start: Commit to start from: 0=HEAD, 1=next one, etc.
+ count: number of commits to include
+ ignore_binary: Don't generate patches for binary files
+ series: Series object for this series (set of patches)
+ signoff: True to add a Signed-off-by line to each patch
+ Return:
+ Filename of cover letter (None if none)
+ List of filenames of patch files
+ """
+ if series.get('version'):
+ version = '%s ' % series['version']
+ cmd = ['git', 'format-patch', '-M' ]
+ if signoff:
+ cmd.append('--signoff')
+ if ignore_binary:
+ cmd.append('--no-binary')
+ if series.get('cover'):
+ cmd.append('--cover-letter')
+ prefix = series.GetPatchPrefix()
+ if prefix:
+ cmd += ['--subject-prefix=%s' % prefix]
+ brname = branch or 'HEAD'
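+ # e.g. start=0, count=3 gives the range 'HEAD~3..HEAD~0', i.e. the top three commits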
+ cmd += ['%s~%d..%s~%d' % (brname, start + count, brname, start)]
+
+ stdout = command.RunList(cmd)
+ files = stdout.splitlines()
+
+ # We have an extra file if there is a cover letter
+ if series.get('cover'):
+ return files[0], files[1:]
+ else:
+ return None, files
+
+def BuildEmailList(in_list, tag=None, alias=None, warn_on_error=True):
+ """Build a list of email addresses based on an input list.
+
+ Takes a list of email addresses and aliases, and turns this into a list
+ of only email addresses, by resolving any aliases that are present.
+
+ If the tag is given, then each email address is prepended with this
+ tag and a space. If the tag starts with a minus sign (indicating a
+ command line parameter) then the email address is quoted.
+
+ Args:
+ in_list: List of aliases/email addresses
+ tag: Text to put before each address
+ alias: Alias dictionary
+ warn_on_error: True to raise an error when an alias fails to match,
+ False to just print a message.
+
+ Returns:
+ List of email addresses
+
+ >>> alias = {}
+ >>> alias['fred'] = ['f.bloggs@napier.co.nz']
+ >>> alias['john'] = ['j.bloggs@napier.co.nz']
+ >>> alias['mary'] = ['Mary Poppins <m.poppins@cloud.net>']
+ >>> alias['boys'] = ['fred', ' john']
+ >>> alias['all'] = ['fred ', 'john', ' mary ']
+ >>> BuildEmailList(['john', 'mary'], None, alias)
+ ['j.bloggs@napier.co.nz', 'Mary Poppins <m.poppins@cloud.net>']
+ >>> BuildEmailList(['john', 'mary'], '--to', alias)
+ ['--to "j.bloggs@napier.co.nz"', \
+'--to "Mary Poppins <m.poppins@cloud.net>"']
+ >>> BuildEmailList(['john', 'mary'], 'Cc', alias)
+ ['Cc j.bloggs@napier.co.nz', 'Cc Mary Poppins <m.poppins@cloud.net>']
+ """
+ quote = '"' if tag and tag[0] == '-' else ''
+ raw = []
+ for item in in_list:
+ raw += LookupEmail(item, alias, warn_on_error=warn_on_error)
+ result = []
+ for item in raw:
+ if not item in result:
+ result.append(item)
+ if tag:
+ return ['%s %s%s%s' % (tag, quote, email, quote) for email in result]
+ return result
+
+def CheckSuppressCCConfig():
+ """Check if sendemail.suppresscc is configured correctly.
+
+ Returns:
+ True if the option is configured correctly, False otherwise.
+ """
+ suppresscc = command.OutputOneLine('git', 'config', 'sendemail.suppresscc',
+ raise_on_error=False)
+
+ # Other settings should be fine.
+ if suppresscc == 'all' or suppresscc == 'cccmd':
+ col = terminal.Color()
+
+ print((col.Color(col.RED, "error") +
+ ": git config sendemail.suppresscc set to %s\n" % (suppresscc)) +
+ " patman needs --cc-cmd to be run to set the cc list.\n" +
+ " Please run:\n" +
+ " git config --unset sendemail.suppresscc\n" +
+ " Or read the man page:\n" +
+ " git send-email --help\n" +
+ " and set an option that runs --cc-cmd\n")
+ return False
+
+ return True
+
+def EmailPatches(series, cover_fname, args, dry_run, warn_on_error, cc_fname,
+ self_only=False, alias=None, in_reply_to=None, thread=False,
+ smtp_server=None):
+ """Email a patch series.
+
+ Args:
+ series: Series object containing destination info
+ cover_fname: filename of cover letter
+ args: list of filenames of patch files
+ dry_run: Just return the command that would be run
+ warn_on_error: True to print a warning when an alias fails to match,
+ False to ignore it.
+ cc_fname: Filename of Cc file for per-commit Cc
+ self_only: True to just email to yourself as a test
+ in_reply_to: If set we'll pass this to git as --in-reply-to.
+ Should be a message ID that this is in reply to.
+ thread: True to add --thread to git send-email (make
+ all patches reply to cover-letter or first patch in series)
+ smtp_server: SMTP server to use to send patches
+
+ Returns:
+ Git command that was/would be run
+
+ # For the duration of this doctest pretend that we ran patman with ./patman
+ >>> _old_argv0 = sys.argv[0]
+ >>> sys.argv[0] = './patman'
+
+ >>> alias = {}
+ >>> alias['fred'] = ['f.bloggs@napier.co.nz']
+ >>> alias['john'] = ['j.bloggs@napier.co.nz']
+ >>> alias['mary'] = ['m.poppins@cloud.net']
+ >>> alias['boys'] = ['fred', ' john']
+ >>> alias['all'] = ['fred ', 'john', ' mary ']
+ >>> alias[os.getenv('USER')] = ['this-is-me@me.com']
+ >>> series = {}
+ >>> series['to'] = ['fred']
+ >>> series['cc'] = ['mary']
+ >>> EmailPatches(series, 'cover', ['p1', 'p2'], True, True, 'cc-fname', \
+ False, alias)
+ 'git send-email --annotate --to "f.bloggs@napier.co.nz" --cc \
+"m.poppins@cloud.net" --cc-cmd "./patman send --cc-cmd cc-fname" cover p1 p2'
+ >>> EmailPatches(series, None, ['p1'], True, True, 'cc-fname', False, \
+ alias)
+ 'git send-email --annotate --to "f.bloggs@napier.co.nz" --cc \
+"m.poppins@cloud.net" --cc-cmd "./patman send --cc-cmd cc-fname" p1'
+ >>> series['cc'] = ['all']
+ >>> EmailPatches(series, 'cover', ['p1', 'p2'], True, True, 'cc-fname', \
+ True, alias)
+ 'git send-email --annotate --to "this-is-me@me.com" --cc-cmd "./patman \
+send --cc-cmd cc-fname" cover p1 p2'
+ >>> EmailPatches(series, 'cover', ['p1', 'p2'], True, True, 'cc-fname', \
+ False, alias)
+ 'git send-email --annotate --to "f.bloggs@napier.co.nz" --cc \
+"f.bloggs@napier.co.nz" --cc "j.bloggs@napier.co.nz" --cc \
+"m.poppins@cloud.net" --cc-cmd "./patman send --cc-cmd cc-fname" cover p1 p2'
+
+ # Restore argv[0] since we clobbered it.
+ >>> sys.argv[0] = _old_argv0
+ """
+ to = BuildEmailList(series.get('to'), '--to', alias, warn_on_error)
+ if not to:
+ git_config_to = command.Output('git', 'config', 'sendemail.to',
+ raise_on_error=False)
+ if not git_config_to:
+ print("No recipient.\n"
+ "Please add something like this to a commit\n"
+ "Series-to: Fred Bloggs <f.blogs@napier.co.nz>\n"
+ "Or do something like this\n"
+ "git config sendemail.to u-boot@lists.denx.de")
+ return
+ cc = BuildEmailList(list(set(series.get('cc')) - set(series.get('to'))),
+ '--cc', alias, warn_on_error)
+ if self_only:
+ to = BuildEmailList([os.getenv('USER')], '--to', alias, warn_on_error)
+ cc = []
+ cmd = ['git', 'send-email', '--annotate']
+ if smtp_server:
+ cmd.append('--smtp-server=%s' % smtp_server)
+ if in_reply_to:
+ cmd.append('--in-reply-to="%s"' % in_reply_to)
+ if thread:
+ cmd.append('--thread')
+
+ cmd += to
+ cmd += cc
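+ # git send-email invokes this command for each patch to collect its extra Cc addresses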
+ cmd += ['--cc-cmd', '"%s send --cc-cmd %s"' % (sys.argv[0], cc_fname)]
+ if cover_fname:
+ cmd.append(cover_fname)
+ cmd += args
+ cmdstr = ' '.join(cmd)
+ if not dry_run:
+ os.system(cmdstr)
+ return cmdstr
+
+
+def LookupEmail(lookup_name, alias=None, warn_on_error=True, level=0):
+ """If an email address is an alias, look it up and return the full name
+
+ TODO: Why not just use git's own alias feature?
+
+ Args:
+ lookup_name: Alias or email address to look up
+ alias: Dictionary containing aliases (None to use settings default)
+ warn_on_error: True to print a warning when an alias fails to match,
+ False to ignore it.
+
+ Returns:
+ list: email addresses that the name resolves to
+
+ Raises:
+ OSError if a recursive alias reference was found
+ ValueError if an alias was not found
+
+ >>> alias = {}
+ >>> alias['fred'] = ['f.bloggs@napier.co.nz']
+ >>> alias['john'] = ['j.bloggs@napier.co.nz']
+ >>> alias['mary'] = ['m.poppins@cloud.net']
+ >>> alias['boys'] = ['fred', ' john', 'f.bloggs@napier.co.nz']
+ >>> alias['all'] = ['fred ', 'john', ' mary ']
+ >>> alias['loop'] = ['other', 'john', ' mary ']
+ >>> alias['other'] = ['loop', 'john', ' mary ']
+ >>> LookupEmail('mary', alias)
+ ['m.poppins@cloud.net']
+ >>> LookupEmail('arthur.wellesley@howe.ro.uk', alias)
+ ['arthur.wellesley@howe.ro.uk']
+ >>> LookupEmail('boys', alias)
+ ['f.bloggs@napier.co.nz', 'j.bloggs@napier.co.nz']
+ >>> LookupEmail('all', alias)
+ ['f.bloggs@napier.co.nz', 'j.bloggs@napier.co.nz', 'm.poppins@cloud.net']
+ >>> LookupEmail('odd', alias)
+ Alias 'odd' not found
+ []
+ >>> LookupEmail('loop', alias)
+ Traceback (most recent call last):
+ ...
+ OSError: Recursive email alias at 'other'
+ >>> LookupEmail('odd', alias, warn_on_error=False)
+ []
+ >>> # In this case the loop part will effectively be ignored.
+ >>> LookupEmail('loop', alias, warn_on_error=False)
+ Recursive email alias at 'other'
+ Recursive email alias at 'john'
+ Recursive email alias at 'mary'
+ ['j.bloggs@napier.co.nz', 'm.poppins@cloud.net']
+ """
+ if not alias:
+ alias = settings.alias
+ lookup_name = lookup_name.strip()
+ if '@' in lookup_name: # Perhaps a real email address
+ return [lookup_name]
+
+ lookup_name = lookup_name.lower()
+ col = terminal.Color()
+
+ out_list = []
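+ # Guard against alias definitions that refer to each other in a loop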
+ if level > 10:
+ msg = "Recursive email alias at '%s'" % lookup_name
+ if warn_on_error:
+ raise OSError(msg)
+ else:
+ print(col.Color(col.RED, msg))
+ return out_list
+
+ if lookup_name:
+ if not lookup_name in alias:
+ msg = "Alias '%s' not found" % lookup_name
+ if warn_on_error:
+ print(col.Color(col.RED, msg))
+ return out_list
+ for item in alias[lookup_name]:
+ todo = LookupEmail(item, alias, warn_on_error, level + 1)
+ for new_item in todo:
+ if not new_item in out_list:
+ out_list.append(new_item)
+
+ return out_list
+
+def GetTopLevel():
+ """Return name of top-level directory for this git repo.
+
+ Returns:
+ Full path to git top-level directory
+
+ This test makes sure that we are running tests in the right subdir
+
+ >>> os.path.realpath(os.path.dirname(__file__)) == \
+ os.path.join(GetTopLevel(), 'tools', 'patman')
+ True
+ """
+ return command.OutputOneLine('git', 'rev-parse', '--show-toplevel')
+
+def GetAliasFile():
+ """Gets the name of the git alias file.
+
+ Returns:
+ Filename of git alias file, or None if none
+ """
+ fname = command.OutputOneLine('git', 'config', 'sendemail.aliasesfile',
+ raise_on_error=False)
+ if fname:
+ fname = os.path.join(GetTopLevel(), fname.strip())
+ return fname
+
+def GetDefaultUserName():
+ """Gets the user.name from .gitconfig file.
+
+ Returns:
+ User name found in .gitconfig file, or None if none
+ """
+ uname = command.OutputOneLine('git', 'config', '--global', 'user.name')
+ return uname
+
+def GetDefaultUserEmail():
+ """Gets the user.email from the global .gitconfig file.
+
+ Returns:
+ User's email found in .gitconfig file, or None if none
+ """
+ uemail = command.OutputOneLine('git', 'config', '--global', 'user.email')
+ return uemail
+
+def GetDefaultSubjectPrefix():
+ """Gets the format.subjectprefix from local .git/config file.
+
+ Returns:
+ Subject prefix found in local .git/config file, or None if none
+ """
+ sub_prefix = command.OutputOneLine('git', 'config', 'format.subjectprefix',
+ raise_on_error=False)
+
+ return sub_prefix
+
+def Setup():
+ """Set up git utils, by reading the alias files."""
+ # Check for a git alias file also
+ global use_no_decorate
+
+ alias_fname = GetAliasFile()
+ if alias_fname:
+ settings.ReadGitAliases(alias_fname)
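+ # Run a trial 'git log' to see whether the options used by LogCmd() work with this git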
+ cmd = LogCmd(None, count=0)
+ use_no_decorate = (command.RunPipe([cmd], raise_on_error=False)
+ .return_code == 0)
+
+def GetHead():
+ """Get the hash of the current HEAD
+
+ Returns:
+ Hash of HEAD
+ """
+ return command.OutputOneLine('git', 'show', '-s', '--pretty=format:%H')
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/roms/u-boot/tools/patman/main.py b/roms/u-boot/tools/patman/main.py
new file mode 100755
index 000000000..04e37a593
--- /dev/null
+++ b/roms/u-boot/tools/patman/main.py
@@ -0,0 +1,202 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+
+"""See README for more information"""
+
+from argparse import ArgumentParser
+import os
+import re
+import shutil
+import sys
+import traceback
+import unittest
+
+if __name__ == "__main__":
+ # Allow 'from patman import xxx' to work
+ our_path = os.path.dirname(os.path.realpath(__file__))
+ sys.path.append(os.path.join(our_path, '..'))
+
+# Our modules
+from patman import command
+from patman import control
+from patman import gitutil
+from patman import project
+from patman import settings
+from patman import terminal
+from patman import test_util
+from patman import test_checkpatch
+
+epilog = '''Create patches from commits in a branch, check them and email them
+as specified by tags you place in the commits. Use -n to do a dry run first.'''
+
+parser = ArgumentParser(epilog=epilog)
+parser.add_argument('-b', '--branch', type=str,
+ help="Branch to process (by default, the current branch)")
+parser.add_argument('-c', '--count', dest='count', type=int,
+ default=-1, help='Automatically create patches from top n commits')
+parser.add_argument('-e', '--end', type=int, default=0,
+ help='Commits to skip at end of patch list')
+parser.add_argument('-D', '--debug', action='store_true',
+ help='Enable debugging (provides a full traceback on error)')
+parser.add_argument('-p', '--project', default=project.DetectProject(),
+ help="Project name; affects default option values and "
+ "aliases [default: %(default)s]")
+parser.add_argument('-P', '--patchwork-url',
+ default='https://patchwork.ozlabs.org',
+ help='URL of patchwork server [default: %(default)s]')
+parser.add_argument('-s', '--start', dest='start', type=int,
+ default=0, help='Commit to start creating patches from (0 = HEAD)')
+parser.add_argument('-v', '--verbose', action='store_true', dest='verbose',
+ default=False, help='Verbose output of errors and warnings')
+parser.add_argument('-H', '--full-help', action='store_true', dest='full_help',
+ default=False, help='Display the README file')
+
+subparsers = parser.add_subparsers(dest='cmd')
+send = subparsers.add_parser('send')
+send.add_argument('-i', '--ignore-errors', action='store_true',
+ dest='ignore_errors', default=False,
+ help='Send patches email even if patch errors are found')
+send.add_argument('-l', '--limit-cc', dest='limit', type=int, default=None,
+ help='Limit the cc list to LIMIT entries [default: %(default)s]')
+send.add_argument('-m', '--no-maintainers', action='store_false',
+ dest='add_maintainers', default=True,
+ help="Don't cc the file maintainers automatically")
+send.add_argument('-n', '--dry-run', action='store_true', dest='dry_run',
+ default=False, help="Do a dry run (create but don't email patches)")
+send.add_argument('-r', '--in-reply-to', type=str, action='store',
+ help="Message ID that this series is in reply to")
+send.add_argument('-t', '--ignore-bad-tags', action='store_true',
+ default=False,
+ help='Ignore bad tags / aliases (default=warn)')
+send.add_argument('-T', '--thread', action='store_true', dest='thread',
+ default=False, help='Create patches as a single thread')
+send.add_argument('--cc-cmd', dest='cc_cmd', type=str, action='store',
+ default=None, help='Output cc list for patch file (used by git)')
+send.add_argument('--no-binary', action='store_true', dest='ignore_binary',
+ default=False,
+ help="Do not output contents of changes in binary files")
+send.add_argument('--no-check', action='store_false', dest='check_patch',
+ default=True,
+ help="Don't check for patch compliance")
+send.add_argument('--no-tags', action='store_false', dest='process_tags',
+ default=True, help="Don't process subject tags as aliases")
+send.add_argument('--no-signoff', action='store_false', dest='add_signoff',
+ default=True, help="Don't add Signed-off-by to patches")
+send.add_argument('--smtp-server', type=str,
+ help="Specify the SMTP server to 'git send-email'")
+
+send.add_argument('patchfiles', nargs='*')
+
+test_parser = subparsers.add_parser('test', help='Run tests')
+test_parser.add_argument('testname', type=str, default=None, nargs='?',
+ help="Specify the test to run")
+
+status = subparsers.add_parser('status',
+ help='Check status of patches in patchwork')
+status.add_argument('-C', '--show-comments', action='store_true',
+ help='Show comments from each patch')
+status.add_argument('-d', '--dest-branch', type=str,
+ help='Name of branch to create with collected responses')
+status.add_argument('-f', '--force', action='store_true',
+ help='Force overwriting an existing branch')
+
+# Parse options twice: first to get the project and second to handle
+# defaults properly (which depends on project)
+# Use parse_known_args() in case 'cmd' is omitted
+argv = sys.argv[1:]
+args, rest = parser.parse_known_args(argv)
+if hasattr(args, 'project'):
+ settings.Setup(gitutil, parser, args.project, '')
+ args, rest = parser.parse_known_args(argv)
+
+# If we have a command, it is safe to parse all arguments
+if args.cmd:
+ args = parser.parse_args(argv)
+else:
+ # No command, so insert it after the known arguments and before the ones
+ # that presumably relate to the 'send' subcommand
+ nargs = len(rest)
+ argv = argv[:-nargs] + ['send'] + rest
+ args = parser.parse_args(argv)
+
+if __name__ != "__main__":
+ pass
+
+if not args.debug:
+ sys.tracebacklimit = 0
+
+# Run our meagre tests
+if args.cmd == 'test':
+ import doctest
+ from patman import func_test
+
+ sys.argv = [sys.argv[0]]
+ result = unittest.TestResult()
+ suite = unittest.TestSuite()
+ loader = unittest.TestLoader()
+ for module in (test_checkpatch.TestPatch, func_test.TestFunctional):
+ if args.testname:
+ try:
+ suite.addTests(loader.loadTestsFromName(args.testname, module))
+ except AttributeError:
+ continue
+ else:
+ suite.addTests(loader.loadTestsFromTestCase(module))
+ suite.run(result)
+
+ for module in ['gitutil', 'settings', 'terminal']:
+ suite = doctest.DocTestSuite(module)
+ suite.run(result)
+
+ sys.exit(test_util.ReportResult('patman', args.testname, result))
+
+# Process commits, produce patches files, check them, email them
+elif args.cmd == 'send':
+ # Called from git with a patch filename as argument
+ # Printout a list of additional CC recipients for this patch
+ if args.cc_cmd:
+ fd = open(args.cc_cmd, 'r')
+ re_line = re.compile(r'(\S*) (.*)')
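+ # Each line is '<patch file> <addr>\0<addr>...', as written by Series.MakeCcFile()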
+ for line in fd.readlines():
+ match = re_line.match(line)
+ if match and match.group(1) == args.patchfiles[0]:
+ for cc in match.group(2).split('\0'):
+ cc = cc.strip()
+ if cc:
+ print(cc)
+ fd.close()
+
+ elif args.full_help:
+ pager = os.getenv('PAGER')
+ if not pager:
+ pager = shutil.which('less')
+ if not pager:
+ pager = 'more'
+ fname = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])),
+ 'README')
+ command.Run(pager, fname)
+
+ else:
+ # If we are not processing tags, there is no need to warn about bad ones
+ if not args.process_tags:
+ args.ignore_bad_tags = True
+ control.send(args)
+
+# Check status of patches in patchwork
+elif args.cmd == 'status':
+ ret_code = 0
+ try:
+ control.patchwork_status(args.branch, args.count, args.start, args.end,
+ args.dest_branch, args.force,
+ args.show_comments, args.patchwork_url)
+ except Exception as e:
+ terminal.Print('patman: %s: %s' % (type(e).__name__, e),
+ colour=terminal.Color.RED)
+ if args.debug:
+ print()
+ traceback.print_exc()
+ ret_code = 1
+ sys.exit(ret_code)
diff --git a/roms/u-boot/tools/patman/patchstream.py b/roms/u-boot/tools/patman/patchstream.py
new file mode 100644
index 000000000..a44cd861a
--- /dev/null
+++ b/roms/u-boot/tools/patman/patchstream.py
@@ -0,0 +1,841 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+
+"""Handles parsing a stream of commits/emails from 'git log' or other source"""
+
+import collections
+import datetime
+import io
+import math
+import os
+import re
+import queue
+import shutil
+import tempfile
+
+from patman import command
+from patman import commit
+from patman import gitutil
+from patman.series import Series
+
+# Tags that we detect and remove
+RE_REMOVE = re.compile(r'^BUG=|^TEST=|^BRANCH=|^Review URL:'
+ r'|Reviewed-on:|Commit-\w*:')
+
+# Lines which are allowed after a TEST= line
+RE_ALLOWED_AFTER_TEST = re.compile('^Signed-off-by:')
+
+# Signoffs
+RE_SIGNOFF = re.compile('^Signed-off-by: *(.*)')
+
+# Cover letter tag
+RE_COVER = re.compile('^Cover-([a-z-]*): *(.*)')
+
+# Patch series tag
+RE_SERIES_TAG = re.compile('^Series-([a-z-]*): *(.*)')
+
+# Change-Id will be used to generate the Message-Id and then be stripped
+RE_CHANGE_ID = re.compile('^Change-Id: *(.*)')
+
+# Commit series tag
+RE_COMMIT_TAG = re.compile('^Commit-([a-z-]*): *(.*)')
+
+# Commit tags that we want to collect and keep
+RE_TAG = re.compile('^(Tested-by|Acked-by|Reviewed-by|Patch-cc|Fixes): (.*)')
+
+# The start of a new commit in the git log
+RE_COMMIT = re.compile('^commit ([0-9a-f]*)$')
+
+# We detect these since checkpatch doesn't always do it
+RE_SPACE_BEFORE_TAB = re.compile('^[+].* \t')
+
+# Match indented lines for changes
+RE_LEADING_WHITESPACE = re.compile(r'^\s')
+
+# Detect a 'diff' line
+RE_DIFF = re.compile(r'^>.*diff --git a/(.*) b/(.*)$')
+
+# Detect a context line, like '> @@ -153,8 +153,13 @@ CheckPatch'
+RE_LINE = re.compile(r'>.*@@ \-(\d+),\d+ \+(\d+),\d+ @@ *(.*)')
+
+# States we can be in - can we use range() and still have comments?
+STATE_MSG_HEADER = 0 # Still in the message header
+STATE_PATCH_SUBJECT = 1 # In patch subject (first line of log for a commit)
+STATE_PATCH_HEADER = 2 # In patch header (after the subject)
+STATE_DIFFS = 3 # In the diff part (past --- line)
+
+class PatchStream:
+ """Class for detecting/injecting tags in a patch or series of patches
+
+ We support processing the output of 'git log' to read out the tags we
+ are interested in. We can also process a patch file in order to remove
+ unwanted tags or inject additional ones. These correspond to the two
+ phases of processing.
+ """
+ def __init__(self, series, is_log=False):
+ self.skip_blank = False # True to skip a single blank line
+ self.found_test = False # Found a TEST= line
+ self.lines_after_test = 0 # Number of lines found after TEST=
+ self.linenum = 1 # Output line number we are up to
+ self.in_section = None # Name of start...END section we are in
+ self.notes = [] # Series notes
+ self.section = [] # The current section...END section
+ self.series = series # Info about the patch series
+ self.is_log = is_log # True if indent like git log
+ self.in_change = None # Name of the change list we are in
+ self.change_version = 0 # Non-zero if we are in a change list
+ self.change_lines = [] # Lines of the current change
+ self.blank_count = 0 # Number of blank lines stored up
+ self.state = STATE_MSG_HEADER # What state are we in?
+ self.commit = None # Current commit
+ # List of unquoted text blocks, each a list of str lines
+ self.snippets = []
+ self.cur_diff = None # Last 'diff' line seen (str)
+ self.cur_line = None # Last context (@@) line seen (str)
+ self.recent_diff = None # 'diff' line for current snippet (str)
+ self.recent_line = None # '@@' line for current snippet (str)
+ self.recent_quoted = collections.deque([], 5)
+ self.recent_unquoted = queue.Queue()
+ self.was_quoted = None
+
+ @staticmethod
+ def process_text(text, is_comment=False):
+ """Process some text through this class using a default Commit/Series
+
+ Args:
+ text (str): Text to parse
+ is_comment (bool): True if this is a comment rather than a patch.
+ If True, PatchStream doesn't expect a patch subject at the
+ start, but jumps straight into the body
+
+ Returns:
+ PatchStream: object with results
+ """
+ pstrm = PatchStream(Series())
+ pstrm.commit = commit.Commit(None)
+ infd = io.StringIO(text)
+ outfd = io.StringIO()
+ if is_comment:
+ pstrm.state = STATE_PATCH_HEADER
+ pstrm.process_stream(infd, outfd)
+ return pstrm
+
+ def _add_warn(self, warn):
+ """Add a new warning to report to the user about the current commit
+
+ The new warning is added to the current commit if not already present.
+
+ Args:
+ warn (str): Warning to report
+
+ Raises:
+ ValueError: Warning is generated with no commit associated
+ """
+ if not self.commit:
+ print('Warning outside commit: %s' % warn)
+ elif warn not in self.commit.warn:
+ self.commit.warn.append(warn)
+
+ def _add_to_series(self, line, name, value):
+ """Add a new Series-xxx tag.
+
+ When a Series-xxx tag is detected, we come here to record it, if we
+ are scanning a 'git log'.
+
+ Args:
+ line (str): Source line containing tag (useful for debug/error
+ messages)
+ name (str): Tag name (part after 'Series-')
+ value (str): Tag value (part after 'Series-xxx: ')
+ """
+ if name == 'notes':
+ self.in_section = name
+ self.skip_blank = False
+ if self.is_log:
+ warn = self.series.AddTag(self.commit, line, name, value)
+ if warn:
+ self.commit.warn.append(warn)
+
+ def _add_to_commit(self, name):
+ """Add a new Commit-xxx tag.
+
+ When a Commit-xxx tag is detected, we come here to record it.
+
+ Args:
+ name (str): Tag name (part after 'Commit-')
+ """
+ if name == 'notes':
+ self.in_section = 'commit-' + name
+ self.skip_blank = False
+
+ def _add_commit_rtag(self, rtag_type, who):
+ """Add a response tag to the current commit
+
+ Args:
+ rtag_type (str): rtag type (e.g. 'Reviewed-by')
+ who (str): Person who gave that rtag, e.g.
+ 'Fred Bloggs <fred@bloggs.org>'
+ """
+ self.commit.AddRtag(rtag_type, who)
+
+ def _close_commit(self):
+ """Save the current commit into our commit list, and reset our state"""
+ if self.commit and self.is_log:
+ self.series.AddCommit(self.commit)
+ self.commit = None
+ # If 'END' is missing in a 'Cover-letter' section, and that section
+ # happens to show up at the very end of the commit message, this is
+ # the chance for us to fix it up.
+ if self.in_section == 'cover' and self.is_log:
+ self.series.cover = self.section
+ self.in_section = None
+ self.skip_blank = True
+ self.section = []
+
+ self.cur_diff = None
+ self.recent_diff = None
+ self.recent_line = None
+
+ def _parse_version(self, value, line):
+ """Parse a version from a *-changes tag
+
+ Args:
+ value (str): Tag value (part after 'xxx-changes: ')
+ line (str): Source line containing tag
+
+ Returns:
+ int: The version as an integer
+
+ Raises:
+ ValueError: the value cannot be converted
+ """
+ try:
+ return int(value)
+ except ValueError:
+ raise ValueError("%s: Cannot decode version info '%s'" %
+ (self.commit.hash, line))
+
+ def _finalise_change(self):
+ """_finalise a (multi-line) change and add it to the series or commit"""
+ if not self.change_lines:
+ return
+ change = '\n'.join(self.change_lines)
+
+ if self.in_change == 'Series':
+ self.series.AddChange(self.change_version, self.commit, change)
+ elif self.in_change == 'Cover':
+ self.series.AddChange(self.change_version, None, change)
+ elif self.in_change == 'Commit':
+ self.commit.AddChange(self.change_version, change)
+ self.change_lines = []
+
+ def _finalise_snippet(self):
+ """Finish off a snippet and add it to the list
+
+ This is called when we get to the end of a snippet, i.e. when we enter
+ the next block of quoted text:
+
+ This is a comment from someone.
+
+ Something else
+
+ > Now we have some code <----- end of snippet
+ > more code
+
+ Now a comment about the above code
+
+ This adds the snippet to our list
+ """
+ quoted_lines = []
+ while self.recent_quoted:
+ quoted_lines.append(self.recent_quoted.popleft())
+ unquoted_lines = []
+ valid = False
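+ # Drop 'On ... wrote:' attribution lines and note whether any real comment text remains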
+ while not self.recent_unquoted.empty():
+ text = self.recent_unquoted.get()
+ if not (text.startswith('On ') and text.endswith('wrote:')):
+ unquoted_lines.append(text)
+ if text:
+ valid = True
+ if valid:
+ lines = []
+ if self.recent_diff:
+ lines.append('> File: %s' % self.recent_diff)
+ if self.recent_line:
+ out = '> Line: %s / %s' % self.recent_line[:2]
+ if self.recent_line[2]:
+ out += ': %s' % self.recent_line[2]
+ lines.append(out)
+ lines += quoted_lines + unquoted_lines
+ if lines:
+ self.snippets.append(lines)
+
+ def process_line(self, line):
+ """Process a single line of a patch file or commit log
+
+ This process a line and returns a list of lines to output. The list
+ may be empty or may contain multiple output lines.
+
+ This is where all the complicated logic is located. The class's
+ state is used to move between different states and detect things
+ properly.
+
+ We can be in one of two modes:
+ self.is_log == True: This is 'git log' mode, where most output is
+ indented by 4 characters and we are scanning for tags
+
+ self.is_log == False: This is 'patch' mode, where we already have
+ all the tags, and are processing patches to remove junk we
+ don't want, and add things we think are required.
+
+ Args:
+ line (str): text line to process
+
+ Returns:
+ list: list of output lines, or [] if nothing should be output
+
+ Raises:
+ ValueError: a fatal error occurred while parsing, e.g. an END
+ without a starting tag, or two commits with two change IDs
+ """
+ # Initially we have no output. Prepare the input line string
+ out = []
+ line = line.rstrip('\n')
+
+ commit_match = RE_COMMIT.match(line) if self.is_log else None
+
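+ # 'git log' indents the commit message by four spaces; strip that off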
+ if self.is_log:
+ if line[:4] == '    ':
+ line = line[4:]
+
+ # Handle state transition and skipping blank lines
+ series_tag_match = RE_SERIES_TAG.match(line)
+ change_id_match = RE_CHANGE_ID.match(line)
+ commit_tag_match = RE_COMMIT_TAG.match(line)
+ cover_match = RE_COVER.match(line)
+ signoff_match = RE_SIGNOFF.match(line)
+ leading_whitespace_match = RE_LEADING_WHITESPACE.match(line)
+ diff_match = RE_DIFF.match(line)
+ line_match = RE_LINE.match(line)
+ tag_match = None
+ if self.state == STATE_PATCH_HEADER:
+ tag_match = RE_TAG.match(line)
+ is_blank = not line.strip()
+ if is_blank:
+ if (self.state == STATE_MSG_HEADER
+ or self.state == STATE_PATCH_SUBJECT):
+ self.state += 1
+
+ # We don't have a subject in the text stream of patch files
+ # It has its own line with a Subject: tag
+ if not self.is_log and self.state == STATE_PATCH_SUBJECT:
+ self.state += 1
+ elif commit_match:
+ self.state = STATE_MSG_HEADER
+
+ # If a tag is detected, or a new commit starts
+ if series_tag_match or commit_tag_match or change_id_match or \
+ cover_match or signoff_match or self.state == STATE_MSG_HEADER:
+ # but we are already in a section, this means 'END' is missing
+ # for that section, fix it up.
+ if self.in_section:
+ self._add_warn("Missing 'END' in section '%s'" % self.in_section)
+ if self.in_section == 'cover':
+ self.series.cover = self.section
+ elif self.in_section == 'notes':
+ if self.is_log:
+ self.series.notes += self.section
+ elif self.in_section == 'commit-notes':
+ if self.is_log:
+ self.commit.notes += self.section
+ else:
+ # This should not happen
+ raise ValueError("Unknown section '%s'" % self.in_section)
+ self.in_section = None
+ self.skip_blank = True
+ self.section = []
+ # but we are already in a change list, that means a blank line
+ # is missing, fix it up.
+ if self.in_change:
+ self._add_warn("Missing 'blank line' in section '%s-changes'" %
+ self.in_change)
+ self._finalise_change()
+ self.in_change = None
+ self.change_version = 0
+
+ # If we are in a section, keep collecting lines until we see END
+ if self.in_section:
+ if line == 'END':
+ if self.in_section == 'cover':
+ self.series.cover = self.section
+ elif self.in_section == 'notes':
+ if self.is_log:
+ self.series.notes += self.section
+ elif self.in_section == 'commit-notes':
+ if self.is_log:
+ self.commit.notes += self.section
+ else:
+ # This should not happen
+ raise ValueError("Unknown section '%s'" % self.in_section)
+ self.in_section = None
+ self.skip_blank = True
+ self.section = []
+ else:
+ self.section.append(line)
+
+ # If we are not in a section, it is an unexpected END
+ elif line == 'END':
+ raise ValueError("'END' without section")
+
+ # Detect the commit subject
+ elif not is_blank and self.state == STATE_PATCH_SUBJECT:
+ self.commit.subject = line
+
+ # Detect the tags we want to remove, and skip blank lines
+ elif RE_REMOVE.match(line) and not commit_tag_match:
+ self.skip_blank = True
+
+ # TEST= should be the last thing in the commit, so remove
+ # everything after it
+ if line.startswith('TEST='):
+ self.found_test = True
+ elif self.skip_blank and is_blank:
+ self.skip_blank = False
+
+ # Detect Cover-xxx tags
+ elif cover_match:
+ name = cover_match.group(1)
+ value = cover_match.group(2)
+ if name == 'letter':
+ self.in_section = 'cover'
+ self.skip_blank = False
+ elif name == 'letter-cc':
+ self._add_to_series(line, 'cover-cc', value)
+ elif name == 'changes':
+ self.in_change = 'Cover'
+ self.change_version = self._parse_version(value, line)
+
+ # If we are in a change list, keep collecting lines until a blank one
+ elif self.in_change:
+ if is_blank:
+ # Blank line ends this change list
+ self._finalise_change()
+ self.in_change = None
+ self.change_version = 0
+ elif line == '---':
+ self._finalise_change()
+ self.in_change = None
+ self.change_version = 0
+ out = self.process_line(line)
+ elif self.is_log:
+ if not leading_whitespace_match:
+ self._finalise_change()
+ self.change_lines.append(line)
+ self.skip_blank = False
+
+ # Detect Series-xxx tags
+ elif series_tag_match:
+ name = series_tag_match.group(1)
+ value = series_tag_match.group(2)
+ if name == 'changes':
+ # value is the version number: e.g. 1, or 2
+ self.in_change = 'Series'
+ self.change_version = self._parse_version(value, line)
+ else:
+ self._add_to_series(line, name, value)
+ self.skip_blank = True
+
+ # Detect Change-Id tags
+ elif change_id_match:
+ value = change_id_match.group(1)
+ if self.is_log:
+ if self.commit.change_id:
+ raise ValueError(
+ "%s: Two Change-Ids: '%s' vs. '%s'" %
+ (self.commit.hash, self.commit.change_id, value))
+ self.commit.change_id = value
+ self.skip_blank = True
+
+ # Detect Commit-xxx tags
+ elif commit_tag_match:
+ name = commit_tag_match.group(1)
+ value = commit_tag_match.group(2)
+ if name == 'notes':
+ self._add_to_commit(name)
+ self.skip_blank = True
+ elif name == 'changes':
+ self.in_change = 'Commit'
+ self.change_version = self._parse_version(value, line)
+ else:
+ self._add_warn('Line %d: Ignoring Commit-%s' %
+ (self.linenum, name))
+
+ # Detect the start of a new commit
+ elif commit_match:
+ self._close_commit()
+ self.commit = commit.Commit(commit_match.group(1))
+
+ # Detect tags in the commit message
+ elif tag_match:
+ rtag_type, who = tag_match.groups()
+ self._add_commit_rtag(rtag_type, who)
+ # Remove Tested-by self, since few will take much notice
+ if (rtag_type == 'Tested-by' and
+ who.find(os.getenv('USER') + '@') != -1):
+ self._add_warn("Ignoring '%s'" % line)
+ elif rtag_type == 'Patch-cc':
+ self.commit.AddCc(who.split(','))
+ else:
+ out = [line]
+
+ # Suppress duplicate signoffs
+ elif signoff_match:
+ if (self.is_log or not self.commit or
+ self.commit.CheckDuplicateSignoff(signoff_match.group(1))):
+ out = [line]
+
+ # Well that means this is an ordinary line
+ else:
+ # Look for space before tab
+ mat = RE_SPACE_BEFORE_TAB.match(line)
+ if mat:
+ self._add_warn('Line %d/%d has space before tab' %
+ (self.linenum, mat.start()))
+
+ # OK, we have a valid non-blank line
+ out = [line]
+ self.linenum += 1
+ self.skip_blank = False
+
+ if diff_match:
+ self.cur_diff = diff_match.group(1)
+
+ # If this is quoted, keep recent lines
+ if not diff_match and self.linenum > 1 and line:
+ if line.startswith('>'):
+ if not self.was_quoted:
+ self._finalise_snippet()
+ self.recent_line = None
+ if not line_match:
+ self.recent_quoted.append(line)
+ self.was_quoted = True
+ self.recent_diff = self.cur_diff
+ else:
+ self.recent_unquoted.put(line)
+ self.was_quoted = False
+
+ if line_match:
+ self.recent_line = line_match.groups()
+
+ if self.state == STATE_DIFFS:
+ pass
+
+ # If this is the start of the diffs section, emit our tags and
+ # change log
+ elif line == '---':
+ self.state = STATE_DIFFS
+
+ # Output the tags (signoff first), then change list
+ out = []
+ log = self.series.MakeChangeLog(self.commit)
+ out += [line]
+ if self.commit:
+ out += self.commit.notes
+ out += [''] + log
+ elif self.found_test:
+ if not RE_ALLOWED_AFTER_TEST.match(line):
+ self.lines_after_test += 1
+
+ return out
+
+ def finalise(self):
+ """Close out processing of this patch stream"""
+ self._finalise_snippet()
+ self._finalise_change()
+ self._close_commit()
+ if self.lines_after_test:
+ self._add_warn('Found %d lines after TEST=' % self.lines_after_test)
+
+ def _write_message_id(self, outfd):
+ """Write the Message-Id into the output.
+
+ This is based on the Change-Id in the original patch, the version,
+ and the prefix.
+
+ Args:
+ outfd (io.IOBase): Output stream file object
+ """
+ if not self.commit.change_id:
+ return
+
+ # If the count is -1 we're testing, so use a fixed time
+ if self.commit.count == -1:
+ time_now = datetime.datetime(1999, 12, 31, 23, 59, 59)
+ else:
+ time_now = datetime.datetime.now()
+
+ # In theory there is email.utils.make_msgid() which would be nice
+ # to use, but it already produces something way too long and thus
+ # will produce ugly commit lines if someone throws this into
+ # a "Link:" tag in the final commit. So (sigh) roll our own.
+
+ # Start with the time; presumably we wouldn't send the same series
+ # with the same Change-Id at the exact same second.
+ parts = [time_now.strftime("%Y%m%d%H%M%S")]
+
+ # These seem like they would be nice to include.
+ if 'prefix' in self.series:
+ parts.append(self.series['prefix'])
+ if 'version' in self.series:
+ parts.append("v%s" % self.series['version'])
+
+ parts.append(str(self.commit.count + 1))
+
+ # The Change-Id must be last, right before the @
+ parts.append(self.commit.change_id)
+
+ # Join parts together with "." and write it out.
+ outfd.write('Message-Id: <%s@changeid>\n' % '.'.join(parts))
+
+ def process_stream(self, infd, outfd):
+ """Copy a stream from infd to outfd, filtering out unwanting things.
+
+ This is used to process patch files one at a time.
+
+ Args:
+ infd (io.IOBase): Input stream file object
+ outfd (io.IOBase): Output stream file object
+ """
+ # Extract the filename from each diff, for nice warnings
+ fname = None
+ last_fname = None
+ re_fname = re.compile('diff --git a/(.*) b/.*')
+
+ self._write_message_id(outfd)
+
+ while True:
+ line = infd.readline()
+ if not line:
+ break
+ out = self.process_line(line)
+
+ # Try to detect blank lines at EOF
+ for line in out:
+ match = re_fname.match(line)
+ if match:
+ last_fname = fname
+ fname = match.group(1)
+ if line == '+':
+ self.blank_count += 1
+ else:
+ if self.blank_count and (line == '-- ' or match):
+ self._add_warn("Found possible blank line(s) at end of file '%s'" %
+ last_fname)
+ outfd.write('+\n' * self.blank_count)
+ outfd.write(line + '\n')
+ self.blank_count = 0
+ self.finalise()
+
+def insert_tags(msg, tags_to_emit):
+ """Add extra tags to a commit message
+
+ The tags are added after an existing block of tags if found, otherwise at
+ the end.
+
+ Args:
+ msg (str): Commit message
+ tags_to_emit (list): List of tags to emit, each a str
+
+ Returns:
+ (str) new message
+ """
+ out = []
+ done = False
+ emit_tags = False
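+ # Emit the new tags just after the first run of existing tag/sign-off lines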
+ for line in msg.splitlines():
+ if not done:
+ signoff_match = RE_SIGNOFF.match(line)
+ tag_match = RE_TAG.match(line)
+ if tag_match or signoff_match:
+ emit_tags = True
+ if emit_tags and not tag_match and not signoff_match:
+ out += tags_to_emit
+ emit_tags = False
+ done = True
+ out.append(line)
+ if not done:
+ out.append('')
+ out += tags_to_emit
+ return '\n'.join(out)
+
+def get_list(commit_range, git_dir=None, count=None):
+ """Get a log of a list of comments
+
+ This returns the output of 'git log' for the selected commits
+
+ Args:
+ commit_range (str): Range of commits to count (e.g. 'HEAD..base')
+ git_dir (str): Path to git repository (None to use default)
+ count (int): Number of commits to list, or None for no limit
+
+ Returns:
+ str: String containing the contents of the git log
+ """
+ params = gitutil.LogCmd(commit_range, reverse=True, count=count,
+ git_dir=git_dir)
+ return command.RunPipe([params], capture=True).stdout
+
+def get_metadata_for_list(commit_range, git_dir=None, count=None,
+ series=None, allow_overwrite=False):
+ """Reads out patch series metadata from the commits
+
+ This does a 'git log' on the relevant commits and pulls out the tags we
+ are interested in.
+
+ Args:
+ commit_range (str): Range of commits to count (e.g. 'HEAD..base')
+ git_dir (str): Path to git repository (None to use default)
+ count (int): Number of commits to list, or None for no limit
+ series (Series): Object to add information into. By default a new series
+ is started.
+ allow_overwrite (bool): Allow tags to overwrite an existing tag
+
+ Returns:
+ Series: Object containing information about the commits.
+ """
+ if not series:
+ series = Series()
+ series.allow_overwrite = allow_overwrite
+ stdout = get_list(commit_range, git_dir, count)
+ pst = PatchStream(series, is_log=True)
+ for line in stdout.splitlines():
+ pst.process_line(line)
+ pst.finalise()
+ return series
+
+def get_metadata(branch, start, count):
+ """Reads out patch series metadata from the commits
+
+ This does a 'git log' on the relevant commits and pulls out the tags we
+ are interested in.
+
+ Args:
+ branch (str): Branch to use (None for current branch)
+ start (int): Commit to start from: 0=branch HEAD, 1=next one, etc.
+ count (int): Number of commits to list
+
+ Returns:
+ Series: Object containing information about the commits.
+ """
+ return get_metadata_for_list(
+ '%s~%d' % (branch if branch else 'HEAD', start), None, count)
+
+def get_metadata_for_test(text):
+ """Process metadata from a file containing a git log. Used for tests
+
+ Args:
+ text (str): Text of the git log output to process
+
+ Returns:
+ Series: Object containing information about the commits.
+ """
+ series = Series()
+ pst = PatchStream(series, is_log=True)
+ for line in text.splitlines():
+ pst.process_line(line)
+ pst.finalise()
+ return series
+
+def fix_patch(backup_dir, fname, series, cmt):
+ """Fix up a patch file, by adding/removing as required.
+
+ We remove our tags from the patch file, insert changes lists, etc.
+ The patch file is processed in place, and overwritten.
+
+ A backup file is put into backup_dir (if not None).
+
+ Args:
+ backup_dir (str): Path to directory to use to backup the file
+ fname (str): Filename to patch file to process
+ series (Series): Series information about this patch set
+ cmt (Commit): Commit object for this patch file
+
+ Return:
+ list: A list of errors, each str, or [] if all ok.
+ """
+ handle, tmpname = tempfile.mkstemp()
+ outfd = os.fdopen(handle, 'w', encoding='utf-8')
+ infd = open(fname, 'r', encoding='utf-8')
+ pst = PatchStream(series)
+ pst.commit = cmt
+ pst.process_stream(infd, outfd)
+ infd.close()
+ outfd.close()
+
+ # Create a backup file if required
+ if backup_dir:
+ shutil.copy(fname, os.path.join(backup_dir, os.path.basename(fname)))
+ shutil.move(tmpname, fname)
+ return cmt.warn
+
+def fix_patches(series, fnames):
+ """Fix up a list of patches identified by filenames
+
+ The patch files are processed in place, and overwritten.
+
+ Args:
+ series (Series): The Series object
+ fnames (list of str): List of patch files to process
+ """
+ # Current workflow creates patches, so we shouldn't need a backup
+ backup_dir = None #tempfile.mkdtemp('clean-patch')
+ count = 0
+ for fname in fnames:
+ cmt = series.commits[count]
+ cmt.patch = fname
+ cmt.count = count
+ result = fix_patch(backup_dir, fname, series, cmt)
+ if result:
+ print('%d warning%s for %s:' %
+ (len(result), 's' if len(result) > 1 else '', fname))
+ for warn in result:
+ print('\t%s' % warn)
+ print()
+ count += 1
+ print('Cleaned %d patch%s' % (count, 'es' if count > 1 else ''))
+
+def insert_cover_letter(fname, series, count):
+ """Inserts a cover letter with the required info into patch 0
+
+ Args:
+ fname (str): Input / output filename of the cover letter file
+ series (Series): Series object
+ count (int): Number of patches in the series
+ """
+ fil = open(fname, 'r')
+ lines = fil.readlines()
+ fil.close()
+
+ fil = open(fname, 'w')
+ text = series.cover
+ prefix = series.GetPatchPrefix()
+ for line in lines:
+ if line.startswith('Subject:'):
+ # if more than 10 or 100 patches, it should say 00/xx, 000/xxx, etc
+ zero_repeat = int(math.log10(count)) + 1
+ zero = '0' * zero_repeat
+ line = 'Subject: [%s %s/%d] %s\n' % (prefix, zero, count, text[0])
+
+ # Insert our cover letter
+ elif line.startswith('*** BLURB HERE ***'):
+ # First the blurb text
+ line = '\n'.join(text[1:]) + '\n'
+ if series.get('notes'):
+ line += '\n'.join(series.notes) + '\n'
+
+ # Now the change list
+ out = series.MakeChangeLog(None)
+ line += '\n' + '\n'.join(out)
+ fil.write(line)
+ fil.close()
diff --git a/roms/u-boot/tools/patman/patman b/roms/u-boot/tools/patman/patman
new file mode 120000
index 000000000..11a5d8e18
--- /dev/null
+++ b/roms/u-boot/tools/patman/patman
@@ -0,0 +1 @@
+main.py \ No newline at end of file
diff --git a/roms/u-boot/tools/patman/project.py b/roms/u-boot/tools/patman/project.py
new file mode 100644
index 000000000..2dfc30372
--- /dev/null
+++ b/roms/u-boot/tools/patman/project.py
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2012 The Chromium OS Authors.
+#
+
+import os.path
+
+from patman import gitutil
+
+def DetectProject():
+ """Autodetect the name of the current project.
+
+ This looks for signature files/directories that are unlikely to exist except
+ in the given project.
+
+ Returns:
+ The name of the project, like "linux" or "u-boot". Returns "unknown"
+ if we can't detect the project.
+ """
+ top_level = gitutil.GetTopLevel()
+
+ if os.path.exists(os.path.join(top_level, "include", "u-boot")):
+ return "u-boot"
+ elif os.path.exists(os.path.join(top_level, "kernel")):
+ return "linux"
+
+ return "unknown"
diff --git a/roms/u-boot/tools/patman/series.py b/roms/u-boot/tools/patman/series.py
new file mode 100644
index 000000000..8ae218d3a
--- /dev/null
+++ b/roms/u-boot/tools/patman/series.py
@@ -0,0 +1,325 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+
+from __future__ import print_function
+
+import collections
+import itertools
+import os
+
+from patman import get_maintainer
+from patman import gitutil
+from patman import settings
+from patman import terminal
+from patman import tools
+
+# Series-xxx tags that we understand
+valid_series = ['to', 'cc', 'version', 'changes', 'prefix', 'notes', 'name',
+ 'cover_cc', 'process_log', 'links', 'patchwork_url']
+
+class Series(dict):
+ """Holds information about a patch series, including all tags.
+
+ Vars:
+ cc: List of aliases/emails to Cc all patches to
+ commits: List of Commit objects, one for each patch
+ cover: List of lines in the cover letter
+ notes: List of lines in the notes
+ changes: (dict) List of changes for each version; the key is
+ the integer version number
+ allow_overwrite: Allow tags to overwrite an existing tag
+ """
+ def __init__(self):
+ self.cc = []
+ self.to = []
+ self.cover_cc = []
+ self.commits = []
+ self.cover = None
+ self.notes = []
+ self.changes = {}
+ self.allow_overwrite = False
+
+ # Written in MakeCcFile()
+ # key: name of patch file
+ # value: list of email addresses
+ self._generated_cc = {}
+
+ # These make us more like a dictionary
+ def __setattr__(self, name, value):
+ self[name] = value
+
+ def __getattr__(self, name):
+ return self[name]
+
+ def AddTag(self, commit, line, name, value):
+ """Add a new Series-xxx tag along with its value.
+
+ Args:
+ line: Source line containing tag (useful for debug/error messages)
+ name: Tag name (part after 'Series-')
+ value: Tag value (part after 'Series-xxx: ')
+
+ Returns:
+ String warning if something went wrong, else None
+ """
+ # If we already have it, then add to our list
+ name = name.replace('-', '_')
+ if name in self and not self.allow_overwrite:
+ values = value.split(',')
+ values = [v.strip() for v in values]
+ if type(self[name]) != type([]):
+ raise ValueError("In %s: line '%s': Cannot add another value "
+ "'%s' to series '%s'" %
+ (commit.hash, line, values, self[name]))
+ self[name] += values
+
+ # Otherwise just set the value
+ elif name in valid_series:
+ if name=="notes":
+ self[name] = [value]
+ else:
+ self[name] = value
+ else:
+ return ("In %s: line '%s': Unknown 'Series-%s': valid "
+ "options are %s" % (commit.hash, line, name,
+ ', '.join(valid_series)))
+ return None
+
+ def AddCommit(self, commit):
+ """Add a commit into our list of commits
+
+ We create a list of tags in the commit subject also.
+
+ Args:
+ commit: Commit object to add
+ """
+ commit.CheckTags()
+ self.commits.append(commit)
+
+ def ShowActions(self, args, cmd, process_tags):
+ """Show what actions we will/would perform
+
+ Args:
+ args: List of patch files we created
+ cmd: The git command we would have run
+ process_tags: Process tags as if they were aliases
+ """
+ to_set = set(gitutil.BuildEmailList(self.to))
+ cc_set = set(gitutil.BuildEmailList(self.cc))
+
+ col = terminal.Color()
+ print('Dry run, so not doing much. But I would do this:')
+ print()
+ print('Send a total of %d patch%s with %scover letter.' % (
+ len(args), '' if len(args) == 1 else 'es',
+ self.get('cover') and 'a ' or 'no '))
+
+ # TODO: Colour the patches according to whether they passed checks
+ for upto in range(len(args)):
+ commit = self.commits[upto]
+ print(col.Color(col.GREEN, ' %s' % args[upto]))
+ cc_list = list(self._generated_cc[commit.patch])
+ for email in sorted(set(cc_list) - to_set - cc_set):
+ if email is None:
+ email = col.Color(col.YELLOW, '<alias not found>')
+ if email:
+ print(' Cc: ', email)
+ print()
+ for item in sorted(to_set):
+ print('To:\t ', item)
+ for item in sorted(cc_set - to_set):
+ print('Cc:\t ', item)
+ print('Version: ', self.get('version'))
+ print('Prefix:\t ', self.get('prefix'))
+ if self.cover:
+ print('Cover: %d lines' % len(self.cover))
+ cover_cc = gitutil.BuildEmailList(self.get('cover_cc', ''))
+ all_ccs = itertools.chain(cover_cc, *self._generated_cc.values())
+ for email in sorted(set(all_ccs) - to_set - cc_set):
+ print(' Cc: ', email)
+ if cmd:
+ print('Git command: %s' % cmd)
+
+ def MakeChangeLog(self, commit):
+ """Create a list of changes for each version.
+
+ Return:
+ The change log as a list of strings, one per line
+
+ Changes in v4:
+ - Jog the dial back closer to the widget
+
+ Changes in v2:
+ - Fix the widget
+ - Jog the dial
+
+ If there are no new changes in a patch, a note will be added
+
+ (no changes since v2)
+
+ Changes in v2:
+ - Fix the widget
+ - Jog the dial
+ """
+ # Collect changes from the series and this commit
+ changes = collections.defaultdict(list)
+ for version, changelist in self.changes.items():
+ changes[version] += changelist
+ if commit:
+ for version, changelist in commit.changes.items():
+ changes[version] += [[commit, text] for text in changelist]
+
+ versions = sorted(changes, reverse=True)
+ newest_version = 1
+ if 'version' in self:
+ newest_version = max(newest_version, int(self.version))
+ if versions:
+ newest_version = max(newest_version, versions[0])
+
+ final = []
+ process_it = self.get('process_log', '').split(',')
+ process_it = [item.strip() for item in process_it]
+ need_blank = False
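+ # Walk the versions newest-first; each version with entries gets a 'Changes in vN:' heading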
+ for version in versions:
+ out = []
+ for this_commit, text in changes[version]:
+ if commit and this_commit != commit:
+ continue
+ if 'uniq' not in process_it or text not in out:
+ out.append(text)
+ if 'sort' in process_it:
+ out = sorted(out)
+ have_changes = len(out) > 0
+ line = 'Changes in v%d:' % version
+ if have_changes:
+ out.insert(0, line)
+ if version < newest_version and len(final) == 0:
+ out.insert(0, '')
+ out.insert(0, '(no changes since v%d)' % version)
+ newest_version = 0
+ # Only add a new line if we output something
+ if need_blank:
+ out.insert(0, '')
+ need_blank = False
+ final += out
+ need_blank = need_blank or have_changes
+
+ if len(final) > 0:
+ final.append('')
+ elif newest_version != 1:
+ final = ['(no changes since v1)', '']
+ return final
+
+ def DoChecks(self):
+ """Check that each version has a change log
+
+ Print an error if something is wrong.
+ """
+ col = terminal.Color()
+ if self.get('version'):
+ changes_copy = dict(self.changes)
+ for version in range(1, int(self.version) + 1):
+ if self.changes.get(version):
+ del changes_copy[version]
+ else:
+ if version > 1:
+ str = 'Change log missing for v%d' % version
+ print(col.Color(col.RED, str))
+ for version in changes_copy:
+ str = 'Change log for unknown version v%d' % version
+ print(col.Color(col.RED, str))
+ elif self.changes:
+ str = 'Change log exists, but no version is set'
+ print(col.Color(col.RED, str))
+
+ def MakeCcFile(self, process_tags, cover_fname, warn_on_error,
+ add_maintainers, limit):
+ """Make a cc file for us to use for per-commit Cc automation
+
+ Also stores in self._generated_cc to make ShowActions() faster.
+
+ Args:
+ process_tags: Process tags as if they were aliases
+ cover_fname: If non-None the name of the cover letter.
+ warn_on_error: True to print a warning when an alias fails to match,
+ False to ignore it.
+ add_maintainers: Either:
+ True/False to call the get_maintainers to CC maintainers
+ List of maintainers to include (for testing)
+ limit: Limit the length of the Cc list (None if no limit)
+ Return:
+ Filename of temp file created
+ """
+ col = terminal.Color()
+ # Look for commit tags (of the form 'xxx:' at the start of the subject)
+ fname = '/tmp/patman.%d' % os.getpid()
+ fd = open(fname, 'w', encoding='utf-8')
+ all_ccs = []
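+ # Write one line per patch: '<patch file> <addr>\0<addr>...', later read back by 'patman send --cc-cmd'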
+ for commit in self.commits:
+ cc = []
+ if process_tags:
+ cc += gitutil.BuildEmailList(commit.tags,
+ warn_on_error=warn_on_error)
+ cc += gitutil.BuildEmailList(commit.cc_list,
+ warn_on_error=warn_on_error)
+ if type(add_maintainers) == type(cc):
+ cc += add_maintainers
+ elif add_maintainers:
+ dir_list = [os.path.join(gitutil.GetTopLevel(), 'scripts')]
+ cc += get_maintainer.GetMaintainer(dir_list, commit.patch)
+ for x in set(cc) & set(settings.bounces):
+ print(col.Color(col.YELLOW, 'Skipping "%s"' % x))
+ cc = list(set(cc) - set(settings.bounces))
+ if limit is not None:
+ cc = cc[:limit]
+ all_ccs += cc
+ print(commit.patch, '\0'.join(sorted(set(cc))), file=fd)
+ self._generated_cc[commit.patch] = cc
+
+ if cover_fname:
+ cover_cc = gitutil.BuildEmailList(self.get('cover_cc', ''))
+ cover_cc = list(set(cover_cc + all_ccs))
+ if limit is not None:
+ cover_cc = cover_cc[:limit]
+ cc_list = '\0'.join([x for x in sorted(cover_cc)])
+ print(cover_fname, cc_list, file=fd)
+
+ fd.close()
+ return fname
+
+ def AddChange(self, version, commit, info):
+ """Add a new change line to a version.
+
+ This will later appear in the change log.
+
+ Args:
+ version: version number to add change list to
+ info: change line for this version
+ """
+ if not self.changes.get(version):
+ self.changes[version] = []
+ self.changes[version].append([commit, info])
+
+ def GetPatchPrefix(self):
+ """Get the patch version string
+
+ Return:
+ Patch string, like 'RFC PATCH v5' or just 'PATCH'
+ """
+ git_prefix = gitutil.GetDefaultSubjectPrefix()
+ if git_prefix:
+ git_prefix = '%s][' % git_prefix
+ else:
+ git_prefix = ''
+
+ version = ''
+ if self.get('version'):
+ version = ' v%s' % self['version']
+
+ # Get patch name prefix
+ prefix = ''
+ if self.get('prefix'):
+ prefix = '%s ' % self['prefix']
+ return '%s%sPATCH%s' % (git_prefix, prefix, version)
diff --git a/roms/u-boot/tools/patman/settings.py b/roms/u-boot/tools/patman/settings.py
new file mode 100644
index 000000000..13c1ee4f5
--- /dev/null
+++ b/roms/u-boot/tools/patman/settings.py
@@ -0,0 +1,362 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+
+try:
+ import configparser as ConfigParser
+except:
+ import ConfigParser
+
+import argparse
+import os
+import re
+
+from patman import command
+from patman import tools
+
+"""Default settings per-project.
+
+These are used by _ProjectConfigParser. Settings names should match
+the "dest" of the option parser from patman.py.
+"""
+_default_settings = {
+ "u-boot": {},
+ "linux": {
+ "process_tags": "False",
+ },
+ "gcc": {
+ "process_tags": "False",
+ "add_signoff": "False",
+ "check_patch": "False",
+ },
+}
+
+class _ProjectConfigParser(ConfigParser.SafeConfigParser):
+ """ConfigParser that handles projects.
+
+ There are two main goals of this class:
+ - Load project-specific default settings.
+ - Merge general default settings/aliases with project-specific ones.
+
+ # Sample config used for tests below...
+ >>> from io import StringIO
+ >>> sample_config = '''
+ ... [alias]
+ ... me: Peter P. <likesspiders@example.com>
+ ... enemies: Evil <evil@example.com>
+ ...
+ ... [sm_alias]
+ ... enemies: Green G. <ugly@example.com>
+ ...
+ ... [sm2_alias]
+ ... enemies: Doc O. <pus@example.com>
+ ...
+ ... [settings]
+ ... am_hero: True
+ ... '''
+
+ # Check to make sure that bogus project gets general alias.
+ >>> config = _ProjectConfigParser("zzz")
+ >>> config.readfp(StringIO(sample_config))
+ >>> str(config.get("alias", "enemies"))
+ 'Evil <evil@example.com>'
+
+ # Check to make sure that alias gets overridden by project.
+ >>> config = _ProjectConfigParser("sm")
+ >>> config.readfp(StringIO(sample_config))
+ >>> str(config.get("alias", "enemies"))
+ 'Green G. <ugly@example.com>'
+
+ # Check to make sure that settings get merged with project.
+ >>> config = _ProjectConfigParser("linux")
+ >>> config.readfp(StringIO(sample_config))
+ >>> sorted((str(a), str(b)) for (a, b) in config.items("settings"))
+ [('am_hero', 'True'), ('process_tags', 'False')]
+
+ # Check to make sure that settings works with unknown project.
+ >>> config = _ProjectConfigParser("unknown")
+ >>> config.readfp(StringIO(sample_config))
+ >>> sorted((str(a), str(b)) for (a, b) in config.items("settings"))
+ [('am_hero', 'True')]
+ """
+ def __init__(self, project_name):
+ """Construct _ProjectConfigParser.
+
+ In addition to standard SafeConfigParser initialization, this also loads
+ project defaults.
+
+ Args:
+ project_name: The name of the project.
+ """
+ self._project_name = project_name
+ ConfigParser.SafeConfigParser.__init__(self)
+
+ # Update the project settings in the config based on
+ # the _default_settings global.
+ project_settings = "%s_settings" % project_name
+ if not self.has_section(project_settings):
+ self.add_section(project_settings)
+ project_defaults = _default_settings.get(project_name, {})
+ for setting_name, setting_value in project_defaults.items():
+ self.set(project_settings, setting_name, setting_value)
+
+ def get(self, section, option, *args, **kwargs):
+ """Extend SafeConfigParser to try project_section before section.
+
+ Args:
+ See SafeConfigParser.
+ Returns:
+ See SafeConfigParser.
+ """
+ try:
+ val = ConfigParser.SafeConfigParser.get(
+ self, "%s_%s" % (self._project_name, section), option,
+ *args, **kwargs
+ )
+ except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
+ val = ConfigParser.SafeConfigParser.get(
+ self, section, option, *args, **kwargs
+ )
+ return val
+
+ def items(self, section, *args, **kwargs):
+ """Extend SafeConfigParser to add project_section to section.
+
+ Args:
+ See SafeConfigParser.
+ Returns:
+ See SafeConfigParser.
+ """
+ project_items = []
+ has_project_section = False
+ top_items = []
+
+ # Get items from the project section
+ try:
+ project_items = ConfigParser.SafeConfigParser.items(
+ self, "%s_%s" % (self._project_name, section), *args, **kwargs
+ )
+ has_project_section = True
+ except ConfigParser.NoSectionError:
+ pass
+
+ # Get top-level items
+ try:
+ top_items = ConfigParser.SafeConfigParser.items(
+ self, section, *args, **kwargs
+ )
+ except ConfigParser.NoSectionError:
+            # If neither section exists, re-raise the error
+ if not has_project_section:
+ raise
+
+ item_dict = dict(top_items)
+ item_dict.update(project_items)
+ return {(item, val) for item, val in item_dict.items()}
+
+def ReadGitAliases(fname):
+ """Read a git alias file. This is in the form used by git:
+
+ alias uboot u-boot@lists.denx.de
+ alias wd Wolfgang Denk <wd@denx.de>
+
+ Args:
+ fname: Filename to read
+ """
+ try:
+ fd = open(fname, 'r', encoding='utf-8')
+ except IOError:
+ print("Warning: Cannot find alias file '%s'" % fname)
+ return
+
+    re_line = re.compile(r'alias\s+(\S+)\s+(.*)')
+ for line in fd.readlines():
+ line = line.strip()
+ if not line or line[0] == '#':
+ continue
+
+ m = re_line.match(line)
+ if not m:
+ print("Warning: Alias file line '%s' not understood" % line)
+ continue
+
+        values = alias.get(m.group(1), [])
+        for item in m.group(2).split(','):
+            item = item.strip()
+            if item:
+                values.append(item)
+        alias[m.group(1)] = values
+
+ fd.close()
+
+def CreatePatmanConfigFile(gitutil, config_fname):
+ """Creates a config file under $(HOME)/.patman if it can't find one.
+
+    Args:
+        gitutil: gitutil module, used to look up the user's name and email
+        config_fname: Default config filename i.e., $(HOME)/.patman
+
+ Returns:
+ None
+ """
+ name = gitutil.GetDefaultUserName()
+    if name is None:
+        name = input("Enter name: ")
+
+    email = gitutil.GetDefaultUserEmail()
+
+    if email is None:
+        email = input("Enter email: ")
+
+ try:
+ f = open(config_fname, 'w')
+ except IOError:
+ print("Couldn't create patman config file\n")
+ raise
+
+ print('''[alias]
+me: %s <%s>
+
+[bounces]
+nxp = Zhikang Zhang <zhikang.zhang@nxp.com>
+''' % (name, email), file=f)
+    f.close()
+
+def _UpdateDefaults(main_parser, config):
+    """Update the given ArgumentParser defaults based on config.
+
+ We'll walk through all of the settings from all parsers.
+ For each setting we'll look for a default in the option parser.
+ If it's found we'll update the option parser default.
+
+ The idea here is that the .patman file should be able to update
+ defaults but that command line flags should still have the final
+ say.
+
+ Args:
+        main_parser: An instance of an ArgumentParser whose defaults will be
+ updated.
+ config: An instance of _ProjectConfigParser that we will query
+ for settings.
+ """
+ # Find all the parsers and subparsers
+ parsers = [main_parser]
+ parsers += [subparser for action in main_parser._actions
+ if isinstance(action, argparse._SubParsersAction)
+ for _, subparser in action.choices.items()]
+
+ # Collect the defaults from each parser
+ defaults = {}
+ for parser in parsers:
+ pdefs = parser.parse_known_args()[0]
+ defaults.update(vars(pdefs))
+
+ # Go through the settings and collect defaults
+ for name, val in config.items('settings'):
+ if name in defaults:
+ default_val = defaults[name]
+ if isinstance(default_val, bool):
+ val = config.getboolean('settings', name)
+ elif isinstance(default_val, int):
+ val = config.getint('settings', name)
+ elif isinstance(default_val, str):
+ val = config.get('settings', name)
+ defaults[name] = val
+ else:
+ print("WARNING: Unknown setting %s" % name)
+
+ # Set all the defaults (this propagates through all subparsers)
+ main_parser.set_defaults(**defaults)
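+
+# For illustration, a ~/.patman file containing the snippet below would change
+# the default of the option whose 'dest' is process_tags (one of the settings
+# listed in _default_settings above); since the parser default is a bool, the
+# value is read back with getboolean():
+#
+#   [settings]
+#   process_tags: False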
+
+def _ReadAliasFile(fname):
+ """Read in the U-Boot git alias file if it exists.
+
+ Args:
+ fname: Filename to read.
+ """
+ if os.path.exists(fname):
+ bad_line = None
+ with open(fname, encoding='utf-8') as fd:
+ linenum = 0
+ for line in fd:
+ linenum += 1
+ line = line.strip()
+ if not line or line.startswith('#'):
+ continue
+ words = line.split(None, 2)
+ if len(words) < 3 or words[0] != 'alias':
+ if not bad_line:
+ bad_line = "%s:%d:Invalid line '%s'" % (fname, linenum,
+ line)
+ continue
+ alias[words[1]] = [s.strip() for s in words[2].split(',')]
+ if bad_line:
+ print(bad_line)
+
+def _ReadBouncesFile(fname):
+ """Read in the bounces file if it exists
+
+ Args:
+ fname: Filename to read.
+ """
+ if os.path.exists(fname):
+ with open(fname) as fd:
+ for line in fd:
+ if line.startswith('#'):
+ continue
+ bounces.add(line.strip())
+
+def GetItems(config, section):
+ """Get the items from a section of the config.
+
+ Args:
+ config: _ProjectConfigParser object containing settings
+ section: name of section to retrieve
+
+ Returns:
+ List of (name, value) tuples for the section
+ """
+    try:
+        return config.items(section)
+    except ConfigParser.NoSectionError:
+        return []
+
+def Setup(gitutil, parser, project_name, config_fname=''):
+ """Set up the settings module by reading config files.
+
+    Args:
+        gitutil: gitutil module to use (to look up the user's name and email)
+        parser: The parser to update
+ project_name: Name of project that we're working on; we'll look
+ for sections named "project_section" as well.
+ config_fname: Config filename to read ('' for default)
+ """
+ # First read the git alias file if available
+ _ReadAliasFile('doc/git-mailrc')
+ config = _ProjectConfigParser(project_name)
+ if config_fname == '':
+ config_fname = '%s/.patman' % os.getenv('HOME')
+
+ if not os.path.exists(config_fname):
+        print("No config file found: %s\nCreating one...\n" % config_fname)
+ CreatePatmanConfigFile(gitutil, config_fname)
+
+ config.read(config_fname)
+
+ for name, value in GetItems(config, 'alias'):
+ alias[name] = value.split(',')
+
+ _ReadBouncesFile('doc/bounces')
+ for name, value in GetItems(config, 'bounces'):
+ bounces.add(value)
+
+ _UpdateDefaults(parser, config)
+
+# These are the aliases we understand, indexed by alias. Each member is a list.
+alias = {}
+bounces = set()
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
diff --git a/roms/u-boot/tools/patman/setup.py b/roms/u-boot/tools/patman/setup.py
new file mode 100644
index 000000000..43fdc00ce
--- /dev/null
+++ b/roms/u-boot/tools/patman/setup.py
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0+
+
+from distutils.core import setup
+setup(name='patman',
+ version='1.0',
+ license='GPL-2.0+',
+ scripts=['patman'],
+ packages=['patman'],
+ package_dir={'patman': ''},
+ package_data={'patman': ['README']},
+ classifiers=['Environment :: Console',
+ 'Topic :: Software Development'])
diff --git a/roms/u-boot/tools/patman/status.py b/roms/u-boot/tools/patman/status.py
new file mode 100644
index 000000000..f3fbc661b
--- /dev/null
+++ b/roms/u-boot/tools/patman/status.py
@@ -0,0 +1,487 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright 2020 Google LLC
+#
+"""Talks to the patchwork service to figure out what patches have been reviewed
+and commented on. Provides a way to display review tags and comments.
+Allows creation of a new branch based on the old one, but with the review tags
+collected from patchwork.
+"""
+
+import collections
+import concurrent.futures
+from itertools import repeat
+import re
+
+import pygit2
+import requests
+
+from patman import patchstream
+from patman.patchstream import PatchStream
+from patman import terminal
+from patman import tout
+
+# Patches which are part of a multi-patch series are shown with a prefix like
+# [prefix, version, sequence], for example '[RFC, v2, 3/5]'. All but the last
+# part is optional. This decodes the string into groups. For single patches
+# the [] part is not present:
+# Groups: (ignore, ignore, ignore, prefix, version, sequence, subject)
+RE_PATCH = re.compile(r'(\[(((.*),)?(.*),)?(.*)\]\s)?(.*)$')
+
+# This decodes the sequence string into a patch number and patch count
+RE_SEQ = re.compile(r'(\d+)/(\d+)')
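+
+# For illustration (made-up subject): RE_PATCH splits
+# '[RFC,v2,3/5] video: Add a wibble' into prefix 'RFC', version 'v2',
+# sequence '3/5' and subject 'video: Add a wibble'; RE_SEQ then identifies it
+# as patch 3 of a 5-patch series.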
+
+def to_int(vals):
+ """Convert a list of strings into integers, using 0 if not an integer
+
+ Args:
+ vals (list): List of strings
+
+ Returns:
+ list: List of integers, one for each input string
+ """
+ out = [int(val) if val.isdigit() else 0 for val in vals]
+ return out
+
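+# A quick example of the helper above (illustrative):
+#   to_int(['3', '', 'x', '12']) returns [3, 0, 0, 12]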
+
+class Patch(dict):
+ """Models a patch in patchwork
+
+ This class records information obtained from patchwork
+
+ Some of this information comes from the 'Patch' column:
+
+ [RFC,v2,1/3] dm: Driver and uclass changes for tiny-dm
+
+ This shows the prefix, version, seq, count and subject.
+
+ The other properties come from other columns in the display.
+
+ Properties:
+ pid (str): ID of the patch (typically an integer)
+ seq (int): Sequence number within series (1=first) parsed from sequence
+ string
+ count (int): Number of patches in series, parsed from sequence string
+ raw_subject (str): Entire subject line, e.g.
+ "[1/2,v2] efi_loader: Sort header file ordering"
+ prefix (str): Prefix string or None (e.g. 'RFC')
+ version (str): Version string or None (e.g. 'v2')
+ subject (str): Patch subject with [..] part removed (same as commit
+ subject)
+ """
+ def __init__(self, pid):
+ super().__init__()
+ self.id = pid # Use 'id' to match what the Rest API provides
+ self.seq = None
+ self.count = None
+ self.prefix = None
+ self.version = None
+ self.raw_subject = None
+ self.subject = None
+
+ # These make us more like a dictionary
+ def __setattr__(self, name, value):
+ self[name] = value
+
+ def __getattr__(self, name):
+ return self[name]
+
+ def __hash__(self):
+ return hash(frozenset(self.items()))
+
+ def __str__(self):
+ return self.raw_subject
+
+ def parse_subject(self, raw_subject):
+ """Parse the subject of a patch into its component parts
+
+ See RE_PATCH for details. The parsed info is placed into seq, count,
+ prefix, version, subject
+
+ Args:
+ raw_subject (str): Subject string to parse
+
+ Raises:
+ ValueError: the subject cannot be parsed
+ """
+ self.raw_subject = raw_subject.strip()
+ mat = RE_PATCH.search(raw_subject.strip())
+ if not mat:
+ raise ValueError("Cannot parse subject '%s'" % raw_subject)
+ self.prefix, self.version, seq_info, self.subject = mat.groups()[3:]
+ mat_seq = RE_SEQ.match(seq_info) if seq_info else False
+ if mat_seq is None:
+ self.version = seq_info
+ seq_info = None
+ if self.version and not self.version.startswith('v'):
+ self.prefix = self.version
+ self.version = None
+ if seq_info:
+ if mat_seq:
+ self.seq = int(mat_seq.group(1))
+ self.count = int(mat_seq.group(2))
+ else:
+ self.seq = 1
+ self.count = 1
+
+
+class Review:
+ """Represents a single review email collected in Patchwork
+
+ Patches can attract multiple reviews. Each consists of an author/date and
+ a variable number of 'snippets', which are groups of quoted and unquoted
+ text.
+ """
+ def __init__(self, meta, snippets):
+ """Create new Review object
+
+ Args:
+ meta (str): Text containing review author and date
+            snippets (list): List of snippets in the review, each a list of text
+ lines
+ """
+ self.meta = ' : '.join([line for line in meta.splitlines() if line])
+ self.snippets = snippets
+
+def compare_with_series(series, patches):
+ """Compare a list of patches with a series it came from
+
+ This prints any problems as warnings
+
+ Args:
+ series (Series): Series to compare against
+ patches (:type: list of Patch): list of Patch objects to compare with
+
+ Returns:
+ tuple
+ dict:
+ key: Commit number (0...n-1)
+ value: Patch object for that commit
+ dict:
+ key: Patch number (0...n-1)
+                value: Commit object for that patch
+            list of str: warnings for any problems found
+ """
+ # Check the names match
+ warnings = []
+ patch_for_commit = {}
+ all_patches = set(patches)
+ for seq, cmt in enumerate(series.commits):
+ pmatch = [p for p in all_patches if p.subject == cmt.subject]
+ if len(pmatch) == 1:
+ patch_for_commit[seq] = pmatch[0]
+ all_patches.remove(pmatch[0])
+ elif len(pmatch) > 1:
+ warnings.append("Multiple patches match commit %d ('%s'):\n %s" %
+ (seq + 1, cmt.subject,
+ '\n '.join([p.subject for p in pmatch])))
+ else:
+ warnings.append("Cannot find patch for commit %d ('%s')" %
+ (seq + 1, cmt.subject))
+
+
+    # Now check the other direction: find a commit for each patch
+ commit_for_patch = {}
+ all_commits = set(series.commits)
+ for seq, patch in enumerate(patches):
+ cmatch = [c for c in all_commits if c.subject == patch.subject]
+ if len(cmatch) == 1:
+ commit_for_patch[seq] = cmatch[0]
+ all_commits.remove(cmatch[0])
+ elif len(cmatch) > 1:
+ warnings.append("Multiple commits match patch %d ('%s'):\n %s" %
+ (seq + 1, patch.subject,
+ '\n '.join([c.subject for c in cmatch])))
+ else:
+ warnings.append("Cannot find commit for patch %d ('%s')" %
+ (seq + 1, patch.subject))
+
+ return patch_for_commit, commit_for_patch, warnings
+
+def call_rest_api(url, subpath):
+ """Call the patchwork API and return the result as JSON
+
+ Args:
+ url (str): URL of patchwork server, e.g. 'https://patchwork.ozlabs.org'
+ subpath (str): URL subpath to use
+
+ Returns:
+ dict: Json result
+
+ Raises:
+ ValueError: the URL could not be read
+ """
+ full_url = '%s/api/1.2/%s' % (url, subpath)
+ response = requests.get(full_url)
+ if response.status_code != 200:
+ raise ValueError("Could not read URL '%s'" % full_url)
+ return response.json()
+
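+# For illustration, with url='https://patchwork.ozlabs.org' and
+# subpath='series/123456/' (series ID made up), call_rest_api() above fetches
+# https://patchwork.ozlabs.org/api/1.2/series/123456/ and returns the JSON.
+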
+def collect_patches(series, series_id, url, rest_api=call_rest_api):
+ """Collect patch information about a series from patchwork
+
+ Uses the Patchwork REST API to collect information provided by patchwork
+ about the status of each patch.
+
+ Args:
+ series (Series): Series object corresponding to the local branch
+ containing the series
+ series_id (str): Patch series ID number
+ url (str): URL of patchwork server, e.g. 'https://patchwork.ozlabs.org'
+ rest_api (function): API function to call to access Patchwork, for
+ testing
+
+ Returns:
+ list: List of patches sorted by sequence number, each a Patch object
+
+ Raises:
+ ValueError: if the URL could not be read or the web page does not follow
+ the expected structure
+ """
+ data = rest_api(url, 'series/%s/' % series_id)
+
+ # Get all the rows, which are patches
+ patch_dict = data['patches']
+ count = len(patch_dict)
+ num_commits = len(series.commits)
+ if count != num_commits:
+ tout.Warning('Warning: Patchwork reports %d patches, series has %d' %
+ (count, num_commits))
+
+ patches = []
+
+ # Work through each row (patch) one at a time, collecting the information
+ warn_count = 0
+ for pw_patch in patch_dict:
+ patch = Patch(pw_patch['id'])
+ patch.parse_subject(pw_patch['name'])
+ patches.append(patch)
+ if warn_count > 1:
+ tout.Warning(' (total of %d warnings)' % warn_count)
+
+ # Sort patches by patch number
+ patches = sorted(patches, key=lambda x: x.seq)
+ return patches
+
+def find_new_responses(new_rtag_list, review_list, seq, cmt, patch, url,
+ rest_api=call_rest_api):
+ """Find new rtags collected by patchwork that we don't know about
+
+ This is designed to be run in parallel, once for each commit/patch
+
+ Args:
+ new_rtag_list (list): New rtags are written to new_rtag_list[seq]
+ list, each a dict:
+ key: Response tag (e.g. 'Reviewed-by')
+ value: Set of people who gave that response, each a name/email
+ string
+        review_list (list): New reviews are written to review_list[seq],
+            a list of reviews for the patch, each a Review
+ seq (int): Position in new_rtag_list to update
+ cmt (Commit): Commit object for this commit
+ patch (Patch): Corresponding Patch object for this patch
+ url (str): URL of patchwork server, e.g. 'https://patchwork.ozlabs.org'
+ rest_api (function): API function to call to access Patchwork, for
+ testing
+ """
+ if not patch:
+ return
+
+ # Get the content for the patch email itself as well as all comments
+ data = rest_api(url, 'patches/%s/' % patch.id)
+ pstrm = PatchStream.process_text(data['content'], True)
+
+ rtags = collections.defaultdict(set)
+ for response, people in pstrm.commit.rtags.items():
+ rtags[response].update(people)
+
+ data = rest_api(url, 'patches/%s/comments/' % patch.id)
+
+ reviews = []
+ for comment in data:
+ pstrm = PatchStream.process_text(comment['content'], True)
+ if pstrm.snippets:
+ submitter = comment['submitter']
+ person = '%s <%s>' % (submitter['name'], submitter['email'])
+ reviews.append(Review(person, pstrm.snippets))
+ for response, people in pstrm.commit.rtags.items():
+ rtags[response].update(people)
+
+ # Find the tags that are not in the commit
+ new_rtags = collections.defaultdict(set)
+ base_rtags = cmt.rtags
+ for tag, people in rtags.items():
+ for who in people:
+ is_new = (tag not in base_rtags or
+ who not in base_rtags[tag])
+ if is_new:
+ new_rtags[tag].add(who)
+ new_rtag_list[seq] = new_rtags
+ review_list[seq] = reviews
+
+def show_responses(rtags, indent, is_new):
+ """Show rtags collected
+
+ Args:
+ rtags (dict): review tags to show
+ key: Response tag (e.g. 'Reviewed-by')
+ value: Set of people who gave that response, each a name/email string
+ indent (str): Indentation string to write before each line
+ is_new (bool): True if this output should be highlighted
+
+ Returns:
+ int: Number of review tags displayed
+ """
+ col = terminal.Color()
+ count = 0
+ for tag in sorted(rtags.keys()):
+ people = rtags[tag]
+ for who in sorted(people):
+ terminal.Print(indent + '%s %s: ' % ('+' if is_new else ' ', tag),
+ newline=False, colour=col.GREEN, bright=is_new)
+ terminal.Print(who, colour=col.WHITE, bright=is_new)
+ count += 1
+ return count
+
+def create_branch(series, new_rtag_list, branch, dest_branch, overwrite,
+ repo=None):
+ """Create a new branch with review tags added
+
+ Args:
+ series (Series): Series object for the existing branch
+ new_rtag_list (list): List of review tags to add, one for each commit,
+ each a dict:
+ key: Response tag (e.g. 'Reviewed-by')
+ value: Set of people who gave that response, each a name/email
+ string
+ branch (str): Existing branch to update
+ dest_branch (str): Name of new branch to create
+ overwrite (bool): True to force overwriting dest_branch if it exists
+ repo (pygit2.Repository): Repo to use (use None unless testing)
+
+ Returns:
+ int: Total number of review tags added across all commits
+
+ Raises:
+ ValueError: if the destination branch name is the same as the original
+ branch, or it already exists and @overwrite is False
+ """
+ if branch == dest_branch:
+ raise ValueError(
+ 'Destination branch must not be the same as the original branch')
+ if not repo:
+ repo = pygit2.Repository('.')
+ count = len(series.commits)
+ new_br = repo.branches.get(dest_branch)
+ if new_br:
+ if not overwrite:
+ raise ValueError("Branch '%s' already exists (-f to overwrite)" %
+ dest_branch)
+ new_br.delete()
+ if not branch:
+ branch = 'HEAD'
+ target = repo.revparse_single('%s~%d' % (branch, count))
+ repo.branches.local.create(dest_branch, target)
+
+ num_added = 0
+ for seq in range(count):
+ parent = repo.branches.get(dest_branch)
+ cherry = repo.revparse_single('%s~%d' % (branch, count - seq - 1))
+
+ repo.merge_base(cherry.oid, parent.target)
+ base_tree = cherry.parents[0].tree
+
+ index = repo.merge_trees(base_tree, parent, cherry)
+ tree_id = index.write_tree(repo)
+
+ lines = []
+ if new_rtag_list[seq]:
+ for tag, people in new_rtag_list[seq].items():
+ for who in people:
+ lines.append('%s: %s' % (tag, who))
+ num_added += 1
+ message = patchstream.insert_tags(cherry.message.rstrip(),
+ sorted(lines))
+
+ repo.create_commit(
+ parent.name, cherry.author, cherry.committer, message, tree_id,
+ [parent.target])
+ return num_added
+
+def check_patchwork_status(series, series_id, branch, dest_branch, force,
+ show_comments, url, rest_api=call_rest_api,
+ test_repo=None):
+ """Check the status of a series on Patchwork
+
+ This finds review tags and comments for a series in Patchwork, displaying
+ them to show what is new compared to the local series.
+
+ Args:
+ series (Series): Series object for the existing branch
+ series_id (str): Patch series ID number
+ branch (str): Existing branch to update, or None
+ dest_branch (str): Name of new branch to create, or None
+ force (bool): True to force overwriting dest_branch if it exists
+ show_comments (bool): True to show the comments on each patch
+ url (str): URL of patchwork server, e.g. 'https://patchwork.ozlabs.org'
+ rest_api (function): API function to call to access Patchwork, for
+ testing
+ test_repo (pygit2.Repository): Repo to use (use None unless testing)
+ """
+ patches = collect_patches(series, series_id, url, rest_api)
+ col = terminal.Color()
+ count = len(series.commits)
+ new_rtag_list = [None] * count
+ review_list = [None] * count
+
+ patch_for_commit, _, warnings = compare_with_series(series, patches)
+ for warn in warnings:
+ tout.Warning(warn)
+
+ patch_list = [patch_for_commit.get(c) for c in range(len(series.commits))]
+
+ with concurrent.futures.ThreadPoolExecutor(max_workers=16) as executor:
+ futures = executor.map(
+ find_new_responses, repeat(new_rtag_list), repeat(review_list),
+ range(count), series.commits, patch_list, repeat(url),
+ repeat(rest_api))
+ for fresponse in futures:
+ if fresponse:
+ raise fresponse.exception()
+
+ num_to_add = 0
+ for seq, cmt in enumerate(series.commits):
+ patch = patch_for_commit.get(seq)
+ if not patch:
+ continue
+ terminal.Print('%3d %s' % (patch.seq, patch.subject[:50]),
+ colour=col.BLUE)
+ cmt = series.commits[seq]
+ base_rtags = cmt.rtags
+ new_rtags = new_rtag_list[seq]
+
+ indent = ' ' * 2
+ show_responses(base_rtags, indent, False)
+ num_to_add += show_responses(new_rtags, indent, True)
+ if show_comments:
+ for review in review_list[seq]:
+ terminal.Print('Review: %s' % review.meta, colour=col.RED)
+ for snippet in review.snippets:
+ for line in snippet:
+ quoted = line.startswith('>')
+ terminal.Print(' %s' % line,
+ colour=col.MAGENTA if quoted else None)
+ terminal.Print()
+
+ terminal.Print("%d new response%s available in patchwork%s" %
+ (num_to_add, 's' if num_to_add != 1 else '',
+ '' if dest_branch
+ else ' (use -d to write them to a new branch)'))
+
+ if dest_branch:
+ num_added = create_branch(series, new_rtag_list, branch,
+ dest_branch, force, test_repo)
+ terminal.Print(
+ "%d response%s added from patchwork into new branch '%s'" %
+ (num_added, 's' if num_added != 1 else '', dest_branch))
diff --git a/roms/u-boot/tools/patman/terminal.py b/roms/u-boot/tools/patman/terminal.py
new file mode 100644
index 000000000..9be03b3a6
--- /dev/null
+++ b/roms/u-boot/tools/patman/terminal.py
@@ -0,0 +1,270 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+
+"""Terminal utilities
+
+This module handles terminal interaction including ANSI color codes.
+"""
+
+import os
+import re
+import shutil
+import sys
+
+# Selection of when we want our output to be colored
+COLOR_IF_TERMINAL, COLOR_ALWAYS, COLOR_NEVER = range(3)
+
+# Initially, we are set up to print to the terminal
+print_test_mode = False
+print_test_list = []
+
+# The length of the last line printed without a newline
+last_print_len = None
+
+# credit:
+# stackoverflow.com/questions/14693701/how-can-i-remove-the-ansi-escape-sequences-from-a-string-in-python
+ansi_escape = re.compile(r'\x1b(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
+
+class PrintLine:
+ """A line of text output
+
+ Members:
+ text: Text line that was printed
+ newline: True to output a newline after the text
+        colour: Text colour to use
+        bright: True to use the bright variant of the colour
+ """
+ def __init__(self, text, colour, newline=True, bright=True):
+ self.text = text
+ self.newline = newline
+ self.colour = colour
+ self.bright = bright
+
+ def __eq__(self, other):
+ return (self.text == other.text and
+ self.newline == other.newline and
+ self.colour == other.colour and
+ self.bright == other.bright)
+
+ def __str__(self):
+ return ("newline=%s, colour=%s, bright=%d, text='%s'" %
+ (self.newline, self.colour, self.bright, self.text))
+
+
+def CalcAsciiLen(text):
+ """Calculate the length of a string, ignoring any ANSI sequences
+
+ When displayed on a terminal, ANSI sequences don't take any space, so we
+ need to ignore them when calculating the length of a string.
+
+ Args:
+ text: Text to check
+
+ Returns:
+ Length of text, after skipping ANSI sequences
+
+ >>> col = Color(COLOR_ALWAYS)
+ >>> text = col.Color(Color.RED, 'abc')
+ >>> len(text)
+ 14
+ >>> CalcAsciiLen(text)
+ 3
+ >>>
+ >>> text += 'def'
+ >>> CalcAsciiLen(text)
+ 6
+ >>> text += col.Color(Color.RED, 'abc')
+ >>> CalcAsciiLen(text)
+ 9
+ """
+ result = ansi_escape.sub('', text)
+ return len(result)
+
+def TrimAsciiLen(text, size):
+ """Trim a string containing ANSI sequences to the given ASCII length
+
+ The string is trimmed with ANSI sequences being ignored for the length
+ calculation.
+
+ >>> col = Color(COLOR_ALWAYS)
+ >>> text = col.Color(Color.RED, 'abc')
+ >>> len(text)
+ 14
+ >>> CalcAsciiLen(TrimAsciiLen(text, 4))
+ 3
+ >>> CalcAsciiLen(TrimAsciiLen(text, 2))
+ 2
+ >>> text += 'def'
+ >>> CalcAsciiLen(TrimAsciiLen(text, 4))
+ 4
+ >>> text += col.Color(Color.RED, 'ghi')
+ >>> CalcAsciiLen(TrimAsciiLen(text, 7))
+ 7
+ """
+ if CalcAsciiLen(text) < size:
+ return text
+ pos = 0
+ out = ''
+ left = size
+
+ # Work through each ANSI sequence in turn
+ for m in ansi_escape.finditer(text):
+ # Find the text before the sequence and add it to our string, making
+ # sure it doesn't overflow
+ before = text[pos:m.start()]
+ toadd = before[:left]
+ out += toadd
+
+ # Figure out how much non-ANSI space we have left
+ left -= len(toadd)
+
+ # Add the ANSI sequence and move to the position immediately after it
+ out += m.group()
+ pos = m.start() + len(m.group())
+
+ # Deal with text after the last ANSI sequence
+ after = text[pos:]
+ toadd = after[:left]
+ out += toadd
+
+ return out
+
+
+def Print(text='', newline=True, colour=None, limit_to_line=False, bright=True):
+ """Handle a line of output to the terminal.
+
+ In test mode this is recorded in a list. Otherwise it is output to the
+ terminal.
+
+ Args:
+ text: Text to print
+ newline: True to add a new line at the end of the text
+        colour: Colour to use for the text
+        limit_to_line: True to trim the text to the terminal width (only used
+            when newline is False)
+        bright: True to use the bright variant of the colour
+ """
+ global last_print_len
+
+ if print_test_mode:
+ print_test_list.append(PrintLine(text, colour, newline, bright))
+ else:
+ if colour:
+ col = Color()
+ text = col.Color(colour, text, bright=bright)
+ if newline:
+ print(text)
+ last_print_len = None
+ else:
+ if limit_to_line:
+ cols = shutil.get_terminal_size().columns
+ text = TrimAsciiLen(text, cols)
+ print(text, end='', flush=True)
+ last_print_len = CalcAsciiLen(text)
+
+def PrintClear():
+    """Clear a previously printed line that had no newline"""
+ global last_print_len
+
+ if last_print_len:
+ print('\r%s\r' % (' '* last_print_len), end='', flush=True)
+ last_print_len = None
+
+def SetPrintTestMode(enable=True):
+ """Go into test mode, where all printing is recorded"""
+ global print_test_mode
+
+ print_test_mode = enable
+ GetPrintTestLines()
+
+def GetPrintTestLines():
+ """Get a list of all lines output through Print()
+
+ Returns:
+ A list of PrintLine objects
+ """
+ global print_test_list
+
+ ret = print_test_list
+ print_test_list = []
+ return ret
+
+def EchoPrintTestLines():
+ """Print out the text lines collected"""
+ for line in print_test_list:
+ if line.colour:
+ col = Color()
+ print(col.Color(line.colour, line.text), end='')
+ else:
+ print(line.text, end='')
+ if line.newline:
+ print()
+
+
+class Color(object):
+ """Conditionally wraps text in ANSI color escape sequences."""
+ BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
+ BOLD = -1
+ BRIGHT_START = '\033[1;%dm'
+ NORMAL_START = '\033[22;%dm'
+ BOLD_START = '\033[1m'
+ RESET = '\033[0m'
+
+ def __init__(self, colored=COLOR_IF_TERMINAL):
+ """Create a new Color object, optionally disabling color output.
+
+        Args:
+            colored: Whether to colour the output: COLOR_ALWAYS, COLOR_NEVER
+                or COLOR_IF_TERMINAL (colour only if stdout is a terminal).
+ """
+ try:
+ self._enabled = (colored == COLOR_ALWAYS or
+ (colored == COLOR_IF_TERMINAL and
+ os.isatty(sys.stdout.fileno())))
+        except Exception:
+ self._enabled = False
+
+ def Start(self, color, bright=True):
+ """Returns a start color code.
+
+        Args:
+            color: Color to use, e.g. BLACK, RED, etc.
+            bright: True to use the bright variant of the colour
+
+ Returns:
+ If color is enabled, returns an ANSI sequence to start the given
+ color, otherwise returns empty string
+ """
+ if self._enabled:
+ base = self.BRIGHT_START if bright else self.NORMAL_START
+ return base % (color + 30)
+ return ''
+
+ def Stop(self):
+ """Returns a stop color code.
+
+ Returns:
+ If color is enabled, returns an ANSI color reset sequence,
+ otherwise returns empty string
+ """
+ if self._enabled:
+ return self.RESET
+ return ''
+
+ def Color(self, color, text, bright=True):
+ """Returns text with conditionally added color escape sequences.
+
+        Args:
+            color: Text color -- one of the color constants defined in this
+                class.
+            text: The text to color.
+            bright: True to use the bright variant of the colour
+
+ Returns:
+ If self._enabled is False, returns the original text. If it's True,
+ returns text with color escape sequences based on the value of
+ color.
+ """
+ if not self._enabled:
+ return text
+ if color == self.BOLD:
+ start = self.BOLD_START
+ else:
+ base = self.BRIGHT_START if bright else self.NORMAL_START
+ start = base % (color + 30)
+ return start + text + self.RESET
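+
+
+# Example usage (illustrative): Color().Color(Color.RED, 'error') returns
+# '\033[1;31merror\033[0m' when stdout is a terminal, and just 'error' when
+# it is not (or when COLOR_NEVER is passed to the constructor).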
diff --git a/roms/u-boot/tools/patman/test/test01.txt b/roms/u-boot/tools/patman/test/test01.txt
new file mode 100644
index 000000000..b238a8b4b
--- /dev/null
+++ b/roms/u-boot/tools/patman/test/test01.txt
@@ -0,0 +1,69 @@
+commit b9da5f937bd5ea4931ea17459bf79b2905d9594d
+Author: Simon Glass <sjg@chromium.org>
+Date: Sat Apr 15 15:39:08 2017 -0600
+
+ pci: Correct cast for sandbox
+
+ This gives a warning with some native compilers:
+
+ cmd/pci.c:152:11: warning: format ‘%llx’ expects argument of type
+ ‘long long unsigned int’, but argument 3 has type
+ ‘u64 {aka long unsigned int}’ [-Wformat=]
+
+ Fix it with a cast.
+
+ Signed-off-by: Simon Glass <sjg@chromium.org>
+ Commit-changes: 2
+ - second revision change
+
+ Series-notes:
+ some notes
+ about some things
+ from the first commit
+ END
+
+ Commit-notes:
+ Some notes about
+ the first commit
+ END
+
+commit 5ab48490f03051875ab13d288a4bf32b507d76fd
+Author: Simon Glass <sjg@chromium.org>
+Date: Sat Apr 15 15:39:08 2017 -0600
+
+ fdt: Correct cast for sandbox in fdtdec_setup_mem_size_base()
+
+ This gives a warning with some native compilers:
+
+ lib/fdtdec.c:1203:8: warning: format ‘%llx’ expects argument of type
+ ‘long long unsigned int’, but argument 3 has type
+ ‘long unsigned int’ [-Wformat=]
+
+ Fix it with a cast.
+
+ Signed-off-by: Simon Glass <sjg@chromium.org>
+ Series-to: u-boot
+ Series-prefix: RFC
+ Series-cc: Stefan Brüns <stefan.bruens@rwth-aachen.de>
+ Cover-letter-cc: Lord Mëlchett <clergy@palace.gov>
+ Series-version: 3
+ Patch-cc: fred
+ Series-process-log: sort, uniq
+ Series-changes: 4
+ - Some changes
+ - Multi
+ line
+ change
+
+ Commit-changes: 2
+ - Changes only for this commit
+
+ Cover-changes: 4
+ - Some notes for the cover letter
+
+ Cover-letter:
+ test: A test patch series
+ This is a test of how the cover
+ letter
+ works
+ END
diff --git a/roms/u-boot/tools/patman/test_checkpatch.py b/roms/u-boot/tools/patman/test_checkpatch.py
new file mode 100644
index 000000000..56af5265c
--- /dev/null
+++ b/roms/u-boot/tools/patman/test_checkpatch.py
@@ -0,0 +1,457 @@
+# -*- coding: utf-8 -*-
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Tests for U-Boot-specific checkpatch.pl features
+#
+# Copyright (c) 2011 The Chromium OS Authors.
+#
+
+import os
+import tempfile
+import unittest
+
+from patman import checkpatch
+from patman import gitutil
+from patman import patchstream
+from patman import series
+from patman import commit
+
+
+class Line:
+ def __init__(self, fname, text):
+ self.fname = fname
+ self.text = text
+
+
+class PatchMaker:
+ def __init__(self):
+ self.lines = []
+
+ def add_line(self, fname, text):
+ self.lines.append(Line(fname, text))
+
+ def get_patch_text(self):
+ base = '''From 125b77450f4c66b8fd9654319520bbe795c9ef31 Mon Sep 17 00:00:00 2001
+From: Simon Glass <sjg@chromium.org>
+Date: Sun, 14 Jun 2020 09:45:14 -0600
+Subject: [PATCH] Test commit
+
+This is a test commit.
+
+Signed-off-by: Simon Glass <sjg@chromium.org>
+---
+
+'''
+ lines = base.splitlines()
+
+ # Create the diffstat
+ change = 0
+ insert = 0
+ for line in self.lines:
+ lines.append(' %s | 1 +' % line.fname)
+ change += 1
+ insert += 1
+ lines.append(' %d files changed, %d insertions(+)' % (change, insert))
+ lines.append('')
+
+ # Create the patch info for each file
+ for line in self.lines:
+ lines.append('diff --git a/%s b/%s' % (line.fname, line.fname))
+ lines.append('index 7837d459f18..5ba7840f68e 100644')
+ lines.append('--- a/%s' % line.fname)
+ lines.append('+++ b/%s' % line.fname)
+ lines += ('''@@ -121,6 +121,7 @@ enum uclass_id {
+ UCLASS_W1, /* Dallas 1-Wire bus */
+ UCLASS_W1_EEPROM, /* one-wire EEPROMs */
+ UCLASS_WDT, /* Watchdog Timer driver */
++%s
+
+ UCLASS_COUNT,
+ UCLASS_INVALID = -1,
+''' % line.text).splitlines()
+ lines.append('---')
+ lines.append('2.17.1')
+
+ return '\n'.join(lines)
+
+ def get_patch(self):
+ inhandle, inname = tempfile.mkstemp()
+ infd = os.fdopen(inhandle, 'w')
+ infd.write(self.get_patch_text())
+ infd.close()
+ return inname
+
+ def run_checkpatch(self):
+ return checkpatch.CheckPatch(self.get_patch(), show_types=True)
+
+
+class TestPatch(unittest.TestCase):
+ """Test the u_boot_line() function in checkpatch.pl"""
+
+ def testBasic(self):
+ """Test basic filter operation"""
+ data='''
+
+From 656c9a8c31fa65859d924cd21da920d6ba537fad Mon Sep 17 00:00:00 2001
+From: Simon Glass <sjg@chromium.org>
+Date: Thu, 28 Apr 2011 09:58:51 -0700
+Subject: [PATCH (resend) 3/7] Tegra2: Add more clock support
+
+This adds functions to enable/disable clocks and reset to on-chip peripherals.
+
+cmd/pci.c:152:11: warning: format ‘%llx’ expects argument of type
+ ‘long long unsigned int’, but argument 3 has type
+ ‘u64 {aka long unsigned int}’ [-Wformat=]
+
+BUG=chromium-os:13875
+TEST=build U-Boot for Seaboard, boot
+
+Change-Id: I80fe1d0c0b7dd10aa58ce5bb1d9290b6664d5413
+
+Review URL: http://codereview.chromium.org/6900006
+
+Signed-off-by: Simon Glass <sjg@chromium.org>
+---
+ arch/arm/cpu/armv7/tegra2/Makefile | 2 +-
+ arch/arm/cpu/armv7/tegra2/ap20.c | 57 ++----
+ arch/arm/cpu/armv7/tegra2/clock.c | 163 +++++++++++++++++
+'''
+ expected='''Message-Id: <19991231235959.0.I80fe1d0c0b7dd10aa58ce5bb1d9290b6664d5413@changeid>
+
+
+From 656c9a8c31fa65859d924cd21da920d6ba537fad Mon Sep 17 00:00:00 2001
+From: Simon Glass <sjg@chromium.org>
+Date: Thu, 28 Apr 2011 09:58:51 -0700
+Subject: [PATCH (resend) 3/7] Tegra2: Add more clock support
+
+This adds functions to enable/disable clocks and reset to on-chip peripherals.
+
+cmd/pci.c:152:11: warning: format ‘%llx’ expects argument of type
+ ‘long long unsigned int’, but argument 3 has type
+ ‘u64 {aka long unsigned int}’ [-Wformat=]
+
+Signed-off-by: Simon Glass <sjg@chromium.org>
+---
+
+ arch/arm/cpu/armv7/tegra2/Makefile | 2 +-
+ arch/arm/cpu/armv7/tegra2/ap20.c | 57 ++----
+ arch/arm/cpu/armv7/tegra2/clock.c | 163 +++++++++++++++++
+'''
+ out = ''
+ inhandle, inname = tempfile.mkstemp()
+ infd = os.fdopen(inhandle, 'w', encoding='utf-8')
+ infd.write(data)
+ infd.close()
+
+ exphandle, expname = tempfile.mkstemp()
+ expfd = os.fdopen(exphandle, 'w', encoding='utf-8')
+ expfd.write(expected)
+ expfd.close()
+
+ # Normally by the time we call fix_patch we've already collected
+ # metadata. Here, we haven't, but at least fake up something.
+ # Set the "count" to -1 which tells fix_patch to use a bogus/fixed
+ # time for generating the Message-Id.
+ com = commit.Commit('')
+ com.change_id = 'I80fe1d0c0b7dd10aa58ce5bb1d9290b6664d5413'
+ com.count = -1
+
+ patchstream.fix_patch(None, inname, series.Series(), com)
+
+ rc = os.system('diff -u %s %s' % (inname, expname))
+ self.assertEqual(rc, 0)
+
+ os.remove(inname)
+ os.remove(expname)
+
+ def GetData(self, data_type):
+ data='''From 4924887af52713cabea78420eff03badea8f0035 Mon Sep 17 00:00:00 2001
+From: Simon Glass <sjg@chromium.org>
+Date: Thu, 7 Apr 2011 10:14:41 -0700
+Subject: [PATCH 1/4] Add microsecond boot time measurement
+
+This defines the basics of a new boot time measurement feature. This allows
+logging of very accurate time measurements as the boot proceeds, by using
+an available microsecond counter.
+
+%s
+---
+ README | 11 ++++++++
+ MAINTAINERS | 3 ++
+ common/bootstage.c | 50 ++++++++++++++++++++++++++++++++++++
+ include/bootstage.h | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++
+ include/common.h | 8 ++++++
+ 5 files changed, 141 insertions(+), 0 deletions(-)
+ create mode 100644 common/bootstage.c
+ create mode 100644 include/bootstage.h
+
+diff --git a/README b/README
+index 6f3748d..f9e4e65 100644
+--- a/README
++++ b/README
+@@ -2026,6 +2026,17 @@ The following options need to be configured:
+ example, some LED's) on your board. At the moment,
+ the following checkpoints are implemented:
+
++- Time boot progress
++ CONFIG_BOOTSTAGE
++
++ Define this option to enable microsecond boot stage timing
++ on supported platforms. For this to work your platform
++ needs to define a function timer_get_us() which returns the
++ number of microseconds since reset. This would normally
++ be done in your SOC or board timer.c file.
++
++ You can add calls to bootstage_mark() to set time markers.
++
+ - Standalone program support:
+ CONFIG_STANDALONE_LOAD_ADDR
+
+diff --git a/MAINTAINERS b/MAINTAINERS
+index b167b028ec..beb7dc634f 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -474,3 +474,8 @@ S: Maintained
+ T: git git://git.denx.de/u-boot.git
+ F: *
+ F: */
++
++BOOTSTAGE
++M: Simon Glass <sjg@chromium.org>
++L: u-boot@lists.denx.de
++F: common/bootstage.c
+diff --git a/common/bootstage.c b/common/bootstage.c
+new file mode 100644
+index 0000000..2234c87
+--- /dev/null
++++ b/common/bootstage.c
+@@ -0,0 +1,37 @@
++%s
++/*
++ * Copyright (c) 2011, Google Inc. All rights reserved.
++ *
++ */
++
++/*
++ * This module records the progress of boot and arbitrary commands, and
++ * permits accurate timestamping of each. The records can optionally be
++ * passed to kernel in the ATAGs
++ */
++
++#include <common.h>
++
++struct bootstage_record {
++ u32 time_us;
++ const char *name;
++};
++
++static struct bootstage_record record[BOOTSTAGE_COUNT];
++
++u32 bootstage_mark(enum bootstage_id id, const char *name)
++{
++ struct bootstage_record *rec = &record[id];
++
++ /* Only record the first event for each */
++%sif (!rec->name) {
++ rec->time_us = (u32)timer_get_us();
++ rec->name = name;
++ }
++ if (!rec->name &&
++ %ssomething_else) {
++ rec->time_us = (u32)timer_get_us();
++ rec->name = name;
++ }
++%sreturn rec->time_us;
++}
+--
+1.7.3.1
+'''
+ signoff = 'Signed-off-by: Simon Glass <sjg@chromium.org>\n'
+ license = '// SPDX-License-Identifier: GPL-2.0+'
+ tab = ' '
+ indent = ' '
+ if data_type == 'good':
+ pass
+ elif data_type == 'no-signoff':
+ signoff = ''
+ elif data_type == 'no-license':
+ license = ''
+ elif data_type == 'spaces':
+ tab = ' '
+ elif data_type == 'indent':
+ indent = tab
+ else:
+ print('not implemented')
+ return data % (signoff, license, tab, indent, tab)
+
+ def SetupData(self, data_type):
+ inhandle, inname = tempfile.mkstemp()
+ infd = os.fdopen(inhandle, 'w')
+ data = self.GetData(data_type)
+ infd.write(data)
+ infd.close()
+ return inname
+
+ def testGood(self):
+ """Test checkpatch operation"""
+ inf = self.SetupData('good')
+ result = checkpatch.CheckPatch(inf)
+ self.assertEqual(result.ok, True)
+ self.assertEqual(result.problems, [])
+ self.assertEqual(result.errors, 0)
+ self.assertEqual(result.warnings, 0)
+ self.assertEqual(result.checks, 0)
+ self.assertEqual(result.lines, 62)
+ os.remove(inf)
+
+ def testNoSignoff(self):
+ inf = self.SetupData('no-signoff')
+ result = checkpatch.CheckPatch(inf)
+ self.assertEqual(result.ok, False)
+ self.assertEqual(len(result.problems), 1)
+ self.assertEqual(result.errors, 1)
+ self.assertEqual(result.warnings, 0)
+ self.assertEqual(result.checks, 0)
+ self.assertEqual(result.lines, 62)
+ os.remove(inf)
+
+ def testNoLicense(self):
+ inf = self.SetupData('no-license')
+ result = checkpatch.CheckPatch(inf)
+ self.assertEqual(result.ok, False)
+ self.assertEqual(len(result.problems), 1)
+ self.assertEqual(result.errors, 0)
+ self.assertEqual(result.warnings, 1)
+ self.assertEqual(result.checks, 0)
+ self.assertEqual(result.lines, 62)
+ os.remove(inf)
+
+ def testSpaces(self):
+ inf = self.SetupData('spaces')
+ result = checkpatch.CheckPatch(inf)
+ self.assertEqual(result.ok, False)
+ self.assertEqual(len(result.problems), 3)
+ self.assertEqual(result.errors, 0)
+ self.assertEqual(result.warnings, 3)
+ self.assertEqual(result.checks, 0)
+ self.assertEqual(result.lines, 62)
+ os.remove(inf)
+
+ def testIndent(self):
+ inf = self.SetupData('indent')
+ result = checkpatch.CheckPatch(inf)
+ self.assertEqual(result.ok, False)
+ self.assertEqual(len(result.problems), 1)
+ self.assertEqual(result.errors, 0)
+ self.assertEqual(result.warnings, 0)
+ self.assertEqual(result.checks, 1)
+ self.assertEqual(result.lines, 62)
+ os.remove(inf)
+
+ def checkSingleMessage(self, pm, msg, pmtype = 'warning'):
+ """Helper function to run checkpatch and check the result
+
+ Args:
+ pm: PatchMaker object to use
+ msg: Expected message (e.g. 'LIVETREE')
+ pmtype: Type of problem ('error', 'warning')
+ """
+ result = pm.run_checkpatch()
+ if pmtype == 'warning':
+ self.assertEqual(result.warnings, 1)
+ elif pmtype == 'error':
+ self.assertEqual(result.errors, 1)
+ if len(result.problems) != 1:
+ print(result.problems)
+ self.assertEqual(len(result.problems), 1)
+ self.assertIn(msg, result.problems[0]['cptype'])
+
+ def testUclass(self):
+ """Test for possible new uclass"""
+ pm = PatchMaker()
+ pm.add_line('include/dm/uclass-id.h', 'UCLASS_WIBBLE,')
+ self.checkSingleMessage(pm, 'NEW_UCLASS')
+
+ def testLivetree(self):
+ """Test for using the livetree API"""
+ pm = PatchMaker()
+ pm.add_line('common/main.c', 'fdtdec_do_something()')
+ self.checkSingleMessage(pm, 'LIVETREE')
+
+ def testNewCommand(self):
+ """Test for adding a new command"""
+ pm = PatchMaker()
+ pm.add_line('common/main.c', 'do_wibble(struct cmd_tbl *cmd_tbl)')
+ self.checkSingleMessage(pm, 'CMD_TEST')
+
+ def testPreferIf(self):
+ """Test for using #ifdef"""
+ pm = PatchMaker()
+ pm.add_line('common/main.c', '#ifdef CONFIG_YELLOW')
+ pm.add_line('common/init.h', '#ifdef CONFIG_YELLOW')
+ pm.add_line('fred.dtsi', '#ifdef CONFIG_YELLOW')
+ self.checkSingleMessage(pm, "PREFER_IF")
+
+ def testCommandUseDefconfig(self):
+        """Test for enabling/disabling commands using the preprocessor"""
+ pm = PatchMaker()
+ pm.add_line('common/main.c', '#undef CONFIG_CMD_WHICH')
+ self.checkSingleMessage(pm, 'DEFINE_CONFIG_CMD', 'error')
+
+ def testBarredIncludeInHdr(self):
+ """Test for using a barred include in a header file"""
+ pm = PatchMaker()
+ #pm.add_line('include/myfile.h', '#include <common.h>')
+ pm.add_line('include/myfile.h', '#include <dm.h>')
+ self.checkSingleMessage(pm, 'BARRED_INCLUDE_IN_HDR', 'error')
+
+ def testConfigIsEnabledConfig(self):
+ """Test for accidental CONFIG_IS_ENABLED(CONFIG_*) calls"""
+ pm = PatchMaker()
+ pm.add_line('common/main.c', 'if (CONFIG_IS_ENABLED(CONFIG_CLK))')
+ self.checkSingleMessage(pm, 'CONFIG_IS_ENABLED_CONFIG', 'error')
+
+ def check_struct(self, auto, suffix, warning):
+ """Check one of the warnings for struct naming
+
+ Args:
+ auto: Auto variable name, e.g. 'per_child_auto'
+ suffix: Suffix to expect on member, e.g. '_priv'
+ warning: Warning name, e.g. 'PRIV_AUTO'
+ """
+ pm = PatchMaker()
+ pm.add_line('common/main.c', '.%s = sizeof(struct(fred)),' % auto)
+ pm.add_line('common/main.c', '.%s = sizeof(struct(mary%s)),' %
+ (auto, suffix))
+ self.checkSingleMessage(
+ pm, warning, "struct 'fred' should have a %s suffix" % suffix)
+
+ def testDmDriverAuto(self):
+ """Check for the correct suffix on 'struct driver' auto members"""
+ self.check_struct('priv_auto', '_priv', 'PRIV_AUTO')
+ self.check_struct('plat_auto', '_plat', 'PLAT_AUTO')
+ self.check_struct('per_child_auto', '_priv', 'CHILD_PRIV_AUTO')
+ self.check_struct('per_child_plat_auto', '_plat', 'CHILD_PLAT_AUTO')
+
+ def testDmUclassAuto(self):
+ """Check for the correct suffix on 'struct uclass' auto members"""
+ # Some of these are omitted since they match those from struct driver
+ self.check_struct('per_device_auto', '_priv', 'DEVICE_PRIV_AUTO')
+ self.check_struct('per_device_plat_auto', '_plat', 'DEVICE_PLAT_AUTO')
+
+ def check_strl(self, func):
+ """Check one of the checks for strn(cpy|cat)"""
+ pm = PatchMaker()
+ pm.add_line('common/main.c', "strn%s(foo, bar, sizeof(foo));" % func)
+ self.checkSingleMessage(pm, "STRL",
+ "strl%s is preferred over strn%s because it always produces a nul-terminated string\n"
+ % (func, func))
+
+ def testStrl(self):
+ """Check for uses of strn(cat|cpy)"""
+        self.check_strl("cat")
+        self.check_strl("cpy")
+
+if __name__ == "__main__":
+ unittest.main()
+ gitutil.RunTests()
diff --git a/roms/u-boot/tools/patman/test_util.py b/roms/u-boot/tools/patman/test_util.py
new file mode 100644
index 000000000..4e261755d
--- /dev/null
+++ b/roms/u-boot/tools/patman/test_util.py
@@ -0,0 +1,193 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (c) 2016 Google, Inc
+#
+
+from contextlib import contextmanager
+import glob
+import multiprocessing
+import os
+import sys
+import unittest
+
+from patman import command
+
+from io import StringIO
+
+use_concurrent = True
+try:
+ from concurrencytest.concurrencytest import ConcurrentTestSuite
+ from concurrencytest.concurrencytest import fork_for_tests
+except ImportError:
+ use_concurrent = False
+
+
+def RunTestCoverage(prog, filter_fname, exclude_list, build_dir, required=None,
+ extra_args=None):
+ """Run tests and check that we get 100% coverage
+
+ Args:
+        prog: Program to run (will be passed a '-t' argument to run tests)
+ filter_fname: Normally all *.py files in the program's directory will
+ be included. If this is not None, then it is used to filter the
+ list so that only filenames that don't contain filter_fname are
+ included.
+ exclude_list: List of file patterns to exclude from the coverage
+ calculation
+ build_dir: Build directory, used to locate libfdt.py
+ required: List of modules which must be in the coverage report
+ extra_args (str): Extra arguments to pass to the tool before the -t/test
+ arg
+
+ Raises:
+ ValueError if the code coverage is not 100%
+ """
+ # This uses the build output from sandbox_spl to get _libfdt.so
+ path = os.path.dirname(prog)
+ if filter_fname:
+ glob_list = glob.glob(os.path.join(path, '*.py'))
+ glob_list = [fname for fname in glob_list if filter_fname in fname]
+ else:
+ glob_list = []
+ glob_list += exclude_list
+ glob_list += ['*libfdt.py', '*site-packages*', '*dist-packages*']
+ glob_list += ['*concurrencytest*']
+ test_cmd = 'test' if 'binman' in prog or 'patman' in prog else '-t'
+ prefix = ''
+ if build_dir:
+ prefix = 'PYTHONPATH=$PYTHONPATH:%s/sandbox_spl/tools ' % build_dir
+ cmd = ('%spython3-coverage run '
+ '--omit "%s" %s %s %s -P1' % (prefix, ','.join(glob_list),
+ prog, extra_args or '', test_cmd))
+ os.system(cmd)
+ stdout = command.Output('python3-coverage', 'report')
+ lines = stdout.splitlines()
+    ok = True
+    if required:
+        # Convert '/path/to/name.py' to just the module name 'name'
+        test_set = set([os.path.splitext(os.path.basename(line.split()[0]))[0]
+                        for line in lines if '/etype/' in line])
+        missing_list = set(required)
+        missing_list.discard('__init__')
+        missing_list.difference_update(test_set)
+        if missing_list:
+            print('Missing tests for %s' % (', '.join(missing_list)))
+            print(stdout)
+            ok = False
+
+    coverage = lines[-1].split(' ')[-1]
+    print(coverage)
+ if coverage != '100%':
+ print(stdout)
+ print("Type 'python3-coverage html' to get a report in "
+ 'htmlcov/index.html')
+ print('Coverage error: %s, but should be 100%%' % coverage)
+ ok = False
+ if not ok:
+ raise ValueError('Test coverage failure')
+
+
+# Use this to suppress stdout/stderr output:
+# with capture_sys_output() as (stdout, stderr)
+# ...do something...
+@contextmanager
+def capture_sys_output():
+ capture_out, capture_err = StringIO(), StringIO()
+ old_out, old_err = sys.stdout, sys.stderr
+ try:
+ sys.stdout, sys.stderr = capture_out, capture_err
+ yield capture_out, capture_err
+ finally:
+ sys.stdout, sys.stderr = old_out, old_err
+
+
+def ReportResult(toolname: str, test_name: str, result: unittest.TestResult):
+ """Report the results from a suite of tests
+
+ Args:
+ toolname: Name of the tool that ran the tests
+ test_name: Name of test that was run, or None for all
+ result: A unittest.TestResult object containing the results
+ """
+    # Remove errors which just indicate a missing test. Since Python v3.5, if
+    # an ImportError or AttributeError occurs while traversing a test name, a
+    # synthetic test that raises that error when run is returned. These
+    # errors are included in the errors accumulated by result.errors.
+ if test_name:
+ errors = []
+
+ for test, err in result.errors:
+ if ("has no attribute '%s'" % test_name) not in err:
+ errors.append((test, err))
+ result.testsRun -= 1
+ result.errors = errors
+
+ print(result)
+ for test, err in result.errors:
+ print(test.id(), err)
+ for test, err in result.failures:
+ print(err, result.failures)
+ if result.skipped:
+ print('%d %s test%s SKIPPED:' % (len(result.skipped), toolname,
+ 's' if len(result.skipped) > 1 else ''))
+ for skip_info in result.skipped:
+ print('%s: %s' % (skip_info[0], skip_info[1]))
+ if result.errors or result.failures:
+ print('%s tests FAILED' % toolname)
+ return 1
+ return 0
+
+
+def RunTestSuites(result, debug, verbosity, test_preserve_dirs, processes,
+ test_name, toolpath, test_class_list):
+ """Run a series of test suites and collect the results
+
+ Args:
+ result: A unittest.TestResult object to add the results to
+ debug: True to enable debugging, which shows a full stack trace on error
+ verbosity: Verbosity level to use (0-4)
+ test_preserve_dirs: True to preserve the input directory used by tests
+ so that it can be examined afterwards (only useful for debugging
+ tests). If a single test is selected (in args[0]) it also preserves
+ the output directory for this test. Both directories are displayed
+ on the command line.
+ processes: Number of processes to use to run tests (None=same as #CPUs)
+ test_name: Name of test to run, or None for all
+ toolpath: List of paths to use for tools
+ test_class_list: List of test classes to run
+ """
+ for module in []:
+ suite = doctest.DocTestSuite(module)
+ suite.run(result)
+
+ sys.argv = [sys.argv[0]]
+ if debug:
+ sys.argv.append('-D')
+ if verbosity:
+ sys.argv.append('-v%d' % verbosity)
+ if toolpath:
+ for path in toolpath:
+ sys.argv += ['--toolpath', path]
+
+ suite = unittest.TestSuite()
+ loader = unittest.TestLoader()
+ for module in test_class_list:
+        # Tell the test module about our arguments, if it is interested
+ if hasattr(module, 'setup_test_args'):
+ setup_test_args = getattr(module, 'setup_test_args')
+ setup_test_args(preserve_indir=test_preserve_dirs,
+ preserve_outdirs=test_preserve_dirs and test_name is not None,
+ toolpath=toolpath, verbosity=verbosity)
+ if test_name:
+ try:
+ suite.addTests(loader.loadTestsFromName(test_name, module))
+ except AttributeError:
+ continue
+ else:
+ suite.addTests(loader.loadTestsFromTestCase(module))
+ if use_concurrent and processes != 1:
+ concurrent_suite = ConcurrentTestSuite(suite,
+ fork_for_tests(processes or multiprocessing.cpu_count()))
+ concurrent_suite.run(result)
+ else:
+ suite.run(result)
diff --git a/roms/u-boot/tools/patman/tools.py b/roms/u-boot/tools/patman/tools.py
new file mode 100644
index 000000000..e5f391b7a
--- /dev/null
+++ b/roms/u-boot/tools/patman/tools.py
@@ -0,0 +1,580 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright (c) 2016 Google, Inc
+#
+
+import glob
+import os
+import shutil
+import struct
+import sys
+import tempfile
+
+from patman import command
+from patman import tout
+
+# Output directory (generally this is temporary)
+outdir = None
+
+# True to keep the output directory around after exiting
+preserve_outdir = False
+
+# Path to the Chrome OS chroot, if we know it
+chroot_path = None
+
+# Search paths to use for Filename(), used to find files
+search_paths = []
+
+tool_search_paths = []
+
+# Tools and the packages that contain them, on debian
+packages = {
+ 'lz4': 'liblz4-tool',
+ }
+
+# List of paths to use when looking for an input file
+indir = []
+
+def PrepareOutputDir(dirname, preserve=False):
+ """Select an output directory, ensuring it exists.
+
+ This either creates a temporary directory or checks that the one supplied
+ by the user is valid. For a temporary directory, it makes a note to
+ remove it later if required.
+
+ Args:
+        dirname: a string, name of the output directory to use to store
+            intermediate and output files. If it is None, a temporary
+            directory is created.
+ preserve: a Boolean. If outdir above is None and preserve is False, the
+ created temporary directory will be destroyed on exit.
+
+ Raises:
+ OSError: If it cannot create the output directory.
+ """
+ global outdir, preserve_outdir
+
+ preserve_outdir = dirname or preserve
+ if dirname:
+ outdir = dirname
+ if not os.path.isdir(outdir):
+ try:
+ os.makedirs(outdir)
+ except OSError as err:
+ raise CmdError("Cannot make output directory '%s': '%s'" %
+ (outdir, err.strerror))
+ tout.Debug("Using output directory '%s'" % outdir)
+ else:
+ outdir = tempfile.mkdtemp(prefix='binman.')
+ tout.Debug("Using temporary directory '%s'" % outdir)
+
+def _RemoveOutputDir():
+ global outdir
+
+ shutil.rmtree(outdir)
+ tout.Debug("Deleted temporary directory '%s'" % outdir)
+ outdir = None
+
+def FinaliseOutputDir():
+    """Tidy up: delete output directory if temporary and not preserved."""
+    global outdir, preserve_outdir
+
+ if outdir and not preserve_outdir:
+ _RemoveOutputDir()
+ outdir = None
+
+def GetOutputFilename(fname):
+ """Return a filename within the output directory.
+
+ Args:
+ fname: Filename to use for new file
+
+ Returns:
+ The full path of the filename, within the output directory
+ """
+ return os.path.join(outdir, fname)
+
+def GetOutputDir():
+ """Return the current output directory
+
+ Returns:
+ str: The output directory
+ """
+ return outdir
+
+def _FinaliseForTest():
+ """Remove the output directory (for use by tests)"""
+ global outdir
+
+ if outdir:
+ _RemoveOutputDir()
+ outdir = None
+
+def SetInputDirs(dirname):
+    """Set the list of input directories, where input files are kept.
+
+ Args:
+ dirname: a list of paths to input directories to use for obtaining
+ files needed by binman to place in the image.
+ """
+ global indir
+
+ indir = dirname
+ tout.Debug("Using input directories %s" % indir)
+
+def GetInputFilename(fname, allow_missing=False):
+ """Return a filename for use as input.
+
+ Args:
+ fname: Filename to use for new file
+ allow_missing: True if the filename can be missing
+
+ Returns:
+ fname, if indir is None;
+ full path of the filename, within the input directory;
+ None, if file is missing and allow_missing is True
+
+ Raises:
+ ValueError if file is missing and allow_missing is False
+ """
+ if not indir or fname[:1] == '/':
+ return fname
+ for dirname in indir:
+ pathname = os.path.join(dirname, fname)
+ if os.path.exists(pathname):
+ return pathname
+
+ if allow_missing:
+ return None
+ raise ValueError("Filename '%s' not found in input path (%s) (cwd='%s')" %
+ (fname, ','.join(indir), os.getcwd()))
+
+def GetInputFilenameGlob(pattern):
+ """Return a list of filenames for use as input.
+
+ Args:
+ pattern: Filename pattern to search for
+
+ Returns:
+ A list of matching files in all input directories
+ """
+ if not indir:
+        return glob.glob(pattern)
+ files = []
+ for dirname in indir:
+ pathname = os.path.join(dirname, pattern)
+ files += glob.glob(pathname)
+ return sorted(files)
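+
+# Example (illustrative sketch): how the input-directory helpers fit together.
+# The directory names and filenames below are example values only.
+#
+#     from patman import tools
+#
+#     tools.SetInputDirs(['board/files', 'common/files'])
+#     dtb = tools.GetInputFilename('u-boot.dtb')        # searches both dirs
+#     extra = tools.GetInputFilename('optional.bin', allow_missing=True)
+#     cfgs = tools.GetInputFilenameGlob('*.cfg')        # sorted list of matches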
+
+def Align(pos, align):
+    """Round pos up to the next multiple of align (a power of two), if set"""
+    if align:
+        mask = align - 1
+        pos = (pos + mask) & ~mask
+    return pos
+
+def NotPowerOfTwo(num):
+    """Return a truthy value if num is non-zero and not a power of two"""
+    return num and (num & (num - 1))
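+
+# Example (illustrative): Align() rounds up to a power-of-two boundary and
+# NotPowerOfTwo() is truthy for values that are not valid alignments:
+#
+#     Align(0x123, 0x100)   -> 0x200
+#     Align(0x200, 0x100)   -> 0x200   (already aligned)
+#     NotPowerOfTwo(0x100)  -> 0       (falsy: a power of two)
+#     NotPowerOfTwo(0x180)  -> 0x100   (truthy: not a power of two)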
+
+def SetToolPaths(toolpaths):
+ """Set the path to search for tools
+
+ Args:
+ toolpaths: List of paths to search for tools executed by Run()
+ """
+ global tool_search_paths
+
+ tool_search_paths = toolpaths
+
+def PathHasFile(path_spec, fname):
+ """Check if a given filename is in the PATH
+
+ Args:
+ path_spec: Value of PATH variable to check
+ fname: Filename to check
+
+ Returns:
+ True if found, False if not
+ """
+    for dirname in path_spec.split(':'):
+        if os.path.exists(os.path.join(dirname, fname)):
+ return True
+ return False
+
+def GetHostCompileTool(name):
+ """Get the host-specific version for a compile tool
+
+ This checks the environment variables that specify which version of
+ the tool should be used (e.g. ${HOSTCC}).
+
+ The following table lists the host-specific versions of the tools
+ this function resolves to:
+
+ Compile Tool | Host version
+ --------------+----------------
+ as | ${HOSTAS}
+ ld | ${HOSTLD}
+ cc | ${HOSTCC}
+ cpp | ${HOSTCPP}
+ c++ | ${HOSTCXX}
+ ar | ${HOSTAR}
+ nm | ${HOSTNM}
+ ldr | ${HOSTLDR}
+ strip | ${HOSTSTRIP}
+ objcopy | ${HOSTOBJCOPY}
+ objdump | ${HOSTOBJDUMP}
+ dtc | ${HOSTDTC}
+
+ Args:
+ name: Command name to run
+
+ Returns:
+ host_name: Exact command name to run instead
+ extra_args: List of extra arguments to pass
+ """
+    env = dict(os.environ)
+
+    host_name = None
+    extra_args = []
+    if name in ('as', 'ld', 'cc', 'cpp', 'ar', 'nm', 'ldr', 'strip',
+                'objcopy', 'objdump', 'dtc'):
+        host_name, *extra_args = env.get('HOST' + name.upper(), '').split(' ')
+    elif name == 'c++':
+        host_name, *extra_args = env.get('HOSTCXX', '').split(' ')
+
+ if host_name:
+ return host_name, extra_args
+ return name, []
+
+def GetTargetCompileTool(name, cross_compile=None):
+ """Get the target-specific version for a compile tool
+
+ This first checks the environment variables that specify which
+ version of the tool should be used (e.g. ${CC}). If those aren't
+ specified, it checks the CROSS_COMPILE variable as a prefix for the
+ tool with some substitutions (e.g. "${CROSS_COMPILE}gcc" for cc).
+
+ The following table lists the target-specific versions of the tools
+ this function resolves to:
+
+ Compile Tool | First choice | Second choice
+ --------------+----------------+----------------------------
+ as | ${AS} | ${CROSS_COMPILE}as
+ ld | ${LD} | ${CROSS_COMPILE}ld.bfd
+ | | or ${CROSS_COMPILE}ld
+ cc | ${CC} | ${CROSS_COMPILE}gcc
+ cpp | ${CPP} | ${CROSS_COMPILE}gcc -E
+ c++ | ${CXX} | ${CROSS_COMPILE}g++
+ ar | ${AR} | ${CROSS_COMPILE}ar
+ nm | ${NM} | ${CROSS_COMPILE}nm
+ ldr | ${LDR} | ${CROSS_COMPILE}ldr
+ strip | ${STRIP} | ${CROSS_COMPILE}strip
+ objcopy | ${OBJCOPY} | ${CROSS_COMPILE}objcopy
+ objdump | ${OBJDUMP} | ${CROSS_COMPILE}objdump
+ dtc | ${DTC} | (no CROSS_COMPILE version)
+
+    Args:
+        name: Command name to run
+        cross_compile: Cross-compile prefix to use, or None to read it from
+            the CROSS_COMPILE environment variable
+
+ Returns:
+ target_name: Exact command name to run instead
+ extra_args: List of extra arguments to pass
+ """
+ env = dict(os.environ)
+
+ target_name = None
+ extra_args = []
+ if name in ('as', 'ld', 'cc', 'cpp', 'ar', 'nm', 'ldr', 'strip',
+ 'objcopy', 'objdump', 'dtc'):
+ target_name, *extra_args = env.get(name.upper(), '').split(' ')
+ elif name == 'c++':
+ target_name, *extra_args = env.get('CXX', '').split(' ')
+
+ if target_name:
+ return target_name, extra_args
+
+ if cross_compile is None:
+ cross_compile = env.get('CROSS_COMPILE', '')
+ if not cross_compile:
+ return name, []
+
+ if name in ('as', 'ar', 'nm', 'ldr', 'strip', 'objcopy', 'objdump'):
+ target_name = cross_compile + name
+ elif name == 'ld':
+ try:
+ if Run(cross_compile + 'ld.bfd', '-v'):
+ target_name = cross_compile + 'ld.bfd'
+        except Exception:
+ target_name = cross_compile + 'ld'
+ elif name == 'cc':
+ target_name = cross_compile + 'gcc'
+ elif name == 'cpp':
+ target_name = cross_compile + 'gcc'
+ extra_args = ['-E']
+ elif name == 'c++':
+ target_name = cross_compile + 'g++'
+ else:
+ target_name = name
+ return target_name, extra_args
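+
+# Example (illustrative, assuming CROSS_COMPILE='aarch64-linux-gnu-' as an
+# example prefix and no CC/CPP/DTC variables set in the environment):
+#
+#     GetTargetCompileTool('cc')    -> ('aarch64-linux-gnu-gcc', [])
+#     GetTargetCompileTool('cpp')   -> ('aarch64-linux-gnu-gcc', ['-E'])
+#     GetTargetCompileTool('dtc')   -> ('dtc', [])  # no CROSS_COMPILE version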
+
+def Run(name, *args, **kwargs):
+ """Run a tool with some arguments
+
+ This runs a 'tool', which is a program used by binman to process files and
+ perhaps produce some output. Tools can be located on the PATH or in a
+ search path.
+
+    Args:
+        name: Command name to run
+        args: Arguments to the tool
+        for_host: True to resolve the command to the version for the host
+        for_target: False to run the command as-is, without resolving it
+            to the version for the compile target
+        binary: True to return the output as bytes rather than a string
+
+    Returns:
+        Standard output from the command, as a string (or bytes if binary
+        is True)
+ """
+ try:
+ binary = kwargs.get('binary')
+ for_host = kwargs.get('for_host', False)
+ for_target = kwargs.get('for_target', not for_host)
+ env = None
+ if tool_search_paths:
+ env = dict(os.environ)
+ env['PATH'] = ':'.join(tool_search_paths) + ':' + env['PATH']
+ if for_target:
+ name, extra_args = GetTargetCompileTool(name)
+ args = tuple(extra_args) + args
+ elif for_host:
+ name, extra_args = GetHostCompileTool(name)
+ args = tuple(extra_args) + args
+ name = os.path.expanduser(name) # Expand paths containing ~
+ all_args = (name,) + args
+ result = command.RunPipe([all_args], capture=True, capture_stderr=True,
+ env=env, raise_on_error=False, binary=binary)
+ if result.return_code:
+ raise Exception("Error %d running '%s': %s" %
+                            (result.return_code, ' '.join(all_args),
+ result.stderr))
+ return result.stdout
+    except Exception:
+ if env and not PathHasFile(env['PATH'], name):
+ msg = "Please install tool '%s'" % name
+ package = packages.get(name)
+ if package:
+ msg += " (e.g. from package '%s')" % package
+ raise ValueError(msg)
+ raise
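+
+# Example (illustrative sketch): running external tools, optionally from an
+# extra search path. The path '/opt/tools/bin' is an example value, and the
+# tools named are assumed to be installed.
+#
+#     from patman import tools
+#
+#     tools.SetToolPaths(['/opt/tools/bin'])
+#     version = tools.Run('dtc', '--version')           # target tool (default)
+#     host_out = tools.Run('objcopy', '-V', for_host=True)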
+
+def Filename(fname):
+ """Resolve a file path to an absolute path.
+
+ If fname starts with ##/ and chroot is available, ##/ gets replaced with
+ the chroot path. If chroot is not available, this file name can not be
+ resolved, `None' is returned.
+
+    If fname is not prepended with the above prefix, and is not an existing
+    file, the actual file name is retrieved from the passed in string and the
+    search_paths directories (if any) are searched for the file. If found,
+    the path to the found file is returned; otherwise the unchanged fname is
+    returned.
+
+    Args:
+        fname: a string, the path to resolve.
+
+    Returns:
+        Resolved path to the file, the unchanged fname if no match was found
+        in the search paths, or None if the ##/ prefix was used but no chroot
+        path is available.
+ """
+ if fname.startswith('##/'):
+ if chroot_path:
+ fname = os.path.join(chroot_path, fname[3:])
+ else:
+ return None
+
+ # Search for a pathname that exists, and return it if found
+ if fname and not os.path.exists(fname):
+ for path in search_paths:
+ pathname = os.path.join(path, os.path.basename(fname))
+ if os.path.exists(pathname):
+ return pathname
+
+ # If not found, just return the standard, unchanged path
+ return fname
+
+def ReadFile(fname, binary=True):
+ """Read and return the contents of a file.
+
+ Args:
+        fname: path to filename to read, where the ##/ prefix signifies the
+            chroot.
+        binary: True to read the file as binary (bytes), False as a string
+
+    Returns:
+        data read from file, as bytes (or str if binary is False)
+ """
+ with open(Filename(fname), binary and 'rb' or 'r') as fd:
+ data = fd.read()
+ #self._out.Info("Read file '%s' size %d (%#0x)" %
+ #(fname, len(data), len(data)))
+ return data
+
+def WriteFile(fname, data, binary=True):
+ """Write data into a file.
+
+ Args:
+        fname: path to filename to write
+        data: data to write to file, as bytes (or str if binary is False)
+        binary: True to write the data as binary (bytes), False as a string
+    """
+ #self._out.Info("Write file '%s' size %d (%#0x)" %
+ #(fname, len(data), len(data)))
+ with open(Filename(fname), binary and 'wb' or 'w') as fd:
+ fd.write(data)
+
+def GetBytes(byte, size):
+ """Get a string of bytes of a given size
+
+ Args:
+ byte: Numeric byte value to use
+ size: Size of bytes/string to return
+
+ Returns:
+ A bytes type with 'byte' repeated 'size' times
+ """
+ return bytes([byte]) * size
+
+def ToBytes(string):
+ """Convert a str type into a bytes type
+
+ Args:
+ string: string to convert
+
+ Returns:
+ A bytes type
+ """
+ return string.encode('utf-8')
+
+def ToString(bval):
+ """Convert a bytes type into a str type
+
+ Args:
+ bval: bytes value to convert
+
+ Returns:
+        A str type, decoded from the bytes using UTF-8
+ """
+ return bval.decode('utf-8')
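+
+# Example (illustrative): GetBytes(0xff, 4) -> b'\xff\xff\xff\xff',
+# ToBytes('abc') -> b'abc' and ToString(b'abc') -> 'abc'.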
+
+def Compress(indata, algo, with_header=True):
+ """Compress some data using a given algorithm
+
+ Note that for lzma this uses an old version of the algorithm, not that
+ provided by xz.
+
+ This requires 'lz4' and 'lzma_alone' tools. It also requires an output
+ directory to be previously set up, by calling PrepareOutputDir().
+
+ Args:
+ indata: Input data to compress
+        algo: Algorithm to use ('none', 'gzip', 'lz4' or 'lzma')
+        with_header: True to prepend a 4-byte little-endian length header
+
+ Returns:
+ Compressed data
+ """
+ if algo == 'none':
+ return indata
+ fname = GetOutputFilename('%s.comp.tmp' % algo)
+ WriteFile(fname, indata)
+ if algo == 'lz4':
+ data = Run('lz4', '--no-frame-crc', '-B4', '-5', '-c', fname,
+ binary=True)
+ # cbfstool uses a very old version of lzma
+ elif algo == 'lzma':
+ outfname = GetOutputFilename('%s.comp.otmp' % algo)
+ Run('lzma_alone', 'e', fname, outfname, '-lc1', '-lp0', '-pb0', '-d8')
+ data = ReadFile(outfname)
+ elif algo == 'gzip':
+ data = Run('gzip', '-c', fname, binary=True)
+ else:
+ raise ValueError("Unknown algorithm '%s'" % algo)
+ if with_header:
+ hdr = struct.pack('<I', len(data))
+ data = hdr + data
+ return data
+
+def Decompress(indata, algo, with_header=True):
+ """Decompress some data using a given algorithm
+
+ Note that for lzma this uses an old version of the algorithm, not that
+ provided by xz.
+
+ This requires 'lz4' and 'lzma_alone' tools. It also requires an output
+ directory to be previously set up, by calling PrepareOutputDir().
+
+ Args:
+ indata: Input data to decompress
+        algo: Algorithm to use ('none', 'gzip', 'lz4' or 'lzma')
+        with_header: True if the data starts with a 4-byte little-endian
+            length header
+
+    Returns:
+        Decompressed data
+ """
+ if algo == 'none':
+ return indata
+ if with_header:
+ data_len = struct.unpack('<I', indata[:4])[0]
+ indata = indata[4:4 + data_len]
+ fname = GetOutputFilename('%s.decomp.tmp' % algo)
+ with open(fname, 'wb') as fd:
+ fd.write(indata)
+ if algo == 'lz4':
+ data = Run('lz4', '-dc', fname, binary=True)
+ elif algo == 'lzma':
+ outfname = GetOutputFilename('%s.decomp.otmp' % algo)
+ Run('lzma_alone', 'd', fname, outfname)
+ data = ReadFile(outfname, binary=True)
+ elif algo == 'gzip':
+ data = Run('gzip', '-cd', fname, binary=True)
+ else:
+ raise ValueError("Unknown algorithm '%s'" % algo)
+ return data
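+
+# Example (illustrative sketch): a compress/decompress round trip. This
+# assumes the 'lz4' tool is installed and an output directory has been set up.
+#
+#     from patman import tools
+#
+#     tools.PrepareOutputDir(None)
+#     blob = tools.ToBytes('hello world' * 100)
+#     packed = tools.Compress(blob, 'lz4')     # 4-byte length header prepended
+#     assert tools.Decompress(packed, 'lz4') == blob
+#     tools.FinaliseOutputDir()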
+
+CMD_CREATE, CMD_DELETE, CMD_ADD, CMD_REPLACE, CMD_EXTRACT = range(5)
+
+IFWITOOL_CMDS = {
+ CMD_CREATE: 'create',
+ CMD_DELETE: 'delete',
+ CMD_ADD: 'add',
+ CMD_REPLACE: 'replace',
+ CMD_EXTRACT: 'extract',
+ }
+
+def RunIfwiTool(ifwi_file, cmd, fname=None, subpart=None, entry_name=None):
+ """Run ifwitool with the given arguments:
+
+ Args:
+        ifwi_file: IFWI file to operate on
+ cmd: Command to execute (CMD_...)
+ fname: Filename of file to add/replace/extract/create (None for
+ CMD_DELETE)
+        subpart: Name of sub-partition to operate on (None for CMD_CREATE)
+ entry_name: Name of directory entry to operate on, or None if none
+ """
+ args = ['ifwitool', ifwi_file]
+ args.append(IFWITOOL_CMDS[cmd])
+ if fname:
+ args += ['-f', fname]
+ if subpart:
+ args += ['-n', subpart]
+ if entry_name:
+ args += ['-d', '-e', entry_name]
+ Run(*args)
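+
+# Example (illustrative): extracting one entry from an IFWI file. The
+# filenames, sub-partition and entry names are example values only.
+#
+#     RunIfwiTool('image.bin', CMD_EXTRACT, fname='fitc.bin',
+#                 subpart='SIIP', entry_name='fitc')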
+
+def ToHex(val):
+ """Convert an integer value (or None) to a string
+
+ Returns:
+ hex value, or 'None' if the value is None
+ """
+ return 'None' if val is None else '%#x' % val
+
+def ToHexSize(val):
+ """Return the size of an object in hex
+
+ Returns:
+ hex value of size, or 'None' if the value is None
+ """
+ return 'None' if val is None else '%#x' % len(val)
diff --git a/roms/u-boot/tools/patman/tout.py b/roms/u-boot/tools/patman/tout.py
new file mode 100644
index 000000000..33305263d
--- /dev/null
+++ b/roms/u-boot/tools/patman/tout.py
@@ -0,0 +1,179 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+#
+# Terminal output logging.
+#
+
+import sys
+
+from patman import terminal
+
+# Output verbosity levels that we support
+ERROR, WARNING, NOTICE, INFO, DETAIL, DEBUG = range(6)
+
+in_progress = False
+
+"""
+This class handles output of progress and other useful information
+to the user. It provides for simple verbosity level control and can
+output nothing but errors at verbosity zero.
+
+The idea is that modules set up an Output object early in their years and pass
+it around to other modules that need it. This keeps the output under control
+of a single class.
+
+Public properties:
+ verbose: Verbosity level: 0=silent, 1=progress, 3=full, 4=debug
+"""
+def __enter__():
+ return
+
+def __exit__(unused1, unused2, unused3):
+ """Clean up and remove any progress message."""
+ ClearProgress()
+ return False
+
+def UserIsPresent():
+ """This returns True if it is likely that a user is present.
+
+ Sometimes we want to prompt the user, but if no one is there then this
+ is a waste of time, and may lock a script which should otherwise fail.
+
+ Returns:
+ True if it thinks the user is there, and False otherwise
+ """
+ return stdout_is_tty and verbose > 0
+
+def ClearProgress():
+ """Clear any active progress message on the terminal."""
+ global in_progress
+ if verbose > 0 and stdout_is_tty and in_progress:
+ _stdout.write('\r%s\r' % (" " * len (_progress)))
+ _stdout.flush()
+ in_progress = False
+
+def Progress(msg, warning=False, trailer='...'):
+ """Display progress information.
+
+    Args:
+        msg: Message to display.
+        warning: True if this is a warning.
+        trailer: Trailer string to append to the message (default '...').
+    """
+    global in_progress, _progress
+    ClearProgress()
+    if verbose > 0:
+        _progress = msg + trailer
+ if stdout_is_tty:
+ col = _color.YELLOW if warning else _color.GREEN
+ _stdout.write('\r' + _color.Color(col, _progress))
+ _stdout.flush()
+ in_progress = True
+ else:
+ _stdout.write(_progress + '\n')
+
+def _Output(level, msg, color=None):
+ """Output a message to the terminal.
+
+    Args:
+        level: Verbosity level for this message. It will only be displayed if
+            the currently selected level is at least this high.
+        msg: Message to display.
+        color: Colour to use for the message, or None for the default.
+ """
+ if verbose >= level:
+ ClearProgress()
+ if color:
+ msg = _color.Color(color, msg)
+ if level < NOTICE:
+ print(msg, file=sys.stderr)
+ else:
+ print(msg)
+
+def DoOutput(level, msg):
+ """Output a message to the terminal.
+
+ Args:
+        level: Verbosity level for this message. It will only be displayed if
+            the currently selected level is at least this high.
+        msg: Message to display.
+ """
+ _Output(level, msg)
+
+def Error(msg):
+ """Display an error message
+
+ Args:
+        msg: Message to display.
+ """
+ _Output(ERROR, msg, _color.RED)
+
+def Warning(msg):
+ """Display a warning message
+
+ Args:
+        msg: Message to display.
+ """
+ _Output(WARNING, msg, _color.YELLOW)
+
+def Notice(msg):
+ """Display an important infomation message
+
+ Args:
+ msg; Message to display.
+ """
+ _Output(NOTICE, msg)
+
+def Info(msg):
+ """Display an infomation message
+
+ Args:
+ msg; Message to display.
+ """
+ _Output(INFO, msg)
+
+def Detail(msg):
+ """Display a detailed message
+
+ Args:
+        msg: Message to display.
+ """
+ _Output(DETAIL, msg)
+
+def Debug(msg):
+ """Display a debug message
+
+ Args:
+        msg: Message to display.
+ """
+ _Output(DEBUG, msg)
+
+def UserOutput(msg):
+ """Display a message regardless of the current output level.
+
+ This is used when the output was specifically requested by the user.
+ Args:
+        msg: Message to display.
+ """
+ _Output(0, msg)
+
+def Init(_verbose=WARNING, stdout=sys.stdout):
+ """Initialize a new output object.
+
+ Args:
+ verbose: Verbosity level (0-4).
+ stdout: File to use for stdout.
+ """
+    global verbose, _progress, _color, _stdout, stdout_is_tty, stderr_is_tty
+
+ verbose = _verbose
+ _progress = '' # Our last progress message
+ _color = terminal.Color()
+ _stdout = stdout
+
+ # TODO(sjg): Move this into Chromite libraries when we have them
+ stdout_is_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
+ stderr_is_tty = hasattr(sys.stderr, 'isatty') and sys.stderr.isatty()
+
+def Uninit():
+    """Clean up, removing any remaining progress message."""
+    ClearProgress()
+
+Init()
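+
+# Example (illustrative sketch): typical use of this module from a tool. The
+# messages shown are example values only.
+#
+#     from patman import tout
+#
+#     tout.Init(tout.INFO)
+#     tout.Progress('Building image')
+#     tout.Info('Wrote 1024 bytes')       # shown, since INFO <= verbose level
+#     tout.Debug('entry details...')      # hidden at this verbosity
+#     tout.Uninit()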