
Commit

resolve and merge master
pepoviola committed Dec 5, 2024
2 parents 2686ec9 + 82117ad commit 92b6099
Showing 1,449 changed files with 83,779 additions and 40,435 deletions.
8 changes: 7 additions & 1 deletion .config/nextest.toml
@@ -21,7 +21,6 @@ retries = 5
# The number of threads to run tests with. Supported values are either an integer or
# the string "num-cpus". Can be overridden through the `--test-threads` option.
# test-threads = "num-cpus"

test-threads = 20

# The number of threads required for each test. This is generally used in overrides to
@@ -124,3 +123,10 @@ serial-integration = { max-threads = 1 }
[[profile.default.overrides]]
filter = 'test(/(^ui$|_ui|ui_)/)'
test-group = 'serial-integration'

+# Running eth-rpc tests sequentially
+# These tests rely on a shared resource (the RPC and Node)
+# and would cause race conditions due to transaction nonces if run in parallel.
+[[profile.default.overrides]]
+filter = 'package(pallet-revive-eth-rpc) and test(/^tests::/)'
+test-group = 'serial-integration'
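
A note on the override above: the eth-rpc integration tests share one running node and RPC endpoint, and each submitted transaction consumes an account nonce, so two tests submitting from the same dev account in parallel can race for the same nonce. A minimal sketch of that failure mode (hypothetical helper, not code from this commit):

```python
import threading

# Toy stand-in for the shared node's nonce bookkeeping.
used_nonces = set()
lock = threading.Lock()
results = []

def submit_transaction(sender: str, nonce: int) -> None:
    """Accept a transaction only if its (sender, nonce) pair is unused."""
    with lock:
        if (sender, nonce) in used_nonces:
            results.append("rejected: nonce already used")
        else:
            used_nonces.add((sender, nonce))
            results.append("accepted")

# Two parallel tests both observe nonce 0 for the same account and submit:
threads = [threading.Thread(target=submit_transaction, args=("alice", 0)) for _ in range(2)]
for t in threads:
    t.start()
for t in threads:
    t.join()

print(results)  # one "accepted", one "rejected" -- the race the serial group avoids
```

Running these tests in a `max-threads = 1` group sidesteps the race without changing the tests themselves.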
2 changes: 1 addition & 1 deletion .config/zepter.yaml
@@ -27,7 +27,7 @@ workflows:
]
# The umbrella crate uses more features, so we need to check those too:
check_umbrella:
-  - [ $check.0, '--features=serde,experimental,riscv,runtime,with-tracing,tuples-96,with-tracing', '-p=polkadot-sdk' ]
+  - [ $check.0, '--features=serde,experimental,runtime,with-tracing,tuples-96,with-tracing', '-p=polkadot-sdk' ]
# Same as `check_*`, but with the `--fix` flag.
default:
- [ $check.0, '--fix' ]
28 changes: 28 additions & 0 deletions .github/actions/workflow-stopper/action.yml
@@ -0,0 +1,28 @@
name: "stop all workflows"
description: "Action stops all workflows in a PR to save compute resources."
inputs:
app-id:
description: "App id"
required: true
app-key:
description: "App token"
required: true
runs:
using: "composite"
steps:
- name: Workflow stopper - Generate token
uses: actions/create-github-app-token@v1
id: app-token
with:
app-id: ${{ inputs.app-id }}
private-key: ${{ inputs.app-key }}
owner: "paritytech"
repositories: "workflow-stopper"
- name: Workflow stopper - Stop all workflows
uses: octokit/request-action@v2.x
with:
route: POST /repos/paritytech/workflow-stopper/actions/workflows/stopper.yml/dispatches
ref: main
inputs: '${{ format(''{{ "github_sha": "{0}", "github_repository": "{1}", "github_ref_name": "{2}", "github_workflow_id": "{3}", "github_job_name": "{4}" }}'', github.event.pull_request.head.sha, github.repository, github.ref_name, github.run_id, github.job) }}'
env:
GITHUB_TOKEN: ${{ steps.app-token.outputs.token }}
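
The single-quoted `format()` expression in the dispatch step above is dense; it renders a JSON string for the stopper workflow's inputs. Unrolled, it produces something like the following (values are illustrative only; at runtime GitHub substitutes the real context):

```python
import json

# Illustrative values only -- at runtime these come from the GitHub context.
inputs = {
    "github_sha": "0123abc",                         # github.event.pull_request.head.sha
    "github_repository": "paritytech/polkadot-sdk",  # github.repository
    "github_ref_name": "1234/merge",                 # github.ref_name (hypothetical PR ref)
    "github_workflow_id": "11223344556",             # github.run_id
    "github_job_name": "test-linux-stable",          # github.job (hypothetical)
}
print(json.dumps(inputs))
```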
2 changes: 1 addition & 1 deletion .github/env
@@ -1 +1 @@
-IMAGE="docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-09-11-v202409111034"
+IMAGE="docker.io/paritytech/ci-unified:bullseye-1.81.0-2024-11-19-v202411281558"
36 changes: 36 additions & 0 deletions .github/scripts/check-missing-readme-generation.sh
@@ -0,0 +1,36 @@
#!/bin/bash
echo "Running script relative to `pwd`"
# Find all README.docify.md files
DOCIFY_FILES=$(find . -name "README.docify.md")

# Initialize a variable to track directories needing README regeneration
NEED_REGENERATION=""

for file in $DOCIFY_FILES; do
echo "Processing $file"

# Get the directory containing the docify file
DIR=$(dirname "$file")

# Go to the directory and run cargo build
cd "$DIR"
cargo check --features generate-readme || { echo "Readme generation for $DIR failed. Ensure the crate compiles successfully and has a 'generate-readme' feature which guards markdown compilation in the crate as follows: https://docs.rs/docify/latest/docify/macro.compile_markdown.html#conventions." && exit 1; }

# Check if README.md has any uncommitted changes
git diff --exit-code README.md

if [ $? -ne 0 ]; then
echo "Error: Found uncommitted changes in $DIR/README.md"
NEED_REGENERATION="$NEED_REGENERATION $DIR"
fi

# Return to the original directory
cd - > /dev/null
done

# Check if any directories need README regeneration
if [ -n "$NEED_REGENERATION" ]; then
echo "The following directories need README regeneration:"
echo "$NEED_REGENERATION"
exit 1
fi
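
In outline, the script fails CI if regenerating any README changes it. A rough Python equivalent of the same check, for readers less at home in shell (illustrative only, not part of this commit):

```python
import pathlib
import subprocess
import sys

def readme_drift() -> list[str]:
    """Return directories whose README.md is stale relative to README.docify.md."""
    stale = []
    for docify in pathlib.Path(".").rglob("README.docify.md"):
        crate_dir = docify.parent
        # Regenerate the README via the crate's `generate-readme` feature.
        subprocess.run(
            ["cargo", "check", "--features", "generate-readme"],
            cwd=crate_dir, check=True,
        )
        # Non-zero exit from `git diff --exit-code` means the README changed.
        diff = subprocess.run(["git", "diff", "--exit-code", "README.md"], cwd=crate_dir)
        if diff.returncode != 0:
            stale.append(str(crate_dir))
    return stale

if __name__ == "__main__":
    dirs = readme_drift()
    if dirs:
        print("The following directories need README regeneration:", *dirs, sep="\n")
        sys.exit(1)
```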
25 changes: 17 additions & 8 deletions .github/scripts/cmd/cmd.py
@@ -6,6 +6,7 @@
import argparse
import _help
import importlib.util
+import re

_HelpAction = _help._HelpAction

@@ -40,20 +41,20 @@ def setup_logging():
setup_logging()

"""
-BENCH
+BENCH
"""

bench_example = '''**Examples**:
-Runs all benchmarks
+Runs all benchmarks
%(prog)s
Runs benchmarks for pallet_balances and pallet_multisig for all runtimes which have these pallets. **--quiet** makes it output nothing to the PR but reactions
%(prog)s --pallet pallet_balances pallet_xcm_benchmarks::generic --quiet
Runs bench for all pallets for westend runtime and fails fast on first failed benchmark
%(prog)s --runtime westend --fail-fast
-Does not output anything and cleans up the previous bot's & author command triggering comments in PR
+Does not output anything and cleans up the previous bot's & author command triggering comments in PR
%(prog)s --runtime westend rococo --pallet pallet_balances pallet_multisig --quiet --clean
'''

@@ -67,14 +68,14 @@ def setup_logging():
parser_bench.add_argument('--fail-fast', help='Fail fast on first failed benchmark', action='store_true')

"""
-FMT
+FMT
"""
parser_fmt = subparsers.add_parser('fmt', help='Formats code (cargo +nightly-VERSION fmt) and configs (taplo format)')
for arg, config in common_args.items():
parser_fmt.add_argument(arg, **config)

"""
-Update UI
+Update UI
"""
parser_ui = subparsers.add_parser('update-ui', help='Updates UI tests')
for arg, config in common_args.items():
@@ -175,7 +176,15 @@ def main():
print(f'-- package_dir: {package_dir}')
print(f'-- manifest_path: {manifest_path}')
output_path = os.path.join(package_dir, "src", "weights.rs")
+# TODO: we can remove once all pallets in dev runtime are migrated to polkadot-sdk-frame
+try:
+    uses_polkadot_sdk_frame = "true" in os.popen(f"cargo metadata --locked --format-version 1 --no-deps | jq -r '.packages[] | select(.name == \"{pallet.replace('_', '-')}\") | .dependencies | any(.name == \"polkadot-sdk-frame\")'").read()
+# Empty output from the previous os.popen command
+except StopIteration:
+    uses_polkadot_sdk_frame = False
template = config['template']
+if uses_polkadot_sdk_frame and re.match(r"frame-(?:umbrella-)?weight-template\.hbs", os.path.normpath(template).split(os.path.sep)[-1]):
+    template = "substrate/.maintain/frame-umbrella-weight-template.hbs"
else:
    default_path = f"./{config['path']}/src/weights"
    xcm_path = f"./{config['path']}/src/weights/xcm"
@@ -251,4 +260,4 @@ def main():
print('🚀 Done')

if __name__ == '__main__':
-    main()
+    main()
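
The jq pipeline embedded in the `os.popen()` call added above is hard to read at a glance; a jq-free Python sketch of the same dependency check (an illustration, not the code this commit adds):

```python
import json
import subprocess

def uses_polkadot_sdk_frame(pallet: str) -> bool:
    """Return True if the pallet's crate depends on polkadot-sdk-frame."""
    crate = pallet.replace("_", "-")
    out = subprocess.check_output(
        ["cargo", "metadata", "--locked", "--format-version", "1", "--no-deps"],
        text=True,
    )
    meta = json.loads(out)
    for package in meta["packages"]:
        if package["name"] == crate:
            return any(dep["name"] == "polkadot-sdk-frame" for dep in package["dependencies"])
    return False
```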
26 changes: 13 additions & 13 deletions .github/scripts/cmd/test_cmd.py
@@ -13,7 +13,7 @@
"path": "substrate/frame",
"header": "substrate/HEADER-APACHE2",
"template": "substrate/.maintain/frame-weight-template.hbs",
-"bench_features": "runtime-benchmarks,riscv",
+"bench_features": "runtime-benchmarks",
"bench_flags": "--flag1 --flag2"
},
{
@@ -67,7 +67,7 @@ def setUp(self):
self.patcher6 = patch('importlib.util.spec_from_file_location', return_value=MagicMock())
self.patcher7 = patch('importlib.util.module_from_spec', return_value=MagicMock())
self.patcher8 = patch('cmd.generate_prdoc.main', return_value=0)

self.mock_open = self.patcher1.start()
self.mock_json_load = self.patcher2.start()
self.mock_parse_args = self.patcher3.start()
@@ -101,27 +101,27 @@ def test_bench_command_normal_execution_all_runtimes(self):
clean=False,
image=None
), [])

self.mock_popen.return_value.read.side_effect = [
"pallet_balances\npallet_staking\npallet_something\n", # Output for dev runtime
"pallet_balances\npallet_staking\npallet_something\n", # Output for westend runtime
"pallet_staking\npallet_something\n", # Output for rococo runtime - no pallet here
"pallet_balances\npallet_staking\npallet_something\n", # Output for asset-hub-westend runtime
"./substrate/frame/balances/Cargo.toml\n", # Mock manifest path for dev -> pallet_balances
]

with patch('sys.exit') as mock_exit:
import cmd
cmd.main()
mock_exit.assert_not_called()

expected_calls = [
# Build calls
call("forklift cargo build -p kitchensink-runtime --profile release --features=runtime-benchmarks,riscv"),
call("forklift cargo build -p kitchensink-runtime --profile release --features=runtime-benchmarks"),
call("forklift cargo build -p westend-runtime --profile release --features=runtime-benchmarks"),
call("forklift cargo build -p rococo-runtime --profile release --features=runtime-benchmarks"),
call("forklift cargo build -p asset-hub-westend-runtime --profile release --features=runtime-benchmarks"),

call(get_mock_bench_output(
runtime='kitchensink',
pallets='pallet_balances',
@@ -162,7 +162,7 @@ def test_bench_command_normal_execution(self):
self.mock_popen.return_value.read.side_effect = [
"pallet_balances\npallet_staking\npallet_something\n", # Output for westend runtime
]

with patch('sys.exit') as mock_exit:
import cmd
cmd.main()
@@ -171,7 +171,7 @@
expected_calls = [
# Build calls
call("forklift cargo build -p westend-runtime --profile release --features=runtime-benchmarks"),

# Westend runtime calls
call(get_mock_bench_output(
runtime='westend',
@@ -205,7 +205,7 @@ def test_bench_command_normal_execution_xcm(self):
self.mock_popen.return_value.read.side_effect = [
"pallet_balances\npallet_staking\npallet_something\npallet_xcm_benchmarks::generic\n", # Output for westend runtime
]

with patch('sys.exit') as mock_exit:
import cmd
cmd.main()
@@ -214,7 +214,7 @@
expected_calls = [
# Build calls
call("forklift cargo build -p westend-runtime --profile release --features=runtime-benchmarks"),

# Westend runtime calls
call(get_mock_bench_output(
runtime='westend',
@@ -241,7 +241,7 @@ def test_bench_command_two_runtimes_two_pallets(self):
"pallet_staking\npallet_balances\n", # Output for westend runtime
"pallet_staking\npallet_balances\n", # Output for rococo runtime
]

with patch('sys.exit') as mock_exit:
import cmd
cmd.main()
@@ -309,7 +309,7 @@ def test_bench_command_one_dev_runtime(self):

expected_calls = [
# Build calls
call("forklift cargo build -p kitchensink-runtime --profile release --features=runtime-benchmarks,riscv"),
call("forklift cargo build -p kitchensink-runtime --profile release --features=runtime-benchmarks"),
# Westend runtime calls
call(get_mock_bench_output(
runtime='kitchensink',
@@ -429,4 +429,4 @@ def test_prdoc_command(self, mock_system, mock_parse_args):
self.mock_generate_prdoc_main.assert_called_with(mock_parse_args.return_value[0])

if __name__ == '__main__':
-    unittest.main()
+    unittest.main()
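
The tests above lean on one unittest.mock pattern throughout: patch `os.popen` and feed successive `read()` calls from a `side_effect` list. A self-contained illustration of just that pattern (the `list-pallets` command is hypothetical, not code from this commit):

```python
import os
import unittest
from unittest.mock import patch, call

class PopenPatternTest(unittest.TestCase):
    def test_side_effect_feeds_successive_reads(self):
        with patch("os.popen") as mock_popen:
            # Each .read() call consumes the next canned output in order.
            mock_popen.return_value.read.side_effect = [
                "pallet_balances\n",  # first os.popen(...).read()
                "pallet_staking\n",   # second os.popen(...).read()
            ]
            first = os.popen("list-pallets dev").read()
            second = os.popen("list-pallets westend").read()
        self.assertEqual(first, "pallet_balances\n")
        self.assertEqual(second, "pallet_staking\n")
        # call_args_list records only direct calls to os.popen itself:
        self.assertEqual(
            mock_popen.call_args_list,
            [call("list-pallets dev"), call("list-pallets westend")],
        )

if __name__ == "__main__":
    unittest.main()
```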
[Diff truncated: only the first 7 of the 1,449 changed files are shown above.]
