
Commit

Merge pull request #63 from efabless/main
update verilator directory
M0stafaRady authored Nov 20, 2023
2 parents f1900de + 1860d0e commit fa708ba
Showing 193 changed files with 33,690 additions and 2,494 deletions.
49 changes: 49 additions & 0 deletions .github/workflows/caravel_cocotb.yml
@@ -0,0 +1,49 @@
name: Caravel Cocotb CI

on:
  push:
    branches:
      - '*'
    paths:
      - 'cocotb/caravel_cocotb/CI/**'
      - '.github/workflows/caravel_cocotb.yml'
      - 'cocotb/caravel_cocotb/scripts/**'
      - '!cocotb/caravel_cocotb/**/' # Exclude directories within the specified directory

  pull_request:

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v2

      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.8

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1

      - name: Install dependencies
        run: |
          cd $GITHUB_WORKSPACE/cocotb
          python3 -m pip install --upgrade pip
          pip install -r requirements.txt
          python3 -m pip install --upgrade --no-cache-dir volare

      - name: Install caravel_cocotb
        run: |
          cd $GITHUB_WORKSPACE/cocotb
          pip install .

      - name: Run main.py
        run: |
          cd $GITHUB_WORKSPACE/cocotb/caravel_cocotb/CI
          python3 main.py
19 changes: 13 additions & 6 deletions cocotb/README.md
@@ -21,9 +21,14 @@ This project aims to provide a user-friendly environment for adding and running

## How to install caravel_cocotb

This is temporary; it will soon be released to PyPI.
```bash
pip install caravel-cocotb
```

Or, to install from the repo:
```bash
git clone git@github.com:efabless/caravel-sim-infrastructure.git
cd caravel-sim-infrastructure/cocotb
git checkout <release>
pip install .
cd ../..
@@ -118,7 +123,7 @@ Commonly used APIs for firmware can be found in [`C_api`](docs/build/html/C_api.
<!-- start create a test include3 -->
# Test Examples
Refer to this [directory](https://github.com/efabless/caravel_user_project/tree/cocotb_dev/verilog/dv/cocotb) for test examples generated for a 16-bit counter.
Refer to this [directory](https://github.com/efabless/caravel_user_project/tree/main/verilog/dv/cocotb) for test examples generated for a 16-bit counter.
<!-- end create a test include3 -->
@@ -145,7 +150,7 @@ usage: caravel_cocotb [-h] [-test TEST [TEST ...]] [-design_info DESIGN_INFO]
[-sdf_setup] [-clk CLK] [-lint]
[-macros MACROS [MACROS ...]] [-sim_path SIM_PATH]
[-verbosity VERBOSITY] [-openframe] [-check_commits]
[-no_docker]
[-no_docker] [-compile]
Run cocotb tests
@@ -191,6 +196,7 @@ optional arguments:
-openframe use openframe for the simulation rather than caravel
-check_commits use to check if repos are up to date
-no_docker run iverilog without docker
-compile force recompilation
```
<!-- end run a test include -->
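The newly added `-compile` switch (exercised by `check_compile` in the CI checker added further down) forces recompilation instead of reusing a shared build. As a purely illustrative sketch, the CLI could also be driven programmatically; the test name below is a placeholder, and only flags that appear in the usage text above are used:

```python
import subprocess

# Hypothetical invocation of the caravel_cocotb CLI; "counter_test" is a
# placeholder test name, and -compile forces recompilation for this run.
subprocess.run(["caravel_cocotb", "-test", "counter_test", "-compile"], check=True)
```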

@@ -249,12 +255,13 @@ New directory named ``sim`` would be created under ``<repo root>/cocotb/`` or to

| sim                               # directory generated when a test is run
│   ├── <tag>                       # tag of the run
│   │   ├── compilation             # directory containing all logs and build files related to the RTL compilation
│   │   │   └── compilation.log     # log file with all the commands used to run iverilog and any compilation errors or warnings
│   │   ├── <sim type>-<test name>  # test result directory containing all logs and waves related to the test
│   │   │   └── <test name>.hex     # hex file used in running this test
│   │   │   └── firmware.hex        # hex file used in running this test
│   │   │   └── <test name>.log     # log file generated by cocotb
│   │   │   └── compilation.log     # log file with all the commands used to run iverilog and any compilation errors or warnings
│   │   │   └── firmware.log        # log file with all the commands used to compile the C code and any compilation errors or warnings
│   │   │   └── <test name>.vcd     # waves; can be opened with gtkwave
│   │   │   └── waves.vcd           # waves; can be opened with gtkwave
│   │   │   └── rerun.py            # script to rerun the test
│   │   └── command.log             # command used for this run
│   │   └── repos_info.log          # contains information about the repos used to run these tests
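To make the layout above concrete (the listing is truncated in this hunk), here is a small hypothetical helper, not part of this change, that resolves per-test artifact paths for a given run tag; `cocotb_root`, `tag`, `sim_type`, and `test_name` are illustrative parameters.

```python
import os


def find_test_artifacts(cocotb_root: str, tag: str, sim_type: str, test_name: str):
    """Return existing artifact paths for one test run.

    Hypothetical helper based on the ``sim`` layout above; not part of this PR.
    """
    test_dir = os.path.join(cocotb_root, "sim", tag, f"{sim_type}-{test_name}")
    artifacts = {
        "waves": os.path.join(test_dir, "waves.vcd"),             # open with gtkwave
        "test_log": os.path.join(test_dir, f"{test_name}.log"),   # cocotb log
        "firmware_log": os.path.join(test_dir, "firmware.log"),   # C compilation log
    }
    # Only report files that actually exist for this run
    return {name: path for name, path in artifacts.items() if os.path.exists(path)}


# Example (paths are illustrative):
# find_test_artifacts("caravel_user_project/verilog/dv/cocotb", "run_1", "RTL", "counter_test")
```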
14 changes: 14 additions & 0 deletions cocotb/caravel_cocotb/CI/base_class.py
@@ -0,0 +1,14 @@
import logging


class BaseClass:
    def __init__(self):
        self.configure_logger()

    def configure_logger(self):
        self.logger = logging.getLogger(f"{self.__class__.__name__}")
        self.logger.setLevel(logging.INFO)
        formatter = logging.Formatter("%(levelname)s@%(asctime)s: [%(name)s] %(message)s")
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(formatter)
        self.logger.addHandler(console_handler)
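For orientation, the class is consumed by subclassing: a subclass calls `super().__init__()` and then logs through `self.logger`, which is exactly how the `Checker` class added below uses it. A minimal hypothetical sketch (`DemoCheck` is not part of this change):

```python
from base_class import BaseClass


class DemoCheck(BaseClass):
    """Hypothetical subclass showing the intended usage of BaseClass."""

    def __init__(self):
        super().__init__()  # sets up self.logger with the shared format

    def run(self):
        # messages come out as: INFO@<time>: [DemoCheck] starting demo check
        self.logger.info("starting demo check")


if __name__ == "__main__":
    DemoCheck().run()
```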
169 changes: 169 additions & 0 deletions cocotb/caravel_cocotb/CI/checker.py
@@ -0,0 +1,169 @@
from base_class import BaseClass
import yaml
import os
import xml.etree.ElementTree as ET


class Checker(BaseClass):
    def __init__(self):
        super().__init__()
        pass

    def check_command(self, command):
        self.command = command
        self.logger.info(f"[Checker] Check test {self.command}")
        all_tests_paths = self.get_expected_tests_dir()
        is_pass = self.check_exist_pass(all_tests_paths)
        if not is_pass:
            # don't do other checks in sdf sim because it didn't run
            return
        self.check_configs()
        self.check_seed(all_tests_paths)
        self.check_dump_wave(all_tests_paths)
        self.check_compile(all_tests_paths)
        self.check_macros(all_tests_paths)
        return

    def get_expected_tests_dir(self):
        self.logger.debug(f"[get_expected_tests_dir] Checking tests exist and pass test = {self.command.test} testlist = {self.command.test_list} sim = {self.command.sim}")
        if self.command.sim is None:
            self.logger.debug("Sim is None")
            sims = ["RTL"]
        else:
            sims = self.command.sim.split()
        expected_tests = list()
        # to get tests run by -t
        if self.command.test is None:
            self.logger.debug("Test is None")
            tests = None
        else:
            tests = self.command.test.split()
            for sim_type in sims:
                for test_name in tests:
                    if sim_type != "GL_SDF":
                        expected_tests.append(f"{sim_type}-{test_name}")
                    else:
                        expected_tests.append(f"{sim_type}-{test_name}-{'nom-t' if self.command.corner is None else self.command.corner}")
        # to get tests run by -tl
        if self.command.test_list is not None:
            with open(self.command.test_list, 'r') as yaml_file:
                yaml_data = yaml_file.read()
            tests = yaml.safe_load(yaml_data).get("Tests", [])
            for test in tests:
                test_name = test.get("name")
                sim_type = test.get("sim")
                if sim_type != "GL_SDF":
                    expected_tests.append(f"{sim_type}-{test_name}")
                else:
                    expected_tests.append(f"{sim_type}-{test_name}-{'nom-t' if self.command.corner is None else self.command.corner}")
        self.tag_path = f"{self.command.run_location if self.command.sim_path is None else self.command.sim_path}/sim/{self.command.tag}/"
        full_tests_paths = [self.tag_path + expected_test for expected_test in expected_tests]
        self.logger.info(f"[get_expected_tests_dir] Expected tests: {full_tests_paths}")
        return full_tests_paths

    def check_exist_pass(self, all_tests_paths):
        for test_path in all_tests_paths:
            if os.path.exists(test_path):
                self.logger.info(f"[check_exist_pass] Test {test_path} exists")
            else:
                raise ValueError(f"[check_exist_pass] Test {test_path} doesn't exist")
                return False

            if os.path.exists(f"{test_path}/passed"):
                self.logger.info(f"[check_exist_pass] Test {test_path} passed")
            else:
                if self.command.sim != "GL_SDF":
                    raise ValueError(f"[check_exist_pass] Test {test_path} failed")
                return False
        return True

    def check_configs(self):
        # read design_info.yaml
        design_info_path = self.command.design_info if self.command.design_info is not None else f"{self.command.run_location}/design_info.yaml"
        with open(design_info_path, 'r') as yaml_file:
            yaml_data = yaml_file.read()
        design_info = yaml.safe_load(yaml_data)
        caravel_root_exp = design_info.get("CARAVEL_ROOT")
        mgmt_core_exp = design_info.get("MCW_ROOT")
        pdk_root_exp = design_info.get("PDK_ROOT") + "/" + design_info.get("PDK")
        pdk_exp = design_info.get("PDK")[:-1]
        clk_exp = self.command.clk if self.command.clk is not None else design_info.get("clk")
        max_err_exp = int(self.command.max_error) if self.command.max_error is not None else 3

        # read configs.yaml generated
        config_path = f"{self.tag_path}/configs.yaml"
        with open(config_path, 'r') as yaml_file:
            yaml_data = yaml_file.read()
        configs = yaml.safe_load(yaml_data)
        if clk_exp != configs.get("clock"):
            raise ValueError(f"[check_configs] Clock mismatch: {clk_exp} != {configs.get('clock')}")
        if max_err_exp != int(configs.get("max_err")):
            raise ValueError(f"[check_configs] Max error mismatch: {max_err_exp} != {configs.get('max_err')}")
        if caravel_root_exp != configs.get("CARAVEL_ROOT"):
            raise ValueError(f"[check_configs] Caravel root mismatch: {caravel_root_exp} != {configs.get('CARAVEL_ROOT')}")
        if mgmt_core_exp != configs.get("MCW_ROOT"):
            raise ValueError(f"[check_configs] Management core mismatch: {mgmt_core_exp} != {configs.get('MCW_ROOT')}")
        if pdk_root_exp != configs.get("PDK_ROOT"):
            raise ValueError(f"[check_configs] PDK root mismatch: {pdk_root_exp} != {configs.get('PDK_ROOT')}")
        if pdk_exp != configs.get("PDK"):
            raise ValueError(f"[check_configs] PDK mismatch: {pdk_exp} != {configs.get('PDK')}")

    def check_seed(self, all_tests_paths):
        if self.command.seed is not None:
            for test_path in all_tests_paths:
                seed_file_path = f"{test_path}/seed.xml"
                with open(seed_file_path, 'r') as xml_file:
                    xml_content = xml_file.read()

                # Parse the XML content
                root = ET.fromstring(xml_content)

                # Find the random_seed property and extract its value
                seed_value = None
                for testsuite in root.findall(".//testsuite"):
                    seed_elem = testsuite.find(".//property[@name='random_seed']")
                    if seed_elem is not None:
                        seed_value = int(seed_elem.get("value"))
                        break
                if seed_value == self.command.seed:
                    self.logger.info(f"[check_seed] Test run with correct seed {seed_value}")
                else:
                    raise ValueError(f"[check_seed] Test run with incorrect seed {seed_value} instead of {self.command.seed}")

    def check_dump_wave(self, all_tests_paths):
        dump_wave_exp = False if self.command.no_wave is not None else True
        for test_path in all_tests_paths:
            if os.path.exists(f"{test_path}/waves.vcd") and not dump_wave_exp:
                raise ValueError(f"[check_dump_wave] Test {test_path} dump waves while -no_wave switch is used")
            elif not os.path.exists(f"{test_path}/waves.vcd") and dump_wave_exp:
                raise ValueError(f"[check_dump_wave] Test {test_path} doesn't dump waves while -no_wave switch isn't used")
            else:
                self.logger.info(f"[check_dump_wave] Test {test_path} has wave {'dumped' if dump_wave_exp else 'not dumped'} waves as expected")

    def check_compile(self, all_tests_paths):
        is_compile_shared = True if self.command.compile is None else False
        for test_path in all_tests_paths:
            simvpp_exist = os.path.exists(f"{test_path}/sim.vvp")
            if simvpp_exist and is_compile_shared:
                raise ValueError(f"[check_compile] Test {test_path} compile is not shared while -compile switch is used")
            elif not simvpp_exist and not is_compile_shared:
                raise ValueError(f"[check_compile] Test {test_path} shared compile while -compile switch isn't used simvpp exist = {simvpp_exist} is shared = {is_compile_shared}")
            else:
                self.logger.info(f"[check_compile] Test {test_path} has compile {'shared' if is_compile_shared else 'not shared'} as expected")

    def check_macros(self, all_tests_paths):
        macro_used = self.command.macro
        if macro_used is not None:
            pattern_to_search = f"Found {macro_used} effect"
            for test_path in all_tests_paths:
                # search pattern in all .log files
                for filename in os.listdir(test_path):
                    if filename.endswith('.log'):
                        file_log = os.path.join(test_path, filename)
                        with open(file_log, 'r') as log_file:
                            content = log_file.read()
                        if pattern_to_search in content:
                            self.logger.info(f"[check_macros] Test {test_path} uses macro {macro_used} correctly")
                            return True
                raise ValueError(f"[check_macros] Test {test_path} doesn't use macro {macro_used}")
        return False
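`check_command` expects a command object exposing the attributes read above (`test`, `test_list`, `sim`, `corner`, `run_location`, `sim_path`, `tag`, `design_info`, `clk`, `max_error`, `seed`, `no_wave`, `compile`, and `macro`). The caller (`main.py`) is not shown in this diff, so the wiring below is only a hypothetical sketch with placeholder values:

```python
from types import SimpleNamespace

from checker import Checker

# Hypothetical stand-in for the command object built by the CI runner (not
# shown in this diff); every attribute read by Checker must be present.
command = SimpleNamespace(
    test="counter_test",      # tests passed via -t (space separated)
    test_list=None,           # or a path to a -tl YAML test list
    sim="RTL",                # simulation types, e.g. "RTL" or "GL_SDF"
    corner=None,              # SDF corner, only used for GL_SDF runs
    run_location="caravel_user_project/verilog/dv/cocotb",  # placeholder path
    sim_path=None,            # overrides run_location when set
    tag="run_1",              # tag of the run, i.e. sim/<tag>/
    design_info=None,         # defaults to <run_location>/design_info.yaml
    clk=None, max_error=None, seed=None,
    no_wave=None,             # not None means -no_wave was used
    compile=None,             # not None means -compile was used
    macro=None,               # macro expected to appear in the logs
)

checker = Checker()
checker.check_command(command)  # raises ValueError on the first failed check
```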
