17 changes: 17 additions & 0 deletions .github/workflows/lint.yml
@@ -26,3 +26,20 @@ jobs:

- name: Check Spelling
run: codespell

PythonLint:
name: Python Lint
runs-on: ubuntu-24.04
timeout-minutes: 5

steps:
- name: Check out Git repository
uses: actions/checkout@v6

- name: Install Requirements
run: |
python3 -m pip install --upgrade pip
pip install -r requirements.txt

- name: Lint Python Files Using Pylint
run: ./dev/pylint_check.py
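
The new job mirrors the "Python Lint" matrix entry removed from the test workflow below: it installs the pinned requirements and runs the existing checker script. A minimal sketch of reproducing the same check locally from the repository root (illustrative helper, not part of this PR):

    # reproduce_ci_lint.py -- hypothetical local equivalent of the CI steps above
    import subprocess
    import sys

    # Install the pinned lint tooling (pylint, black, ...) from requirements.txt
    subprocess.run([sys.executable, "-m", "pip", "install", "-r", "requirements.txt"], check=True)

    # Invoke the same lint driver the workflow runs
    subprocess.run([sys.executable, "dev/pylint_check.py"], check=True)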
1 change: 0 additions & 1 deletion .github/workflows/test.yml
@@ -130,7 +130,6 @@ jobs:
include:
- { name: 'C/C++', script: 'check-format.sh' , with_python: 'yes', with_submodules: 'true', all_vtr_pkgs: 'yes', pkgs: 'clang-format-18' }
- { name: 'Python', script: 'check-format-py.sh', with_python: 'yes', with_submodules: 'true', all_vtr_pkgs: 'yes', pkgs: '' }
- { name: 'Python Lint', script: 'pylint_check.py' , with_python: 'yes', with_submodules: 'false', all_vtr_pkgs: 'no', pkgs: '' }
name: 'F: ${{ matrix.name }}'
steps:

3 changes: 0 additions & 3 deletions dev/annealing_curve_plotter.py
@@ -49,7 +49,6 @@ def extract_annealing_log(log):
# Gather data lines until '# Placement took' is encountered
data_lines = []
for line in log:

# Reached the end of the placement section
if line.startswith("# Placement took"):
break
@@ -98,7 +97,6 @@ def extract_fields(header):


def main():

# Parse arguments
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
@@ -163,7 +161,6 @@ def col_name_error(name):
# Parse the data,
data = {c.name: [] for c in columns}
for line in annealing_log:

# Split fields, convert to floats
fields = [float(f) for f in line.split()]

2 changes: 0 additions & 2 deletions dev/external_subtrees.py
@@ -79,7 +79,6 @@ def main():


def load_subtree_config(config_path):

config = OrderedDict()

tree = ET.parse(config_path)
@@ -95,7 +94,6 @@ def load_subtree_config(config_path):
external_url = None
default_external_ref = None
for attrib, value in child.attrib.items():

if attrib == "name":
name = value
elif attrib == "internal_path":
13 changes: 10 additions & 3 deletions dev/pylint_check.py
@@ -208,9 +208,10 @@ def main():
# Pylint checks to ignore
ignore_list = []

# Ignore function argument indenting, which is currently incompabile with black
# https://github.com/psf/black/issues/48
ignore_list.append("C0330")
# Ignore consider-using-f-string.
# Many Python scripts in this repo use .format instead of f-strings.
# We should replace these in the future.
ignore_list.append("C0209")

# Build pylint command
cmd = ["pylint", path, "-s", "n"]
@@ -220,6 +221,12 @@ def main():
# see https://stackoverflow.com/q/21833872
cmd.append("--variable-rgx=[a-z][a-z0-9_]{0,40}$")

# Increase the max number of positional arguments.
# Many legacy functions in this codebase use 25+ positional arguments.
# We should refactor these functions to use fewer parameters or
# configuration objects in the future.
cmd.append("--max-positional-arguments=30")

# Run pylint and check output
process = subprocess.run(cmd, check=False, stdout=subprocess.PIPE)
if process.returncode:
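
For context, a minimal illustration of what the two adjusted checks react to (snippet is not from this PR; rule IDs as documented by pylint):

    # C0209 (consider-using-f-string): flags %-style and .format()-style calls
    name = "vtr"
    old_style = "{} QoR Results".format(name)  # reported as C0209
    new_style = f"{name} QoR Results"          # preferred f-string form

    # R0917 (too-many-positional-arguments): under pylint's default limit of
    # five positional parameters, a signature like this hypothetical one would
    # be reported; --max-positional-arguments=30 keeps the legacy helpers passing.
    def place_block(x, y, z, layer, rotation, mirror):
        return (x, y, z, layer, rotation, mirror)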
5 changes: 0 additions & 5 deletions dev/submit_slurm.py
@@ -22,7 +22,6 @@ def __init__(self, script, time_minutes=None, memory_mb=None, num_cores=1):


def parse_args():

parser = argparse.ArgumentParser("Helper script to submit VTR task jobs to a SLURM scheduler")

parser.add_argument(
@@ -82,7 +81,6 @@ def main():

jobs = []
for script in scripts:

time_minutes, mem_mb = get_resource_estimates(script)

time_minutes = max(time_minutes, args.min_time)
@@ -96,7 +94,6 @@ def main():
# TODO: could batch jobs here

for job in jobs:

job_name = None
match = JOB_INFO_REGEX.match(job.script)
if match:
@@ -132,7 +129,6 @@ def submit_sbatch(
submit_dir=None,
job_name=None,
):

cwd = os.getcwd()

if submit_dir:
@@ -178,7 +174,6 @@ def get_resource_estimates(filepath):

with open(filepath) as f:
for line in f:

match = TIME_EST_REGEX.match(line)
if match:
time_sec = float(match.groupdict()["time_sec"])
4 changes: 2 additions & 2 deletions dev/vtr_test_suite_verifier/verify_test_suites.py
@@ -50,7 +50,7 @@ def parse_test_suite_info(test_suite_info_file: str) -> List[TestSuite]:
}
]}
"""
with open(test_suite_info_file, "r") as file:
with open(test_suite_info_file, "r", encoding="utf-8") as file:
data = json.load(file)

assert isinstance(data, dict), "Test suite info should be a dictionary"
@@ -78,7 +78,7 @@ def parse_task_list(task_list_file: str) -> Set[str]:
the task list.
"""
tasks = set()
with open(task_list_file, "r") as file:
with open(task_list_file, "r", encoding="utf-8") as file:
for line in file:
# Strip the whitespace from the line.
line.strip()
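
The explicit encoding resolves pylint's unspecified-encoding warning (W1514): open() without an encoding falls back to the platform default, so the same file can decode differently on Windows and Linux. A minimal sketch of the before/after pattern (hypothetical file name):

    # Before: platform-dependent decoding (reported as W1514)
    with open("tasks.txt", "r") as f:
        lines = f.readlines()

    # After: deterministic UTF-8 decoding on every platform
    with open("tasks.txt", "r", encoding="utf-8") as f:
        lines = f.readlines()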
2 changes: 0 additions & 2 deletions doc/src/vtr_version.py
@@ -49,15 +49,13 @@ def get_vtr_version_info():
patch = None
prerelease = None
try:

major_regex = re.compile(r".*VTR_VERSION_MAJOR (?P<major>\d+)")
minor_regex = re.compile(r".*VTR_VERSION_MINOR (?P<minor>\d+)")
patch_regex = re.compile(r".*VTR_VERSION_PATCH (?P<patch>\d+)")
prerelease_regex = re.compile(r".*VTR_VERSION_PRERELEASE \"(?P<prerelease>.*)\"")

with open(root_cmakelists) as f:
for line in f:

match = major_regex.match(line)
if match:
major = match.group("major")
1 change: 0 additions & 1 deletion odin_ii/regression_test/parse_result/conf/hooks.py
@@ -89,7 +89,6 @@ def patch_logs(values):


def inverse_result_from_expectation(values):

should_fail = False
if "expectation" in values:
for log in values["expectation"]:
9 changes: 0 additions & 9 deletions odin_ii/regression_test/parse_result/parse_result.py
@@ -224,7 +224,6 @@ def sanitize_toml(toml_dict):
if _HOOK_HDR in toml_dict:
global HOOK_FILES
if _K_FILE in toml_dict[_HOOK_HDR]:

# we append the filenames and strip the .py extension
if not isinstance(toml_dict[_HOOK_HDR][_K_FILE], list):
HOOK_FILES.append(toml_dict[_HOOK_HDR][_K_FILE][:-3])
@@ -407,7 +406,6 @@ def load_csv_into_tbl(toml_dict, csv_file_name):


def load_json_into_tbl(toml_dict, file_name):

file_dict = OrderedDict()
with open(file_name, newline="") as json_file:
file_dict = json.load(json_file, object_pairs_hook=OrderedDict)
@@ -431,7 +429,6 @@ def load_json_into_tbl(toml_dict, file_name):


def load_log_into_tbl(toml_dict, log_file_name):

# load the hooks if there are any
# run.py
pre_hooks = []
@@ -455,7 +452,6 @@ def load_log_into_tbl(toml_dict, log_file_name):
input_values = create_tbl(toml_dict, _K_DFLT)

for line in log:

# boostrap the preprocessor here
for fn in pre_hooks:
line = fn(line)
@@ -577,7 +573,6 @@ def compare_instances(header, toml_dict, tbl_entry, golden_tbl_entry):


def regex_line(toml_dict, header, line):

# compile the regex entries
entry_list = []

@@ -628,7 +623,6 @@ def _compare(toml_file_name, golden_result_file_name, result_file_name, diff_fil
for key in golden_tbl:
error_str = []
for header in toml_dict:

if key not in tbl:
if header in golden_tbl[key]:
# if we are only running partial tests, key will not be existing
@@ -649,7 +643,6 @@ def _compare(toml_file_name, golden_result_file_name, result_file_name, diff_fil
pass

elif header not in golden_tbl[key] and header in tbl[key]:

error_str.append(mismatch_str(header, "null", tbl[key][header]))
diff[key][header] = tbl[key][header]

@@ -658,7 +651,6 @@ def _compare(toml_file_name, golden_result_file_name, result_file_name, diff_fil
# don't create the entry

else:

if compare_instances(header, toml_dict, tbl[key], golden_tbl[key]):
# use the golden value since it is within range and we don't wanna trigger a diff
diff[key][header] = golden_tbl[key][header]
@@ -768,7 +760,6 @@ def parse_shared_args(args):


def main():

this_exec = sys.argv[0]
if len(sys.argv) < 2:
print("expected: display, parse, join or compare")
1 change: 0 additions & 1 deletion odin_ii/regression_test/tools/odin_config_maker.py
@@ -19,7 +19,6 @@


def main(argv=None):

if argv is None:
argv = sys.argv

1 change: 0 additions & 1 deletion odin_ii/regression_test/tools/parse_odin_result.py
@@ -121,7 +121,6 @@ def insert_decimal(value_map, key, input_str):


def parse_line(benchmarks, line):

line.strip()
line = " ".join(line.split())

2 changes: 1 addition & 1 deletion requirements.txt
@@ -7,7 +7,7 @@ scipy
# Python linter and formatter
click==8.0.2 # Our version of black needs an older version of click (https://stackoverflow.com/questions/71673404/importerror-cannot-import-name-unicodefun-from-click)
black==24.3.0
pylint==2.7.4
pylint==4.0.4

# Surelog
orderedmultidict
1 change: 0 additions & 1 deletion run_reg_test.py
@@ -214,7 +214,6 @@ def display_qor(reg_test):
return 1
print_header("{} QoR Results".format(reg_test))
with (test_dir / "qor_geomean.txt").open("r") as results:

# create list of desired values, their unit and how to display them.
data = OrderedDict()
data["revision"] = ["", "{}"]
6 changes: 0 additions & 6 deletions vpr/scripts/compare_timing_reports.py
@@ -18,7 +18,6 @@ def __init__(self, startpoint, endpoint):


def parse_args():

parser = argparse.ArgumentParser()

parser.add_argument("first_report")
@@ -30,7 +29,6 @@ def parse_args():


def main():

args = parse_args()

print("Parsing {}".format(args.first_report))
@@ -43,7 +41,6 @@ def main():


def parse_timing_report(args, filename):

regex = re.compile(r".*?Endpoint : (?P<end>\S+).*?", re.DOTALL)

start_regex = re.compile(r"Startpoint: (?P<start>\S+)")
@@ -60,7 +57,6 @@ def parse_timing_report(args, filename):

paths = OrderedDict()
for path_lines in paths_lines:

distance = None
startpoint = None
endpoint = None
@@ -112,7 +108,6 @@


def correlate_paths(first_paths, second_paths):

first_keys = set(first_paths.keys())
second_keys = set(second_paths.keys())

@@ -137,7 +132,6 @@ def correlate_paths(first_paths, second_paths):


def plot_correlation(first_paths, second_paths, first_name, second_name):

correlated_paths, first_only, second_only = correlate_paths(first_paths, second_paths)

print("Correlated {} paths".format(len(correlated_paths)))
1 change: 0 additions & 1 deletion vpr/scripts/profile/parse_and_plot_detailed.py
@@ -58,7 +58,6 @@ def plot_results(param_names, param_options, results, params):
os.mkdir(directory)

with Chdir(directory):

export_results_to_csv(param_names, results, params)

x = results.keys()
8 changes: 4 additions & 4 deletions vtr_flow/benchmarks/system_verilog/f4pga/make_sv_flattened.py
@@ -15,7 +15,7 @@ def identify_top_module(file_list):
"""Identify the file containing the top module definition."""
top_module_regex = re.compile(r"module\s+top\s*\(")
for file in file_list:
with open(file, "r") as f:
with open(file, "r", encoding="utf-8") as f:
for line in f:
if top_module_regex.search(line):
return file
@@ -27,18 +27,18 @@ def create_flattened_file(top_file, file_list):
current_dir = os.path.basename(os.getcwd())
output_file_name = f"flattened_{current_dir}.sv"

with open(output_file_name, "w") as output_file:
with open(output_file_name, "w", encoding="utf-8") as output_file:
if top_file:
# Write the top module first
with open(top_file, "r") as top_module:
with open(top_file, "r", encoding="utf-8") as top_module:
output_file.write(f"// Content from {top_file}\n")
output_file.write(top_module.read())
output_file.write("\n\n")

# Write the rest of the files
for file in file_list:
if file != top_file:
with open(file, "r") as verilog_file:
with open(file, "r", encoding="utf-8") as verilog_file:
output_file.write(f"// Content from {file}\n")
output_file.write(verilog_file.read())
output_file.write("\n\n")
3 changes: 0 additions & 3 deletions vtr_flow/scripts/arch_gen/arch_gen.py
@@ -233,14 +233,12 @@ def xCLB(k_LUT, N_BLE, I_CLB, I_BLE, fracture_level, num_FF, crossbar_str):
next_start = 0

for lut_input in range(k_LUT):

xbegin("mux")
xprop("name", "crossbar-" + str(i_ble) + "-" + str(lut_input))

# Inputs
inputs = []
for i_cb in range(1, cb + 1):

input_str = ""
if input_idx < I_CLB:
input_str = "clb.I[" + str(input_idx) + "]"
@@ -835,7 +833,6 @@ def gen_arch(
)

else:

# K - N - I - Fi - Frac - FF - L - 45

# Non-Fractured