diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 86a0ac12b16..7dd989d76a0 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -26,3 +26,20 @@ jobs: - name: Check Spelling run: codespell + + PythonLint: + name: Python Lint + runs-on: ubuntu-24.04 + timeout-minutes: 5 + + steps: + - name: Check out Git repository + uses: actions/checkout@v6 + + - name: Install Requirements + run: | + python3 -m pip install --upgrade pip + pip install -r requirements.txt + + - name: Lint Python Files Using Pylint + run: ./dev/pylint_check.py diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 12282c84637..0bf8efd18b1 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -130,7 +130,6 @@ jobs: include: - { name: 'C/C++', script: 'check-format.sh' , with_python: 'yes', with_submodules: 'true', all_vtr_pkgs: 'yes', pkgs: 'clang-format-18' } - { name: 'Python', script: 'check-format-py.sh', with_python: 'yes', with_submodules: 'true', all_vtr_pkgs: 'yes', pkgs: '' } - - { name: 'Python Lint', script: 'pylint_check.py' , with_python: 'yes', with_submodules: 'false', all_vtr_pkgs: 'no', pkgs: '' } name: 'F: ${{ matrix.name }}' steps: diff --git a/dev/annealing_curve_plotter.py b/dev/annealing_curve_plotter.py index 4be0d6d9544..b8b65b47f15 100755 --- a/dev/annealing_curve_plotter.py +++ b/dev/annealing_curve_plotter.py @@ -49,7 +49,6 @@ def extract_annealing_log(log): # Gather data lines until '# Placement took' is encountered data_lines = [] for line in log: - # Reached the end of the placement section if line.startswith("# Placement took"): break @@ -98,7 +97,6 @@ def extract_fields(header): def main(): - # Parse arguments parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter @@ -163,7 +161,6 @@ def col_name_error(name): # Parse the data, data = {c.name: [] for c in columns} for line in annealing_log: - # Split fields, convert to floats fields = [float(f) for f in line.split()] diff --git a/dev/external_subtrees.py b/dev/external_subtrees.py index a5b6bf35b73..f0c535edcef 100755 --- a/dev/external_subtrees.py +++ b/dev/external_subtrees.py @@ -79,7 +79,6 @@ def main(): def load_subtree_config(config_path): - config = OrderedDict() tree = ET.parse(config_path) @@ -95,7 +94,6 @@ def load_subtree_config(config_path): external_url = None default_external_ref = None for attrib, value in child.attrib.items(): - if attrib == "name": name = value elif attrib == "internal_path": diff --git a/dev/pylint_check.py b/dev/pylint_check.py index b884dc77d1f..2b896e54f26 100755 --- a/dev/pylint_check.py +++ b/dev/pylint_check.py @@ -208,9 +208,10 @@ def main(): # Pylint checks to ignore ignore_list = [] - # Ignore function argument indenting, which is currently incompabile with black - # https://github.com/psf/black/issues/48 - ignore_list.append("C0330") + # Ignore consider-using-f-string. + # Many Python scripts in this repo use .format instead of f-strings. + # We should replace these in the future. + ignore_list.append("C0209") # Build pylint command cmd = ["pylint", path, "-s", "n"] @@ -220,6 +221,12 @@ def main(): # see https://stackoverflow.com/q/21833872 cmd.append("--variable-rgx=[a-z][a-z0-9_]{0,40}$") + # Increase the max number of positional arguments. + # Many legacy functions in this codebase use 25+ positional arguments. + # We should refactor these functions to use fewer parameters or + # configuration objects in the future. 
+ cmd.append("--max-positional-arguments=30") + # Run pylint and check output process = subprocess.run(cmd, check=False, stdout=subprocess.PIPE) if process.returncode: diff --git a/dev/submit_slurm.py b/dev/submit_slurm.py index f372f787e45..03aef435a79 100755 --- a/dev/submit_slurm.py +++ b/dev/submit_slurm.py @@ -22,7 +22,6 @@ def __init__(self, script, time_minutes=None, memory_mb=None, num_cores=1): def parse_args(): - parser = argparse.ArgumentParser("Helper script to submit VTR task jobs to a SLURM scheduler") parser.add_argument( @@ -82,7 +81,6 @@ def main(): jobs = [] for script in scripts: - time_minutes, mem_mb = get_resource_estimates(script) time_minutes = max(time_minutes, args.min_time) @@ -96,7 +94,6 @@ def main(): # TODO: could batch jobs here for job in jobs: - job_name = None match = JOB_INFO_REGEX.match(job.script) if match: @@ -132,7 +129,6 @@ def submit_sbatch( submit_dir=None, job_name=None, ): - cwd = os.getcwd() if submit_dir: @@ -178,7 +174,6 @@ def get_resource_estimates(filepath): with open(filepath) as f: for line in f: - match = TIME_EST_REGEX.match(line) if match: time_sec = float(match.groupdict()["time_sec"]) diff --git a/dev/vtr_test_suite_verifier/verify_test_suites.py b/dev/vtr_test_suite_verifier/verify_test_suites.py index 2e94eadcfd8..4190145a6e6 100755 --- a/dev/vtr_test_suite_verifier/verify_test_suites.py +++ b/dev/vtr_test_suite_verifier/verify_test_suites.py @@ -50,7 +50,7 @@ def parse_test_suite_info(test_suite_info_file: str) -> List[TestSuite]: } ]} """ - with open(test_suite_info_file, "r") as file: + with open(test_suite_info_file, "r", encoding="utf-8") as file: data = json.load(file) assert isinstance(data, dict), "Test suite info should be a dictionary" @@ -78,7 +78,7 @@ def parse_task_list(task_list_file: str) -> Set[str]: the task list. """ tasks = set() - with open(task_list_file, "r") as file: + with open(task_list_file, "r", encoding="utf-8") as file: for line in file: # Strip the whitespace from the line. 
line.strip() diff --git a/doc/src/vtr_version.py b/doc/src/vtr_version.py index feeab0c4c07..9517f770627 100644 --- a/doc/src/vtr_version.py +++ b/doc/src/vtr_version.py @@ -49,7 +49,6 @@ def get_vtr_version_info(): patch = None prerelease = None try: - major_regex = re.compile(r".*VTR_VERSION_MAJOR (?P\d+)") minor_regex = re.compile(r".*VTR_VERSION_MINOR (?P\d+)") patch_regex = re.compile(r".*VTR_VERSION_PATCH (?P\d+)") @@ -57,7 +56,6 @@ def get_vtr_version_info(): with open(root_cmakelists) as f: for line in f: - match = major_regex.match(line) if match: major = match.group("major") diff --git a/odin_ii/regression_test/parse_result/conf/hooks.py b/odin_ii/regression_test/parse_result/conf/hooks.py index edb676ac9cd..30a7cec111c 100644 --- a/odin_ii/regression_test/parse_result/conf/hooks.py +++ b/odin_ii/regression_test/parse_result/conf/hooks.py @@ -89,7 +89,6 @@ def patch_logs(values): def inverse_result_from_expectation(values): - should_fail = False if "expectation" in values: for log in values["expectation"]: diff --git a/odin_ii/regression_test/parse_result/parse_result.py b/odin_ii/regression_test/parse_result/parse_result.py index 0e3bb7faefc..abf4fb37fa9 100755 --- a/odin_ii/regression_test/parse_result/parse_result.py +++ b/odin_ii/regression_test/parse_result/parse_result.py @@ -224,7 +224,6 @@ def sanitize_toml(toml_dict): if _HOOK_HDR in toml_dict: global HOOK_FILES if _K_FILE in toml_dict[_HOOK_HDR]: - # we append the filenames and strip the .py extension if not isinstance(toml_dict[_HOOK_HDR][_K_FILE], list): HOOK_FILES.append(toml_dict[_HOOK_HDR][_K_FILE][:-3]) @@ -407,7 +406,6 @@ def load_csv_into_tbl(toml_dict, csv_file_name): def load_json_into_tbl(toml_dict, file_name): - file_dict = OrderedDict() with open(file_name, newline="") as json_file: file_dict = json.load(json_file, object_pairs_hook=OrderedDict) @@ -431,7 +429,6 @@ def load_json_into_tbl(toml_dict, file_name): def load_log_into_tbl(toml_dict, log_file_name): - # load the hooks if there are any # run.py pre_hooks = [] @@ -455,7 +452,6 @@ def load_log_into_tbl(toml_dict, log_file_name): input_values = create_tbl(toml_dict, _K_DFLT) for line in log: - # boostrap the preprocessor here for fn in pre_hooks: line = fn(line) @@ -577,7 +573,6 @@ def compare_instances(header, toml_dict, tbl_entry, golden_tbl_entry): def regex_line(toml_dict, header, line): - # compile the regex entries entry_list = [] @@ -628,7 +623,6 @@ def _compare(toml_file_name, golden_result_file_name, result_file_name, diff_fil for key in golden_tbl: error_str = [] for header in toml_dict: - if key not in tbl: if header in golden_tbl[key]: # if we are only running partial tests, key will not be existing @@ -649,7 +643,6 @@ def _compare(toml_file_name, golden_result_file_name, result_file_name, diff_fil pass elif header not in golden_tbl[key] and header in tbl[key]: - error_str.append(mismatch_str(header, "null", tbl[key][header])) diff[key][header] = tbl[key][header] @@ -658,7 +651,6 @@ def _compare(toml_file_name, golden_result_file_name, result_file_name, diff_fil # don't create the entry else: - if compare_instances(header, toml_dict, tbl[key], golden_tbl[key]): # use the golden value since it is within range and we don't wanna trigger a diff diff[key][header] = golden_tbl[key][header] @@ -768,7 +760,6 @@ def parse_shared_args(args): def main(): - this_exec = sys.argv[0] if len(sys.argv) < 2: print("expected: display, parse, join or compare") diff --git a/odin_ii/regression_test/tools/odin_config_maker.py 
b/odin_ii/regression_test/tools/odin_config_maker.py index c5a73f51f3a..1d06e23e5c3 100644 --- a/odin_ii/regression_test/tools/odin_config_maker.py +++ b/odin_ii/regression_test/tools/odin_config_maker.py @@ -19,7 +19,6 @@ def main(argv=None): - if argv is None: argv = sys.argv diff --git a/odin_ii/regression_test/tools/parse_odin_result.py b/odin_ii/regression_test/tools/parse_odin_result.py index 2bab39bbffb..960b386f2c1 100755 --- a/odin_ii/regression_test/tools/parse_odin_result.py +++ b/odin_ii/regression_test/tools/parse_odin_result.py @@ -121,7 +121,6 @@ def insert_decimal(value_map, key, input_str): def parse_line(benchmarks, line): - line.strip() line = " ".join(line.split()) diff --git a/requirements.txt b/requirements.txt index 3498cec63c3..ef6a2a9a103 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,7 +7,7 @@ scipy # Python linter and formatter click==8.0.2 # Our version of black needs an older version of click (https://stackoverflow.com/questions/71673404/importerror-cannot-import-name-unicodefun-from-click) black==24.3.0 -pylint==2.7.4 +pylint==4.0.4 # Surelog orderedmultidict diff --git a/run_reg_test.py b/run_reg_test.py index 152ec31cb62..0c1fe4d4bda 100755 --- a/run_reg_test.py +++ b/run_reg_test.py @@ -214,7 +214,6 @@ def display_qor(reg_test): return 1 print_header("{} QoR Results".format(reg_test)) with (test_dir / "qor_geomean.txt").open("r") as results: - # create list of desired values, their unit and how to display them. data = OrderedDict() data["revision"] = ["", "{}"] diff --git a/vpr/scripts/compare_timing_reports.py b/vpr/scripts/compare_timing_reports.py index c2423140b05..d68c0e8f919 100755 --- a/vpr/scripts/compare_timing_reports.py +++ b/vpr/scripts/compare_timing_reports.py @@ -18,7 +18,6 @@ def __init__(self, startpoint, endpoint): def parse_args(): - parser = argparse.ArgumentParser() parser.add_argument("first_report") @@ -30,7 +29,6 @@ def parse_args(): def main(): - args = parse_args() print("Parsing {}".format(args.first_report)) @@ -43,7 +41,6 @@ def main(): def parse_timing_report(args, filename): - regex = re.compile(r".*?Endpoint : (?P\S+).*?", re.DOTALL) start_regex = re.compile(r"Startpoint: (?P\S+)") @@ -60,7 +57,6 @@ def parse_timing_report(args, filename): paths = OrderedDict() for path_lines in paths_lines: - distance = None startpoint = None endpoint = None @@ -112,7 +108,6 @@ def parse_timing_report(args, filename): def correlate_paths(first_paths, second_paths): - first_keys = set(first_paths.keys()) second_keys = set(second_paths.keys()) @@ -137,7 +132,6 @@ def correlate_paths(first_paths, second_paths): def plot_correlation(first_paths, second_paths, first_name, second_name): - correlated_paths, first_only, second_only = correlate_paths(first_paths, second_paths) print("Correlated {} paths".format(len(correlated_paths))) diff --git a/vpr/scripts/profile/parse_and_plot_detailed.py b/vpr/scripts/profile/parse_and_plot_detailed.py index fff610b2f17..c5ac1cf8c39 100755 --- a/vpr/scripts/profile/parse_and_plot_detailed.py +++ b/vpr/scripts/profile/parse_and_plot_detailed.py @@ -58,7 +58,6 @@ def plot_results(param_names, param_options, results, params): os.mkdir(directory) with Chdir(directory): - export_results_to_csv(param_names, results, params) x = results.keys() diff --git a/vtr_flow/benchmarks/system_verilog/f4pga/make_sv_flattened.py b/vtr_flow/benchmarks/system_verilog/f4pga/make_sv_flattened.py index 930d5fb40a6..a5b1b85cee8 100644 --- a/vtr_flow/benchmarks/system_verilog/f4pga/make_sv_flattened.py +++ 
b/vtr_flow/benchmarks/system_verilog/f4pga/make_sv_flattened.py @@ -15,7 +15,7 @@ def identify_top_module(file_list): """Identify the file containing the top module definition.""" top_module_regex = re.compile(r"module\s+top\s*\(") for file in file_list: - with open(file, "r") as f: + with open(file, "r", encoding="utf-8") as f: for line in f: if top_module_regex.search(line): return file @@ -27,10 +27,10 @@ def create_flattened_file(top_file, file_list): current_dir = os.path.basename(os.getcwd()) output_file_name = f"flattened_{current_dir}.sv" - with open(output_file_name, "w") as output_file: + with open(output_file_name, "w", encoding="utf-8") as output_file: if top_file: # Write the top module first - with open(top_file, "r") as top_module: + with open(top_file, "r", encoding="utf-8") as top_module: output_file.write(f"// Content from {top_file}\n") output_file.write(top_module.read()) output_file.write("\n\n") @@ -38,7 +38,7 @@ def create_flattened_file(top_file, file_list): # Write the rest of the files for file in file_list: if file != top_file: - with open(file, "r") as verilog_file: + with open(file, "r", encoding="utf-8") as verilog_file: output_file.write(f"// Content from {file}\n") output_file.write(verilog_file.read()) output_file.write("\n\n") diff --git a/vtr_flow/scripts/arch_gen/arch_gen.py b/vtr_flow/scripts/arch_gen/arch_gen.py index ef68cde7af5..22bc7fb6f59 100755 --- a/vtr_flow/scripts/arch_gen/arch_gen.py +++ b/vtr_flow/scripts/arch_gen/arch_gen.py @@ -233,14 +233,12 @@ def xCLB(k_LUT, N_BLE, I_CLB, I_BLE, fracture_level, num_FF, crossbar_str): next_start = 0 for lut_input in range(k_LUT): - xbegin("mux") xprop("name", "crossbar-" + str(i_ble) + "-" + str(lut_input)) # Inputs inputs = [] for i_cb in range(1, cb + 1): - input_str = "" if input_idx < I_CLB: input_str = "clb.I[" + str(input_idx) + "]" @@ -835,7 +833,6 @@ def gen_arch( ) else: - # K - N - I - Fi - Frac - FF - L - 45 # Non-Fractured diff --git a/vtr_flow/scripts/benchtracker/flask_cors/core.py b/vtr_flow/scripts/benchtracker/flask_cors/core.py index 5a4d937b8ce..a15e3237a35 100644 --- a/vtr_flow/scripts/benchtracker/flask_cors/core.py +++ b/vtr_flow/scripts/benchtracker/flask_cors/core.py @@ -197,7 +197,6 @@ def get_cors_headers(options, request_headers, request_method, response_headers) # If there is no Access-Control-Request-Method header or if parsing # failed, do not set any additional headers if acl_request_method and acl_request_method in options.get("methods"): - # If method is not a case-sensitive match for any of the values in # list of methods do not set any additional headers and terminate # this set of steps. 
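# The recurring open() change in this patch (make_sv_flattened.py above, and most of the
# vtr_flow scripts below) addresses newer pylint releases warning when a text-mode open()
# call has no explicit encoding (the unspecified-encoding check). A minimal sketch of the
# pattern, using a hypothetical example.sv path that is not part of the patch:

def read_source(path="example.sv"):
    """Read a text file with an explicit encoding, as the open() calls in this diff now do."""
    with open(path, "r", encoding="utf-8") as f:  # encoding named explicitly to satisfy pylint
        return f.read()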
diff --git a/vtr_flow/scripts/benchtracker/interface_db.py b/vtr_flow/scripts/benchtracker/interface_db.py index 6f9c150988d..7068cbe6e71 100755 --- a/vtr_flow/scripts/benchtracker/interface_db.py +++ b/vtr_flow/scripts/benchtracker/interface_db.py @@ -82,7 +82,6 @@ def retrieve_data(x_param, y_param, filters, tasks, dbname="results.db"): if t == 0: filter_command = "WHERE " for f in range(len(filters)): - filter_command += str(filters[f]) sql_val_args.extend(filters[f].args) diff --git a/vtr_flow/scripts/benchtracker/populate_db.py b/vtr_flow/scripts/benchtracker/populate_db.py index 358ef190fd7..bf203c83aea 100755 --- a/vtr_flow/scripts/benchtracker/populate_db.py +++ b/vtr_flow/scripts/benchtracker/populate_db.py @@ -361,7 +361,6 @@ def load_next_task(params): # walk operations; all take params and run as arguments def check_result_exists(params, run): - run_number = get_trailing_num(run) if not os.path.isfile(get_result_file(params, params.run_prefix, run_number)): parsed_call = params.parse_script.format( diff --git a/vtr_flow/scripts/blif_splicer.py b/vtr_flow/scripts/blif_splicer.py index 08058163f65..35905b5a1b7 100755 --- a/vtr_flow/scripts/blif_splicer.py +++ b/vtr_flow/scripts/blif_splicer.py @@ -48,7 +48,6 @@ reverse=True, ) ): - # Parse each input blif into the following strings, which represent # different sections of the output file. diff --git a/vtr_flow/scripts/download_ispd.py b/vtr_flow/scripts/download_ispd.py index cffe4962d2c..7e1318f7154 100755 --- a/vtr_flow/scripts/download_ispd.py +++ b/vtr_flow/scripts/download_ispd.py @@ -70,7 +70,6 @@ def parse_args(): def main(): - args = parse_args() try: @@ -154,7 +153,6 @@ def verify(tar_gz_filename, md5_url): def md5_matches(filename_to_check, reference_md5): - local_md5 = hashlib.md5() with open(filename_to_check, "rb") as f: # Read in chunks to avoid reading the whole file into memory diff --git a/vtr_flow/scripts/download_symbiflow.py b/vtr_flow/scripts/download_symbiflow.py index b45f8fc9695..a909d87385f 100755 --- a/vtr_flow/scripts/download_symbiflow.py +++ b/vtr_flow/scripts/download_symbiflow.py @@ -112,7 +112,8 @@ def download_url(filename, url): """ Downloads the symbiflow release """ - latest_package_url = request.urlopen(url).read().decode("utf-8") + with request.urlopen(url) as response: + latest_package_url = response.read().decode("utf-8") print("Downloading latest package:\n{}".format(latest_package_url)) request.urlretrieve(latest_package_url, filename, reporthook=download_progress_callback) diff --git a/vtr_flow/scripts/download_titan.py b/vtr_flow/scripts/download_titan.py index a4494545540..5bc2bc8c348 100755 --- a/vtr_flow/scripts/download_titan.py +++ b/vtr_flow/scripts/download_titan.py @@ -79,7 +79,6 @@ def parse_args(): def main(): - args = parse_args() try: @@ -287,7 +286,6 @@ def determine_sdc_name(dirpath): def extract_callback(members, args): for tarinfo in members: for benchmark_subdir in get_benchmark_subdirs(args): - if compare_versions(args.titan_version, "2") >= 1: # if it is a 2.0.0 titan release or later use device family in the benchmark directory device_families = get_device_families(args) diff --git a/vtr_flow/scripts/ispd2vtr.py b/vtr_flow/scripts/ispd2vtr.py index ad4a5c2cdc7..2320d366f0c 100755 --- a/vtr_flow/scripts/ispd2vtr.py +++ b/vtr_flow/scripts/ispd2vtr.py @@ -91,7 +91,6 @@ def is_output(cell_type): def parse_args(): - parser = argparse.ArgumentParser( "Script to convert ISPD FPGA Bookshelf format benchmarks into VTR compatible formats" ) @@ -125,7 +124,6 @@ def main(): def 
parse_aux(aux_filepath): - dirname = os.path.dirname(aux_filepath) aux_info = AuxInfo() @@ -227,7 +225,6 @@ def bookshelf2blif(aux_info, out_filepath, merge_ports=False): pins = sorted(node_pins[node_name], key=lambda x: x.pin_name) for i, node_pin in enumerate(pins): - print >> f, " {}={}".format(node_pin.pin_name, node_pin.net_name), if i != len(node_pins[node_name]) - 1: @@ -282,7 +279,6 @@ def parse_lib(aux_info, merge_ports=False): cell = Cell() cell.name = cell_name.strip("\n") elif line.startswith(" PIN"): - pin = CellPin() tokens = line.split() assert len(tokens) >= 3 @@ -415,7 +411,6 @@ def parse_scl(aux_info): with open(aux_info.scl_filepath) as f: for line in f: - # SITEMAP if line.startswith("SITEMAP"): sitemap_token, width, height = line.split() @@ -529,7 +524,6 @@ def add_arch_block(root, site, resources, cells): def create_resource(resource, cells): - pb_type = ET.Element("pb_type") pb_type.set("name", resource.name) diff --git a/vtr_flow/scripts/noc/noc_benchmark_test.py b/vtr_flow/scripts/noc/noc_benchmark_test.py index 6ffc59d4469..ce8fd239ad3 100755 --- a/vtr_flow/scripts/noc/noc_benchmark_test.py +++ b/vtr_flow/scripts/noc/noc_benchmark_test.py @@ -5,7 +5,6 @@ """ from concurrent.futures import ThreadPoolExecutor -from distutils.log import error import os from pathlib import Path import sys @@ -248,42 +247,39 @@ def process_vpr_output(vpr_output_file): log after after. """ - open_file = open(vpr_output_file) - # datastructure below stors the placement data in a dictionary placement_data = {} - # process each line from the VPR output - for line in open_file: - - # we only care about three lines where the - # placement costs, noc costs and - # placement times are located - # and post route info - # so identify those lines below - if PLACEMENT_COST_PHRASE in line: - process_placement_costs(placement_data, line) + with open(vpr_output_file, "r", encoding="utf-8") as open_file: + # process each line from the VPR output + for line in open_file: + # we only care about three lines where the + # placement costs, noc costs and + # placement times are located + # and post route info + # so identify those lines below + if PLACEMENT_COST_PHRASE in line: + process_placement_costs(placement_data, line) - if POST_PLACE_CRITICAL_PATH_DELAY_PHRASE in line: - process_placement_cpd(placement_data, line) + if POST_PLACE_CRITICAL_PATH_DELAY_PHRASE in line: + process_placement_cpd(placement_data, line) - if NOC_PLACEMENT_COST_PHRASE in line: - process_noc_placement_costs(placement_data, line) + if NOC_PLACEMENT_COST_PHRASE in line: + process_noc_placement_costs(placement_data, line) - if PLACEMENT_TIME in line: - process_placement_time(placement_data, line) + if PLACEMENT_TIME in line: + process_placement_time(placement_data, line) - if POST_ROUTED_WIRE_LENGTH_SEGMENTS_PHRASE in line: - process_post_route_wirelength(placement_data, line) + if POST_ROUTED_WIRE_LENGTH_SEGMENTS_PHRASE in line: + process_post_route_wirelength(placement_data, line) - if POST_ROUTED_FREQ_PHRASE in line: - process_post_route_freq(placement_data, line) + if POST_ROUTED_FREQ_PHRASE in line: + process_post_route_freq(placement_data, line) - if ROUTE_TIME_PHRASE in line: - process_route_time(placement_data, line) + if ROUTE_TIME_PHRASE in line: + process_route_time(placement_data, line) # close and delete the output file - open_file.close() os.remove(vpr_output_file) return placement_data @@ -300,7 +296,7 @@ def process_placement_costs(placement_data, line_with_data): # quick check that the regex worked properly if 
(found_placement_metrics is None) or (found_placement_metrics.lastindex != 3): - raise Exception("Placement cost not written out correctly") + raise ValueError("Placement cost not written out correctly") # we know the order of the different placement costs # as they are found within the extracted metric list above. @@ -324,7 +320,7 @@ def process_placement_cpd(placement_data, line_with_data): # quick check that the regex worked properly if (found_placement_metrics is None) or (found_placement_metrics.lastindex != 1): - raise Exception("Placement cpd not written out correctly") + raise ValueError("Placement cpd not written out correctly") # there should be only one element, since we are only grabbing the placement critical path delay placement_data[PLACE_CPD] = float(found_placement_metrics.group(1)) @@ -344,7 +340,7 @@ def process_noc_placement_costs(placement_data, line_with_data): # quick check that the regex worked properly if (found_placement_metrics is None) or (found_placement_metrics.lastindex != 3): - raise Exception("Placement noc cost not written out correctly") + raise ValueError("Placement noc cost not written out correctly") # we know the order of the different noc placement costs as they are found # within the extracted metric list above. @@ -368,7 +364,7 @@ def process_placement_time(placement_data, line_with_data): # quick check that the regex worked properly if (found_placement_metrics is None) or (found_placement_metrics.lastindex != 1): - raise Exception("Placement time not written out correctly") + raise ValueError("Placement time not written out correctly") # there should be only one element, since we are only grabbing the placement time placement_data[PLACE_TIME] = float(found_placement_metrics.group(1)) @@ -385,7 +381,7 @@ def process_post_route_wirelength(placement_data, line_with_data): # check if regex worked correctly if (found_routing_metrics is None) or (found_routing_metrics.lastindex != 1): - raise Exception("Routed wirelength not written out correctly") + raise ValueError("Routed wirelength not written out correctly") # there should be only one element, since we are only grabbing the routeed wirelength placement_data[POST_ROUTED_WIRE_LENGTH_SEGMENTS] = float(found_routing_metrics.group(1)) @@ -402,7 +398,7 @@ def process_post_route_freq(placement_data, line_with_data): # check if regex worked correctly if (found_routing_metrics is None) or (found_routing_metrics.lastindex != 1): - raise Exception("Routed frequency not written out correctly") + raise ValueError("Routed frequency not written out correctly") # there should be only one element, since we are only grabbing the routed frequency placement_data[POST_ROUTED_FREQ] = float(found_routing_metrics.group(1)) @@ -419,7 +415,7 @@ def process_route_time(placement_data, line_with_data): # check if regex worked correctly if (found_routing_metrics is None) or (found_routing_metrics.lastindex != 1): - raise Exception("Routing time not written out correctly") + raise ValueError("Routing time not written out correctly") # there should be only one element, since we are only grabbing the route time placement_data[ROUTE_TIME] = float(found_routing_metrics.group(1)) @@ -534,11 +530,9 @@ def run_vpr_command_and_store_output(vpr_output_file, vpr_run_command): """ # create the file that will store the VPR output - vpr_output = open(vpr_output_file, "w") - # run VPR. 
Will timeout after 10 hours (should be good for any design) - subprocess.run(vpr_run_command, check=True, stdout=vpr_output, timeout=36000) - # close the output file - vpr_output.close() + with open(vpr_output_file, "w", encoding="utf-8") as vpr_output: + # run VPR. Will timeout after 10 hours (should be good for any design) + subprocess.run(vpr_run_command, check=True, stdout=vpr_output, timeout=36000) def process_vpr_runs(run_args, num_of_seeds, route): @@ -569,7 +563,6 @@ def process_vpr_runs(run_args, num_of_seeds, route): latency_weight = float(run_args[0][1][12]) for single_run_args in run_args: - # get the placement metrics for the current run curr_vpr_place_data = process_vpr_output(vpr_output_file=single_run_args[0]) @@ -636,17 +629,14 @@ def print_results(parsed_data, design_file, user_args): results_file_name = (os.path.splitext(user_args.flow_file))[-2] results_file_name = (results_file_name.split("/"))[-1] results_file_name = os.path.join(os.getcwd(), results_file_name + ".txt") - results_file = open(results_file_name, "w+") - - # write out placement info individually in separate lines - results_file.write("Design File: {0}\n".format(design_file)) - results_file.write("Flows File: {0}\n".format(user_args.flow_file)) + with open(results_file_name, "w+", encoding="utf-8") as results_file: + # write out placement info individually in separate lines + results_file.write("Design File: {0}\n".format(design_file)) + results_file.write("Flows File: {0}\n".format(user_args.flow_file)) - results_file.write("------------ Place & Route Info ------------\n") - for metric, value in parsed_data.items(): - results_file.write("{0}: {1}\n".format(metric, value)) - - results_file.close() + results_file.write("------------ Place & Route Info ------------\n") + for metric, value in parsed_data.items(): + results_file.write("{0}: {1}\n".format(metric, value)) def execute_vpr_and_process_output(vpr_command_list, num_of_seeds, num_of_threads, route): @@ -662,7 +652,6 @@ def execute_vpr_and_process_output(vpr_command_list, num_of_seeds, num_of_thread run_args = [] for single_vpr_command in vpr_command_list: - # generate VPR output file_name # the constants represent the positions of the variables in the command list design_file_name = single_vpr_command[2] @@ -690,7 +679,6 @@ def execute_vpr_and_process_output(vpr_command_list, num_of_seeds, num_of_thread if __name__ == "__main__": - try: # Load the arguments args = noc_test_command_line_parser().parse_args(sys.argv[1:]) @@ -715,7 +703,6 @@ def execute_vpr_and_process_output(vpr_command_list, num_of_seeds, num_of_thread for single_design, single_design_flows_file, single_design_name in zip( design_files_in_dir, design_flow_files_in_dir, design_names_in_dir ): - # generate all the vpr commands vpr_commands = gen_vpr_run_command( design_file=single_design, diff --git a/vtr_flow/scripts/python_libs/vtr/log_parse.py b/vtr_flow/scripts/python_libs/vtr/log_parse.py index 17c3ef6fdb6..dbe1b338277 100644 --- a/vtr_flow/scripts/python_libs/vtr/log_parse.py +++ b/vtr_flow/scripts/python_libs/vtr/log_parse.py @@ -7,12 +7,7 @@ from pathlib import Path import abc -try: - # Try for the fast c-based version first - import xml.etree.cElementTree as ET -except ImportError: - # Fall back on python implementation - import xml.etree.ElementTree as ET +import xml.etree.ElementTree as ET from vtr.error import InspectError from vtr import load_config_lines @@ -321,11 +316,9 @@ def load_parse_patterns(parse_config_filepath): parse_patterns = OrderedDict() for line in 
load_config_lines(parse_config_filepath): - components = line.split(";") if len(components) == 3 or len(components) == 4: - name = components[0] filepath = components[1] regex_str = components[2] @@ -431,7 +424,7 @@ def load_parse_results(parse_results_filepath): if not Path(parse_results_filepath).exists(): return parse_results - with open(parse_results_filepath) as file: + with open(parse_results_filepath, "r", encoding="utf-8") as file: for lineno, row in enumerate(file): if row[0] == "+": row = row[1:] @@ -530,7 +523,7 @@ def determine_min_w(log_filename): determines the minimum width. """ min_w_regex = re.compile(r"\s*Best routing used a channel width factor of (?P\d+).") - with open(log_filename) as file: + with open(log_filename, "r", encoding="utf-8") as file: for line in file: match = min_w_regex.match(line) if match: diff --git a/vtr_flow/scripts/python_libs/vtr/parse_vtr_flow.py b/vtr_flow/scripts/python_libs/vtr/parse_vtr_flow.py index e238f2d9bb8..465345c1d43 100755 --- a/vtr_flow/scripts/python_libs/vtr/parse_vtr_flow.py +++ b/vtr_flow/scripts/python_libs/vtr/parse_vtr_flow.py @@ -33,7 +33,7 @@ def parse_file_and_update_results(filename, patterns, results): if len(filepaths) == 1: assert Path(filepaths[0]).exists - with open(filepaths[0], "r") as file: + with open(filepaths[0], "r", encoding="utf-8") as file: for line in file: for parse_pattern in patterns: match = parse_pattern.regex().match(line) diff --git a/vtr_flow/scripts/python_libs/vtr/parse_vtr_task.py b/vtr_flow/scripts/python_libs/vtr/parse_vtr_task.py index ad73a8053df..f46f8314fd9 100755 --- a/vtr_flow/scripts/python_libs/vtr/parse_vtr_task.py +++ b/vtr_flow/scripts/python_libs/vtr/parse_vtr_task.py @@ -222,17 +222,17 @@ def parse_task(config, config_jobs, flow_metrics_basename=FIRST_PARSE_FILE, alt_ job.qor_parse_command()[0] = work_dir if job.parse_command(): parse_filepath = str(PurePath(work_dir) / flow_metrics_basename) - with open(parse_filepath, "w+") as parse_file: + with open(parse_filepath, "w+", encoding="utf-8") as parse_file: with redirect_stdout(parse_file): parse_vtr_flow(job.parse_command()) if job.second_parse_command(): parse_filepath = str(PurePath(work_dir) / SECOND_PARSE_FILE) - with open(parse_filepath, "w+") as parse_file: + with open(parse_filepath, "w+", encoding="utf-8") as parse_file: with redirect_stdout(parse_file): parse_vtr_flow(job.second_parse_command()) if job.qor_parse_command(): parse_filepath = str(PurePath(work_dir) / QOR_PARSE_FILE) - with open(parse_filepath, "w+") as parse_file: + with open(parse_filepath, "w+", encoding="utf-8") as parse_file: with redirect_stdout(parse_file): parse_vtr_flow(job.qor_parse_command()) max_arch_len = max(max_arch_len, len(job.arch())) @@ -249,8 +249,7 @@ def parse_task(config, config_jobs, flow_metrics_basename=FIRST_PARSE_FILE, alt_ def parse_files(config_jobs, run_dir, flow_metrics_basename=FIRST_PARSE_FILE): """Parse the result files from the give jobs""" task_parse_results_filepath = str(PurePath(run_dir) / flow_metrics_basename) - with open(task_parse_results_filepath, "w") as out_f: - + with open(task_parse_results_filepath, "w", encoding="utf-8") as out_f: # Start the header header = True @@ -262,7 +261,7 @@ def parse_files(config_jobs, run_dir, flow_metrics_basename=FIRST_PARSE_FILE): # which we prefix to each line of the task result file job_parse_results_filepath = Path(job.work_dir(run_dir)) / flow_metrics_basename if job_parse_results_filepath.exists(): - with open(job_parse_results_filepath) as in_f: + with 
open(job_parse_results_filepath, "r", encoding="utf-8") as in_f: lines = in_f.readlines() assert len(lines) == 2 if header: @@ -324,7 +323,6 @@ def check_golden_results_for_task(config, alt_tasks_dir=None): ) ) else: - # Load the pass requirements file # Load the task's parse results @@ -451,8 +449,8 @@ def check_two_files( continue first_fail = True + # pylint: disable-next=consider-using-dict-items for metric in pass_requirements.keys(): - if not metric in second_metrics: print("Warning: Metric {} missing from {} results".format(metric, second_name)) continue diff --git a/vtr_flow/scripts/python_libs/vtr/task.py b/vtr_flow/scripts/python_libs/vtr/task.py index e5b1378250c..198b72780db 100644 --- a/vtr_flow/scripts/python_libs/vtr/task.py +++ b/vtr_flow/scripts/python_libs/vtr/task.py @@ -400,12 +400,10 @@ def find_longest_task_description(configs): if config.script_params_list_add: for param in config.script_params_list_add: arch_circuit_len = len(str(PurePath(arch) / circuit / "common_" / param)) - if arch_circuit_len > longest: - longest = arch_circuit_len + longest = max(longest, arch_circuit_len) else: arch_circuit_len = len(str(PurePath(arch) / circuit / "common")) - if arch_circuit_len > longest: - longest = arch_circuit_len + longest = max(longest, arch_circuit_len) return longest diff --git a/vtr_flow/scripts/python_libs/vtr/util.py b/vtr_flow/scripts/python_libs/vtr/util.py index ab4c9a02041..cfa80322878 100644 --- a/vtr_flow/scripts/python_libs/vtr/util.py +++ b/vtr_flow/scripts/python_libs/vtr/util.py @@ -181,6 +181,7 @@ def run_system_command( modified_environ = os.environ.copy() modified_environ["PWD"] = str(temp_dir) + # pylint: disable-next=consider-using-with proc = subprocess.Popen( cmd, stdout=subprocess.PIPE, # We grab stdout @@ -202,7 +203,6 @@ def run_system_command( # Read from subprocess output for line in proc.stdout: - # Send to log file log_f.write(line) @@ -263,7 +263,7 @@ def pretty_print_table(file, border=False): table = PrettyTable() table.border = border reader = None - with open(file, "r") as csv_file: + with open(file, "r", encoding="utf-8") as csv_file: reader = csv.reader(csv_file, delimiter="\t") first = True for row in reader: @@ -277,7 +277,7 @@ def pretty_print_table(file, border=False): first = False else: table.add_row(row) - with open(file, "w+") as out_file: + with open(file, "w+", encoding="utf-8") as out_file: print(table, file=out_file) @@ -294,14 +294,13 @@ def write_tab_delimitted_csv(filepath, rows): columns = OrderedDict() for row in rows: for key, value in row.items(): - if key not in columns: columns[key] = max(len(key), len(str(value))) else: columns[key] = max(columns[key], len(str(value))) # Write the elements - with open(filepath, "w+") as file: + with open(filepath, "w+", encoding="utf-8") as file: writer = csv.writer(file, delimiter="\t") # Write out the header @@ -323,7 +322,7 @@ def load_tab_delimited_csv(filepath): loads a tab delimited csv as a list of ordered dictionaries """ data = [] - with open(filepath) as file: + with open(filepath, "r", encoding="utf-8") as file: reader = csv.reader(file, delimiter="\t") header = [] @@ -359,10 +358,10 @@ def file_replace(filename, search_replace_dict): searches file for specified values and replaces them with specified values. 
""" lines = [] - with open(filename, "r") as file: + with open(filename, "r", encoding="utf-8") as file: lines = file.readlines() - with open(filename, "w") as file: + with open(filename, "w", encoding="utf-8") as file: for line in lines: for search, replace in search_replace_dict.items(): line = line.replace(search, str(replace)) @@ -383,7 +382,7 @@ def load_list_file(list_file: str) -> List[str]: potentially with '#' comments """ values = [] - with open(list_file) as file: + with open(list_file, "r", encoding="utf-8") as file: for line in file: line = line.strip() # Handle comments @@ -413,7 +412,7 @@ def load_config_lines(filepath, allow_includes=True): blank_regex = re.compile(r"^\s*$") try: - with open(filepath) as file: + with open(filepath, "r", encoding="utf-8") as file: for line in file: # Trim '\n' line = line.strip() @@ -457,7 +456,7 @@ def verify_file(file, file_type, should_exist=True): if not isinstance(file, Path): file = Path(file) if should_exist and not file.is_file(): - raise Exception( + raise FileNotFoundError( "{file_type} file does not exist: {file} ".format(file_type=file_type, file=file) ) diff --git a/vtr_flow/scripts/qor_compare.py b/vtr_flow/scripts/qor_compare.py index f595d97b672..04ca3e1004e 100755 --- a/vtr_flow/scripts/qor_compare.py +++ b/vtr_flow/scripts/qor_compare.py @@ -52,7 +52,6 @@ def parse_args(): - parser = argparse.ArgumentParser( description="Utility script to generate spreadsheets comparing VTR metric files" ) @@ -124,7 +123,6 @@ def parse_args(): def main(): - args = parse_args() wb = openpyxl.Workbook() @@ -182,7 +180,6 @@ def main(): def make_transpose(dest_sheet, ref_sheet): - for ref_row in range(ref_sheet.min_row, ref_sheet.max_row + 1): for ref_col in range(ref_sheet.min_column, ref_sheet.max_column + 1): ref_cell = ref_sheet.cell(row=ref_row, column=ref_col) @@ -196,11 +193,9 @@ def make_transpose(dest_sheet, ref_sheet): def make_summary(summary_sheet, ratio_sheet, ratio_ranges, keys): dest_row = 1 for i, (ratio_name, cell_range) in enumerate(ratio_ranges.items()): - dest_col = 1 if i == 0: # First range, copy headers - dest_col += 1 ratio_row = cell_range.min_row for j, ratio_col in enumerate(range(cell_range.min_col, cell_range.max_col + 1)): @@ -239,7 +234,6 @@ def make_summary(summary_sheet, ratio_sheet, ratio_ranges, keys): def make_ratios(ratio_sheet, raw_sheets, keys, metrics): - ref_sheet_title = list(raw_sheets.keys())[0] ref_sheet = raw_sheets[ref_sheet_title] # Get the first raw sheet @@ -264,7 +258,6 @@ def make_ratios(ratio_sheet, raw_sheets, keys, metrics): def fill_ratio(ws, raw_sheet, ref_sheet, dest_row, dest_col, keys, metrics): - cell_range = CellRange( min_row=dest_row, max_row=dest_row, min_col=dest_col, max_col=dest_col, title=ws.title ) @@ -286,7 +279,6 @@ def fill_ratio(ws, raw_sheet, ref_sheet, dest_row, dest_col, keys, metrics): row_offset = 0 for ref_row in range(2, ref_sheet.max_row + 1): - ref_cell = ref_sheet.cell(row=ref_row, column=ref_col) raw_cell = raw_sheet.cell(row=ref_row, column=ref_col) @@ -343,7 +335,6 @@ def link_sheet_header(dest_sheet, ref_sheet, row, values=None): # Copy header dest_col = 1 for col in range(1, ref_sheet.max_column + 1): - ref_cell = ref_sheet.cell(row=1, column=col) if values != None and ref_cell.value.strip() not in values: @@ -357,7 +348,6 @@ def link_sheet_header(dest_sheet, ref_sheet, row, values=None): def dataframe_to_sheet(wb, df, sheet_name): - # Add to sheet ws = wb.create_sheet(title=sheet_name) for row in dataframe_to_rows(df, index=False, header=True): @@ -367,7 
+357,6 @@ def dataframe_to_sheet(wb, df, sheet_name): def safe_sheet_title(raw_title): - # Keep only file name and drop file path safe_title = raw_title.split("/")[-1] @@ -389,7 +378,6 @@ def safe_sheet_title(raw_title): def get_task_result_files(task_name): - task_path = os.path.join("..", "tasks", task_name) if not os.path.isdir(task_path): raise RuntimeError("Task is not found at {}".format(task_path)) diff --git a/vtr_flow/scripts/run_vtr_flow.py b/vtr_flow/scripts/run_vtr_flow.py index 0c8ec314145..7c27c2ef5db 100755 --- a/vtr_flow/scripts/run_vtr_flow.py +++ b/vtr_flow/scripts/run_vtr_flow.py @@ -506,7 +506,7 @@ def get_max_memory_usage(temp_dir): def get_memory_usage(logfile): """Extracts the memory usage from the *.out log files""" - with open(logfile, "r") as fpmem: + with open(logfile, "r", encoding="utf-8") as fpmem: for line in fpmem.readlines(): if "Maximum resident set size" in line: return int(line.split()[-1]) @@ -593,7 +593,7 @@ def vtr_command_main(arg_list, prog=None): error, args.expect_fail, args.verbose ) - except KeyboardInterrupt as error: + except KeyboardInterrupt: print("{} received keyboard interrupt".format(prog)) exit_status = 4 return_status = exit_status @@ -797,6 +797,7 @@ def except_vtr_error(error, expect_fail, verbose): error_status = None actual_error = None exit_status = None + return_status = None if isinstance(error, vtr.CommandError): # An external command failed return_status = 1 diff --git a/vtr_flow/scripts/run_vtr_task.py b/vtr_flow/scripts/run_vtr_task.py index 1bdd9feb236..5335f64a6cb 100755 --- a/vtr_flow/scripts/run_vtr_task.py +++ b/vtr_flow/scripts/run_vtr_task.py @@ -405,10 +405,10 @@ def create_run_script(job, work_dir): Path(work_dir).mkdir(parents=True) run_script_file = Path(work_dir) / "vtr_flow.sh" template = str(paths.flow_template_path) - with open(template, "r") as in_file: + with open(template, "r", encoding="utf-8") as in_file: template_string = in_file.readlines() template_string = "".join(template_string) - with open(run_script_file, "w+") as out_file: + with open(run_script_file, "w+", encoding="utf-8") as out_file: print( template_string.format( estimated_time=runtime_estimate, @@ -486,7 +486,7 @@ def run_vtr_flow_process(queue, run_dirs, job, script) -> None: out = None vtr_flow_out = str(PurePath(work_dir) / "vtr_flow.out") - with open(vtr_flow_out, "w+") as out_file: + with open(vtr_flow_out, "w+", encoding="utf-8") as out_file: with redirect_stdout(out_file): if script == "run_vtr_flow.py": out = run_vtr_flow(job.run_command(), str(paths.run_vtr_flow_path)) @@ -497,7 +497,7 @@ def run_vtr_flow_process(queue, run_dirs, job, script) -> None: stdout=out_file, ) - with open(vtr_flow_out, "r") as out_file: + with open(vtr_flow_out, "r", encoding="utf-8") as out_file: for line in out_file.readlines(): print(line, end="") diff --git a/vtr_flow/scripts/tuning_runs/control_runs.py b/vtr_flow/scripts/tuning_runs/control_runs.py index 743b7e3796e..05be0618e19 100755 --- a/vtr_flow/scripts/tuning_runs/control_runs.py +++ b/vtr_flow/scripts/tuning_runs/control_runs.py @@ -61,9 +61,9 @@ def parse_results(input_path): sys.exit(1) # Read the parse_results.txt file and write to full_res.csv - with open(os.path.join(largest_run_path, "parse_results.txt"), "r") as txt_file, open( - full_res_csv_path, "w", newline="" - ) as csv_file: + with open( + os.path.join(largest_run_path, "parse_results.txt"), "r", encoding="utf-8" + ) as txt_file, open(full_res_csv_path, "w", newline="", encoding="utf-8") as csv_file: reader = csv.reader(txt_file, 
delimiter="\t") writer = csv.writer(csv_file) @@ -241,7 +241,7 @@ def main(): os.makedirs(os.path.dirname(config_path), exist_ok=True) # Append the lines to the config file - with open(config_path, "a") as file: + with open(config_path, "a", encoding="utf-8") as file: file.writelines(lines) print(f"Appended lines to {config_path}") diff --git a/vtr_flow/scripts/upgrade_arch.py b/vtr_flow/scripts/upgrade_arch.py index 6d6b0aa85ed..3c03a090fb5 100755 --- a/vtr_flow/scripts/upgrade_arch.py +++ b/vtr_flow/scripts/upgrade_arch.py @@ -194,7 +194,6 @@ def add_model_timing(arch): default_models = frozenset([".input", ".output", ".latch", ".names"]) primitive_timing_specs = {} for prim_pb in prim_pbs: - blif_model = prim_pb.attrib["blif_model"] if blif_model in default_models: @@ -271,7 +270,6 @@ def upgrade_fc_overrides(arch): ), "Can only have pin or seg overrides (not both)" for old_pin_override in old_fc_pin_overrides: - port = old_pin_override.attrib["name"] fc_type = old_pin_override.attrib["fc_type"] fc_val = old_pin_override.attrib["fc_val"] @@ -287,7 +285,6 @@ def upgrade_fc_overrides(arch): changed = True for old_seg_override in old_fc_seg_overrides: - seg_name = old_seg_override.attrib["name"] in_val = old_seg_override.attrib["in_val"] out_val = old_seg_override.attrib["out_val"] @@ -305,7 +302,6 @@ def upgrade_fc_overrides(arch): clocks = pb_type.findall("./clock") for input in inputs + clocks: - new_attrib = OrderedDict() new_attrib["port_name"] = input.attrib["name"] new_attrib["segment_name"] = seg_name @@ -315,7 +311,6 @@ def upgrade_fc_overrides(arch): fc_override = ET.SubElement(fc_tag, "fc_override", attrib=new_attrib) for output in outputs: - new_attrib = OrderedDict() new_attrib["port_name"] = output.attrib["name"] new_attrib["segment_name"] = seg_name @@ -516,7 +511,6 @@ def upgrade_device_layout(arch): col_empty_spec.tail = "\n" + 2 * INDENT elif loc_type == "fill": - comment = ET.Comment("Fill with '{}'".format(type_name)) device_auto.append(comment) comment.tail = "\n" + 2 * INDENT @@ -599,9 +593,7 @@ def upgrade_pinlocations(arch): height = int(pb_type.attrib["height"]) if width == 1: - if pinlocations.attrib["pattern"] == "custom": - for loc in pinlocations: if loc.tag is ET.Comment: continue @@ -741,7 +733,6 @@ def upgrade_switch_types(arch): assert switchlist_tag is not None for switch_tag in switchlist_tag.findall("./switch"): - switch_type = switch_tag.attrib["type"] if switch_type in ["buffered", "pass_trans"]: @@ -932,7 +923,6 @@ def add_missing_comb_model_internal_timing_edges(arch): model_tags = arch.findall("./models/model") for model_tag in model_tags: - input_clock_tags = model_tag.findall("./input_ports/port[@is_clock='1']") if len(input_clock_tags) > 0: continue # Sequential primitive -- no change @@ -953,7 +943,6 @@ def add_missing_comb_model_internal_timing_edges(arch): output_port_names.append(output_port_tag.attrib["name"]) for input_port_tag in input_port_tags: - assert "combinational_sink_ports" not in input_port_tag input_port_tag.attrib["combinational_sink_ports"] = " ".join(output_port_names)
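# Taken together, the dev/pylint_check.py hunks near the top of this diff build a pylint
# command along the lines of the sketch below. The placeholder target path and the way the
# ignore list is joined into a --disable flag are assumptions for illustration only; the real
# script assembles more paths and options than shown here.
import subprocess

ignore_list = ["C0209"]  # consider-using-f-string: .format() is still widespread in this repo

cmd = ["pylint", "dev/submit_slurm.py", "-s", "n"]  # placeholder target path
cmd.append("--disable=" + ",".join(ignore_list))    # assumed wiring of the ignore list
cmd.append("--variable-rgx=[a-z][a-z0-9_]{0,40}$")  # relax the default variable-name pattern
cmd.append("--max-positional-arguments=30")         # legacy functions take 25+ positional args
process = subprocess.run(cmd, check=False, stdout=subprocess.PIPE)
if process.returncode:
    print(process.stdout.decode())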