From de245cf10e27dc7741adcdb6e50f82d2e08353b5 Mon Sep 17 00:00:00 2001
From: tonyaiuto
Date: Sun, 21 Dec 2025 15:02:51 -0500
Subject: [PATCH 1/5] Fix the BCR metadata template in main so we can test the
 push workflow

---
 .bcr/metadata.template.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.bcr/metadata.template.json b/.bcr/metadata.template.json
index c33094154..5406ae9da 100644
--- a/.bcr/metadata.template.json
+++ b/.bcr/metadata.template.json
@@ -3,7 +3,7 @@
   "maintainers": [
     {
       "email": "aiuto@google.com",
-      "name": "Tony Aiuto",
+      "name": "Tony Aiuto"
     }
   ],
   "repository": ["github:bazelbuild/rules_pkg"],

From 7b0579093de5e4375af1f2abf65075d0d0fa04be Mon Sep 17 00:00:00 2001
From: tonyaiuto
Date: Sun, 21 Dec 2025 15:04:36 -0500
Subject: [PATCH 2/5] Fix the maintainer email

---
 .bcr/metadata.template.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.bcr/metadata.template.json b/.bcr/metadata.template.json
index 5406ae9da..fc840c63f 100644
--- a/.bcr/metadata.template.json
+++ b/.bcr/metadata.template.json
@@ -2,7 +2,7 @@
   "homepage": "https://github.com/bazelbuild/rules_pkg",
   "maintainers": [
     {
-      "email": "aiuto@google.com",
+      "email": "aiuto@datadoghq.com",
       "name": "Tony Aiuto"
     }
   ],

From c9a06a33b470a724d05d160a837c41a7fe60bd69 Mon Sep 17 00:00:00 2001
From: tonyaiuto
Date: Tue, 6 Jan 2026 00:12:38 -0500
Subject: [PATCH 3/5] Initial version of an rpm2cpio tool.

- Pure Python.
- It requires the lzma and gzip modules for the decompressor, so we need
  Python 3.3 or newer.
- The cpio dumper is a separate tool for now. It should become a library
  method.
---
 tools/rpm/cpio.py     | 200 +++++++++++++++++++++
 tools/rpm/rpm2cpio.py | 396 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 596 insertions(+)
 create mode 100644 tools/rpm/cpio.py
 create mode 100755 tools/rpm/rpm2cpio.py

diff --git a/tools/rpm/cpio.py b/tools/rpm/cpio.py
new file mode 100644
index 000000000..26afa806a
--- /dev/null
+++ b/tools/rpm/cpio.py
@@ -0,0 +1,200 @@
+"""cpio format reader.
+
+Decent docs at
+https://github.com/libyal/dtformats/blob/main/documentation/Copy%20in%20and%20out%20(CPIO)%20archive%20format.asciidoc
+"""
+
+from collections import namedtuple
+import sys
+
+DEBUG = 1
+
+
+class CpioReader(object):
+    # TODO: maybe support compressed archives? These are exactly equivalent to
+    # the corresponding archive passed through a 14-bit compress(1).
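+
+    # Each archive member is summarized as an Info tuple: `file_size` is the
+    # unpadded length of the member's content, while `data_size` is the space
+    # the content occupies in the archive, including any alignment padding.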
+
+    Info = namedtuple(
+        "CpioInfo", "index, path, uid, gid, mode, mod_time, file_size, data_size"
+    )
+
+    def __init__(self, stream):
+        self.stream = stream
+
+    def next(self, return_content=False):
+        header = self.read_header()
+        if not header:
+            return None
+        to_read = header.data_size if header.data_size else header.file_size
+        if DEBUG > 1:
+            print(f"{header} => to_read:{to_read}")
+        file_data = self.stream.read(to_read)
+        if return_content:
+            return header, file_data[: header.file_size]
+        else:
+            return header
+
+    def read_ascii_int(self, size, base=10):
+        try:
+            return int(self.stream.read(size).decode("ASCII"), base=base)
+        except Exception as e:
+            print(e)
+            return -1
+
+    def read_header(self):
+        first_6 = self.stream.read(6)
+        if DEBUG > 1:
+            print("Got header", first_6)
+        if first_6 == b"070707":
+            return self.read_odc_ascii_header(magic=first_6)
+        if first_6 == b"070701" or first_6 == b"070702":
+            return self.read_newc_ascii_header(magic=first_6)
+        else:
+            print(f"Wonky header {first_6}")
+            print("128 after:", str(self.stream.peek(128)))
+            # Keep going on the assumption that it is a newc-style archive.
+            return self.read_newc_ascii_header(magic=first_6)
+
+    def read_odc_ascii_header(self, magic):
+        # Size  Description
+        # 6     magic     Magic number 070707
+        #   /usr/share/file/magic/archive:0  short   070707   cpio archive
+        #   /usr/share/file/magic/archive:0  short   0143561  byte-swapped cpio archive
+        #   /usr/share/file/magic/archive:0  string  070707   ASCII cpio archive (pre-SVR4 or odc)
+        #   /usr/share/file/magic/archive:0  string  070701   ASCII cpio archive (SVR4 with no CRC)
+        #   /usr/share/file/magic/archive:0  string  070702   ASCII cpio archive (SVR4 with CRC)
+        # 6     dev       Device where file resides
+        # 6     ino       I-number of file
+        # 6     mode      File mode
+        # 6     uid       Owner user ID
+        # 6     gid       Owner group ID
+        # 6     nlink     Number of links to file
+        # 6     rdev      Device major/minor for special file
+        # 11    mtime     Modify time of file
+        # 6     namesize  Length of file name
+        # 11    filesize  Length of file
+        # After the header information, namesize bytes of path name are stored;
+        # namesize includes the NUL byte at the end of the path name. After
+        # this, filesize bytes of the file contents are recorded.
+        assert magic[0:5] == b"07070"
+        magic = int(magic.decode("ASCII"))
+        dev = self.read_ascii_int(size=6)
+        inode = self.read_ascii_int(size=6)
+        mode = self.read_ascii_int(size=6)
+        uid = self.read_ascii_int(size=6)
+        gid = self.read_ascii_int(size=6)
+        _nlinks = self.read_ascii_int(size=6)
+        _rdev = self.read_ascii_int(size=6)
+        mod_time = self.read_ascii_int(size=11)
+        name_size = self.read_ascii_int(size=6)
+        file_size = self.read_ascii_int(size=11)
+        # name_size counts the terminating NUL, which we strip from the path.
+        path = self.stream.read(name_size)[:-1].decode("utf-8")
+        if DEBUG > 0:
+            print(f"magic: {magic}, dev/node: {dev}/{inode} mode: {mode:o}, {path}")
+        if path == "TRAILER!!!":
+            return None
+        # odc has no alignment padding, so data_size equals file_size.
+        return self.Info(
+            index=inode,
+            path=path,
+            uid=uid,
+            gid=gid,
+            mode=mode,
+            mod_time=mod_time,
+            file_size=file_size,
+            data_size=file_size,
+        )
+
+    def read_newc_ascii_header(self, magic):
+        # Size      Description
+        # 6         magic: "070701" or "070702"
+        # 8         inode index. 1-n and then 0 at TRAILER.
+        # 8         mode (permissions and type)
+        # 8         numeric user
+        # 8         numeric group
+        # 8         n_links
+        # 8         modification time
+        # 8         file_size: file size
+        # 8         device major number
+        # 8         device minor number
+        # 8         block or character special device major number
+        # 8         block or character special device minor number
+        # 8         path_len: size of the path string, including the terminating NUL
+        # 8         checksum: contains a Sum32 if magic is "070702", or 0 otherwise
+        # path_len  path string (with NUL)
+        # .         4 byte alignment padding, set to 0
+        # file_size file data
+        # .         4 byte alignment padding, set to 0
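+        #
+        # Worked example: if the fixed fields end at offset 110 and path_len
+        # is 10, the name block is 4 * ((110 + 10 + 3) // 4) - 110 = 10 bytes,
+        # since 120 is already 4-byte aligned.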
+
+        assert magic[0:5] == b"07070"
+        magic = magic.decode("ASCII")
+        inode = self.read_ascii_int(size=8, base=16)
+        mode = self.read_ascii_int(size=8, base=16)
+        uid = self.read_ascii_int(size=8, base=16)
+        gid = self.read_ascii_int(size=8, base=16)
+        _nlinks = self.read_ascii_int(size=8, base=16)
+        mod_time = self.read_ascii_int(size=8, base=16)
+        file_size = self.read_ascii_int(size=8, base=16)
+        _dev_major = self.read_ascii_int(size=8, base=16)
+        _dev_minor = self.read_ascii_int(size=8, base=16)
+        _blk_major = self.read_ascii_int(size=8, base=16)
+        _blk_minor = self.read_ascii_int(size=8, base=16)
+        path_len = self.read_ascii_int(size=8, base=16)
+        _checksum = self.read_ascii_int(size=8, base=16)
+        at = self.stream.tell()
+        path_block_len = 4 * ((at + path_len + 3) // 4) - at
+        raw_path = self.stream.read(path_block_len)
+        try:
+            path = raw_path[: (path_len - 1)].decode("utf-8")
+        except Exception:
+            path = str(raw_path[: (path_len - 1)])
+        if DEBUG > 0:
+            print(
+                f"magic: {magic}, inode: {inode}, mode: {mode:o}, size: {file_size}, {path}"
+            )
+        if DEBUG > 1:
+            print(f"  path_len: {path_len}, block_len: {path_block_len}")
+        if path == "TRAILER!!!":
+            return None
+
+        # Now we are positioned at the file data.
+        data_size = 4 * ((file_size + 3) // 4)
+        info = self.Info(
+            index=inode,
+            path=path,
+            uid=uid,
+            gid=gid,
+            mode=mode,
+            mod_time=mod_time,
+            file_size=file_size,
+            data_size=data_size,
+        )
+        return info
+
+    def read_binary_header(self, magic):
+        # Binary headers contain the same information in 2-byte (short) and
+        # 4-byte (long) integers as follows:
+        #
+        # Bytes  Field Name
+        # 2      magic
+        # 2      dev
+        # 2      ino
+        # 2      mode
+        # 2      uid
+        # 2      gid
+        # 2      nlink
+        # 2      rdev
+        # 4      mtime
+        # 2      namesize
+        # 2      reserved
+        # 4      filesize
+        # After the header information comes the file name, with namesize
+        # rounded up to the nearest 2-byte boundary. Then the file contents
+        # appear as in the ASCII archive. The byte ordering of the 2- and
+        # 4-byte integers in the binary format is machine-dependent, so
+        # portability of this format is not easily guaranteed.
+        print("Binary CPIO is not supported.")
+
+
+def main(args):
+    with open(args[1], "rb") as inp:
+        cpio = CpioReader(inp)
+        i = 0
+        while True:
+            i = i + 1
+            if i < 5:
+                # Extract the first few members to files xxx1..xxx4 as a
+                # smoke test of content extraction.
+                header, content = cpio.next(return_content=True)
+                if not header:
+                    break
+                with open("xxx%d" % i, "wb") as out:
+                    out.write(content)
+            else:
+                # After that, just walk the headers.
+                header = cpio.next()
+                if not header:
+                    break
+
+
+if __name__ == "__main__":
+    main(sys.argv)
diff --git a/tools/rpm/rpm2cpio.py b/tools/rpm/rpm2cpio.py
new file mode 100755
index 000000000..1f323fc43
--- /dev/null
+++ b/tools/rpm/rpm2cpio.py
@@ -0,0 +1,396 @@
+#!/usr/bin/env python3
+# Copyright 2026 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Archive reader library for .rpm file testing.
+
+Gleaned from: http://ftp.rpm.org/max-rpm/s1-rpm-file-format-rpm-file-format.html
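+
+An RPM file is laid out as a 96-byte lead, a signature header, the main
+header, and then the compressed cpio payload, which is what this tool
+extracts.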
+"""
+
+import argparse
+from collections import namedtuple
+import bz2
+import json
+import lzma
+import sys
+import zlib
+
+DEBUG = 1
+
+
+RpmLead = namedtuple(
+    "RpmLead", "magic, major, minor, type, arch, name, os, signature_type"
+)
+
+RPM_MAGIC = b"\xed\xab\xee\xdb"
+RPM_TYPE_BINARY = 0
+RPM_TYPE_SOURCE = 1
+
+# This is probably YAGNI
+RPM_ARCH_X86 = 1
+RPM_ARCH_ALPHA = 2
+RPM_ARCH_SPARC = 3
+RPM_ARCH_MIPS = 4
+RPM_ARCH_PPC = 5
+RPM_ARCH_68K = 6
+RPM_ARCH_SGI = 7
+
+ARCH_2_S = {
+    RPM_ARCH_X86: "x86",
+    RPM_ARCH_ALPHA: "alpha",
+    RPM_ARCH_SPARC: "sparc",
+    RPM_ARCH_MIPS: "mips",
+    RPM_ARCH_PPC: "ppc",
+    RPM_ARCH_68K: "68000",
+    RPM_ARCH_SGI: "sgi",
+}
+
+RPM_HEADER_MAGIC = b"\x8e\xad\xe8"
+HEADER_INDEX_ENTRY_SIZE = 16
+
+HEADER_NULL = 0
+HEADER_CHAR = 1
+HEADER_INT8 = 2
+HEADER_INT16 = 3
+HEADER_INT32 = 4
+HEADER_INT64 = 5
+HEADER_STRING = 6
+HEADER_BIN = 7
+HEADER_STRING_ARRAY = 8
+
+# Some interesting tags.
+RPMTAG_SUMMARY = 1004
+RPMTAG_DESCRIPTION = 1005
+RPMTAG_BUILDTIME = 1006
+RPMTAG_BUILDHOST = 1007
+RPMTAG_INSTALLTIME = 1008
+RPMTAG_SIZE = 1009
+RPMTAG_DISTRIBUTION = 1010
+RPMTAG_VENDOR = 1011
+RPMTAG_LICENSE = 1014
+RPMTAG_OS = 1021
+RPMTAG_ARCH = 1022
+RPMTAG_PAYLOADCOMPRESSOR = 1125
+
+
+def _read_network_byte(stream):
+    return int.from_bytes(stream.read(1), byteorder="big")
+
+
+def _read_network_short(stream):
+    return int.from_bytes(stream.read(2), byteorder="big")
+
+
+def _read_network_long(stream):
+    return int.from_bytes(stream.read(4), byteorder="big")
+
+
+def _read_string(stream, max_len):
+    """Read a NUL-terminated (ASCIZ) string from a fixed-size field."""
+    buf = stream.read(max_len)
+    for i in range(max_len):
+        if buf[i] == 0:
+            return buf[0:i].decode("utf-8")
+    return buf.decode("utf-8")
+
+
+def _get_int32(buf, pos):
+    return int.from_bytes(buf[pos : pos + 4], byteorder="big")
+
+
+def _get_n_ints(buf, offset, count, width):
+    if count == 1:
+        return int.from_bytes(buf[offset : offset + width], byteorder="big")
+    ret = []
+    for i in range(count):
+        pos = offset + i * width
+        ret.append(int.from_bytes(buf[pos : pos + width], byteorder="big"))
+    return ret
+
+
+def _get_null_terminated_string(buf, pos):
+    ret = []
+    while True:
+        c = buf[pos]
+        pos += 1
+        if c == 0:
+            return bytes(ret).decode("utf-8")
+        ret.append(c)
+
+
+class RpmReader(object):
+    BLOCKSIZE = 65536
+
+    def __init__(self, stream, verbose=False):
+        self.stream = stream
+        self.verbose = verbose
+        self.compression = None  # compression of the cpio payload
+        self.have_read_headers = False
+
+    def log(self, s):
+        if self.verbose:
+            print(s, file=sys.stderr)
+
+    def _get_rpm_lead(self):
+        """Get the legacy lead header."""
+        magic = self.stream.read(4)
+        major = _read_network_byte(self.stream)
+        minor = _read_network_byte(self.stream)
+        type = _read_network_short(self.stream)
+        arch = _read_network_short(self.stream)
+        name = _read_string(self.stream, 66)
+        os = _read_network_short(self.stream)
+        signature_type = _read_network_short(self.stream)
+        _ = self.stream.read(16)  # reserved; pads the lead to 96 bytes
+        return RpmLead(
+            magic=magic,
+            major=major,
+            minor=minor,
+            type=type,
+            arch=arch,
+            name=name,
+            os=os,
+            signature_type=signature_type,
+        )
+
+    def _read_header_start(self):
+        """The start of a header section is 16 bytes long."""
+        magic = self.stream.read(3)
+        if magic != RPM_HEADER_MAGIC:
+            raise ValueError(
+                f"expected header magic '{RPM_HEADER_MAGIC}', got '{magic}'"
+            )
+        version = _read_network_byte(self.stream)
+        if version != 1:
+            raise ValueError(f"expected header version '1', got '{version}'")
+        _ = self.stream.read(4)  # skip reserved bytes
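+        # The rest of the 16-byte header start: the number of index entries,
+        # then the byte length of the data store that follows the index.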
+        n_entries = _read_network_long(self.stream)
+        data_len = _read_network_long(self.stream)
+        return n_entries, data_len
+
+    def _get_rpm_signature(self):
+        n_entries, data_len = self._read_header_start()
+        headers = []
+        for i in range(n_entries):
+            tag = _read_network_long(self.stream)
+            type = _read_network_long(self.stream)
+            offset = _read_network_long(self.stream)
+            count = _read_network_long(self.stream)
+            headers.append((tag, type, offset, count))
+        data_store = self.stream.read(data_len)
+        # The signature data store is padded to an 8-byte boundary. Skip the
+        # padding so the main header starts in the right place.
+        self.stream.read((8 - data_len % 8) % 8)
+
+        for header in headers:
+            tag, type, offset, count = header
+            if DEBUG > 1:
+                print(f"sig header: {tag}, {type}, {offset} {count}", file=sys.stderr)
+            if tag == 1000:  # SIGTAG_SIZE
+                # TODO: Report errors better.
+                assert type == HEADER_INT32
+                assert count == 1
+                file_size = _get_int32(data_store, offset)
+                self.log(f"Signature: file size: {file_size}")
+            if DEBUG > 1:
+                if type == HEADER_STRING:
+                    print(
+                        "  STRING:",
+                        _get_null_terminated_string(data_store, offset),
+                        file=sys.stderr,
+                    )
+                if type == HEADER_STRING_ARRAY:
+                    for i in range(count):
+                        s = _get_null_terminated_string(data_store, offset)
+                        print("  STRING:", i, s, file=sys.stderr)
+                        offset += len(s) + 1
+        # We could return some interesting stuff here.
+        return 0
+
+    def _get_headers(self):
+        n_entries, data_len = self._read_header_start()
+        headers = []
+        for i in range(n_entries):
+            tag = _read_network_long(self.stream)
+            type = _read_network_long(self.stream)
+            offset = _read_network_long(self.stream)
+            count = _read_network_long(self.stream)
+            headers.append((tag, type, offset, count))
+        data_store = self.stream.read(data_len)
+        ret = {}
+        for header in headers:
+            tag, type, offset, count = header
+            if DEBUG > 1:
+                print(f"header: {tag}, {type}, {offset} {count}", file=sys.stderr)
+
+            # In verbose mode we print some generally interesting values.
+            if tag == RPMTAG_PAYLOADCOMPRESSOR:
+                self.compression = _get_null_terminated_string(data_store, offset)
+                self.log(f"Compression: {self.compression}")
+            if tag == RPMTAG_ARCH:
+                self.log(f"arch: {_get_null_terminated_string(data_store, offset)}")
+            if tag == RPMTAG_BUILDHOST:
+                self.log(
+                    f"build_host: {_get_null_terminated_string(data_store, offset)}"
+                )
+            if tag == RPMTAG_DESCRIPTION:
+                self.log(
+                    f"description: {_get_null_terminated_string(data_store, offset)}"
+                )
+            if tag == RPMTAG_DISTRIBUTION:
+                self.log(
+                    f"distribution: {_get_null_terminated_string(data_store, offset)}"
+                )
+            if tag == RPMTAG_LICENSE:
+                self.log(f"license: {_get_null_terminated_string(data_store, offset)}")
+            if tag == RPMTAG_OS:
+                self.log(f"os: {_get_null_terminated_string(data_store, offset)}")
+            if tag == RPMTAG_SUMMARY:
+                self.log(f"summary: {_get_null_terminated_string(data_store, offset)}")
+            if tag == RPMTAG_VENDOR:
+                self.log(f"vendor: {_get_null_terminated_string(data_store, offset)}")
+            if DEBUG > 1 and type == HEADER_STRING:
+                print(
+                    "  STRING:",
+                    _get_null_terminated_string(data_store, offset),
+                    file=sys.stderr,
+                )
+
+            # Save the headers in a dict so we can serialize to JSON.
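+            # Note that json.dump coerces these integer tag keys to strings,
+            # so the --headers_out file is keyed by tag numbers as strings.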
+            if type == HEADER_INT16:
+                ret[tag] = _get_n_ints(data_store, offset, count, 2)
+
+            elif type == HEADER_INT32:
+                ret[tag] = _get_n_ints(data_store, offset, count, 4)
+
+            elif type == HEADER_INT64:
+                ret[tag] = _get_n_ints(data_store, offset, count, 8)
+
+            elif type == HEADER_STRING:
+                ret[tag] = _get_null_terminated_string(data_store, offset)
+
+            elif type == HEADER_STRING_ARRAY:
+                values = []
+                for i in range(count):
+                    s = _get_null_terminated_string(data_store, offset)
+                    values.append(s)
+                    offset += len(s) + 1
+                ret[tag] = values
+
+        return ret
+
+    def read_headers(self, headers_out=None):
+        """Read the initial part of the RPM file to get the headers."""
+        lead = self._get_rpm_lead()
+        self.log(str(lead))
+        if lead.magic != RPM_MAGIC:
+            raise ValueError(f"expected magic '{RPM_MAGIC}', got '{lead.magic}'")
+        if lead.major != 3:
+            raise ValueError(f"Can not handle RPM version '{lead.major}.{lead.minor}'")
+        if lead.signature_type != 5:
+            raise ValueError(f"Unexpected signature type '{lead.signature_type}'")
+        _ = self._get_rpm_signature()
+        headers = self._get_headers()
+        if headers_out:
+            with open(headers_out, "w") as out:
+                json.dump(headers, indent=2, fp=out)
+        self.have_read_headers = True
+
+    def stream_cpio(self, out_stream):
+        if not self.have_read_headers:
+            raise IOError("Called stream_cpio before calling read_headers.")
+
+        if not self.compression:
+            while True:
+                block = self.stream.read(RpmReader.BLOCKSIZE)
+                if not block:
+                    break
+                out_stream.write(block)
+
+        elif self.compression == "lzma" or self.compression == "xz":
+            decompressor = lzma.LZMADecompressor()
+            while True:
+                block = self.stream.read(RpmReader.BLOCKSIZE)
+                if not block:
+                    break
+                out_stream.write(decompressor.decompress(block))
+                if decompressor.eof:
+                    break
+            # If we are not at EOF, the input data was incomplete or corrupted.
+            if not decompressor.eof:
+                raise IOError(
+                    "Compressed data ended before the end-of-stream marker was reached"
+                )
+
+        elif self.compression == "gzip":
+            # wbits=MAX_WBITS|16 makes zlib expect the gzip header and trailer.
+            decompressor = zlib.decompressobj(wbits=zlib.MAX_WBITS | 16)
+            while True:
+                block = self.stream.read(RpmReader.BLOCKSIZE)
+                if not block:
+                    break
+                out_stream.write(decompressor.decompress(block))
+                if decompressor.eof:
+                    break
+            if not decompressor.eof:
+                raise IOError(
+                    "gzip data ended before the end-of-stream marker was reached"
+                )
+
+        elif self.compression == "bzip2":
+            decompressor = bz2.BZ2Decompressor()
+            while True:
+                block = self.stream.read(RpmReader.BLOCKSIZE)
+                if not block:
+                    break
+                out_stream.write(decompressor.decompress(block))
+                if decompressor.eof:
+                    break
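+            # Note: bz2.BZ2Decompressor handles a single stream; a
+            # concatenated multi-stream payload would need a fresh
+            # decompressor per stream.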
+            # If we are not at EOF, the input data was incomplete or corrupted.
+            if not decompressor.eof:
+                raise IOError(
+                    "bzip2 data ended before the end-of-stream marker was reached"
+                )
+
+        # TODO: zstd
+        out_stream.flush()
+
+
+def main(args):
+    parser = argparse.ArgumentParser(
+        description="RPM file reader", fromfile_prefix_chars="@"
+    )
+    parser.add_argument("--rpm", required=False, help="path to an RPM file")
+    parser.add_argument("--cpio_out", help="output path for cpio stream")
+    parser.add_argument("--headers_out", help="output path for header dump")
+    parser.add_argument("--verbose", action="store_true")
+    options = parser.parse_args()
+
+    # Use the binary layers of stdin/stdout, since both the RPM input and
+    # the cpio output are byte streams.
+    inp = open(options.rpm, "rb") if options.rpm else sys.stdin.buffer
+    reader = RpmReader(stream=inp, verbose=options.verbose)
+    if options.cpio_out:
+        out = open(options.cpio_out, "wb")
+    else:
+        out = sys.stdout.buffer
+    reader.read_headers(headers_out=options.headers_out)
+    reader.stream_cpio(out_stream=out)
+    if inp is not sys.stdin.buffer:
+        inp.close()
+    if out is not sys.stdout.buffer:
+        out.close()
+
+
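+# Typical use:
+#   rpm2cpio.py --rpm foo.rpm --cpio_out foo.cpio --headers_out headers.json
+#   rpm2cpio.py --verbose < foo.rpm > foo.cpio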
+if __name__ == "__main__":
+    main(sys.argv)

From b4d1d7497ceccdd4fdbb1ef88e977a2c9e74900e Mon Sep 17 00:00:00 2001
From: Tony Aiuto
Date: Tue, 23 Dec 2025 09:57:28 -0500
Subject: [PATCH 4/5] Add workflow to publish a release to the BCR. (#998)

* Add the workflow to publish a release to the BCR.
  - Update the .bcr metadata template.
  - Update the .bcr test matrix to drop Bazel 7.
---
 .bcr/README.md                       |  3 +--
 .bcr/config.yml                      |  4 ++--
 .bcr/metadata.template.json          | 24 +++++++++++--------
 .bcr/presubmit.yml                   | 35 ++++++++++++++--------------
 .bcr/source.template.json            |  2 +-
 .github/workflows/publish_to_bcr.yml | 31 ++++++++++++++++++++++++
 6 files changed, 67 insertions(+), 32 deletions(-)
 create mode 100644 .github/workflows/publish_to_bcr.yml

diff --git a/.bcr/README.md b/.bcr/README.md
index 44ae7fe55..4cc4f6042 100644
--- a/.bcr/README.md
+++ b/.bcr/README.md
@@ -1,8 +1,7 @@
 # Bazel Central Registry
 
 When the ruleset is released, we want it to be published to the
-Bazel Central Registry automatically:
-
+Bazel Central Registry:
 This folder contains configuration files to automate the publish step.
 
 See
diff --git a/.bcr/config.yml b/.bcr/config.yml
index b40ed206e..758894c59 100644
--- a/.bcr/config.yml
+++ b/.bcr/config.yml
@@ -1,3 +1,3 @@
 fixedReleaser:
-  login: aiuto
-  email: aiuto@google.com
+  login: tonyaiuto
+  email: tony@aiuto.dev
diff --git a/.bcr/metadata.template.json b/.bcr/metadata.template.json
index fc840c63f..ac6a700b8 100644
--- a/.bcr/metadata.template.json
+++ b/.bcr/metadata.template.json
@@ -1,12 +1,16 @@
 {
-  "homepage": "https://github.com/bazelbuild/rules_pkg",
-  "maintainers": [
-    {
-      "email": "aiuto@datadoghq.com",
-      "name": "Tony Aiuto"
-    }
-  ],
-  "repository": ["github:bazelbuild/rules_pkg"],
-  "versions": [],
-  "yanked_versions": {}
+    "homepage": "https://github.com/bazelbuild/rules_pkg",
+    "maintainers": [
+        {
+            "email": "tony@aiuto.dev",
+            "name": "Tony Aiuto",
+            "github": "aiuto",
+            "github_user_id": "3044252"
+        }
+    ],
+    "repository": [
+        "github:bazelbuild/rules_pkg"
+    ],
+    "versions": [],
+    "yanked_versions": {}
 }
diff --git a/.bcr/presubmit.yml b/.bcr/presubmit.yml
index 58970362e..69c114079 100644
--- a/.bcr/presubmit.yml
+++ b/.bcr/presubmit.yml
@@ -1,17 +1,18 @@
-build_targets: &build_targets
-- '@rules_pkg//...'
-# Re-enable those targets when toolchain registration is supported.
-- '-@rules_pkg//toolchains/...'
-- '-@rules_pkg//pkg:make_rpm'
-
-platforms:
-  centos7_java11_devtoolset10:
-    build_targets: *build_targets
-  debian10:
-    build_targets: *build_targets
-  macos:
-    build_targets: *build_targets
-  ubuntu2204:
-    build_targets: *build_targets
-  windows:
-    build_targets: *build_targets
+matrix:
+  platform:
+    - debian10
+    - ubuntu2004
+    - macos
+    - macos_arm64
+    - windows
+  bazel:
+    - 8.x
+tasks:
+  verify_targets:
+    name: Verify build targets
+    platform: ${{ platform }}
+    bazel: ${{ bazel }}
+    build_targets:
+      - '@rules_pkg//...'
+      - '-@rules_pkg//toolchains/...'
+      - '-@rules_pkg//pkg:make_rpm'
diff --git a/.bcr/source.template.json b/.bcr/source.template.json
index 7ea619a2c..54c9271a9 100644
--- a/.bcr/source.template.json
+++ b/.bcr/source.template.json
@@ -1,4 +1,4 @@
 {
-  "integrity": "**leave this alone**",
+  "integrity": "",
   "url": "https://github.com/{OWNER}/{REPO}/releases/download/{TAG}/rules_pkg-{TAG}.tar.gz"
 }
diff --git a/.github/workflows/publish_to_bcr.yml b/.github/workflows/publish_to_bcr.yml
new file mode 100644
index 000000000..a16884483
--- /dev/null
+++ b/.github/workflows/publish_to_bcr.yml
@@ -0,0 +1,31 @@
+name: "Publish release to BCR"
+
+on:
+  workflow_dispatch:
+    inputs:
+      version:
+        required: true
+        type: "string"
+
+permissions:
+  id-token: write
+  attestations: write
+  contents: write
+
+jobs:
+  publish:
+    name: "Publish to BCR"
+
+    permissions:
+      attestations: write
+      contents: write
+      id-token: write
+
+    uses: "bazel-contrib/publish-to-bcr/.github/workflows/publish.yaml@v1.1.0"
+    with:
+      tag_name: "${{ inputs.version }}"
+      tag_prefix: ""
+      registry_fork: "bazel-contrib/bazel-central-registry"
+      draft: false
+    secrets:
+      publish_token: "${{ secrets.BCR_PUBLISH_TOKEN }}"

From 3e942b0515796694ea1074e7fd255ff296b7fd1b Mon Sep 17 00:00:00 2001
From: tonyaiuto
Date: Wed, 28 Jan 2026 21:40:14 -0500
Subject: [PATCH 5/5] Add a tar_size tool.

---
 tools/tar/tar_size.py | 65 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 65 insertions(+)
 create mode 100644 tools/tar/tar_size.py

diff --git a/tools/tar/tar_size.py b/tools/tar/tar_size.py
new file mode 100644
index 000000000..5293dda04
--- /dev/null
+++ b/tools/tar/tar_size.py
@@ -0,0 +1,65 @@
+"""Report the total uncompressed content size of a tar archive."""
+
+import gzip
+import lzma
+import sys
+import tarfile
+
+PORTABLE_MTIME = 946684800  # 2000-01-01 00:00:00.000 UTC (not yet used)
+
+
+class TarReader():
+    """Archive reader for testing the pkg_tar rule."""
+
+    def __init__(self, stream, compress=None):
+        if compress == "gz":
+            self.stream = gzip.GzipFile(fileobj=stream, mode='r')
+        elif compress == "xz":
+            # lzma.LZMAFile accepts a file object as its first argument.
+            self.stream = lzma.LZMAFile(filename=stream, mode='r')
+        else:
+            self.stream = stream
+        self.tarfile = tarfile.TarFile(fileobj=self.stream, mode='r')
+
+    def next(self):
+        info = self.tarfile.next()
+        return info
+
+
+def main(args):
+    path = args[1]
+    # Pick the decompressor from the file extension.
+    if path.endswith(".gz") or path.endswith(".tgz"):
+        compress = "gz"
+    elif path.endswith(".xz"):
+        compress = "xz"
+    else:
+        compress = None
+    with open(path, "rb") as inp:
+        reader = TarReader(inp, compress=compress)
+        size = 0
+        while True:
+            info = reader.next()
+            if not info:
+                break
+            size += info.size
+    print(f"Total size: {size} bytes, {size / (1024 * 1024):.2f} MiB")
+
+
+if __name__ == "__main__":
+    main(sys.argv)
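+
+# Typical use:
+#   python3 tar_size.py bazel-bin/my_package.tar.xz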
+
+# Scratch below: an assertion fragment lifted from the pkg_tar tests, kept
+# for reference while turning this into a real test library.
+"""
+      elif k == 'data':
+        value = f.extractfile(info).read()
+      elif k == 'isdir':
+        value = info.isdir()
+      else:
+        value = getattr(info, k)
+      if k == 'mode':
+        p_value = '0o%o' % value
+        p_v = '0o%o' % v
+      else:
+        p_value = str(value)
+        p_v = str(v)
+      error_msg = ' '.join([
+          'Value `%s` for key `%s` of file' % (p_value, k),
+          '%s in archive %s does' % (info.name, file_path),
+          'not match expected value `%s`' % p_v
+      ])
+      self.assertEqual(value, v, error_msg)
+      if value != v:
+        print(error_msg)
+    i += 1
+  if i < len(content):
+    self.fail('Missing file %s in archive %s of [%s]' % (
+        content[i], file_path, ',\n '.join(got)))
+"""