treewide: format repo with buildifier
Change-Id: Ia7aebeb7bba5b119c9157d1ad805cc477bcbb68a
Reviewed-on: https://review.monogon.dev/c/monogon/+/3774
Tested-by: Jenkins CI
Reviewed-by: Leopold Schabel <leo@monogon.tech>
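To reproduce this kind of cleanup locally, a typical invocation from the workspace root is shown below; the exact command used for this change is an assumption, and the repository may instead wrap buildifier in a Bazel run target:

    $ buildifier -lint=fix -r .

Here -r recurses into subdirectories and -lint=fix additionally applies auto-fixable lint findings (for example load() sorting and dropping unused load symbols) on top of plain reformatting.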
diff --git a/build/analysis/BUILD.bazel b/build/analysis/BUILD.bazel
index 8110f9e..930ba25 100644
--- a/build/analysis/BUILD.bazel
+++ b/build/analysis/BUILD.bazel
@@ -1,6 +1,5 @@
-load("@com_github_sluongng_nogo_analyzer//staticcheck:def.bzl", ALL_STATICCHECK_ANALYZERS = "ANALYZERS", format_staticcheck_analyzers = "staticcheck_analyzers")
-load("@com_github_sluongng_nogo_analyzer//:def.bzl", gen_nogo_config = "nogo_config")
load("@bazel_skylib//rules:write_file.bzl", "write_file")
+load("@com_github_sluongng_nogo_analyzer//staticcheck:def.bzl", ALL_STATICCHECK_ANALYZERS = "ANALYZERS", format_staticcheck_analyzers = "staticcheck_analyzers")
load("@io_bazel_rules_go//go:def.bzl", "nogo")
# NOGO_PASSES contains all enabled analyzers that nogo should execute.
@@ -94,9 +93,9 @@
"//build/analysis/importsort",
]
-# NOGO_CONFIG contains the overrides for nogo to exempt specific files
+# NOGO_CONFIG_OVERRIDES contains the overrides for nogo to exempt specific files
# from being analyzed.
-NOGO_CONFIG = {
+NOGO_CONFIG_OVERRIDES = {
"shift": {
"exclude_files": {
"external/dev_gvisor_gvisor": "third_party",
@@ -218,7 +217,7 @@
# We override the variable with itself unioned with the other
# config part, as the Intellij integration doesn't understand
# the |= expression which makes editing this file kinda annoying.
-NOGO_CONFIG = NOGO_CONFIG | {
+NOGO_CONFIG_EXTERNAL = {
analyzer: {
"exclude_files": {
# Don't run linters on external dependencies
@@ -230,6 +229,8 @@
for analyzer in DISABLED_FOR_EXTERNAL_CODE
}
+NOGO_CONFIG = NOGO_CONFIG_OVERRIDES | NOGO_CONFIG_EXTERNAL
+
write_file(
name = "nogo_config",
out = "nogo_config.json",
diff --git a/build/bazel_cc_fix/ccfixspec/BUILD.bazel b/build/bazel_cc_fix/ccfixspec/BUILD.bazel
index b24f3f3..0318e17 100644
--- a/build/bazel_cc_fix/ccfixspec/BUILD.bazel
+++ b/build/bazel_cc_fix/ccfixspec/BUILD.bazel
@@ -1,6 +1,6 @@
-load("@rules_proto//proto:defs.bzl", "proto_library")
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
+load("@rules_proto//proto:defs.bzl", "proto_library")
proto_library(
name = "build_bazel_cc_fix_ccfixspec_proto",
diff --git a/build/bindata/bindata.bzl b/build/bindata/bindata.bzl
index 5903754..a10b543 100644
--- a/build/bindata/bindata.bzl
+++ b/build/bindata/bindata.bzl
@@ -14,12 +14,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-load("@bazel_gazelle//:deps.bzl", "go_repository")
load(
"@io_bazel_rules_go//go:def.bzl",
- "GoLibrary",
"go_context",
- "go_library",
)
def _bindata_impl(ctx):
@@ -65,10 +62,10 @@
"srcs": attr.label_list(
mandatory = True,
allow_files = True,
- ),
+ ),
"package": attr.string(
mandatory = True,
- ),
+ ),
"bindata": attr.label(
allow_single_file = True,
default = Label("@com_github_kevinburke_go_bindata//go-bindata"),
diff --git a/build/ci/jenkins-presubmit.groovy b/build/ci/jenkins-presubmit.groovy
index 861e967..c658133 100644
--- a/build/ci/jenkins-presubmit.groovy
+++ b/build/ci/jenkins-presubmit.groovy
@@ -51,8 +51,7 @@
echo "Gerrit change: ${GERRIT_CHANGE_URL}"
sh "git clean -fdx -e '/bazel-*'"
sh "JENKINS_NODE_COOKIE=dontKillMe tools/bazel --bazelrc=.bazelrc.ci mod tidy --lockfile_mode=update"
- sh "JENKINS_NODE_COOKIE=dontKillMe tools/bazel --bazelrc=.bazelrc.ci run //:go -- mod tidy"
- sh "JENKINS_NODE_COOKIE=dontKillMe tools/bazel --bazelrc=.bazelrc.ci run //:gazelle -- update"
+ sh "JENKINS_NODE_COOKIE=dontKillMe tools/bazel --bazelrc=.bazelrc.ci run //:tidy"
}
post {
always {
@@ -65,8 +64,7 @@
Please run:
\$ bazel mod tidy --lockfile_mode=update
- \$ bazel run //:go -- mod tidy
- \$ bazel run //:gazelle -- update
+ \$ bazel run //:tidy
In your git checkout and amend the resulting diff to this changelist.
"""
diff --git a/build/mirror_proxy/BUILD.bazel b/build/mirror_proxy/BUILD.bazel
index f3c7778..0cafbbc 100644
--- a/build/mirror_proxy/BUILD.bazel
+++ b/build/mirror_proxy/BUILD.bazel
@@ -1,4 +1,6 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
+load("@rules_oci//oci:defs.bzl", "oci_image", "oci_push")
+load("@rules_pkg//pkg:tar.bzl", "pkg_tar")
go_library(
name = "mirror_proxy_lib",
@@ -17,15 +19,11 @@
visibility = ["//visibility:public"],
)
-load("@rules_pkg//pkg:tar.bzl", "pkg_tar")
-
pkg_tar(
name = "mirror_proxy_layer",
srcs = [":mirror_proxy"],
)
-load("@rules_oci//oci:defs.bzl", "oci_image")
-
oci_image(
name = "mirror_proxy_image",
base = "@distroless_base",
@@ -35,8 +33,6 @@
workdir = "/app",
)
-load("@rules_oci//oci:defs.bzl", "oci_push")
-
oci_push(
name = "mirror_proxy_push",
image = ":mirror_proxy_image",
diff --git a/build/sqlc/sqlc.bzl b/build/sqlc/sqlc.bzl
index 034a04f..cfaa3b1 100644
--- a/build/sqlc/sqlc.bzl
+++ b/build/sqlc/sqlc.bzl
@@ -14,37 +14,37 @@
for file in files:
if not file.basename.endswith(".up.sql") and not file.basename.endswith(".down.sql"):
fail("migration %s must end woth .{up,down}.sql" % file.basename)
- if len(file.basename.split('.')) != 3:
+ if len(file.basename.split(".")) != 3:
fail("migration %s must not contain any . other than in .{up,down}.sql extension" % file.basename)
- first = file.basename.split('.')[0]
- if len(first.split('_')) < 2:
+ first = file.basename.split(".")[0]
+ if len(first.split("_")) < 2:
fail("migration %s must be in <timestamp>_<name>.{up,down}.sql format" % file.basename)
- timestamp = first.split('_')[0]
+ timestamp = first.split("_")[0]
if not timestamp.isdigit():
fail("migration %s must be in <timestamp>_<name>.{up,down}.sql format" % file.basename)
timestamp = int(timestamp)
if timestamp < 1662136250:
fail("migration %s must be in <timestamp>_<name>.{up,down}.sql format" % file.basename)
- if file.basename.endswith('.up.sql'):
+ if file.basename.endswith(".up.sql"):
if timestamp in uppers:
- fail("migration %s conflicts with %s" % [file.basename, uppers[timestamp].basename])
+ fail("migration %s conflicts with %s" % [file.basename, uppers[timestamp].basename])
uppers[timestamp] = file
- if file.basename.endswith('.down.sql'):
+ if file.basename.endswith(".down.sql"):
if timestamp in downers:
- fail("migration %s conflicts with %s" % [file.basename, downers[timestamp].basename])
+ fail("migration %s conflicts with %s" % [file.basename, downers[timestamp].basename])
downers[timestamp] = file
# Check each 'up' has a corresponding 'down', and vice-versa.
for timestamp, up in uppers.items():
if timestamp not in downers:
fail("%s has no corresponding 'down' migration" % up.basename)
- if downers[timestamp].basename.replace('down.sql', 'up.sql') != up.basename:
+ if downers[timestamp].basename.replace("down.sql", "up.sql") != up.basename:
fail("%s has no corresponding 'down' migration" % up.basename)
for timestamp, down in downers.items():
if timestamp not in uppers:
fail("%s has no corresponding 'up' migration" % down.basename)
- if uppers[timestamp].basename.replace('up.sql', 'down.sql') != down.basename:
+ if uppers[timestamp].basename.replace("up.sql", "down.sql") != down.basename:
fail("%s has no corresponding 'up' migration" % down.basename)
return uppers, downers
@@ -57,7 +57,7 @@
# Split migrations into 'up' and 'down'. Only pass 'up' to sqlc. Use both
# to generate golang-migrate compatible bindata.
- uppers, downers = _parse_migrations(ctx.files.migrations)
+ uppers, _ = _parse_migrations(ctx.files.migrations)
# Make sure given queries have no repeating basenames. This ensures clean
# mapping source SQL file name and generated Go file.
@@ -73,6 +73,7 @@
ctx.actions.declare_file("db.go"),
ctx.actions.declare_file("models.go"),
]
+
# For every query file, basename.go is also generated.
for basename in query_basenames:
sqlc_go_sources.append(ctx.actions.declare_file(basename + ".go"))
@@ -83,15 +84,16 @@
if ctx.attr.dialect == "cockroachdb":
overrides = [
# INT is 64-bit in cockroachdb (32-bit in postgres).
- { "go_type": "int64", "db_type": "pg_catalog.int4" },
+ {"go_type": "int64", "db_type": "pg_catalog.int4"},
]
config = ctx.actions.declare_file("_config.yaml")
+
# All paths in config are relative to the config file. However, Bazel paths
# are relative to the execution root/CWD. To make things work regardless of
# config file placement, we prepend all config paths with a `../../ ...`
# path walk that makes the path be execroot relative again.
- config_walk = '../' * config.path.count('/')
+ config_walk = "../" * config.path.count("/")
config_data = json.encode({
"version": 2,
"sql": [
@@ -117,10 +119,11 @@
executable = ctx.executable._sqlc,
arguments = [
"generate",
- "-f", config.path,
+ "-f",
+ config.path,
],
inputs = [
- config
+ config,
] + uppers.values() + ctx.files.queries,
outputs = sqlc_go_sources,
)
@@ -133,7 +136,6 @@
OutputGroupInfo(go_generated_srcs = depset(library.srcs)),
]
-
sqlc_go_library = rule(
implementation = _sqlc_go_library,
attrs = {
diff --git a/build/static_binary_tarball/spec/BUILD.bazel b/build/static_binary_tarball/spec/BUILD.bazel
index 330d4cc..1765147 100644
--- a/build/static_binary_tarball/spec/BUILD.bazel
+++ b/build/static_binary_tarball/spec/BUILD.bazel
@@ -1,6 +1,6 @@
-load("@rules_proto//proto:defs.bzl", "proto_library")
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
+load("@rules_proto//proto:defs.bzl", "proto_library")
proto_library(
name = "spec_proto",
diff --git a/build/toolbase/gotoolchain/def.bzl b/build/toolbase/gotoolchain/def.bzl
index dbbba72..efd98a4 100644
--- a/build/toolbase/gotoolchain/def.bzl
+++ b/build/toolbase/gotoolchain/def.bzl
@@ -10,8 +10,6 @@
def _toolchain_library_impl(ctx):
go = go_context(ctx)
- importpath = ctx.attr.importpath
-
out = go.declare_file(go, ext = ".go")
ctx.actions.expand_template(
template = ctx.file._template,
diff --git a/build/toolchain/cc_toolchain_config.bzl b/build/toolchain/cc_toolchain_config.bzl
index 0fdb23e..06651d4 100644
--- a/build/toolchain/cc_toolchain_config.bzl
+++ b/build/toolchain/cc_toolchain_config.bzl
@@ -14,8 +14,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-load("@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl", "feature", "flag_group", "flag_set", "tool", "tool_path")
load("@bazel_tools//tools/build_defs/cc:action_names.bzl", "ACTION_NAMES")
+load("@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl", "feature", "flag_group", "flag_set", "tool_path")
all_compile_actions = [
ACTION_NAMES.c_compile,
diff --git a/build/toolchain/llvm-efi/cc_toolchain_config.bzl b/build/toolchain/llvm-efi/cc_toolchain_config.bzl
index 076f5d9..f6de95d 100644
--- a/build/toolchain/llvm-efi/cc_toolchain_config.bzl
+++ b/build/toolchain/llvm-efi/cc_toolchain_config.bzl
@@ -1,5 +1,5 @@
-load("@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl", "feature", "flag_group", "flag_set", "tool", "tool_path", "with_feature_set")
load("@bazel_tools//tools/build_defs/cc:action_names.bzl", "ACTION_NAMES")
+load("@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl", "feature", "flag_group", "flag_set", "tool_path", "with_feature_set")
all_compile_actions = [
ACTION_NAMES.c_compile,
@@ -197,7 +197,7 @@
features = [default_link_flags_feature, default_compile_flags_feature, hybrid_gnu_msvc_feature, lto_feature],
# Needed for various compiler built-in headers and auxiliary data. No system libraries are being used.
cxx_builtin_include_directories = [
- "/usr/lib/clang/18/include/"
+ "/usr/lib/clang/18/include/",
],
toolchain_identifier = "k8-toolchain",
host_system_name = "local",
diff --git a/build/toolchain/llvm-efi/transition.bzl b/build/toolchain/llvm-efi/transition.bzl
index 1edb86b..9c222e6 100644
--- a/build/toolchain/llvm-efi/transition.bzl
+++ b/build/toolchain/llvm-efi/transition.bzl
@@ -1,9 +1,9 @@
-def _build_efi_transition_impl(settings, attr):
+def _build_efi_transition_impl(_settings, _attr):
"""
Transition that enables building for an EFI environment. Currently only supports C code.
"""
return {
- "//command_line_option:platforms": "//build/platforms:efi_amd64"
+ "//command_line_option:platforms": "//build/platforms:efi_amd64",
}
build_efi_transition = transition(
diff --git a/build/toolchain/musl-host-gcc/sysroot/BUILD.bazel b/build/toolchain/musl-host-gcc/sysroot/BUILD.bazel
index 2979ee1..dbd2982 100644
--- a/build/toolchain/musl-host-gcc/sysroot/BUILD.bazel
+++ b/build/toolchain/musl-host-gcc/sysroot/BUILD.bazel
@@ -1,5 +1,5 @@
-load(":musl.bzl", "musl_headers")
load(":linux.bzl", "linux_headers")
+load(":musl.bzl", "musl_headers")
load(":tarball.bzl", "musl_gcc_tarball")
linux_headers(
diff --git a/build/toolchain/musl-host-gcc/sysroot/linux.bzl b/build/toolchain/musl-host-gcc/sysroot/linux.bzl
index e9cf40a..0d6b999 100644
--- a/build/toolchain/musl-host-gcc/sysroot/linux.bzl
+++ b/build/toolchain/musl-host-gcc/sysroot/linux.bzl
@@ -33,7 +33,7 @@
use_default_shell_env = True,
command = "make -C \"$1\" headers_install ARCH=\"$2\" INSTALL_HDR_PATH=\"$(pwd)/$3\" > /dev/null && mv \"$3/include/\"* \"$3/\" && rmdir \"$3/include\"",
)
- return [DefaultInfo(files=depset([hdrs_dir]))]
+ return [DefaultInfo(files = depset([hdrs_dir]))]
linux_headers = rule(
implementation = _linux_headers,
diff --git a/build/toolchain/musl-host-gcc/sysroot/musl.bzl b/build/toolchain/musl-host-gcc/sysroot/musl.bzl
index 5055b83..f5dba26 100644
--- a/build/toolchain/musl-host-gcc/sysroot/musl.bzl
+++ b/build/toolchain/musl-host-gcc/sysroot/musl.bzl
@@ -33,7 +33,7 @@
use_default_shell_env = True,
command = "make -C \"$1\" install-headers ARCH=\"$2\" includedir=\"$(pwd)/$3\" > /dev/null",
)
- return [DefaultInfo(files=depset([hdrs_dir]))]
+ return [DefaultInfo(files = depset([hdrs_dir]))]
musl_headers = rule(
implementation = _musl_headers,
diff --git a/build/toolchain/musl-host-gcc/sysroot/tarball.bzl b/build/toolchain/musl-host-gcc/sysroot/tarball.bzl
index d7f18aa..a7e407d 100644
--- a/build/toolchain/musl-host-gcc/sysroot/tarball.bzl
+++ b/build/toolchain/musl-host-gcc/sysroot/tarball.bzl
@@ -14,17 +14,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-load(
- "//build/utils:detect_root.bzl",
- "detect_root",
-)
-
"""
Build a sysroot-style tarball containing musl/linux headers and libraries.
This can then be used to build a C toolchain that builds C/C++ binaries for Metropolis nodes.
"""
+load(
+ "//build/utils:detect_root.bzl",
+ "detect_root",
+)
+
def _musl_gcc_tarball(ctx):
tarball_name = ctx.attr.name + ".tar.xz"
tarball = ctx.actions.declare_file(tarball_name)
@@ -37,7 +37,6 @@
compiler_headers_path = "lib/gcc/x86_64-redhat-linux/14/include"
musl_root = detect_root(ctx.attr.musl)
- musl_files = ctx.files.musl
# This builds a tarball containing musl, musl headers and linux headers.
# This is done by some carefully crafted tar command line arguments that rewrite
@@ -50,16 +49,16 @@
# Order is important here as this is a terrible hack producing a tar file with duplicate files. The decompressor
# will then overwrite the wrong one with the correct one for us.
- arguments += [compiler_headers_path]
+ arguments.append(compiler_headers_path)
command += " --transform 's|^'$2'|include|' /$2"
- arguments += [musl_headers_path]
+ arguments.append(musl_headers_path)
command += " --transform 's|^'$3'|include|' $3"
- arguments += [linux_headers_path]
+ arguments.append(linux_headers_path)
command += " --transform 's|^'$4'|include|' $4"
- arguments += [musl_root]
+ arguments.append(musl_root)
command += " --transform 's|^'$5'|lib|' $5"
ctx.actions.run_shell(
diff --git a/build/toolchain/musl-host-gcc/sysroot_repository.bzl b/build/toolchain/musl-host-gcc/sysroot_repository.bzl
index 253abbf..72882ea 100644
--- a/build/toolchain/musl-host-gcc/sysroot_repository.bzl
+++ b/build/toolchain/musl-host-gcc/sysroot_repository.bzl
@@ -30,7 +30,6 @@
)
""")
-
musl_sysroot_rule = repository_rule(
implementation = _musl_sysroot_rule_impl,
attrs = {
diff --git a/build/utils/detect_root.bzl b/build/utils/detect_root.bzl
index 50c9574..e52c6ca 100644
--- a/build/utils/detect_root.bzl
+++ b/build/utils/detect_root.bzl
@@ -57,7 +57,7 @@
def _get_level(path):
normalized = path
- for i in range(len(path)):
+ for _ in range(len(path)):
new_normalized = normalized.replace("//", "/")
if len(new_normalized) == len(normalized):
break