From 497cac49dd845b37ab48056357dfbf99de4d5e26 Mon Sep 17 00:00:00 2001 From: Jared Allard Date: Sat, 22 Feb 2025 18:39:14 -0800 Subject: [PATCH] feat: stub gRPC server and DB layer --- .editorconfig | 5 + .gitattributes | 3 +- .gitignore | 3 +- .mise.toml | 15 + buf.gen.yaml | 11 + buf.yaml | 10 + cmd/klefki/klefki.go | 50 +- docs/DESIGN.md | 62 ++ go.mod | 35 + go.sum | 90 +++ internal/db/ent/client.go | 340 ++++++++++ internal/db/ent/ent.go | 608 ++++++++++++++++++ internal/db/ent/enttest/enttest.go | 84 +++ internal/db/ent/hook/hook.go | 199 ++++++ internal/db/ent/machine.go | 102 +++ internal/db/ent/machine/machine.go | 47 ++ internal/db/ent/machine/where.go | 148 +++++ internal/db/ent/machine_create.go | 194 ++++++ internal/db/ent/machine_delete.go | 88 +++ internal/db/ent/machine_query.go | 527 +++++++++++++++ internal/db/ent/machine_update.go | 209 ++++++ internal/db/ent/migrate/migrate.go | 64 ++ internal/db/ent/migrate/schema.go | 29 + internal/db/ent/mutation.go | 359 +++++++++++ internal/db/ent/predicate/predicate.go | 10 + internal/db/ent/runtime.go | 9 + internal/db/ent/runtime/runtime.go | 10 + internal/db/ent/schema/machine.go | 36 ++ internal/db/ent/tx.go | 210 ++++++ internal/db/generate.go | 20 + internal/server/grpc/generate.go | 22 + .../generated/go/rgst/klefki/v1/kelfki.pb.go | 207 ++++++ .../go/rgst/klefki/v1/kelfki_grpc.pb.go | 122 ++++ .../grpc/proto/rgst/klefki/v1/kelfki.proto | 18 + internal/server/server.go | 63 ++ 35 files changed, 4006 insertions(+), 3 deletions(-) create mode 100644 buf.gen.yaml create mode 100644 buf.yaml create mode 100644 docs/DESIGN.md create mode 100644 go.sum create mode 100644 internal/db/ent/client.go create mode 100644 internal/db/ent/ent.go create mode 100644 internal/db/ent/enttest/enttest.go create mode 100644 internal/db/ent/hook/hook.go create mode 100644 internal/db/ent/machine.go create mode 100644 internal/db/ent/machine/machine.go create mode 100644 internal/db/ent/machine/where.go create mode 100644 internal/db/ent/machine_create.go create mode 100644 internal/db/ent/machine_delete.go create mode 100644 internal/db/ent/machine_query.go create mode 100644 internal/db/ent/machine_update.go create mode 100644 internal/db/ent/migrate/migrate.go create mode 100644 internal/db/ent/migrate/schema.go create mode 100644 internal/db/ent/mutation.go create mode 100644 internal/db/ent/predicate/predicate.go create mode 100644 internal/db/ent/runtime.go create mode 100644 internal/db/ent/runtime/runtime.go create mode 100644 internal/db/ent/schema/machine.go create mode 100644 internal/db/ent/tx.go create mode 100644 internal/db/generate.go create mode 100644 internal/server/grpc/generate.go create mode 100644 internal/server/grpc/generated/go/rgst/klefki/v1/kelfki.pb.go create mode 100644 internal/server/grpc/generated/go/rgst/klefki/v1/kelfki_grpc.pb.go create mode 100644 internal/server/grpc/proto/rgst/klefki/v1/kelfki.proto create mode 100644 internal/server/server.go diff --git a/.editorconfig b/.editorconfig index 109b54a..263d706 100644 --- a/.editorconfig +++ b/.editorconfig @@ -10,3 +10,8 @@ indent_style = tab [*.{yml,yaml}] indent_style = space indent_size = 2 + +[*.proto] +indent_size = 2 +indent_style = space +insert_final_newline = true diff --git a/.gitattributes b/.gitattributes index 6d7504c..d94ad5e 100644 --- a/.gitattributes +++ b/.gitattributes @@ -3,5 +3,6 @@ stencil.lock linguist-generated bun.lockb linguist-generated ## <> - +internal/db/ent/** linguist-generated +internal/db/ent/schema/** -linguist-generated ## 
<> diff --git a/.gitignore b/.gitignore index d59a51e..b2a1397 100644 --- a/.gitignore +++ b/.gitignore @@ -47,5 +47,6 @@ launch.json CHANGELOG.md ## <> - +data/ +!data/.gitkeep ## <> diff --git a/.mise.toml b/.mise.toml index 9093a8d..ed096cc 100644 --- a/.mise.toml +++ b/.mise.toml @@ -11,6 +11,13 @@ goreleaser = "latest" "go:mvdan.cc/sh/v3/cmd/shfmt" = "latest" "go:github.com/thenativeweb/get-next-version" = "latest" +# gRPC +"aqua:bufbuild/buf" = "1.50.0" +"go:google.golang.org/protobuf/cmd/protoc-gen-go" = "1.36.5" +"go:google.golang.org/grpc/cmd/protoc-gen-go-grpc" = "1.5.1" +"go:github.com/fullstorydev/grpcui/cmd/grpcui" = "1.4.2" +"aqua:protocolbuffers/protobuf/protoc" = "24.3" + [tasks.build] description = "Build a binary for the current platform/architecture" run = "go build -trimpath -o ./bin/ -v ./cmd/..." @@ -29,6 +36,7 @@ run = [ "goimports -w .", "shfmt -w .", "bun node_modules/.bin/prettier --write '**/*.{json,yaml,yml,md,jsonschema.json}'", + "buf format -w", ] [tasks.lint] @@ -46,5 +54,12 @@ description = "Run tests" run = "gotestsum" ## <> +[tasks.dev] +description = "Live reload target (use with `mise run watch`)" +depends = ["build"] +run = ["./bin/klefki"] +[tasks.watch] +description = "Watch for changes" +run = ["mise watch -t dev --restart"] ## <> diff --git a/buf.gen.yaml b/buf.gen.yaml new file mode 100644 index 0000000..921d8dc --- /dev/null +++ b/buf.gen.yaml @@ -0,0 +1,11 @@ +version: v2 +clean: true +plugins: + - local: protoc-gen-go + out: internal/server/grpc/generated/go + opt: paths=source_relative + - local: protoc-gen-go-grpc + out: internal/server/grpc/generated/go + opt: paths=source_relative +inputs: + - directory: internal/server/grpc/proto diff --git a/buf.yaml b/buf.yaml new file mode 100644 index 0000000..b1db5e0 --- /dev/null +++ b/buf.yaml @@ -0,0 +1,10 @@ +# For details on buf.yaml configuration, visit https://buf.build/docs/configuration/v2/buf-yaml +version: v2 +modules: + - path: internal/server/grpc/proto +lint: + use: + - STANDARD +breaking: + use: + - FILE diff --git a/cmd/klefki/klefki.go b/cmd/klefki/klefki.go index 7b8767a..6a74bc9 100644 --- a/cmd/klefki/klefki.go +++ b/cmd/klefki/klefki.go @@ -1,5 +1,53 @@ +// Copyright (C) 2025 klefki contributors +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
+// +// SPDX-License-Identifier: AGPL-3.0 + package main +import ( + "context" + "fmt" + "os" + "os/signal" + + "git.rgst.io/homelab/klefki/internal/server" +) + func main() { - // Your logic here + exitCode := 0 + defer func() { os.Exit(exitCode) }() + + ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, os.Kill) + defer cancel() + + s := (server.Server{}) + go func() { + if err := s.Run(ctx); err != nil { + fmt.Fprintf(os.Stderr, "failed to start server: %v\n", err) + exitCode = 1 + cancel() + } + }() + + <-ctx.Done() + fmt.Println() // better XP for ^C + + if err := s.Close(context.Background()); err != nil { + fmt.Fprintf(os.Stderr, "failed to close server: %v\n", err) + exitCode = 1 + return + } } diff --git a/docs/DESIGN.md b/docs/DESIGN.md new file mode 100644 index 0000000..c5ed3db --- /dev/null +++ b/docs/DESIGN.md @@ -0,0 +1,62 @@ +# [WIP] klefki + +## User Stories + +- As a server operator, I want to utilize full disk encryption without + needing to decrypt every drive manually after every restart. +- As a server operator, I want to know my full disk encryption keys are + not stored on disk in plain-text, which would render the encryption useless. +- As a server operator, I want to be able to store my FDE keys wherever + I would like to. + +## High-level Overview + +Klefki provides the following: + +- gRPC API for fetching keys used to decrypt a single FDE device. + Multiple devices are out of scope. + +## gRPC API + +### Authorization + +Authorization is determined by a private key issued for each caller. +This private key is used to identify the calling machine. + +While the key is "private", it should always be assumed that an attacker +could be using it, as it must be stored readily accessible on +disk. + +### Endpoints + +- `GetKey() string` - Creates a new session for the authenticated + machine, waits for `SubmitKey` to be called, then returns the + plain-text pass-phrase. +- `ListSessions() []MachineID` - Returns a list of machine IDs waiting + for a key to be provided. +- `SubmitKey(key string, machineID string)` - Finds the active session + for the provided `machineID` and submits the key to it. + +### Security + +- Pass-phrases are encrypted using the public key of the authenticated + machine, so the pass-phrase is never sent unencrypted and can only be + decrypted by the target machine. +- Machine IDs are derived from the authenticated machine through a + signature check (public keys are stored on the server side). + +### Flow + +1. Machine A boots initramfs+kernel +2. Machine A calls `GetKey()` + +## Machine Registration + +Adding a new machine requires the generation of a new private key. This +can be done through `klefkictl`. Example usage: + +```bash +klefkictl new +``` + +This will create a new entry in `data/klefkictl.sql`.
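The `GetKey`/`SubmitKey` endpoints described in `docs/DESIGN.md` above imply some server-side session state: `GetKey` has to block until an operator calls `SubmitKey` for the same machine. The sketch below (not part of this patch) shows one plausible way to model that wait with an in-memory broker; the names (`SessionBroker`, the `broker` package, and so on) are illustrative assumptions rather than APIs from this change, and a real handler would additionally verify the machine's signature and return the pass-phrase encrypted to its public key.

```go
// Package broker is an illustrative sketch (not part of this patch) of the
// session wait implied by GetKey/SubmitKey in docs/DESIGN.md.
package broker

import (
	"context"
	"fmt"
	"sync"
)

// SessionBroker tracks machines currently waiting for a pass-phrase.
type SessionBroker struct {
	mu       sync.Mutex
	sessions map[string]chan string // machine ID -> pass-phrase delivery channel
}

// NewSessionBroker returns an empty broker.
func NewSessionBroker() *SessionBroker {
	return &SessionBroker{sessions: make(map[string]chan string)}
}

// GetKey registers a session for machineID and blocks until SubmitKey
// delivers a pass-phrase or the context is cancelled.
func (b *SessionBroker) GetKey(ctx context.Context, machineID string) (string, error) {
	ch := make(chan string, 1) // buffered so SubmitKey never blocks
	b.mu.Lock()
	b.sessions[machineID] = ch
	b.mu.Unlock()
	defer func() {
		b.mu.Lock()
		delete(b.sessions, machineID)
		b.mu.Unlock()
	}()

	select {
	case key := <-ch:
		return key, nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

// ListSessions returns the machine IDs currently waiting for a key.
func (b *SessionBroker) ListSessions() []string {
	b.mu.Lock()
	defer b.mu.Unlock()
	ids := make([]string, 0, len(b.sessions))
	for id := range b.sessions {
		ids = append(ids, id)
	}
	return ids
}

// SubmitKey delivers a pass-phrase to the session waiting for machineID.
func (b *SessionBroker) SubmitKey(machineID, key string) error {
	b.mu.Lock()
	ch, ok := b.sessions[machineID]
	b.mu.Unlock()
	if !ok {
		return fmt.Errorf("no active session for machine %q", machineID)
	}
	ch <- key
	return nil
}
```

Under this assumption, the gRPC `GetKey` handler would call `broker.GetKey` with the request context (so a dropped connection cleans up its session), `ListSessions` would return `broker.ListSessions()`, and `SubmitKey` would hand the operator-provided pass-phrase to `broker.SubmitKey`; per the design it should arrive already encrypted to the machine's public key.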
diff --git a/go.mod b/go.mod index a93165d..18b4620 100644 --- a/go.mod +++ b/go.mod @@ -1,3 +1,38 @@ module git.rgst.io/homelab/klefki go 1.23 + +require ( + entgo.io/ent v0.14.2 + google.golang.org/grpc v1.70.0 + google.golang.org/protobuf v1.36.5 +) + +require ( + ariga.io/atlas v0.31.1-0.20250212144724-069be8033e83 // indirect + github.com/agext/levenshtein v1.2.3 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect + github.com/bmatcuk/doublestar v1.3.4 // indirect + github.com/go-openapi/inflect v0.21.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/hashicorp/hcl/v2 v2.23.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/olekukonko/tablewriter v0.0.5 // indirect + github.com/rivo/uniseg v0.2.0 // indirect + github.com/spf13/cobra v1.9.1 // indirect + github.com/spf13/pflag v1.0.6 // indirect + github.com/zclconf/go-cty v1.16.2 // indirect + github.com/zclconf/go-cty-yaml v1.1.0 // indirect + golang.org/x/mod v0.23.0 // indirect + golang.org/x/net v0.35.0 // indirect + golang.org/x/sync v0.11.0 // indirect + golang.org/x/sys v0.30.0 // indirect + golang.org/x/text v0.22.0 // indirect + golang.org/x/tools v0.30.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a // indirect +) + +tool entgo.io/ent/cmd/ent diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..9e6b74d --- /dev/null +++ b/go.sum @@ -0,0 +1,90 @@ +ariga.io/atlas v0.31.1-0.20250212144724-069be8033e83 h1:nX4HXncwIdvQ8/8sIUIf1nyCkK8qdBaHQ7EtzPpuiGE= +ariga.io/atlas v0.31.1-0.20250212144724-069be8033e83/go.mod h1:Oe1xWPuu5q9LzyrWfbZmEZxFYeu4BHTyzfjeW2aZp/w= +entgo.io/ent v0.14.2 h1:ywld/j2Rx4EmnIKs8eZ29cbFA1zpB+DA9TLL5l3rlq0= +entgo.io/ent v0.14.2/go.mod h1:aDPE/OziPEu8+OWbzy4UlvWmD2/kbRuWfK2A40hcxJM= +github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= +github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/bmatcuk/doublestar v1.3.4 h1:gPypJ5xD31uhX6Tf54sDPUOBXTqKH4c9aPY66CyQrS0= +github.com/bmatcuk/doublestar v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-openapi/inflect v0.21.0 h1:FoBjBTQEcbg2cJUWX6uwL9OyIW8eqc9k4KhN4lfbeYk= +github.com/go-openapi/inflect v0.21.0/go.mod h1:INezMuUu7SJQc2AyR3WO0DqqYUJSj8Kb4hBd7WtjlAw= +github.com/go-test/deep v1.0.3 
h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= +github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hashicorp/hcl/v2 v2.23.0 h1:Fphj1/gCylPxHutVSEOf2fBOh1VE4AuLV7+kbJf3qos= +github.com/hashicorp/hcl/v2 v2.23.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= +github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/zclconf/go-cty v1.16.2 h1:LAJSwc3v81IRBZyUVQDUdZ7hs3SYs9jv0eZJDWHD/70= +github.com/zclconf/go-cty v1.16.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= +github.com/zclconf/go-cty-yaml v1.1.0 h1:nP+jp0qPHv2IhUVqmQSzjvqAWcObN0KBkUl2rWBdig0= +github.com/zclconf/go-cty-yaml v1.1.0/go.mod h1:9YLUH4g7lOhVWqUbctnVlZ5KLpg7JAprQNgxSZ1Gyxs= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= 
+go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= +go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= +go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= +go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= +golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= +golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= +golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= +golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= +golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= +golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= +golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= +golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= +golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a h1:hgh8P4EuoxpsuKMXX/To36nOFD7vixReXgn8lPGnt+o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= +google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ= +google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/db/ent/client.go b/internal/db/ent/client.go new file mode 100644 index 0000000..53b0440 --- /dev/null +++ b/internal/db/ent/client.go @@ -0,0 +1,340 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "log" + "reflect" + + "git.rgst.io/homelab/klefki/internal/db/ent/migrate" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "git.rgst.io/homelab/klefki/internal/db/ent/machine" +) + +// Client is the client that holds all ent builders. +type Client struct { + config + // Schema is the client for creating, migrating and dropping schema. + Schema *migrate.Schema + // Machine is the client for interacting with the Machine builders. + Machine *MachineClient +} + +// NewClient creates a new client configured with the given options. 
+func NewClient(opts ...Option) *Client { + client := &Client{config: newConfig(opts...)} + client.init() + return client +} + +func (c *Client) init() { + c.Schema = migrate.NewSchema(c.driver) + c.Machine = NewMachineClient(c.config) +} + +type ( + // config is the configuration for the client and its builder. + config struct { + // driver used for executing database requests. + driver dialect.Driver + // debug enable a debug logging. + debug bool + // log used for logging on debug mode. + log func(...any) + // hooks to execute on mutations. + hooks *hooks + // interceptors to execute on queries. + inters *inters + } + // Option function to configure the client. + Option func(*config) +) + +// newConfig creates a new config for the client. +func newConfig(opts ...Option) config { + cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}} + cfg.options(opts...) + return cfg +} + +// options applies the options on the config object. +func (c *config) options(opts ...Option) { + for _, opt := range opts { + opt(c) + } + if c.debug { + c.driver = dialect.Debug(c.driver, c.log) + } +} + +// Debug enables debug logging on the ent.Driver. +func Debug() Option { + return func(c *config) { + c.debug = true + } +} + +// Log sets the logging function for debug mode. +func Log(fn func(...any)) Option { + return func(c *config) { + c.log = fn + } +} + +// Driver configures the client driver. +func Driver(driver dialect.Driver) Option { + return func(c *config) { + c.driver = driver + } +} + +// Open opens a database/sql.DB specified by the driver name and +// the data source name, and returns a new client attached to it. +// Optional parameters can be added for configuring the client. +func Open(driverName, dataSourceName string, options ...Option) (*Client, error) { + switch driverName { + case dialect.MySQL, dialect.Postgres, dialect.SQLite: + drv, err := sql.Open(driverName, dataSourceName) + if err != nil { + return nil, err + } + return NewClient(append(options, Driver(drv))...), nil + default: + return nil, fmt.Errorf("unsupported driver: %q", driverName) + } +} + +// ErrTxStarted is returned when trying to start a new transaction from a transactional client. +var ErrTxStarted = errors.New("ent: cannot start a transaction within a transaction") + +// Tx returns a new transactional client. The provided context +// is used until the transaction is committed or rolled back. +func (c *Client) Tx(ctx context.Context) (*Tx, error) { + if _, ok := c.driver.(*txDriver); ok { + return nil, ErrTxStarted + } + tx, err := newTx(ctx, c.driver) + if err != nil { + return nil, fmt.Errorf("ent: starting a transaction: %w", err) + } + cfg := c.config + cfg.driver = tx + return &Tx{ + ctx: ctx, + config: cfg, + Machine: NewMachineClient(cfg), + }, nil +} + +// BeginTx returns a transactional client with specified options. +func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { + if _, ok := c.driver.(*txDriver); ok { + return nil, errors.New("ent: cannot start a transaction within a transaction") + } + tx, err := c.driver.(interface { + BeginTx(context.Context, *sql.TxOptions) (dialect.Tx, error) + }).BeginTx(ctx, opts) + if err != nil { + return nil, fmt.Errorf("ent: starting a transaction: %w", err) + } + cfg := c.config + cfg.driver = &txDriver{tx: tx, drv: c.driver} + return &Tx{ + ctx: ctx, + config: cfg, + Machine: NewMachineClient(cfg), + }, nil +} + +// Debug returns a new debug-client. It's used to get verbose logging on specific operations. +// +// client.Debug(). 
+// Machine. +// Query(). +// Count(ctx) +func (c *Client) Debug() *Client { + if c.debug { + return c + } + cfg := c.config + cfg.driver = dialect.Debug(c.driver, c.log) + client := &Client{config: cfg} + client.init() + return client +} + +// Close closes the database connection and prevents new queries from starting. +func (c *Client) Close() error { + return c.driver.Close() +} + +// Use adds the mutation hooks to all the entity clients. +// In order to add hooks to a specific client, call: `client.Node.Use(...)`. +func (c *Client) Use(hooks ...Hook) { + c.Machine.Use(hooks...) +} + +// Intercept adds the query interceptors to all the entity clients. +// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`. +func (c *Client) Intercept(interceptors ...Interceptor) { + c.Machine.Intercept(interceptors...) +} + +// Mutate implements the ent.Mutator interface. +func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { + switch m := m.(type) { + case *MachineMutation: + return c.Machine.mutate(ctx, m) + default: + return nil, fmt.Errorf("ent: unknown mutation type %T", m) + } +} + +// MachineClient is a client for the Machine schema. +type MachineClient struct { + config +} + +// NewMachineClient returns a client for the Machine from the given config. +func NewMachineClient(c config) *MachineClient { + return &MachineClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `machine.Hooks(f(g(h())))`. +func (c *MachineClient) Use(hooks ...Hook) { + c.hooks.Machine = append(c.hooks.Machine, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `machine.Intercept(f(g(h())))`. +func (c *MachineClient) Intercept(interceptors ...Interceptor) { + c.inters.Machine = append(c.inters.Machine, interceptors...) +} + +// Create returns a builder for creating a Machine entity. +func (c *MachineClient) Create() *MachineCreate { + mutation := newMachineMutation(c.config, OpCreate) + return &MachineCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Machine entities. +func (c *MachineClient) CreateBulk(builders ...*MachineCreate) *MachineCreateBulk { + return &MachineCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *MachineClient) MapCreateBulk(slice any, setFunc func(*MachineCreate, int)) *MachineCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &MachineCreateBulk{err: fmt.Errorf("calling to MachineClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*MachineCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &MachineCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Machine. +func (c *MachineClient) Update() *MachineUpdate { + mutation := newMachineMutation(c.config, OpUpdate) + return &MachineUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. 
+func (c *MachineClient) UpdateOne(m *Machine) *MachineUpdateOne { + mutation := newMachineMutation(c.config, OpUpdateOne, withMachine(m)) + return &MachineUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *MachineClient) UpdateOneID(id string) *MachineUpdateOne { + mutation := newMachineMutation(c.config, OpUpdateOne, withMachineID(id)) + return &MachineUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Machine. +func (c *MachineClient) Delete() *MachineDelete { + mutation := newMachineMutation(c.config, OpDelete) + return &MachineDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *MachineClient) DeleteOne(m *Machine) *MachineDeleteOne { + return c.DeleteOneID(m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *MachineClient) DeleteOneID(id string) *MachineDeleteOne { + builder := c.Delete().Where(machine.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &MachineDeleteOne{builder} +} + +// Query returns a query builder for Machine. +func (c *MachineClient) Query() *MachineQuery { + return &MachineQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeMachine}, + inters: c.Interceptors(), + } +} + +// Get returns a Machine entity by its id. +func (c *MachineClient) Get(ctx context.Context, id string) (*Machine, error) { + return c.Query().Where(machine.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *MachineClient) GetX(ctx context.Context, id string) *Machine { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *MachineClient) Hooks() []Hook { + return c.hooks.Machine +} + +// Interceptors returns the client interceptors. +func (c *MachineClient) Interceptors() []Interceptor { + return c.inters.Machine +} + +func (c *MachineClient) mutate(ctx context.Context, m *MachineMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&MachineCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&MachineUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&MachineUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&MachineDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Machine mutation op: %q", m.Op()) + } +} + +// hooks and interceptors per client, for fast access. +type ( + hooks struct { + Machine []ent.Hook + } + inters struct { + Machine []ent.Interceptor + } +) diff --git a/internal/db/ent/ent.go b/internal/db/ent/ent.go new file mode 100644 index 0000000..a0bd362 --- /dev/null +++ b/internal/db/ent/ent.go @@ -0,0 +1,608 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "reflect" + "sync" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "git.rgst.io/homelab/klefki/internal/db/ent/machine" +) + +// ent aliases to avoid import conflicts in user's code. 
+type ( + Op = ent.Op + Hook = ent.Hook + Value = ent.Value + Query = ent.Query + QueryContext = ent.QueryContext + Querier = ent.Querier + QuerierFunc = ent.QuerierFunc + Interceptor = ent.Interceptor + InterceptFunc = ent.InterceptFunc + Traverser = ent.Traverser + TraverseFunc = ent.TraverseFunc + Policy = ent.Policy + Mutator = ent.Mutator + Mutation = ent.Mutation + MutateFunc = ent.MutateFunc +) + +type clientCtxKey struct{} + +// FromContext returns a Client stored inside a context, or nil if there isn't one. +func FromContext(ctx context.Context) *Client { + c, _ := ctx.Value(clientCtxKey{}).(*Client) + return c +} + +// NewContext returns a new context with the given Client attached. +func NewContext(parent context.Context, c *Client) context.Context { + return context.WithValue(parent, clientCtxKey{}, c) +} + +type txCtxKey struct{} + +// TxFromContext returns a Tx stored inside a context, or nil if there isn't one. +func TxFromContext(ctx context.Context) *Tx { + tx, _ := ctx.Value(txCtxKey{}).(*Tx) + return tx +} + +// NewTxContext returns a new context with the given Tx attached. +func NewTxContext(parent context.Context, tx *Tx) context.Context { + return context.WithValue(parent, txCtxKey{}, tx) +} + +// OrderFunc applies an ordering on the sql selector. +// Deprecated: Use Asc/Desc functions or the package builders instead. +type OrderFunc func(*sql.Selector) + +var ( + initCheck sync.Once + columnCheck sql.ColumnCheck +) + +// checkColumn checks if the column exists in the given table. +func checkColumn(table, column string) error { + initCheck.Do(func() { + columnCheck = sql.NewColumnCheck(map[string]func(string) bool{ + machine.Table: machine.ValidColumn, + }) + }) + return columnCheck(table, column) +} + +// Asc applies the given fields in ASC order. +func Asc(fields ...string) func(*sql.Selector) { + return func(s *sql.Selector) { + for _, f := range fields { + if err := checkColumn(s.TableName(), f); err != nil { + s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)}) + } + s.OrderBy(sql.Asc(s.C(f))) + } + } +} + +// Desc applies the given fields in DESC order. +func Desc(fields ...string) func(*sql.Selector) { + return func(s *sql.Selector) { + for _, f := range fields { + if err := checkColumn(s.TableName(), f); err != nil { + s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)}) + } + s.OrderBy(sql.Desc(s.C(f))) + } + } +} + +// AggregateFunc applies an aggregation step on the group-by traversal/selector. +type AggregateFunc func(*sql.Selector) string + +// As is a pseudo aggregation function for renaming another other functions with custom names. For example: +// +// GroupBy(field1, field2). +// Aggregate(ent.As(ent.Sum(field1), "sum_field1"), (ent.As(ent.Sum(field2), "sum_field2")). +// Scan(ctx, &v) +func As(fn AggregateFunc, end string) AggregateFunc { + return func(s *sql.Selector) string { + return sql.As(fn(s), end) + } +} + +// Count applies the "count" aggregation function on each group. +func Count() AggregateFunc { + return func(s *sql.Selector) string { + return sql.Count("*") + } +} + +// Max applies the "max" aggregation function on the given field of each group. +func Max(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Max(s.C(field)) + } +} + +// Mean applies the "mean" aggregation function on the given field of each group. 
+func Mean(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Avg(s.C(field)) + } +} + +// Min applies the "min" aggregation function on the given field of each group. +func Min(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Min(s.C(field)) + } +} + +// Sum applies the "sum" aggregation function on the given field of each group. +func Sum(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Sum(s.C(field)) + } +} + +// ValidationError returns when validating a field or edge fails. +type ValidationError struct { + Name string // Field or edge name. + err error +} + +// Error implements the error interface. +func (e *ValidationError) Error() string { + return e.err.Error() +} + +// Unwrap implements the errors.Wrapper interface. +func (e *ValidationError) Unwrap() error { + return e.err +} + +// IsValidationError returns a boolean indicating whether the error is a validation error. +func IsValidationError(err error) bool { + if err == nil { + return false + } + var e *ValidationError + return errors.As(err, &e) +} + +// NotFoundError returns when trying to fetch a specific entity and it was not found in the database. +type NotFoundError struct { + label string +} + +// Error implements the error interface. +func (e *NotFoundError) Error() string { + return "ent: " + e.label + " not found" +} + +// IsNotFound returns a boolean indicating whether the error is a not found error. +func IsNotFound(err error) bool { + if err == nil { + return false + } + var e *NotFoundError + return errors.As(err, &e) +} + +// MaskNotFound masks not found error. +func MaskNotFound(err error) error { + if IsNotFound(err) { + return nil + } + return err +} + +// NotSingularError returns when trying to fetch a singular entity and more then one was found in the database. +type NotSingularError struct { + label string +} + +// Error implements the error interface. +func (e *NotSingularError) Error() string { + return "ent: " + e.label + " not singular" +} + +// IsNotSingular returns a boolean indicating whether the error is a not singular error. +func IsNotSingular(err error) bool { + if err == nil { + return false + } + var e *NotSingularError + return errors.As(err, &e) +} + +// NotLoadedError returns when trying to get a node that was not loaded by the query. +type NotLoadedError struct { + edge string +} + +// Error implements the error interface. +func (e *NotLoadedError) Error() string { + return "ent: " + e.edge + " edge was not loaded" +} + +// IsNotLoaded returns a boolean indicating whether the error is a not loaded error. +func IsNotLoaded(err error) bool { + if err == nil { + return false + } + var e *NotLoadedError + return errors.As(err, &e) +} + +// ConstraintError returns when trying to create/update one or more entities and +// one or more of their constraints failed. For example, violation of edge or +// field uniqueness. +type ConstraintError struct { + msg string + wrap error +} + +// Error implements the error interface. 
+func (e ConstraintError) Error() string { + return "ent: constraint failed: " + e.msg +} + +// Unwrap implements the errors.Wrapper interface. +func (e *ConstraintError) Unwrap() error { + return e.wrap +} + +// IsConstraintError returns a boolean indicating whether the error is a constraint failure. +func IsConstraintError(err error) bool { + if err == nil { + return false + } + var e *ConstraintError + return errors.As(err, &e) +} + +// selector embedded by the different Select/GroupBy builders. +type selector struct { + label string + flds *[]string + fns []AggregateFunc + scan func(context.Context, any) error +} + +// ScanX is like Scan, but panics if an error occurs. +func (s *selector) ScanX(ctx context.Context, v any) { + if err := s.scan(ctx, v); err != nil { + panic(err) + } +} + +// Strings returns list of strings from a selector. It is only allowed when selecting one field. +func (s *selector) Strings(ctx context.Context) ([]string, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Strings is not achievable when selecting more than 1 field") + } + var v []string + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// StringsX is like Strings, but panics if an error occurs. +func (s *selector) StringsX(ctx context.Context) []string { + v, err := s.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// String returns a single string from a selector. It is only allowed when selecting one field. +func (s *selector) String(ctx context.Context) (_ string, err error) { + var v []string + if v, err = s.Strings(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Strings returned %d results when one was expected", len(v)) + } + return +} + +// StringX is like String, but panics if an error occurs. +func (s *selector) StringX(ctx context.Context) string { + v, err := s.String(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from a selector. It is only allowed when selecting one field. +func (s *selector) Ints(ctx context.Context) ([]int, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Ints is not achievable when selecting more than 1 field") + } + var v []int + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (s *selector) IntsX(ctx context.Context) []int { + v, err := s.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Int returns a single int from a selector. It is only allowed when selecting one field. +func (s *selector) Int(ctx context.Context) (_ int, err error) { + var v []int + if v, err = s.Ints(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Ints returned %d results when one was expected", len(v)) + } + return +} + +// IntX is like Int, but panics if an error occurs. +func (s *selector) IntX(ctx context.Context) int { + v, err := s.Int(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. 
+func (s *selector) Float64s(ctx context.Context) ([]float64, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Float64s is not achievable when selecting more than 1 field") + } + var v []float64 + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (s *selector) Float64sX(ctx context.Context) []float64 { + v, err := s.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. +func (s *selector) Float64(ctx context.Context) (_ float64, err error) { + var v []float64 + if v, err = s.Float64s(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Float64s returned %d results when one was expected", len(v)) + } + return +} + +// Float64X is like Float64, but panics if an error occurs. +func (s *selector) Float64X(ctx context.Context) float64 { + v, err := s.Float64(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from a selector. It is only allowed when selecting one field. +func (s *selector) Bools(ctx context.Context) ([]bool, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Bools is not achievable when selecting more than 1 field") + } + var v []bool + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. +func (s *selector) BoolsX(ctx context.Context) []bool { + v, err := s.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bool returns a single bool from a selector. It is only allowed when selecting one field. +func (s *selector) Bool(ctx context.Context) (_ bool, err error) { + var v []bool + if v, err = s.Bools(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Bools returned %d results when one was expected", len(v)) + } + return +} + +// BoolX is like Bool, but panics if an error occurs. +func (s *selector) BoolX(ctx context.Context) bool { + v, err := s.Bool(ctx) + if err != nil { + panic(err) + } + return v +} + +// withHooks invokes the builder operation with the given hooks, if any. +func withHooks[V Value, M any, PM interface { + *M + Mutation +}](ctx context.Context, exec func(context.Context) (V, error), mutation PM, hooks []Hook) (value V, err error) { + if len(hooks) == 0 { + return exec(ctx) + } + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutationT, ok := any(m).(PM) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + // Set the mutation to the builder. + *mutation = *mutationT + return exec(ctx) + }) + for i := len(hooks) - 1; i >= 0; i-- { + if hooks[i] == nil { + return value, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = hooks[i](mut) + } + v, err := mut.Mutate(ctx, mutation) + if err != nil { + return value, err + } + nv, ok := v.(V) + if !ok { + return value, fmt.Errorf("unexpected node type %T returned from %T", v, mutation) + } + return nv, nil +} + +// setContextOp returns a new context with the given QueryContext attached (including its op) in case it does not exist. 
+func setContextOp(ctx context.Context, qc *QueryContext, op string) context.Context { + if ent.QueryFromContext(ctx) == nil { + qc.Op = op + ctx = ent.NewQueryContext(ctx, qc) + } + return ctx +} + +func querierAll[V Value, Q interface { + sqlAll(context.Context, ...queryHook) (V, error) +}]() Querier { + return QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + return query.sqlAll(ctx) + }) +} + +func querierCount[Q interface { + sqlCount(context.Context) (int, error) +}]() Querier { + return QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + return query.sqlCount(ctx) + }) +} + +func withInterceptors[V Value](ctx context.Context, q Query, qr Querier, inters []Interceptor) (v V, err error) { + for i := len(inters) - 1; i >= 0; i-- { + qr = inters[i].Intercept(qr) + } + rv, err := qr.Query(ctx, q) + if err != nil { + return v, err + } + vt, ok := rv.(V) + if !ok { + return v, fmt.Errorf("unexpected type %T returned from %T. expected type: %T", vt, q, v) + } + return vt, nil +} + +func scanWithInterceptors[Q1 ent.Query, Q2 interface { + sqlScan(context.Context, Q1, any) error +}](ctx context.Context, rootQuery Q1, selectOrGroup Q2, inters []Interceptor, v any) error { + rv := reflect.ValueOf(v) + var qr Querier = QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q1) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + if err := selectOrGroup.sqlScan(ctx, query, v); err != nil { + return nil, err + } + if k := rv.Kind(); k == reflect.Pointer && rv.Elem().CanInterface() { + return rv.Elem().Interface(), nil + } + return v, nil + }) + for i := len(inters) - 1; i >= 0; i-- { + qr = inters[i].Intercept(qr) + } + vv, err := qr.Query(ctx, rootQuery) + if err != nil { + return err + } + switch rv2 := reflect.ValueOf(vv); { + case rv.IsNil(), rv2.IsNil(), rv.Kind() != reflect.Pointer: + case rv.Type() == rv2.Type(): + rv.Elem().Set(rv2.Elem()) + case rv.Elem().Type() == rv2.Type(): + rv.Elem().Set(rv2) + } + return nil +} + +// queryHook describes an internal hook for the different sqlAll methods. +type queryHook func(context.Context, *sqlgraph.QuerySpec) diff --git a/internal/db/ent/enttest/enttest.go b/internal/db/ent/enttest/enttest.go new file mode 100644 index 0000000..a122d55 --- /dev/null +++ b/internal/db/ent/enttest/enttest.go @@ -0,0 +1,84 @@ +// Code generated by ent, DO NOT EDIT. + +package enttest + +import ( + "context" + + "git.rgst.io/homelab/klefki/internal/db/ent" + // required by schema hooks. + _ "git.rgst.io/homelab/klefki/internal/db/ent/runtime" + + "entgo.io/ent/dialect/sql/schema" + "git.rgst.io/homelab/klefki/internal/db/ent/migrate" +) + +type ( + // TestingT is the interface that is shared between + // testing.T and testing.B and used by enttest. + TestingT interface { + FailNow() + Error(...any) + } + + // Option configures client creation. + Option func(*options) + + options struct { + opts []ent.Option + migrateOpts []schema.MigrateOption + } +) + +// WithOptions forwards options to client creation. +func WithOptions(opts ...ent.Option) Option { + return func(o *options) { + o.opts = append(o.opts, opts...) + } +} + +// WithMigrateOptions forwards options to auto migration. 
+func WithMigrateOptions(opts ...schema.MigrateOption) Option { + return func(o *options) { + o.migrateOpts = append(o.migrateOpts, opts...) + } +} + +func newOptions(opts []Option) *options { + o := &options{} + for _, opt := range opts { + opt(o) + } + return o +} + +// Open calls ent.Open and auto-run migration. +func Open(t TestingT, driverName, dataSourceName string, opts ...Option) *ent.Client { + o := newOptions(opts) + c, err := ent.Open(driverName, dataSourceName, o.opts...) + if err != nil { + t.Error(err) + t.FailNow() + } + migrateSchema(t, c, o) + return c +} + +// NewClient calls ent.NewClient and auto-run migration. +func NewClient(t TestingT, opts ...Option) *ent.Client { + o := newOptions(opts) + c := ent.NewClient(o.opts...) + migrateSchema(t, c, o) + return c +} +func migrateSchema(t TestingT, c *ent.Client, o *options) { + tables, err := schema.CopyTables(migrate.Tables) + if err != nil { + t.Error(err) + t.FailNow() + } + if err := migrate.Create(context.Background(), c.Schema, tables, o.migrateOpts...); err != nil { + t.Error(err) + t.FailNow() + } +} diff --git a/internal/db/ent/hook/hook.go b/internal/db/ent/hook/hook.go new file mode 100644 index 0000000..b987461 --- /dev/null +++ b/internal/db/ent/hook/hook.go @@ -0,0 +1,199 @@ +// Code generated by ent, DO NOT EDIT. + +package hook + +import ( + "context" + "fmt" + + "git.rgst.io/homelab/klefki/internal/db/ent" +) + +// The MachineFunc type is an adapter to allow the use of ordinary +// function as Machine mutator. +type MachineFunc func(context.Context, *ent.MachineMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f MachineFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.MachineMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MachineMutation", m) +} + +// Condition is a hook condition function. +type Condition func(context.Context, ent.Mutation) bool + +// And groups conditions with the AND operator. +func And(first, second Condition, rest ...Condition) Condition { + return func(ctx context.Context, m ent.Mutation) bool { + if !first(ctx, m) || !second(ctx, m) { + return false + } + for _, cond := range rest { + if !cond(ctx, m) { + return false + } + } + return true + } +} + +// Or groups conditions with the OR operator. +func Or(first, second Condition, rest ...Condition) Condition { + return func(ctx context.Context, m ent.Mutation) bool { + if first(ctx, m) || second(ctx, m) { + return true + } + for _, cond := range rest { + if cond(ctx, m) { + return true + } + } + return false + } +} + +// Not negates a given condition. +func Not(cond Condition) Condition { + return func(ctx context.Context, m ent.Mutation) bool { + return !cond(ctx, m) + } +} + +// HasOp is a condition testing mutation operation. +func HasOp(op ent.Op) Condition { + return func(_ context.Context, m ent.Mutation) bool { + return m.Op().Is(op) + } +} + +// HasAddedFields is a condition validating `.AddedField` on fields. +func HasAddedFields(field string, fields ...string) Condition { + return func(_ context.Context, m ent.Mutation) bool { + if _, exists := m.AddedField(field); !exists { + return false + } + for _, field := range fields { + if _, exists := m.AddedField(field); !exists { + return false + } + } + return true + } +} + +// HasClearedFields is a condition validating `.FieldCleared` on fields. 
+func HasClearedFields(field string, fields ...string) Condition { + return func(_ context.Context, m ent.Mutation) bool { + if exists := m.FieldCleared(field); !exists { + return false + } + for _, field := range fields { + if exists := m.FieldCleared(field); !exists { + return false + } + } + return true + } +} + +// HasFields is a condition validating `.Field` on fields. +func HasFields(field string, fields ...string) Condition { + return func(_ context.Context, m ent.Mutation) bool { + if _, exists := m.Field(field); !exists { + return false + } + for _, field := range fields { + if _, exists := m.Field(field); !exists { + return false + } + } + return true + } +} + +// If executes the given hook under condition. +// +// hook.If(ComputeAverage, And(HasFields(...), HasAddedFields(...))) +func If(hk ent.Hook, cond Condition) ent.Hook { + return func(next ent.Mutator) ent.Mutator { + return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if cond(ctx, m) { + return hk(next).Mutate(ctx, m) + } + return next.Mutate(ctx, m) + }) + } +} + +// On executes the given hook only for the given operation. +// +// hook.On(Log, ent.Delete|ent.Create) +func On(hk ent.Hook, op ent.Op) ent.Hook { + return If(hk, HasOp(op)) +} + +// Unless skips the given hook only for the given operation. +// +// hook.Unless(Log, ent.Update|ent.UpdateOne) +func Unless(hk ent.Hook, op ent.Op) ent.Hook { + return If(hk, Not(HasOp(op))) +} + +// FixedError is a hook returning a fixed error. +func FixedError(err error) ent.Hook { + return func(ent.Mutator) ent.Mutator { + return ent.MutateFunc(func(context.Context, ent.Mutation) (ent.Value, error) { + return nil, err + }) + } +} + +// Reject returns a hook that rejects all operations that match op. +// +// func (T) Hooks() []ent.Hook { +// return []ent.Hook{ +// Reject(ent.Delete|ent.Update), +// } +// } +func Reject(op ent.Op) ent.Hook { + hk := FixedError(fmt.Errorf("%s operation is not allowed", op)) + return On(hk, op) +} + +// Chain acts as a list of hooks and is effectively immutable. +// Once created, it will always hold the same set of hooks in the same order. +type Chain struct { + hooks []ent.Hook +} + +// NewChain creates a new chain of hooks. +func NewChain(hooks ...ent.Hook) Chain { + return Chain{append([]ent.Hook(nil), hooks...)} +} + +// Hook chains the list of hooks and returns the final hook. +func (c Chain) Hook() ent.Hook { + return func(mutator ent.Mutator) ent.Mutator { + for i := len(c.hooks) - 1; i >= 0; i-- { + mutator = c.hooks[i](mutator) + } + return mutator + } +} + +// Append extends a chain, adding the specified hook +// as the last ones in the mutation flow. +func (c Chain) Append(hooks ...ent.Hook) Chain { + newHooks := make([]ent.Hook, 0, len(c.hooks)+len(hooks)) + newHooks = append(newHooks, c.hooks...) + newHooks = append(newHooks, hooks...) + return Chain{newHooks} +} + +// Extend extends a chain, adding the specified chain +// as the last ones in the mutation flow. +func (c Chain) Extend(chain Chain) Chain { + return c.Append(chain.hooks...) +} diff --git a/internal/db/ent/machine.go b/internal/db/ent/machine.go new file mode 100644 index 0000000..4502f52 --- /dev/null +++ b/internal/db/ent/machine.go @@ -0,0 +1,102 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "git.rgst.io/homelab/klefki/internal/db/ent/machine" +) + +// Machine is the model entity for the Machine schema. 
+type Machine struct { + config `json:"-"` + // ID of the ent. + // Fingerprint of the public key + ID string `json:"id,omitempty"` + // Public key of the machine + PublicKey string `json:"public_key,omitempty"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*Machine) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case machine.FieldID, machine.FieldPublicKey: + values[i] = new(sql.NullString) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Machine fields. +func (m *Machine) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case machine.FieldID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value.Valid { + m.ID = value.String + } + case machine.FieldPublicKey: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field public_key", values[i]) + } else if value.Valid { + m.PublicKey = value.String + } + default: + m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the Machine. +// This includes values selected through modifiers, order, etc. +func (m *Machine) Value(name string) (ent.Value, error) { + return m.selectValues.Get(name) +} + +// Update returns a builder for updating this Machine. +// Note that you need to call Machine.Unwrap() before calling this method if this Machine +// was returned from a transaction, and the transaction was committed or rolled back. +func (m *Machine) Update() *MachineUpdateOne { + return NewMachineClient(m.config).UpdateOne(m) +} + +// Unwrap unwraps the Machine entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (m *Machine) Unwrap() *Machine { + _tx, ok := m.config.driver.(*txDriver) + if !ok { + panic("ent: Machine is not a transactional entity") + } + m.config.driver = _tx.drv + return m +} + +// String implements the fmt.Stringer. +func (m *Machine) String() string { + var builder strings.Builder + builder.WriteString("Machine(") + builder.WriteString(fmt.Sprintf("id=%v, ", m.ID)) + builder.WriteString("public_key=") + builder.WriteString(m.PublicKey) + builder.WriteByte(')') + return builder.String() +} + +// Machines is a parsable slice of Machine. +type Machines []*Machine diff --git a/internal/db/ent/machine/machine.go b/internal/db/ent/machine/machine.go new file mode 100644 index 0000000..b40116e --- /dev/null +++ b/internal/db/ent/machine/machine.go @@ -0,0 +1,47 @@ +// Code generated by ent, DO NOT EDIT. + +package machine + +import ( + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the machine type in the database. + Label = "machine" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldPublicKey holds the string denoting the public_key field in the database. + FieldPublicKey = "public_key" + // Table holds the table name of the machine in the database. 
+ Table = "machines" +) + +// Columns holds all SQL columns for machine fields. +var Columns = []string{ + FieldID, + FieldPublicKey, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +// OrderOption defines the ordering options for the Machine queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByPublicKey orders the results by the public_key field. +func ByPublicKey(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPublicKey, opts...).ToFunc() +} diff --git a/internal/db/ent/machine/where.go b/internal/db/ent/machine/where.go new file mode 100644 index 0000000..601fb09 --- /dev/null +++ b/internal/db/ent/machine/where.go @@ -0,0 +1,148 @@ +// Code generated by ent, DO NOT EDIT. + +package machine + +import ( + "entgo.io/ent/dialect/sql" + "git.rgst.io/homelab/klefki/internal/db/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id string) predicate.Machine { + return predicate.Machine(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id string) predicate.Machine { + return predicate.Machine(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id string) predicate.Machine { + return predicate.Machine(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...string) predicate.Machine { + return predicate.Machine(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...string) predicate.Machine { + return predicate.Machine(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id string) predicate.Machine { + return predicate.Machine(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id string) predicate.Machine { + return predicate.Machine(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id string) predicate.Machine { + return predicate.Machine(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id string) predicate.Machine { + return predicate.Machine(sql.FieldLTE(FieldID, id)) +} + +// IDEqualFold applies the EqualFold predicate on the ID field. +func IDEqualFold(id string) predicate.Machine { + return predicate.Machine(sql.FieldEqualFold(FieldID, id)) +} + +// IDContainsFold applies the ContainsFold predicate on the ID field. +func IDContainsFold(id string) predicate.Machine { + return predicate.Machine(sql.FieldContainsFold(FieldID, id)) +} + +// PublicKey applies equality check predicate on the "public_key" field. It's identical to PublicKeyEQ. +func PublicKey(v string) predicate.Machine { + return predicate.Machine(sql.FieldEQ(FieldPublicKey, v)) +} + +// PublicKeyEQ applies the EQ predicate on the "public_key" field. +func PublicKeyEQ(v string) predicate.Machine { + return predicate.Machine(sql.FieldEQ(FieldPublicKey, v)) +} + +// PublicKeyNEQ applies the NEQ predicate on the "public_key" field. 
+func PublicKeyNEQ(v string) predicate.Machine { + return predicate.Machine(sql.FieldNEQ(FieldPublicKey, v)) +} + +// PublicKeyIn applies the In predicate on the "public_key" field. +func PublicKeyIn(vs ...string) predicate.Machine { + return predicate.Machine(sql.FieldIn(FieldPublicKey, vs...)) +} + +// PublicKeyNotIn applies the NotIn predicate on the "public_key" field. +func PublicKeyNotIn(vs ...string) predicate.Machine { + return predicate.Machine(sql.FieldNotIn(FieldPublicKey, vs...)) +} + +// PublicKeyGT applies the GT predicate on the "public_key" field. +func PublicKeyGT(v string) predicate.Machine { + return predicate.Machine(sql.FieldGT(FieldPublicKey, v)) +} + +// PublicKeyGTE applies the GTE predicate on the "public_key" field. +func PublicKeyGTE(v string) predicate.Machine { + return predicate.Machine(sql.FieldGTE(FieldPublicKey, v)) +} + +// PublicKeyLT applies the LT predicate on the "public_key" field. +func PublicKeyLT(v string) predicate.Machine { + return predicate.Machine(sql.FieldLT(FieldPublicKey, v)) +} + +// PublicKeyLTE applies the LTE predicate on the "public_key" field. +func PublicKeyLTE(v string) predicate.Machine { + return predicate.Machine(sql.FieldLTE(FieldPublicKey, v)) +} + +// PublicKeyContains applies the Contains predicate on the "public_key" field. +func PublicKeyContains(v string) predicate.Machine { + return predicate.Machine(sql.FieldContains(FieldPublicKey, v)) +} + +// PublicKeyHasPrefix applies the HasPrefix predicate on the "public_key" field. +func PublicKeyHasPrefix(v string) predicate.Machine { + return predicate.Machine(sql.FieldHasPrefix(FieldPublicKey, v)) +} + +// PublicKeyHasSuffix applies the HasSuffix predicate on the "public_key" field. +func PublicKeyHasSuffix(v string) predicate.Machine { + return predicate.Machine(sql.FieldHasSuffix(FieldPublicKey, v)) +} + +// PublicKeyEqualFold applies the EqualFold predicate on the "public_key" field. +func PublicKeyEqualFold(v string) predicate.Machine { + return predicate.Machine(sql.FieldEqualFold(FieldPublicKey, v)) +} + +// PublicKeyContainsFold applies the ContainsFold predicate on the "public_key" field. +func PublicKeyContainsFold(v string) predicate.Machine { + return predicate.Machine(sql.FieldContainsFold(FieldPublicKey, v)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Machine) predicate.Machine { + return predicate.Machine(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Machine) predicate.Machine { + return predicate.Machine(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Machine) predicate.Machine { + return predicate.Machine(sql.NotPredicates(p)) +} diff --git a/internal/db/ent/machine_create.go b/internal/db/ent/machine_create.go new file mode 100644 index 0000000..66aa732 --- /dev/null +++ b/internal/db/ent/machine_create.go @@ -0,0 +1,194 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "git.rgst.io/homelab/klefki/internal/db/ent/machine" +) + +// MachineCreate is the builder for creating a Machine entity. +type MachineCreate struct { + config + mutation *MachineMutation + hooks []Hook +} + +// SetPublicKey sets the "public_key" field. 
+func (mc *MachineCreate) SetPublicKey(s string) *MachineCreate { + mc.mutation.SetPublicKey(s) + return mc +} + +// SetID sets the "id" field. +func (mc *MachineCreate) SetID(s string) *MachineCreate { + mc.mutation.SetID(s) + return mc +} + +// Mutation returns the MachineMutation object of the builder. +func (mc *MachineCreate) Mutation() *MachineMutation { + return mc.mutation +} + +// Save creates the Machine in the database. +func (mc *MachineCreate) Save(ctx context.Context) (*Machine, error) { + return withHooks(ctx, mc.sqlSave, mc.mutation, mc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (mc *MachineCreate) SaveX(ctx context.Context) *Machine { + v, err := mc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (mc *MachineCreate) Exec(ctx context.Context) error { + _, err := mc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (mc *MachineCreate) ExecX(ctx context.Context) { + if err := mc.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (mc *MachineCreate) check() error { + if _, ok := mc.mutation.PublicKey(); !ok { + return &ValidationError{Name: "public_key", err: errors.New(`ent: missing required field "Machine.public_key"`)} + } + return nil +} + +func (mc *MachineCreate) sqlSave(ctx context.Context) (*Machine, error) { + if err := mc.check(); err != nil { + return nil, err + } + _node, _spec := mc.createSpec() + if err := sqlgraph.CreateNode(ctx, mc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(string); ok { + _node.ID = id + } else { + return nil, fmt.Errorf("unexpected Machine.ID type: %T", _spec.ID.Value) + } + } + mc.mutation.id = &_node.ID + mc.mutation.done = true + return _node, nil +} + +func (mc *MachineCreate) createSpec() (*Machine, *sqlgraph.CreateSpec) { + var ( + _node = &Machine{config: mc.config} + _spec = sqlgraph.NewCreateSpec(machine.Table, sqlgraph.NewFieldSpec(machine.FieldID, field.TypeString)) + ) + if id, ok := mc.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = id + } + if value, ok := mc.mutation.PublicKey(); ok { + _spec.SetField(machine.FieldPublicKey, field.TypeString, value) + _node.PublicKey = value + } + return _node, _spec +} + +// MachineCreateBulk is the builder for creating many Machine entities in bulk. +type MachineCreateBulk struct { + config + err error + builders []*MachineCreate +} + +// Save creates the Machine entities in the database. 
+func (mcb *MachineCreateBulk) Save(ctx context.Context) ([]*Machine, error) { + if mcb.err != nil { + return nil, mcb.err + } + specs := make([]*sqlgraph.CreateSpec, len(mcb.builders)) + nodes := make([]*Machine, len(mcb.builders)) + mutators := make([]Mutator, len(mcb.builders)) + for i := range mcb.builders { + func(i int, root context.Context) { + builder := mcb.builders[i] + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*MachineMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, mcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, mcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, mcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (mcb *MachineCreateBulk) SaveX(ctx context.Context) []*Machine { + v, err := mcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (mcb *MachineCreateBulk) Exec(ctx context.Context) error { + _, err := mcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (mcb *MachineCreateBulk) ExecX(ctx context.Context) { + if err := mcb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/internal/db/ent/machine_delete.go b/internal/db/ent/machine_delete.go new file mode 100644 index 0000000..e85a367 --- /dev/null +++ b/internal/db/ent/machine_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "git.rgst.io/homelab/klefki/internal/db/ent/machine" + "git.rgst.io/homelab/klefki/internal/db/ent/predicate" +) + +// MachineDelete is the builder for deleting a Machine entity. +type MachineDelete struct { + config + hooks []Hook + mutation *MachineMutation +} + +// Where appends a list predicates to the MachineDelete builder. +func (md *MachineDelete) Where(ps ...predicate.Machine) *MachineDelete { + md.mutation.Where(ps...) + return md +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (md *MachineDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, md.sqlExec, md.mutation, md.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (md *MachineDelete) ExecX(ctx context.Context) int { + n, err := md.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (md *MachineDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(machine.Table, sqlgraph.NewFieldSpec(machine.FieldID, field.TypeString)) + if ps := md.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, md.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + md.mutation.done = true + return affected, err +} + +// MachineDeleteOne is the builder for deleting a single Machine entity. +type MachineDeleteOne struct { + md *MachineDelete +} + +// Where appends a list predicates to the MachineDelete builder. +func (mdo *MachineDeleteOne) Where(ps ...predicate.Machine) *MachineDeleteOne { + mdo.md.mutation.Where(ps...) + return mdo +} + +// Exec executes the deletion query. +func (mdo *MachineDeleteOne) Exec(ctx context.Context) error { + n, err := mdo.md.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{machine.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (mdo *MachineDeleteOne) ExecX(ctx context.Context) { + if err := mdo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/internal/db/ent/machine_query.go b/internal/db/ent/machine_query.go new file mode 100644 index 0000000..d5d4413 --- /dev/null +++ b/internal/db/ent/machine_query.go @@ -0,0 +1,527 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "git.rgst.io/homelab/klefki/internal/db/ent/machine" + "git.rgst.io/homelab/klefki/internal/db/ent/predicate" +) + +// MachineQuery is the builder for querying Machine entities. +type MachineQuery struct { + config + ctx *QueryContext + order []machine.OrderOption + inters []Interceptor + predicates []predicate.Machine + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the MachineQuery builder. +func (mq *MachineQuery) Where(ps ...predicate.Machine) *MachineQuery { + mq.predicates = append(mq.predicates, ps...) + return mq +} + +// Limit the number of records to be returned by this query. +func (mq *MachineQuery) Limit(limit int) *MachineQuery { + mq.ctx.Limit = &limit + return mq +} + +// Offset to start from. +func (mq *MachineQuery) Offset(offset int) *MachineQuery { + mq.ctx.Offset = &offset + return mq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (mq *MachineQuery) Unique(unique bool) *MachineQuery { + mq.ctx.Unique = &unique + return mq +} + +// Order specifies how the records should be ordered. +func (mq *MachineQuery) Order(o ...machine.OrderOption) *MachineQuery { + mq.order = append(mq.order, o...) + return mq +} + +// First returns the first Machine entity from the query. +// Returns a *NotFoundError when no Machine was found. 
+func (mq *MachineQuery) First(ctx context.Context) (*Machine, error) { + nodes, err := mq.Limit(1).All(setContextOp(ctx, mq.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{machine.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (mq *MachineQuery) FirstX(ctx context.Context) *Machine { + node, err := mq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Machine ID from the query. +// Returns a *NotFoundError when no Machine ID was found. +func (mq *MachineQuery) FirstID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = mq.Limit(1).IDs(setContextOp(ctx, mq.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{machine.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (mq *MachineQuery) FirstIDX(ctx context.Context) string { + id, err := mq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Machine entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Machine entity is found. +// Returns a *NotFoundError when no Machine entities are found. +func (mq *MachineQuery) Only(ctx context.Context) (*Machine, error) { + nodes, err := mq.Limit(2).All(setContextOp(ctx, mq.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{machine.Label} + default: + return nil, &NotSingularError{machine.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (mq *MachineQuery) OnlyX(ctx context.Context) *Machine { + node, err := mq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Machine ID in the query. +// Returns a *NotSingularError when more than one Machine ID is found. +// Returns a *NotFoundError when no entities are found. +func (mq *MachineQuery) OnlyID(ctx context.Context) (id string, err error) { + var ids []string + if ids, err = mq.Limit(2).IDs(setContextOp(ctx, mq.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{machine.Label} + default: + err = &NotSingularError{machine.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (mq *MachineQuery) OnlyIDX(ctx context.Context) string { + id, err := mq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Machines. +func (mq *MachineQuery) All(ctx context.Context) ([]*Machine, error) { + ctx = setContextOp(ctx, mq.ctx, ent.OpQueryAll) + if err := mq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Machine, *MachineQuery]() + return withInterceptors[[]*Machine](ctx, mq, qr, mq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (mq *MachineQuery) AllX(ctx context.Context) []*Machine { + nodes, err := mq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Machine IDs. 
+func (mq *MachineQuery) IDs(ctx context.Context) (ids []string, err error) { + if mq.ctx.Unique == nil && mq.path != nil { + mq.Unique(true) + } + ctx = setContextOp(ctx, mq.ctx, ent.OpQueryIDs) + if err = mq.Select(machine.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (mq *MachineQuery) IDsX(ctx context.Context) []string { + ids, err := mq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (mq *MachineQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, mq.ctx, ent.OpQueryCount) + if err := mq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, mq, querierCount[*MachineQuery](), mq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (mq *MachineQuery) CountX(ctx context.Context) int { + count, err := mq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (mq *MachineQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, mq.ctx, ent.OpQueryExist) + switch _, err := mq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (mq *MachineQuery) ExistX(ctx context.Context) bool { + exist, err := mq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the MachineQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (mq *MachineQuery) Clone() *MachineQuery { + if mq == nil { + return nil + } + return &MachineQuery{ + config: mq.config, + ctx: mq.ctx.Clone(), + order: append([]machine.OrderOption{}, mq.order...), + inters: append([]Interceptor{}, mq.inters...), + predicates: append([]predicate.Machine{}, mq.predicates...), + // clone intermediate query. + sql: mq.sql.Clone(), + path: mq.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// PublicKey string `json:"public_key,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Machine.Query(). +// GroupBy(machine.FieldPublicKey). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (mq *MachineQuery) GroupBy(field string, fields ...string) *MachineGroupBy { + mq.ctx.Fields = append([]string{field}, fields...) + grbuild := &MachineGroupBy{build: mq} + grbuild.flds = &mq.ctx.Fields + grbuild.label = machine.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// PublicKey string `json:"public_key,omitempty"` +// } +// +// client.Machine.Query(). +// Select(machine.FieldPublicKey). +// Scan(ctx, &v) +func (mq *MachineQuery) Select(fields ...string) *MachineSelect { + mq.ctx.Fields = append(mq.ctx.Fields, fields...) 
+ sbuild := &MachineSelect{MachineQuery: mq} + sbuild.label = machine.Label + sbuild.flds, sbuild.scan = &mq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a MachineSelect configured with the given aggregations. +func (mq *MachineQuery) Aggregate(fns ...AggregateFunc) *MachineSelect { + return mq.Select().Aggregate(fns...) +} + +func (mq *MachineQuery) prepareQuery(ctx context.Context) error { + for _, inter := range mq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, mq); err != nil { + return err + } + } + } + for _, f := range mq.ctx.Fields { + if !machine.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if mq.path != nil { + prev, err := mq.path(ctx) + if err != nil { + return err + } + mq.sql = prev + } + return nil +} + +func (mq *MachineQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Machine, error) { + var ( + nodes = []*Machine{} + _spec = mq.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Machine).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Machine{config: mq.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, mq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (mq *MachineQuery) sqlCount(ctx context.Context) (int, error) { + _spec := mq.querySpec() + _spec.Node.Columns = mq.ctx.Fields + if len(mq.ctx.Fields) > 0 { + _spec.Unique = mq.ctx.Unique != nil && *mq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, mq.driver, _spec) +} + +func (mq *MachineQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(machine.Table, machine.Columns, sqlgraph.NewFieldSpec(machine.FieldID, field.TypeString)) + _spec.From = mq.sql + if unique := mq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if mq.path != nil { + _spec.Unique = true + } + if fields := mq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, machine.FieldID) + for i := range fields { + if fields[i] != machine.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := mq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := mq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := mq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := mq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (mq *MachineQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(mq.driver.Dialect()) + t1 := builder.Table(machine.Table) + columns := mq.ctx.Fields + if len(columns) == 0 { + columns = machine.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if mq.sql != nil { + selector = mq.sql + selector.Select(selector.Columns(columns...)...) 
+ } + if mq.ctx.Unique != nil && *mq.ctx.Unique { + selector.Distinct() + } + for _, p := range mq.predicates { + p(selector) + } + for _, p := range mq.order { + p(selector) + } + if offset := mq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := mq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// MachineGroupBy is the group-by builder for Machine entities. +type MachineGroupBy struct { + selector + build *MachineQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (mgb *MachineGroupBy) Aggregate(fns ...AggregateFunc) *MachineGroupBy { + mgb.fns = append(mgb.fns, fns...) + return mgb +} + +// Scan applies the selector query and scans the result into the given value. +func (mgb *MachineGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, mgb.build.ctx, ent.OpQueryGroupBy) + if err := mgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*MachineQuery, *MachineGroupBy](ctx, mgb.build, mgb, mgb.build.inters, v) +} + +func (mgb *MachineGroupBy) sqlScan(ctx context.Context, root *MachineQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(mgb.fns)) + for _, fn := range mgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*mgb.flds)+len(mgb.fns)) + for _, f := range *mgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*mgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := mgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// MachineSelect is the builder for selecting fields of Machine entities. +type MachineSelect struct { + *MachineQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ms *MachineSelect) Aggregate(fns ...AggregateFunc) *MachineSelect { + ms.fns = append(ms.fns, fns...) + return ms +} + +// Scan applies the selector query and scans the result into the given value. +func (ms *MachineSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ms.ctx, ent.OpQuerySelect) + if err := ms.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*MachineQuery, *MachineSelect](ctx, ms.MachineQuery, ms, ms.inters, v) +} + +func (ms *MachineSelect) sqlScan(ctx context.Context, root *MachineQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ms.fns)) + for _, fn := range ms.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*ms.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) 
+ } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ms.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/internal/db/ent/machine_update.go b/internal/db/ent/machine_update.go new file mode 100644 index 0000000..e1b41b2 --- /dev/null +++ b/internal/db/ent/machine_update.go @@ -0,0 +1,209 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "git.rgst.io/homelab/klefki/internal/db/ent/machine" + "git.rgst.io/homelab/klefki/internal/db/ent/predicate" +) + +// MachineUpdate is the builder for updating Machine entities. +type MachineUpdate struct { + config + hooks []Hook + mutation *MachineMutation +} + +// Where appends a list predicates to the MachineUpdate builder. +func (mu *MachineUpdate) Where(ps ...predicate.Machine) *MachineUpdate { + mu.mutation.Where(ps...) + return mu +} + +// SetPublicKey sets the "public_key" field. +func (mu *MachineUpdate) SetPublicKey(s string) *MachineUpdate { + mu.mutation.SetPublicKey(s) + return mu +} + +// SetNillablePublicKey sets the "public_key" field if the given value is not nil. +func (mu *MachineUpdate) SetNillablePublicKey(s *string) *MachineUpdate { + if s != nil { + mu.SetPublicKey(*s) + } + return mu +} + +// Mutation returns the MachineMutation object of the builder. +func (mu *MachineUpdate) Mutation() *MachineMutation { + return mu.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (mu *MachineUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, mu.sqlSave, mu.mutation, mu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (mu *MachineUpdate) SaveX(ctx context.Context) int { + affected, err := mu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (mu *MachineUpdate) Exec(ctx context.Context) error { + _, err := mu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (mu *MachineUpdate) ExecX(ctx context.Context) { + if err := mu.Exec(ctx); err != nil { + panic(err) + } +} + +func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := sqlgraph.NewUpdateSpec(machine.Table, machine.Columns, sqlgraph.NewFieldSpec(machine.FieldID, field.TypeString)) + if ps := mu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := mu.mutation.PublicKey(); ok { + _spec.SetField(machine.FieldPublicKey, field.TypeString, value) + } + if n, err = sqlgraph.UpdateNodes(ctx, mu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{machine.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + mu.mutation.done = true + return n, nil +} + +// MachineUpdateOne is the builder for updating a single Machine entity. +type MachineUpdateOne struct { + config + fields []string + hooks []Hook + mutation *MachineMutation +} + +// SetPublicKey sets the "public_key" field. +func (muo *MachineUpdateOne) SetPublicKey(s string) *MachineUpdateOne { + muo.mutation.SetPublicKey(s) + return muo +} + +// SetNillablePublicKey sets the "public_key" field if the given value is not nil. 
+func (muo *MachineUpdateOne) SetNillablePublicKey(s *string) *MachineUpdateOne { + if s != nil { + muo.SetPublicKey(*s) + } + return muo +} + +// Mutation returns the MachineMutation object of the builder. +func (muo *MachineUpdateOne) Mutation() *MachineMutation { + return muo.mutation +} + +// Where appends a list predicates to the MachineUpdate builder. +func (muo *MachineUpdateOne) Where(ps ...predicate.Machine) *MachineUpdateOne { + muo.mutation.Where(ps...) + return muo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (muo *MachineUpdateOne) Select(field string, fields ...string) *MachineUpdateOne { + muo.fields = append([]string{field}, fields...) + return muo +} + +// Save executes the query and returns the updated Machine entity. +func (muo *MachineUpdateOne) Save(ctx context.Context) (*Machine, error) { + return withHooks(ctx, muo.sqlSave, muo.mutation, muo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (muo *MachineUpdateOne) SaveX(ctx context.Context) *Machine { + node, err := muo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (muo *MachineUpdateOne) Exec(ctx context.Context) error { + _, err := muo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (muo *MachineUpdateOne) ExecX(ctx context.Context) { + if err := muo.Exec(ctx); err != nil { + panic(err) + } +} + +func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err error) { + _spec := sqlgraph.NewUpdateSpec(machine.Table, machine.Columns, sqlgraph.NewFieldSpec(machine.FieldID, field.TypeString)) + id, ok := muo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Machine.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := muo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, machine.FieldID) + for _, f := range fields { + if !machine.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != machine.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := muo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := muo.mutation.PublicKey(); ok { + _spec.SetField(machine.FieldPublicKey, field.TypeString, value) + } + _node = &Machine{config: muo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, muo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{machine.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + muo.mutation.done = true + return _node, nil +} diff --git a/internal/db/ent/migrate/migrate.go b/internal/db/ent/migrate/migrate.go new file mode 100644 index 0000000..1956a6b --- /dev/null +++ b/internal/db/ent/migrate/migrate.go @@ -0,0 +1,64 @@ +// Code generated by ent, DO NOT EDIT. + +package migrate + +import ( + "context" + "fmt" + "io" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql/schema" +) + +var ( + // WithGlobalUniqueID sets the universal ids options to the migration. 
+ // If this option is enabled, ent migration will allocate a 1<<32 range + // for the ids of each entity (table). + // Note that this option cannot be applied on tables that already exist. + WithGlobalUniqueID = schema.WithGlobalUniqueID + // WithDropColumn sets the drop column option to the migration. + // If this option is enabled, ent migration will drop old columns + // that were used for both fields and edges. This defaults to false. + WithDropColumn = schema.WithDropColumn + // WithDropIndex sets the drop index option to the migration. + // If this option is enabled, ent migration will drop old indexes + // that were defined in the schema. This defaults to false. + // Note that unique constraints are defined using `UNIQUE INDEX`, + // and therefore, it's recommended to enable this option to get more + // flexibility in the schema changes. + WithDropIndex = schema.WithDropIndex + // WithForeignKeys enables creating foreign-key in schema DDL. This defaults to true. + WithForeignKeys = schema.WithForeignKeys +) + +// Schema is the API for creating, migrating and dropping a schema. +type Schema struct { + drv dialect.Driver +} + +// NewSchema creates a new schema client. +func NewSchema(drv dialect.Driver) *Schema { return &Schema{drv: drv} } + +// Create creates all schema resources. +func (s *Schema) Create(ctx context.Context, opts ...schema.MigrateOption) error { + return Create(ctx, s, Tables, opts...) +} + +// Create creates all table resources using the given schema driver. +func Create(ctx context.Context, s *Schema, tables []*schema.Table, opts ...schema.MigrateOption) error { + migrate, err := schema.NewMigrate(s.drv, opts...) + if err != nil { + return fmt.Errorf("ent/migrate: %w", err) + } + return migrate.Create(ctx, tables...) +} + +// WriteTo writes the schema changes to w instead of running them against the database. +// +// if err := client.Schema.WriteTo(context.Background(), os.Stdout); err != nil { +// log.Fatal(err) +// } +func (s *Schema) WriteTo(ctx context.Context, w io.Writer, opts ...schema.MigrateOption) error { + return Create(ctx, &Schema{drv: &schema.WriteDriver{Writer: w, Driver: s.drv}}, Tables, opts...) +} diff --git a/internal/db/ent/migrate/schema.go b/internal/db/ent/migrate/schema.go new file mode 100644 index 0000000..e93e0b2 --- /dev/null +++ b/internal/db/ent/migrate/schema.go @@ -0,0 +1,29 @@ +// Code generated by ent, DO NOT EDIT. + +package migrate + +import ( + "entgo.io/ent/dialect/sql/schema" + "entgo.io/ent/schema/field" +) + +var ( + // MachinesColumns holds the columns for the "machines" table. + MachinesColumns = []*schema.Column{ + {Name: "id", Type: field.TypeString}, + {Name: "public_key", Type: field.TypeString}, + } + // MachinesTable holds the schema information for the "machines" table. + MachinesTable = &schema.Table{ + Name: "machines", + Columns: MachinesColumns, + PrimaryKey: []*schema.Column{MachinesColumns[0]}, + } + // Tables holds all the tables in the schema. + Tables = []*schema.Table{ + MachinesTable, + } +) + +func init() { +} diff --git a/internal/db/ent/mutation.go b/internal/db/ent/mutation.go new file mode 100644 index 0000000..95cfa7a --- /dev/null +++ b/internal/db/ent/mutation.go @@ -0,0 +1,359 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "sync" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "git.rgst.io/homelab/klefki/internal/db/ent/machine" + "git.rgst.io/homelab/klefki/internal/db/ent/predicate" +) + +const ( + // Operation types. 
+ OpCreate = ent.OpCreate + OpDelete = ent.OpDelete + OpDeleteOne = ent.OpDeleteOne + OpUpdate = ent.OpUpdate + OpUpdateOne = ent.OpUpdateOne + + // Node types. + TypeMachine = "Machine" +) + +// MachineMutation represents an operation that mutates the Machine nodes in the graph. +type MachineMutation struct { + config + op Op + typ string + id *string + public_key *string + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*Machine, error) + predicates []predicate.Machine +} + +var _ ent.Mutation = (*MachineMutation)(nil) + +// machineOption allows management of the mutation configuration using functional options. +type machineOption func(*MachineMutation) + +// newMachineMutation creates new mutation for the Machine entity. +func newMachineMutation(c config, op Op, opts ...machineOption) *MachineMutation { + m := &MachineMutation{ + config: c, + op: op, + typ: TypeMachine, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withMachineID sets the ID field of the mutation. +func withMachineID(id string) machineOption { + return func(m *MachineMutation) { + var ( + err error + once sync.Once + value *Machine + ) + m.oldValue = func(ctx context.Context) (*Machine, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Machine.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withMachine sets the old Machine of the mutation. +func withMachine(node *Machine) machineOption { + return func(m *MachineMutation) { + m.oldValue = func(context.Context) (*Machine, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m MachineMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m MachineMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// SetID sets the value of the id field. Note that this +// operation is only accepted on creation of Machine entities. +func (m *MachineMutation) SetID(id string) { + m.id = &id +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *MachineMutation) ID() (id string, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. 
+func (m *MachineMutation) IDs(ctx context.Context) ([]string, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []string{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Machine.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetPublicKey sets the "public_key" field. +func (m *MachineMutation) SetPublicKey(s string) { + m.public_key = &s +} + +// PublicKey returns the value of the "public_key" field in the mutation. +func (m *MachineMutation) PublicKey() (r string, exists bool) { + v := m.public_key + if v == nil { + return + } + return *v, true +} + +// OldPublicKey returns the old "public_key" field's value of the Machine entity. +// If the Machine object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *MachineMutation) OldPublicKey(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPublicKey is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPublicKey requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPublicKey: %w", err) + } + return oldValue.PublicKey, nil +} + +// ResetPublicKey resets all changes to the "public_key" field. +func (m *MachineMutation) ResetPublicKey() { + m.public_key = nil +} + +// Where appends a list predicates to the MachineMutation builder. +func (m *MachineMutation) Where(ps ...predicate.Machine) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the MachineMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *MachineMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Machine, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *MachineMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *MachineMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (Machine). +func (m *MachineMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *MachineMutation) Fields() []string { + fields := make([]string, 0, 1) + if m.public_key != nil { + fields = append(fields, machine.FieldPublicKey) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *MachineMutation) Field(name string) (ent.Value, bool) { + switch name { + case machine.FieldPublicKey: + return m.PublicKey() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *MachineMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case machine.FieldPublicKey: + return m.OldPublicKey(ctx) + } + return nil, fmt.Errorf("unknown Machine field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *MachineMutation) SetField(name string, value ent.Value) error { + switch name { + case machine.FieldPublicKey: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPublicKey(v) + return nil + } + return fmt.Errorf("unknown Machine field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *MachineMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *MachineMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *MachineMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Machine numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *MachineMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *MachineMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *MachineMutation) ClearField(name string) error { + return fmt.Errorf("unknown Machine nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *MachineMutation) ResetField(name string) error { + switch name { + case machine.FieldPublicKey: + m.ResetPublicKey() + return nil + } + return fmt.Errorf("unknown Machine field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *MachineMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *MachineMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *MachineMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *MachineMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. 
+func (m *MachineMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *MachineMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *MachineMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown Machine unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *MachineMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown Machine edge %s", name) +} diff --git a/internal/db/ent/predicate/predicate.go b/internal/db/ent/predicate/predicate.go new file mode 100644 index 0000000..4c7c577 --- /dev/null +++ b/internal/db/ent/predicate/predicate.go @@ -0,0 +1,10 @@ +// Code generated by ent, DO NOT EDIT. + +package predicate + +import ( + "entgo.io/ent/dialect/sql" +) + +// Machine is the predicate function for machine builders. +type Machine func(*sql.Selector) diff --git a/internal/db/ent/runtime.go b/internal/db/ent/runtime.go new file mode 100644 index 0000000..793d053 --- /dev/null +++ b/internal/db/ent/runtime.go @@ -0,0 +1,9 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +// The init function reads all schema descriptors with runtime code +// (default values, validators, hooks and policies) and stitches it +// to their package variables. +func init() { +} diff --git a/internal/db/ent/runtime/runtime.go b/internal/db/ent/runtime/runtime.go new file mode 100644 index 0000000..fbf43da --- /dev/null +++ b/internal/db/ent/runtime/runtime.go @@ -0,0 +1,10 @@ +// Code generated by ent, DO NOT EDIT. + +package runtime + +// The schema-stitching logic is generated in git.rgst.io/homelab/klefki/internal/db/ent/runtime.go + +const ( + Version = "v0.14.2" // Version of ent codegen. + Sum = "h1:ywld/j2Rx4EmnIKs8eZ29cbFA1zpB+DA9TLL5l3rlq0=" // Sum of ent codegen. +) diff --git a/internal/db/ent/schema/machine.go b/internal/db/ent/schema/machine.go new file mode 100644 index 0000000..fdd735e --- /dev/null +++ b/internal/db/ent/schema/machine.go @@ -0,0 +1,36 @@ +// Copyright (C) 2025 klefki contributors +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . +// +// SPDX-License-Identifier: AGPL-3.0 + +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/field" +) + +// Machine holds the schema definition for the Machine entity. +type Machine struct { + ent.Schema +} + +// Fields of the Machine. 
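+//
+// Illustrative sketch (added commentary, not generated output): once ent
+// codegen runs against this schema, the two fields surface on the generated
+// builders earlier in this patch roughly as:
+//
+//	m, err := client.Machine.Create().
+//		SetID(fingerprint).   // fingerprint of the public key (placeholder variable)
+//		SetPublicKey(pubKey). // encoded public key string (placeholder variable)
+//		Save(ctx)
+//
+// SetID, SetPublicKey, and Save are defined in machine_create.go above;
+// "client", "fingerprint", and "pubKey" are hypothetical names used only for
+// illustration.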
+func (Machine) Fields() []ent.Field { + return []ent.Field{ + field.String("id").Comment("Fingerprint of the public key"), + field.String("public_key").Comment("Public key of the machine"), + } +} diff --git a/internal/db/ent/tx.go b/internal/db/ent/tx.go new file mode 100644 index 0000000..fe22256 --- /dev/null +++ b/internal/db/ent/tx.go @@ -0,0 +1,210 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "sync" + + "entgo.io/ent/dialect" +) + +// Tx is a transactional client that is created by calling Client.Tx(). +type Tx struct { + config + // Machine is the client for interacting with the Machine builders. + Machine *MachineClient + + // lazily loaded. + client *Client + clientOnce sync.Once + // ctx lives for the life of the transaction. It is + // the same context used by the underlying connection. + ctx context.Context +} + +type ( + // Committer is the interface that wraps the Commit method. + Committer interface { + Commit(context.Context, *Tx) error + } + + // The CommitFunc type is an adapter to allow the use of ordinary + // function as a Committer. If f is a function with the appropriate + // signature, CommitFunc(f) is a Committer that calls f. + CommitFunc func(context.Context, *Tx) error + + // CommitHook defines the "commit middleware". A function that gets a Committer + // and returns a Committer. For example: + // + // hook := func(next ent.Committer) ent.Committer { + // return ent.CommitFunc(func(ctx context.Context, tx *ent.Tx) error { + // // Do some stuff before. + // if err := next.Commit(ctx, tx); err != nil { + // return err + // } + // // Do some stuff after. + // return nil + // }) + // } + // + CommitHook func(Committer) Committer +) + +// Commit calls f(ctx, m). +func (f CommitFunc) Commit(ctx context.Context, tx *Tx) error { + return f(ctx, tx) +} + +// Commit commits the transaction. +func (tx *Tx) Commit() error { + txDriver := tx.config.driver.(*txDriver) + var fn Committer = CommitFunc(func(context.Context, *Tx) error { + return txDriver.tx.Commit() + }) + txDriver.mu.Lock() + hooks := append([]CommitHook(nil), txDriver.onCommit...) + txDriver.mu.Unlock() + for i := len(hooks) - 1; i >= 0; i-- { + fn = hooks[i](fn) + } + return fn.Commit(tx.ctx, tx) +} + +// OnCommit adds a hook to call on commit. +func (tx *Tx) OnCommit(f CommitHook) { + txDriver := tx.config.driver.(*txDriver) + txDriver.mu.Lock() + txDriver.onCommit = append(txDriver.onCommit, f) + txDriver.mu.Unlock() +} + +type ( + // Rollbacker is the interface that wraps the Rollback method. + Rollbacker interface { + Rollback(context.Context, *Tx) error + } + + // The RollbackFunc type is an adapter to allow the use of ordinary + // function as a Rollbacker. If f is a function with the appropriate + // signature, RollbackFunc(f) is a Rollbacker that calls f. + RollbackFunc func(context.Context, *Tx) error + + // RollbackHook defines the "rollback middleware". A function that gets a Rollbacker + // and returns a Rollbacker. For example: + // + // hook := func(next ent.Rollbacker) ent.Rollbacker { + // return ent.RollbackFunc(func(ctx context.Context, tx *ent.Tx) error { + // // Do some stuff before. + // if err := next.Rollback(ctx, tx); err != nil { + // return err + // } + // // Do some stuff after. + // return nil + // }) + // } + // + RollbackHook func(Rollbacker) Rollbacker +) + +// Rollback calls f(ctx, m). +func (f RollbackFunc) Rollback(ctx context.Context, tx *Tx) error { + return f(ctx, tx) +} + +// Rollback rollbacks the transaction. 
+func (tx *Tx) Rollback() error { + txDriver := tx.config.driver.(*txDriver) + var fn Rollbacker = RollbackFunc(func(context.Context, *Tx) error { + return txDriver.tx.Rollback() + }) + txDriver.mu.Lock() + hooks := append([]RollbackHook(nil), txDriver.onRollback...) + txDriver.mu.Unlock() + for i := len(hooks) - 1; i >= 0; i-- { + fn = hooks[i](fn) + } + return fn.Rollback(tx.ctx, tx) +} + +// OnRollback adds a hook to call on rollback. +func (tx *Tx) OnRollback(f RollbackHook) { + txDriver := tx.config.driver.(*txDriver) + txDriver.mu.Lock() + txDriver.onRollback = append(txDriver.onRollback, f) + txDriver.mu.Unlock() +} + +// Client returns a Client that binds to current transaction. +func (tx *Tx) Client() *Client { + tx.clientOnce.Do(func() { + tx.client = &Client{config: tx.config} + tx.client.init() + }) + return tx.client +} + +func (tx *Tx) init() { + tx.Machine = NewMachineClient(tx.config) +} + +// txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation. +// The idea is to support transactions without adding any extra code to the builders. +// When a builder calls to driver.Tx(), it gets the same dialect.Tx instance. +// Commit and Rollback are nop for the internal builders and the user must call one +// of them in order to commit or rollback the transaction. +// +// If a closed transaction is embedded in one of the generated entities, and the entity +// applies a query, for example: Machine.QueryXXX(), the query will be executed +// through the driver which created this transaction. +// +// Note that txDriver is not goroutine safe. +type txDriver struct { + // the driver we started the transaction from. + drv dialect.Driver + // tx is the underlying transaction. + tx dialect.Tx + // completion hooks. + mu sync.Mutex + onCommit []CommitHook + onRollback []RollbackHook +} + +// newTx creates a new transactional driver. +func newTx(ctx context.Context, drv dialect.Driver) (*txDriver, error) { + tx, err := drv.Tx(ctx) + if err != nil { + return nil, err + } + return &txDriver{tx: tx, drv: drv}, nil +} + +// Tx returns the transaction wrapper (txDriver) to avoid Commit or Rollback calls +// from the internal builders. Should be called only by the internal builders. +func (tx *txDriver) Tx(context.Context) (dialect.Tx, error) { return tx, nil } + +// Dialect returns the dialect of the driver we started the transaction from. +func (tx *txDriver) Dialect() string { return tx.drv.Dialect() } + +// Close is a nop close. +func (*txDriver) Close() error { return nil } + +// Commit is a nop commit for the internal builders. +// User must call `Tx.Commit` in order to commit the transaction. +func (*txDriver) Commit() error { return nil } + +// Rollback is a nop rollback for the internal builders. +// User must call `Tx.Rollback` in order to rollback the transaction. +func (*txDriver) Rollback() error { return nil } + +// Exec calls tx.Exec. +func (tx *txDriver) Exec(ctx context.Context, query string, args, v any) error { + return tx.tx.Exec(ctx, query, args, v) +} + +// Query calls tx.Query. 
+func (tx *txDriver) Query(ctx context.Context, query string, args, v any) error { + return tx.tx.Query(ctx, query, args, v) +} + +var _ dialect.Driver = (*txDriver)(nil) diff --git a/internal/db/generate.go b/internal/db/generate.go new file mode 100644 index 0000000..a34e2b6 --- /dev/null +++ b/internal/db/generate.go @@ -0,0 +1,20 @@ +// Copyright (C) 2025 klefki contributors +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . +// +// SPDX-License-Identifier: AGPL-3.0 + +package db + +//go:generate go tool entgo.io/ent/cmd/ent generate ./ent/schema diff --git a/internal/server/grpc/generate.go b/internal/server/grpc/generate.go new file mode 100644 index 0000000..b377f2d --- /dev/null +++ b/internal/server/grpc/generate.go @@ -0,0 +1,22 @@ +// Copyright (C) 2025 klefki contributors +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . +// +// SPDX-License-Identifier: AGPL-3.0 + +// Package grpc is not a consumable package. This is used only to +// trigger the generation of protobuf and associated gRPC artifacts. +package grpc + +//go:generate bash -c "cd ../../../ && exec buf generate" diff --git a/internal/server/grpc/generated/go/rgst/klefki/v1/kelfki.pb.go b/internal/server/grpc/generated/go/rgst/klefki/v1/kelfki.pb.go new file mode 100644 index 0000000..0ba5141 --- /dev/null +++ b/internal/server/grpc/generated/go/rgst/klefki/v1/kelfki.pb.go @@ -0,0 +1,207 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.5 +// protoc (unknown) +// source: rgst/klefki/v1/kelfki.proto + +package v1 + +import ( + reflect "reflect" + unsafe "unsafe" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + _ "google.golang.org/protobuf/types/gofeaturespb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type GetKeyRequest struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetKeyRequest) Reset() { + *x = GetKeyRequest{} + mi := &file_rgst_klefki_v1_kelfki_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetKeyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetKeyRequest) ProtoMessage() {} + +func (x *GetKeyRequest) ProtoReflect() protoreflect.Message { + mi := &file_rgst_klefki_v1_kelfki_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +type GetKeyRequest_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. + +} + +func (b0 GetKeyRequest_builder) Build() *GetKeyRequest { + m0 := &GetKeyRequest{} + b, x := &b0, m0 + _, _ = b, x + return m0 +} + +type GetKeyResponse struct { + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Key *string `protobuf:"bytes,1,opt,name=key"` + XXX_raceDetectHookData protoimpl.RaceDetectHookData + XXX_presence [1]uint32 + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetKeyResponse) Reset() { + *x = GetKeyResponse{} + mi := &file_rgst_klefki_v1_kelfki_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetKeyResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetKeyResponse) ProtoMessage() {} + +func (x *GetKeyResponse) ProtoReflect() protoreflect.Message { + mi := &file_rgst_klefki_v1_kelfki_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +func (x *GetKeyResponse) GetKey() string { + if x != nil { + if x.xxx_hidden_Key != nil { + return *x.xxx_hidden_Key + } + return "" + } + return "" +} + +func (x *GetKeyResponse) SetKey(v string) { + x.xxx_hidden_Key = &v + protoimpl.X.SetPresent(&(x.XXX_presence[0]), 0, 1) +} + +func (x *GetKeyResponse) HasKey() bool { + if x == nil { + return false + } + return protoimpl.X.Present(&(x.XXX_presence[0]), 0) +} + +func (x *GetKeyResponse) ClearKey() { + protoimpl.X.ClearPresent(&(x.XXX_presence[0]), 0) + x.xxx_hidden_Key = nil +} + +type GetKeyResponse_builder struct { + _ [0]func() // Prevents comparability and use of unkeyed literals for the builder. 
+ + Key *string +} + +func (b0 GetKeyResponse_builder) Build() *GetKeyResponse { + m0 := &GetKeyResponse{} + b, x := &b0, m0 + _, _ = b, x + if b.Key != nil { + protoimpl.X.SetPresentNonAtomic(&(x.XXX_presence[0]), 0, 1) + x.xxx_hidden_Key = b.Key + } + return m0 +} + +var File_rgst_klefki_v1_kelfki_proto protoreflect.FileDescriptor + +var file_rgst_klefki_v1_kelfki_proto_rawDesc = string([]byte{ + 0x0a, 0x1b, 0x72, 0x67, 0x73, 0x74, 0x2f, 0x6b, 0x6c, 0x65, 0x66, 0x6b, 0x69, 0x2f, 0x76, 0x31, + 0x2f, 0x6b, 0x65, 0x6c, 0x66, 0x6b, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x72, + 0x67, 0x73, 0x74, 0x2e, 0x6b, 0x6c, 0x65, 0x66, 0x6b, 0x69, 0x2e, 0x76, 0x31, 0x1a, 0x21, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x67, + 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x0f, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0x22, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x32, 0x58, 0x0a, 0x0d, 0x4b, 0x6c, 0x65, 0x66, 0x6b, 0x69, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x06, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, + 0x12, 0x1d, 0x2e, 0x72, 0x67, 0x73, 0x74, 0x2e, 0x6b, 0x6c, 0x65, 0x66, 0x6b, 0x69, 0x2e, 0x76, + 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1e, 0x2e, 0x72, 0x67, 0x73, 0x74, 0x2e, 0x6b, 0x6c, 0x65, 0x66, 0x6b, 0x69, 0x2e, 0x76, 0x31, + 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, + 0x3f, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x2e, 0x72, 0x67, 0x73, 0x74, 0x2e, 0x69, 0x6f, 0x2f, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x2f, 0x67, 0x6f, 0x2f, 0x72, 0x67, 0x73, 0x74, 0x2f, 0x6b, + 0x6c, 0x65, 0x66, 0x6b, 0x69, 0x2f, 0x76, 0x31, 0x92, 0x03, 0x05, 0xd2, 0x3e, 0x02, 0x10, 0x03, + 0x62, 0x08, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x70, 0xe8, 0x07, +}) + +var file_rgst_klefki_v1_kelfki_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_rgst_klefki_v1_kelfki_proto_goTypes = []any{ + (*GetKeyRequest)(nil), // 0: rgst.klefki.v1.GetKeyRequest + (*GetKeyResponse)(nil), // 1: rgst.klefki.v1.GetKeyResponse +} +var file_rgst_klefki_v1_kelfki_proto_depIdxs = []int32{ + 0, // 0: rgst.klefki.v1.KlefkiService.GetKey:input_type -> rgst.klefki.v1.GetKeyRequest + 1, // 1: rgst.klefki.v1.KlefkiService.GetKey:output_type -> rgst.klefki.v1.GetKeyResponse + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_rgst_klefki_v1_kelfki_proto_init() } +func file_rgst_klefki_v1_kelfki_proto_init() { + if File_rgst_klefki_v1_kelfki_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_rgst_klefki_v1_kelfki_proto_rawDesc), len(file_rgst_klefki_v1_kelfki_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: 
file_rgst_klefki_v1_kelfki_proto_goTypes, + DependencyIndexes: file_rgst_klefki_v1_kelfki_proto_depIdxs, + MessageInfos: file_rgst_klefki_v1_kelfki_proto_msgTypes, + }.Build() + File_rgst_klefki_v1_kelfki_proto = out.File + file_rgst_klefki_v1_kelfki_proto_goTypes = nil + file_rgst_klefki_v1_kelfki_proto_depIdxs = nil +} diff --git a/internal/server/grpc/generated/go/rgst/klefki/v1/kelfki_grpc.pb.go b/internal/server/grpc/generated/go/rgst/klefki/v1/kelfki_grpc.pb.go new file mode 100644 index 0000000..5514b65 --- /dev/null +++ b/internal/server/grpc/generated/go/rgst/klefki/v1/kelfki_grpc.pb.go @@ -0,0 +1,122 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc (unknown) +// source: rgst/klefki/v1/kelfki.proto + +package v1 + +import ( + context "context" + + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + KlefkiService_GetKey_FullMethodName = "/rgst.klefki.v1.KlefkiService/GetKey" +) + +// KlefkiServiceClient is the client API for KlefkiService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type KlefkiServiceClient interface { + GetKey(ctx context.Context, in *GetKeyRequest, opts ...grpc.CallOption) (*GetKeyResponse, error) +} + +type klefkiServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewKlefkiServiceClient(cc grpc.ClientConnInterface) KlefkiServiceClient { + return &klefkiServiceClient{cc} +} + +func (c *klefkiServiceClient) GetKey(ctx context.Context, in *GetKeyRequest, opts ...grpc.CallOption) (*GetKeyResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetKeyResponse) + err := c.cc.Invoke(ctx, KlefkiService_GetKey_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// KlefkiServiceServer is the server API for KlefkiService service. +// All implementations must embed UnimplementedKlefkiServiceServer +// for forward compatibility. +type KlefkiServiceServer interface { + GetKey(context.Context, *GetKeyRequest) (*GetKeyResponse, error) + mustEmbedUnimplementedKlefkiServiceServer() +} + +// UnimplementedKlefkiServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedKlefkiServiceServer struct{} + +func (UnimplementedKlefkiServiceServer) GetKey(context.Context, *GetKeyRequest) (*GetKeyResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetKey not implemented") +} +func (UnimplementedKlefkiServiceServer) mustEmbedUnimplementedKlefkiServiceServer() {} +func (UnimplementedKlefkiServiceServer) testEmbeddedByValue() {} + +// UnsafeKlefkiServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to KlefkiServiceServer will +// result in compilation errors. 
+type UnsafeKlefkiServiceServer interface { + mustEmbedUnimplementedKlefkiServiceServer() +} + +func RegisterKlefkiServiceServer(s grpc.ServiceRegistrar, srv KlefkiServiceServer) { + // If the following call pancis, it indicates UnimplementedKlefkiServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&KlefkiService_ServiceDesc, srv) +} + +func _KlefkiService_GetKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KlefkiServiceServer).GetKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: KlefkiService_GetKey_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KlefkiServiceServer).GetKey(ctx, req.(*GetKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// KlefkiService_ServiceDesc is the grpc.ServiceDesc for KlefkiService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var KlefkiService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "rgst.klefki.v1.KlefkiService", + HandlerType: (*KlefkiServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetKey", + Handler: _KlefkiService_GetKey_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "rgst/klefki/v1/kelfki.proto", +} diff --git a/internal/server/grpc/proto/rgst/klefki/v1/kelfki.proto b/internal/server/grpc/proto/rgst/klefki/v1/kelfki.proto new file mode 100644 index 0000000..aa8fb9e --- /dev/null +++ b/internal/server/grpc/proto/rgst/klefki/v1/kelfki.proto @@ -0,0 +1,18 @@ +edition = "2023"; + +package rgst.klefki.v1; + +import "google/protobuf/go_features.proto"; + +option features.(pb.go).api_level = API_OPAQUE; +option go_package = "git.rgst.io/internal/grpc/generated/go/rgst/klefki/v1"; + +message GetKeyRequest {} + +message GetKeyResponse { + string key = 1; +} + +service KlefkiService { + rpc GetKey(GetKeyRequest) returns (GetKeyResponse); +} diff --git a/internal/server/server.go b/internal/server/server.go new file mode 100644 index 0000000..50422f6 --- /dev/null +++ b/internal/server/server.go @@ -0,0 +1,63 @@ +// Copyright (C) 2025 klefki contributors +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . +// +// SPDX-License-Identifier: AGPL-3.0 + +// Package server contains the gRPC server logic for the service. 
+package server + +import ( + "context" + "fmt" + "net" + + pbgrpcv1 "git.rgst.io/homelab/klefki/internal/server/grpc/generated/go/rgst/klefki/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/reflection" +) + +// Server is a Klefki gRPC server +type Server struct { + gs *grpc.Server + pbgrpcv1.UnimplementedKlefkiServiceServer +} + +// Run starts the server +func (s *Server) Run(_ context.Context) error { + s.gs = grpc.NewServer() + pbgrpcv1.RegisterKlefkiServiceServer(s.gs, s) + reflection.Register(s.gs) + + lis, err := net.Listen("tcp", ":5300") //nolint:gosec // Why: This is fine. + if err != nil { + return fmt.Errorf("failed to create listener: %w", err) + } + + fmt.Printf("starting gRPC server on %s\n", lis.Addr()) + + return s.gs.Serve(lis) +} + +// Close closes the server +func (s *Server) Close(_ context.Context) error { + if s.gs == nil { + return nil + } + + fmt.Println("shutting down server") + + s.gs.GracefulStop() + return nil +}
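
For illustration only (not part of this patch): a minimal client sketch against the KlefkiService stub wired up above. It assumes the server is running locally on the hard-coded :5300 listener from internal/server/server.go, uses the opaque-API builders from the generated kelfki.pb.go, and dials without TLS since the server configures no transport credentials yet. Because Server does not override GetKey, the call is expected to fail with codes.Unimplemented until the handler is implemented.

package main

import (
	"context"
	"fmt"
	"log"

	pbgrpcv1 "git.rgst.io/homelab/klefki/internal/server/grpc/generated/go/rgst/klefki/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Dial the local listener started by server.Run. Plaintext only; the
	// server registers no transport credentials in this patch.
	conn, err := grpc.NewClient("localhost:5300",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	defer conn.Close()

	client := pbgrpcv1.NewKlefkiServiceClient(conn)

	// With features.(pb.go).api_level = API_OPAQUE, messages are built via
	// the generated *_builder types instead of struct literals.
	req := pbgrpcv1.GetKeyRequest_builder{}.Build()

	resp, err := client.GetKey(context.Background(), req)
	if err != nil {
		// Expected for now: Server embeds UnimplementedKlefkiServiceServer
		// and does not yet override GetKey, so this returns Unimplemented.
		log.Fatalf("GetKey failed: %v", err)
	}
	fmt.Println("key:", resp.GetKey())
}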