rowan.wu 1 jaar geleden
commit
1ff55e349a
75 gewijzigde bestanden met toevoegingen van 12893 en 0 verwijderingen
  1. 32 0
      .gitignore
  2. 25 0
      Dockerfile
  3. 201 0
      LICENSE
  4. 104 0
      Makefile
  5. 4 0
      README.md
  6. 48 0
      desc/base.proto
  7. 45 0
      desc/task.proto
  8. 42 0
      desc/tasklog.proto
  9. 542 0
      ent/client.go
  10. 610 0
      ent/ent.go
  11. 84 0
      ent/enttest/enttest.go
  12. 3 0
      ent/generate.go
  13. 211 0
      ent/hook/hook.go
  14. 64 0
      ent/migrate/migrate.go
  15. 74 0
      ent/migrate/schema.go
  16. 1435 0
      ent/mutation.go
  17. 215 0
      ent/pagination.go
  18. 13 0
      ent/predicate/predicate.go
  19. 44 0
      ent/runtime.go
  20. 10 0
      ent/runtime/runtime.go
  21. 53 0
      ent/schema/task.go
  22. 43 0
      ent/schema/task_log.go
  23. 221 0
      ent/set_not_nil.go
  24. 209 0
      ent/task.go
  25. 147 0
      ent/task/task.go
  26. 589 0
      ent/task/where.go
  27. 358 0
      ent/task_create.go
  28. 88 0
      ent/task_delete.go
  29. 606 0
      ent/task_query.go
  30. 617 0
      ent/task_update.go
  31. 165 0
      ent/tasklog.go
  32. 105 0
      ent/tasklog/tasklog.go
  33. 229 0
      ent/tasklog/where.go
  34. 277 0
      ent/tasklog_create.go
  35. 88 0
      ent/tasklog_delete.go
  36. 613 0
      ent/tasklog_query.go
  37. 373 0
      ent/tasklog_update.go
  38. 162 0
      ent/template/pagination.tmpl
  39. 26 0
      ent/template/set_not_nil.tmpl
  40. 239 0
      ent/tx.go
  41. 46 0
      etc/job.yaml
  42. 126 0
      go.mod
  43. 393 0
      go.sum
  44. 21 0
      internal/config/config.go
  45. 6 0
      internal/enum/taskresult/task_result.go
  46. 75 0
      internal/logic/base/init_database_logic.go
  47. 45 0
      internal/logic/task/create_task_logic.go
  48. 51 0
      internal/logic/task/delete_task_logic.go
  49. 45 0
      internal/logic/task/get_task_by_id_logic.go
  50. 63 0
      internal/logic/task/get_task_list_logic.go
  51. 45 0
      internal/logic/task/update_task_logic.go
  52. 41 0
      internal/logic/tasklog/create_task_log_logic.go
  53. 37 0
      internal/logic/tasklog/delete_task_log_logic.go
  54. 40 0
      internal/logic/tasklog/get_task_log_by_id_logic.go
  55. 61 0
      internal/logic/tasklog/get_task_log_list_logic.go
  56. 41 0
      internal/logic/tasklog/update_task_log_logic.go
  57. 69 0
      internal/mqs/amq/handler/amq/base/hello_world.go
  58. 87 0
      internal/mqs/amq/handler/amq/wxhook/say_morning.go
  59. 49 0
      internal/mqs/amq/task/dynamicperiodictask/dynamic_periodic_task.go
  60. 51 0
      internal/mqs/amq/task/mqtask/mqtask.go
  61. 21 0
      internal/mqs/amq/task/mqtask/register.go
  62. 14 0
      internal/mqs/amq/task/scheduletask/register.go
  63. 45 0
      internal/mqs/amq/task/scheduletask/scheduletask.go
  64. 5 0
      internal/mqs/amq/types/pattern/pattern.go
  65. 14 0
      internal/mqs/amq/types/payload/payload.go
  66. 58 0
      internal/mqs/amq/types/periodicconfig/provider.go
  67. 82 0
      internal/server/job_server.go
  68. 51 0
      internal/svc/service_context.go
  69. 35 0
      internal/utils/dberrorhandler/error_handler.go
  70. 37 0
      internal/utils/entx/ent_tx.go
  71. 75 0
      job.go
  72. 115 0
      job.proto
  73. 114 0
      jobclient/job.go
  74. 1316 0
      types/job/job.pb.go
  75. 505 0
      types/job/job_grpc.pb.go

+ 32 - 0
.gitignore

@@ -0,0 +1,32 @@
+### Go template
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
+
+*.gz
+*.rar
+*.tmp
+
+# Other files and folders
+.settings/
+.idea/
+.vscode/
+
+# config file avoid exposing private data
+*_dev.yaml
+
+# Build files
+*_api
+*_rpc

+ 25 - 0
Dockerfile

@@ -0,0 +1,25 @@
+FROM alpine:3.19
+
+# Define the project name | 定义项目名称
+ARG PROJECT=job
+# Define the config file name | 定义配置文件名
+ARG CONFIG_FILE=job.yaml
+# Define the author | 定义作者
+ARG AUTHOR="yuansu.china.work@gmail.com"
+
+LABEL org.opencontainers.image.authors=${AUTHOR}
+
+WORKDIR /app
+ENV PROJECT=${PROJECT}
+ENV CONFIG_FILE=${CONFIG_FILE}
+
+ENV TZ=Asia/Shanghai
+RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories
+RUN apk update --no-cache && apk add --no-cache tzdata
+
+COPY ./${PROJECT}_rpc ./
+COPY ./etc/${CONFIG_FILE} ./etc/
+
+EXPOSE 9105
+
+ENTRYPOINT ./${PROJECT}_rpc -f etc/${CONFIG_FILE}

+ 201 - 0
LICENSE

@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2023 Ryan SU
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 104 - 0
Makefile

@@ -0,0 +1,104 @@
+# Custom configuration | 独立配置
+# Service name | 项目名称
+SERVICE=Job
+# Service name in specific style | 项目经过style格式化的名称
+SERVICE_STYLE=job
+# Service name in lowercase | 项目名称全小写格式
+SERVICE_LOWER=job
+# Service name in snake format | 项目名称下划线格式
+SERVICE_SNAKE=job
+# Service name in dash format | 项目名称短杠格式
+SERVICE_DASH=job
+
+# The project version, if you don't use git, you should set it manually | 项目版本,如果不使用git请手动设置
+VERSION=$(shell git describe --tags --always)
+
+# The project file name style | 项目文件命名风格
+PROJECT_STYLE=go_zero
+
+# Whether to use i18n | 是否启用 i18n
+PROJECT_I18N=true
+
+# The suffix after build or compile | 构建后缀
+PROJECT_BUILD_SUFFIX=rpc
+
+
+# Ent enabled features | Ent 启用的官方特性
+ENT_FEATURE=sql/execquery
+
+
+# The arch of the build | 构建的架构
+GOARCH=amd64
+
+# ---- You may not need to modify the codes below | 下面的代码大概率不需要更改 ----
+
+GO ?= go
+GOFMT ?= gofmt "-s"
+GOFILES := $(shell find . -name "*.go")
+LDFLAGS := -s -w
+
+.PHONY: test
+test: # Run test for the project | 运行项目测试
+	go test -v --cover ./internal/...
+
+.PHONY: fmt
+fmt: # Format the codes | 格式化代码
+	$(GOFMT) -w $(GOFILES)
+
+.PHONY: lint
+lint: # Run go linter | 运行代码错误分析
+	golangci-lint run -D staticcheck
+
+.PHONY: tools
+tools: # Install the necessary tools | 安装必要的工具
+	$(GO) install github.com/golangci/golangci-lint/cmd/golangci-lint@latest;
+
+.PHONY: docker
+docker: # Build the docker image | 构建 docker 镜像
+	docker build -f Dockerfile -t ${DOCKER_USERNAME}/$(SERVICE_DASH)-$(PROJECT_BUILD_SUFFIX):${VERSION} .
+	@echo "Build docker successfully"
+
+.PHONY: publish-docker
+publish-docker: # Publish docker image | 发布 docker 镜像
+	echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin https://${REPO}
+	docker push ${DOCKER_USERNAME}/$(SERVICE_DASH)-$(PROJECT_BUILD_SUFFIX):${VERSION}
+	@echo "Publish docker successfully"
+
+.PHONY: gen-rpc
+gen-rpc: # Generate RPC files from proto | 生成 RPC 的代码
+	goctls rpc protoc ./$(SERVICE_STYLE).proto --go_out=./types --go-grpc_out=./types --zrpc_out=. --style=$(PROJECT_STYLE)
+ifeq ($(shell uname -s), Darwin)
+	sed -i "" 's/,omitempty//g' ./types/$(SERVICE_LOWER)/*.pb.go
+else
+	sed -i 's/,omitempty//g' ./types/$(SERVICE_LOWER)/*.pb.go
+endif
+	@echo "Generate RPC codes successfully"
+
+.PHONY: gen-ent
+gen-ent: # Generate Ent codes | 生成 Ent 的代码
+	go run -mod=mod entgo.io/ent/cmd/ent generate --template glob="./ent/template/*.tmpl" ./ent/schema --feature $(ENT_FEATURE)
+	@echo "Generate Ent codes successfully"
+
+.PHONY: gen-rpc-ent-logic
+gen-rpc-ent-logic: # Generate logic code from Ent, need model and group params | 根据 Ent 生成逻辑代码, 需要设置 model 和 group
+	goctls rpc ent --schema=./ent/schema  --style=$(PROJECT_STYLE) --multiple=false --service_name=$(SERVICE) --search_key_num=3 --output=./ --model=$(model) --group=$(group) --proto_out=./desc/$(shell echo $(model) | tr A-Z a-z).proto --i18n=$(PROJECT_I18N) --overwrite=true
+	@echo "Generate logic codes from Ent successfully"
+
+.PHONY: build-win
+build-win: # Build project for Windows | 构建Windows下的可执行文件
+	env CGO_ENABLED=0 GOOS=windows GOARCH=$(GOARCH) go build -ldflags "$(LDFLAGS)" -trimpath -o $(SERVICE_STYLE)_$(PROJECT_BUILD_SUFFIX).exe $(SERVICE_STYLE).go
+	@echo "Build project for Windows successfully"
+
+.PHONY: build-mac
+build-mac: # Build project for MacOS | 构建MacOS下的可执行文件
+	env CGO_ENABLED=0 GOOS=darwin GOARCH=$(GOARCH) go build -ldflags "$(LDFLAGS)" -trimpath -o $(SERVICE_STYLE)_$(PROJECT_BUILD_SUFFIX) $(SERVICE_STYLE).go
+	@echo "Build project for MacOS successfully"
+
+.PHONY: build-linux
+build-linux: # Build project for Linux | 构建Linux下的可执行文件
+	env CGO_ENABLED=0 GOOS=linux GOARCH=$(GOARCH) go build -ldflags "$(LDFLAGS)" -trimpath -o $(SERVICE_STYLE)_$(PROJECT_BUILD_SUFFIX) $(SERVICE_STYLE).go
+	@echo "Build project for Linux successfully"
+
+.PHONY: help
+help: # Show help | 显示帮助
+	@grep -E '^[a-zA-Z0-9 -]+:.*#'  Makefile | sort | while read -r l; do printf "\033[1;32m$$(echo $$l | cut -f 1 -d':')\033[00m:$$(echo $$l | cut -f 2- -d'#')\n"; done

+ 4 - 0
README.md

@@ -0,0 +1,4 @@
+# wechat-job
+wechat-job 在线定时任务扩展模块。
+
+目前支持: 基于 asynq 的定时任务

+ 48 - 0
desc/base.proto

@@ -0,0 +1,48 @@
+syntax = "proto3";
+
+package job;
+option go_package="./job";
+
+// base message
+message Empty {}
+
+message IDReq {
+  uint64 id = 1;
+}
+
+message IDsReq {
+  repeated uint64 ids = 1;
+}
+
+message UUIDsReq {
+  repeated string ids = 1;
+}
+
+message UUIDReq {
+  string id = 1;
+}
+
+message BaseResp {
+  string msg = 1;
+}
+
+message PageInfoReq {
+  uint64 page = 1;
+  uint64 page_size = 2;
+}
+
+message BaseIDResp {
+  uint64 id = 1;
+  string msg = 2;
+}
+
+message BaseUUIDResp {
+  string id = 1;
+  string msg = 2;
+}
+
+
+service Job {
+  // group: base
+  rpc initDatabase (Empty) returns (BaseResp);
+}

+ 45 - 0
desc/task.proto

@@ -0,0 +1,45 @@
+syntax = "proto3";
+
+// Task message
+
+message TaskInfo {
+  optional uint64 id = 1;
+  optional int64 created_at = 2;
+  optional int64 updated_at = 3;
+  optional uint32 status = 4;
+  optional string name = 5;
+  optional string task_group = 6;
+  optional string cron_expression = 7;
+  optional string pattern = 8;
+  optional string payload = 9;
+}
+
+message TaskListResp {
+  uint64 total = 1;
+  repeated TaskInfo data = 2;
+}
+
+message TaskListReq {
+  uint64 page = 1;
+  uint64 page_size = 2;
+  optional string name = 3;
+  optional string task_group = 4;
+}
+
+
+service Job {
+
+  // Task management
+  // group: task
+  rpc createTask (TaskInfo) returns (BaseIDResp);
+  // group: task
+  rpc updateTask (TaskInfo) returns (BaseResp);
+  // group: task
+  rpc getTaskList (TaskListReq) returns (TaskListResp);
+  // group: task
+  rpc getTaskById (IDReq) returns (TaskInfo);
+  // group: task
+  rpc deleteTask (IDsReq) returns (BaseResp);
+
+
+}

+ 42 - 0
desc/tasklog.proto

@@ -0,0 +1,42 @@
+syntax = "proto3";
+
+// TaskLog message
+
+message TaskLogInfo {
+  optional uint64 id = 1;
+  optional int64 created_at = 2;
+  optional int64 updated_at = 3;
+  optional int64  started_at = 4;
+  optional int64  finished_at = 5;
+  optional uint32 result = 6;
+}
+
+message TaskLogListResp {
+  uint64 total = 1;
+  repeated TaskLogInfo data = 2;
+}
+
+message TaskLogListReq {
+  uint64 page = 1;
+  uint64 page_size = 2;
+  optional uint64 task_id = 3;
+  optional uint32 result = 4;
+}
+
+
+service Job {
+
+  // TaskLog management
+  // group: tasklog
+  rpc createTaskLog (TaskLogInfo) returns (BaseIDResp);
+  // group: tasklog
+  rpc updateTaskLog (TaskLogInfo) returns (BaseResp);
+  // group: tasklog
+  rpc getTaskLogList (TaskLogListReq) returns (TaskLogListResp);
+  // group: tasklog
+  rpc getTaskLogById (IDReq) returns (TaskLogInfo);
+  // group: tasklog
+  rpc deleteTaskLog (IDsReq) returns (BaseResp);
+
+
+}

+ 542 - 0
ent/client.go

@@ -0,0 +1,542 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"log"
+	"reflect"
+
+	"github.com/suyuan32/simple-admin-job/ent/migrate"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect"
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"github.com/suyuan32/simple-admin-job/ent/task"
+	"github.com/suyuan32/simple-admin-job/ent/tasklog"
+
+	stdsql "database/sql"
+)
+
+// Client is the client that holds all ent builders.
+type Client struct {
+	config
+	// Schema is the client for creating, migrating and dropping schema.
+	Schema *migrate.Schema
+	// Task is the client for interacting with the Task builders.
+	Task *TaskClient
+	// TaskLog is the client for interacting with the TaskLog builders.
+	TaskLog *TaskLogClient
+}
+
+// NewClient creates a new client configured with the given options.
+func NewClient(opts ...Option) *Client {
+	client := &Client{config: newConfig(opts...)}
+	client.init()
+	return client
+}
+
+func (c *Client) init() {
+	c.Schema = migrate.NewSchema(c.driver)
+	c.Task = NewTaskClient(c.config)
+	c.TaskLog = NewTaskLogClient(c.config)
+}
+
+type (
+	// config is the configuration for the client and its builder.
+	config struct {
+		// driver used for executing database requests.
+		driver dialect.Driver
+		// debug enable a debug logging.
+		debug bool
+		// log used for logging on debug mode.
+		log func(...any)
+		// hooks to execute on mutations.
+		hooks *hooks
+		// interceptors to execute on queries.
+		inters *inters
+	}
+	// Option function to configure the client.
+	Option func(*config)
+)
+
+// newConfig creates a new config for the client.
+func newConfig(opts ...Option) config {
+	cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}}
+	cfg.options(opts...)
+	return cfg
+}
+
+// options applies the options on the config object.
+func (c *config) options(opts ...Option) {
+	for _, opt := range opts {
+		opt(c)
+	}
+	if c.debug {
+		c.driver = dialect.Debug(c.driver, c.log)
+	}
+}
+
+// Debug enables debug logging on the ent.Driver.
+func Debug() Option {
+	return func(c *config) {
+		c.debug = true
+	}
+}
+
+// Log sets the logging function for debug mode.
+func Log(fn func(...any)) Option {
+	return func(c *config) {
+		c.log = fn
+	}
+}
+
+// Driver configures the client driver.
+func Driver(driver dialect.Driver) Option {
+	return func(c *config) {
+		c.driver = driver
+	}
+}
+
+// Open opens a database/sql.DB specified by the driver name and
+// the data source name, and returns a new client attached to it.
+// Optional parameters can be added for configuring the client.
+func Open(driverName, dataSourceName string, options ...Option) (*Client, error) {
+	switch driverName {
+	case dialect.MySQL, dialect.Postgres, dialect.SQLite:
+		drv, err := sql.Open(driverName, dataSourceName)
+		if err != nil {
+			return nil, err
+		}
+		return NewClient(append(options, Driver(drv))...), nil
+	default:
+		return nil, fmt.Errorf("unsupported driver: %q", driverName)
+	}
+}
+
+// ErrTxStarted is returned when trying to start a new transaction from a transactional client.
+var ErrTxStarted = errors.New("ent: cannot start a transaction within a transaction")
+
+// Tx returns a new transactional client. The provided context
+// is used until the transaction is committed or rolled back.
+func (c *Client) Tx(ctx context.Context) (*Tx, error) {
+	if _, ok := c.driver.(*txDriver); ok {
+		return nil, ErrTxStarted
+	}
+	tx, err := newTx(ctx, c.driver)
+	if err != nil {
+		return nil, fmt.Errorf("ent: starting a transaction: %w", err)
+	}
+	cfg := c.config
+	cfg.driver = tx
+	return &Tx{
+		ctx:     ctx,
+		config:  cfg,
+		Task:    NewTaskClient(cfg),
+		TaskLog: NewTaskLogClient(cfg),
+	}, nil
+}
+
+// BeginTx returns a transactional client with specified options.
+func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) {
+	if _, ok := c.driver.(*txDriver); ok {
+		return nil, errors.New("ent: cannot start a transaction within a transaction")
+	}
+	tx, err := c.driver.(interface {
+		BeginTx(context.Context, *sql.TxOptions) (dialect.Tx, error)
+	}).BeginTx(ctx, opts)
+	if err != nil {
+		return nil, fmt.Errorf("ent: starting a transaction: %w", err)
+	}
+	cfg := c.config
+	cfg.driver = &txDriver{tx: tx, drv: c.driver}
+	return &Tx{
+		ctx:     ctx,
+		config:  cfg,
+		Task:    NewTaskClient(cfg),
+		TaskLog: NewTaskLogClient(cfg),
+	}, nil
+}
+
+// Debug returns a new debug-client. It's used to get verbose logging on specific operations.
+//
+//	client.Debug().
+//		Task.
+//		Query().
+//		Count(ctx)
+func (c *Client) Debug() *Client {
+	if c.debug {
+		return c
+	}
+	cfg := c.config
+	cfg.driver = dialect.Debug(c.driver, c.log)
+	client := &Client{config: cfg}
+	client.init()
+	return client
+}
+
+// Close closes the database connection and prevents new queries from starting.
+func (c *Client) Close() error {
+	return c.driver.Close()
+}
+
+// Use adds the mutation hooks to all the entity clients.
+// In order to add hooks to a specific client, call: `client.Node.Use(...)`.
+func (c *Client) Use(hooks ...Hook) {
+	c.Task.Use(hooks...)
+	c.TaskLog.Use(hooks...)
+}
+
+// Intercept adds the query interceptors to all the entity clients.
+// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`.
+func (c *Client) Intercept(interceptors ...Interceptor) {
+	c.Task.Intercept(interceptors...)
+	c.TaskLog.Intercept(interceptors...)
+}
+
+// Mutate implements the ent.Mutator interface.
+func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) {
+	switch m := m.(type) {
+	case *TaskMutation:
+		return c.Task.mutate(ctx, m)
+	case *TaskLogMutation:
+		return c.TaskLog.mutate(ctx, m)
+	default:
+		return nil, fmt.Errorf("ent: unknown mutation type %T", m)
+	}
+}
+
+// TaskClient is a client for the Task schema.
+type TaskClient struct {
+	config
+}
+
+// NewTaskClient returns a client for the Task from the given config.
+func NewTaskClient(c config) *TaskClient {
+	return &TaskClient{config: c}
+}
+
+// Use adds a list of mutation hooks to the hooks stack.
+// A call to `Use(f, g, h)` equals to `task.Hooks(f(g(h())))`.
+func (c *TaskClient) Use(hooks ...Hook) {
+	c.hooks.Task = append(c.hooks.Task, hooks...)
+}
+
+// Intercept adds a list of query interceptors to the interceptors stack.
+// A call to `Intercept(f, g, h)` equals to `task.Intercept(f(g(h())))`.
+func (c *TaskClient) Intercept(interceptors ...Interceptor) {
+	c.inters.Task = append(c.inters.Task, interceptors...)
+}
+
+// Create returns a builder for creating a Task entity.
+func (c *TaskClient) Create() *TaskCreate {
+	mutation := newTaskMutation(c.config, OpCreate)
+	return &TaskCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
+}
+
+// CreateBulk returns a builder for creating a bulk of Task entities.
+func (c *TaskClient) CreateBulk(builders ...*TaskCreate) *TaskCreateBulk {
+	return &TaskCreateBulk{config: c.config, builders: builders}
+}
+
+// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
+// a builder and applies setFunc on it.
+func (c *TaskClient) MapCreateBulk(slice any, setFunc func(*TaskCreate, int)) *TaskCreateBulk {
+	rv := reflect.ValueOf(slice)
+	if rv.Kind() != reflect.Slice {
+		return &TaskCreateBulk{err: fmt.Errorf("calling to TaskClient.MapCreateBulk with wrong type %T, need slice", slice)}
+	}
+	builders := make([]*TaskCreate, rv.Len())
+	for i := 0; i < rv.Len(); i++ {
+		builders[i] = c.Create()
+		setFunc(builders[i], i)
+	}
+	return &TaskCreateBulk{config: c.config, builders: builders}
+}
+
// Update returns an update builder for Task.
func (c *TaskClient) Update() *TaskUpdate {
	mutation := newTaskMutation(c.config, OpUpdate)
	return &TaskUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOne returns an update builder for the given entity.
func (c *TaskClient) UpdateOne(t *Task) *TaskUpdateOne {
	mutation := newTaskMutation(c.config, OpUpdateOne, withTask(t))
	return &TaskUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOneID returns an update builder for the given id.
func (c *TaskClient) UpdateOneID(id uint64) *TaskUpdateOne {
	mutation := newTaskMutation(c.config, OpUpdateOne, withTaskID(id))
	return &TaskUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// Delete returns a delete builder for Task.
func (c *TaskClient) Delete() *TaskDelete {
	mutation := newTaskMutation(c.config, OpDelete)
	return &TaskDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// DeleteOne returns a builder for deleting the given entity.
func (c *TaskClient) DeleteOne(t *Task) *TaskDeleteOne {
	return c.DeleteOneID(t.ID)
}

// DeleteOneID returns a builder for deleting the given entity by its id.
func (c *TaskClient) DeleteOneID(id uint64) *TaskDeleteOne {
	builder := c.Delete().Where(task.ID(id))
	// Narrow the bulk-delete mutation to a single-entity delete so hooks
	// observing the op can distinguish OpDeleteOne from OpDelete.
	builder.mutation.id = &id
	builder.mutation.op = OpDeleteOne
	return &TaskDeleteOne{builder}
}
+
// Query returns a query builder for Task.
func (c *TaskClient) Query() *TaskQuery {
	return &TaskQuery{
		config: c.config,
		ctx:    &QueryContext{Type: TypeTask},
		inters: c.Interceptors(),
	}
}

// Get returns a Task entity by its id.
// It returns a *NotFoundError if the id does not exist (via Only).
func (c *TaskClient) Get(ctx context.Context, id uint64) (*Task, error) {
	return c.Query().Where(task.ID(id)).Only(ctx)
}

// GetX is like Get, but panics if an error occurs.
func (c *TaskClient) GetX(ctx context.Context, id uint64) *Task {
	obj, err := c.Get(ctx, id)
	if err != nil {
		panic(err)
	}
	return obj
}
+
// QueryTaskLogs queries the task_logs edge of a Task.
// The returned query is lazy: the graph step is only evaluated when the
// query is executed, starting from the given Task's id.
func (c *TaskClient) QueryTaskLogs(t *Task) *TaskLogQuery {
	query := (&TaskLogClient{config: c.config}).Query()
	query.path = func(context.Context) (fromV *sql.Selector, _ error) {
		id := t.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(task.Table, task.FieldID, id),
			sqlgraph.To(tasklog.Table, tasklog.FieldID),
			// O2M: one Task has many TaskLogs; false = traversal follows the edge direction.
			sqlgraph.Edge(sqlgraph.O2M, false, task.TaskLogsTable, task.TaskLogsColumn),
		)
		fromV = sqlgraph.Neighbors(t.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}
+
// Hooks returns the client hooks.
func (c *TaskClient) Hooks() []Hook {
	return c.hooks.Task
}

// Interceptors returns the client interceptors.
func (c *TaskClient) Interceptors() []Interceptor {
	return c.inters.Task
}

// mutate dispatches the mutation to the builder matching its operation and
// executes it. It is the single entry point used by generic mutators.
func (c *TaskClient) mutate(ctx context.Context, m *TaskMutation) (Value, error) {
	switch m.Op() {
	case OpCreate:
		return (&TaskCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
	case OpUpdate:
		return (&TaskUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
	case OpUpdateOne:
		return (&TaskUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
	case OpDelete, OpDeleteOne:
		return (&TaskDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
	default:
		return nil, fmt.Errorf("ent: unknown Task mutation op: %q", m.Op())
	}
}
+
// TaskLogClient is a client for the TaskLog schema.
type TaskLogClient struct {
	config
}

// NewTaskLogClient returns a client for the TaskLog from the given config.
func NewTaskLogClient(c config) *TaskLogClient {
	return &TaskLogClient{config: c}
}

// Use adds a list of mutation hooks to the hooks stack.
// A call to `Use(f, g, h)` equals to `tasklog.Hooks(f(g(h())))`.
func (c *TaskLogClient) Use(hooks ...Hook) {
	c.hooks.TaskLog = append(c.hooks.TaskLog, hooks...)
}

// Intercept adds a list of query interceptors to the interceptors stack.
// A call to `Intercept(f, g, h)` equals to `tasklog.Intercept(f(g(h())))`.
func (c *TaskLogClient) Intercept(interceptors ...Interceptor) {
	c.inters.TaskLog = append(c.inters.TaskLog, interceptors...)
}

// Create returns a builder for creating a TaskLog entity.
func (c *TaskLogClient) Create() *TaskLogCreate {
	mutation := newTaskLogMutation(c.config, OpCreate)
	return &TaskLogCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// CreateBulk returns a builder for creating a bulk of TaskLog entities.
func (c *TaskLogClient) CreateBulk(builders ...*TaskLogCreate) *TaskLogCreateBulk {
	return &TaskLogCreateBulk{config: c.config, builders: builders}
}

// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
// a builder and applies setFunc on it.
func (c *TaskLogClient) MapCreateBulk(slice any, setFunc func(*TaskLogCreate, int)) *TaskLogCreateBulk {
	rv := reflect.ValueOf(slice)
	if rv.Kind() != reflect.Slice {
		// Non-slice input: surface the error from the builder's Save/Exec.
		return &TaskLogCreateBulk{err: fmt.Errorf("calling to TaskLogClient.MapCreateBulk with wrong type %T, need slice", slice)}
	}
	builders := make([]*TaskLogCreate, rv.Len())
	for i := 0; i < rv.Len(); i++ {
		builders[i] = c.Create()
		setFunc(builders[i], i)
	}
	return &TaskLogCreateBulk{config: c.config, builders: builders}
}
+
// Update returns an update builder for TaskLog.
func (c *TaskLogClient) Update() *TaskLogUpdate {
	mutation := newTaskLogMutation(c.config, OpUpdate)
	return &TaskLogUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOne returns an update builder for the given entity.
func (c *TaskLogClient) UpdateOne(tl *TaskLog) *TaskLogUpdateOne {
	mutation := newTaskLogMutation(c.config, OpUpdateOne, withTaskLog(tl))
	return &TaskLogUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// UpdateOneID returns an update builder for the given id.
func (c *TaskLogClient) UpdateOneID(id uint64) *TaskLogUpdateOne {
	mutation := newTaskLogMutation(c.config, OpUpdateOne, withTaskLogID(id))
	return &TaskLogUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// Delete returns a delete builder for TaskLog.
func (c *TaskLogClient) Delete() *TaskLogDelete {
	mutation := newTaskLogMutation(c.config, OpDelete)
	return &TaskLogDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
}

// DeleteOne returns a builder for deleting the given entity.
func (c *TaskLogClient) DeleteOne(tl *TaskLog) *TaskLogDeleteOne {
	return c.DeleteOneID(tl.ID)
}

// DeleteOneID returns a builder for deleting the given entity by its id.
func (c *TaskLogClient) DeleteOneID(id uint64) *TaskLogDeleteOne {
	builder := c.Delete().Where(tasklog.ID(id))
	// Narrow the mutation to a single-entity delete so hooks can tell
	// OpDeleteOne apart from a bulk OpDelete.
	builder.mutation.id = &id
	builder.mutation.op = OpDeleteOne
	return &TaskLogDeleteOne{builder}
}
+
// Query returns a query builder for TaskLog.
func (c *TaskLogClient) Query() *TaskLogQuery {
	return &TaskLogQuery{
		config: c.config,
		ctx:    &QueryContext{Type: TypeTaskLog},
		inters: c.Interceptors(),
	}
}

// Get returns a TaskLog entity by its id.
// It returns a *NotFoundError if the id does not exist (via Only).
func (c *TaskLogClient) Get(ctx context.Context, id uint64) (*TaskLog, error) {
	return c.Query().Where(tasklog.ID(id)).Only(ctx)
}

// GetX is like Get, but panics if an error occurs.
func (c *TaskLogClient) GetX(ctx context.Context, id uint64) *TaskLog {
	obj, err := c.Get(ctx, id)
	if err != nil {
		panic(err)
	}
	return obj
}
+
// QueryTasks queries the tasks edge of a TaskLog.
// The returned query is lazy: the graph step is evaluated on execution,
// starting from the given TaskLog's id.
func (c *TaskLogClient) QueryTasks(tl *TaskLog) *TaskQuery {
	query := (&TaskClient{config: c.config}).Query()
	query.path = func(context.Context) (fromV *sql.Selector, _ error) {
		id := tl.ID
		step := sqlgraph.NewStep(
			sqlgraph.From(tasklog.Table, tasklog.FieldID, id),
			sqlgraph.To(task.Table, task.FieldID),
			// M2O: many TaskLogs belong to one Task; true = inverse (back-reference) edge.
			sqlgraph.Edge(sqlgraph.M2O, true, tasklog.TasksTable, tasklog.TasksColumn),
		)
		fromV = sqlgraph.Neighbors(tl.driver.Dialect(), step)
		return fromV, nil
	}
	return query
}
+
// Hooks returns the client hooks.
func (c *TaskLogClient) Hooks() []Hook {
	return c.hooks.TaskLog
}

// Interceptors returns the client interceptors.
func (c *TaskLogClient) Interceptors() []Interceptor {
	return c.inters.TaskLog
}

// mutate dispatches the mutation to the builder matching its operation and
// executes it. It is the single entry point used by generic mutators.
func (c *TaskLogClient) mutate(ctx context.Context, m *TaskLogMutation) (Value, error) {
	switch m.Op() {
	case OpCreate:
		return (&TaskLogCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
	case OpUpdate:
		return (&TaskLogUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
	case OpUpdateOne:
		return (&TaskLogUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
	case OpDelete, OpDeleteOne:
		return (&TaskLogDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
	default:
		return nil, fmt.Errorf("ent: unknown TaskLog mutation op: %q", m.Op())
	}
}
+
// hooks and interceptors per client, for fast access.
type (
	hooks struct {
		Task, TaskLog []ent.Hook
	}
	inters struct {
		Task, TaskLog []ent.Interceptor
	}
)

// ExecContext allows calling the underlying ExecContext method of the driver if it is supported by it.
// See, database/sql#DB.ExecContext for more information.
// The capability is probed at runtime because dialect drivers are not
// required to expose the raw database/sql interface.
func (c *config) ExecContext(ctx context.Context, query string, args ...any) (stdsql.Result, error) {
	ex, ok := c.driver.(interface {
		ExecContext(context.Context, string, ...any) (stdsql.Result, error)
	})
	if !ok {
		return nil, fmt.Errorf("Driver.ExecContext is not supported")
	}
	return ex.ExecContext(ctx, query, args...)
}

// QueryContext allows calling the underlying QueryContext method of the driver if it is supported by it.
// See, database/sql#DB.QueryContext for more information.
// Same runtime capability probe as ExecContext above.
func (c *config) QueryContext(ctx context.Context, query string, args ...any) (*stdsql.Rows, error) {
	q, ok := c.driver.(interface {
		QueryContext(context.Context, string, ...any) (*stdsql.Rows, error)
	})
	if !ok {
		return nil, fmt.Errorf("Driver.QueryContext is not supported")
	}
	return q.QueryContext(ctx, query, args...)
}

+ 610 - 0
ent/ent.go

@@ -0,0 +1,610 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"reflect"
+	"sync"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"github.com/suyuan32/simple-admin-job/ent/task"
+	"github.com/suyuan32/simple-admin-job/ent/tasklog"
+)
+
// ent aliases to avoid import conflicts in user's code.
type (
	Op            = ent.Op
	Hook          = ent.Hook
	Value         = ent.Value
	Query         = ent.Query
	QueryContext  = ent.QueryContext
	Querier       = ent.Querier
	QuerierFunc   = ent.QuerierFunc
	Interceptor   = ent.Interceptor
	InterceptFunc = ent.InterceptFunc
	Traverser     = ent.Traverser
	TraverseFunc  = ent.TraverseFunc
	Policy        = ent.Policy
	Mutator       = ent.Mutator
	Mutation      = ent.Mutation
	MutateFunc    = ent.MutateFunc
)

// clientCtxKey is an unexported context-key type, guaranteeing no collision
// with keys defined in other packages.
type clientCtxKey struct{}

// FromContext returns a Client stored inside a context, or nil if there isn't one.
func FromContext(ctx context.Context) *Client {
	c, _ := ctx.Value(clientCtxKey{}).(*Client)
	return c
}

// NewContext returns a new context with the given Client attached.
func NewContext(parent context.Context, c *Client) context.Context {
	return context.WithValue(parent, clientCtxKey{}, c)
}

// txCtxKey is the unexported context-key type for transactions.
type txCtxKey struct{}

// TxFromContext returns a Tx stored inside a context, or nil if there isn't one.
func TxFromContext(ctx context.Context) *Tx {
	tx, _ := ctx.Value(txCtxKey{}).(*Tx)
	return tx
}

// NewTxContext returns a new context with the given Tx attached.
func NewTxContext(parent context.Context, tx *Tx) context.Context {
	return context.WithValue(parent, txCtxKey{}, tx)
}

// OrderFunc applies an ordering on the sql selector.
// Deprecated: Use Asc/Desc functions or the package builders instead.
type OrderFunc func(*sql.Selector)

var (
	initCheck   sync.Once
	columnCheck sql.ColumnCheck
)

// columnChecker checks if the column exists in the given table.
// The check table is built lazily exactly once (sync.Once) and is then
// safe for concurrent use by all queries.
func checkColumn(table, column string) error {
	initCheck.Do(func() {
		columnCheck = sql.NewColumnCheck(map[string]func(string) bool{
			task.Table:    task.ValidColumn,
			tasklog.Table: tasklog.ValidColumn,
		})
	})
	return columnCheck(table, column)
}
+
// Asc applies the given fields in ASC order.
// Unknown columns are reported as a ValidationError on the selector
// instead of failing immediately.
func Asc(fields ...string) func(*sql.Selector) {
	return func(s *sql.Selector) {
		for _, f := range fields {
			if err := checkColumn(s.TableName(), f); err != nil {
				s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
			}
			s.OrderBy(sql.Asc(s.C(f)))
		}
	}
}

// Desc applies the given fields in DESC order.
// Unknown columns are reported as a ValidationError on the selector.
func Desc(fields ...string) func(*sql.Selector) {
	return func(s *sql.Selector) {
		for _, f := range fields {
			if err := checkColumn(s.TableName(), f); err != nil {
				s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
			}
			s.OrderBy(sql.Desc(s.C(f)))
		}
	}
}
+
// AggregateFunc applies an aggregation step on the group-by traversal/selector.
type AggregateFunc func(*sql.Selector) string

// As is a pseudo aggregation function for renaming other functions with custom names. For example:
//
//	GroupBy(field1, field2).
//	Aggregate(ent.As(ent.Sum(field1), "sum_field1"), (ent.As(ent.Sum(field2), "sum_field2")).
//	Scan(ctx, &v)
func As(fn AggregateFunc, end string) AggregateFunc {
	return func(s *sql.Selector) string {
		return sql.As(fn(s), end)
	}
}

// Count applies the "count" aggregation function on each group.
func Count() AggregateFunc {
	return func(s *sql.Selector) string {
		return sql.Count("*")
	}
}

// Max applies the "max" aggregation function on the given field of each group.
// An unknown column yields a ValidationError and an empty aggregation.
func Max(field string) AggregateFunc {
	return func(s *sql.Selector) string {
		if err := checkColumn(s.TableName(), field); err != nil {
			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
			return ""
		}
		return sql.Max(s.C(field))
	}
}

// Mean applies the "mean" aggregation function on the given field of each group.
func Mean(field string) AggregateFunc {
	return func(s *sql.Selector) string {
		if err := checkColumn(s.TableName(), field); err != nil {
			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
			return ""
		}
		return sql.Avg(s.C(field))
	}
}

// Min applies the "min" aggregation function on the given field of each group.
func Min(field string) AggregateFunc {
	return func(s *sql.Selector) string {
		if err := checkColumn(s.TableName(), field); err != nil {
			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
			return ""
		}
		return sql.Min(s.C(field))
	}
}

// Sum applies the "sum" aggregation function on the given field of each group.
func Sum(field string) AggregateFunc {
	return func(s *sql.Selector) string {
		if err := checkColumn(s.TableName(), field); err != nil {
			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
			return ""
		}
		return sql.Sum(s.C(field))
	}
}
+
// ValidationError returns when validating a field or edge fails.
type ValidationError struct {
	Name string // Field or edge name.
	err  error
}

// Error implements the error interface.
func (e *ValidationError) Error() string {
	return e.err.Error()
}

// Unwrap implements the errors.Wrapper interface.
func (e *ValidationError) Unwrap() error {
	return e.err
}

// IsValidationError returns a boolean indicating whether the error is a validation error.
func IsValidationError(err error) bool {
	if err == nil {
		return false
	}
	var e *ValidationError
	return errors.As(err, &e)
}

// NotFoundError returns when trying to fetch a specific entity and it was not found in the database.
type NotFoundError struct {
	label string
}

// Error implements the error interface.
func (e *NotFoundError) Error() string {
	return "ent: " + e.label + " not found"
}

// IsNotFound returns a boolean indicating whether the error is a not found error.
func IsNotFound(err error) bool {
	if err == nil {
		return false
	}
	var e *NotFoundError
	return errors.As(err, &e)
}

// MaskNotFound masks not found error.
func MaskNotFound(err error) error {
	if IsNotFound(err) {
		return nil
	}
	return err
}

// NotSingularError returns when trying to fetch a singular entity and more than one was found in the database.
type NotSingularError struct {
	label string
}

// Error implements the error interface.
func (e *NotSingularError) Error() string {
	return "ent: " + e.label + " not singular"
}

// IsNotSingular returns a boolean indicating whether the error is a not singular error.
func IsNotSingular(err error) bool {
	if err == nil {
		return false
	}
	var e *NotSingularError
	return errors.As(err, &e)
}

// NotLoadedError returns when trying to get a node that was not loaded by the query.
type NotLoadedError struct {
	edge string
}

// Error implements the error interface.
func (e *NotLoadedError) Error() string {
	return "ent: " + e.edge + " edge was not loaded"
}

// IsNotLoaded returns a boolean indicating whether the error is a not loaded error.
func IsNotLoaded(err error) bool {
	if err == nil {
		return false
	}
	var e *NotLoadedError
	return errors.As(err, &e)
}

// ConstraintError returns when trying to create/update one or more entities and
// one or more of their constraints failed. For example, violation of edge or
// field uniqueness.
type ConstraintError struct {
	msg  string
	wrap error
}

// Error implements the error interface.
// Note: value receiver here (unlike Unwrap) is intentional in the generated
// template; errors.As below still matches the *ConstraintError pointer form.
func (e ConstraintError) Error() string {
	return "ent: constraint failed: " + e.msg
}

// Unwrap implements the errors.Wrapper interface.
func (e *ConstraintError) Unwrap() error {
	return e.wrap
}

// IsConstraintError returns a boolean indicating whether the error is a constraint failure.
func IsConstraintError(err error) bool {
	if err == nil {
		return false
	}
	var e *ConstraintError
	return errors.As(err, &e)
}
+
// selector embedded by the different Select/GroupBy builders.
// It holds the selected fields, aggregations, and a scan callback supplied
// by the concrete builder; the typed helpers below are thin wrappers over it.
type selector struct {
	label string
	flds  *[]string
	fns   []AggregateFunc
	scan  func(context.Context, any) error
}

// ScanX is like Scan, but panics if an error occurs.
func (s *selector) ScanX(ctx context.Context, v any) {
	if err := s.scan(ctx, v); err != nil {
		panic(err)
	}
}

// Strings returns list of strings from a selector. It is only allowed when selecting one field.
func (s *selector) Strings(ctx context.Context) ([]string, error) {
	if len(*s.flds) > 1 {
		return nil, errors.New("ent: Strings is not achievable when selecting more than 1 field")
	}
	var v []string
	if err := s.scan(ctx, &v); err != nil {
		return nil, err
	}
	return v, nil
}

// StringsX is like Strings, but panics if an error occurs.
func (s *selector) StringsX(ctx context.Context) []string {
	v, err := s.Strings(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// String returns a single string from a selector. It is only allowed when selecting one field.
// It returns a *NotFoundError for zero rows, and an error for more than one row.
func (s *selector) String(ctx context.Context) (_ string, err error) {
	var v []string
	if v, err = s.Strings(ctx); err != nil {
		return
	}
	switch len(v) {
	case 1:
		return v[0], nil
	case 0:
		err = &NotFoundError{s.label}
	default:
		err = fmt.Errorf("ent: Strings returned %d results when one was expected", len(v))
	}
	return
}

// StringX is like String, but panics if an error occurs.
func (s *selector) StringX(ctx context.Context) string {
	v, err := s.String(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Ints returns list of ints from a selector. It is only allowed when selecting one field.
func (s *selector) Ints(ctx context.Context) ([]int, error) {
	if len(*s.flds) > 1 {
		return nil, errors.New("ent: Ints is not achievable when selecting more than 1 field")
	}
	var v []int
	if err := s.scan(ctx, &v); err != nil {
		return nil, err
	}
	return v, nil
}

// IntsX is like Ints, but panics if an error occurs.
func (s *selector) IntsX(ctx context.Context) []int {
	v, err := s.Ints(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Int returns a single int from a selector. It is only allowed when selecting one field.
// It returns a *NotFoundError for zero rows, and an error for more than one row.
func (s *selector) Int(ctx context.Context) (_ int, err error) {
	var v []int
	if v, err = s.Ints(ctx); err != nil {
		return
	}
	switch len(v) {
	case 1:
		return v[0], nil
	case 0:
		err = &NotFoundError{s.label}
	default:
		err = fmt.Errorf("ent: Ints returned %d results when one was expected", len(v))
	}
	return
}

// IntX is like Int, but panics if an error occurs.
func (s *selector) IntX(ctx context.Context) int {
	v, err := s.Int(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Float64s returns list of float64s from a selector. It is only allowed when selecting one field.
func (s *selector) Float64s(ctx context.Context) ([]float64, error) {
	if len(*s.flds) > 1 {
		return nil, errors.New("ent: Float64s is not achievable when selecting more than 1 field")
	}
	var v []float64
	if err := s.scan(ctx, &v); err != nil {
		return nil, err
	}
	return v, nil
}

// Float64sX is like Float64s, but panics if an error occurs.
func (s *selector) Float64sX(ctx context.Context) []float64 {
	v, err := s.Float64s(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Float64 returns a single float64 from a selector. It is only allowed when selecting one field.
// It returns a *NotFoundError for zero rows, and an error for more than one row.
func (s *selector) Float64(ctx context.Context) (_ float64, err error) {
	var v []float64
	if v, err = s.Float64s(ctx); err != nil {
		return
	}
	switch len(v) {
	case 1:
		return v[0], nil
	case 0:
		err = &NotFoundError{s.label}
	default:
		err = fmt.Errorf("ent: Float64s returned %d results when one was expected", len(v))
	}
	return
}

// Float64X is like Float64, but panics if an error occurs.
func (s *selector) Float64X(ctx context.Context) float64 {
	v, err := s.Float64(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Bools returns list of bools from a selector. It is only allowed when selecting one field.
func (s *selector) Bools(ctx context.Context) ([]bool, error) {
	if len(*s.flds) > 1 {
		return nil, errors.New("ent: Bools is not achievable when selecting more than 1 field")
	}
	var v []bool
	if err := s.scan(ctx, &v); err != nil {
		return nil, err
	}
	return v, nil
}

// BoolsX is like Bools, but panics if an error occurs.
func (s *selector) BoolsX(ctx context.Context) []bool {
	v, err := s.Bools(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Bool returns a single bool from a selector. It is only allowed when selecting one field.
// It returns a *NotFoundError for zero rows, and an error for more than one row.
func (s *selector) Bool(ctx context.Context) (_ bool, err error) {
	var v []bool
	if v, err = s.Bools(ctx); err != nil {
		return
	}
	switch len(v) {
	case 1:
		return v[0], nil
	case 0:
		err = &NotFoundError{s.label}
	default:
		err = fmt.Errorf("ent: Bools returned %d results when one was expected", len(v))
	}
	return
}

// BoolX is like Bool, but panics if an error occurs.
func (s *selector) BoolX(ctx context.Context) bool {
	v, err := s.Bool(ctx)
	if err != nil {
		panic(err)
	}
	return v
}
+
// withHooks invokes the builder operation with the given hooks, if any.
// Hooks are chained in reverse so that hooks[0] is the outermost wrapper,
// i.e. they run in registration order around the core exec.
func withHooks[V Value, M any, PM interface {
	*M
	Mutation
}](ctx context.Context, exec func(context.Context) (V, error), mutation PM, hooks []Hook) (value V, err error) {
	if len(hooks) == 0 {
		return exec(ctx)
	}
	var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
		mutationT, ok := any(m).(PM)
		if !ok {
			return nil, fmt.Errorf("unexpected mutation type %T", m)
		}
		// Set the mutation to the builder. Hooks may have replaced the
		// mutation value, so copy it back before executing.
		*mutation = *mutationT
		return exec(ctx)
	})
	for i := len(hooks) - 1; i >= 0; i-- {
		if hooks[i] == nil {
			return value, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
		}
		mut = hooks[i](mut)
	}
	v, err := mut.Mutate(ctx, mutation)
	if err != nil {
		return value, err
	}
	nv, ok := v.(V)
	if !ok {
		return value, fmt.Errorf("unexpected node type %T returned from %T", v, mutation)
	}
	return nv, nil
}

// setContextOp returns a new context with the given QueryContext attached (including its op) in case it does not exist.
// An existing QueryContext (e.g. from an outer query) is left untouched.
func setContextOp(ctx context.Context, qc *QueryContext, op string) context.Context {
	if ent.QueryFromContext(ctx) == nil {
		qc.Op = op
		ctx = ent.NewQueryContext(ctx, qc)
	}
	return ctx
}
+
// querierAll adapts a query type's internal sqlAll into a Querier so that
// interceptors can wrap the terminal fetch step.
func querierAll[V Value, Q interface {
	sqlAll(context.Context, ...queryHook) (V, error)
}]() Querier {
	return QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
		query, ok := q.(Q)
		if !ok {
			return nil, fmt.Errorf("unexpected query type %T", q)
		}
		return query.sqlAll(ctx)
	})
}

// querierCount adapts a query type's internal sqlCount into a Querier.
func querierCount[Q interface {
	sqlCount(context.Context) (int, error)
}]() Querier {
	return QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
		query, ok := q.(Q)
		if !ok {
			return nil, fmt.Errorf("unexpected query type %T", q)
		}
		return query.sqlCount(ctx)
	})
}

// withInterceptors wraps the querier with the interceptors in reverse order
// (so inters[0] is outermost) and asserts the result to the expected type V.
func withInterceptors[V Value](ctx context.Context, q Query, qr Querier, inters []Interceptor) (v V, err error) {
	for i := len(inters) - 1; i >= 0; i-- {
		qr = inters[i].Intercept(qr)
	}
	rv, err := qr.Query(ctx, q)
	if err != nil {
		return v, err
	}
	vt, ok := rv.(V)
	if !ok {
		return v, fmt.Errorf("unexpected type %T returned from %T. expected type: %T", vt, q, v)
	}
	return vt, nil
}

// scanWithInterceptors runs a Select/GroupBy scan through the interceptor
// chain. Because sqlScan writes into v (a pointer) rather than returning a
// value, the result is reflected back out of the querier and copied into v.
func scanWithInterceptors[Q1 ent.Query, Q2 interface {
	sqlScan(context.Context, Q1, any) error
}](ctx context.Context, rootQuery Q1, selectOrGroup Q2, inters []Interceptor, v any) error {
	rv := reflect.ValueOf(v)
	var qr Querier = QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
		query, ok := q.(Q1)
		if !ok {
			return nil, fmt.Errorf("unexpected query type %T", q)
		}
		if err := selectOrGroup.sqlScan(ctx, query, v); err != nil {
			return nil, err
		}
		// Expose the scanned value (not the pointer) to interceptors.
		if k := rv.Kind(); k == reflect.Pointer && rv.Elem().CanInterface() {
			return rv.Elem().Interface(), nil
		}
		return v, nil
	})
	for i := len(inters) - 1; i >= 0; i-- {
		qr = inters[i].Intercept(qr)
	}
	vv, err := qr.Query(ctx, rootQuery)
	if err != nil {
		return err
	}
	// Copy the (possibly interceptor-modified) result back into v, handling
	// both pointer-to-value and value results; nil results are left alone.
	switch rv2 := reflect.ValueOf(vv); {
	case rv.IsNil(), rv2.IsNil(), rv.Kind() != reflect.Pointer:
	case rv.Type() == rv2.Type():
		rv.Elem().Set(rv2.Elem())
	case rv.Elem().Type() == rv2.Type():
		rv.Elem().Set(rv2)
	}
	return nil
}

// queryHook describes an internal hook for the different sqlAll methods.
type queryHook func(context.Context, *sqlgraph.QuerySpec)

+ 84 - 0
ent/enttest/enttest.go

@@ -0,0 +1,84 @@
+// Code generated by ent, DO NOT EDIT.
+
+package enttest
+
+import (
+	"context"
+
+	"github.com/suyuan32/simple-admin-job/ent"
+	// required by schema hooks.
+	_ "github.com/suyuan32/simple-admin-job/ent/runtime"
+
+	"entgo.io/ent/dialect/sql/schema"
+	"github.com/suyuan32/simple-admin-job/ent/migrate"
+)
+
type (
	// TestingT is the interface that is shared between
	// testing.T and testing.B and used by enttest.
	TestingT interface {
		FailNow()
		Error(...any)
	}

	// Option configures client creation.
	Option func(*options)

	// options aggregates the client and migration options applied by Open/NewClient.
	options struct {
		opts        []ent.Option
		migrateOpts []schema.MigrateOption
	}
)

// WithOptions forwards options to client creation.
func WithOptions(opts ...ent.Option) Option {
	return func(o *options) {
		o.opts = append(o.opts, opts...)
	}
}

// WithMigrateOptions forwards options to auto migration.
func WithMigrateOptions(opts ...schema.MigrateOption) Option {
	return func(o *options) {
		o.migrateOpts = append(o.migrateOpts, opts...)
	}
}

// newOptions folds the functional options into a single options struct.
func newOptions(opts []Option) *options {
	o := &options{}
	for _, opt := range opts {
		opt(o)
	}
	return o
}

// Open calls ent.Open and auto-run migration.
// Any failure is reported on t and aborts the test via FailNow.
func Open(t TestingT, driverName, dataSourceName string, opts ...Option) *ent.Client {
	o := newOptions(opts)
	c, err := ent.Open(driverName, dataSourceName, o.opts...)
	if err != nil {
		t.Error(err)
		t.FailNow()
	}
	migrateSchema(t, c, o)
	return c
}

// NewClient calls ent.NewClient and auto-run migration.
func NewClient(t TestingT, opts ...Option) *ent.Client {
	o := newOptions(opts)
	c := ent.NewClient(o.opts...)
	migrateSchema(t, c, o)
	return c
}

// migrateSchema runs auto migration on a copy of the generated tables,
// failing the test immediately on any error.
func migrateSchema(t TestingT, c *ent.Client, o *options) {
	tables, err := schema.CopyTables(migrate.Tables)
	if err != nil {
		t.Error(err)
		t.FailNow()
	}
	if err := migrate.Create(context.Background(), c.Schema, tables, o.migrateOpts...); err != nil {
		t.Error(err)
		t.FailNow()
	}
}

+ 3 - 0
ent/generate.go

@@ -0,0 +1,3 @@
+package ent
+
+//go:generate go run -mod=mod entgo.io/ent/cmd/ent generate ./schema

+ 211 - 0
ent/hook/hook.go

@@ -0,0 +1,211 @@
+// Code generated by ent, DO NOT EDIT.
+
+package hook
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/suyuan32/simple-admin-job/ent"
+)
+
// The TaskFunc type is an adapter to allow the use of ordinary
// function as Task mutator.
type TaskFunc func(context.Context, *ent.TaskMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
// It fails with a descriptive error if m is not a *ent.TaskMutation.
func (f TaskFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.TaskMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.TaskMutation", m)
}

// The TaskLogFunc type is an adapter to allow the use of ordinary
// function as TaskLog mutator.
type TaskLogFunc func(context.Context, *ent.TaskLogMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
// It fails with a descriptive error if m is not a *ent.TaskLogMutation.
func (f TaskLogFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.TaskLogMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.TaskLogMutation", m)
}
+
// Condition is a hook condition function.
type Condition func(context.Context, ent.Mutation) bool

// And groups conditions with the AND operator.
// Evaluation short-circuits on the first false condition.
func And(first, second Condition, rest ...Condition) Condition {
	return func(ctx context.Context, m ent.Mutation) bool {
		if !first(ctx, m) || !second(ctx, m) {
			return false
		}
		for _, cond := range rest {
			if !cond(ctx, m) {
				return false
			}
		}
		return true
	}
}

// Or groups conditions with the OR operator.
// Evaluation short-circuits on the first true condition.
func Or(first, second Condition, rest ...Condition) Condition {
	return func(ctx context.Context, m ent.Mutation) bool {
		if first(ctx, m) || second(ctx, m) {
			return true
		}
		for _, cond := range rest {
			if cond(ctx, m) {
				return true
			}
		}
		return false
	}
}

// Not negates a given condition.
func Not(cond Condition) Condition {
	return func(ctx context.Context, m ent.Mutation) bool {
		return !cond(ctx, m)
	}
}

// HasOp is a condition testing mutation operation.
func HasOp(op ent.Op) Condition {
	return func(_ context.Context, m ent.Mutation) bool {
		return m.Op().Is(op)
	}
}

// HasAddedFields is a condition validating `.AddedField` on fields.
// It is true only when every listed field has an added (numeric delta) value.
func HasAddedFields(field string, fields ...string) Condition {
	return func(_ context.Context, m ent.Mutation) bool {
		if _, exists := m.AddedField(field); !exists {
			return false
		}
		for _, field := range fields {
			if _, exists := m.AddedField(field); !exists {
				return false
			}
		}
		return true
	}
}

// HasClearedFields is a condition validating `.FieldCleared` on fields.
// It is true only when every listed field was cleared by the mutation.
func HasClearedFields(field string, fields ...string) Condition {
	return func(_ context.Context, m ent.Mutation) bool {
		if exists := m.FieldCleared(field); !exists {
			return false
		}
		for _, field := range fields {
			if exists := m.FieldCleared(field); !exists {
				return false
			}
		}
		return true
	}
}

// HasFields is a condition validating `.Field` on fields.
// It is true only when every listed field is set on the mutation.
func HasFields(field string, fields ...string) Condition {
	return func(_ context.Context, m ent.Mutation) bool {
		if _, exists := m.Field(field); !exists {
			return false
		}
		for _, field := range fields {
			if _, exists := m.Field(field); !exists {
				return false
			}
		}
		return true
	}
}
+
// If executes the given hook under condition.
//
//	hook.If(ComputeAverage, And(HasFields(...), HasAddedFields(...)))
//
// When the condition is false, the hook is skipped and the chain continues.
func If(hk ent.Hook, cond Condition) ent.Hook {
	return func(next ent.Mutator) ent.Mutator {
		return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
			if cond(ctx, m) {
				return hk(next).Mutate(ctx, m)
			}
			return next.Mutate(ctx, m)
		})
	}
}

// On executes the given hook only for the given operation.
//
//	hook.On(Log, ent.Delete|ent.Create)
func On(hk ent.Hook, op ent.Op) ent.Hook {
	return If(hk, HasOp(op))
}

// Unless skips the given hook only for the given operation.
//
//	hook.Unless(Log, ent.Update|ent.UpdateOne)
func Unless(hk ent.Hook, op ent.Op) ent.Hook {
	return If(hk, Not(HasOp(op)))
}

// FixedError is a hook returning a fixed error.
// It discards the mutator chain entirely, so the mutation never executes.
func FixedError(err error) ent.Hook {
	return func(ent.Mutator) ent.Mutator {
		return ent.MutateFunc(func(context.Context, ent.Mutation) (ent.Value, error) {
			return nil, err
		})
	}
}

// Reject returns a hook that rejects all operations that match op.
//
//	func (T) Hooks() []ent.Hook {
//		return []ent.Hook{
//			Reject(ent.Delete|ent.Update),
//		}
//	}
func Reject(op ent.Op) ent.Hook {
	hk := FixedError(fmt.Errorf("%s operation is not allowed", op))
	return On(hk, op)
}
+
// Chain acts as a list of hooks and is effectively immutable.
// Once created, it will always hold the same set of hooks in the same order.
type Chain struct {
	hooks []ent.Hook
}

// NewChain creates a new chain of hooks.
// The input slice is copied so later mutation by the caller cannot affect the chain.
func NewChain(hooks ...ent.Hook) Chain {
	return Chain{append([]ent.Hook(nil), hooks...)}
}

// Hook chains the list of hooks and returns the final hook.
// Hooks are applied in reverse so hooks[0] becomes the outermost wrapper,
// i.e. they execute in the order they were added.
func (c Chain) Hook() ent.Hook {
	return func(mutator ent.Mutator) ent.Mutator {
		for i := len(c.hooks) - 1; i >= 0; i-- {
			mutator = c.hooks[i](mutator)
		}
		return mutator
	}
}

// Append extends a chain, adding the specified hook
// as the last ones in the mutation flow.
// A new Chain is returned; the receiver is left unchanged.
func (c Chain) Append(hooks ...ent.Hook) Chain {
	newHooks := make([]ent.Hook, 0, len(c.hooks)+len(hooks))
	newHooks = append(newHooks, c.hooks...)
	newHooks = append(newHooks, hooks...)
	return Chain{newHooks}
}

// Extend extends a chain, adding the specified chain
// as the last ones in the mutation flow.
func (c Chain) Extend(chain Chain) Chain {
	return c.Append(chain.hooks...)
}

+ 64 - 0
ent/migrate/migrate.go

@@ -0,0 +1,64 @@
+// Code generated by ent, DO NOT EDIT.
+
+package migrate
+
+import (
+	"context"
+	"fmt"
+	"io"
+
+	"entgo.io/ent/dialect"
+	"entgo.io/ent/dialect/sql/schema"
+)
+
+var (
+	// The options below re-export entgo's dialect/sql/schema migration options
+	// so callers only need to import this package.
+
+	// WithGlobalUniqueID sets the universal ids options to the migration.
+	// If this option is enabled, ent migration will allocate a 1<<32 range
+	// for the ids of each entity (table).
+	// Note that this option cannot be applied on tables that already exist.
+	WithGlobalUniqueID = schema.WithGlobalUniqueID
+	// WithDropColumn sets the drop column option to the migration.
+	// If this option is enabled, ent migration will drop old columns
+	// that were used for both fields and edges. This defaults to false.
+	WithDropColumn = schema.WithDropColumn
+	// WithDropIndex sets the drop index option to the migration.
+	// If this option is enabled, ent migration will drop old indexes
+	// that were defined in the schema. This defaults to false.
+	// Note that unique constraints are defined using `UNIQUE INDEX`,
+	// and therefore, it's recommended to enable this option to get more
+	// flexibility in the schema changes.
+	WithDropIndex = schema.WithDropIndex
+	// WithForeignKeys enables creating foreign-key in schema DDL. This defaults to true.
+	WithForeignKeys = schema.WithForeignKeys
+)
+
+// Schema is the API for creating, migrating and dropping a schema.
+type Schema struct {
+	drv dialect.Driver
+}
+
+// NewSchema creates a new schema client.
+func NewSchema(drv dialect.Driver) *Schema { return &Schema{drv: drv} }
+
+// Create creates all schema resources.
+func (s *Schema) Create(ctx context.Context, opts ...schema.MigrateOption) error {
+	return Create(ctx, s, Tables, opts...)
+}
+
+// Create creates all table resources using the given schema driver.
+func Create(ctx context.Context, s *Schema, tables []*schema.Table, opts ...schema.MigrateOption) error {
+	migrate, err := schema.NewMigrate(s.drv, opts...)
+	if err != nil {
+		return fmt.Errorf("ent/migrate: %w", err)
+	}
+	return migrate.Create(ctx, tables...)
+}
+
+// WriteTo writes the schema changes to w instead of running them against the database.
+// It wraps the underlying driver in a schema.WriteDriver, so statements are
+// printed to w rather than executed.
+//
+//	if err := client.Schema.WriteTo(context.Background(), os.Stdout); err != nil {
+//		log.Fatal(err)
+//	}
+func (s *Schema) WriteTo(ctx context.Context, w io.Writer, opts ...schema.MigrateOption) error {
+	return Create(ctx, &Schema{drv: &schema.WriteDriver{Writer: w, Driver: s.drv}}, Tables, opts...)
+}

+ 74 - 0
ent/migrate/schema.go

@@ -0,0 +1,74 @@
+// Code generated by ent, DO NOT EDIT.
+
+package migrate
+
+import (
+	"entgo.io/ent/dialect/entsql"
+	"entgo.io/ent/dialect/sql/schema"
+	"entgo.io/ent/schema/field"
+)
+
+var (
+	// SysTasksColumns holds the columns for the "sys_tasks" table.
+	SysTasksColumns = []*schema.Column{
+		{Name: "id", Type: field.TypeUint64, Increment: true},
+		{Name: "created_at", Type: field.TypeTime, Comment: "Create Time | 创建日期"},
+		{Name: "updated_at", Type: field.TypeTime, Comment: "Update Time | 修改日期"},
+		{Name: "status", Type: field.TypeUint8, Nullable: true, Comment: "Status 1: normal 2: ban | 状态 1 正常 2 禁用", Default: 1},
+		{Name: "name", Type: field.TypeString},
+		{Name: "task_group", Type: field.TypeString},
+		{Name: "cron_expression", Type: field.TypeString},
+		{Name: "pattern", Type: field.TypeString},
+		{Name: "payload", Type: field.TypeString},
+	}
+	// SysTasksTable holds the schema information for the "sys_tasks" table.
+	SysTasksTable = &schema.Table{
+		Name:       "sys_tasks",
+		Columns:    SysTasksColumns,
+		PrimaryKey: []*schema.Column{SysTasksColumns[0]},
+		Indexes: []*schema.Index{
+			{
+				// Unique index over the "pattern" column (SysTasksColumns[7]).
+				Name:    "task_pattern",
+				Unique:  true,
+				Columns: []*schema.Column{SysTasksColumns[7]},
+			},
+		},
+	}
+	// SysTaskLogsColumns holds the columns for the "sys_task_logs" table.
+	SysTaskLogsColumns = []*schema.Column{
+		{Name: "id", Type: field.TypeUint64, Increment: true},
+		{Name: "started_at", Type: field.TypeTime, Comment: "Task Started Time | 任务启动时间"},
+		{Name: "finished_at", Type: field.TypeTime, Comment: "Task Finished Time | 任务完成时间"},
+		{Name: "result", Type: field.TypeUint8, Comment: "The Task Process Result | 任务执行结果"},
+		{Name: "task_task_logs", Type: field.TypeUint64, Nullable: true},
+	}
+	// SysTaskLogsTable holds the schema information for the "sys_task_logs" table.
+	SysTaskLogsTable = &schema.Table{
+		Name:       "sys_task_logs",
+		Columns:    SysTaskLogsColumns,
+		PrimaryKey: []*schema.Column{SysTaskLogsColumns[0]},
+		ForeignKeys: []*schema.ForeignKey{
+			{
+				// "task_task_logs" (SysTaskLogsColumns[4]) references sys_tasks.id;
+				// the column is nullable, so the FK is set to NULL on delete.
+				Symbol:     "sys_task_logs_sys_tasks_task_logs",
+				Columns:    []*schema.Column{SysTaskLogsColumns[4]},
+				RefColumns: []*schema.Column{SysTasksColumns[0]},
+				OnDelete:   schema.SetNull,
+			},
+		},
+	}
+	// Tables holds all the tables in the schema.
+	Tables = []*schema.Table{
+		SysTasksTable,
+		SysTaskLogsTable,
+	}
+)
+
+// init wires the cross-table references (FK target table) and attaches the
+// explicit table-name annotations that cannot be expressed in the var block.
+func init() {
+	SysTasksTable.Annotation = &entsql.Annotation{
+		Table: "sys_tasks",
+	}
+	SysTaskLogsTable.ForeignKeys[0].RefTable = SysTasksTable
+	SysTaskLogsTable.Annotation = &entsql.Annotation{
+		Table: "sys_task_logs",
+	}
+}

+ 1435 - 0
ent/mutation.go

@@ -0,0 +1,1435 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect/sql"
+	"github.com/suyuan32/simple-admin-job/ent/predicate"
+	"github.com/suyuan32/simple-admin-job/ent/task"
+	"github.com/suyuan32/simple-admin-job/ent/tasklog"
+)
+
+const (
+	// Operation types, re-exported from the ent runtime for convenience.
+	OpCreate    = ent.OpCreate
+	OpDelete    = ent.OpDelete
+	OpDeleteOne = ent.OpDeleteOne
+	OpUpdate    = ent.OpUpdate
+	OpUpdateOne = ent.OpUpdateOne
+
+	// Node types.
+	TypeTask    = "Task"
+	TypeTaskLog = "TaskLog"
+)
+
+// TaskMutation represents an operation that mutates the Task nodes in the graph.
+type TaskMutation struct {
+	config
+	op               Op
+	typ              string
+	id               *uint64
+	created_at       *time.Time
+	updated_at       *time.Time
+	status           *uint8
+	addstatus        *int8 // pending increment for "status"; accumulated by AddStatus
+	name             *string
+	task_group       *string
+	cron_expression  *string
+	pattern          *string
+	payload          *string
+	clearedFields    map[string]struct{} // set of field names cleared in this mutation
+	task_logs        map[uint64]struct{} // IDs added to the "task_logs" edge
+	removedtask_logs map[uint64]struct{} // IDs removed from the "task_logs" edge
+	clearedtask_logs bool
+	done             bool // set once the mutation executed; blocks old-value queries
+	oldValue         func(context.Context) (*Task, error)
+	predicates       []predicate.Task
+}
+
+// Compile-time check that TaskMutation implements the ent.Mutation interface.
+var _ ent.Mutation = (*TaskMutation)(nil)
+
+// taskOption allows management of the mutation configuration using functional options.
+type taskOption func(*TaskMutation)
+
+// newTaskMutation creates new mutation for the Task entity.
+// Options (e.g. withTaskID, withTask) configure how old values are resolved.
+func newTaskMutation(c config, op Op, opts ...taskOption) *TaskMutation {
+	m := &TaskMutation{
+		config:        c,
+		op:            op,
+		typ:           TypeTask,
+		clearedFields: make(map[string]struct{}),
+	}
+	for _, opt := range opts {
+		opt(m)
+	}
+	return m
+}
+
+// withTaskID sets the ID field of the mutation.
+// The old Task is lazily fetched from the database at most once (sync.Once)
+// when an Old* accessor is first called, and only while the mutation has not
+// yet been executed (m.done).
+func withTaskID(id uint64) taskOption {
+	return func(m *TaskMutation) {
+		var (
+			err   error
+			once  sync.Once
+			value *Task
+		)
+		m.oldValue = func(ctx context.Context) (*Task, error) {
+			once.Do(func() {
+				if m.done {
+					err = errors.New("querying old values post mutation is not allowed")
+				} else {
+					value, err = m.Client().Task.Get(ctx, id)
+				}
+			})
+			return value, err
+		}
+		m.id = &id
+	}
+}
+
+// withTask sets the old Task of the mutation.
+// Unlike withTaskID, the node is already in memory, so no database query is needed.
+func withTask(node *Task) taskOption {
+	return func(m *TaskMutation) {
+		m.oldValue = func(context.Context) (*Task, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m TaskMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+// The transaction check is done by type-asserting the driver to *txDriver.
+func (m TaskMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("ent: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// SetID sets the value of the id field. Note that this
+// operation is only accepted on creation of Task entities.
+func (m *TaskMutation) SetID(id uint64) {
+	m.id = &id
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *TaskMutation) ID() (id uint64, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *TaskMutation) IDs(ctx context.Context) ([]uint64, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []uint64{id}, nil
+		}
+		// No explicit ID on a *One operation: fall back to the predicate query below.
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().Task.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (m *TaskMutation) SetCreatedAt(t time.Time) {
+	m.created_at = &t
+}
+
+// CreatedAt returns the value of the "created_at" field in the mutation.
+func (m *TaskMutation) CreatedAt() (r time.Time, exists bool) {
+	v := m.created_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldCreatedAt returns the old "created_at" field's value of the Task entity.
+// If the Task object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *TaskMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldCreatedAt requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err)
+	}
+	return oldValue.CreatedAt, nil
+}
+
+// ResetCreatedAt resets all changes to the "created_at" field.
+func (m *TaskMutation) ResetCreatedAt() {
+	m.created_at = nil
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (m *TaskMutation) SetUpdatedAt(t time.Time) {
+	m.updated_at = &t
+}
+
+// UpdatedAt returns the value of the "updated_at" field in the mutation.
+func (m *TaskMutation) UpdatedAt() (r time.Time, exists bool) {
+	v := m.updated_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldUpdatedAt returns the old "updated_at" field's value of the Task entity.
+// If the Task object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *TaskMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldUpdatedAt requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err)
+	}
+	return oldValue.UpdatedAt, nil
+}
+
+// ResetUpdatedAt resets all changes to the "updated_at" field.
+func (m *TaskMutation) ResetUpdatedAt() {
+	m.updated_at = nil
+}
+
+// SetStatus sets the "status" field.
+// Setting an absolute value discards any pending AddStatus increment.
+func (m *TaskMutation) SetStatus(u uint8) {
+	m.status = &u
+	m.addstatus = nil
+}
+
+// Status returns the value of the "status" field in the mutation.
+func (m *TaskMutation) Status() (r uint8, exists bool) {
+	v := m.status
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldStatus returns the old "status" field's value of the Task entity.
+// If the Task object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *TaskMutation) OldStatus(ctx context.Context) (v uint8, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldStatus is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldStatus requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldStatus: %w", err)
+	}
+	return oldValue.Status, nil
+}
+
+// AddStatus adds u to the "status" field.
+// Multiple calls accumulate into a single pending increment.
+func (m *TaskMutation) AddStatus(u int8) {
+	if m.addstatus != nil {
+		*m.addstatus += u
+	} else {
+		m.addstatus = &u
+	}
+}
+
+// AddedStatus returns the value that was added to the "status" field in this mutation.
+func (m *TaskMutation) AddedStatus() (r int8, exists bool) {
+	v := m.addstatus
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// ClearStatus clears the value of the "status" field.
+// Clearing also discards any pending SetStatus/AddStatus changes.
+func (m *TaskMutation) ClearStatus() {
+	m.status = nil
+	m.addstatus = nil
+	m.clearedFields[task.FieldStatus] = struct{}{}
+}
+
+// StatusCleared returns if the "status" field was cleared in this mutation.
+func (m *TaskMutation) StatusCleared() bool {
+	_, ok := m.clearedFields[task.FieldStatus]
+	return ok
+}
+
+// ResetStatus resets all changes to the "status" field.
+func (m *TaskMutation) ResetStatus() {
+	m.status = nil
+	m.addstatus = nil
+	delete(m.clearedFields, task.FieldStatus)
+}
+
+// The five string-field accessor groups below (name, task_group,
+// cron_expression, pattern, payload) all follow the same
+// Set/Get/Old/Reset pattern.
+
+// SetName sets the "name" field.
+func (m *TaskMutation) SetName(s string) {
+	m.name = &s
+}
+
+// Name returns the value of the "name" field in the mutation.
+func (m *TaskMutation) Name() (r string, exists bool) {
+	v := m.name
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldName returns the old "name" field's value of the Task entity.
+// If the Task object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *TaskMutation) OldName(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldName is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldName requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldName: %w", err)
+	}
+	return oldValue.Name, nil
+}
+
+// ResetName resets all changes to the "name" field.
+func (m *TaskMutation) ResetName() {
+	m.name = nil
+}
+
+// SetTaskGroup sets the "task_group" field.
+func (m *TaskMutation) SetTaskGroup(s string) {
+	m.task_group = &s
+}
+
+// TaskGroup returns the value of the "task_group" field in the mutation.
+func (m *TaskMutation) TaskGroup() (r string, exists bool) {
+	v := m.task_group
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldTaskGroup returns the old "task_group" field's value of the Task entity.
+// If the Task object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *TaskMutation) OldTaskGroup(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldTaskGroup is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldTaskGroup requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldTaskGroup: %w", err)
+	}
+	return oldValue.TaskGroup, nil
+}
+
+// ResetTaskGroup resets all changes to the "task_group" field.
+func (m *TaskMutation) ResetTaskGroup() {
+	m.task_group = nil
+}
+
+// SetCronExpression sets the "cron_expression" field.
+func (m *TaskMutation) SetCronExpression(s string) {
+	m.cron_expression = &s
+}
+
+// CronExpression returns the value of the "cron_expression" field in the mutation.
+func (m *TaskMutation) CronExpression() (r string, exists bool) {
+	v := m.cron_expression
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldCronExpression returns the old "cron_expression" field's value of the Task entity.
+// If the Task object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *TaskMutation) OldCronExpression(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldCronExpression is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldCronExpression requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldCronExpression: %w", err)
+	}
+	return oldValue.CronExpression, nil
+}
+
+// ResetCronExpression resets all changes to the "cron_expression" field.
+func (m *TaskMutation) ResetCronExpression() {
+	m.cron_expression = nil
+}
+
+// SetPattern sets the "pattern" field.
+func (m *TaskMutation) SetPattern(s string) {
+	m.pattern = &s
+}
+
+// Pattern returns the value of the "pattern" field in the mutation.
+func (m *TaskMutation) Pattern() (r string, exists bool) {
+	v := m.pattern
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldPattern returns the old "pattern" field's value of the Task entity.
+// If the Task object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *TaskMutation) OldPattern(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldPattern is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldPattern requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldPattern: %w", err)
+	}
+	return oldValue.Pattern, nil
+}
+
+// ResetPattern resets all changes to the "pattern" field.
+func (m *TaskMutation) ResetPattern() {
+	m.pattern = nil
+}
+
+// SetPayload sets the "payload" field.
+func (m *TaskMutation) SetPayload(s string) {
+	m.payload = &s
+}
+
+// Payload returns the value of the "payload" field in the mutation.
+func (m *TaskMutation) Payload() (r string, exists bool) {
+	v := m.payload
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldPayload returns the old "payload" field's value of the Task entity.
+// If the Task object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *TaskMutation) OldPayload(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldPayload is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldPayload requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldPayload: %w", err)
+	}
+	return oldValue.Payload, nil
+}
+
+// ResetPayload resets all changes to the "payload" field.
+func (m *TaskMutation) ResetPayload() {
+	m.payload = nil
+}
+
+// AddTaskLogIDs adds the "task_logs" edge to the TaskLog entity by ids.
+func (m *TaskMutation) AddTaskLogIDs(ids ...uint64) {
+	if m.task_logs == nil {
+		m.task_logs = make(map[uint64]struct{})
+	}
+	for i := range ids {
+		m.task_logs[ids[i]] = struct{}{}
+	}
+}
+
+// ClearTaskLogs clears the "task_logs" edge to the TaskLog entity.
+func (m *TaskMutation) ClearTaskLogs() {
+	m.clearedtask_logs = true
+}
+
+// TaskLogsCleared reports if the "task_logs" edge to the TaskLog entity was cleared.
+func (m *TaskMutation) TaskLogsCleared() bool {
+	return m.clearedtask_logs
+}
+
+// RemoveTaskLogIDs removes the "task_logs" edge to the TaskLog entity by IDs.
+// Removed IDs are also dropped from the set of added IDs, so an add followed
+// by a remove of the same ID is a no-op for the "added" set.
+func (m *TaskMutation) RemoveTaskLogIDs(ids ...uint64) {
+	if m.removedtask_logs == nil {
+		m.removedtask_logs = make(map[uint64]struct{})
+	}
+	for i := range ids {
+		delete(m.task_logs, ids[i])
+		m.removedtask_logs[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedTaskLogsIDs returns the removed IDs of the "task_logs" edge to the TaskLog entity.
+func (m *TaskMutation) RemovedTaskLogsIDs() (ids []uint64) {
+	for id := range m.removedtask_logs {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// TaskLogsIDs returns the "task_logs" edge IDs in the mutation.
+func (m *TaskMutation) TaskLogsIDs() (ids []uint64) {
+	for id := range m.task_logs {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetTaskLogs resets all changes to the "task_logs" edge.
+func (m *TaskMutation) ResetTaskLogs() {
+	m.task_logs = nil
+	m.clearedtask_logs = false
+	m.removedtask_logs = nil
+}
+
+// Where appends a list predicates to the TaskMutation builder.
+func (m *TaskMutation) Where(ps ...predicate.Task) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the TaskMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *TaskMutation) WhereP(ps ...func(*sql.Selector)) {
+	// predicate.Task is itself a func(*sql.Selector), so each raw predicate
+	// converts directly.
+	p := make([]predicate.Task, len(ps))
+	for i := range ps {
+		p[i] = ps[i]
+	}
+	m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *TaskMutation) Op() Op {
+	return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *TaskMutation) SetOp(op Op) {
+	m.op = op
+}
+
+// Type returns the node type of this mutation (Task).
+func (m *TaskMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *TaskMutation) Fields() []string {
+	fields := make([]string, 0, 8)
+	if m.created_at != nil {
+		fields = append(fields, task.FieldCreatedAt)
+	}
+	if m.updated_at != nil {
+		fields = append(fields, task.FieldUpdatedAt)
+	}
+	if m.status != nil {
+		fields = append(fields, task.FieldStatus)
+	}
+	if m.name != nil {
+		fields = append(fields, task.FieldName)
+	}
+	if m.task_group != nil {
+		fields = append(fields, task.FieldTaskGroup)
+	}
+	if m.cron_expression != nil {
+		fields = append(fields, task.FieldCronExpression)
+	}
+	if m.pattern != nil {
+		fields = append(fields, task.FieldPattern)
+	}
+	if m.payload != nil {
+		fields = append(fields, task.FieldPayload)
+	}
+	return fields
+}
+
+// Field returns the value of a field with the given name. The second boolean
+// return value indicates that this field was not set, or was not defined in the
+// schema.
+func (m *TaskMutation) Field(name string) (ent.Value, bool) {
+	switch name {
+	case task.FieldCreatedAt:
+		return m.CreatedAt()
+	case task.FieldUpdatedAt:
+		return m.UpdatedAt()
+	case task.FieldStatus:
+		return m.Status()
+	case task.FieldName:
+		return m.Name()
+	case task.FieldTaskGroup:
+		return m.TaskGroup()
+	case task.FieldCronExpression:
+		return m.CronExpression()
+	case task.FieldPattern:
+		return m.Pattern()
+	case task.FieldPayload:
+		return m.Payload()
+	}
+	return nil, false
+}
+
+// OldField returns the old value of the field from the database. An error is
+// returned if the mutation operation is not UpdateOne, or the query to the
+// database failed.
+func (m *TaskMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
+	switch name {
+	case task.FieldCreatedAt:
+		return m.OldCreatedAt(ctx)
+	case task.FieldUpdatedAt:
+		return m.OldUpdatedAt(ctx)
+	case task.FieldStatus:
+		return m.OldStatus(ctx)
+	case task.FieldName:
+		return m.OldName(ctx)
+	case task.FieldTaskGroup:
+		return m.OldTaskGroup(ctx)
+	case task.FieldCronExpression:
+		return m.OldCronExpression(ctx)
+	case task.FieldPattern:
+		return m.OldPattern(ctx)
+	case task.FieldPayload:
+		return m.OldPayload(ctx)
+	}
+	return nil, fmt.Errorf("unknown Task field %s", name)
+}
+
+// SetField sets the value of a field with the given name. It returns an error if
+// the field is not defined in the schema, or if the type mismatched the field
+// type.
+func (m *TaskMutation) SetField(name string, value ent.Value) error {
+	switch name {
+	case task.FieldCreatedAt:
+		v, ok := value.(time.Time)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetCreatedAt(v)
+		return nil
+	case task.FieldUpdatedAt:
+		v, ok := value.(time.Time)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetUpdatedAt(v)
+		return nil
+	case task.FieldStatus:
+		v, ok := value.(uint8)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetStatus(v)
+		return nil
+	case task.FieldName:
+		v, ok := value.(string)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetName(v)
+		return nil
+	case task.FieldTaskGroup:
+		v, ok := value.(string)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetTaskGroup(v)
+		return nil
+	case task.FieldCronExpression:
+		v, ok := value.(string)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetCronExpression(v)
+		return nil
+	case task.FieldPattern:
+		v, ok := value.(string)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetPattern(v)
+		return nil
+	case task.FieldPayload:
+		v, ok := value.(string)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetPayload(v)
+		return nil
+	}
+	return fmt.Errorf("unknown Task field %s", name)
+}
+
+// AddedFields returns all numeric fields that were incremented/decremented during
+// this mutation.
+func (m *TaskMutation) AddedFields() []string {
+	var fields []string
+	if m.addstatus != nil {
+		fields = append(fields, task.FieldStatus)
+	}
+	return fields
+}
+
+// AddedField returns the numeric value that was incremented/decremented on a field
+// with the given name. The second boolean return value indicates that this field
+// was not set, or was not defined in the schema.
+func (m *TaskMutation) AddedField(name string) (ent.Value, bool) {
+	switch name {
+	case task.FieldStatus:
+		return m.AddedStatus()
+	}
+	return nil, false
+}
+
+// AddField adds the value to the field with the given name. It returns an error if
+// the field is not defined in the schema, or if the type mismatched the field
+// type.
+// Note the increment type is the signed int8, while the field itself is uint8.
+func (m *TaskMutation) AddField(name string, value ent.Value) error {
+	switch name {
+	case task.FieldStatus:
+		v, ok := value.(int8)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.AddStatus(v)
+		return nil
+	}
+	return fmt.Errorf("unknown Task numeric field %s", name)
+}
+
+// ClearedFields returns all nullable fields that were cleared during this
+// mutation. For Task, "status" is the only clearable field.
+func (m *TaskMutation) ClearedFields() []string {
+	var fields []string
+	if m.FieldCleared(task.FieldStatus) {
+		fields = append(fields, task.FieldStatus)
+	}
+	return fields
+}
+
+// FieldCleared returns a boolean indicating if a field with the given name was
+// cleared in this mutation.
+func (m *TaskMutation) FieldCleared(name string) bool {
+	_, ok := m.clearedFields[name]
+	return ok
+}
+
+// ClearField clears the value of the field with the given name. It returns an
+// error if the field is not defined in the schema.
+func (m *TaskMutation) ClearField(name string) error {
+	switch name {
+	case task.FieldStatus:
+		m.ClearStatus()
+		return nil
+	}
+	return fmt.Errorf("unknown Task nullable field %s", name)
+}
+
+// ResetField resets all changes in the mutation for the field with the given name.
+// It returns an error if the field is not defined in the schema.
+func (m *TaskMutation) ResetField(name string) error {
+	switch name {
+	case task.FieldCreatedAt:
+		m.ResetCreatedAt()
+		return nil
+	case task.FieldUpdatedAt:
+		m.ResetUpdatedAt()
+		return nil
+	case task.FieldStatus:
+		m.ResetStatus()
+		return nil
+	case task.FieldName:
+		m.ResetName()
+		return nil
+	case task.FieldTaskGroup:
+		m.ResetTaskGroup()
+		return nil
+	case task.FieldCronExpression:
+		m.ResetCronExpression()
+		return nil
+	case task.FieldPattern:
+		m.ResetPattern()
+		return nil
+	case task.FieldPayload:
+		m.ResetPayload()
+		return nil
+	}
+	return fmt.Errorf("unknown Task field %s", name)
+}
+
+// AddedEdges returns all edge names that were set/added in this mutation.
+func (m *TaskMutation) AddedEdges() []string {
+	edges := make([]string, 0, 1)
+	if m.task_logs != nil {
+		edges = append(edges, task.EdgeTaskLogs)
+	}
+	return edges
+}
+
+// AddedIDs returns all IDs (to other nodes) that were added for the given edge
+// name in this mutation.
+func (m *TaskMutation) AddedIDs(name string) []ent.Value {
+	switch name {
+	case task.EdgeTaskLogs:
+		ids := make([]ent.Value, 0, len(m.task_logs))
+		for id := range m.task_logs {
+			ids = append(ids, id)
+		}
+		return ids
+	}
+	return nil
+}
+
+// RemovedEdges returns all edge names that were removed in this mutation.
+func (m *TaskMutation) RemovedEdges() []string {
+	edges := make([]string, 0, 1)
+	if m.removedtask_logs != nil {
+		edges = append(edges, task.EdgeTaskLogs)
+	}
+	return edges
+}
+
+// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with
+// the given name in this mutation.
+func (m *TaskMutation) RemovedIDs(name string) []ent.Value {
+	switch name {
+	case task.EdgeTaskLogs:
+		ids := make([]ent.Value, 0, len(m.removedtask_logs))
+		for id := range m.removedtask_logs {
+			ids = append(ids, id)
+		}
+		return ids
+	}
+	return nil
+}
+
+// ClearedEdges returns all edge names that were cleared in this mutation.
+func (m *TaskMutation) ClearedEdges() []string {
+	edges := make([]string, 0, 1)
+	if m.clearedtask_logs {
+		edges = append(edges, task.EdgeTaskLogs)
+	}
+	return edges
+}
+
+// EdgeCleared returns a boolean which indicates if the edge with the given name
+// was cleared in this mutation.
+func (m *TaskMutation) EdgeCleared(name string) bool {
+	switch name {
+	case task.EdgeTaskLogs:
+		return m.clearedtask_logs
+	}
+	return false
+}
+
+// ClearEdge clears the value of the edge with the given name. It returns an error
+// if that edge is not defined in the schema.
+// Task has no unique (O2O/M2O) edges, hence the empty switch.
+func (m *TaskMutation) ClearEdge(name string) error {
+	switch name {
+	}
+	return fmt.Errorf("unknown Task unique edge %s", name)
+}
+
+// ResetEdge resets all changes to the edge with the given name in this mutation.
+// It returns an error if the edge is not defined in the schema.
+func (m *TaskMutation) ResetEdge(name string) error {
+	switch name {
+	case task.EdgeTaskLogs:
+		m.ResetTaskLogs()
+		return nil
+	}
+	return fmt.Errorf("unknown Task edge %s", name)
+}
+
+// TaskLogMutation represents an operation that mutates the TaskLog nodes in the graph.
+type TaskLogMutation struct {
+	config
+	op            Op
+	typ           string
+	id            *uint64
+	started_at    *time.Time
+	finished_at   *time.Time
+	result        *uint8
+	addresult     *int8 // pending increment for "result"
+	clearedFields map[string]struct{}
+	tasks         *uint64 // "tasks" is a unique edge, so a single optional ID
+	clearedtasks  bool
+	done          bool
+	oldValue      func(context.Context) (*TaskLog, error)
+	predicates    []predicate.TaskLog
+}
+
+// Compile-time check that TaskLogMutation implements the ent.Mutation interface.
+var _ ent.Mutation = (*TaskLogMutation)(nil)
+
+// tasklogOption allows management of the mutation configuration using functional options.
+type tasklogOption func(*TaskLogMutation)
+
+// newTaskLogMutation creates new mutation for the TaskLog entity.
+func newTaskLogMutation(c config, op Op, opts ...tasklogOption) *TaskLogMutation {
+	m := &TaskLogMutation{
+		config:        c,
+		op:            op,
+		typ:           TypeTaskLog,
+		clearedFields: make(map[string]struct{}),
+	}
+	for _, opt := range opts {
+		opt(m)
+	}
+	return m
+}
+
+// withTaskLogID sets the ID field of the mutation.
+func withTaskLogID(id uint64) tasklogOption {
+	return func(m *TaskLogMutation) {
+		var (
+			err   error
+			once  sync.Once
+			value *TaskLog
+		)
+		// oldValue lazily fetches the pre-mutation entity from the database
+		// exactly once (guarded by sync.Once) and memoizes both the value and
+		// the error for every subsequent call.
+		m.oldValue = func(ctx context.Context) (*TaskLog, error) {
+			once.Do(func() {
+				if m.done {
+					// The mutation already executed; old values are no longer queryable.
+					err = errors.New("querying old values post mutation is not allowed")
+				} else {
+					value, err = m.Client().TaskLog.Get(ctx, id)
+				}
+			})
+			return value, err
+		}
+		m.id = &id
+	}
+}
+
+// withTaskLog sets the old TaskLog of the mutation.
+func withTaskLog(node *TaskLog) tasklogOption {
+	return func(m *TaskLogMutation) {
+		m.oldValue = func(context.Context) (*TaskLog, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m TaskLogMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m TaskLogMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("ent: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// SetID sets the value of the id field. Note that this
+// operation is only accepted on creation of TaskLog entities.
+func (m *TaskLogMutation) SetID(id uint64) {
+	m.id = &id
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *TaskLogMutation) ID() (id uint64, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or updated by the mutation.
+func (m *TaskLogMutation) IDs(ctx context.Context) ([]uint64, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []uint64{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().TaskLog.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetStartedAt sets the "started_at" field.
+func (m *TaskLogMutation) SetStartedAt(t time.Time) {
+	m.started_at = &t
+}
+
+// StartedAt returns the value of the "started_at" field in the mutation.
+func (m *TaskLogMutation) StartedAt() (r time.Time, exists bool) {
+	v := m.started_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldStartedAt returns the old "started_at" field's value of the TaskLog entity.
+// If the TaskLog object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *TaskLogMutation) OldStartedAt(ctx context.Context) (v time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldStartedAt is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldStartedAt requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldStartedAt: %w", err)
+	}
+	return oldValue.StartedAt, nil
+}
+
+// ResetStartedAt resets all changes to the "started_at" field.
+func (m *TaskLogMutation) ResetStartedAt() {
+	m.started_at = nil
+}
+
+// SetFinishedAt sets the "finished_at" field.
+func (m *TaskLogMutation) SetFinishedAt(t time.Time) {
+	m.finished_at = &t
+}
+
+// FinishedAt returns the value of the "finished_at" field in the mutation.
+func (m *TaskLogMutation) FinishedAt() (r time.Time, exists bool) {
+	v := m.finished_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldFinishedAt returns the old "finished_at" field's value of the TaskLog entity.
+// If the TaskLog object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *TaskLogMutation) OldFinishedAt(ctx context.Context) (v time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldFinishedAt is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldFinishedAt requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldFinishedAt: %w", err)
+	}
+	return oldValue.FinishedAt, nil
+}
+
+// ResetFinishedAt resets all changes to the "finished_at" field.
+func (m *TaskLogMutation) ResetFinishedAt() {
+	m.finished_at = nil
+}
+
+// SetResult sets the "result" field.
+func (m *TaskLogMutation) SetResult(u uint8) {
+	m.result = &u
+	m.addresult = nil
+}
+
+// Result returns the value of the "result" field in the mutation.
+func (m *TaskLogMutation) Result() (r uint8, exists bool) {
+	v := m.result
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldResult returns the old "result" field's value of the TaskLog entity.
+// If the TaskLog object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *TaskLogMutation) OldResult(ctx context.Context) (v uint8, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldResult is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldResult requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldResult: %w", err)
+	}
+	return oldValue.Result, nil
+}
+
+// AddResult adds u to the "result" field.
+func (m *TaskLogMutation) AddResult(u int8) {
+	if m.addresult != nil {
+		*m.addresult += u
+	} else {
+		m.addresult = &u
+	}
+}
+
+// AddedResult returns the value that was added to the "result" field in this mutation.
+func (m *TaskLogMutation) AddedResult() (r int8, exists bool) {
+	v := m.addresult
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// ResetResult resets all changes to the "result" field.
+func (m *TaskLogMutation) ResetResult() {
+	m.result = nil
+	m.addresult = nil
+}
+
+// SetTasksID sets the "tasks" edge to the Task entity by id.
+func (m *TaskLogMutation) SetTasksID(id uint64) {
+	m.tasks = &id
+}
+
+// ClearTasks clears the "tasks" edge to the Task entity.
+func (m *TaskLogMutation) ClearTasks() {
+	m.clearedtasks = true
+}
+
+// TasksCleared reports if the "tasks" edge to the Task entity was cleared.
+func (m *TaskLogMutation) TasksCleared() bool {
+	return m.clearedtasks
+}
+
+// TasksID returns the "tasks" edge ID in the mutation.
+func (m *TaskLogMutation) TasksID() (id uint64, exists bool) {
+	if m.tasks != nil {
+		return *m.tasks, true
+	}
+	return
+}
+
+// TasksIDs returns the "tasks" edge IDs in the mutation.
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
+// TasksID instead. It exists only for internal usage by the builders.
+func (m *TaskLogMutation) TasksIDs() (ids []uint64) {
+	if id := m.tasks; id != nil {
+		ids = append(ids, *id)
+	}
+	return
+}
+
+// ResetTasks resets all changes to the "tasks" edge.
+func (m *TaskLogMutation) ResetTasks() {
+	m.tasks = nil
+	m.clearedtasks = false
+}
+
+// Where appends a list predicates to the TaskLogMutation builder.
+func (m *TaskLogMutation) Where(ps ...predicate.TaskLog) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the TaskLogMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *TaskLogMutation) WhereP(ps ...func(*sql.Selector)) {
+	p := make([]predicate.TaskLog, len(ps))
+	for i := range ps {
+		p[i] = ps[i]
+	}
+	m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *TaskLogMutation) Op() Op {
+	return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *TaskLogMutation) SetOp(op Op) {
+	m.op = op
+}
+
+// Type returns the node type of this mutation (TaskLog).
+func (m *TaskLogMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *TaskLogMutation) Fields() []string {
+	fields := make([]string, 0, 3)
+	if m.started_at != nil {
+		fields = append(fields, tasklog.FieldStartedAt)
+	}
+	if m.finished_at != nil {
+		fields = append(fields, tasklog.FieldFinishedAt)
+	}
+	if m.result != nil {
+		fields = append(fields, tasklog.FieldResult)
+	}
+	return fields
+}
+
+// Field returns the value of a field with the given name. The second boolean
+// return value indicates that this field was not set, or was not defined in the
+// schema.
+func (m *TaskLogMutation) Field(name string) (ent.Value, bool) {
+	switch name {
+	case tasklog.FieldStartedAt:
+		return m.StartedAt()
+	case tasklog.FieldFinishedAt:
+		return m.FinishedAt()
+	case tasklog.FieldResult:
+		return m.Result()
+	}
+	return nil, false
+}
+
+// OldField returns the old value of the field from the database. An error is
+// returned if the mutation operation is not UpdateOne, or the query to the
+// database failed.
+func (m *TaskLogMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
+	switch name {
+	case tasklog.FieldStartedAt:
+		return m.OldStartedAt(ctx)
+	case tasklog.FieldFinishedAt:
+		return m.OldFinishedAt(ctx)
+	case tasklog.FieldResult:
+		return m.OldResult(ctx)
+	}
+	return nil, fmt.Errorf("unknown TaskLog field %s", name)
+}
+
+// SetField sets the value of a field with the given name. It returns an error if
+// the field is not defined in the schema, or if the type mismatched the field
+// type.
+func (m *TaskLogMutation) SetField(name string, value ent.Value) error {
+	switch name {
+	case tasklog.FieldStartedAt:
+		v, ok := value.(time.Time)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetStartedAt(v)
+		return nil
+	case tasklog.FieldFinishedAt:
+		v, ok := value.(time.Time)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetFinishedAt(v)
+		return nil
+	case tasklog.FieldResult:
+		v, ok := value.(uint8)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.SetResult(v)
+		return nil
+	}
+	return fmt.Errorf("unknown TaskLog field %s", name)
+}
+
+// AddedFields returns all numeric fields that were incremented/decremented during
+// this mutation.
+func (m *TaskLogMutation) AddedFields() []string {
+	var fields []string
+	if m.addresult != nil {
+		fields = append(fields, tasklog.FieldResult)
+	}
+	return fields
+}
+
+// AddedField returns the numeric value that was incremented/decremented on a field
+// with the given name. The second boolean return value indicates that this field
+// was not set, or was not defined in the schema.
+func (m *TaskLogMutation) AddedField(name string) (ent.Value, bool) {
+	switch name {
+	case tasklog.FieldResult:
+		return m.AddedResult()
+	}
+	return nil, false
+}
+
+// AddField adds the value to the field with the given name. It returns an error if
+// the field is not defined in the schema, or if the type mismatched the field
+// type.
+func (m *TaskLogMutation) AddField(name string, value ent.Value) error {
+	switch name {
+	case tasklog.FieldResult:
+		v, ok := value.(int8)
+		if !ok {
+			return fmt.Errorf("unexpected type %T for field %s", value, name)
+		}
+		m.AddResult(v)
+		return nil
+	}
+	return fmt.Errorf("unknown TaskLog numeric field %s", name)
+}
+
+// ClearedFields returns all nullable fields that were cleared during this
+// mutation.
+func (m *TaskLogMutation) ClearedFields() []string {
+	return nil
+}
+
+// FieldCleared returns a boolean indicating if a field with the given name was
+// cleared in this mutation.
+func (m *TaskLogMutation) FieldCleared(name string) bool {
+	_, ok := m.clearedFields[name]
+	return ok
+}
+
+// ClearField clears the value of the field with the given name. It returns an
+// error if the field is not defined in the schema.
+func (m *TaskLogMutation) ClearField(name string) error {
+	return fmt.Errorf("unknown TaskLog nullable field %s", name)
+}
+
+// ResetField resets all changes in the mutation for the field with the given name.
+// It returns an error if the field is not defined in the schema.
+func (m *TaskLogMutation) ResetField(name string) error {
+	switch name {
+	case tasklog.FieldStartedAt:
+		m.ResetStartedAt()
+		return nil
+	case tasklog.FieldFinishedAt:
+		m.ResetFinishedAt()
+		return nil
+	case tasklog.FieldResult:
+		m.ResetResult()
+		return nil
+	}
+	return fmt.Errorf("unknown TaskLog field %s", name)
+}
+
+// AddedEdges returns all edge names that were set/added in this mutation.
+func (m *TaskLogMutation) AddedEdges() []string {
+	edges := make([]string, 0, 1)
+	if m.tasks != nil {
+		edges = append(edges, tasklog.EdgeTasks)
+	}
+	return edges
+}
+
+// AddedIDs returns all IDs (to other nodes) that were added for the given edge
+// name in this mutation.
+func (m *TaskLogMutation) AddedIDs(name string) []ent.Value {
+	switch name {
+	case tasklog.EdgeTasks:
+		if id := m.tasks; id != nil {
+			return []ent.Value{*id}
+		}
+	}
+	return nil
+}
+
+// RemovedEdges returns all edge names that were removed in this mutation.
+func (m *TaskLogMutation) RemovedEdges() []string {
+	edges := make([]string, 0, 1)
+	return edges
+}
+
+// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with
+// the given name in this mutation.
+func (m *TaskLogMutation) RemovedIDs(name string) []ent.Value {
+	return nil
+}
+
+// ClearedEdges returns all edge names that were cleared in this mutation.
+func (m *TaskLogMutation) ClearedEdges() []string {
+	edges := make([]string, 0, 1)
+	if m.clearedtasks {
+		edges = append(edges, tasklog.EdgeTasks)
+	}
+	return edges
+}
+
+// EdgeCleared returns a boolean which indicates if the edge with the given name
+// was cleared in this mutation.
+func (m *TaskLogMutation) EdgeCleared(name string) bool {
+	switch name {
+	case tasklog.EdgeTasks:
+		return m.clearedtasks
+	}
+	return false
+}
+
+// ClearEdge clears the value of the edge with the given name. It returns an error
+// if that edge is not defined in the schema.
+func (m *TaskLogMutation) ClearEdge(name string) error {
+	switch name {
+	case tasklog.EdgeTasks:
+		m.ClearTasks()
+		return nil
+	}
+	return fmt.Errorf("unknown TaskLog unique edge %s", name)
+}
+
+// ResetEdge resets all changes to the edge with the given name in this mutation.
+// It returns an error if the edge is not defined in the schema.
+func (m *TaskLogMutation) ResetEdge(name string) error {
+	switch name {
+	case tasklog.EdgeTasks:
+		m.ResetTasks()
+		return nil
+	}
+	return fmt.Errorf("unknown TaskLog edge %s", name)
+}

+ 215 - 0
ent/pagination.go

@@ -0,0 +1,215 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/suyuan32/simple-admin-job/ent/task"
+	"github.com/suyuan32/simple-admin-job/ent/tasklog"
+)
+
+const errInvalidPage = "INVALID_PAGE"
+
+const (
+	listField     = "list"
+	pageNumField  = "pageNum"
+	pageSizeField = "pageSize"
+)
+
+type PageDetails struct {
+	Page  uint64 `json:"page"`
+	Size  uint64 `json:"size"`
+	Total uint64 `json:"total"`
+}
+
+// OrderDirection defines the directions in which to order a list of items.
+type OrderDirection string
+
+const (
+	// OrderDirectionAsc specifies an ascending order.
+	OrderDirectionAsc OrderDirection = "ASC"
+	// OrderDirectionDesc specifies a descending order.
+	OrderDirectionDesc OrderDirection = "DESC"
+)
+
+// Validate reports whether o is one of the two supported order directions,
+// returning a descriptive error otherwise.
+func (o OrderDirection) Validate() error {
+	switch o {
+	case OrderDirectionAsc, OrderDirectionDesc:
+		return nil
+	default:
+		return fmt.Errorf("%s is not a valid OrderDirection", o)
+	}
+}
+
+// String implements fmt.Stringer interface.
+func (o OrderDirection) String() string {
+	return string(o)
+}
+
+// reverse returns the opposite order direction: DESC becomes ASC and
+// anything else (including ASC) becomes DESC.
+func (o OrderDirection) reverse() OrderDirection {
+	switch o {
+	case OrderDirectionDesc:
+		return OrderDirectionAsc
+	default:
+		return OrderDirectionDesc
+	}
+}
+
+const errInvalidPagination = "INVALID_PAGINATION"
+
+type TaskPager struct {
+	Order  task.OrderOption
+	Filter func(*TaskQuery) (*TaskQuery, error)
+}
+
+// TaskPaginateOption enables pagination customization.
+type TaskPaginateOption func(*TaskPager)
+
+// DefaultTaskOrder is the default ordering of Task.
+var DefaultTaskOrder = Desc(task.FieldID)
+
+// newTaskPager builds a TaskPager from the given options, falling back to
+// DefaultTaskOrder when no ordering option was supplied.
+func newTaskPager(opts []TaskPaginateOption) (*TaskPager, error) {
+	pager := new(TaskPager)
+	for _, option := range opts {
+		option(pager)
+	}
+	if pager.Order == nil {
+		pager.Order = DefaultTaskOrder
+	}
+	return pager, nil
+}
+
+// ApplyFilter runs the pager's optional Filter over the query; with no
+// filter configured, the query is returned unchanged.
+func (p *TaskPager) ApplyFilter(query *TaskQuery) (*TaskQuery, error) {
+	if p.Filter == nil {
+		return query, nil
+	}
+	return p.Filter(query)
+}
+
+// TaskPageList is Task PageList result.
+type TaskPageList struct {
+	List        []*Task      `json:"list"`
+	PageDetails *PageDetails `json:"pageDetails"`
+}
+
+// Page runs the query with pagination applied and returns one page of Task
+// rows together with page/size/total details. pageNum is 1-based; a value of
+// 0 is clamped to 1 to avoid uint64 underflow in the offset computation.
+func (t *TaskQuery) Page(
+	ctx context.Context, pageNum uint64, pageSize uint64, opts ...TaskPaginateOption,
+) (*TaskPageList, error) {
+
+	pager, err := newTaskPager(opts)
+	if err != nil {
+		return nil, err
+	}
+
+	if t, err = pager.ApplyFilter(t); err != nil {
+		return nil, err
+	}
+
+	// Guard against uint64 underflow: pageNum is 1-based, so pageNum == 0
+	// would make (pageNum-1)*pageSize wrap around and yield an invalid OFFSET.
+	if pageNum == 0 {
+		pageNum = 1
+	}
+
+	ret := &TaskPageList{
+		PageDetails: &PageDetails{
+			Page: pageNum,
+			Size: pageSize,
+		},
+	}
+
+	// Count on a clone so the pagination query itself stays unmodified.
+	count, err := t.Clone().Count(ctx)
+	if err != nil {
+		return nil, err
+	}
+	ret.PageDetails.Total = uint64(count)
+
+	// newTaskPager guarantees a non-nil Order (DefaultTaskOrder fallback),
+	// so it can be applied unconditionally.
+	t = t.Order(pager.Order)
+
+	t = t.Offset(int((pageNum - 1) * pageSize)).Limit(int(pageSize))
+	list, err := t.All(ctx)
+	if err != nil {
+		return nil, err
+	}
+	ret.List = list
+
+	return ret, nil
+}
+
+type TaskLogPager struct {
+	Order  tasklog.OrderOption
+	Filter func(*TaskLogQuery) (*TaskLogQuery, error)
+}
+
+// TaskLogPaginateOption enables pagination customization.
+type TaskLogPaginateOption func(*TaskLogPager)
+
+// DefaultTaskLogOrder is the default ordering of TaskLog.
+var DefaultTaskLogOrder = Desc(tasklog.FieldID)
+
+// newTaskLogPager builds a TaskLogPager from the given options, falling back
+// to DefaultTaskLogOrder when no ordering option was supplied.
+func newTaskLogPager(opts []TaskLogPaginateOption) (*TaskLogPager, error) {
+	pager := new(TaskLogPager)
+	for _, option := range opts {
+		option(pager)
+	}
+	if pager.Order == nil {
+		pager.Order = DefaultTaskLogOrder
+	}
+	return pager, nil
+}
+
+// ApplyFilter runs the pager's optional Filter over the query; with no
+// filter configured, the query is returned unchanged.
+func (p *TaskLogPager) ApplyFilter(query *TaskLogQuery) (*TaskLogQuery, error) {
+	if p.Filter == nil {
+		return query, nil
+	}
+	return p.Filter(query)
+}
+
+// TaskLogPageList is TaskLog PageList result.
+type TaskLogPageList struct {
+	List        []*TaskLog   `json:"list"`
+	PageDetails *PageDetails `json:"pageDetails"`
+}
+
+// Page runs the query with pagination applied and returns one page of TaskLog
+// rows together with page/size/total details. pageNum is 1-based; a value of
+// 0 is clamped to 1 to avoid uint64 underflow in the offset computation.
+func (tl *TaskLogQuery) Page(
+	ctx context.Context, pageNum uint64, pageSize uint64, opts ...TaskLogPaginateOption,
+) (*TaskLogPageList, error) {
+
+	pager, err := newTaskLogPager(opts)
+	if err != nil {
+		return nil, err
+	}
+
+	if tl, err = pager.ApplyFilter(tl); err != nil {
+		return nil, err
+	}
+
+	// Guard against uint64 underflow: pageNum is 1-based, so pageNum == 0
+	// would make (pageNum-1)*pageSize wrap around and yield an invalid OFFSET.
+	if pageNum == 0 {
+		pageNum = 1
+	}
+
+	ret := &TaskLogPageList{
+		PageDetails: &PageDetails{
+			Page: pageNum,
+			Size: pageSize,
+		},
+	}
+
+	// Count on a clone so the pagination query itself stays unmodified.
+	count, err := tl.Clone().Count(ctx)
+	if err != nil {
+		return nil, err
+	}
+	ret.PageDetails.Total = uint64(count)
+
+	// newTaskLogPager guarantees a non-nil Order (DefaultTaskLogOrder fallback),
+	// so it can be applied unconditionally.
+	tl = tl.Order(pager.Order)
+
+	tl = tl.Offset(int((pageNum - 1) * pageSize)).Limit(int(pageSize))
+	list, err := tl.All(ctx)
+	if err != nil {
+		return nil, err
+	}
+	ret.List = list
+
+	return ret, nil
+}

+ 13 - 0
ent/predicate/predicate.go

@@ -0,0 +1,13 @@
+// Code generated by ent, DO NOT EDIT.
+
+package predicate
+
+import (
+	"entgo.io/ent/dialect/sql"
+)
+
+// Task is the predicate function for task builders.
+type Task func(*sql.Selector)
+
+// TaskLog is the predicate function for tasklog builders.
+type TaskLog func(*sql.Selector)

+ 44 - 0
ent/runtime.go

@@ -0,0 +1,44 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"time"
+
+	"github.com/suyuan32/simple-admin-job/ent/schema"
+	"github.com/suyuan32/simple-admin-job/ent/task"
+	"github.com/suyuan32/simple-admin-job/ent/tasklog"
+)
+
+// The init function reads all schema descriptors with runtime code
+// (default values, validators, hooks and policies) and stitches it
+// to their package variables.
+func init() {
+	taskMixin := schema.Task{}.Mixin()
+	taskMixinFields0 := taskMixin[0].Fields()
+	_ = taskMixinFields0
+	taskMixinFields1 := taskMixin[1].Fields()
+	_ = taskMixinFields1
+	taskFields := schema.Task{}.Fields()
+	_ = taskFields
+	// taskDescCreatedAt is the schema descriptor for created_at field.
+	taskDescCreatedAt := taskMixinFields0[1].Descriptor()
+	// task.DefaultCreatedAt holds the default value on creation for the created_at field.
+	task.DefaultCreatedAt = taskDescCreatedAt.Default.(func() time.Time)
+	// taskDescUpdatedAt is the schema descriptor for updated_at field.
+	taskDescUpdatedAt := taskMixinFields0[2].Descriptor()
+	// task.DefaultUpdatedAt holds the default value on creation for the updated_at field.
+	task.DefaultUpdatedAt = taskDescUpdatedAt.Default.(func() time.Time)
+	// task.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
+	task.UpdateDefaultUpdatedAt = taskDescUpdatedAt.UpdateDefault.(func() time.Time)
+	// taskDescStatus is the schema descriptor for status field.
+	taskDescStatus := taskMixinFields1[0].Descriptor()
+	// task.DefaultStatus holds the default value on creation for the status field.
+	task.DefaultStatus = taskDescStatus.Default.(uint8)
+	tasklogFields := schema.TaskLog{}.Fields()
+	_ = tasklogFields
+	// tasklogDescStartedAt is the schema descriptor for started_at field.
+	tasklogDescStartedAt := tasklogFields[1].Descriptor()
+	// tasklog.DefaultStartedAt holds the default value on creation for the started_at field.
+	tasklog.DefaultStartedAt = tasklogDescStartedAt.Default.(func() time.Time)
+}

+ 10 - 0
ent/runtime/runtime.go

@@ -0,0 +1,10 @@
+// Code generated by ent, DO NOT EDIT.
+
+package runtime
+
+// The schema-stitching logic is generated in github.com/suyuan32/simple-admin-job/ent/runtime.go
+
+const (
+	Version = "v0.13.1"                                         // Version of ent codegen.
+	Sum     = "h1:uD8QwN1h6SNphdCCzmkMN3feSUzNnVvV/WIkHKMbzOE=" // Sum of ent codegen.
+)

+ 53 - 0
ent/schema/task.go

@@ -0,0 +1,53 @@
+package schema
+
+import (
+	"entgo.io/ent"
+	"entgo.io/ent/dialect/entsql"
+	"entgo.io/ent/schema"
+	"entgo.io/ent/schema/edge"
+	"entgo.io/ent/schema/field"
+	"entgo.io/ent/schema/index"
+	"github.com/suyuan32/simple-admin-common/orm/ent/mixins"
+)
+
+// Task holds the schema definition for the Task entity
+// (a scheduled cron job definition).
+type Task struct {
+	ent.Schema
+}
+
+// Fields of the Task.
+func (Task) Fields() []ent.Field {
+	return []ent.Field{
+		field.String("name").Comment("Task Name | 任务名称"),
+		field.String("task_group").Comment("Task Group | 任务分组"),
+		field.String("cron_expression").Comment("Cron expression | 定时任务表达式"),
+		field.String("pattern").Comment("Cron Pattern | 任务的模式 (用于区分和确定要执行的任务)"),
+		field.String("payload").Comment("The data used in cron (JSON string) | 任务需要的数据(JSON 字符串)"),
+	}
+}
+
+// Mixin of the Task. Per the generated runtime stitching, IDMixin contributes
+// created_at/updated_at defaults (and presumably the id field — confirm in
+// simple-admin-common) and StatusMixin contributes the status field default.
+func (Task) Mixin() []ent.Mixin {
+	return []ent.Mixin{
+		mixins.IDMixin{},
+		mixins.StatusMixin{},
+	}
+}
+
+// Edges of the Task: a task owns many task_logs (one-to-many).
+func (Task) Edges() []ent.Edge {
+	return []ent.Edge{
+		edge.To("task_logs", TaskLog.Type),
+	}
+}
+
+// Indexes of the Task: pattern must be unique, since it identifies
+// which job implementation to execute.
+func (Task) Indexes() []ent.Index {
+	return []ent.Index{
+		index.Fields("pattern").Unique(),
+	}
+}
+
+// Annotations of the Task: store rows in the "sys_tasks" table.
+func (Task) Annotations() []schema.Annotation {
+	return []schema.Annotation{
+		entsql.Annotation{Table: "sys_tasks"},
+	}
+}

+ 43 - 0
ent/schema/task_log.go

@@ -0,0 +1,43 @@
+package schema
+
+import (
+	"entgo.io/ent"
+	"entgo.io/ent/dialect/entsql"
+	"entgo.io/ent/schema"
+	"entgo.io/ent/schema/edge"
+	"entgo.io/ent/schema/field"
+	"time"
+)
+
+// TaskLog holds the schema definition for the TaskLog entity
+// (one execution record of a Task).
+type TaskLog struct {
+	ent.Schema
+}
+
+// Fields of the TaskLog.
+func (TaskLog) Fields() []ent.Field {
+	return []ent.Field{
+		// Explicit uint64 primary key; unlike Task, this schema does not use
+		// the shared ID mixin.
+		field.Uint64("id"),
+		field.Time("started_at").Immutable().
+			Default(time.Now).
+			Comment("Task Started Time | 任务启动时间").
+			Annotations(entsql.WithComments(true)),
+		field.Time("finished_at").Comment("Task Finished Time | 任务完成时间").
+			Annotations(entsql.WithComments(true)),
+		field.Uint8("result").Comment("The Task Process Result | 任务执行结果").
+			Annotations(entsql.WithComments(true)),
+	}
+}
+
+// Edges of the TaskLog: each log belongs to at most one Task through the
+// back-reference of Task.task_logs (Unique makes this side to-one).
+func (TaskLog) Edges() []ent.Edge {
+	return []ent.Edge{
+		edge.From("tasks", Task.Type).Ref("task_logs").Unique(),
+	}
+}
+
+// Annotations of the TaskLog: store rows in the "sys_task_logs" table.
+func (TaskLog) Annotations() []schema.Annotation {
+	return []schema.Annotation{
+		entsql.Annotation{Table: "sys_task_logs"},
+	}
+}

+ 221 - 0
ent/set_not_nil.go

@@ -0,0 +1,221 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import "time"
+
+// set field if value's pointer is not nil.
+func (t *TaskUpdate) SetNotNilUpdatedAt(value *time.Time) *TaskUpdate {
+	if value != nil {
+		return t.SetUpdatedAt(*value)
+	}
+	return t
+}
+
+// set field if value's pointer is not nil.
+func (t *TaskUpdateOne) SetNotNilUpdatedAt(value *time.Time) *TaskUpdateOne {
+	if value != nil {
+		return t.SetUpdatedAt(*value)
+	}
+	return t
+}
+
+// set field if value's pointer is not nil.
+func (t *TaskCreate) SetNotNilUpdatedAt(value *time.Time) *TaskCreate {
+	if value != nil {
+		return t.SetUpdatedAt(*value)
+	}
+	return t
+}
+
+// set field if value's pointer is not nil.
+func (t *TaskUpdate) SetNotNilStatus(value *uint8) *TaskUpdate {
+	if value != nil {
+		return t.SetStatus(*value)
+	}
+	return t
+}
+
+// set field if value's pointer is not nil.
+func (t *TaskUpdateOne) SetNotNilStatus(value *uint8) *TaskUpdateOne {
+	if value != nil {
+		return t.SetStatus(*value)
+	}
+	return t
+}
+
+// set field if value's pointer is not nil.
+func (t *TaskCreate) SetNotNilStatus(value *uint8) *TaskCreate {
+	if value != nil {
+		return t.SetStatus(*value)
+	}
+	return t
+}
+
+// set field if value's pointer is not nil.
+func (t *TaskUpdate) SetNotNilName(value *string) *TaskUpdate {
+	if value != nil {
+		return t.SetName(*value)
+	}
+	return t
+}
+
+// set field if value's pointer is not nil.
+func (t *TaskUpdateOne) SetNotNilName(value *string) *TaskUpdateOne {
+	if value != nil {
+		return t.SetName(*value)
+	}
+	return t
+}
+
+// set field if value's pointer is not nil.
+func (t *TaskCreate) SetNotNilName(value *string) *TaskCreate {
+	if value != nil {
+		return t.SetName(*value)
+	}
+	return t
+}
+
+// set field if value's pointer is not nil.
+func (t *TaskUpdate) SetNotNilTaskGroup(value *string) *TaskUpdate {
+	if value != nil {
+		return t.SetTaskGroup(*value)
+	}
+	return t
+}
+
+// set field if value's pointer is not nil.
+func (t *TaskUpdateOne) SetNotNilTaskGroup(value *string) *TaskUpdateOne {
+	if value != nil {
+		return t.SetTaskGroup(*value)
+	}
+	return t
+}
+
+// set field if value's pointer is not nil.
+func (t *TaskCreate) SetNotNilTaskGroup(value *string) *TaskCreate {
+	if value != nil {
+		return t.SetTaskGroup(*value)
+	}
+	return t
+}
+
+// set field if value's pointer is not nil.
+func (t *TaskUpdate) SetNotNilCronExpression(value *string) *TaskUpdate {
+	if value != nil {
+		return t.SetCronExpression(*value)
+	}
+	return t
+}
+
+// set field if value's pointer is not nil.
+func (t *TaskUpdateOne) SetNotNilCronExpression(value *string) *TaskUpdateOne {
+	if value != nil {
+		return t.SetCronExpression(*value)
+	}
+	return t
+}
+
+// set field if value's pointer is not nil.
+func (t *TaskCreate) SetNotNilCronExpression(value *string) *TaskCreate {
+	if value != nil {
+		return t.SetCronExpression(*value)
+	}
+	return t
+}
+
+// set field if value's pointer is not nil.
+func (t *TaskUpdate) SetNotNilPattern(value *string) *TaskUpdate {
+	if value != nil {
+		return t.SetPattern(*value)
+	}
+	return t
+}
+
+// set field if value's pointer is not nil.
+func (t *TaskUpdateOne) SetNotNilPattern(value *string) *TaskUpdateOne {
+	if value != nil {
+		return t.SetPattern(*value)
+	}
+	return t
+}
+
+// set field if value's pointer is not nil.
+func (t *TaskCreate) SetNotNilPattern(value *string) *TaskCreate {
+	if value != nil {
+		return t.SetPattern(*value)
+	}
+	return t
+}
+
+// set field if value's pointer is not nil.
+func (t *TaskUpdate) SetNotNilPayload(value *string) *TaskUpdate {
+	if value != nil {
+		return t.SetPayload(*value)
+	}
+	return t
+}
+
+// set field if value's pointer is not nil.
+func (t *TaskUpdateOne) SetNotNilPayload(value *string) *TaskUpdateOne {
+	if value != nil {
+		return t.SetPayload(*value)
+	}
+	return t
+}
+
+// set field if value's pointer is not nil.
+func (t *TaskCreate) SetNotNilPayload(value *string) *TaskCreate {
+	if value != nil {
+		return t.SetPayload(*value)
+	}
+	return t
+}
+
+// set field if value's pointer is not nil.
+func (tl *TaskLogUpdate) SetNotNilFinishedAt(value *time.Time) *TaskLogUpdate {
+	if value != nil {
+		return tl.SetFinishedAt(*value)
+	}
+	return tl
+}
+
+// set field if value's pointer is not nil.
+func (tl *TaskLogUpdateOne) SetNotNilFinishedAt(value *time.Time) *TaskLogUpdateOne {
+	if value != nil {
+		return tl.SetFinishedAt(*value)
+	}
+	return tl
+}
+
+// set field if value's pointer is not nil.
+func (tl *TaskLogCreate) SetNotNilFinishedAt(value *time.Time) *TaskLogCreate {
+	if value != nil {
+		return tl.SetFinishedAt(*value)
+	}
+	return tl
+}
+
+// set field if value's pointer is not nil.
+func (tl *TaskLogUpdate) SetNotNilResult(value *uint8) *TaskLogUpdate {
+	if value != nil {
+		return tl.SetResult(*value)
+	}
+	return tl
+}
+
+// set field if value's pointer is not nil.
+func (tl *TaskLogUpdateOne) SetNotNilResult(value *uint8) *TaskLogUpdateOne {
+	if value != nil {
+		return tl.SetResult(*value)
+	}
+	return tl
+}
+
+// set field if value's pointer is not nil.
+func (tl *TaskLogCreate) SetNotNilResult(value *uint8) *TaskLogCreate {
+	if value != nil {
+		return tl.SetResult(*value)
+	}
+	return tl
+}

+ 209 - 0
ent/task.go

@@ -0,0 +1,209 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect/sql"
+	"github.com/suyuan32/simple-admin-job/ent/task"
+)
+
+// Task is the model entity for the Task schema.
+type Task struct {
+	config `json:"-"`
+	// ID of the ent.
+	ID uint64 `json:"id,omitempty"`
+	// Create Time | 创建日期
+	CreatedAt time.Time `json:"created_at,omitempty"`
+	// Update Time | 修改日期
+	UpdatedAt time.Time `json:"updated_at,omitempty"`
+	// Status 1: normal 2: ban | 状态 1 正常 2 禁用
+	Status uint8 `json:"status,omitempty"`
+	// Task Name | 任务名称
+	Name string `json:"name,omitempty"`
+	// Task Group | 任务分组
+	TaskGroup string `json:"task_group,omitempty"`
+	// Cron expression | 定时任务表达式
+	CronExpression string `json:"cron_expression,omitempty"`
+	// Cron Pattern | 任务的模式 (用于区分和确定要执行的任务)
+	Pattern string `json:"pattern,omitempty"`
+	// The data used in cron (JSON string) | 任务需要的数据(JSON 字符串)
+	Payload string `json:"payload,omitempty"`
+	// Edges holds the relations/edges for other nodes in the graph.
+	// The values are being populated by the TaskQuery when eager-loading is set.
+	Edges        TaskEdges `json:"edges"`
+	selectValues sql.SelectValues // dynamically selected values (modifiers, order); exposed via Value()
+}
+
+// TaskEdges holds the relations/edges for other nodes in the graph.
+type TaskEdges struct {
+	// TaskLogs holds the value of the task_logs edge.
+	TaskLogs []*TaskLog `json:"task_logs,omitempty"`
+	// loadedTypes holds the information for reporting if a
+	// type was loaded (or requested) in eager-loading or not.
+	loadedTypes [1]bool // index 0 reports whether the task_logs edge was loaded
+}
+
+// TaskLogsOrErr returns the TaskLogs value or an error if the edge
+// was not loaded in eager-loading.
+func (e TaskEdges) TaskLogsOrErr() ([]*TaskLog, error) {
+	if e.loadedTypes[0] {
+		return e.TaskLogs, nil
+	}
+	return nil, &NotLoadedError{edge: "task_logs"} // edge was not eager-loaded
+}
+
+// scanValues returns the types for scanning values from sql.Rows.
+func (*Task) scanValues(columns []string) ([]any, error) {
+	values := make([]any, len(columns))
+	for i := range columns {
+		switch columns[i] {
+		case task.FieldID, task.FieldStatus:
+			values[i] = new(sql.NullInt64) // integer columns
+		case task.FieldName, task.FieldTaskGroup, task.FieldCronExpression, task.FieldPattern, task.FieldPayload:
+			values[i] = new(sql.NullString) // string columns
+		case task.FieldCreatedAt, task.FieldUpdatedAt:
+			values[i] = new(sql.NullTime) // timestamp columns
+		default:
+			values[i] = new(sql.UnknownType) // unrecognized column; kept and exposed via Value()
+		}
+	}
+	return values, nil
+}
+
+// assignValues assigns the values that were returned from sql.Rows (after scanning)
+// to the Task fields.
+func (t *Task) assignValues(columns []string, values []any) error {
+	if m, n := len(values), len(columns); m < n { // only fewer values than columns is an error; extras are tolerated
+		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
+	}
+	for i := range columns {
+		switch columns[i] {
+		case task.FieldID:
+			value, ok := values[i].(*sql.NullInt64)
+			if !ok {
+				return fmt.Errorf("unexpected type %T for field id", value)
+			}
+			t.ID = uint64(value.Int64)
+		case task.FieldCreatedAt:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field created_at", values[i])
+			} else if value.Valid {
+				t.CreatedAt = value.Time
+			}
+		case task.FieldUpdatedAt:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
+			} else if value.Valid {
+				t.UpdatedAt = value.Time
+			}
+		case task.FieldStatus:
+			if value, ok := values[i].(*sql.NullInt64); !ok {
+				return fmt.Errorf("unexpected type %T for field status", values[i])
+			} else if value.Valid {
+				t.Status = uint8(value.Int64)
+			}
+		case task.FieldName:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field name", values[i])
+			} else if value.Valid {
+				t.Name = value.String
+			}
+		case task.FieldTaskGroup:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field task_group", values[i])
+			} else if value.Valid {
+				t.TaskGroup = value.String
+			}
+		case task.FieldCronExpression:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field cron_expression", values[i])
+			} else if value.Valid {
+				t.CronExpression = value.String
+			}
+		case task.FieldPattern:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field pattern", values[i])
+			} else if value.Valid {
+				t.Pattern = value.String
+			}
+		case task.FieldPayload:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field payload", values[i])
+			} else if value.Valid {
+				t.Payload = value.String
+			}
+		default:
+			t.selectValues.Set(columns[i], values[i]) // unknown column: retained, readable via Value()
+		}
+	}
+	return nil
+}
+
+// Value returns the ent.Value that was dynamically selected and assigned to the Task.
+// This includes values selected through modifiers, order, etc.
+func (t *Task) Value(name string) (ent.Value, error) {
+	return t.selectValues.Get(name)
+}
+
+// QueryTaskLogs queries the "task_logs" edge of the Task entity.
+func (t *Task) QueryTaskLogs() *TaskLogQuery {
+	return NewTaskClient(t.config).QueryTaskLogs(t)
+}
+
+// Update returns a builder for updating this Task.
+// Note that you need to call Task.Unwrap() before calling this method if this Task
+// was returned from a transaction, and the transaction was committed or rolled back.
+func (t *Task) Update() *TaskUpdateOne {
+	return NewTaskClient(t.config).UpdateOne(t)
+}
+
+// Unwrap unwraps the Task entity that was returned from a transaction after it was closed,
+// so that all future queries will be executed through the driver which created the transaction.
+func (t *Task) Unwrap() *Task {
+	_tx, ok := t.config.driver.(*txDriver)
+	if !ok {
+		panic("ent: Task is not a transactional entity")
+	}
+	t.config.driver = _tx.drv // swap the tx driver back to the underlying driver
+	return t
+}
+
+// String implements the fmt.Stringer.
+func (t *Task) String() string {
+	var builder strings.Builder // renders as: Task(id=..., created_at=..., ..., payload=...)
+	builder.WriteString("Task(")
+	builder.WriteString(fmt.Sprintf("id=%v, ", t.ID))
+	builder.WriteString("created_at=")
+	builder.WriteString(t.CreatedAt.Format(time.ANSIC))
+	builder.WriteString(", ")
+	builder.WriteString("updated_at=")
+	builder.WriteString(t.UpdatedAt.Format(time.ANSIC))
+	builder.WriteString(", ")
+	builder.WriteString("status=")
+	builder.WriteString(fmt.Sprintf("%v", t.Status))
+	builder.WriteString(", ")
+	builder.WriteString("name=")
+	builder.WriteString(t.Name)
+	builder.WriteString(", ")
+	builder.WriteString("task_group=")
+	builder.WriteString(t.TaskGroup)
+	builder.WriteString(", ")
+	builder.WriteString("cron_expression=")
+	builder.WriteString(t.CronExpression)
+	builder.WriteString(", ")
+	builder.WriteString("pattern=")
+	builder.WriteString(t.Pattern)
+	builder.WriteString(", ")
+	builder.WriteString("payload=")
+	builder.WriteString(t.Payload)
+	builder.WriteByte(')')
+	return builder.String()
+}
+
+// Tasks is a parsable slice of Task.
+type Tasks []*Task

+ 147 - 0
ent/task/task.go

@@ -0,0 +1,147 @@
+// Code generated by ent, DO NOT EDIT.
+
+package task
+
+import (
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+)
+
+const (
+	// Label holds the string label denoting the task type in the database.
+	Label = "task"
+	// FieldID holds the string denoting the id field in the database.
+	FieldID = "id"
+	// FieldCreatedAt holds the string denoting the created_at field in the database.
+	FieldCreatedAt = "created_at"
+	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
+	FieldUpdatedAt = "updated_at"
+	// FieldStatus holds the string denoting the status field in the database.
+	FieldStatus = "status"
+	// FieldName holds the string denoting the name field in the database.
+	FieldName = "name"
+	// FieldTaskGroup holds the string denoting the task_group field in the database.
+	FieldTaskGroup = "task_group"
+	// FieldCronExpression holds the string denoting the cron_expression field in the database.
+	FieldCronExpression = "cron_expression"
+	// FieldPattern holds the string denoting the pattern field in the database.
+	FieldPattern = "pattern"
+	// FieldPayload holds the string denoting the payload field in the database.
+	FieldPayload = "payload"
+	// EdgeTaskLogs holds the string denoting the task_logs edge name in mutations.
+	EdgeTaskLogs = "task_logs"
+	// Table holds the table name of the task in the database.
+	Table = "sys_tasks"
+	// TaskLogsTable is the table that holds the task_logs relation/edge.
+	TaskLogsTable = "sys_task_logs" // O2M edge rows live in the TaskLog table itself
+	// TaskLogsInverseTable is the table name for the TaskLog entity.
+	// It exists in this package in order to avoid circular dependency with the "tasklog" package.
+	TaskLogsInverseTable = "sys_task_logs"
+	// TaskLogsColumn is the table column denoting the task_logs relation/edge.
+	TaskLogsColumn = "task_task_logs" // edge (owner-id) column on the TaskLog table
+)
+
+// Columns holds all SQL columns for task fields.
+var Columns = []string{
+	FieldID,
+	FieldCreatedAt,
+	FieldUpdatedAt,
+	FieldStatus,
+	FieldName,
+	FieldTaskGroup,
+	FieldCronExpression,
+	FieldPattern,
+	FieldPayload,
+}
+
+// ValidColumn reports if the column name is valid (part of the table columns).
+func ValidColumn(column string) bool {
+	for i := range Columns { // linear scan; Columns is a small fixed set
+		if column == Columns[i] {
+			return true
+		}
+	}
+	return false
+}
+
+var (
+	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
+	DefaultCreatedAt func() time.Time // NOTE(review): these vars are assigned at init by the generated runtime package, not here — confirm in ent/runtime.go
+	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
+	DefaultUpdatedAt func() time.Time
+	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
+	UpdateDefaultUpdatedAt func() time.Time
+	// DefaultStatus holds the default value on creation for the "status" field.
+	DefaultStatus uint8
+)
+
+// OrderOption defines the ordering options for the Task queries.
+type OrderOption func(*sql.Selector)
+
+// ByID orders the results by the id field.
+func ByID(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldID, opts...).ToFunc()
+}
+
+// ByCreatedAt orders the results by the created_at field.
+func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
+}
+
+// ByUpdatedAt orders the results by the updated_at field.
+func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
+}
+
+// ByStatus orders the results by the status field.
+func ByStatus(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldStatus, opts...).ToFunc()
+}
+
+// ByName orders the results by the name field.
+func ByName(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldName, opts...).ToFunc()
+}
+
+// ByTaskGroup orders the results by the task_group field.
+func ByTaskGroup(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldTaskGroup, opts...).ToFunc()
+}
+
+// ByCronExpression orders the results by the cron_expression field.
+func ByCronExpression(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldCronExpression, opts...).ToFunc()
+}
+
+// ByPattern orders the results by the pattern field.
+func ByPattern(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldPattern, opts...).ToFunc()
+}
+
+// ByPayload orders the results by the payload field.
+func ByPayload(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldPayload, opts...).ToFunc()
+}
+
+// ByTaskLogsCount orders the results by task_logs count.
+func ByTaskLogsCount(opts ...sql.OrderTermOption) OrderOption {
+	return func(s *sql.Selector) {
+		sqlgraph.OrderByNeighborsCount(s, newTaskLogsStep(), opts...) // order by the number of related task_logs rows
+	}
+}
+
+// ByTaskLogs orders the results by task_logs terms.
+func ByTaskLogs(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
+	return func(s *sql.Selector) {
+		sqlgraph.OrderByNeighborTerms(s, newTaskLogsStep(), append([]sql.OrderTerm{term}, terms...)...)
+	}
+}
+func newTaskLogsStep() *sqlgraph.Step { // graph step describing the task -> task_logs edge
+	return sqlgraph.NewStep(
+		sqlgraph.From(Table, FieldID),
+		sqlgraph.To(TaskLogsInverseTable, FieldID),
+		sqlgraph.Edge(sqlgraph.O2M, false, TaskLogsTable, TaskLogsColumn),
+	)
+}

+ 589 - 0
ent/task/where.go

@@ -0,0 +1,589 @@
+// Code generated by ent, DO NOT EDIT.
+
+package task
+
+import (
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"github.com/suyuan32/simple-admin-job/ent/predicate"
+)
+
+// ID filters vertices based on their ID field. It's identical to IDEQ.
+func ID(id uint64) predicate.Task {
+	return predicate.Task(sql.FieldEQ(FieldID, id))
+}
+
+// IDEQ applies the EQ predicate on the ID field.
+func IDEQ(id uint64) predicate.Task {
+	return predicate.Task(sql.FieldEQ(FieldID, id))
+}
+
+// IDNEQ applies the NEQ predicate on the ID field.
+func IDNEQ(id uint64) predicate.Task {
+	return predicate.Task(sql.FieldNEQ(FieldID, id))
+}
+
+// IDIn applies the In predicate on the ID field.
+func IDIn(ids ...uint64) predicate.Task {
+	return predicate.Task(sql.FieldIn(FieldID, ids...))
+}
+
+// IDNotIn applies the NotIn predicate on the ID field.
+func IDNotIn(ids ...uint64) predicate.Task {
+	return predicate.Task(sql.FieldNotIn(FieldID, ids...))
+}
+
+// IDGT applies the GT predicate on the ID field.
+func IDGT(id uint64) predicate.Task {
+	return predicate.Task(sql.FieldGT(FieldID, id))
+}
+
+// IDGTE applies the GTE predicate on the ID field.
+func IDGTE(id uint64) predicate.Task {
+	return predicate.Task(sql.FieldGTE(FieldID, id))
+}
+
+// IDLT applies the LT predicate on the ID field.
+func IDLT(id uint64) predicate.Task {
+	return predicate.Task(sql.FieldLT(FieldID, id))
+}
+
+// IDLTE applies the LTE predicate on the ID field.
+func IDLTE(id uint64) predicate.Task {
+	return predicate.Task(sql.FieldLTE(FieldID, id))
+}
+
+// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
+func CreatedAt(v time.Time) predicate.Task {
+	return predicate.Task(sql.FieldEQ(FieldCreatedAt, v))
+}
+
+// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
+func UpdatedAt(v time.Time) predicate.Task {
+	return predicate.Task(sql.FieldEQ(FieldUpdatedAt, v))
+}
+
+// Status applies equality check predicate on the "status" field. It's identical to StatusEQ.
+func Status(v uint8) predicate.Task {
+	return predicate.Task(sql.FieldEQ(FieldStatus, v))
+}
+
+// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
+func Name(v string) predicate.Task {
+	return predicate.Task(sql.FieldEQ(FieldName, v))
+}
+
+// TaskGroup applies equality check predicate on the "task_group" field. It's identical to TaskGroupEQ.
+func TaskGroup(v string) predicate.Task {
+	return predicate.Task(sql.FieldEQ(FieldTaskGroup, v))
+}
+
+// CronExpression applies equality check predicate on the "cron_expression" field. It's identical to CronExpressionEQ.
+func CronExpression(v string) predicate.Task {
+	return predicate.Task(sql.FieldEQ(FieldCronExpression, v))
+}
+
+// Pattern applies equality check predicate on the "pattern" field. It's identical to PatternEQ.
+func Pattern(v string) predicate.Task {
+	return predicate.Task(sql.FieldEQ(FieldPattern, v))
+}
+
+// Payload applies equality check predicate on the "payload" field. It's identical to PayloadEQ.
+func Payload(v string) predicate.Task {
+	return predicate.Task(sql.FieldEQ(FieldPayload, v))
+}
+
+// CreatedAtEQ applies the EQ predicate on the "created_at" field.
+func CreatedAtEQ(v time.Time) predicate.Task {
+	return predicate.Task(sql.FieldEQ(FieldCreatedAt, v))
+}
+
+// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
+func CreatedAtNEQ(v time.Time) predicate.Task {
+	return predicate.Task(sql.FieldNEQ(FieldCreatedAt, v))
+}
+
+// CreatedAtIn applies the In predicate on the "created_at" field.
+func CreatedAtIn(vs ...time.Time) predicate.Task {
+	return predicate.Task(sql.FieldIn(FieldCreatedAt, vs...))
+}
+
+// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
+func CreatedAtNotIn(vs ...time.Time) predicate.Task {
+	return predicate.Task(sql.FieldNotIn(FieldCreatedAt, vs...))
+}
+
+// CreatedAtGT applies the GT predicate on the "created_at" field.
+func CreatedAtGT(v time.Time) predicate.Task {
+	return predicate.Task(sql.FieldGT(FieldCreatedAt, v))
+}
+
+// CreatedAtGTE applies the GTE predicate on the "created_at" field.
+func CreatedAtGTE(v time.Time) predicate.Task {
+	return predicate.Task(sql.FieldGTE(FieldCreatedAt, v))
+}
+
+// CreatedAtLT applies the LT predicate on the "created_at" field.
+func CreatedAtLT(v time.Time) predicate.Task {
+	return predicate.Task(sql.FieldLT(FieldCreatedAt, v))
+}
+
+// CreatedAtLTE applies the LTE predicate on the "created_at" field.
+func CreatedAtLTE(v time.Time) predicate.Task {
+	return predicate.Task(sql.FieldLTE(FieldCreatedAt, v))
+}
+
+// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
+func UpdatedAtEQ(v time.Time) predicate.Task {
+	return predicate.Task(sql.FieldEQ(FieldUpdatedAt, v))
+}
+
+// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
+func UpdatedAtNEQ(v time.Time) predicate.Task {
+	return predicate.Task(sql.FieldNEQ(FieldUpdatedAt, v))
+}
+
+// UpdatedAtIn applies the In predicate on the "updated_at" field.
+func UpdatedAtIn(vs ...time.Time) predicate.Task {
+	return predicate.Task(sql.FieldIn(FieldUpdatedAt, vs...))
+}
+
+// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
+func UpdatedAtNotIn(vs ...time.Time) predicate.Task {
+	return predicate.Task(sql.FieldNotIn(FieldUpdatedAt, vs...))
+}
+
+// UpdatedAtGT applies the GT predicate on the "updated_at" field.
+func UpdatedAtGT(v time.Time) predicate.Task {
+	return predicate.Task(sql.FieldGT(FieldUpdatedAt, v))
+}
+
+// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
+func UpdatedAtGTE(v time.Time) predicate.Task {
+	return predicate.Task(sql.FieldGTE(FieldUpdatedAt, v))
+}
+
+// UpdatedAtLT applies the LT predicate on the "updated_at" field.
+func UpdatedAtLT(v time.Time) predicate.Task {
+	return predicate.Task(sql.FieldLT(FieldUpdatedAt, v))
+}
+
+// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
+func UpdatedAtLTE(v time.Time) predicate.Task {
+	return predicate.Task(sql.FieldLTE(FieldUpdatedAt, v))
+}
+
+// StatusEQ applies the EQ predicate on the "status" field.
+func StatusEQ(v uint8) predicate.Task {
+	return predicate.Task(sql.FieldEQ(FieldStatus, v))
+}
+
+// StatusNEQ applies the NEQ predicate on the "status" field.
+func StatusNEQ(v uint8) predicate.Task {
+	return predicate.Task(sql.FieldNEQ(FieldStatus, v))
+}
+
+// StatusIn applies the In predicate on the "status" field.
+func StatusIn(vs ...uint8) predicate.Task {
+	return predicate.Task(sql.FieldIn(FieldStatus, vs...))
+}
+
+// StatusNotIn applies the NotIn predicate on the "status" field.
+func StatusNotIn(vs ...uint8) predicate.Task {
+	return predicate.Task(sql.FieldNotIn(FieldStatus, vs...))
+}
+
+// StatusGT applies the GT predicate on the "status" field.
+func StatusGT(v uint8) predicate.Task {
+	return predicate.Task(sql.FieldGT(FieldStatus, v))
+}
+
+// StatusGTE applies the GTE predicate on the "status" field.
+func StatusGTE(v uint8) predicate.Task {
+	return predicate.Task(sql.FieldGTE(FieldStatus, v))
+}
+
+// StatusLT applies the LT predicate on the "status" field.
+func StatusLT(v uint8) predicate.Task {
+	return predicate.Task(sql.FieldLT(FieldStatus, v))
+}
+
+// StatusLTE applies the LTE predicate on the "status" field.
+func StatusLTE(v uint8) predicate.Task {
+	return predicate.Task(sql.FieldLTE(FieldStatus, v))
+}
+
+// StatusIsNil applies the IsNil predicate on the "status" field.
+func StatusIsNil() predicate.Task {
+	return predicate.Task(sql.FieldIsNull(FieldStatus)) // status is the only Task field with NULL predicates here — presumably optional in the schema
+}
+
+// StatusNotNil applies the NotNil predicate on the "status" field.
+func StatusNotNil() predicate.Task {
+	return predicate.Task(sql.FieldNotNull(FieldStatus))
+}
+
+// NameEQ applies the EQ predicate on the "name" field.
+func NameEQ(v string) predicate.Task {
+	return predicate.Task(sql.FieldEQ(FieldName, v))
+}
+
+// NameNEQ applies the NEQ predicate on the "name" field.
+func NameNEQ(v string) predicate.Task {
+	return predicate.Task(sql.FieldNEQ(FieldName, v))
+}
+
+// NameIn applies the In predicate on the "name" field.
+func NameIn(vs ...string) predicate.Task {
+	return predicate.Task(sql.FieldIn(FieldName, vs...))
+}
+
+// NameNotIn applies the NotIn predicate on the "name" field.
+func NameNotIn(vs ...string) predicate.Task {
+	return predicate.Task(sql.FieldNotIn(FieldName, vs...))
+}
+
+// NameGT applies the GT predicate on the "name" field.
+func NameGT(v string) predicate.Task {
+	return predicate.Task(sql.FieldGT(FieldName, v))
+}
+
+// NameGTE applies the GTE predicate on the "name" field.
+func NameGTE(v string) predicate.Task {
+	return predicate.Task(sql.FieldGTE(FieldName, v))
+}
+
+// NameLT applies the LT predicate on the "name" field.
+func NameLT(v string) predicate.Task {
+	return predicate.Task(sql.FieldLT(FieldName, v))
+}
+
+// NameLTE applies the LTE predicate on the "name" field.
+func NameLTE(v string) predicate.Task {
+	return predicate.Task(sql.FieldLTE(FieldName, v))
+}
+
+// NameContains applies the Contains predicate on the "name" field.
+func NameContains(v string) predicate.Task {
+	return predicate.Task(sql.FieldContains(FieldName, v))
+}
+
+// NameHasPrefix applies the HasPrefix predicate on the "name" field.
+func NameHasPrefix(v string) predicate.Task {
+	return predicate.Task(sql.FieldHasPrefix(FieldName, v))
+}
+
+// NameHasSuffix applies the HasSuffix predicate on the "name" field.
+func NameHasSuffix(v string) predicate.Task {
+	return predicate.Task(sql.FieldHasSuffix(FieldName, v))
+}
+
+// NameEqualFold applies the EqualFold predicate on the "name" field.
+func NameEqualFold(v string) predicate.Task {
+	return predicate.Task(sql.FieldEqualFold(FieldName, v))
+}
+
+// NameContainsFold applies the ContainsFold predicate on the "name" field.
+func NameContainsFold(v string) predicate.Task {
+	return predicate.Task(sql.FieldContainsFold(FieldName, v))
+}
+
+// TaskGroupEQ applies the EQ predicate on the "task_group" field.
+func TaskGroupEQ(v string) predicate.Task {
+	return predicate.Task(sql.FieldEQ(FieldTaskGroup, v))
+}
+
+// TaskGroupNEQ applies the NEQ predicate on the "task_group" field.
+func TaskGroupNEQ(v string) predicate.Task {
+	return predicate.Task(sql.FieldNEQ(FieldTaskGroup, v))
+}
+
+// TaskGroupIn applies the In predicate on the "task_group" field.
+func TaskGroupIn(vs ...string) predicate.Task {
+	return predicate.Task(sql.FieldIn(FieldTaskGroup, vs...))
+}
+
+// TaskGroupNotIn applies the NotIn predicate on the "task_group" field.
+func TaskGroupNotIn(vs ...string) predicate.Task {
+	return predicate.Task(sql.FieldNotIn(FieldTaskGroup, vs...))
+}
+
+// TaskGroupGT applies the GT predicate on the "task_group" field.
+func TaskGroupGT(v string) predicate.Task {
+	return predicate.Task(sql.FieldGT(FieldTaskGroup, v))
+}
+
+// TaskGroupGTE applies the GTE predicate on the "task_group" field.
+func TaskGroupGTE(v string) predicate.Task {
+	return predicate.Task(sql.FieldGTE(FieldTaskGroup, v))
+}
+
+// TaskGroupLT applies the LT predicate on the "task_group" field.
+func TaskGroupLT(v string) predicate.Task {
+	return predicate.Task(sql.FieldLT(FieldTaskGroup, v))
+}
+
+// TaskGroupLTE applies the LTE predicate on the "task_group" field.
+func TaskGroupLTE(v string) predicate.Task {
+	return predicate.Task(sql.FieldLTE(FieldTaskGroup, v))
+}
+
+// TaskGroupContains applies the Contains predicate on the "task_group" field.
+func TaskGroupContains(v string) predicate.Task {
+	return predicate.Task(sql.FieldContains(FieldTaskGroup, v))
+}
+
+// TaskGroupHasPrefix applies the HasPrefix predicate on the "task_group" field.
+func TaskGroupHasPrefix(v string) predicate.Task {
+	return predicate.Task(sql.FieldHasPrefix(FieldTaskGroup, v))
+}
+
+// TaskGroupHasSuffix applies the HasSuffix predicate on the "task_group" field.
+func TaskGroupHasSuffix(v string) predicate.Task {
+	return predicate.Task(sql.FieldHasSuffix(FieldTaskGroup, v))
+}
+
+// TaskGroupEqualFold applies the EqualFold predicate on the "task_group" field.
+func TaskGroupEqualFold(v string) predicate.Task {
+	return predicate.Task(sql.FieldEqualFold(FieldTaskGroup, v))
+}
+
+// TaskGroupContainsFold applies the ContainsFold predicate on the "task_group" field.
+func TaskGroupContainsFold(v string) predicate.Task {
+	return predicate.Task(sql.FieldContainsFold(FieldTaskGroup, v))
+}
+
+// CronExpressionEQ applies the EQ predicate on the "cron_expression" field.
+func CronExpressionEQ(v string) predicate.Task {
+	return predicate.Task(sql.FieldEQ(FieldCronExpression, v))
+}
+
+// CronExpressionNEQ applies the NEQ predicate on the "cron_expression" field.
+func CronExpressionNEQ(v string) predicate.Task {
+	return predicate.Task(sql.FieldNEQ(FieldCronExpression, v))
+}
+
+// CronExpressionIn applies the In predicate on the "cron_expression" field.
+func CronExpressionIn(vs ...string) predicate.Task {
+	return predicate.Task(sql.FieldIn(FieldCronExpression, vs...))
+}
+
+// CronExpressionNotIn applies the NotIn predicate on the "cron_expression" field.
+func CronExpressionNotIn(vs ...string) predicate.Task {
+	return predicate.Task(sql.FieldNotIn(FieldCronExpression, vs...))
+}
+
+// CronExpressionGT applies the GT predicate on the "cron_expression" field.
+func CronExpressionGT(v string) predicate.Task {
+	return predicate.Task(sql.FieldGT(FieldCronExpression, v))
+}
+
+// CronExpressionGTE applies the GTE predicate on the "cron_expression" field.
+func CronExpressionGTE(v string) predicate.Task {
+	return predicate.Task(sql.FieldGTE(FieldCronExpression, v))
+}
+
+// CronExpressionLT applies the LT predicate on the "cron_expression" field.
+func CronExpressionLT(v string) predicate.Task {
+	return predicate.Task(sql.FieldLT(FieldCronExpression, v))
+}
+
+// CronExpressionLTE applies the LTE predicate on the "cron_expression" field.
+func CronExpressionLTE(v string) predicate.Task {
+	return predicate.Task(sql.FieldLTE(FieldCronExpression, v))
+}
+
+// CronExpressionContains applies the Contains predicate on the "cron_expression" field.
+func CronExpressionContains(v string) predicate.Task {
+	return predicate.Task(sql.FieldContains(FieldCronExpression, v))
+}
+
+// CronExpressionHasPrefix applies the HasPrefix predicate on the "cron_expression" field.
+func CronExpressionHasPrefix(v string) predicate.Task {
+	return predicate.Task(sql.FieldHasPrefix(FieldCronExpression, v))
+}
+
+// CronExpressionHasSuffix applies the HasSuffix predicate on the "cron_expression" field.
+func CronExpressionHasSuffix(v string) predicate.Task {
+	return predicate.Task(sql.FieldHasSuffix(FieldCronExpression, v))
+}
+
+// CronExpressionEqualFold applies the EqualFold predicate on the "cron_expression" field.
+func CronExpressionEqualFold(v string) predicate.Task {
+	return predicate.Task(sql.FieldEqualFold(FieldCronExpression, v))
+}
+
+// CronExpressionContainsFold applies the ContainsFold predicate on the "cron_expression" field.
+func CronExpressionContainsFold(v string) predicate.Task {
+	return predicate.Task(sql.FieldContainsFold(FieldCronExpression, v))
+}
+
+// PatternEQ applies the EQ predicate on the "pattern" field.
+func PatternEQ(v string) predicate.Task {
+	return predicate.Task(sql.FieldEQ(FieldPattern, v))
+}
+
+// PatternNEQ applies the NEQ predicate on the "pattern" field.
+func PatternNEQ(v string) predicate.Task {
+	return predicate.Task(sql.FieldNEQ(FieldPattern, v))
+}
+
+// PatternIn applies the In predicate on the "pattern" field.
+func PatternIn(vs ...string) predicate.Task {
+	return predicate.Task(sql.FieldIn(FieldPattern, vs...))
+}
+
+// PatternNotIn applies the NotIn predicate on the "pattern" field.
+func PatternNotIn(vs ...string) predicate.Task {
+	return predicate.Task(sql.FieldNotIn(FieldPattern, vs...))
+}
+
+// PatternGT applies the GT predicate on the "pattern" field.
+func PatternGT(v string) predicate.Task {
+	return predicate.Task(sql.FieldGT(FieldPattern, v))
+}
+
+// PatternGTE applies the GTE predicate on the "pattern" field.
+func PatternGTE(v string) predicate.Task {
+	return predicate.Task(sql.FieldGTE(FieldPattern, v))
+}
+
+// PatternLT applies the LT predicate on the "pattern" field.
+func PatternLT(v string) predicate.Task {
+	return predicate.Task(sql.FieldLT(FieldPattern, v))
+}
+
+// PatternLTE applies the LTE predicate on the "pattern" field.
+func PatternLTE(v string) predicate.Task {
+	return predicate.Task(sql.FieldLTE(FieldPattern, v))
+}
+
+// PatternContains applies the Contains predicate on the "pattern" field.
+func PatternContains(v string) predicate.Task {
+	return predicate.Task(sql.FieldContains(FieldPattern, v))
+}
+
+// PatternHasPrefix applies the HasPrefix predicate on the "pattern" field.
+func PatternHasPrefix(v string) predicate.Task {
+	return predicate.Task(sql.FieldHasPrefix(FieldPattern, v))
+}
+
+// PatternHasSuffix applies the HasSuffix predicate on the "pattern" field.
+func PatternHasSuffix(v string) predicate.Task {
+	return predicate.Task(sql.FieldHasSuffix(FieldPattern, v))
+}
+
+// PatternEqualFold applies the EqualFold predicate on the "pattern" field.
+func PatternEqualFold(v string) predicate.Task {
+	return predicate.Task(sql.FieldEqualFold(FieldPattern, v))
+}
+
+// PatternContainsFold applies the ContainsFold predicate on the "pattern" field.
+func PatternContainsFold(v string) predicate.Task {
+	return predicate.Task(sql.FieldContainsFold(FieldPattern, v))
+}
+
+// PayloadEQ applies the EQ predicate on the "payload" field.
+func PayloadEQ(v string) predicate.Task {
+	return predicate.Task(sql.FieldEQ(FieldPayload, v))
+}
+
+// PayloadNEQ applies the NEQ predicate on the "payload" field.
+func PayloadNEQ(v string) predicate.Task {
+	return predicate.Task(sql.FieldNEQ(FieldPayload, v))
+}
+
+// PayloadIn applies the In predicate on the "payload" field.
+func PayloadIn(vs ...string) predicate.Task {
+	return predicate.Task(sql.FieldIn(FieldPayload, vs...))
+}
+
+// PayloadNotIn applies the NotIn predicate on the "payload" field.
+func PayloadNotIn(vs ...string) predicate.Task {
+	return predicate.Task(sql.FieldNotIn(FieldPayload, vs...))
+}
+
+// PayloadGT applies the GT predicate on the "payload" field.
+func PayloadGT(v string) predicate.Task {
+	return predicate.Task(sql.FieldGT(FieldPayload, v))
+}
+
+// PayloadGTE applies the GTE predicate on the "payload" field.
+func PayloadGTE(v string) predicate.Task {
+	return predicate.Task(sql.FieldGTE(FieldPayload, v))
+}
+
+// PayloadLT applies the LT predicate on the "payload" field.
+func PayloadLT(v string) predicate.Task {
+	return predicate.Task(sql.FieldLT(FieldPayload, v))
+}
+
+// PayloadLTE applies the LTE predicate on the "payload" field.
+func PayloadLTE(v string) predicate.Task {
+	return predicate.Task(sql.FieldLTE(FieldPayload, v))
+}
+
+// PayloadContains applies the Contains predicate on the "payload" field.
+func PayloadContains(v string) predicate.Task {
+	return predicate.Task(sql.FieldContains(FieldPayload, v))
+}
+
+// PayloadHasPrefix applies the HasPrefix predicate on the "payload" field.
+func PayloadHasPrefix(v string) predicate.Task {
+	return predicate.Task(sql.FieldHasPrefix(FieldPayload, v))
+}
+
+// PayloadHasSuffix applies the HasSuffix predicate on the "payload" field.
+func PayloadHasSuffix(v string) predicate.Task {
+	return predicate.Task(sql.FieldHasSuffix(FieldPayload, v))
+}
+
+// PayloadEqualFold applies the EqualFold predicate on the "payload" field.
+func PayloadEqualFold(v string) predicate.Task {
+	return predicate.Task(sql.FieldEqualFold(FieldPayload, v))
+}
+
+// PayloadContainsFold applies the ContainsFold predicate on the "payload" field.
+func PayloadContainsFold(v string) predicate.Task {
+	return predicate.Task(sql.FieldContainsFold(FieldPayload, v))
+}
+
+// HasTaskLogs applies the HasEdge predicate on the "task_logs" edge.
+func HasTaskLogs() predicate.Task {
+	return predicate.Task(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.Edge(sqlgraph.O2M, false, TaskLogsTable, TaskLogsColumn),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasTaskLogsWith applies the HasEdge predicate on the "task_logs" edge with a given conditions (other predicates).
+func HasTaskLogsWith(preds ...predicate.TaskLog) predicate.Task {
+	return predicate.Task(func(s *sql.Selector) {
+		step := newTaskLogsStep() // reuse the package's shared edge step
+		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+			for _, p := range preds {
+				p(s)
+			}
+		})
+	})
+}
+
+// And groups predicates with the AND operator between them.
+func And(predicates ...predicate.Task) predicate.Task {
+	return predicate.Task(sql.AndPredicates(predicates...))
+}
+
+// Or groups predicates with the OR operator between them.
+func Or(predicates ...predicate.Task) predicate.Task {
+	return predicate.Task(sql.OrPredicates(predicates...))
+}
+
+// Not applies the not operator on the given predicate.
+func Not(p predicate.Task) predicate.Task {
+	return predicate.Task(sql.NotPredicates(p))
+}

+ 358 - 0
ent/task_create.go

@@ -0,0 +1,358 @@
+// Code generated by ent, DO NOT EDIT.
+
+// NOTE(review): do not hand-edit — change ent/schema/task.go and re-run
+// `go generate ./ent`; any manual change here will be lost on regeneration.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/suyuan32/simple-admin-job/ent/task"
+	"github.com/suyuan32/simple-admin-job/ent/tasklog"
+)
+
+// TaskCreate is the builder for creating a Task entity.
+type TaskCreate struct {
+	config
+	mutation *TaskMutation
+	hooks    []Hook
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (tc *TaskCreate) SetCreatedAt(t time.Time) *TaskCreate {
+	tc.mutation.SetCreatedAt(t)
+	return tc
+}
+
+// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
+func (tc *TaskCreate) SetNillableCreatedAt(t *time.Time) *TaskCreate {
+	if t != nil {
+		tc.SetCreatedAt(*t)
+	}
+	return tc
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (tc *TaskCreate) SetUpdatedAt(t time.Time) *TaskCreate {
+	tc.mutation.SetUpdatedAt(t)
+	return tc
+}
+
+// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
+func (tc *TaskCreate) SetNillableUpdatedAt(t *time.Time) *TaskCreate {
+	if t != nil {
+		tc.SetUpdatedAt(*t)
+	}
+	return tc
+}
+
+// SetStatus sets the "status" field.
+func (tc *TaskCreate) SetStatus(u uint8) *TaskCreate {
+	tc.mutation.SetStatus(u)
+	return tc
+}
+
+// SetNillableStatus sets the "status" field if the given value is not nil.
+func (tc *TaskCreate) SetNillableStatus(u *uint8) *TaskCreate {
+	if u != nil {
+		tc.SetStatus(*u)
+	}
+	return tc
+}
+
+// SetName sets the "name" field.
+func (tc *TaskCreate) SetName(s string) *TaskCreate {
+	tc.mutation.SetName(s)
+	return tc
+}
+
+// SetTaskGroup sets the "task_group" field.
+func (tc *TaskCreate) SetTaskGroup(s string) *TaskCreate {
+	tc.mutation.SetTaskGroup(s)
+	return tc
+}
+
+// SetCronExpression sets the "cron_expression" field.
+func (tc *TaskCreate) SetCronExpression(s string) *TaskCreate {
+	tc.mutation.SetCronExpression(s)
+	return tc
+}
+
+// SetPattern sets the "pattern" field.
+func (tc *TaskCreate) SetPattern(s string) *TaskCreate {
+	tc.mutation.SetPattern(s)
+	return tc
+}
+
+// SetPayload sets the "payload" field.
+func (tc *TaskCreate) SetPayload(s string) *TaskCreate {
+	tc.mutation.SetPayload(s)
+	return tc
+}
+
+// SetID sets the "id" field.
+func (tc *TaskCreate) SetID(u uint64) *TaskCreate {
+	tc.mutation.SetID(u)
+	return tc
+}
+
+// AddTaskLogIDs adds the "task_logs" edge to the TaskLog entity by IDs.
+func (tc *TaskCreate) AddTaskLogIDs(ids ...uint64) *TaskCreate {
+	tc.mutation.AddTaskLogIDs(ids...)
+	return tc
+}
+
+// AddTaskLogs adds the "task_logs" edges to the TaskLog entity.
+func (tc *TaskCreate) AddTaskLogs(t ...*TaskLog) *TaskCreate {
+	ids := make([]uint64, len(t))
+	for i := range t {
+		ids[i] = t[i].ID
+	}
+	return tc.AddTaskLogIDs(ids...)
+}
+
+// Mutation returns the TaskMutation object of the builder.
+func (tc *TaskCreate) Mutation() *TaskMutation {
+	return tc.mutation
+}
+
+// Save creates the Task in the database.
+func (tc *TaskCreate) Save(ctx context.Context) (*Task, error) {
+	// Defaults (created_at/updated_at/status) are applied before hooks run.
+	tc.defaults()
+	return withHooks(ctx, tc.sqlSave, tc.mutation, tc.hooks)
+}
+
+// SaveX calls Save and panics if Save returns an error.
+func (tc *TaskCreate) SaveX(ctx context.Context) *Task {
+	v, err := tc.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (tc *TaskCreate) Exec(ctx context.Context) error {
+	_, err := tc.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (tc *TaskCreate) ExecX(ctx context.Context) {
+	if err := tc.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// defaults sets the default values of the builder before save.
+func (tc *TaskCreate) defaults() {
+	if _, ok := tc.mutation.CreatedAt(); !ok {
+		v := task.DefaultCreatedAt()
+		tc.mutation.SetCreatedAt(v)
+	}
+	if _, ok := tc.mutation.UpdatedAt(); !ok {
+		v := task.DefaultUpdatedAt()
+		tc.mutation.SetUpdatedAt(v)
+	}
+	if _, ok := tc.mutation.Status(); !ok {
+		v := task.DefaultStatus
+		tc.mutation.SetStatus(v)
+	}
+}
+
+// check runs all checks and user-defined validators on the builder.
+func (tc *TaskCreate) check() error {
+	if _, ok := tc.mutation.CreatedAt(); !ok {
+		return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Task.created_at"`)}
+	}
+	if _, ok := tc.mutation.UpdatedAt(); !ok {
+		return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Task.updated_at"`)}
+	}
+	if _, ok := tc.mutation.Name(); !ok {
+		return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "Task.name"`)}
+	}
+	if _, ok := tc.mutation.TaskGroup(); !ok {
+		return &ValidationError{Name: "task_group", err: errors.New(`ent: missing required field "Task.task_group"`)}
+	}
+	if _, ok := tc.mutation.CronExpression(); !ok {
+		return &ValidationError{Name: "cron_expression", err: errors.New(`ent: missing required field "Task.cron_expression"`)}
+	}
+	if _, ok := tc.mutation.Pattern(); !ok {
+		return &ValidationError{Name: "pattern", err: errors.New(`ent: missing required field "Task.pattern"`)}
+	}
+	if _, ok := tc.mutation.Payload(); !ok {
+		return &ValidationError{Name: "payload", err: errors.New(`ent: missing required field "Task.payload"`)}
+	}
+	return nil
+}
+
+func (tc *TaskCreate) sqlSave(ctx context.Context) (*Task, error) {
+	if err := tc.check(); err != nil {
+		return nil, err
+	}
+	_node, _spec := tc.createSpec()
+	if err := sqlgraph.CreateNode(ctx, tc.driver, _spec); err != nil {
+		if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return nil, err
+	}
+	// When the ID was auto-generated, the SQL driver reports it as int64;
+	// convert it back to the schema's uint64 ID type.
+	if _spec.ID.Value != _node.ID {
+		id := _spec.ID.Value.(int64)
+		_node.ID = uint64(id)
+	}
+	tc.mutation.id = &_node.ID
+	tc.mutation.done = true
+	return _node, nil
+}
+
+func (tc *TaskCreate) createSpec() (*Task, *sqlgraph.CreateSpec) {
+	var (
+		_node = &Task{config: tc.config}
+		_spec = sqlgraph.NewCreateSpec(task.Table, sqlgraph.NewFieldSpec(task.FieldID, field.TypeUint64))
+	)
+	if id, ok := tc.mutation.ID(); ok {
+		_node.ID = id
+		_spec.ID.Value = id
+	}
+	if value, ok := tc.mutation.CreatedAt(); ok {
+		_spec.SetField(task.FieldCreatedAt, field.TypeTime, value)
+		_node.CreatedAt = value
+	}
+	if value, ok := tc.mutation.UpdatedAt(); ok {
+		_spec.SetField(task.FieldUpdatedAt, field.TypeTime, value)
+		_node.UpdatedAt = value
+	}
+	if value, ok := tc.mutation.Status(); ok {
+		_spec.SetField(task.FieldStatus, field.TypeUint8, value)
+		_node.Status = value
+	}
+	if value, ok := tc.mutation.Name(); ok {
+		_spec.SetField(task.FieldName, field.TypeString, value)
+		_node.Name = value
+	}
+	if value, ok := tc.mutation.TaskGroup(); ok {
+		_spec.SetField(task.FieldTaskGroup, field.TypeString, value)
+		_node.TaskGroup = value
+	}
+	if value, ok := tc.mutation.CronExpression(); ok {
+		_spec.SetField(task.FieldCronExpression, field.TypeString, value)
+		_node.CronExpression = value
+	}
+	if value, ok := tc.mutation.Pattern(); ok {
+		_spec.SetField(task.FieldPattern, field.TypeString, value)
+		_node.Pattern = value
+	}
+	if value, ok := tc.mutation.Payload(); ok {
+		_spec.SetField(task.FieldPayload, field.TypeString, value)
+		_node.Payload = value
+	}
+	if nodes := tc.mutation.TaskLogsIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   task.TaskLogsTable,
+			Columns: []string{task.TaskLogsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(tasklog.FieldID, field.TypeUint64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	return _node, _spec
+}
+
+// TaskCreateBulk is the builder for creating many Task entities in bulk.
+type TaskCreateBulk struct {
+	config
+	err      error
+	builders []*TaskCreate
+}
+
+// Save creates the Task entities in the database.
+func (tcb *TaskCreateBulk) Save(ctx context.Context) ([]*Task, error) {
+	if tcb.err != nil {
+		return nil, tcb.err
+	}
+	specs := make([]*sqlgraph.CreateSpec, len(tcb.builders))
+	nodes := make([]*Task, len(tcb.builders))
+	mutators := make([]Mutator, len(tcb.builders))
+	// Each builder is wrapped in a Mutator; mutator i triggers mutator i+1, so
+	// every builder's hooks run before the single batch INSERT issued by the
+	// last mutator in the chain.
+	for i := range tcb.builders {
+		func(i int, root context.Context) {
+			builder := tcb.builders[i]
+			builder.defaults()
+			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+				mutation, ok := m.(*TaskMutation)
+				if !ok {
+					return nil, fmt.Errorf("unexpected mutation type %T", m)
+				}
+				if err := builder.check(); err != nil {
+					return nil, err
+				}
+				builder.mutation = mutation
+				var err error
+				nodes[i], specs[i] = builder.createSpec()
+				if i < len(mutators)-1 {
+					_, err = mutators[i+1].Mutate(root, tcb.builders[i+1].mutation)
+				} else {
+					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+					// Invoke the actual operation on the latest mutation in the chain.
+					if err = sqlgraph.BatchCreate(ctx, tcb.driver, spec); err != nil {
+						if sqlgraph.IsConstraintError(err) {
+							err = &ConstraintError{msg: err.Error(), wrap: err}
+						}
+					}
+				}
+				if err != nil {
+					return nil, err
+				}
+				mutation.id = &nodes[i].ID
+				if specs[i].ID.Value != nil && nodes[i].ID == 0 {
+					id := specs[i].ID.Value.(int64)
+					nodes[i].ID = uint64(id)
+				}
+				mutation.done = true
+				return nodes[i], nil
+			})
+			for i := len(builder.hooks) - 1; i >= 0; i-- {
+				mut = builder.hooks[i](mut)
+			}
+			mutators[i] = mut
+		}(i, ctx)
+	}
+	if len(mutators) > 0 {
+		if _, err := mutators[0].Mutate(ctx, tcb.builders[0].mutation); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (tcb *TaskCreateBulk) SaveX(ctx context.Context) []*Task {
+	v, err := tcb.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (tcb *TaskCreateBulk) Exec(ctx context.Context) error {
+	_, err := tcb.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (tcb *TaskCreateBulk) ExecX(ctx context.Context) {
+	if err := tcb.Exec(ctx); err != nil {
+		panic(err)
+	}
+}

+ 88 - 0
ent/task_delete.go

@@ -0,0 +1,88 @@
+// Code generated by ent, DO NOT EDIT.
+
+// NOTE(review): do not hand-edit — change ent/schema/task.go and re-run
+// `go generate ./ent`; any manual change here will be lost on regeneration.
+
+package ent
+
+import (
+	"context"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/suyuan32/simple-admin-job/ent/predicate"
+	"github.com/suyuan32/simple-admin-job/ent/task"
+)
+
+// TaskDelete is the builder for deleting a Task entity.
+type TaskDelete struct {
+	config
+	hooks    []Hook
+	mutation *TaskMutation
+}
+
+// Where appends a list predicates to the TaskDelete builder.
+func (td *TaskDelete) Where(ps ...predicate.Task) *TaskDelete {
+	td.mutation.Where(ps...)
+	return td
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
+func (td *TaskDelete) Exec(ctx context.Context) (int, error) {
+	return withHooks(ctx, td.sqlExec, td.mutation, td.hooks)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (td *TaskDelete) ExecX(ctx context.Context) int {
+	n, err := td.Exec(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return n
+}
+
+func (td *TaskDelete) sqlExec(ctx context.Context) (int, error) {
+	_spec := sqlgraph.NewDeleteSpec(task.Table, sqlgraph.NewFieldSpec(task.FieldID, field.TypeUint64))
+	if ps := td.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	affected, err := sqlgraph.DeleteNodes(ctx, td.driver, _spec)
+	// Constraint violations are wrapped so callers can detect them via
+	// IsConstraintError on the returned *ConstraintError.
+	if err != nil && sqlgraph.IsConstraintError(err) {
+		err = &ConstraintError{msg: err.Error(), wrap: err}
+	}
+	td.mutation.done = true
+	return affected, err
+}
+
+// TaskDeleteOne is the builder for deleting a single Task entity.
+type TaskDeleteOne struct {
+	td *TaskDelete
+}
+
+// Where appends a list predicates to the TaskDelete builder.
+func (tdo *TaskDeleteOne) Where(ps ...predicate.Task) *TaskDeleteOne {
+	tdo.td.mutation.Where(ps...)
+	return tdo
+}
+
+// Exec executes the deletion query.
+func (tdo *TaskDeleteOne) Exec(ctx context.Context) error {
+	n, err := tdo.td.Exec(ctx)
+	switch {
+	case err != nil:
+		return err
+	// Deleting zero rows is surfaced as NotFoundError for the single-entity API.
+	case n == 0:
+		return &NotFoundError{task.Label}
+	default:
+		return nil
+	}
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (tdo *TaskDeleteOne) ExecX(ctx context.Context) {
+	if err := tdo.Exec(ctx); err != nil {
+		panic(err)
+	}
+}

+ 606 - 0
ent/task_query.go

@@ -0,0 +1,606 @@
+// Code generated by ent, DO NOT EDIT.
+
+// NOTE(review): do not hand-edit — change ent/schema/task.go and re-run
+// `go generate ./ent`; any manual change here will be lost on regeneration.
+
+package ent
+
+import (
+	"context"
+	"database/sql/driver"
+	"fmt"
+	"math"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/suyuan32/simple-admin-job/ent/predicate"
+	"github.com/suyuan32/simple-admin-job/ent/task"
+	"github.com/suyuan32/simple-admin-job/ent/tasklog"
+)
+
+// TaskQuery is the builder for querying Task entities.
+type TaskQuery struct {
+	config
+	ctx          *QueryContext
+	order        []task.OrderOption
+	inters       []Interceptor
+	predicates   []predicate.Task
+	withTaskLogs *TaskLogQuery
+	// intermediate query (i.e. traversal path).
+	sql  *sql.Selector
+	path func(context.Context) (*sql.Selector, error)
+}
+
+// Where adds a new predicate for the TaskQuery builder.
+func (tq *TaskQuery) Where(ps ...predicate.Task) *TaskQuery {
+	tq.predicates = append(tq.predicates, ps...)
+	return tq
+}
+
+// Limit the number of records to be returned by this query.
+func (tq *TaskQuery) Limit(limit int) *TaskQuery {
+	tq.ctx.Limit = &limit
+	return tq
+}
+
+// Offset to start from.
+func (tq *TaskQuery) Offset(offset int) *TaskQuery {
+	tq.ctx.Offset = &offset
+	return tq
+}
+
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
+func (tq *TaskQuery) Unique(unique bool) *TaskQuery {
+	tq.ctx.Unique = &unique
+	return tq
+}
+
+// Order specifies how the records should be ordered.
+func (tq *TaskQuery) Order(o ...task.OrderOption) *TaskQuery {
+	tq.order = append(tq.order, o...)
+	return tq
+}
+
+// QueryTaskLogs chains the current query on the "task_logs" edge.
+func (tq *TaskQuery) QueryTaskLogs() *TaskLogQuery {
+	query := (&TaskLogClient{config: tq.config}).Query()
+	// The traversal path is resolved lazily, when the returned query executes.
+	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
+		if err := tq.prepareQuery(ctx); err != nil {
+			return nil, err
+		}
+		selector := tq.sqlQuery(ctx)
+		if err := selector.Err(); err != nil {
+			return nil, err
+		}
+		step := sqlgraph.NewStep(
+			sqlgraph.From(task.Table, task.FieldID, selector),
+			sqlgraph.To(tasklog.Table, tasklog.FieldID),
+			sqlgraph.Edge(sqlgraph.O2M, false, task.TaskLogsTable, task.TaskLogsColumn),
+		)
+		fromU = sqlgraph.SetNeighbors(tq.driver.Dialect(), step)
+		return fromU, nil
+	}
+	return query
+}
+
+// First returns the first Task entity from the query.
+// Returns a *NotFoundError when no Task was found.
+func (tq *TaskQuery) First(ctx context.Context) (*Task, error) {
+	nodes, err := tq.Limit(1).All(setContextOp(ctx, tq.ctx, "First"))
+	if err != nil {
+		return nil, err
+	}
+	if len(nodes) == 0 {
+		return nil, &NotFoundError{task.Label}
+	}
+	return nodes[0], nil
+}
+
+// FirstX is like First, but panics if an error occurs.
+func (tq *TaskQuery) FirstX(ctx context.Context) *Task {
+	node, err := tq.First(ctx)
+	// Not-found is tolerated here: FirstX returns nil instead of panicking.
+	if err != nil && !IsNotFound(err) {
+		panic(err)
+	}
+	return node
+}
+
+// FirstID returns the first Task ID from the query.
+// Returns a *NotFoundError when no Task ID was found.
+func (tq *TaskQuery) FirstID(ctx context.Context) (id uint64, err error) {
+	var ids []uint64
+	if ids, err = tq.Limit(1).IDs(setContextOp(ctx, tq.ctx, "FirstID")); err != nil {
+		return
+	}
+	if len(ids) == 0 {
+		err = &NotFoundError{task.Label}
+		return
+	}
+	return ids[0], nil
+}
+
+// FirstIDX is like FirstID, but panics if an error occurs.
+func (tq *TaskQuery) FirstIDX(ctx context.Context) uint64 {
+	id, err := tq.FirstID(ctx)
+	if err != nil && !IsNotFound(err) {
+		panic(err)
+	}
+	return id
+}
+
+// Only returns a single Task entity found by the query, ensuring it only returns one.
+// Returns a *NotSingularError when more than one Task entity is found.
+// Returns a *NotFoundError when no Task entities are found.
+func (tq *TaskQuery) Only(ctx context.Context) (*Task, error) {
+	// Limit(2) is enough to distinguish "exactly one" from "more than one".
+	nodes, err := tq.Limit(2).All(setContextOp(ctx, tq.ctx, "Only"))
+	if err != nil {
+		return nil, err
+	}
+	switch len(nodes) {
+	case 1:
+		return nodes[0], nil
+	case 0:
+		return nil, &NotFoundError{task.Label}
+	default:
+		return nil, &NotSingularError{task.Label}
+	}
+}
+
+// OnlyX is like Only, but panics if an error occurs.
+func (tq *TaskQuery) OnlyX(ctx context.Context) *Task {
+	node, err := tq.Only(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return node
+}
+
+// OnlyID is like Only, but returns the only Task ID in the query.
+// Returns a *NotSingularError when more than one Task ID is found.
+// Returns a *NotFoundError when no entities are found.
+func (tq *TaskQuery) OnlyID(ctx context.Context) (id uint64, err error) {
+	var ids []uint64
+	if ids, err = tq.Limit(2).IDs(setContextOp(ctx, tq.ctx, "OnlyID")); err != nil {
+		return
+	}
+	switch len(ids) {
+	case 1:
+		id = ids[0]
+	case 0:
+		err = &NotFoundError{task.Label}
+	default:
+		err = &NotSingularError{task.Label}
+	}
+	return
+}
+
+// OnlyIDX is like OnlyID, but panics if an error occurs.
+func (tq *TaskQuery) OnlyIDX(ctx context.Context) uint64 {
+	id, err := tq.OnlyID(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return id
+}
+
+// All executes the query and returns a list of Tasks.
+func (tq *TaskQuery) All(ctx context.Context) ([]*Task, error) {
+	ctx = setContextOp(ctx, tq.ctx, "All")
+	if err := tq.prepareQuery(ctx); err != nil {
+		return nil, err
+	}
+	qr := querierAll[[]*Task, *TaskQuery]()
+	return withInterceptors[[]*Task](ctx, tq, qr, tq.inters)
+}
+
+// AllX is like All, but panics if an error occurs.
+func (tq *TaskQuery) AllX(ctx context.Context) []*Task {
+	nodes, err := tq.All(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return nodes
+}
+
+// IDs executes the query and returns a list of Task IDs.
+func (tq *TaskQuery) IDs(ctx context.Context) (ids []uint64, err error) {
+	if tq.ctx.Unique == nil && tq.path != nil {
+		tq.Unique(true)
+	}
+	ctx = setContextOp(ctx, tq.ctx, "IDs")
+	if err = tq.Select(task.FieldID).Scan(ctx, &ids); err != nil {
+		return nil, err
+	}
+	return ids, nil
+}
+
+// IDsX is like IDs, but panics if an error occurs.
+func (tq *TaskQuery) IDsX(ctx context.Context) []uint64 {
+	ids, err := tq.IDs(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return ids
+}
+
+// Count returns the count of the given query.
+func (tq *TaskQuery) Count(ctx context.Context) (int, error) {
+	ctx = setContextOp(ctx, tq.ctx, "Count")
+	if err := tq.prepareQuery(ctx); err != nil {
+		return 0, err
+	}
+	return withInterceptors[int](ctx, tq, querierCount[*TaskQuery](), tq.inters)
+}
+
+// CountX is like Count, but panics if an error occurs.
+func (tq *TaskQuery) CountX(ctx context.Context) int {
+	count, err := tq.Count(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return count
+}
+
+// Exist returns true if the query has elements in the graph.
+func (tq *TaskQuery) Exist(ctx context.Context) (bool, error) {
+	ctx = setContextOp(ctx, tq.ctx, "Exist")
+	switch _, err := tq.FirstID(ctx); {
+	case IsNotFound(err):
+		return false, nil
+	case err != nil:
+		return false, fmt.Errorf("ent: check existence: %w", err)
+	default:
+		return true, nil
+	}
+}
+
+// ExistX is like Exist, but panics if an error occurs.
+func (tq *TaskQuery) ExistX(ctx context.Context) bool {
+	exist, err := tq.Exist(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return exist
+}
+
+// Clone returns a duplicate of the TaskQuery builder, including all associated steps. It can be
+// used to prepare common query builders and use them differently after the clone is made.
+func (tq *TaskQuery) Clone() *TaskQuery {
+	if tq == nil {
+		return nil
+	}
+	return &TaskQuery{
+		config:       tq.config,
+		ctx:          tq.ctx.Clone(),
+		order:        append([]task.OrderOption{}, tq.order...),
+		inters:       append([]Interceptor{}, tq.inters...),
+		predicates:   append([]predicate.Task{}, tq.predicates...),
+		withTaskLogs: tq.withTaskLogs.Clone(),
+		// clone intermediate query.
+		sql:  tq.sql.Clone(),
+		path: tq.path,
+	}
+}
+
+// WithTaskLogs tells the query-builder to eager-load the nodes that are connected to
+// the "task_logs" edge. The optional arguments are used to configure the query builder of the edge.
+func (tq *TaskQuery) WithTaskLogs(opts ...func(*TaskLogQuery)) *TaskQuery {
+	query := (&TaskLogClient{config: tq.config}).Query()
+	for _, opt := range opts {
+		opt(query)
+	}
+	tq.withTaskLogs = query
+	return tq
+}
+
+// GroupBy is used to group vertices by one or more fields/columns.
+// It is often used with aggregate functions, like: count, max, mean, min, sum.
+//
+// Example:
+//
+//	var v []struct {
+//		CreatedAt time.Time `json:"created_at,omitempty"`
+//		Count int `json:"count,omitempty"`
+//	}
+//
+//	client.Task.Query().
+//		GroupBy(task.FieldCreatedAt).
+//		Aggregate(ent.Count()).
+//		Scan(ctx, &v)
+func (tq *TaskQuery) GroupBy(field string, fields ...string) *TaskGroupBy {
+	tq.ctx.Fields = append([]string{field}, fields...)
+	grbuild := &TaskGroupBy{build: tq}
+	grbuild.flds = &tq.ctx.Fields
+	grbuild.label = task.Label
+	grbuild.scan = grbuild.Scan
+	return grbuild
+}
+
+// Select allows the selection one or more fields/columns for the given query,
+// instead of selecting all fields in the entity.
+//
+// Example:
+//
+//	var v []struct {
+//		CreatedAt time.Time `json:"created_at,omitempty"`
+//	}
+//
+//	client.Task.Query().
+//		Select(task.FieldCreatedAt).
+//		Scan(ctx, &v)
+func (tq *TaskQuery) Select(fields ...string) *TaskSelect {
+	tq.ctx.Fields = append(tq.ctx.Fields, fields...)
+	sbuild := &TaskSelect{TaskQuery: tq}
+	sbuild.label = task.Label
+	sbuild.flds, sbuild.scan = &tq.ctx.Fields, sbuild.Scan
+	return sbuild
+}
+
+// Aggregate returns a TaskSelect configured with the given aggregations.
+func (tq *TaskQuery) Aggregate(fns ...AggregateFunc) *TaskSelect {
+	return tq.Select().Aggregate(fns...)
+}
+
+func (tq *TaskQuery) prepareQuery(ctx context.Context) error {
+	for _, inter := range tq.inters {
+		if inter == nil {
+			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
+		}
+		if trv, ok := inter.(Traverser); ok {
+			if err := trv.Traverse(ctx, tq); err != nil {
+				return err
+			}
+		}
+	}
+	for _, f := range tq.ctx.Fields {
+		if !task.ValidColumn(f) {
+			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
+		}
+	}
+	if tq.path != nil {
+		prev, err := tq.path(ctx)
+		if err != nil {
+			return err
+		}
+		tq.sql = prev
+	}
+	return nil
+}
+
+func (tq *TaskQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Task, error) {
+	var (
+		nodes       = []*Task{}
+		_spec       = tq.querySpec()
+		loadedTypes = [1]bool{
+			tq.withTaskLogs != nil,
+		}
+	)
+	// Method expression invoked with a nil receiver; scanValues presumably
+	// ignores its receiver — confirm against ent/task.go.
+	_spec.ScanValues = func(columns []string) ([]any, error) {
+		return (*Task).scanValues(nil, columns)
+	}
+	_spec.Assign = func(columns []string, values []any) error {
+		node := &Task{config: tq.config}
+		nodes = append(nodes, node)
+		node.Edges.loadedTypes = loadedTypes
+		return node.assignValues(columns, values)
+	}
+	for i := range hooks {
+		hooks[i](ctx, _spec)
+	}
+	if err := sqlgraph.QueryNodes(ctx, tq.driver, _spec); err != nil {
+		return nil, err
+	}
+	if len(nodes) == 0 {
+		return nodes, nil
+	}
+	if query := tq.withTaskLogs; query != nil {
+		if err := tq.loadTaskLogs(ctx, query, nodes,
+			func(n *Task) { n.Edges.TaskLogs = []*TaskLog{} },
+			func(n *Task, e *TaskLog) { n.Edges.TaskLogs = append(n.Edges.TaskLogs, e) }); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+func (tq *TaskQuery) loadTaskLogs(ctx context.Context, query *TaskLogQuery, nodes []*Task, init func(*Task), assign func(*Task, *TaskLog)) error {
+	fks := make([]driver.Value, 0, len(nodes))
+	nodeids := make(map[uint64]*Task)
+	for i := range nodes {
+		fks = append(fks, nodes[i].ID)
+		nodeids[nodes[i].ID] = nodes[i]
+		if init != nil {
+			init(nodes[i])
+		}
+	}
+	// withFKs is a TaskLogQuery field defined elsewhere; presumably it makes
+	// the neighbor query also select the FK column read as n.task_task_logs.
+	query.withFKs = true
+	query.Where(predicate.TaskLog(func(s *sql.Selector) {
+		s.Where(sql.InValues(s.C(task.TaskLogsColumn), fks...))
+	}))
+	neighbors, err := query.All(ctx)
+	if err != nil {
+		return err
+	}
+	for _, n := range neighbors {
+		fk := n.task_task_logs
+		if fk == nil {
+			return fmt.Errorf(`foreign-key "task_task_logs" is nil for node %v`, n.ID)
+		}
+		node, ok := nodeids[*fk]
+		if !ok {
+			return fmt.Errorf(`unexpected referenced foreign-key "task_task_logs" returned %v for node %v`, *fk, n.ID)
+		}
+		assign(node, n)
+	}
+	return nil
+}
+
+func (tq *TaskQuery) sqlCount(ctx context.Context) (int, error) {
+	_spec := tq.querySpec()
+	_spec.Node.Columns = tq.ctx.Fields
+	if len(tq.ctx.Fields) > 0 {
+		_spec.Unique = tq.ctx.Unique != nil && *tq.ctx.Unique
+	}
+	return sqlgraph.CountNodes(ctx, tq.driver, _spec)
+}
+
+func (tq *TaskQuery) querySpec() *sqlgraph.QuerySpec {
+	_spec := sqlgraph.NewQuerySpec(task.Table, task.Columns, sqlgraph.NewFieldSpec(task.FieldID, field.TypeUint64))
+	_spec.From = tq.sql
+	if unique := tq.ctx.Unique; unique != nil {
+		_spec.Unique = *unique
+	} else if tq.path != nil {
+		_spec.Unique = true
+	}
+	if fields := tq.ctx.Fields; len(fields) > 0 {
+		// The ID column is always selected, even under a partial field list.
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, task.FieldID)
+		for i := range fields {
+			if fields[i] != task.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
+			}
+		}
+	}
+	if ps := tq.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if limit := tq.ctx.Limit; limit != nil {
+		_spec.Limit = *limit
+	}
+	if offset := tq.ctx.Offset; offset != nil {
+		_spec.Offset = *offset
+	}
+	if ps := tq.order; len(ps) > 0 {
+		_spec.Order = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	return _spec
+}
+
+func (tq *TaskQuery) sqlQuery(ctx context.Context) *sql.Selector {
+	builder := sql.Dialect(tq.driver.Dialect())
+	t1 := builder.Table(task.Table)
+	columns := tq.ctx.Fields
+	if len(columns) == 0 {
+		columns = task.Columns
+	}
+	selector := builder.Select(t1.Columns(columns...)...).From(t1)
+	if tq.sql != nil {
+		selector = tq.sql
+		selector.Select(selector.Columns(columns...)...)
+	}
+	if tq.ctx.Unique != nil && *tq.ctx.Unique {
+		selector.Distinct()
+	}
+	for _, p := range tq.predicates {
+		p(selector)
+	}
+	for _, p := range tq.order {
+		p(selector)
+	}
+	if offset := tq.ctx.Offset; offset != nil {
+		// limit is mandatory for offset clause. We start
+		// with default value, and override it below if needed.
+		selector.Offset(*offset).Limit(math.MaxInt32)
+	}
+	if limit := tq.ctx.Limit; limit != nil {
+		selector.Limit(*limit)
+	}
+	return selector
+}
+
+// TaskGroupBy is the group-by builder for Task entities.
+type TaskGroupBy struct {
+	selector
+	build *TaskQuery
+}
+
+// Aggregate adds the given aggregation functions to the group-by query.
+func (tgb *TaskGroupBy) Aggregate(fns ...AggregateFunc) *TaskGroupBy {
+	tgb.fns = append(tgb.fns, fns...)
+	return tgb
+}
+
+// Scan applies the selector query and scans the result into the given value.
+func (tgb *TaskGroupBy) Scan(ctx context.Context, v any) error {
+	ctx = setContextOp(ctx, tgb.build.ctx, "GroupBy")
+	if err := tgb.build.prepareQuery(ctx); err != nil {
+		return err
+	}
+	return scanWithInterceptors[*TaskQuery, *TaskGroupBy](ctx, tgb.build, tgb, tgb.build.inters, v)
+}
+
+func (tgb *TaskGroupBy) sqlScan(ctx context.Context, root *TaskQuery, v any) error {
+	selector := root.sqlQuery(ctx).Select()
+	aggregation := make([]string, 0, len(tgb.fns))
+	for _, fn := range tgb.fns {
+		aggregation = append(aggregation, fn(selector))
+	}
+	if len(selector.SelectedColumns()) == 0 {
+		columns := make([]string, 0, len(*tgb.flds)+len(tgb.fns))
+		for _, f := range *tgb.flds {
+			columns = append(columns, selector.C(f))
+		}
+		columns = append(columns, aggregation...)
+		selector.Select(columns...)
+	}
+	selector.GroupBy(selector.Columns(*tgb.flds...)...)
+	if err := selector.Err(); err != nil {
+		return err
+	}
+	rows := &sql.Rows{}
+	query, args := selector.Query()
+	if err := tgb.build.driver.Query(ctx, query, args, rows); err != nil {
+		return err
+	}
+	defer rows.Close()
+	return sql.ScanSlice(rows, v)
+}
+
+// TaskSelect is the builder for selecting fields of Task entities.
+type TaskSelect struct {
+	*TaskQuery
+	selector
+}
+
+// Aggregate adds the given aggregation functions to the selector query.
+func (ts *TaskSelect) Aggregate(fns ...AggregateFunc) *TaskSelect {
+	ts.fns = append(ts.fns, fns...)
+	return ts
+}
+
+// Scan applies the selector query and scans the result into the given value.
+func (ts *TaskSelect) Scan(ctx context.Context, v any) error {
+	ctx = setContextOp(ctx, ts.ctx, "Select")
+	if err := ts.prepareQuery(ctx); err != nil {
+		return err
+	}
+	return scanWithInterceptors[*TaskQuery, *TaskSelect](ctx, ts.TaskQuery, ts, ts.inters, v)
+}
+
+func (ts *TaskSelect) sqlScan(ctx context.Context, root *TaskQuery, v any) error {
+	selector := root.sqlQuery(ctx)
+	aggregation := make([]string, 0, len(ts.fns))
+	for _, fn := range ts.fns {
+		aggregation = append(aggregation, fn(selector))
+	}
+	switch n := len(*ts.selector.flds); {
+	case n == 0 && len(aggregation) > 0:
+		selector.Select(aggregation...)
+	case n != 0 && len(aggregation) > 0:
+		selector.AppendSelect(aggregation...)
+	}
+	rows := &sql.Rows{}
+	query, args := selector.Query()
+	if err := ts.driver.Query(ctx, query, args, rows); err != nil {
+		return err
+	}
+	defer rows.Close()
+	return sql.ScanSlice(rows, v)
+}

+ 617 - 0
ent/task_update.go

@@ -0,0 +1,617 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/suyuan32/simple-admin-job/ent/predicate"
+	"github.com/suyuan32/simple-admin-job/ent/task"
+	"github.com/suyuan32/simple-admin-job/ent/tasklog"
+)
+
+// TaskUpdate is the builder for updating Task entities.
+// All requested changes are accumulated in the mutation and applied on Save/Exec.
+type TaskUpdate struct {
+	config
+	hooks    []Hook
+	mutation *TaskMutation
+}
+
+// Where appends a list predicates to the TaskUpdate builder.
+func (tu *TaskUpdate) Where(ps ...predicate.Task) *TaskUpdate {
+	tu.mutation.Where(ps...)
+	return tu
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (tu *TaskUpdate) SetUpdatedAt(t time.Time) *TaskUpdate {
+	tu.mutation.SetUpdatedAt(t)
+	return tu
+}
+
+// SetStatus sets the "status" field.
+// ResetStatus first discards any pending additive delta so the absolute
+// value wins over a previous AddStatus call.
+func (tu *TaskUpdate) SetStatus(u uint8) *TaskUpdate {
+	tu.mutation.ResetStatus()
+	tu.mutation.SetStatus(u)
+	return tu
+}
+
+// SetNillableStatus sets the "status" field if the given value is not nil.
+func (tu *TaskUpdate) SetNillableStatus(u *uint8) *TaskUpdate {
+	if u != nil {
+		tu.SetStatus(*u)
+	}
+	return tu
+}
+
+// AddStatus adds u to the "status" field.
+// The delta is a signed int8 so the field can also be decremented.
+func (tu *TaskUpdate) AddStatus(u int8) *TaskUpdate {
+	tu.mutation.AddStatus(u)
+	return tu
+}
+
+// ClearStatus clears the value of the "status" field.
+func (tu *TaskUpdate) ClearStatus() *TaskUpdate {
+	tu.mutation.ClearStatus()
+	return tu
+}
+
+// SetName sets the "name" field.
+func (tu *TaskUpdate) SetName(s string) *TaskUpdate {
+	tu.mutation.SetName(s)
+	return tu
+}
+
+// SetNillableName sets the "name" field if the given value is not nil.
+func (tu *TaskUpdate) SetNillableName(s *string) *TaskUpdate {
+	if s != nil {
+		tu.SetName(*s)
+	}
+	return tu
+}
+
+// SetTaskGroup sets the "task_group" field.
+func (tu *TaskUpdate) SetTaskGroup(s string) *TaskUpdate {
+	tu.mutation.SetTaskGroup(s)
+	return tu
+}
+
+// SetNillableTaskGroup sets the "task_group" field if the given value is not nil.
+func (tu *TaskUpdate) SetNillableTaskGroup(s *string) *TaskUpdate {
+	if s != nil {
+		tu.SetTaskGroup(*s)
+	}
+	return tu
+}
+
+// SetCronExpression sets the "cron_expression" field.
+func (tu *TaskUpdate) SetCronExpression(s string) *TaskUpdate {
+	tu.mutation.SetCronExpression(s)
+	return tu
+}
+
+// SetNillableCronExpression sets the "cron_expression" field if the given value is not nil.
+func (tu *TaskUpdate) SetNillableCronExpression(s *string) *TaskUpdate {
+	if s != nil {
+		tu.SetCronExpression(*s)
+	}
+	return tu
+}
+
+// SetPattern sets the "pattern" field.
+func (tu *TaskUpdate) SetPattern(s string) *TaskUpdate {
+	tu.mutation.SetPattern(s)
+	return tu
+}
+
+// SetNillablePattern sets the "pattern" field if the given value is not nil.
+func (tu *TaskUpdate) SetNillablePattern(s *string) *TaskUpdate {
+	if s != nil {
+		tu.SetPattern(*s)
+	}
+	return tu
+}
+
+// SetPayload sets the "payload" field.
+func (tu *TaskUpdate) SetPayload(s string) *TaskUpdate {
+	tu.mutation.SetPayload(s)
+	return tu
+}
+
+// SetNillablePayload sets the "payload" field if the given value is not nil.
+func (tu *TaskUpdate) SetNillablePayload(s *string) *TaskUpdate {
+	if s != nil {
+		tu.SetPayload(*s)
+	}
+	return tu
+}
+
+// AddTaskLogIDs adds the "task_logs" edge to the TaskLog entity by IDs.
+func (tu *TaskUpdate) AddTaskLogIDs(ids ...uint64) *TaskUpdate {
+	tu.mutation.AddTaskLogIDs(ids...)
+	return tu
+}
+
+// AddTaskLogs adds the "task_logs" edges to the TaskLog entity.
+func (tu *TaskUpdate) AddTaskLogs(t ...*TaskLog) *TaskUpdate {
+	ids := make([]uint64, len(t))
+	for i := range t {
+		ids[i] = t[i].ID
+	}
+	return tu.AddTaskLogIDs(ids...)
+}
+
+// Mutation returns the TaskMutation object of the builder.
+func (tu *TaskUpdate) Mutation() *TaskMutation {
+	return tu.mutation
+}
+
+// ClearTaskLogs clears all "task_logs" edges to the TaskLog entity.
+func (tu *TaskUpdate) ClearTaskLogs() *TaskUpdate {
+	tu.mutation.ClearTaskLogs()
+	return tu
+}
+
+// RemoveTaskLogIDs removes the "task_logs" edge to TaskLog entities by IDs.
+func (tu *TaskUpdate) RemoveTaskLogIDs(ids ...uint64) *TaskUpdate {
+	tu.mutation.RemoveTaskLogIDs(ids...)
+	return tu
+}
+
+// RemoveTaskLogs removes "task_logs" edges to TaskLog entities.
+func (tu *TaskUpdate) RemoveTaskLogs(t ...*TaskLog) *TaskUpdate {
+	ids := make([]uint64, len(t))
+	for i := range t {
+		ids[i] = t[i].ID
+	}
+	return tu.RemoveTaskLogIDs(ids...)
+}
+
+// Save executes the query and returns the number of nodes affected by the update operation.
+// Defaults (e.g. updated_at) are applied first, then hooks wrap sqlSave.
+func (tu *TaskUpdate) Save(ctx context.Context) (int, error) {
+	tu.defaults()
+	return withHooks(ctx, tu.sqlSave, tu.mutation, tu.hooks)
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (tu *TaskUpdate) SaveX(ctx context.Context) int {
+	affected, err := tu.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return affected
+}
+
+// Exec executes the query.
+func (tu *TaskUpdate) Exec(ctx context.Context) error {
+	_, err := tu.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (tu *TaskUpdate) ExecX(ctx context.Context) {
+	if err := tu.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// defaults sets the default values of the builder before save.
+// updated_at is refreshed on every save unless the caller set it explicitly.
+func (tu *TaskUpdate) defaults() {
+	if _, ok := tu.mutation.UpdatedAt(); !ok {
+		v := task.UpdateDefaultUpdatedAt()
+		tu.mutation.SetUpdatedAt(v)
+	}
+}
+
+// sqlSave translates the accumulated mutation into an sqlgraph update spec
+// (fields, predicates, and task_logs edge changes) and executes it,
+// returning the number of affected rows.
+func (tu *TaskUpdate) sqlSave(ctx context.Context) (n int, err error) {
+	_spec := sqlgraph.NewUpdateSpec(task.Table, task.Columns, sqlgraph.NewFieldSpec(task.FieldID, field.TypeUint64))
+	if ps := tu.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if value, ok := tu.mutation.UpdatedAt(); ok {
+		_spec.SetField(task.FieldUpdatedAt, field.TypeTime, value)
+	}
+	if value, ok := tu.mutation.Status(); ok {
+		_spec.SetField(task.FieldStatus, field.TypeUint8, value)
+	}
+	if value, ok := tu.mutation.AddedStatus(); ok {
+		_spec.AddField(task.FieldStatus, field.TypeUint8, value)
+	}
+	if tu.mutation.StatusCleared() {
+		_spec.ClearField(task.FieldStatus, field.TypeUint8)
+	}
+	if value, ok := tu.mutation.Name(); ok {
+		_spec.SetField(task.FieldName, field.TypeString, value)
+	}
+	if value, ok := tu.mutation.TaskGroup(); ok {
+		_spec.SetField(task.FieldTaskGroup, field.TypeString, value)
+	}
+	if value, ok := tu.mutation.CronExpression(); ok {
+		_spec.SetField(task.FieldCronExpression, field.TypeString, value)
+	}
+	if value, ok := tu.mutation.Pattern(); ok {
+		_spec.SetField(task.FieldPattern, field.TypeString, value)
+	}
+	if value, ok := tu.mutation.Payload(); ok {
+		_spec.SetField(task.FieldPayload, field.TypeString, value)
+	}
+	// Edge handling: a full clear takes precedence; explicit removals are only
+	// emitted when the edge was not cleared wholesale; additions come last.
+	if tu.mutation.TaskLogsCleared() {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   task.TaskLogsTable,
+			Columns: []string{task.TaskLogsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(tasklog.FieldID, field.TypeUint64),
+			},
+		}
+		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+	}
+	if nodes := tu.mutation.RemovedTaskLogsIDs(); len(nodes) > 0 && !tu.mutation.TaskLogsCleared() {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   task.TaskLogsTable,
+			Columns: []string{task.TaskLogsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(tasklog.FieldID, field.TypeUint64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+	}
+	if nodes := tu.mutation.TaskLogsIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   task.TaskLogsTable,
+			Columns: []string{task.TaskLogsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(tasklog.FieldID, field.TypeUint64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges.Add = append(_spec.Edges.Add, edge)
+	}
+	// Map low-level sqlgraph errors onto this package's error types.
+	if n, err = sqlgraph.UpdateNodes(ctx, tu.driver, _spec); err != nil {
+		if _, ok := err.(*sqlgraph.NotFoundError); ok {
+			err = &NotFoundError{task.Label}
+		} else if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return 0, err
+	}
+	tu.mutation.done = true
+	return n, nil
+}
+
+// TaskUpdateOne is the builder for updating a single Task entity.
+// fields optionally restricts the columns returned by the final SELECT.
+type TaskUpdateOne struct {
+	config
+	fields   []string
+	hooks    []Hook
+	mutation *TaskMutation
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (tuo *TaskUpdateOne) SetUpdatedAt(t time.Time) *TaskUpdateOne {
+	tuo.mutation.SetUpdatedAt(t)
+	return tuo
+}
+
+// SetStatus sets the "status" field.
+// ResetStatus first discards any pending additive delta so the absolute
+// value wins over a previous AddStatus call.
+func (tuo *TaskUpdateOne) SetStatus(u uint8) *TaskUpdateOne {
+	tuo.mutation.ResetStatus()
+	tuo.mutation.SetStatus(u)
+	return tuo
+}
+
+// SetNillableStatus sets the "status" field if the given value is not nil.
+func (tuo *TaskUpdateOne) SetNillableStatus(u *uint8) *TaskUpdateOne {
+	if u != nil {
+		tuo.SetStatus(*u)
+	}
+	return tuo
+}
+
+// AddStatus adds u to the "status" field.
+// The delta is a signed int8 so the field can also be decremented.
+func (tuo *TaskUpdateOne) AddStatus(u int8) *TaskUpdateOne {
+	tuo.mutation.AddStatus(u)
+	return tuo
+}
+
+// ClearStatus clears the value of the "status" field.
+func (tuo *TaskUpdateOne) ClearStatus() *TaskUpdateOne {
+	tuo.mutation.ClearStatus()
+	return tuo
+}
+
+// SetName sets the "name" field.
+func (tuo *TaskUpdateOne) SetName(s string) *TaskUpdateOne {
+	tuo.mutation.SetName(s)
+	return tuo
+}
+
+// SetNillableName sets the "name" field if the given value is not nil.
+func (tuo *TaskUpdateOne) SetNillableName(s *string) *TaskUpdateOne {
+	if s != nil {
+		tuo.SetName(*s)
+	}
+	return tuo
+}
+
+// SetTaskGroup sets the "task_group" field.
+func (tuo *TaskUpdateOne) SetTaskGroup(s string) *TaskUpdateOne {
+	tuo.mutation.SetTaskGroup(s)
+	return tuo
+}
+
+// SetNillableTaskGroup sets the "task_group" field if the given value is not nil.
+func (tuo *TaskUpdateOne) SetNillableTaskGroup(s *string) *TaskUpdateOne {
+	if s != nil {
+		tuo.SetTaskGroup(*s)
+	}
+	return tuo
+}
+
+// SetCronExpression sets the "cron_expression" field.
+func (tuo *TaskUpdateOne) SetCronExpression(s string) *TaskUpdateOne {
+	tuo.mutation.SetCronExpression(s)
+	return tuo
+}
+
+// SetNillableCronExpression sets the "cron_expression" field if the given value is not nil.
+func (tuo *TaskUpdateOne) SetNillableCronExpression(s *string) *TaskUpdateOne {
+	if s != nil {
+		tuo.SetCronExpression(*s)
+	}
+	return tuo
+}
+
+// SetPattern sets the "pattern" field.
+func (tuo *TaskUpdateOne) SetPattern(s string) *TaskUpdateOne {
+	tuo.mutation.SetPattern(s)
+	return tuo
+}
+
+// SetNillablePattern sets the "pattern" field if the given value is not nil.
+func (tuo *TaskUpdateOne) SetNillablePattern(s *string) *TaskUpdateOne {
+	if s != nil {
+		tuo.SetPattern(*s)
+	}
+	return tuo
+}
+
+// SetPayload sets the "payload" field.
+func (tuo *TaskUpdateOne) SetPayload(s string) *TaskUpdateOne {
+	tuo.mutation.SetPayload(s)
+	return tuo
+}
+
+// SetNillablePayload sets the "payload" field if the given value is not nil.
+func (tuo *TaskUpdateOne) SetNillablePayload(s *string) *TaskUpdateOne {
+	if s != nil {
+		tuo.SetPayload(*s)
+	}
+	return tuo
+}
+
+// AddTaskLogIDs adds the "task_logs" edge to the TaskLog entity by IDs.
+func (tuo *TaskUpdateOne) AddTaskLogIDs(ids ...uint64) *TaskUpdateOne {
+	tuo.mutation.AddTaskLogIDs(ids...)
+	return tuo
+}
+
+// AddTaskLogs adds the "task_logs" edges to the TaskLog entity.
+func (tuo *TaskUpdateOne) AddTaskLogs(t ...*TaskLog) *TaskUpdateOne {
+	ids := make([]uint64, len(t))
+	for i := range t {
+		ids[i] = t[i].ID
+	}
+	return tuo.AddTaskLogIDs(ids...)
+}
+
+// Mutation returns the TaskMutation object of the builder.
+func (tuo *TaskUpdateOne) Mutation() *TaskMutation {
+	return tuo.mutation
+}
+
+// ClearTaskLogs clears all "task_logs" edges to the TaskLog entity.
+func (tuo *TaskUpdateOne) ClearTaskLogs() *TaskUpdateOne {
+	tuo.mutation.ClearTaskLogs()
+	return tuo
+}
+
+// RemoveTaskLogIDs removes the "task_logs" edge to TaskLog entities by IDs.
+func (tuo *TaskUpdateOne) RemoveTaskLogIDs(ids ...uint64) *TaskUpdateOne {
+	tuo.mutation.RemoveTaskLogIDs(ids...)
+	return tuo
+}
+
+// RemoveTaskLogs removes "task_logs" edges to TaskLog entities.
+func (tuo *TaskUpdateOne) RemoveTaskLogs(t ...*TaskLog) *TaskUpdateOne {
+	ids := make([]uint64, len(t))
+	for i := range t {
+		ids[i] = t[i].ID
+	}
+	return tuo.RemoveTaskLogIDs(ids...)
+}
+
+// Where appends a list predicates to the TaskUpdate builder.
+func (tuo *TaskUpdateOne) Where(ps ...predicate.Task) *TaskUpdateOne {
+	tuo.mutation.Where(ps...)
+	return tuo
+}
+
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (tuo *TaskUpdateOne) Select(field string, fields ...string) *TaskUpdateOne {
+	tuo.fields = append([]string{field}, fields...)
+	return tuo
+}
+
+// Save executes the query and returns the updated Task entity.
+// Defaults (e.g. updated_at) are applied first, then hooks wrap sqlSave.
+func (tuo *TaskUpdateOne) Save(ctx context.Context) (*Task, error) {
+	tuo.defaults()
+	return withHooks(ctx, tuo.sqlSave, tuo.mutation, tuo.hooks)
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (tuo *TaskUpdateOne) SaveX(ctx context.Context) *Task {
+	node, err := tuo.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return node
+}
+
+// Exec executes the query on the entity.
+func (tuo *TaskUpdateOne) Exec(ctx context.Context) error {
+	_, err := tuo.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (tuo *TaskUpdateOne) ExecX(ctx context.Context) {
+	if err := tuo.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// defaults sets the default values of the builder before save.
+// updated_at is refreshed on every save unless the caller set it explicitly.
+func (tuo *TaskUpdateOne) defaults() {
+	if _, ok := tuo.mutation.UpdatedAt(); !ok {
+		v := task.UpdateDefaultUpdatedAt()
+		tuo.mutation.SetUpdatedAt(v)
+	}
+}
+
+// sqlSave translates the accumulated mutation into an sqlgraph update spec,
+// executes it against the single node identified by the mutation's ID, and
+// returns the updated entity (scanned back via the RETURNING-style assign).
+func (tuo *TaskUpdateOne) sqlSave(ctx context.Context) (_node *Task, err error) {
+	_spec := sqlgraph.NewUpdateSpec(task.Table, task.Columns, sqlgraph.NewFieldSpec(task.FieldID, field.TypeUint64))
+	id, ok := tuo.mutation.ID()
+	if !ok {
+		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Task.id" for update`)}
+	}
+	_spec.Node.ID.Value = id
+	// Honor a partial column selection; the id column is always included.
+	if fields := tuo.fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, task.FieldID)
+		for _, f := range fields {
+			if !task.ValidColumn(f) {
+				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
+			}
+			if f != task.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, f)
+			}
+		}
+	}
+	if ps := tuo.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if value, ok := tuo.mutation.UpdatedAt(); ok {
+		_spec.SetField(task.FieldUpdatedAt, field.TypeTime, value)
+	}
+	if value, ok := tuo.mutation.Status(); ok {
+		_spec.SetField(task.FieldStatus, field.TypeUint8, value)
+	}
+	if value, ok := tuo.mutation.AddedStatus(); ok {
+		_spec.AddField(task.FieldStatus, field.TypeUint8, value)
+	}
+	if tuo.mutation.StatusCleared() {
+		_spec.ClearField(task.FieldStatus, field.TypeUint8)
+	}
+	if value, ok := tuo.mutation.Name(); ok {
+		_spec.SetField(task.FieldName, field.TypeString, value)
+	}
+	if value, ok := tuo.mutation.TaskGroup(); ok {
+		_spec.SetField(task.FieldTaskGroup, field.TypeString, value)
+	}
+	if value, ok := tuo.mutation.CronExpression(); ok {
+		_spec.SetField(task.FieldCronExpression, field.TypeString, value)
+	}
+	if value, ok := tuo.mutation.Pattern(); ok {
+		_spec.SetField(task.FieldPattern, field.TypeString, value)
+	}
+	if value, ok := tuo.mutation.Payload(); ok {
+		_spec.SetField(task.FieldPayload, field.TypeString, value)
+	}
+	// Edge handling mirrors TaskUpdate.sqlSave: clear, then removals, then adds.
+	if tuo.mutation.TaskLogsCleared() {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   task.TaskLogsTable,
+			Columns: []string{task.TaskLogsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(tasklog.FieldID, field.TypeUint64),
+			},
+		}
+		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+	}
+	if nodes := tuo.mutation.RemovedTaskLogsIDs(); len(nodes) > 0 && !tuo.mutation.TaskLogsCleared() {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   task.TaskLogsTable,
+			Columns: []string{task.TaskLogsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(tasklog.FieldID, field.TypeUint64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+	}
+	if nodes := tuo.mutation.TaskLogsIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   task.TaskLogsTable,
+			Columns: []string{task.TaskLogsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(tasklog.FieldID, field.TypeUint64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges.Add = append(_spec.Edges.Add, edge)
+	}
+	// The updated row is scanned back into a fresh Task bound to this config.
+	_node = &Task{config: tuo.config}
+	_spec.Assign = _node.assignValues
+	_spec.ScanValues = _node.scanValues
+	if err = sqlgraph.UpdateNode(ctx, tuo.driver, _spec); err != nil {
+		if _, ok := err.(*sqlgraph.NotFoundError); ok {
+			err = &NotFoundError{task.Label}
+		} else if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return nil, err
+	}
+	tuo.mutation.done = true
+	return _node, nil
+}

+ 165 - 0
ent/tasklog.go

@@ -0,0 +1,165 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect/sql"
+	"github.com/suyuan32/simple-admin-job/ent/task"
+	"github.com/suyuan32/simple-admin-job/ent/tasklog"
+)
+
+// TaskLog is the model entity for the TaskLog schema.
+type TaskLog struct {
+	config `json:"-"`
+	// ID of the ent.
+	ID uint64 `json:"id,omitempty"`
+	// Task Started Time | 任务启动时间
+	StartedAt time.Time `json:"started_at,omitempty"`
+	// Task Finished Time | 任务完成时间
+	FinishedAt time.Time `json:"finished_at,omitempty"`
+	// The Task Process Result | 任务执行结果
+	Result uint8 `json:"result,omitempty"`
+	// Edges holds the relations/edges for other nodes in the graph.
+	// The values are being populated by the TaskLogQuery when eager-loading is set.
+	Edges          TaskLogEdges `json:"edges"`
+	// task_task_logs is the FK column to the owning Task; nil when unset.
+	task_task_logs *uint64
+	// selectValues stores extra columns selected via modifiers/order terms.
+	selectValues   sql.SelectValues
+}
+
+// TaskLogEdges holds the relations/edges for other nodes in the graph.
+type TaskLogEdges struct {
+	// Tasks holds the value of the tasks edge.
+	Tasks *Task `json:"tasks,omitempty"`
+	// loadedTypes holds the information for reporting if a
+	// type was loaded (or requested) in eager-loading or not.
+	loadedTypes [1]bool
+}
+
+// TasksOrErr returns the Tasks value or an error if the edge
+// was not loaded in eager-loading, or loaded but was not found.
+func (e TaskLogEdges) TasksOrErr() (*Task, error) {
+	if e.Tasks != nil {
+		return e.Tasks, nil
+	} else if e.loadedTypes[0] {
+		// The edge was requested but no neighbor row matched.
+		return nil, &NotFoundError{label: task.Label}
+	}
+	return nil, &NotLoadedError{edge: "tasks"}
+}
+
+// scanValues returns the types for scanning values from sql.Rows.
+// Each requested column gets a nullable scanner matching its SQL type;
+// unknown columns fall back to sql.UnknownType for modifier-selected values.
+func (*TaskLog) scanValues(columns []string) ([]any, error) {
+	values := make([]any, len(columns))
+	for i := range columns {
+		switch columns[i] {
+		case tasklog.FieldID, tasklog.FieldResult:
+			values[i] = new(sql.NullInt64)
+		case tasklog.FieldStartedAt, tasklog.FieldFinishedAt:
+			values[i] = new(sql.NullTime)
+		case tasklog.ForeignKeys[0]: // task_task_logs
+			values[i] = new(sql.NullInt64)
+		default:
+			values[i] = new(sql.UnknownType)
+		}
+	}
+	return values, nil
+}
+
+// assignValues assigns the values that were returned from sql.Rows (after scanning)
+// to the TaskLog fields. values[i] corresponds to columns[i]; nullable scanners
+// leave the zero value in place when the column is NULL.
+func (tl *TaskLog) assignValues(columns []string, values []any) error {
+	if m, n := len(values), len(columns); m < n {
+		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
+	}
+	for i := range columns {
+		switch columns[i] {
+		case tasklog.FieldID:
+			value, ok := values[i].(*sql.NullInt64)
+			if !ok {
+				return fmt.Errorf("unexpected type %T for field id", value)
+			}
+			tl.ID = uint64(value.Int64)
+		case tasklog.FieldStartedAt:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field started_at", values[i])
+			} else if value.Valid {
+				tl.StartedAt = value.Time
+			}
+		case tasklog.FieldFinishedAt:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field finished_at", values[i])
+			} else if value.Valid {
+				tl.FinishedAt = value.Time
+			}
+		case tasklog.FieldResult:
+			if value, ok := values[i].(*sql.NullInt64); !ok {
+				return fmt.Errorf("unexpected type %T for field result", values[i])
+			} else if value.Valid {
+				tl.Result = uint8(value.Int64)
+			}
+		case tasklog.ForeignKeys[0]:
+			// FK to the owning Task; stored as a pointer so NULL stays nil.
+			if value, ok := values[i].(*sql.NullInt64); !ok {
+				return fmt.Errorf("unexpected type %T for edge-field task_task_logs", value)
+			} else if value.Valid {
+				tl.task_task_logs = new(uint64)
+				*tl.task_task_logs = uint64(value.Int64)
+			}
+		default:
+			// Any other column was selected dynamically (modifier/order term).
+			tl.selectValues.Set(columns[i], values[i])
+		}
+	}
+	return nil
+}
+
+// Value returns the ent.Value that was dynamically selected and assigned to the TaskLog.
+// This includes values selected through modifiers, order, etc.
+func (tl *TaskLog) Value(name string) (ent.Value, error) {
+	return tl.selectValues.Get(name)
+}
+
+// QueryTasks queries the "tasks" edge of the TaskLog entity.
+func (tl *TaskLog) QueryTasks() *TaskQuery {
+	return NewTaskLogClient(tl.config).QueryTasks(tl)
+}
+
+// Update returns a builder for updating this TaskLog.
+// Note that you need to call TaskLog.Unwrap() before calling this method if this TaskLog
+// was returned from a transaction, and the transaction was committed or rolled back.
+func (tl *TaskLog) Update() *TaskLogUpdateOne {
+	return NewTaskLogClient(tl.config).UpdateOne(tl)
+}
+
+// Unwrap unwraps the TaskLog entity that was returned from a transaction after it was closed,
+// so that all future queries will be executed through the driver which created the transaction.
+func (tl *TaskLog) Unwrap() *TaskLog {
+	_tx, ok := tl.config.driver.(*txDriver)
+	if !ok {
+		// Panics by design: unwrapping is only valid for tx-scoped entities.
+		panic("ent: TaskLog is not a transactional entity")
+	}
+	tl.config.driver = _tx.drv
+	return tl
+}
+
+// String implements the fmt.Stringer.
+// Timestamps are rendered with time.ANSIC; the FK and dynamic values are omitted.
+func (tl *TaskLog) String() string {
+	var builder strings.Builder
+	builder.WriteString("TaskLog(")
+	builder.WriteString(fmt.Sprintf("id=%v, ", tl.ID))
+	builder.WriteString("started_at=")
+	builder.WriteString(tl.StartedAt.Format(time.ANSIC))
+	builder.WriteString(", ")
+	builder.WriteString("finished_at=")
+	builder.WriteString(tl.FinishedAt.Format(time.ANSIC))
+	builder.WriteString(", ")
+	builder.WriteString("result=")
+	builder.WriteString(fmt.Sprintf("%v", tl.Result))
+	builder.WriteByte(')')
+	return builder.String()
+}
+
+// TaskLogs is a parsable slice of TaskLog.
+type TaskLogs []*TaskLog

+ 105 - 0
ent/tasklog/tasklog.go

@@ -0,0 +1,105 @@
+// Code generated by ent, DO NOT EDIT.
+
+package tasklog
+
+import (
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+)
+
+const (
+	// Label holds the string label denoting the tasklog type in the database.
+	Label = "task_log"
+	// FieldID holds the string denoting the id field in the database.
+	FieldID = "id"
+	// FieldStartedAt holds the string denoting the started_at field in the database.
+	FieldStartedAt = "started_at"
+	// FieldFinishedAt holds the string denoting the finished_at field in the database.
+	FieldFinishedAt = "finished_at"
+	// FieldResult holds the string denoting the result field in the database.
+	FieldResult = "result"
+	// EdgeTasks holds the string denoting the tasks edge name in mutations.
+	EdgeTasks = "tasks"
+	// Table holds the table name of the tasklog in the database.
+	Table = "sys_task_logs"
+	// TasksTable is the table that holds the tasks relation/edge.
+	// Same as Table: the M2O edge lives on the task-log side via its FK column.
+	TasksTable = "sys_task_logs"
+	// TasksInverseTable is the table name for the Task entity.
+	// It exists in this package in order to avoid circular dependency with the "task" package.
+	TasksInverseTable = "sys_tasks"
+	// TasksColumn is the table column denoting the tasks relation/edge.
+	TasksColumn = "task_task_logs"
+)
+
+// Columns holds all SQL columns for tasklog fields.
+var Columns = []string{
+	FieldID,
+	FieldStartedAt,
+	FieldFinishedAt,
+	FieldResult,
+}
+
+// ForeignKeys holds the SQL foreign-keys that are owned by the "sys_task_logs"
+// table and are not defined as standalone fields in the schema.
+var ForeignKeys = []string{
+	"task_task_logs",
+}
+
+// ValidColumn reports if the column name is valid (part of the table columns).
+// Both schema fields and edge-owned FK columns are accepted.
+func ValidColumn(column string) bool {
+	for i := range Columns {
+		if column == Columns[i] {
+			return true
+		}
+	}
+	for i := range ForeignKeys {
+		if column == ForeignKeys[i] {
+			return true
+		}
+	}
+	return false
+}
+
+var (
+	// DefaultStartedAt holds the default value on creation for the "started_at" field.
+	// Assigned by the runtime package from the schema definition — see ent/runtime.
+	DefaultStartedAt func() time.Time
+)
+
+// OrderOption defines the ordering options for the TaskLog queries.
+type OrderOption func(*sql.Selector)
+
+// ByID orders the results by the id field.
+func ByID(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldID, opts...).ToFunc()
+}
+
+// ByStartedAt orders the results by the started_at field.
+func ByStartedAt(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldStartedAt, opts...).ToFunc()
+}
+
+// ByFinishedAt orders the results by the finished_at field.
+func ByFinishedAt(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldFinishedAt, opts...).ToFunc()
+}
+
+// ByResult orders the results by the result field.
+func ByResult(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldResult, opts...).ToFunc()
+}
+
+// ByTasksField orders the results by tasks field.
+// Ordering is applied through a join to the neighbor Task row.
+func ByTasksField(field string, opts ...sql.OrderTermOption) OrderOption {
+	return func(s *sql.Selector) {
+		sqlgraph.OrderByNeighborTerms(s, newTasksStep(), sql.OrderByField(field, opts...))
+	}
+}
+
+// newTasksStep builds the M2O graph step from a task-log to its owning Task.
+func newTasksStep() *sqlgraph.Step {
+	return sqlgraph.NewStep(
+		sqlgraph.From(Table, FieldID),
+		sqlgraph.To(TasksInverseTable, FieldID),
+		sqlgraph.Edge(sqlgraph.M2O, true, TasksTable, TasksColumn),
+	)
+}

+ 229 - 0
ent/tasklog/where.go

@@ -0,0 +1,229 @@
+// Code generated by ent, DO NOT EDIT.
+
+package tasklog
+
+import (
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"github.com/suyuan32/simple-admin-job/ent/predicate"
+)
+
+// ID filters vertices based on their ID field.
+func ID(id uint64) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldEQ(FieldID, id))
+}
+
+// IDEQ applies the EQ predicate on the ID field.
+func IDEQ(id uint64) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldEQ(FieldID, id))
+}
+
+// IDNEQ applies the NEQ predicate on the ID field.
+func IDNEQ(id uint64) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldNEQ(FieldID, id))
+}
+
+// IDIn applies the In predicate on the ID field.
+func IDIn(ids ...uint64) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldIn(FieldID, ids...))
+}
+
+// IDNotIn applies the NotIn predicate on the ID field.
+func IDNotIn(ids ...uint64) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldNotIn(FieldID, ids...))
+}
+
+// IDGT applies the GT predicate on the ID field.
+func IDGT(id uint64) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldGT(FieldID, id))
+}
+
+// IDGTE applies the GTE predicate on the ID field.
+func IDGTE(id uint64) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldGTE(FieldID, id))
+}
+
+// IDLT applies the LT predicate on the ID field.
+func IDLT(id uint64) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldLT(FieldID, id))
+}
+
+// IDLTE applies the LTE predicate on the ID field.
+func IDLTE(id uint64) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldLTE(FieldID, id))
+}
+
+// StartedAt applies equality check predicate on the "started_at" field. It's identical to StartedAtEQ.
+func StartedAt(v time.Time) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldEQ(FieldStartedAt, v))
+}
+
+// FinishedAt applies equality check predicate on the "finished_at" field. It's identical to FinishedAtEQ.
+func FinishedAt(v time.Time) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldEQ(FieldFinishedAt, v))
+}
+
+// Result applies equality check predicate on the "result" field. It's identical to ResultEQ.
+func Result(v uint8) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldEQ(FieldResult, v))
+}
+
+// StartedAtEQ applies the EQ predicate on the "started_at" field.
+func StartedAtEQ(v time.Time) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldEQ(FieldStartedAt, v))
+}
+
+// StartedAtNEQ applies the NEQ predicate on the "started_at" field.
+func StartedAtNEQ(v time.Time) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldNEQ(FieldStartedAt, v))
+}
+
+// StartedAtIn applies the In predicate on the "started_at" field.
+func StartedAtIn(vs ...time.Time) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldIn(FieldStartedAt, vs...))
+}
+
+// StartedAtNotIn applies the NotIn predicate on the "started_at" field.
+func StartedAtNotIn(vs ...time.Time) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldNotIn(FieldStartedAt, vs...))
+}
+
+// StartedAtGT applies the GT predicate on the "started_at" field.
+func StartedAtGT(v time.Time) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldGT(FieldStartedAt, v))
+}
+
+// StartedAtGTE applies the GTE predicate on the "started_at" field.
+func StartedAtGTE(v time.Time) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldGTE(FieldStartedAt, v))
+}
+
+// StartedAtLT applies the LT predicate on the "started_at" field.
+func StartedAtLT(v time.Time) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldLT(FieldStartedAt, v))
+}
+
+// StartedAtLTE applies the LTE predicate on the "started_at" field.
+func StartedAtLTE(v time.Time) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldLTE(FieldStartedAt, v))
+}
+
+// FinishedAtEQ applies the EQ predicate on the "finished_at" field.
+func FinishedAtEQ(v time.Time) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldEQ(FieldFinishedAt, v))
+}
+
+// FinishedAtNEQ applies the NEQ predicate on the "finished_at" field.
+func FinishedAtNEQ(v time.Time) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldNEQ(FieldFinishedAt, v))
+}
+
+// FinishedAtIn applies the In predicate on the "finished_at" field.
+func FinishedAtIn(vs ...time.Time) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldIn(FieldFinishedAt, vs...))
+}
+
+// FinishedAtNotIn applies the NotIn predicate on the "finished_at" field.
+func FinishedAtNotIn(vs ...time.Time) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldNotIn(FieldFinishedAt, vs...))
+}
+
+// FinishedAtGT applies the GT predicate on the "finished_at" field.
+func FinishedAtGT(v time.Time) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldGT(FieldFinishedAt, v))
+}
+
+// FinishedAtGTE applies the GTE predicate on the "finished_at" field.
+func FinishedAtGTE(v time.Time) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldGTE(FieldFinishedAt, v))
+}
+
+// FinishedAtLT applies the LT predicate on the "finished_at" field.
+func FinishedAtLT(v time.Time) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldLT(FieldFinishedAt, v))
+}
+
+// FinishedAtLTE applies the LTE predicate on the "finished_at" field.
+func FinishedAtLTE(v time.Time) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldLTE(FieldFinishedAt, v))
+}
+
+// ResultEQ applies the EQ predicate on the "result" field.
+func ResultEQ(v uint8) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldEQ(FieldResult, v))
+}
+
+// ResultNEQ applies the NEQ predicate on the "result" field.
+func ResultNEQ(v uint8) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldNEQ(FieldResult, v))
+}
+
+// ResultIn applies the In predicate on the "result" field.
+func ResultIn(vs ...uint8) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldIn(FieldResult, vs...))
+}
+
+// ResultNotIn applies the NotIn predicate on the "result" field.
+func ResultNotIn(vs ...uint8) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldNotIn(FieldResult, vs...))
+}
+
+// ResultGT applies the GT predicate on the "result" field.
+func ResultGT(v uint8) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldGT(FieldResult, v))
+}
+
+// ResultGTE applies the GTE predicate on the "result" field.
+func ResultGTE(v uint8) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldGTE(FieldResult, v))
+}
+
+// ResultLT applies the LT predicate on the "result" field.
+func ResultLT(v uint8) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldLT(FieldResult, v))
+}
+
+// ResultLTE applies the LTE predicate on the "result" field.
+func ResultLTE(v uint8) predicate.TaskLog {
+	return predicate.TaskLog(sql.FieldLTE(FieldResult, v))
+}
+
+// HasTasks applies the HasEdge predicate on the "tasks" edge.
+func HasTasks() predicate.TaskLog {
+	return predicate.TaskLog(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.Edge(sqlgraph.M2O, true, TasksTable, TasksColumn),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasTasksWith applies the HasEdge predicate on the "tasks" edge with a given conditions (other predicates).
+func HasTasksWith(preds ...predicate.Task) predicate.TaskLog {
+	return predicate.TaskLog(func(s *sql.Selector) {
+		step := newTasksStep()
+		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+			// Neighbor predicates run against the joined Task selector.
+			for _, p := range preds {
+				p(s)
+			}
+		})
+	})
+}
+
+// And groups predicates with the AND operator between them.
+func And(predicates ...predicate.TaskLog) predicate.TaskLog {
+	return predicate.TaskLog(sql.AndPredicates(predicates...))
+}
+
+// Or groups predicates with the OR operator between them.
+func Or(predicates ...predicate.TaskLog) predicate.TaskLog {
+	return predicate.TaskLog(sql.OrPredicates(predicates...))
+}
+
+// Not applies the not operator on the given predicate.
+func Not(p predicate.TaskLog) predicate.TaskLog {
+	return predicate.TaskLog(sql.NotPredicates(p))
+}

+ 277 - 0
ent/tasklog_create.go

@@ -0,0 +1,277 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/suyuan32/simple-admin-job/ent/task"
+	"github.com/suyuan32/simple-admin-job/ent/tasklog"
+)
+
+// TaskLogCreate is the builder for creating a TaskLog entity.
+type TaskLogCreate struct {
+	config
+	mutation *TaskLogMutation
+	hooks    []Hook
+}
+
+// SetStartedAt sets the "started_at" field.
+func (tlc *TaskLogCreate) SetStartedAt(t time.Time) *TaskLogCreate {
+	tlc.mutation.SetStartedAt(t)
+	return tlc
+}
+
+// SetNillableStartedAt sets the "started_at" field if the given value is not nil.
+func (tlc *TaskLogCreate) SetNillableStartedAt(t *time.Time) *TaskLogCreate {
+	if t != nil {
+		tlc.SetStartedAt(*t)
+	}
+	return tlc
+}
+
+// SetFinishedAt sets the "finished_at" field.
+func (tlc *TaskLogCreate) SetFinishedAt(t time.Time) *TaskLogCreate {
+	tlc.mutation.SetFinishedAt(t)
+	return tlc
+}
+
+// SetResult sets the "result" field.
+func (tlc *TaskLogCreate) SetResult(u uint8) *TaskLogCreate {
+	tlc.mutation.SetResult(u)
+	return tlc
+}
+
+// SetID sets the "id" field.
+func (tlc *TaskLogCreate) SetID(u uint64) *TaskLogCreate {
+	tlc.mutation.SetID(u)
+	return tlc
+}
+
+// SetTasksID sets the "tasks" edge to the Task entity by ID.
+func (tlc *TaskLogCreate) SetTasksID(id uint64) *TaskLogCreate {
+	tlc.mutation.SetTasksID(id)
+	return tlc
+}
+
+// SetNillableTasksID sets the "tasks" edge to the Task entity by ID if the given value is not nil.
+func (tlc *TaskLogCreate) SetNillableTasksID(id *uint64) *TaskLogCreate {
+	if id != nil {
+		tlc = tlc.SetTasksID(*id)
+	}
+	return tlc
+}
+
+// SetTasks sets the "tasks" edge to the Task entity.
+func (tlc *TaskLogCreate) SetTasks(t *Task) *TaskLogCreate {
+	return tlc.SetTasksID(t.ID)
+}
+
+// Mutation returns the TaskLogMutation object of the builder.
+func (tlc *TaskLogCreate) Mutation() *TaskLogMutation {
+	return tlc.mutation
+}
+
+// Save creates the TaskLog in the database.
+func (tlc *TaskLogCreate) Save(ctx context.Context) (*TaskLog, error) {
+	tlc.defaults()
+	return withHooks(ctx, tlc.sqlSave, tlc.mutation, tlc.hooks)
+}
+
+// SaveX calls Save and panics if Save returns an error.
+func (tlc *TaskLogCreate) SaveX(ctx context.Context) *TaskLog {
+	v, err := tlc.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (tlc *TaskLogCreate) Exec(ctx context.Context) error {
+	_, err := tlc.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (tlc *TaskLogCreate) ExecX(ctx context.Context) {
+	if err := tlc.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// defaults sets the default values of the builder before save.
+// Only "started_at" has a schema default (tasklog.DefaultStartedAt); it is applied
+// solely when the caller has not set the field explicitly on the mutation.
+func (tlc *TaskLogCreate) defaults() {
+	if _, ok := tlc.mutation.StartedAt(); !ok {
+		v := tasklog.DefaultStartedAt()
+		tlc.mutation.SetStartedAt(v)
+	}
+}
+
+// check runs all checks and user-defined validators on the builder.
+// Note: "started_at" is normally populated by defaults() before this runs,
+// so its validation only fails if the defaults path was bypassed.
+func (tlc *TaskLogCreate) check() error {
+	if _, ok := tlc.mutation.StartedAt(); !ok {
+		return &ValidationError{Name: "started_at", err: errors.New(`ent: missing required field "TaskLog.started_at"`)}
+	}
+	if _, ok := tlc.mutation.FinishedAt(); !ok {
+		return &ValidationError{Name: "finished_at", err: errors.New(`ent: missing required field "TaskLog.finished_at"`)}
+	}
+	if _, ok := tlc.mutation.Result(); !ok {
+		return &ValidationError{Name: "result", err: errors.New(`ent: missing required field "TaskLog.result"`)}
+	}
+	return nil
+}
+
+// sqlSave validates the builder, executes the INSERT through sqlgraph, and
+// returns the created node. Constraint violations are wrapped in *ConstraintError.
+func (tlc *TaskLogCreate) sqlSave(ctx context.Context) (*TaskLog, error) {
+	if err := tlc.check(); err != nil {
+		return nil, err
+	}
+	_node, _spec := tlc.createSpec()
+	if err := sqlgraph.CreateNode(ctx, tlc.driver, _spec); err != nil {
+		if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return nil, err
+	}
+	if _spec.ID.Value != _node.ID {
+		// The ID was assigned by the database; drivers report it as int64,
+		// so convert it to the schema's uint64 ID type.
+		id := _spec.ID.Value.(int64)
+		_node.ID = uint64(id)
+	}
+	tlc.mutation.id = &_node.ID
+	tlc.mutation.done = true
+	return _node, nil
+}
+
+// createSpec translates the mutation into a sqlgraph.CreateSpec (columns, values,
+// and edges) plus the in-memory node that mirrors what will be inserted.
+func (tlc *TaskLogCreate) createSpec() (*TaskLog, *sqlgraph.CreateSpec) {
+	var (
+		_node = &TaskLog{config: tlc.config}
+		_spec = sqlgraph.NewCreateSpec(tasklog.Table, sqlgraph.NewFieldSpec(tasklog.FieldID, field.TypeUint64))
+	)
+	if id, ok := tlc.mutation.ID(); ok {
+		// Caller supplied an explicit ID; use it instead of the DB auto-increment.
+		_node.ID = id
+		_spec.ID.Value = id
+	}
+	if value, ok := tlc.mutation.StartedAt(); ok {
+		_spec.SetField(tasklog.FieldStartedAt, field.TypeTime, value)
+		_node.StartedAt = value
+	}
+	if value, ok := tlc.mutation.FinishedAt(); ok {
+		_spec.SetField(tasklog.FieldFinishedAt, field.TypeTime, value)
+		_node.FinishedAt = value
+	}
+	if value, ok := tlc.mutation.Result(); ok {
+		_spec.SetField(tasklog.FieldResult, field.TypeUint8, value)
+		_node.Result = value
+	}
+	if nodes := tlc.mutation.TasksIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.M2O,
+			Inverse: true,
+			Table:   tasklog.TasksTable,
+			Columns: []string{tasklog.TasksColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(task.FieldID, field.TypeUint64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		// Record the parent Task ID in the unexported FK field so the returned
+		// node reflects the edge; M2O means at most one parent (nodes[0]).
+		_node.task_task_logs = &nodes[0]
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	return _node, _spec
+}
+
+// TaskLogCreateBulk is the builder for creating many TaskLog entities in bulk.
+type TaskLogCreateBulk struct {
+	config
+	err      error
+	builders []*TaskLogCreate
+}
+
+// Save creates the TaskLog entities in the database.
+//
+// Each builder's hooks are wrapped around a per-builder mutator, and the
+// mutators are chained: mutator[i] prepares spec[i] and then invokes
+// mutator[i+1]; the last mutator in the chain performs a single
+// sqlgraph.BatchCreate for all collected specs. Invoking mutators[0]
+// therefore runs every hook chain and exactly one batch INSERT.
+func (tlcb *TaskLogCreateBulk) Save(ctx context.Context) ([]*TaskLog, error) {
+	if tlcb.err != nil {
+		return nil, tlcb.err
+	}
+	specs := make([]*sqlgraph.CreateSpec, len(tlcb.builders))
+	nodes := make([]*TaskLog, len(tlcb.builders))
+	mutators := make([]Mutator, len(tlcb.builders))
+	for i := range tlcb.builders {
+		func(i int, root context.Context) {
+			builder := tlcb.builders[i]
+			builder.defaults()
+			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+				mutation, ok := m.(*TaskLogMutation)
+				if !ok {
+					return nil, fmt.Errorf("unexpected mutation type %T", m)
+				}
+				if err := builder.check(); err != nil {
+					return nil, err
+				}
+				builder.mutation = mutation
+				var err error
+				nodes[i], specs[i] = builder.createSpec()
+				if i < len(mutators)-1 {
+					_, err = mutators[i+1].Mutate(root, tlcb.builders[i+1].mutation)
+				} else {
+					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+					// Invoke the actual operation on the latest mutation in the chain.
+					if err = sqlgraph.BatchCreate(ctx, tlcb.driver, spec); err != nil {
+						if sqlgraph.IsConstraintError(err) {
+							err = &ConstraintError{msg: err.Error(), wrap: err}
+						}
+					}
+				}
+				if err != nil {
+					return nil, err
+				}
+				mutation.id = &nodes[i].ID
+				// Adopt the DB-assigned ID (int64) when no explicit ID was set.
+				if specs[i].ID.Value != nil && nodes[i].ID == 0 {
+					id := specs[i].ID.Value.(int64)
+					nodes[i].ID = uint64(id)
+				}
+				mutation.done = true
+				return nodes[i], nil
+			})
+			// Wrap hooks in reverse so hooks[0] runs outermost, matching single-create order.
+			for i := len(builder.hooks) - 1; i >= 0; i-- {
+				mut = builder.hooks[i](mut)
+			}
+			mutators[i] = mut
+		}(i, ctx)
+	}
+	if len(mutators) > 0 {
+		if _, err := mutators[0].Mutate(ctx, tlcb.builders[0].mutation); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (tlcb *TaskLogCreateBulk) SaveX(ctx context.Context) []*TaskLog {
+	v, err := tlcb.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (tlcb *TaskLogCreateBulk) Exec(ctx context.Context) error {
+	_, err := tlcb.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (tlcb *TaskLogCreateBulk) ExecX(ctx context.Context) {
+	if err := tlcb.Exec(ctx); err != nil {
+		panic(err)
+	}
+}

+ 88 - 0
ent/tasklog_delete.go

@@ -0,0 +1,88 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/suyuan32/simple-admin-job/ent/predicate"
+	"github.com/suyuan32/simple-admin-job/ent/tasklog"
+)
+
+// TaskLogDelete is the builder for deleting a TaskLog entity.
+type TaskLogDelete struct {
+	config
+	hooks    []Hook
+	mutation *TaskLogMutation
+}
+
+// Where appends a list predicates to the TaskLogDelete builder.
+func (tld *TaskLogDelete) Where(ps ...predicate.TaskLog) *TaskLogDelete {
+	tld.mutation.Where(ps...)
+	return tld
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
+func (tld *TaskLogDelete) Exec(ctx context.Context) (int, error) {
+	return withHooks(ctx, tld.sqlExec, tld.mutation, tld.hooks)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (tld *TaskLogDelete) ExecX(ctx context.Context) int {
+	n, err := tld.Exec(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return n
+}
+
+// sqlExec builds a DeleteSpec from the accumulated predicates, runs the bulk
+// DELETE via sqlgraph, and returns the number of rows removed. Constraint
+// violations are wrapped in *ConstraintError.
+func (tld *TaskLogDelete) sqlExec(ctx context.Context) (int, error) {
+	_spec := sqlgraph.NewDeleteSpec(tasklog.Table, sqlgraph.NewFieldSpec(tasklog.FieldID, field.TypeUint64))
+	if ps := tld.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	affected, err := sqlgraph.DeleteNodes(ctx, tld.driver, _spec)
+	if err != nil && sqlgraph.IsConstraintError(err) {
+		err = &ConstraintError{msg: err.Error(), wrap: err}
+	}
+	tld.mutation.done = true
+	return affected, err
+}
+
+// TaskLogDeleteOne is the builder for deleting a single TaskLog entity.
+type TaskLogDeleteOne struct {
+	tld *TaskLogDelete
+}
+
+// Where appends a list predicates to the TaskLogDelete builder.
+func (tldo *TaskLogDeleteOne) Where(ps ...predicate.TaskLog) *TaskLogDeleteOne {
+	tldo.tld.mutation.Where(ps...)
+	return tldo
+}
+
+// Exec executes the deletion query.
+// It returns *NotFoundError when the predicates matched no row (n == 0).
+func (tldo *TaskLogDeleteOne) Exec(ctx context.Context) error {
+	n, err := tldo.tld.Exec(ctx)
+	switch {
+	case err != nil:
+		return err
+	case n == 0:
+		return &NotFoundError{tasklog.Label}
+	default:
+		return nil
+	}
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (tldo *TaskLogDeleteOne) ExecX(ctx context.Context) {
+	if err := tldo.Exec(ctx); err != nil {
+		panic(err)
+	}
+}

+ 613 - 0
ent/tasklog_query.go

@@ -0,0 +1,613 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"fmt"
+	"math"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/suyuan32/simple-admin-job/ent/predicate"
+	"github.com/suyuan32/simple-admin-job/ent/task"
+	"github.com/suyuan32/simple-admin-job/ent/tasklog"
+)
+
+// TaskLogQuery is the builder for querying TaskLog entities.
+type TaskLogQuery struct {
+	config
+	ctx        *QueryContext
+	order      []tasklog.OrderOption
+	inters     []Interceptor
+	predicates []predicate.TaskLog
+	withTasks  *TaskQuery
+	withFKs    bool
+	// intermediate query (i.e. traversal path).
+	sql  *sql.Selector
+	path func(context.Context) (*sql.Selector, error)
+}
+
+// Where adds a new predicate for the TaskLogQuery builder.
+func (tlq *TaskLogQuery) Where(ps ...predicate.TaskLog) *TaskLogQuery {
+	tlq.predicates = append(tlq.predicates, ps...)
+	return tlq
+}
+
+// Limit the number of records to be returned by this query.
+func (tlq *TaskLogQuery) Limit(limit int) *TaskLogQuery {
+	tlq.ctx.Limit = &limit
+	return tlq
+}
+
+// Offset to start from.
+func (tlq *TaskLogQuery) Offset(offset int) *TaskLogQuery {
+	tlq.ctx.Offset = &offset
+	return tlq
+}
+
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
+func (tlq *TaskLogQuery) Unique(unique bool) *TaskLogQuery {
+	tlq.ctx.Unique = &unique
+	return tlq
+}
+
+// Order specifies how the records should be ordered.
+func (tlq *TaskLogQuery) Order(o ...tasklog.OrderOption) *TaskLogQuery {
+	tlq.order = append(tlq.order, o...)
+	return tlq
+}
+
+// QueryTasks chains the current query on the "tasks" edge.
+func (tlq *TaskLogQuery) QueryTasks() *TaskQuery {
+	query := (&TaskClient{config: tlq.config}).Query()
+	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
+		if err := tlq.prepareQuery(ctx); err != nil {
+			return nil, err
+		}
+		selector := tlq.sqlQuery(ctx)
+		if err := selector.Err(); err != nil {
+			return nil, err
+		}
+		step := sqlgraph.NewStep(
+			sqlgraph.From(tasklog.Table, tasklog.FieldID, selector),
+			sqlgraph.To(task.Table, task.FieldID),
+			sqlgraph.Edge(sqlgraph.M2O, true, tasklog.TasksTable, tasklog.TasksColumn),
+		)
+		fromU = sqlgraph.SetNeighbors(tlq.driver.Dialect(), step)
+		return fromU, nil
+	}
+	return query
+}
+
+// First returns the first TaskLog entity from the query.
+// Returns a *NotFoundError when no TaskLog was found.
+func (tlq *TaskLogQuery) First(ctx context.Context) (*TaskLog, error) {
+	nodes, err := tlq.Limit(1).All(setContextOp(ctx, tlq.ctx, "First"))
+	if err != nil {
+		return nil, err
+	}
+	if len(nodes) == 0 {
+		return nil, &NotFoundError{tasklog.Label}
+	}
+	return nodes[0], nil
+}
+
+// FirstX is like First, but panics if an error occurs.
+func (tlq *TaskLogQuery) FirstX(ctx context.Context) *TaskLog {
+	node, err := tlq.First(ctx)
+	if err != nil && !IsNotFound(err) {
+		panic(err)
+	}
+	return node
+}
+
+// FirstID returns the first TaskLog ID from the query.
+// Returns a *NotFoundError when no TaskLog ID was found.
+func (tlq *TaskLogQuery) FirstID(ctx context.Context) (id uint64, err error) {
+	var ids []uint64
+	if ids, err = tlq.Limit(1).IDs(setContextOp(ctx, tlq.ctx, "FirstID")); err != nil {
+		return
+	}
+	if len(ids) == 0 {
+		err = &NotFoundError{tasklog.Label}
+		return
+	}
+	return ids[0], nil
+}
+
+// FirstIDX is like FirstID, but panics if an error occurs.
+func (tlq *TaskLogQuery) FirstIDX(ctx context.Context) uint64 {
+	id, err := tlq.FirstID(ctx)
+	if err != nil && !IsNotFound(err) {
+		panic(err)
+	}
+	return id
+}
+
+// Only returns a single TaskLog entity found by the query, ensuring it only returns one.
+// Returns a *NotSingularError when more than one TaskLog entity is found.
+// Returns a *NotFoundError when no TaskLog entities are found.
+func (tlq *TaskLogQuery) Only(ctx context.Context) (*TaskLog, error) {
+	nodes, err := tlq.Limit(2).All(setContextOp(ctx, tlq.ctx, "Only"))
+	if err != nil {
+		return nil, err
+	}
+	switch len(nodes) {
+	case 1:
+		return nodes[0], nil
+	case 0:
+		return nil, &NotFoundError{tasklog.Label}
+	default:
+		return nil, &NotSingularError{tasklog.Label}
+	}
+}
+
+// OnlyX is like Only, but panics if an error occurs.
+func (tlq *TaskLogQuery) OnlyX(ctx context.Context) *TaskLog {
+	node, err := tlq.Only(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return node
+}
+
+// OnlyID is like Only, but returns the only TaskLog ID in the query.
+// Returns a *NotSingularError when more than one TaskLog ID is found.
+// Returns a *NotFoundError when no entities are found.
+func (tlq *TaskLogQuery) OnlyID(ctx context.Context) (id uint64, err error) {
+	var ids []uint64
+	if ids, err = tlq.Limit(2).IDs(setContextOp(ctx, tlq.ctx, "OnlyID")); err != nil {
+		return
+	}
+	switch len(ids) {
+	case 1:
+		id = ids[0]
+	case 0:
+		err = &NotFoundError{tasklog.Label}
+	default:
+		err = &NotSingularError{tasklog.Label}
+	}
+	return
+}
+
+// OnlyIDX is like OnlyID, but panics if an error occurs.
+func (tlq *TaskLogQuery) OnlyIDX(ctx context.Context) uint64 {
+	id, err := tlq.OnlyID(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return id
+}
+
+// All executes the query and returns a list of TaskLogs.
+func (tlq *TaskLogQuery) All(ctx context.Context) ([]*TaskLog, error) {
+	ctx = setContextOp(ctx, tlq.ctx, "All")
+	if err := tlq.prepareQuery(ctx); err != nil {
+		return nil, err
+	}
+	qr := querierAll[[]*TaskLog, *TaskLogQuery]()
+	return withInterceptors[[]*TaskLog](ctx, tlq, qr, tlq.inters)
+}
+
+// AllX is like All, but panics if an error occurs.
+func (tlq *TaskLogQuery) AllX(ctx context.Context) []*TaskLog {
+	nodes, err := tlq.All(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return nodes
+}
+
+// IDs executes the query and returns a list of TaskLog IDs.
+func (tlq *TaskLogQuery) IDs(ctx context.Context) (ids []uint64, err error) {
+	if tlq.ctx.Unique == nil && tlq.path != nil {
+		tlq.Unique(true)
+	}
+	ctx = setContextOp(ctx, tlq.ctx, "IDs")
+	if err = tlq.Select(tasklog.FieldID).Scan(ctx, &ids); err != nil {
+		return nil, err
+	}
+	return ids, nil
+}
+
+// IDsX is like IDs, but panics if an error occurs.
+func (tlq *TaskLogQuery) IDsX(ctx context.Context) []uint64 {
+	ids, err := tlq.IDs(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return ids
+}
+
+// Count returns the count of the given query.
+func (tlq *TaskLogQuery) Count(ctx context.Context) (int, error) {
+	ctx = setContextOp(ctx, tlq.ctx, "Count")
+	if err := tlq.prepareQuery(ctx); err != nil {
+		return 0, err
+	}
+	return withInterceptors[int](ctx, tlq, querierCount[*TaskLogQuery](), tlq.inters)
+}
+
+// CountX is like Count, but panics if an error occurs.
+func (tlq *TaskLogQuery) CountX(ctx context.Context) int {
+	count, err := tlq.Count(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return count
+}
+
+// Exist returns true if the query has elements in the graph.
+func (tlq *TaskLogQuery) Exist(ctx context.Context) (bool, error) {
+	ctx = setContextOp(ctx, tlq.ctx, "Exist")
+	switch _, err := tlq.FirstID(ctx); {
+	case IsNotFound(err):
+		return false, nil
+	case err != nil:
+		return false, fmt.Errorf("ent: check existence: %w", err)
+	default:
+		return true, nil
+	}
+}
+
+// ExistX is like Exist, but panics if an error occurs.
+func (tlq *TaskLogQuery) ExistX(ctx context.Context) bool {
+	exist, err := tlq.Exist(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return exist
+}
+
+// Clone returns a duplicate of the TaskLogQuery builder, including all associated steps. It can be
+// used to prepare common query builders and use them differently after the clone is made.
+// A nil receiver is tolerated and yields nil, so chained clones are safe.
+func (tlq *TaskLogQuery) Clone() *TaskLogQuery {
+	if tlq == nil {
+		return nil
+	}
+	return &TaskLogQuery{
+		config:     tlq.config,
+		ctx:        tlq.ctx.Clone(),
+		order:      append([]tasklog.OrderOption{}, tlq.order...),
+		inters:     append([]Interceptor{}, tlq.inters...),
+		predicates: append([]predicate.TaskLog{}, tlq.predicates...),
+		withTasks:  tlq.withTasks.Clone(),
+		// clone intermediate query.
+		sql:  tlq.sql.Clone(),
+		path: tlq.path,
+	}
+}
+
+// WithTasks tells the query-builder to eager-load the nodes that are connected to
+// the "tasks" edge. The optional arguments are used to configure the query builder of the edge.
+func (tlq *TaskLogQuery) WithTasks(opts ...func(*TaskQuery)) *TaskLogQuery {
+	query := (&TaskClient{config: tlq.config}).Query()
+	for _, opt := range opts {
+		opt(query)
+	}
+	tlq.withTasks = query
+	return tlq
+}
+
+// GroupBy is used to group vertices by one or more fields/columns.
+// It is often used with aggregate functions, like: count, max, mean, min, sum.
+//
+// Example:
+//
+//	var v []struct {
+//		StartedAt time.Time `json:"started_at,omitempty"`
+//		Count int `json:"count,omitempty"`
+//	}
+//
+//	client.TaskLog.Query().
+//		GroupBy(tasklog.FieldStartedAt).
+//		Aggregate(ent.Count()).
+//		Scan(ctx, &v)
+func (tlq *TaskLogQuery) GroupBy(field string, fields ...string) *TaskLogGroupBy {
+	tlq.ctx.Fields = append([]string{field}, fields...)
+	grbuild := &TaskLogGroupBy{build: tlq}
+	grbuild.flds = &tlq.ctx.Fields
+	grbuild.label = tasklog.Label
+	grbuild.scan = grbuild.Scan
+	return grbuild
+}
+
+// Select allows the selection one or more fields/columns for the given query,
+// instead of selecting all fields in the entity.
+//
+// Example:
+//
+//	var v []struct {
+//		StartedAt time.Time `json:"started_at,omitempty"`
+//	}
+//
+//	client.TaskLog.Query().
+//		Select(tasklog.FieldStartedAt).
+//		Scan(ctx, &v)
+func (tlq *TaskLogQuery) Select(fields ...string) *TaskLogSelect {
+	tlq.ctx.Fields = append(tlq.ctx.Fields, fields...)
+	sbuild := &TaskLogSelect{TaskLogQuery: tlq}
+	sbuild.label = tasklog.Label
+	sbuild.flds, sbuild.scan = &tlq.ctx.Fields, sbuild.Scan
+	return sbuild
+}
+
+// Aggregate returns a TaskLogSelect configured with the given aggregations.
+func (tlq *TaskLogQuery) Aggregate(fns ...AggregateFunc) *TaskLogSelect {
+	return tlq.Select().Aggregate(fns...)
+}
+
+func (tlq *TaskLogQuery) prepareQuery(ctx context.Context) error {
+	for _, inter := range tlq.inters {
+		if inter == nil {
+			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
+		}
+		if trv, ok := inter.(Traverser); ok {
+			if err := trv.Traverse(ctx, tlq); err != nil {
+				return err
+			}
+		}
+	}
+	for _, f := range tlq.ctx.Fields {
+		if !tasklog.ValidColumn(f) {
+			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
+		}
+	}
+	if tlq.path != nil {
+		prev, err := tlq.path(ctx)
+		if err != nil {
+			return err
+		}
+		tlq.sql = prev
+	}
+	return nil
+}
+
+// sqlAll scans all matching TaskLog rows and resolves eager-loaded edges.
+// When the "tasks" edge is requested, the FK columns are added to the scan
+// so loadTasks can map each log back to its parent Task.
+func (tlq *TaskLogQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*TaskLog, error) {
+	var (
+		nodes       = []*TaskLog{}
+		withFKs     = tlq.withFKs
+		_spec       = tlq.querySpec()
+		// loadedTypes flags which edges were eager-loaded (index 0 = "tasks").
+		loadedTypes = [1]bool{
+			tlq.withTasks != nil,
+		}
+	)
+	if tlq.withTasks != nil {
+		withFKs = true
+	}
+	if withFKs {
+		_spec.Node.Columns = append(_spec.Node.Columns, tasklog.ForeignKeys...)
+	}
+	_spec.ScanValues = func(columns []string) ([]any, error) {
+		return (*TaskLog).scanValues(nil, columns)
+	}
+	_spec.Assign = func(columns []string, values []any) error {
+		node := &TaskLog{config: tlq.config}
+		nodes = append(nodes, node)
+		node.Edges.loadedTypes = loadedTypes
+		return node.assignValues(columns, values)
+	}
+	for i := range hooks {
+		hooks[i](ctx, _spec)
+	}
+	if err := sqlgraph.QueryNodes(ctx, tlq.driver, _spec); err != nil {
+		return nil, err
+	}
+	if len(nodes) == 0 {
+		return nodes, nil
+	}
+	if query := tlq.withTasks; query != nil {
+		if err := tlq.loadTasks(ctx, query, nodes, nil,
+			func(n *TaskLog, e *Task) { n.Edges.Tasks = e }); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+// loadTasks eager-loads the M2O "tasks" edge for the given TaskLog nodes:
+// it collects the distinct parent-Task FKs, fetches all parents in one query,
+// and assigns each parent to every log that references it. Logs with a nil FK
+// are skipped; init is unused for this M2O edge (callers pass nil).
+func (tlq *TaskLogQuery) loadTasks(ctx context.Context, query *TaskQuery, nodes []*TaskLog, init func(*TaskLog), assign func(*TaskLog, *Task)) error {
+	ids := make([]uint64, 0, len(nodes))
+	nodeids := make(map[uint64][]*TaskLog)
+	for i := range nodes {
+		if nodes[i].task_task_logs == nil {
+			continue
+		}
+		fk := *nodes[i].task_task_logs
+		if _, ok := nodeids[fk]; !ok {
+			ids = append(ids, fk)
+		}
+		nodeids[fk] = append(nodeids[fk], nodes[i])
+	}
+	if len(ids) == 0 {
+		return nil
+	}
+	query.Where(task.IDIn(ids...))
+	neighbors, err := query.All(ctx)
+	if err != nil {
+		return err
+	}
+	for _, n := range neighbors {
+		nodes, ok := nodeids[n.ID]
+		if !ok {
+			return fmt.Errorf(`unexpected foreign-key "task_task_logs" returned %v`, n.ID)
+		}
+		for i := range nodes {
+			assign(nodes[i], n)
+		}
+	}
+	return nil
+}
+
+func (tlq *TaskLogQuery) sqlCount(ctx context.Context) (int, error) {
+	_spec := tlq.querySpec()
+	_spec.Node.Columns = tlq.ctx.Fields
+	if len(tlq.ctx.Fields) > 0 {
+		_spec.Unique = tlq.ctx.Unique != nil && *tlq.ctx.Unique
+	}
+	return sqlgraph.CountNodes(ctx, tlq.driver, _spec)
+}
+
+// querySpec assembles the sqlgraph.QuerySpec (columns, predicates, ordering,
+// limit/offset) from the builder state. Uniqueness defaults to true when the
+// query is part of an edge traversal (tlq.path != nil) to avoid duplicates
+// introduced by joins.
+func (tlq *TaskLogQuery) querySpec() *sqlgraph.QuerySpec {
+	_spec := sqlgraph.NewQuerySpec(tasklog.Table, tasklog.Columns, sqlgraph.NewFieldSpec(tasklog.FieldID, field.TypeUint64))
+	_spec.From = tlq.sql
+	if unique := tlq.ctx.Unique; unique != nil {
+		_spec.Unique = *unique
+	} else if tlq.path != nil {
+		_spec.Unique = true
+	}
+	if fields := tlq.ctx.Fields; len(fields) > 0 {
+		// The ID column is always selected first; user fields follow, skipping
+		// a duplicate ID if it was requested explicitly.
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, tasklog.FieldID)
+		for i := range fields {
+			if fields[i] != tasklog.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
+			}
+		}
+	}
+	if ps := tlq.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if limit := tlq.ctx.Limit; limit != nil {
+		_spec.Limit = *limit
+	}
+	if offset := tlq.ctx.Offset; offset != nil {
+		_spec.Offset = *offset
+	}
+	if ps := tlq.order; len(ps) > 0 {
+		_spec.Order = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	return _spec
+}
+
+// sqlQuery builds the raw SELECT used by GroupBy/Select scans. It reuses an
+// existing traversal selector (tlq.sql) when present, applies DISTINCT when
+// uniqueness was requested, and appends predicates, ordering, and paging.
+func (tlq *TaskLogQuery) sqlQuery(ctx context.Context) *sql.Selector {
+	builder := sql.Dialect(tlq.driver.Dialect())
+	t1 := builder.Table(tasklog.Table)
+	columns := tlq.ctx.Fields
+	if len(columns) == 0 {
+		columns = tasklog.Columns
+	}
+	selector := builder.Select(t1.Columns(columns...)...).From(t1)
+	if tlq.sql != nil {
+		selector = tlq.sql
+		selector.Select(selector.Columns(columns...)...)
+	}
+	if tlq.ctx.Unique != nil && *tlq.ctx.Unique {
+		selector.Distinct()
+	}
+	for _, p := range tlq.predicates {
+		p(selector)
+	}
+	for _, p := range tlq.order {
+		p(selector)
+	}
+	if offset := tlq.ctx.Offset; offset != nil {
+		// limit is mandatory for offset clause. We start
+		// with default value, and override it below if needed.
+		selector.Offset(*offset).Limit(math.MaxInt32)
+	}
+	if limit := tlq.ctx.Limit; limit != nil {
+		selector.Limit(*limit)
+	}
+	return selector
+}
+
+// TaskLogGroupBy is the group-by builder for TaskLog entities.
+type TaskLogGroupBy struct {
+	selector
+	build *TaskLogQuery
+}
+
+// Aggregate adds the given aggregation functions to the group-by query.
+func (tlgb *TaskLogGroupBy) Aggregate(fns ...AggregateFunc) *TaskLogGroupBy {
+	tlgb.fns = append(tlgb.fns, fns...)
+	return tlgb
+}
+
+// Scan applies the selector query and scans the result into the given value.
+func (tlgb *TaskLogGroupBy) Scan(ctx context.Context, v any) error {
+	ctx = setContextOp(ctx, tlgb.build.ctx, "GroupBy")
+	if err := tlgb.build.prepareQuery(ctx); err != nil {
+		return err
+	}
+	return scanWithInterceptors[*TaskLogQuery, *TaskLogGroupBy](ctx, tlgb.build, tlgb, tlgb.build.inters, v)
+}
+
+func (tlgb *TaskLogGroupBy) sqlScan(ctx context.Context, root *TaskLogQuery, v any) error {
+	selector := root.sqlQuery(ctx).Select()
+	aggregation := make([]string, 0, len(tlgb.fns))
+	for _, fn := range tlgb.fns {
+		aggregation = append(aggregation, fn(selector))
+	}
+	if len(selector.SelectedColumns()) == 0 {
+		columns := make([]string, 0, len(*tlgb.flds)+len(tlgb.fns))
+		for _, f := range *tlgb.flds {
+			columns = append(columns, selector.C(f))
+		}
+		columns = append(columns, aggregation...)
+		selector.Select(columns...)
+	}
+	selector.GroupBy(selector.Columns(*tlgb.flds...)...)
+	if err := selector.Err(); err != nil {
+		return err
+	}
+	rows := &sql.Rows{}
+	query, args := selector.Query()
+	if err := tlgb.build.driver.Query(ctx, query, args, rows); err != nil {
+		return err
+	}
+	defer rows.Close()
+	return sql.ScanSlice(rows, v)
+}
+
+// TaskLogSelect is the builder for selecting fields of TaskLog entities.
+type TaskLogSelect struct {
+	*TaskLogQuery
+	selector
+}
+
+// Aggregate adds the given aggregation functions to the selector query.
+func (tls *TaskLogSelect) Aggregate(fns ...AggregateFunc) *TaskLogSelect {
+	tls.fns = append(tls.fns, fns...)
+	return tls
+}
+
+// Scan applies the selector query and scans the result into the given value.
+func (tls *TaskLogSelect) Scan(ctx context.Context, v any) error {
+	ctx = setContextOp(ctx, tls.ctx, "Select")
+	if err := tls.prepareQuery(ctx); err != nil {
+		return err
+	}
+	return scanWithInterceptors[*TaskLogQuery, *TaskLogSelect](ctx, tls.TaskLogQuery, tls, tls.inters, v)
+}
+
+func (tls *TaskLogSelect) sqlScan(ctx context.Context, root *TaskLogQuery, v any) error {
+	selector := root.sqlQuery(ctx)
+	aggregation := make([]string, 0, len(tls.fns))
+	for _, fn := range tls.fns {
+		aggregation = append(aggregation, fn(selector))
+	}
+	switch n := len(*tls.selector.flds); {
+	case n == 0 && len(aggregation) > 0:
+		selector.Select(aggregation...)
+	case n != 0 && len(aggregation) > 0:
+		selector.AppendSelect(aggregation...)
+	}
+	rows := &sql.Rows{}
+	query, args := selector.Query()
+	if err := tls.driver.Query(ctx, query, args, rows); err != nil {
+		return err
+	}
+	defer rows.Close()
+	return sql.ScanSlice(rows, v)
+}

+ 373 - 0
ent/tasklog_update.go

@@ -0,0 +1,373 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/suyuan32/simple-admin-job/ent/predicate"
+	"github.com/suyuan32/simple-admin-job/ent/task"
+	"github.com/suyuan32/simple-admin-job/ent/tasklog"
+)
+
+// TaskLogUpdate is the builder for updating TaskLog entities.
+type TaskLogUpdate struct {
+	config
+	hooks    []Hook
+	mutation *TaskLogMutation
+}
+
+// Where appends a list of predicates to the TaskLogUpdate builder.
+func (tlu *TaskLogUpdate) Where(ps ...predicate.TaskLog) *TaskLogUpdate {
+	tlu.mutation.Where(ps...)
+	return tlu
+}
+
+// SetFinishedAt sets the "finished_at" field.
+func (tlu *TaskLogUpdate) SetFinishedAt(t time.Time) *TaskLogUpdate {
+	tlu.mutation.SetFinishedAt(t)
+	return tlu
+}
+
+// SetNillableFinishedAt sets the "finished_at" field if the given value is not nil.
+func (tlu *TaskLogUpdate) SetNillableFinishedAt(t *time.Time) *TaskLogUpdate {
+	if t != nil {
+		tlu.SetFinishedAt(*t)
+	}
+	return tlu
+}
+
+// SetResult sets the "result" field.
+func (tlu *TaskLogUpdate) SetResult(u uint8) *TaskLogUpdate {
+	tlu.mutation.ResetResult()
+	tlu.mutation.SetResult(u)
+	return tlu
+}
+
+// SetNillableResult sets the "result" field if the given value is not nil.
+func (tlu *TaskLogUpdate) SetNillableResult(u *uint8) *TaskLogUpdate {
+	if u != nil {
+		tlu.SetResult(*u)
+	}
+	return tlu
+}
+
+// AddResult adds u to the "result" field.
+func (tlu *TaskLogUpdate) AddResult(u int8) *TaskLogUpdate {
+	tlu.mutation.AddResult(u)
+	return tlu
+}
+
+// SetTasksID sets the "tasks" edge to the Task entity by ID.
+func (tlu *TaskLogUpdate) SetTasksID(id uint64) *TaskLogUpdate {
+	tlu.mutation.SetTasksID(id)
+	return tlu
+}
+
+// SetNillableTasksID sets the "tasks" edge to the Task entity by ID if the given value is not nil.
+func (tlu *TaskLogUpdate) SetNillableTasksID(id *uint64) *TaskLogUpdate {
+	if id != nil {
+		tlu = tlu.SetTasksID(*id)
+	}
+	return tlu
+}
+
+// SetTasks sets the "tasks" edge to the Task entity.
+func (tlu *TaskLogUpdate) SetTasks(t *Task) *TaskLogUpdate {
+	return tlu.SetTasksID(t.ID)
+}
+
+// Mutation returns the TaskLogMutation object of the builder.
+func (tlu *TaskLogUpdate) Mutation() *TaskLogMutation {
+	return tlu.mutation
+}
+
+// ClearTasks clears the "tasks" edge to the Task entity.
+func (tlu *TaskLogUpdate) ClearTasks() *TaskLogUpdate {
+	tlu.mutation.ClearTasks()
+	return tlu
+}
+
+// Save executes the query and returns the number of nodes affected by the update operation.
+func (tlu *TaskLogUpdate) Save(ctx context.Context) (int, error) {
+	return withHooks(ctx, tlu.sqlSave, tlu.mutation, tlu.hooks)
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (tlu *TaskLogUpdate) SaveX(ctx context.Context) int {
+	affected, err := tlu.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return affected
+}
+
+// Exec executes the query.
+func (tlu *TaskLogUpdate) Exec(ctx context.Context) error {
+	_, err := tlu.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (tlu *TaskLogUpdate) ExecX(ctx context.Context) {
+	if err := tlu.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+func (tlu *TaskLogUpdate) sqlSave(ctx context.Context) (n int, err error) {
+	_spec := sqlgraph.NewUpdateSpec(tasklog.Table, tasklog.Columns, sqlgraph.NewFieldSpec(tasklog.FieldID, field.TypeUint64))
+	if ps := tlu.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if value, ok := tlu.mutation.FinishedAt(); ok {
+		_spec.SetField(tasklog.FieldFinishedAt, field.TypeTime, value)
+	}
+	if value, ok := tlu.mutation.Result(); ok {
+		_spec.SetField(tasklog.FieldResult, field.TypeUint8, value)
+	}
+	if value, ok := tlu.mutation.AddedResult(); ok {
+		_spec.AddField(tasklog.FieldResult, field.TypeUint8, value)
+	}
+	if tlu.mutation.TasksCleared() {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.M2O,
+			Inverse: true,
+			Table:   tasklog.TasksTable,
+			Columns: []string{tasklog.TasksColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(task.FieldID, field.TypeUint64),
+			},
+		}
+		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+	}
+	if nodes := tlu.mutation.TasksIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.M2O,
+			Inverse: true,
+			Table:   tasklog.TasksTable,
+			Columns: []string{tasklog.TasksColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(task.FieldID, field.TypeUint64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges.Add = append(_spec.Edges.Add, edge)
+	}
+	if n, err = sqlgraph.UpdateNodes(ctx, tlu.driver, _spec); err != nil {
+		if _, ok := err.(*sqlgraph.NotFoundError); ok {
+			err = &NotFoundError{tasklog.Label}
+		} else if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return 0, err
+	}
+	tlu.mutation.done = true
+	return n, nil
+}
+
+// TaskLogUpdateOne is the builder for updating a single TaskLog entity.
+type TaskLogUpdateOne struct {
+	config
+	fields   []string
+	hooks    []Hook
+	mutation *TaskLogMutation
+}
+
+// SetFinishedAt sets the "finished_at" field.
+func (tluo *TaskLogUpdateOne) SetFinishedAt(t time.Time) *TaskLogUpdateOne {
+	tluo.mutation.SetFinishedAt(t)
+	return tluo
+}
+
+// SetNillableFinishedAt sets the "finished_at" field if the given value is not nil.
+func (tluo *TaskLogUpdateOne) SetNillableFinishedAt(t *time.Time) *TaskLogUpdateOne {
+	if t != nil {
+		tluo.SetFinishedAt(*t)
+	}
+	return tluo
+}
+
+// SetResult sets the "result" field.
+func (tluo *TaskLogUpdateOne) SetResult(u uint8) *TaskLogUpdateOne {
+	tluo.mutation.ResetResult()
+	tluo.mutation.SetResult(u)
+	return tluo
+}
+
+// SetNillableResult sets the "result" field if the given value is not nil.
+func (tluo *TaskLogUpdateOne) SetNillableResult(u *uint8) *TaskLogUpdateOne {
+	if u != nil {
+		tluo.SetResult(*u)
+	}
+	return tluo
+}
+
+// AddResult adds u to the "result" field.
+func (tluo *TaskLogUpdateOne) AddResult(u int8) *TaskLogUpdateOne {
+	tluo.mutation.AddResult(u)
+	return tluo
+}
+
+// SetTasksID sets the "tasks" edge to the Task entity by ID.
+func (tluo *TaskLogUpdateOne) SetTasksID(id uint64) *TaskLogUpdateOne {
+	tluo.mutation.SetTasksID(id)
+	return tluo
+}
+
+// SetNillableTasksID sets the "tasks" edge to the Task entity by ID if the given value is not nil.
+func (tluo *TaskLogUpdateOne) SetNillableTasksID(id *uint64) *TaskLogUpdateOne {
+	if id != nil {
+		tluo = tluo.SetTasksID(*id)
+	}
+	return tluo
+}
+
+// SetTasks sets the "tasks" edge to the Task entity.
+func (tluo *TaskLogUpdateOne) SetTasks(t *Task) *TaskLogUpdateOne {
+	return tluo.SetTasksID(t.ID)
+}
+
+// Mutation returns the TaskLogMutation object of the builder.
+func (tluo *TaskLogUpdateOne) Mutation() *TaskLogMutation {
+	return tluo.mutation
+}
+
+// ClearTasks clears the "tasks" edge to the Task entity.
+func (tluo *TaskLogUpdateOne) ClearTasks() *TaskLogUpdateOne {
+	tluo.mutation.ClearTasks()
+	return tluo
+}
+
+// Where appends a list of predicates to the TaskLogUpdateOne builder.
+func (tluo *TaskLogUpdateOne) Where(ps ...predicate.TaskLog) *TaskLogUpdateOne {
+	tluo.mutation.Where(ps...)
+	return tluo
+}
+
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (tluo *TaskLogUpdateOne) Select(field string, fields ...string) *TaskLogUpdateOne {
+	tluo.fields = append([]string{field}, fields...)
+	return tluo
+}
+
+// Save executes the query and returns the updated TaskLog entity.
+func (tluo *TaskLogUpdateOne) Save(ctx context.Context) (*TaskLog, error) {
+	return withHooks(ctx, tluo.sqlSave, tluo.mutation, tluo.hooks)
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (tluo *TaskLogUpdateOne) SaveX(ctx context.Context) *TaskLog {
+	node, err := tluo.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return node
+}
+
+// Exec executes the query on the entity.
+func (tluo *TaskLogUpdateOne) Exec(ctx context.Context) error {
+	_, err := tluo.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (tluo *TaskLogUpdateOne) ExecX(ctx context.Context) {
+	if err := tluo.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+func (tluo *TaskLogUpdateOne) sqlSave(ctx context.Context) (_node *TaskLog, err error) {
+	_spec := sqlgraph.NewUpdateSpec(tasklog.Table, tasklog.Columns, sqlgraph.NewFieldSpec(tasklog.FieldID, field.TypeUint64))
+	id, ok := tluo.mutation.ID()
+	if !ok {
+		return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "TaskLog.id" for update`)}
+	}
+	_spec.Node.ID.Value = id
+	if fields := tluo.fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, tasklog.FieldID)
+		for _, f := range fields {
+			if !tasklog.ValidColumn(f) {
+				return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
+			}
+			if f != tasklog.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, f)
+			}
+		}
+	}
+	if ps := tluo.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if value, ok := tluo.mutation.FinishedAt(); ok {
+		_spec.SetField(tasklog.FieldFinishedAt, field.TypeTime, value)
+	}
+	if value, ok := tluo.mutation.Result(); ok {
+		_spec.SetField(tasklog.FieldResult, field.TypeUint8, value)
+	}
+	if value, ok := tluo.mutation.AddedResult(); ok {
+		_spec.AddField(tasklog.FieldResult, field.TypeUint8, value)
+	}
+	if tluo.mutation.TasksCleared() {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.M2O,
+			Inverse: true,
+			Table:   tasklog.TasksTable,
+			Columns: []string{tasklog.TasksColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(task.FieldID, field.TypeUint64),
+			},
+		}
+		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+	}
+	if nodes := tluo.mutation.TasksIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.M2O,
+			Inverse: true,
+			Table:   tasklog.TasksTable,
+			Columns: []string{tasklog.TasksColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(task.FieldID, field.TypeUint64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges.Add = append(_spec.Edges.Add, edge)
+	}
+	_node = &TaskLog{config: tluo.config}
+	_spec.Assign = _node.assignValues
+	_spec.ScanValues = _node.scanValues
+	if err = sqlgraph.UpdateNode(ctx, tluo.driver, _spec); err != nil {
+		if _, ok := err.(*sqlgraph.NotFoundError); ok {
+			err = &NotFoundError{tasklog.Label}
+		} else if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return nil, err
+	}
+	tluo.mutation.done = true
+	return _node, nil
+}

+ 162 - 0
ent/template/pagination.tmpl

@@ -0,0 +1,162 @@
+{{ define "pagination" }}
+    {{- /*gotype: entgo.io/ent/entc/gen.Graph*/ -}}
+
+    {{ template "header" $ }}
+    {{ $pkg := base $.Config.Package }}
+    {{ template "import" $ }}
+
+    const errInvalidPage = "INVALID_PAGE"
+
+    const (
+        listField     = "list"
+        pageNumField  = "pageNum"
+        pageSizeField = "pageSize"
+    )
+
+    // PageDetails carries the pagination metadata returned alongside every page list.
+    type PageDetails struct {
+        Page  uint64 `json:"page"`
+        Size  uint64 `json:"size"`
+        Total uint64 `json:"total"`
+    }
+
+    // OrderDirection defines the directions in which to order a list of items.
+    type OrderDirection string
+
+    const (
+        // OrderDirectionAsc specifies an ascending order.
+        OrderDirectionAsc OrderDirection = "ASC"
+        // OrderDirectionDesc specifies a descending order.
+        OrderDirectionDesc OrderDirection = "DESC"
+    )
+
+    // Validate the order direction value.
+    func (o OrderDirection) Validate() error {
+        if o != OrderDirectionAsc && o != OrderDirectionDesc {
+            return fmt.Errorf("%s is not a valid OrderDirection", o)
+        }
+        return nil
+    }
+
+    // String implements fmt.Stringer interface.
+    func (o OrderDirection) String() string {
+        return string(o)
+    }
+
+    func (o OrderDirection) reverse() OrderDirection {
+        if o == OrderDirectionDesc {
+            return OrderDirectionAsc
+        }
+        return OrderDirectionDesc
+    }
+
+    const errInvalidPagination = "INVALID_PAGINATION"
+
+    {{ range $node := $.Nodes -}}
+        {{- if ne $node.Name "CasbinRule" }}
+        {{ $pager := print $node.Name "Pager" }}
+        {{ $order := print $node.Name "Order" }}
+        {{ $query := print $node.Name "Query" }}
+        {{ $orderField := print $node.Name "OrderField" }}
+        // {{ $pager }} holds the ordering and filtering applied while paging {{ $node.Name }} entities.
+        type {{ $pager }} struct {
+            Order  {{ lower $node.Name }}.OrderOption
+            Filter func(*{{ $query }}) (*{{ $query }}, error)
+        }
+
+        {{ $opt := print $node.Name "PaginateOption" }}
+        // {{ $opt }} enables pagination customization.
+        type {{ $opt }} func(*{{ $pager }})
+
+        {{ $newPager := print "new" $node.Name "Pager" -}}
+        {{- $defaultOrder := print "Default" $node.Name "Order" }}
+
+        {{/* Only the first field is inspected: with an explicit ID field the
+             default order is by ID, otherwise by the first schema field. */}}
+        {{ range $f := $node.Fields -}}
+            {{- if eq $node.HasOneFieldID true }}
+        // {{ $defaultOrder }} is the default ordering of {{ $node.Name }}.
+        var {{ $defaultOrder }} = Desc({{ lower $node.Name }}.FieldID)
+            {{- break }}
+            {{- else }}
+        // {{ $defaultOrder }} is the default ordering of {{ $node.Name }}.
+        var {{ $defaultOrder }} = Desc({{ lower $node.Name }}.Field{{ $f.StructField }})
+            {{- break }}
+            {{- end }}
+        {{ end }}
+
+        // {{ $newPager }} builds a pager from the given options, falling back to
+        // {{ $defaultOrder }} when no explicit order was supplied.
+        func {{ $newPager }}(opts []{{ $opt }}) (*{{ $pager }}, error) {
+            pager := &{{ $pager }}{}
+            for _, opt := range opts {
+                opt(pager)
+            }
+            if pager.Order == nil {
+                pager.Order = {{ $defaultOrder }}
+            }
+            return pager, nil
+        }
+
+        // ApplyFilter runs the pager's optional filter against the query.
+        func (p *{{ $pager }}) ApplyFilter(query *{{ $query }}) (*{{ $query }}, error) {
+            if p.Filter != nil {
+                return p.Filter(query)
+            }
+            return query, nil
+        }
+
+        {{ $pageList := print $node.Name "PageList" -}}
+        {{ $name := $node.Name }}
+
+        // {{ $pageList }} is {{ $name }} PageList result.
+        type {{ $pageList }} struct {
+            List        []*{{ $name }} `json:"list"`
+            PageDetails *PageDetails   `json:"pageDetails"`
+        }
+
+        {{ $r := $node.Receiver -}}
+        {{ $queryName := print $node.QueryName -}}
+
+        // Page executes the query and returns page pageNum (1-based) with at most
+        // pageSize records, together with the total count of matching rows.
+        // NOTE(review): pageNum == 0 underflows the offset computation below —
+        // callers must pass 1-based page numbers; confirm upstream validation.
+        func ({{ $r }} *{{ $queryName }}) Page(
+            ctx context.Context, pageNum uint64, pageSize uint64, opts ...{{ $opt }},
+        ) (*{{ $pageList }}, error) {
+            pager, err := {{ $newPager }}(opts)
+            if err != nil {
+                return nil, err
+            }
+            if {{ $r }}, err = pager.ApplyFilter({{ $r }}); err != nil {
+                return nil, err
+            }
+
+            ret := &{{ $pageList }}{}
+            ret.PageDetails = &PageDetails{
+                Page: pageNum,
+                Size: pageSize,
+            }
+
+            // Count on a clone so the limit/offset applied below do not affect the total.
+            count, err := {{ $r }}.Clone().Count(ctx)
+            if err != nil {
+                return nil, err
+            }
+            ret.PageDetails.Total = uint64(count)
+
+            // pager.Order is guaranteed non-nil by {{ $newPager }}, so the former
+            // nil-check here was a dead branch and has been removed.
+            {{ $r }} = {{ $r }}.Order(pager.Order)
+
+            {{ $r }} = {{ $r }}.Offset(int((pageNum - 1) * pageSize)).Limit(int(pageSize))
+            list, err := {{ $r }}.All(ctx)
+            if err != nil {
+                return nil, err
+            }
+            ret.List = list
+
+            return ret, nil
+        }
+    {{- end}}
+    {{- end}}
+{{- end}}

+ 26 - 0
ent/template/set_not_nil.tmpl

@@ -0,0 +1,26 @@
+{{/* gotype: entgo.io/ent/entc/gen.Graph */}}
+
+
+{{ define "set_not_nil" }}
+
+    {{/* Add the base header for the generated file */}}
+    {{ $pkg := base $.Config.Package }}
+    {{ template "header" $ }}
+
+    {{/* Loop over all updaters and implement the "SetNotNil" method for all optional fields */}}
+    {{ range $n := $.Nodes }}
+        {{ range $f := $n.MutableFields }}
+            {{ $set := print "Set" $f.StructField }}
+
+            {{ range $updater := list $n.UpdateName $n.UpdateOneName $n.CreateName}}
+                // SetNotNil{{ $f.StructField }} sets the "{{ $f.Name }}" field if the given value is not nil.
+                func ({{ $n.Receiver }} *{{ $updater }}) SetNotNil{{ $f.StructField }}(value *{{ $f.Type }}) *{{ $updater }} {
+                if value != nil  {
+                return {{ $n.Receiver }}.{{ $set }}(*value)
+                }
+                return {{ $n.Receiver }}
+                }
+            {{ end }}
+        {{ end }}
+    {{ end }}
+{{ end }}

+ 239 - 0
ent/tx.go

@@ -0,0 +1,239 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	stdsql "database/sql"
+	"fmt"
+	"sync"
+
+	"entgo.io/ent/dialect"
+)
+
+// Tx is a transactional client that is created by calling Client.Tx().
+type Tx struct {
+	config
+	// Task is the client for interacting with the Task builders.
+	Task *TaskClient
+	// TaskLog is the client for interacting with the TaskLog builders.
+	TaskLog *TaskLogClient
+
+	// lazily loaded.
+	client     *Client
+	clientOnce sync.Once
+	// ctx lives for the life of the transaction. It is
+	// the same context used by the underlying connection.
+	ctx context.Context
+}
+
+type (
+	// Committer is the interface that wraps the Commit method.
+	Committer interface {
+		Commit(context.Context, *Tx) error
+	}
+
+	// The CommitFunc type is an adapter to allow the use of ordinary
+	// function as a Committer. If f is a function with the appropriate
+	// signature, CommitFunc(f) is a Committer that calls f.
+	CommitFunc func(context.Context, *Tx) error
+
+	// CommitHook defines the "commit middleware". A function that gets a Committer
+	// and returns a Committer. For example:
+	//
+	//	hook := func(next ent.Committer) ent.Committer {
+	//		return ent.CommitFunc(func(ctx context.Context, tx *ent.Tx) error {
+	//			// Do some stuff before.
+	//			if err := next.Commit(ctx, tx); err != nil {
+	//				return err
+	//			}
+	//			// Do some stuff after.
+	//			return nil
+	//		})
+	//	}
+	//
+	CommitHook func(Committer) Committer
+)
+
+// Commit calls f(ctx, m).
+func (f CommitFunc) Commit(ctx context.Context, tx *Tx) error {
+	return f(ctx, tx)
+}
+
+// Commit commits the transaction.
+func (tx *Tx) Commit() error {
+	txDriver := tx.config.driver.(*txDriver)
+	var fn Committer = CommitFunc(func(context.Context, *Tx) error {
+		return txDriver.tx.Commit()
+	})
+	txDriver.mu.Lock()
+	hooks := append([]CommitHook(nil), txDriver.onCommit...)
+	txDriver.mu.Unlock()
+	for i := len(hooks) - 1; i >= 0; i-- {
+		fn = hooks[i](fn)
+	}
+	return fn.Commit(tx.ctx, tx)
+}
+
+// OnCommit adds a hook to call on commit.
+func (tx *Tx) OnCommit(f CommitHook) {
+	txDriver := tx.config.driver.(*txDriver)
+	txDriver.mu.Lock()
+	txDriver.onCommit = append(txDriver.onCommit, f)
+	txDriver.mu.Unlock()
+}
+
+type (
+	// Rollbacker is the interface that wraps the Rollback method.
+	Rollbacker interface {
+		Rollback(context.Context, *Tx) error
+	}
+
+	// The RollbackFunc type is an adapter to allow the use of ordinary
+	// function as a Rollbacker. If f is a function with the appropriate
+	// signature, RollbackFunc(f) is a Rollbacker that calls f.
+	RollbackFunc func(context.Context, *Tx) error
+
+	// RollbackHook defines the "rollback middleware". A function that gets a Rollbacker
+	// and returns a Rollbacker. For example:
+	//
+	//	hook := func(next ent.Rollbacker) ent.Rollbacker {
+	//		return ent.RollbackFunc(func(ctx context.Context, tx *ent.Tx) error {
+	//			// Do some stuff before.
+	//			if err := next.Rollback(ctx, tx); err != nil {
+	//				return err
+	//			}
+	//			// Do some stuff after.
+	//			return nil
+	//		})
+	//	}
+	//
+	RollbackHook func(Rollbacker) Rollbacker
+)
+
+// Rollback calls f(ctx, m).
+func (f RollbackFunc) Rollback(ctx context.Context, tx *Tx) error {
+	return f(ctx, tx)
+}
+
+// Rollback rollbacks the transaction.
+func (tx *Tx) Rollback() error {
+	txDriver := tx.config.driver.(*txDriver)
+	var fn Rollbacker = RollbackFunc(func(context.Context, *Tx) error {
+		return txDriver.tx.Rollback()
+	})
+	txDriver.mu.Lock()
+	hooks := append([]RollbackHook(nil), txDriver.onRollback...)
+	txDriver.mu.Unlock()
+	for i := len(hooks) - 1; i >= 0; i-- {
+		fn = hooks[i](fn)
+	}
+	return fn.Rollback(tx.ctx, tx)
+}
+
+// OnRollback adds a hook to call on rollback.
+func (tx *Tx) OnRollback(f RollbackHook) {
+	txDriver := tx.config.driver.(*txDriver)
+	txDriver.mu.Lock()
+	txDriver.onRollback = append(txDriver.onRollback, f)
+	txDriver.mu.Unlock()
+}
+
+// Client returns a Client that binds to current transaction.
+func (tx *Tx) Client() *Client {
+	tx.clientOnce.Do(func() {
+		tx.client = &Client{config: tx.config}
+		tx.client.init()
+	})
+	return tx.client
+}
+
+func (tx *Tx) init() {
+	tx.Task = NewTaskClient(tx.config)
+	tx.TaskLog = NewTaskLogClient(tx.config)
+}
+
+// txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation.
+// The idea is to support transactions without adding any extra code to the builders.
+// When a builder calls to driver.Tx(), it gets the same dialect.Tx instance.
+// Commit and Rollback are nop for the internal builders and the user must call one
+// of them in order to commit or rollback the transaction.
+//
+// If a closed transaction is embedded in one of the generated entities, and the entity
+// applies a query, for example: Task.QueryXXX(), the query will be executed
+// through the driver which created this transaction.
+//
+// Note that txDriver is not goroutine safe.
+type txDriver struct {
+	// the driver we started the transaction from.
+	drv dialect.Driver
+	// tx is the underlying transaction.
+	tx dialect.Tx
+	// completion hooks.
+	mu         sync.Mutex
+	onCommit   []CommitHook
+	onRollback []RollbackHook
+}
+
+// newTx creates a new transactional driver.
+func newTx(ctx context.Context, drv dialect.Driver) (*txDriver, error) {
+	tx, err := drv.Tx(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return &txDriver{tx: tx, drv: drv}, nil
+}
+
+// Tx returns the transaction wrapper (txDriver) to avoid Commit or Rollback calls
+// from the internal builders. Should be called only by the internal builders.
+func (tx *txDriver) Tx(context.Context) (dialect.Tx, error) { return tx, nil }
+
+// Dialect returns the dialect of the driver we started the transaction from.
+func (tx *txDriver) Dialect() string { return tx.drv.Dialect() }
+
+// Close is a nop close.
+func (*txDriver) Close() error { return nil }
+
+// Commit is a nop commit for the internal builders.
+// User must call `Tx.Commit` in order to commit the transaction.
+func (*txDriver) Commit() error { return nil }
+
+// Rollback is a nop rollback for the internal builders.
+// User must call `Tx.Rollback` in order to rollback the transaction.
+func (*txDriver) Rollback() error { return nil }
+
+// Exec calls tx.Exec.
+func (tx *txDriver) Exec(ctx context.Context, query string, args, v any) error {
+	return tx.tx.Exec(ctx, query, args, v)
+}
+
+// Query calls tx.Query.
+func (tx *txDriver) Query(ctx context.Context, query string, args, v any) error {
+	return tx.tx.Query(ctx, query, args, v)
+}
+
+var _ dialect.Driver = (*txDriver)(nil)
+
+// ExecContext allows calling the underlying ExecContext method of the transaction if it is supported by it.
+// See, database/sql#Tx.ExecContext for more information.
+func (tx *txDriver) ExecContext(ctx context.Context, query string, args ...any) (stdsql.Result, error) {
+	ex, ok := tx.tx.(interface {
+		ExecContext(context.Context, string, ...any) (stdsql.Result, error)
+	})
+	if !ok {
+		return nil, fmt.Errorf("Tx.ExecContext is not supported")
+	}
+	return ex.ExecContext(ctx, query, args...)
+}
+
+// QueryContext allows calling the underlying QueryContext method of the transaction if it is supported by it.
+// See, database/sql#Tx.QueryContext for more information.
+func (tx *txDriver) QueryContext(ctx context.Context, query string, args ...any) (*stdsql.Rows, error) {
+	q, ok := tx.tx.(interface {
+		QueryContext(context.Context, string, ...any) (*stdsql.Rows, error)
+	})
+	if !ok {
+		return nil, fmt.Errorf("Tx.QueryContext is not supported")
+	}
+	return q.QueryContext(ctx, query, args...)
+}

+ 46 - 0
etc/job.yaml

@@ -0,0 +1,46 @@
+Name: job.rpc
+ListenOn: 0.0.0.0:9105
+
+DatabaseConf:
+  Type: mysql
+  Host: 127.0.0.1
+  Port: 3306
+  DBName: wechat-job
+  Username: root
+  Password: p@ssw0rd123456
+  MaxOpenConn: 100
+  SSLMode: disable
+  CacheTime: 5
+
+RedisConf:
+  Host: 127.0.0.1:6379
+  Db: 0
+
+Log:
+  ServiceName: jobRpcLogger
+  Mode: console
+  Path: D:\code\gooki\sa\logs\job\rpc
+  Encoding: json
+  Level: info
+  Compress: false
+  KeepDays: 7
+  StackCoolDownMillis: 100
+
+#Prometheus:
+#  Host: 0.0.0.0
+#  Port: 4005
+#  Path: /metrics
+
+AsynqConf:
+# if you do not use WithRedisConf, you should uncomment the configuration below
+
+#  Addr: localhost:6379  # Redis address
+#  Pass: # Redis Password
+#  DB: 0 # Redis database index
+#  Concurrency: 20 # max concurrent process job task num
+#  SyncInterval: 10 # seconds, this field specifies how often sync should happen
+  Enable: true
+
+TaskConf:
+  EnableScheduledTask: false
+  EnableDPTask: true

+ 126 - 0
go.mod

@@ -0,0 +1,126 @@
+module github.com/suyuan32/simple-admin-job
+
+go 1.22.0
+
+replace github.com/zeromicro/go-zero v1.6.3 => github.com/suyuan32/simple-admin-tools v1.6.9
+
+require (
+	entgo.io/ent v0.13.1
+	github.com/hibiken/asynq v0.24.1
+	github.com/redis/go-redis/v9 v9.5.1
+	github.com/suyuan32/simple-admin-common v1.3.11
+	github.com/zeromicro/go-zero v1.6.3
+	google.golang.org/grpc v1.62.1
+	google.golang.org/protobuf v1.33.0
+)
+
+require (
+	ariga.io/atlas v0.19.2 // indirect
+	filippo.io/edwards25519 v1.1.0 // indirect
+	github.com/agext/levenshtein v1.2.3 // indirect
+	github.com/andybalholm/brotli v1.1.0 // indirect
+	github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
+	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
+	github.com/cespare/xxhash/v2 v2.2.0 // indirect
+	github.com/cloudflare/circl v1.3.7 // indirect
+	github.com/coreos/go-semver v0.3.1 // indirect
+	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
+	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
+	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
+	github.com/fatih/color v1.16.0 // indirect
+	github.com/go-logr/logr v1.4.1 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/go-openapi/inflect v0.21.0 // indirect
+	github.com/go-openapi/jsonpointer v0.19.6 // indirect
+	github.com/go-openapi/jsonreference v0.20.2 // indirect
+	github.com/go-openapi/swag v0.22.3 // indirect
+	github.com/go-sql-driver/mysql v1.8.0 // indirect
+	github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
+	github.com/gofrs/uuid/v5 v5.0.0 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang/mock v1.6.0 // indirect
+	github.com/golang/protobuf v1.5.4 // indirect
+	github.com/google/gnostic-models v0.6.8 // indirect
+	github.com/google/go-cmp v0.6.0 // indirect
+	github.com/google/gofuzz v1.2.0 // indirect
+	github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 // indirect
+	github.com/google/uuid v1.6.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect
+	github.com/hashicorp/errwrap v1.1.0 // indirect
+	github.com/hashicorp/go-multierror v1.1.1 // indirect
+	github.com/hashicorp/hcl/v2 v2.20.0 // indirect
+	github.com/imroc/req/v3 v3.43.1 // indirect
+	github.com/josharian/intern v1.0.0 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/klauspost/compress v1.17.7 // indirect
+	github.com/lib/pq v1.10.9 // indirect
+	github.com/mailru/easyjson v0.7.7 // indirect
+	github.com/mattn/go-colorable v0.1.13 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
+	github.com/mattn/go-sqlite3 v1.14.22 // indirect
+	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/nicksnyder/go-i18n/v2 v2.4.0 // indirect
+	github.com/onsi/ginkgo/v2 v2.16.0 // indirect
+	github.com/openzipkin/zipkin-go v0.4.2 // indirect
+	github.com/pelletier/go-toml/v2 v2.1.1 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/prometheus/client_golang v1.19.0 // indirect
+	github.com/prometheus/client_model v0.5.0 // indirect
+	github.com/prometheus/common v0.48.0 // indirect
+	github.com/prometheus/procfs v0.12.0 // indirect
+	github.com/quic-go/qpack v0.4.0 // indirect
+	github.com/quic-go/quic-go v0.41.0 // indirect
+	github.com/refraction-networking/utls v1.6.3 // indirect
+	github.com/robfig/cron/v3 v3.0.1 // indirect
+	github.com/spaolacci/murmur3 v1.1.0 // indirect
+	github.com/spf13/cast v1.6.0 // indirect
+	github.com/zclconf/go-cty v1.14.3 // indirect
+	go.etcd.io/etcd/api/v3 v3.5.12 // indirect
+	go.etcd.io/etcd/client/pkg/v3 v3.5.12 // indirect
+	go.etcd.io/etcd/client/v3 v3.5.12 // indirect
+	go.opentelemetry.io/otel v1.24.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 // indirect
+	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.24.0 // indirect
+	go.opentelemetry.io/otel/exporters/zipkin v1.24.0 // indirect
+	go.opentelemetry.io/otel/metric v1.24.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.24.0 // indirect
+	go.opentelemetry.io/otel/trace v1.24.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.1.0 // indirect
+	go.uber.org/atomic v1.11.0 // indirect
+	go.uber.org/automaxprocs v1.5.3 // indirect
+	go.uber.org/mock v0.4.0 // indirect
+	go.uber.org/multierr v1.11.0 // indirect
+	go.uber.org/zap v1.24.0 // indirect
+	golang.org/x/crypto v0.21.0 // indirect
+	golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect
+	golang.org/x/mod v0.16.0 // indirect
+	golang.org/x/net v0.22.0 // indirect
+	golang.org/x/oauth2 v0.16.0 // indirect
+	golang.org/x/sys v0.18.0 // indirect
+	golang.org/x/term v0.18.0 // indirect
+	golang.org/x/text v0.14.0 // indirect
+	golang.org/x/time v0.5.0 // indirect
+	golang.org/x/tools v0.19.0 // indirect
+	google.golang.org/appengine v1.6.8 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c // indirect
+	gopkg.in/inf.v0 v0.9.1 // indirect
+	gopkg.in/yaml.v2 v2.4.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+	k8s.io/api v0.29.3 // indirect
+	k8s.io/apimachinery v0.29.3 // indirect
+	k8s.io/client-go v0.29.3 // indirect
+	k8s.io/klog/v2 v2.110.1 // indirect
+	k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
+	k8s.io/utils v0.0.0-20240310230437-4693a0247e57 // indirect
+	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
+	sigs.k8s.io/yaml v1.3.0 // indirect
+)

+ 393 - 0
go.sum

@@ -0,0 +1,393 @@
+ariga.io/atlas v0.19.2 h1:ulK06d4joEaMP06HNNPxdpD8dFgZGzjzjk+Mb5VfF08=
+ariga.io/atlas v0.19.2/go.mod h1:VPlcXdd4w2KqKnH54yEZcry79UAhpaWaxEsmn5JRNoE=
+entgo.io/ent v0.13.1 h1:uD8QwN1h6SNphdCCzmkMN3feSUzNnVvV/WIkHKMbzOE=
+entgo.io/ent v0.13.1/go.mod h1:qCEmo+biw3ccBn9OyL4ZK5dfpwg++l1Gxwac5B1206A=
+filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
+filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
+github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
+github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
+github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
+github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
+github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
+github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk=
+github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
+github.com/alicebob/miniredis/v2 v2.32.1 h1:Bz7CciDnYSaa0mX5xODh6GUITRSx+cVhjNoOR4JssBo=
+github.com/alicebob/miniredis/v2 v2.32.1/go.mod h1:AqkLNAfUm0K07J28hnAyyQKf/x0YkCY/g5DCtuL01Mw=
+github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
+github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
+github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY=
+github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4=
+github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bsm/ginkgo/v2 v2.7.0/go.mod h1:AiKlXPm7ItEHNc/2+OkrNG4E0ITzojb9/xWzvQ9XZ9w=
+github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
+github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
+github.com/bsm/gomega v1.26.0/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
+github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
+github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
+github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
+github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
+github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
+github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
+github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
+github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
+github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
+github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
+github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
+github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-openapi/inflect v0.21.0 h1:FoBjBTQEcbg2cJUWX6uwL9OyIW8eqc9k4KhN4lfbeYk=
+github.com/go-openapi/inflect v0.21.0/go.mod h1:INezMuUu7SJQc2AyR3WO0DqqYUJSj8Kb4hBd7WtjlAw=
+github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
+github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
+github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
+github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
+github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
+github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-sql-driver/mysql v1.8.0 h1:UtktXaU2Nb64z/pLiGIxY4431SJ4/dR5cjMmlVHgnT4=
+github.com/go-sql-driver/mysql v1.8.0/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
+github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gofrs/uuid/v5 v5.0.0 h1:p544++a97kEL+svbcFbCQVM9KFu0Yo25UoISXGNNH9M=
+github.com/gofrs/uuid/v5 v5.0.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
+github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/pprof v0.0.0-20230323073829-e72429f035bd h1:r8yyd+DJDmsUhGrRBxH5Pj7KeFK5l+Y3FsgT8keqKtk=
+github.com/google/pprof v0.0.0-20230323073829-e72429f035bd/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk=
+github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 h1:y3N7Bm7Y9/CtpiVkw/ZWj6lSlDF3F74SfKwfTCer72Q=
+github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU=
+github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=
+github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/hcl/v2 v2.20.0 h1:l++cRs/5jQOiKVvqXZm/P1ZEfVXJmvLS9WSVxkaeTb4=
+github.com/hashicorp/hcl/v2 v2.20.0/go.mod h1:WmcD/Ym72MDOOx5F62Ly+leloeu6H7m0pG7VBiU6pQk=
+github.com/hibiken/asynq v0.24.1 h1:+5iIEAyA9K/lcSPvx3qoPtsKJeKI5u9aOIvUmSsazEw=
+github.com/hibiken/asynq v0.24.1/go.mod h1:u5qVeSbrnfT+vtG5Mq8ZPzQu/BmCKMHvTGb91uy9Tts=
+github.com/imroc/req/v3 v3.43.1 h1:tsWAhvxik4egtHAvMlxcjaWJtHlJL8EpBqJMOm5rmyQ=
+github.com/imroc/req/v3 v3.43.1/go.mod h1:SQIz5iYop16MJxbo8ib+4LnostGCok8NQf8ToyQc2xA=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
+github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
+github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
+github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
+github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/nicksnyder/go-i18n/v2 v2.4.0 h1:3IcvPOAvnCKwNm0TB0dLDTuawWEj+ax/RERNC+diLMM=
+github.com/nicksnyder/go-i18n/v2 v2.4.0/go.mod h1:nxYSZE9M0bf3Y70gPQjN9ha7XNHX7gMc814+6wVyEI4=
+github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
+github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
+github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4=
+github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
+github.com/onsi/ginkgo/v2 v2.16.0 h1:7q1w9frJDzninhXxjZd+Y/x54XNjG/UlRLIYPZafsPM=
+github.com/onsi/ginkgo/v2 v2.16.0/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
+github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg=
+github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
+github.com/openzipkin/zipkin-go v0.4.2 h1:zjqfqHjUpPmB3c1GlCvvgsM1G4LkvqQbBDueDOCg/jA=
+github.com/openzipkin/zipkin-go v0.4.2/go.mod h1:ZeVkFjuuBiSy13y8vpSDCjMi9GoI3hPpCJSBx/EYFhY=
+github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI=
+github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
+github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
+github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
+github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
+github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
+github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
+github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
+github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
+github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
+github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
+github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
+github.com/quic-go/quic-go v0.41.0 h1:aD8MmHfgqTURWNJy48IYFg2OnxwHT3JL7ahGs73lb4k=
+github.com/quic-go/quic-go v0.41.0/go.mod h1:qCkNjqczPEvgsOnxZ0eCD14lv+B2LHlFAB++CNOh9hA=
+github.com/redis/go-redis/v9 v9.0.3/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk=
+github.com/redis/go-redis/v9 v9.5.1 h1:H1X4D3yHPaYrkL5X06Wh6xNVM/pX0Ft4RV0vMGvLBh8=
+github.com/redis/go-redis/v9 v9.5.1/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M=
+github.com/refraction-networking/utls v1.6.3 h1:MFOfRN35sSx6K5AZNIoESsBuBxS2LCgRilRIdHb6fDc=
+github.com/refraction-networking/utls v1.6.3/go.mod h1:yil9+7qSl+gBwJqztoQseO6Pr3h62pQoY1lXiNR/FPs=
+github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
+github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
+github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
+github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
+github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
+github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/suyuan32/simple-admin-common v1.3.11 h1:9AxqIkyAA2du5uh+BMM38iJhDSXwCzQpN/SH66eKmFg=
+github.com/suyuan32/simple-admin-common v1.3.11/go.mod h1:ZKNZ/S/pIqs6E2NRHngDhdDvx9s2n10+7F/AnpFw4AQ=
+github.com/suyuan32/simple-admin-tools v1.6.9 h1:wEHV1YeEXdyKIh5MHT73NxQaNGKZo4eLzzK2bg7Zvho=
+github.com/suyuan32/simple-admin-tools v1.6.9/go.mod h1:RtX0cyNWNEd5mquIpl4azH5dUp790bYVmLIzxELS5Pw=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M=
+github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
+github.com/zclconf/go-cty v1.14.3 h1:1JXy1XroaGrzZuG6X9dt7HL6s9AwbY+l4UNL8o5B6ho=
+github.com/zclconf/go-cty v1.14.3/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
+go.etcd.io/etcd/api/v3 v3.5.12 h1:W4sw5ZoU2Juc9gBWuLk5U6fHfNVyY1WC5g9uiXZio/c=
+go.etcd.io/etcd/api/v3 v3.5.12/go.mod h1:Ot+o0SWSyT6uHhA56al1oCED0JImsRiU9Dc26+C2a+4=
+go.etcd.io/etcd/client/pkg/v3 v3.5.12 h1:EYDL6pWwyOsylrQyLp2w+HkQ46ATiOvoEdMarindU2A=
+go.etcd.io/etcd/client/pkg/v3 v3.5.12/go.mod h1:seTzl2d9APP8R5Y2hFL3NVlD6qC/dOT+3kvrqPyTas4=
+go.etcd.io/etcd/client/v3 v3.5.12 h1:v5lCPXn1pf1Uu3M4laUE2hp/geOTc5uPcYYsNe1lDxg=
+go.etcd.io/etcd/client/v3 v3.5.12/go.mod h1:tSbBCakoWmmddL+BKVAJHa9km+O/E+bumDe9mSbPiqw=
+go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
+go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 h1:t6wl9SPayj+c7lEIFgm4ooDBZVb01IhLB4InpomhRw8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0/go.mod h1:iSDOcsnSA5INXzZtwaBPrKp/lWu/V14Dd+llD0oI2EA=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0 h1:Mw5xcxMwlqoJd97vwPxA8isEaIoxsta9/Q51+TTJLGE=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.24.0/go.mod h1:CQNu9bj7o7mC6U7+CA/schKEYakYXWr79ucDHTMGhCM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 h1:Xw8U6u2f8DK2XAkGRFV7BBLENgnTGX9i4rQRxJf+/vs=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0/go.mod h1:6KW1Fm6R/s6Z3PGXwSJN2K4eT6wQB3vXX6CVnYX9NmM=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.24.0 h1:s0PHtIkN+3xrbDOpt2M8OTG92cWqUESvzh2MxiR5xY8=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.24.0/go.mod h1:hZlFbDbRt++MMPCCfSJfmhkGIWnX1h3XjkfxZUjLrIA=
+go.opentelemetry.io/otel/exporters/zipkin v1.24.0 h1:3evrL5poBuh1KF51D9gO/S+N/1msnm4DaBqs/rpXUqY=
+go.opentelemetry.io/otel/exporters/zipkin v1.24.0/go.mod h1:0EHgD8R0+8yRhUYJOGR8Hfg2dpiJQxDOszd5smVO9wM=
+go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
+go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
+go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw=
+go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg=
+go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
+go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
+go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI=
+go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY=
+go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
+go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
+go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8=
+go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
+go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
+go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
+go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
+golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
+golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ=
+golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
+golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
+golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
+golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
+golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
+golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
+golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
+golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
+golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
+google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
+google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4=
+google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c h1:lfpJ/2rWPa/kJgxyyXM8PrNnfCzcmxJ265mADgwmvLI=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
+google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
+google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/h2non/gock.v1 v1.1.2 h1:jBbHXgGBK/AoPVfJh5x4r/WxIrElvbLel8TCZkkZJoY=
+gopkg.in/h2non/gock.v1 v1.1.2/go.mod h1:n7UGz/ckNChHiK05rDoiC4MYSunEC/lyaUm2WWaDva0=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw=
+k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80=
+k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU=
+k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU=
+k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg=
+k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0=
+k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0=
+k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo=
+k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780=
+k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
+k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY=
+k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
+sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
+sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=

+ 21 - 0
internal/config/config.go

@@ -0,0 +1,21 @@
+package config
+
+import (
+	"github.com/suyuan32/simple-admin-common/config"
+	"github.com/suyuan32/simple-admin-common/plugins/mq/asynq"
+
+	"github.com/zeromicro/go-zero/zrpc"
+)
+
+// Config is the service configuration, embedding the go-zero RPC server
+// settings plus database, redis, asynq and task-runner options.
+type Config struct {
+	zrpc.RpcServerConf
+	DatabaseConf config.DatabaseConf
+	RedisConf    config.RedisConf
+	AsynqConf    asynq.AsynqConf
+	TaskConf     TaskConf
+}
+
+// TaskConf toggles the two task runners; both default to enabled.
+type TaskConf struct {
+	EnableScheduledTask bool `json:",default=true"`
+	EnableDPTask        bool `json:",default=true"`
+}

+ 6 - 0
internal/enum/taskresult/task_result.go

@@ -0,0 +1,6 @@
+package taskresult
+
+// Task execution results recorded in the task log's Result column.
+const (
+	Success uint8 = 1 + iota // task finished successfully
+	Failed                   // task failed
+)

+ 75 - 0
internal/logic/base/init_database_logic.go

@@ -0,0 +1,75 @@
+package base
+
+import (
+	"context"
+
+	"github.com/suyuan32/simple-admin-job/internal/utils/dberrorhandler"
+
+	"entgo.io/ent/dialect/sql/schema"
+	"github.com/suyuan32/simple-admin-common/enum/errorcode"
+	"github.com/suyuan32/simple-admin-common/i18n"
+	"github.com/suyuan32/simple-admin-common/msg/logmsg"
+	"github.com/zeromicro/go-zero/core/errorx"
+
+	"github.com/suyuan32/simple-admin-job/internal/mqs/amq/types/pattern"
+	"github.com/suyuan32/simple-admin-job/internal/svc"
+	"github.com/suyuan32/simple-admin-job/types/job"
+
+	"github.com/zeromicro/go-zero/core/logx"
+)
+
+// InitDatabaseLogic carries the request-scoped context, service
+// dependencies and logger for the InitDatabase RPC.
+type InitDatabaseLogic struct {
+	ctx    context.Context
+	svcCtx *svc.ServiceContext
+	logx.Logger
+}
+
+// NewInitDatabaseLogic constructs an InitDatabaseLogic bound to ctx.
+func NewInitDatabaseLogic(ctx context.Context, svcCtx *svc.ServiceContext) *InitDatabaseLogic {
+	return &InitDatabaseLogic{
+		ctx:    ctx,
+		svcCtx: svcCtx,
+		Logger: logx.WithContext(ctx),
+	}
+}
+
+// InitDatabase creates the schema (without foreign keys) and seeds the
+// default task data. It refuses to run if any task already exists.
+func (l *InitDatabaseLogic) InitDatabase(in *job.Empty) (*job.BaseResp, error) {
+
+	// Create/upgrade all tables; foreign-key constraints are disabled.
+	if err := l.svcCtx.DB.Schema.Create(l.ctx, schema.WithForeignKeys(false)); err != nil {
+		logx.Errorw(logmsg.DatabaseError, logx.Field("detail", err.Error()))
+		return nil, errorx.NewCodeError(errorcode.Internal, err.Error())
+	}
+
+	// Guard against double initialization: any existing task row means
+	// the database has already been seeded.
+	count, err := l.svcCtx.DB.Task.Query().Count(l.ctx)
+	if err != nil {
+		return nil, dberrorhandler.DefaultEntError(l.Logger, err, "database error")
+	}
+
+	if count != 0 {
+		return nil, errorx.NewInvalidArgumentError(i18n.AlreadyInit)
+	}
+
+	err = l.insertTaskData()
+	if err != nil {
+		return nil, err
+	}
+
+	return &job.BaseResp{
+		Msg: i18n.Success,
+	}, nil
+}
+
+// insertTaskData seeds the task table with the default "hello_world"
+// periodic task so the dynamic task manager has something to run.
+func (l *InitDatabaseLogic) insertTaskData() error {
+	// Return the builder's error directly instead of the redundant
+	// `if err != nil { return err }; return nil` sequence.
+	return l.svcCtx.DB.Task.Create().
+		SetName("hello_world").
+		SetTaskGroup("base").
+		SetCronExpression("@every 60s").
+		SetPattern(pattern.RecordHelloWorld).
+		SetPayload("{\"name\": \"Mike (DPTask 60s)\"}").
+		Exec(l.ctx)
+}

+ 45 - 0
internal/logic/task/create_task_logic.go

@@ -0,0 +1,45 @@
+package task
+
+import (
+	"context"
+
+	"github.com/suyuan32/simple-admin-job/internal/svc"
+	"github.com/suyuan32/simple-admin-job/internal/utils/dberrorhandler"
+	"github.com/suyuan32/simple-admin-job/types/job"
+
+	"github.com/suyuan32/simple-admin-common/i18n"
+
+	"github.com/suyuan32/simple-admin-common/utils/pointy"
+	"github.com/zeromicro/go-zero/core/logx"
+)
+
+// CreateTaskLogic carries the request-scoped context, service
+// dependencies and logger for the CreateTask RPC.
+type CreateTaskLogic struct {
+	ctx    context.Context
+	svcCtx *svc.ServiceContext
+	logx.Logger
+}
+
+// NewCreateTaskLogic constructs a CreateTaskLogic bound to ctx.
+func NewCreateTaskLogic(ctx context.Context, svcCtx *svc.ServiceContext) *CreateTaskLogic {
+	return &CreateTaskLogic{
+		ctx:    ctx,
+		svcCtx: svcCtx,
+		Logger: logx.WithContext(ctx),
+	}
+}
+
+// CreateTask persists a new scheduled task built from the non-nil
+// fields of the request and returns the new record's ID.
+func (l *CreateTaskLogic) CreateTask(in *job.TaskInfo) (*job.BaseIDResp, error) {
+	builder := l.svcCtx.DB.Task.Create()
+	builder.SetNotNilStatus(pointy.GetStatusPointer(in.Status)).
+		SetNotNilName(in.Name).
+		SetNotNilTaskGroup(in.TaskGroup).
+		SetNotNilCronExpression(in.CronExpression).
+		SetNotNilPattern(in.Pattern).
+		SetNotNilPayload(in.Payload)
+
+	created, err := builder.Save(l.ctx)
+	if err != nil {
+		return nil, dberrorhandler.DefaultEntError(l.Logger, err, in)
+	}
+
+	return &job.BaseIDResp{Id: created.ID, Msg: i18n.CreateSuccess}, nil
+}

+ 51 - 0
internal/logic/task/delete_task_logic.go

@@ -0,0 +1,51 @@
+package task
+
+import (
+	"context"
+
+	"github.com/suyuan32/simple-admin-job/ent"
+	"github.com/suyuan32/simple-admin-job/ent/task"
+	"github.com/suyuan32/simple-admin-job/ent/tasklog"
+	"github.com/suyuan32/simple-admin-job/internal/svc"
+	"github.com/suyuan32/simple-admin-job/internal/utils/dberrorhandler"
+	"github.com/suyuan32/simple-admin-job/internal/utils/entx"
+	"github.com/suyuan32/simple-admin-job/types/job"
+
+	"github.com/suyuan32/simple-admin-common/i18n"
+	"github.com/zeromicro/go-zero/core/logx"
+)
+
+// DeleteTaskLogic carries the request-scoped context, service
+// dependencies and logger for the DeleteTask RPC.
+type DeleteTaskLogic struct {
+	ctx    context.Context
+	svcCtx *svc.ServiceContext
+	logx.Logger
+}
+
+// NewDeleteTaskLogic constructs a DeleteTaskLogic bound to ctx.
+func NewDeleteTaskLogic(ctx context.Context, svcCtx *svc.ServiceContext) *DeleteTaskLogic {
+	return &DeleteTaskLogic{
+		ctx:    ctx,
+		svcCtx: svcCtx,
+		Logger: logx.WithContext(ctx),
+	}
+}
+
+// DeleteTask removes the given tasks and their task logs inside a
+// single transaction, so either both deletions apply or neither does.
+func (l *DeleteTaskLogic) DeleteTask(in *job.IDsReq) (*job.BaseResp, error) {
+	err := entx.WithTx(l.ctx, l.svcCtx.DB, func(tx *ent.Tx) error {
+		// Delete dependent task logs first so no orphaned log rows
+		// remain once the tasks are gone.
+		_, err := tx.TaskLog.Delete().Where(tasklog.HasTasksWith(task.IDIn(in.Ids...))).Exec(l.ctx)
+		if err != nil {
+			return err
+		}
+
+		_, err = tx.Task.Delete().Where(task.IDIn(in.Ids...)).Exec(l.ctx)
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+
+	if err != nil {
+		return nil, dberrorhandler.DefaultEntError(l.Logger, err, in)
+	}
+
+	return &job.BaseResp{Msg: i18n.DeleteSuccess}, nil
+}

+ 45 - 0
internal/logic/task/get_task_by_id_logic.go

@@ -0,0 +1,45 @@
+package task
+
+import (
+	"context"
+
+	"github.com/suyuan32/simple-admin-job/internal/svc"
+	"github.com/suyuan32/simple-admin-job/internal/utils/dberrorhandler"
+	"github.com/suyuan32/simple-admin-job/types/job"
+
+	"github.com/suyuan32/simple-admin-common/utils/pointy"
+	"github.com/zeromicro/go-zero/core/logx"
+)
+
+// GetTaskByIdLogic carries the request-scoped context, service
+// dependencies and logger for the GetTaskById RPC.
+type GetTaskByIdLogic struct {
+	ctx    context.Context
+	svcCtx *svc.ServiceContext
+	logx.Logger
+}
+
+// NewGetTaskByIdLogic constructs a GetTaskByIdLogic bound to ctx.
+func NewGetTaskByIdLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetTaskByIdLogic {
+	return &GetTaskByIdLogic{
+		ctx:    ctx,
+		svcCtx: svcCtx,
+		Logger: logx.WithContext(ctx),
+	}
+}
+
+// GetTaskById fetches a single task by its primary key and converts it
+// into the RPC TaskInfo representation (timestamps in Unix millis).
+func (l *GetTaskByIdLogic) GetTaskById(in *job.IDReq) (*job.TaskInfo, error) {
+	t, err := l.svcCtx.DB.Task.Get(l.ctx, in.Id)
+	if err != nil {
+		return nil, dberrorhandler.DefaultEntError(l.Logger, err, in)
+	}
+
+	info := &job.TaskInfo{
+		Id:             &t.ID,
+		CreatedAt:      pointy.GetPointer(t.CreatedAt.UnixMilli()),
+		UpdatedAt:      pointy.GetPointer(t.UpdatedAt.UnixMilli()),
+		Status:         pointy.GetPointer(uint32(t.Status)),
+		Name:           &t.Name,
+		TaskGroup:      &t.TaskGroup,
+		CronExpression: &t.CronExpression,
+		Pattern:        &t.Pattern,
+		Payload:        &t.Payload,
+	}
+	return info, nil
+}

+ 63 - 0
internal/logic/task/get_task_list_logic.go

@@ -0,0 +1,63 @@
+package task
+
+import (
+	"context"
+
+	"github.com/suyuan32/simple-admin-job/ent/predicate"
+	"github.com/suyuan32/simple-admin-job/ent/task"
+	"github.com/suyuan32/simple-admin-job/internal/svc"
+	"github.com/suyuan32/simple-admin-job/internal/utils/dberrorhandler"
+	"github.com/suyuan32/simple-admin-job/types/job"
+
+	"github.com/suyuan32/simple-admin-common/utils/pointy"
+	"github.com/zeromicro/go-zero/core/logx"
+)
+
+// GetTaskListLogic carries the request-scoped context, service
+// dependencies and logger for the GetTaskList RPC.
+type GetTaskListLogic struct {
+	ctx    context.Context
+	svcCtx *svc.ServiceContext
+	logx.Logger
+}
+
+// NewGetTaskListLogic constructs a GetTaskListLogic bound to ctx.
+func NewGetTaskListLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetTaskListLogic {
+	return &GetTaskListLogic{
+		ctx:    ctx,
+		svcCtx: svcCtx,
+		Logger: logx.WithContext(ctx),
+	}
+}
+
+// GetTaskList returns one page of tasks, optionally filtered by fuzzy
+// matches on the task name and task group.
+func (l *GetTaskListLogic) GetTaskList(in *job.TaskListReq) (*job.TaskListResp, error) {
+	var filters []predicate.Task
+	if in.Name != nil {
+		filters = append(filters, task.NameContains(*in.Name))
+	}
+	if in.TaskGroup != nil {
+		filters = append(filters, task.TaskGroupContains(*in.TaskGroup))
+	}
+
+	page, err := l.svcCtx.DB.Task.Query().Where(filters...).Page(l.ctx, in.Page, in.PageSize)
+	if err != nil {
+		return nil, dberrorhandler.DefaultEntError(l.Logger, err, in)
+	}
+
+	resp := &job.TaskListResp{Total: page.PageDetails.Total}
+	for _, t := range page.List {
+		resp.Data = append(resp.Data, &job.TaskInfo{
+			Id:             &t.ID,
+			CreatedAt:      pointy.GetPointer(t.CreatedAt.UnixMilli()),
+			UpdatedAt:      pointy.GetPointer(t.UpdatedAt.UnixMilli()),
+			Status:         pointy.GetPointer(uint32(t.Status)),
+			Name:           &t.Name,
+			TaskGroup:      &t.TaskGroup,
+			CronExpression: &t.CronExpression,
+			Pattern:        &t.Pattern,
+			Payload:        &t.Payload,
+		})
+	}
+
+	return resp, nil
+}

+ 45 - 0
internal/logic/task/update_task_logic.go

@@ -0,0 +1,45 @@
+package task
+
+import (
+	"context"
+
+	"github.com/suyuan32/simple-admin-job/internal/svc"
+	"github.com/suyuan32/simple-admin-job/internal/utils/dberrorhandler"
+	"github.com/suyuan32/simple-admin-job/types/job"
+
+	"github.com/suyuan32/simple-admin-common/i18n"
+
+	"github.com/suyuan32/simple-admin-common/utils/pointy"
+	"github.com/zeromicro/go-zero/core/logx"
+)
+
+// UpdateTaskLogic carries the request-scoped context, service
+// dependencies and logger for the UpdateTask RPC.
+type UpdateTaskLogic struct {
+	ctx    context.Context
+	svcCtx *svc.ServiceContext
+	logx.Logger
+}
+
+// NewUpdateTaskLogic constructs an UpdateTaskLogic bound to ctx.
+func NewUpdateTaskLogic(ctx context.Context, svcCtx *svc.ServiceContext) *UpdateTaskLogic {
+	return &UpdateTaskLogic{
+		ctx:    ctx,
+		svcCtx: svcCtx,
+		Logger: logx.WithContext(ctx),
+	}
+}
+
+// UpdateTask applies the non-nil fields of the request to the task
+// identified by in.Id.
+func (l *UpdateTaskLogic) UpdateTask(in *job.TaskInfo) (*job.BaseResp, error) {
+	updater := l.svcCtx.DB.Task.UpdateOneID(*in.Id)
+	updater.SetNotNilStatus(pointy.GetStatusPointer(in.Status)).
+		SetNotNilName(in.Name).
+		SetNotNilTaskGroup(in.TaskGroup).
+		SetNotNilCronExpression(in.CronExpression).
+		SetNotNilPattern(in.Pattern).
+		SetNotNilPayload(in.Payload)
+
+	if err := updater.Exec(l.ctx); err != nil {
+		return nil, dberrorhandler.DefaultEntError(l.Logger, err, in)
+	}
+
+	return &job.BaseResp{Msg: i18n.UpdateSuccess}, nil
+}

+ 41 - 0
internal/logic/tasklog/create_task_log_logic.go

@@ -0,0 +1,41 @@
+package tasklog
+
+import (
+	"context"
+
+	"github.com/suyuan32/simple-admin-job/internal/svc"
+	"github.com/suyuan32/simple-admin-job/internal/utils/dberrorhandler"
+	"github.com/suyuan32/simple-admin-job/types/job"
+
+	"github.com/suyuan32/simple-admin-common/i18n"
+
+	"github.com/suyuan32/simple-admin-common/utils/pointy"
+	"github.com/zeromicro/go-zero/core/logx"
+)
+
+// CreateTaskLogLogic carries the request-scoped context, service
+// dependencies and logger for the CreateTaskLog RPC.
+type CreateTaskLogLogic struct {
+	ctx    context.Context
+	svcCtx *svc.ServiceContext
+	logx.Logger
+}
+
+// NewCreateTaskLogLogic constructs a CreateTaskLogLogic bound to ctx.
+func NewCreateTaskLogLogic(ctx context.Context, svcCtx *svc.ServiceContext) *CreateTaskLogLogic {
+	return &CreateTaskLogLogic{
+		ctx:    ctx,
+		svcCtx: svcCtx,
+		Logger: logx.WithContext(ctx),
+	}
+}
+
+// CreateTaskLog inserts a task log record from the non-nil fields of
+// the request and returns the new record's ID.
+//
+// NOTE(review): unlike the handler-side log writes, this sets only
+// FinishedAt and Result — StartedAt and the owning task edge are never
+// set here. Confirm whether callers are expected to supply them.
+func (l *CreateTaskLogLogic) CreateTaskLog(in *job.TaskLogInfo) (*job.BaseIDResp, error) {
+	result, err := l.svcCtx.DB.TaskLog.Create().
+		SetNotNilFinishedAt(pointy.GetTimeMilliPointer(in.FinishedAt)).
+		SetNotNilResult(pointy.GetStatusPointer(in.Result)).
+		Save(l.ctx)
+
+	if err != nil {
+		return nil, dberrorhandler.DefaultEntError(l.Logger, err, in)
+	}
+
+	return &job.BaseIDResp{Id: result.ID, Msg: i18n.CreateSuccess}, nil
+}

+ 37 - 0
internal/logic/tasklog/delete_task_log_logic.go

@@ -0,0 +1,37 @@
+package tasklog
+
+import (
+	"context"
+
+	"github.com/suyuan32/simple-admin-job/ent/tasklog"
+	"github.com/suyuan32/simple-admin-job/internal/svc"
+	"github.com/suyuan32/simple-admin-job/internal/utils/dberrorhandler"
+	"github.com/suyuan32/simple-admin-job/types/job"
+
+	"github.com/suyuan32/simple-admin-common/i18n"
+	"github.com/zeromicro/go-zero/core/logx"
+)
+
+// DeleteTaskLogLogic carries the request-scoped context, service
+// dependencies and logger for the DeleteTaskLog RPC.
+type DeleteTaskLogLogic struct {
+	ctx    context.Context
+	svcCtx *svc.ServiceContext
+	logx.Logger
+}
+
+// NewDeleteTaskLogLogic constructs a DeleteTaskLogLogic bound to ctx.
+func NewDeleteTaskLogLogic(ctx context.Context, svcCtx *svc.ServiceContext) *DeleteTaskLogLogic {
+	return &DeleteTaskLogLogic{
+		ctx:    ctx,
+		svcCtx: svcCtx,
+		Logger: logx.WithContext(ctx),
+	}
+}
+
+// DeleteTaskLog removes the task log rows whose IDs are listed in the
+// request.
+func (l *DeleteTaskLogLogic) DeleteTaskLog(in *job.IDsReq) (*job.BaseResp, error) {
+	if _, err := l.svcCtx.DB.TaskLog.Delete().Where(tasklog.IDIn(in.Ids...)).Exec(l.ctx); err != nil {
+		return nil, dberrorhandler.DefaultEntError(l.Logger, err, in)
+	}
+
+	return &job.BaseResp{Msg: i18n.DeleteSuccess}, nil
+}

+ 40 - 0
internal/logic/tasklog/get_task_log_by_id_logic.go

@@ -0,0 +1,40 @@
+package tasklog
+
+import (
+	"context"
+
+	"github.com/suyuan32/simple-admin-job/internal/svc"
+	"github.com/suyuan32/simple-admin-job/internal/utils/dberrorhandler"
+	"github.com/suyuan32/simple-admin-job/types/job"
+
+	"github.com/suyuan32/simple-admin-common/utils/pointy"
+	"github.com/zeromicro/go-zero/core/logx"
+)
+
+// GetTaskLogByIdLogic carries the request-scoped context, service
+// dependencies and logger for the GetTaskLogById RPC.
+type GetTaskLogByIdLogic struct {
+	ctx    context.Context
+	svcCtx *svc.ServiceContext
+	logx.Logger
+}
+
+// NewGetTaskLogByIdLogic constructs a GetTaskLogByIdLogic bound to ctx.
+func NewGetTaskLogByIdLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetTaskLogByIdLogic {
+	return &GetTaskLogByIdLogic{
+		ctx:    ctx,
+		svcCtx: svcCtx,
+		Logger: logx.WithContext(ctx),
+	}
+}
+
+// GetTaskLogById fetches a single task log by its primary key and
+// converts it into the RPC TaskLogInfo representation.
+func (l *GetTaskLogByIdLogic) GetTaskLogById(in *job.IDReq) (*job.TaskLogInfo, error) {
+	record, err := l.svcCtx.DB.TaskLog.Get(l.ctx, in.Id)
+	if err != nil {
+		return nil, dberrorhandler.DefaultEntError(l.Logger, err, in)
+	}
+
+	info := &job.TaskLogInfo{
+		Id:         &record.ID,
+		StartedAt:  pointy.GetPointer(record.StartedAt.UnixMilli()),
+		FinishedAt: pointy.GetPointer(record.FinishedAt.UnixMilli()),
+		Result:     pointy.GetPointer(uint32(record.Result)),
+	}
+	return info, nil
+}

+ 61 - 0
internal/logic/tasklog/get_task_log_list_logic.go

@@ -0,0 +1,61 @@
+package tasklog
+
+import (
+	"context"
+
+	"github.com/suyuan32/simple-admin-job/ent/predicate"
+	"github.com/suyuan32/simple-admin-job/ent/task"
+	"github.com/suyuan32/simple-admin-job/ent/tasklog"
+	"github.com/suyuan32/simple-admin-job/internal/svc"
+	"github.com/suyuan32/simple-admin-job/internal/utils/dberrorhandler"
+	"github.com/suyuan32/simple-admin-job/types/job"
+
+	"github.com/suyuan32/simple-admin-common/utils/pointy"
+	"github.com/zeromicro/go-zero/core/logx"
+)
+
+// GetTaskLogListLogic carries the request-scoped context, service
+// dependencies and logger for the GetTaskLogList RPC.
+type GetTaskLogListLogic struct {
+	ctx    context.Context
+	svcCtx *svc.ServiceContext
+	logx.Logger
+}
+
+// NewGetTaskLogListLogic constructs a GetTaskLogListLogic bound to ctx.
+func NewGetTaskLogListLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetTaskLogListLogic {
+	return &GetTaskLogListLogic{
+		ctx:    ctx,
+		svcCtx: svcCtx,
+		Logger: logx.WithContext(ctx),
+	}
+}
+
+// GetTaskLogList returns one page of task logs, optionally filtered by
+// the owning task's ID and by a non-zero result code.
+func (l *GetTaskLogListLogic) GetTaskLogList(in *job.TaskLogListReq) (*job.TaskLogListResp, error) {
+	var filters []predicate.TaskLog
+
+	if in.TaskId != nil {
+		filters = append(filters, tasklog.HasTasksWith(task.IDEQ(*in.TaskId)))
+	}
+
+	// A zero result code means "no filter".
+	if in.Result != nil && *in.Result != 0 {
+		filters = append(filters, tasklog.ResultEQ(uint8(*in.Result)))
+	}
+
+	page, err := l.svcCtx.DB.TaskLog.Query().Where(filters...).Page(l.ctx, in.Page, in.PageSize)
+	if err != nil {
+		return nil, dberrorhandler.DefaultEntError(l.Logger, err, in)
+	}
+
+	resp := &job.TaskLogListResp{Total: page.PageDetails.Total}
+	for _, record := range page.List {
+		resp.Data = append(resp.Data, &job.TaskLogInfo{
+			Id:         &record.ID,
+			StartedAt:  pointy.GetPointer(record.StartedAt.UnixMilli()),
+			FinishedAt: pointy.GetPointer(record.FinishedAt.UnixMilli()),
+			Result:     pointy.GetPointer(uint32(record.Result)),
+		})
+	}
+
+	return resp, nil
+}

+ 41 - 0
internal/logic/tasklog/update_task_log_logic.go

@@ -0,0 +1,41 @@
+package tasklog
+
+import (
+	"context"
+
+	"github.com/suyuan32/simple-admin-job/internal/svc"
+	"github.com/suyuan32/simple-admin-job/internal/utils/dberrorhandler"
+	"github.com/suyuan32/simple-admin-job/types/job"
+
+	"github.com/suyuan32/simple-admin-common/i18n"
+
+	"github.com/suyuan32/simple-admin-common/utils/pointy"
+	"github.com/zeromicro/go-zero/core/logx"
+)
+
+// UpdateTaskLogLogic carries the request-scoped context, service
+// dependencies and logger for the UpdateTaskLog RPC.
+type UpdateTaskLogLogic struct {
+	ctx    context.Context
+	svcCtx *svc.ServiceContext
+	logx.Logger
+}
+
+// NewUpdateTaskLogLogic constructs an UpdateTaskLogLogic bound to ctx.
+func NewUpdateTaskLogLogic(ctx context.Context, svcCtx *svc.ServiceContext) *UpdateTaskLogLogic {
+	return &UpdateTaskLogLogic{
+		ctx:    ctx,
+		svcCtx: svcCtx,
+		Logger: logx.WithContext(ctx),
+	}
+}
+
+// UpdateTaskLog applies the non-nil fields of the request to the task
+// log identified by in.Id.
+func (l *UpdateTaskLogLogic) UpdateTaskLog(in *job.TaskLogInfo) (*job.BaseResp, error) {
+	err := l.svcCtx.DB.TaskLog.UpdateOneID(*in.Id).
+		SetNotNilFinishedAt(pointy.GetTimeMilliPointer(in.FinishedAt)).
+		SetNotNilResult(pointy.GetStatusPointer(in.Result)).
+		Exec(l.ctx)
+
+	if err != nil {
+		return nil, dberrorhandler.DefaultEntError(l.Logger, err, in)
+	}
+
+	// Bug fix: this is an update, so report UpdateSuccess — the original
+	// returned i18n.CreateSuccess, copied from the create logic.
+	return &job.BaseResp{Msg: i18n.UpdateSuccess}, nil
+}

+ 69 - 0
internal/mqs/amq/handler/amq/base/hello_world.go

@@ -0,0 +1,69 @@
+package base
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"github.com/suyuan32/simple-admin-common/i18n"
+	"github.com/suyuan32/simple-admin-job/ent/task"
+	"github.com/suyuan32/simple-admin-job/internal/enum/taskresult"
+	"github.com/suyuan32/simple-admin-job/internal/mqs/amq/types/pattern"
+	"github.com/suyuan32/simple-admin-job/internal/utils/dberrorhandler"
+	"github.com/zeromicro/go-zero/core/errorx"
+	"github.com/zeromicro/go-zero/core/logx"
+	"time"
+
+	"github.com/hibiken/asynq"
+
+	"github.com/suyuan32/simple-admin-job/internal/mqs/amq/types/payload"
+	"github.com/suyuan32/simple-admin-job/internal/svc"
+)
+
+// HelloWorldHandler processes hello_world tasks; taskId caches the ID
+// of the matching task row so each run can write its task log.
+type HelloWorldHandler struct {
+	svcCtx *svc.ServiceContext
+	taskId uint64
+}
+
+func NewHelloWorldHandler(svcCtx *svc.ServiceContext) *HelloWorldHandler {
+	task, err := svcCtx.DB.Task.Query().Where(task.PatternEQ(pattern.RecordHelloWorld)).First(context.Background())
+	if err != nil || task == nil {
+		return nil
+	}
+
+	return &HelloWorldHandler{
+		svcCtx: svcCtx,
+		taskId: task.ID,
+	}
+}
+
+// ProcessTask runs the hello_world task: it unmarshals the payload,
+// prints a greeting, and records a task log entry. A non-nil return
+// makes asynq retry the task.
+func (l *HelloWorldHandler) ProcessTask(ctx context.Context, t *asynq.Task) error {
+	// taskId == 0 means the task row was unavailable at startup.
+	if l.taskId == 0 {
+		logx.Errorw("failed to load task info")
+		return errorx.NewInternalError(i18n.DatabaseError)
+	}
+
+	var p payload.HelloWorldPayload
+	if err := json.Unmarshal(t.Payload(), &p); err != nil {
+		// Bug fix: "umarshal" -> "unmarshal" in the error message.
+		return errors.Join(err, fmt.Errorf("failed to unmarshal the payload :%s", string(t.Payload())))
+	}
+
+	startTime := time.Now()
+	fmt.Printf("Hi! %s\n", p.Name)
+	finishTime := time.Now()
+
+	// Use the task's context instead of context.Background() so the
+	// database write is cancelled together with the task.
+	err := l.svcCtx.DB.TaskLog.Create().
+		SetStartedAt(startTime).
+		SetFinishedAt(finishTime).
+		SetResult(taskresult.Success).
+		SetTasksID(l.taskId).
+		Exec(ctx)
+
+	if err != nil {
+		return dberrorhandler.DefaultEntError(logx.WithContext(ctx), err,
+			"failed to save task log to database")
+	}
+
+	return nil
+}

+ 87 - 0
internal/mqs/amq/handler/amq/wxhook/say_morning.go

@@ -0,0 +1,87 @@
+package wxhook
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"github.com/imroc/req/v3"
+	"github.com/suyuan32/simple-admin-common/i18n"
+	"github.com/suyuan32/simple-admin-job/ent/task"
+	"github.com/suyuan32/simple-admin-job/internal/enum/taskresult"
+	"github.com/suyuan32/simple-admin-job/internal/mqs/amq/types/pattern"
+	"github.com/suyuan32/simple-admin-job/internal/utils/dberrorhandler"
+	"github.com/zeromicro/go-zero/core/errorx"
+	"github.com/zeromicro/go-zero/core/logx"
+	"time"
+
+	"github.com/hibiken/asynq"
+
+	"github.com/suyuan32/simple-admin-job/internal/mqs/amq/types/payload"
+	"github.com/suyuan32/simple-admin-job/internal/svc"
+)
+
+// SayMorningHandler processes say_morning tasks; taskId caches the ID
+// of the matching task row so each run can write its task log.
+type SayMorningHandler struct {
+	svcCtx *svc.ServiceContext
+	taskId uint64
+}
+
+func NewSayMorningHandler(svcCtx *svc.ServiceContext) *SayMorningHandler {
+	task, err := svcCtx.DB.Task.Query().Where(task.PatternEQ(pattern.RecordSayMorning)).First(context.Background())
+	if err != nil || task == nil {
+		return nil
+	}
+
+	return &SayMorningHandler{
+		svcCtx: svcCtx,
+		taskId: task.ID,
+	}
+}
+
+// ProcessTask runs the say_morning task: it unmarshals the recipient
+// list and posts a text message to each recipient's hook endpoint,
+// then records a task log entry. A non-nil return makes asynq retry.
+func (l *SayMorningHandler) ProcessTask(ctx context.Context, t *asynq.Task) error {
+	// taskId == 0 means the task row was unavailable at startup.
+	if l.taskId == 0 {
+		logx.Errorw("failed to load task info")
+		return errorx.NewInternalError(i18n.DatabaseError)
+	}
+
+	var p []payload.SayMorningPayload
+	if err := json.Unmarshal(t.Payload(), &p); err != nil {
+		// Bug fix: "umarshal" -> "unmarshal" in the error message.
+		return errors.Join(err, fmt.Errorf("failed to unmarshal the payload :%s", string(t.Payload())))
+	}
+
+	startTime := time.Now()
+
+	client := req.C().DevMode()
+	client.SetCommonRetryCount(2).
+		SetCommonRetryBackoffInterval(1*time.Second, 5*time.Second).
+		SetCommonRetryFixedInterval(2 * time.Second).SetTimeout(30 * time.Second)
+
+	type SendTextMsgReq struct {
+		Wxid string `json:"wxid"`
+		Msg  string `json:"msg"`
+	}
+
+	// Bug fix: the original discarded the HTTP errors and always logged
+	// Success. Track send failures so the task log reflects reality.
+	result := taskresult.Success
+	for _, v := range p {
+		_, err := client.R().SetBody(&SendTextMsgReq{
+			Wxid: v.Wxid,
+			Msg:  v.Msg,
+		}).Post("http://" + v.Ip + ":" + v.Port + "/SendTextMsg")
+		if err != nil {
+			logx.Errorw("failed to send text message",
+				logx.Field("wxid", v.Wxid), logx.Field("detail", err.Error()))
+			result = taskresult.Failed
+		}
+	}
+
+	finishTime := time.Now()
+
+	// Use the task's context so the write is cancelled with the task.
+	err := l.svcCtx.DB.TaskLog.Create().
+		SetStartedAt(startTime).
+		SetFinishedAt(finishTime).
+		SetResult(result).
+		SetTasksID(l.taskId).
+		Exec(ctx)
+
+	if err != nil {
+		return dberrorhandler.DefaultEntError(logx.WithContext(ctx), err,
+			"failed to save task log to database")
+	}
+
+	return nil
+}

+ 49 - 0
internal/mqs/amq/task/dynamicperiodictask/dynamic_periodic_task.go

@@ -0,0 +1,49 @@
+// Copyright 2023 The Ryan SU Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dynamicperiodictask
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/suyuan32/simple-admin-job/internal/svc"
+)
+
+// DPTask wraps the asynq dynamic periodic task manager as a go-zero
+// service (Start/Stop).
+type DPTask struct {
+	svcCtx *svc.ServiceContext
+}
+
+// NewDPTask constructs a DPTask using the shared service context.
+func NewDPTask(svcCtx *svc.ServiceContext) *DPTask {
+	return &DPTask{
+		svcCtx: svcCtx,
+	}
+}
+
+// Start starts the server.
+// It runs the periodic task manager, blocking until it stops; the
+// process exits if startup fails.
+func (m *DPTask) Start() {
+	if err := m.svcCtx.AsynqPTM.Run(); err != nil {
+		log.Fatal(fmt.Errorf("failed to start dptask server, error: %v", err))
+	}
+}
+
+// Stop stops the server.
+// NOTE(review): the deferred recover presumably guards against
+// Shutdown panicking when the manager was never started — confirm
+// against the asynq PeriodicTaskManager implementation.
+func (m *DPTask) Stop() {
+	defer func() {
+		if recover() != nil {
+			log.Println("DPTask shuts down successfully")
+		}
+	}()
+	m.svcCtx.AsynqPTM.Shutdown()
+}

+ 51 - 0
internal/mqs/amq/task/mqtask/mqtask.go

@@ -0,0 +1,51 @@
+// Copyright 2023 The Ryan SU Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mqtask
+
+import (
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/hibiken/asynq"
+
+	"github.com/suyuan32/simple-admin-job/internal/svc"
+)
+
+// MQTask wraps the asynq worker server and its handler mux as a
+// go-zero service (Start/Stop).
+type MQTask struct {
+	svcCtx *svc.ServiceContext
+	mux    *asynq.ServeMux
+}
+
+// NewMQTask constructs an MQTask using the shared service context; the
+// mux is populated by Register when Start runs.
+func NewMQTask(svcCtx *svc.ServiceContext) *MQTask {
+	return &MQTask{
+		svcCtx: svcCtx,
+	}
+}
+
+// Start starts the server.
+// It registers all handlers and runs the asynq server, blocking until
+// it stops; the process exits if startup fails.
+func (m *MQTask) Start() {
+	m.Register()
+	if err := m.svcCtx.AsynqServer.Run(m.mux); err != nil {
+		log.Fatal(fmt.Errorf("failed to start mqtask server, error: %v", err))
+	}
+}
+
+// Stop stops the server.
+// NOTE(review): the 5s sleep is presumably a grace period for in-flight
+// tasks before Stop/Shutdown — confirm the intended drain behavior.
+func (m *MQTask) Stop() {
+	time.Sleep(5 * time.Second)
+	m.svcCtx.AsynqServer.Stop()
+	m.svcCtx.AsynqServer.Shutdown()
+}

+ 21 - 0
internal/mqs/amq/task/mqtask/register.go

@@ -0,0 +1,21 @@
+package mqtask
+
+import (
+	"github.com/hibiken/asynq"
+	"github.com/suyuan32/simple-admin-job/internal/mqs/amq/handler/amq/wxhook"
+
+	"github.com/suyuan32/simple-admin-job/internal/mqs/amq/handler/amq/base"
+	"github.com/suyuan32/simple-admin-job/internal/mqs/amq/types/pattern"
+)
+
+// Register adds task handlers to the mux. | Define task handling logic and register workers here.
+func (m *MQTask) Register() {
+	mux := asynq.NewServeMux()
+
+	// define the handlers | map each task pattern to its handler
+	// NOTE(review): the New*Handler constructors return nil when their
+	// task row is missing from the database; a typed-nil handler
+	// registered here would panic when invoked — confirm the database
+	// is initialized before the workers start.
+	mux.Handle(pattern.RecordHelloWorld, base.NewHelloWorldHandler(m.svcCtx))
+
+	mux.Handle(pattern.RecordSayMorning, wxhook.NewSayMorningHandler(m.svcCtx))
+
+	m.mux = mux
+}

+ 14 - 0
internal/mqs/amq/task/scheduletask/register.go

@@ -0,0 +1,14 @@
+package scheduletask
+
+import (
+	"github.com/hibiken/asynq"
+
+	"github.com/suyuan32/simple-admin-job/internal/mqs/amq/types/pattern"
+)
+
+// Register adds tasks to the cron scheduler. | Define scheduled tasks here.
+func (s *SchedulerTask) Register() {
+	// register task to schedule | enqueue a hello_world task every 5s
+	s.svcCtx.AsynqScheduler.Register("@every 5s", asynq.NewTask(pattern.RecordHelloWorld,
+		[]byte("{\"name\": \"Jack (Scheduled Task every 5s)\"}")))
+}

+ 45 - 0
internal/mqs/amq/task/scheduletask/scheduletask.go

@@ -0,0 +1,45 @@
+// Copyright 2023 The Ryan SU Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package scheduletask
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/suyuan32/simple-admin-job/internal/svc"
+)
+
+// SchedulerTask wraps the asynq scheduler as a go-zero service
+// (Start/Stop).
+type SchedulerTask struct {
+	svcCtx *svc.ServiceContext
+}
+
+// NewSchedulerTask constructs a SchedulerTask using the shared service
+// context.
+func NewSchedulerTask(svcCtx *svc.ServiceContext) *SchedulerTask {
+	return &SchedulerTask{
+		svcCtx: svcCtx,
+	}
+}
+
+// Start starts the server.
+// It registers the scheduled tasks and runs the asynq scheduler,
+// blocking until it stops; the process exits if startup fails.
+func (s *SchedulerTask) Start() {
+	s.Register()
+	if err := s.svcCtx.AsynqScheduler.Run(); err != nil {
+		// Bug fix: the original message said "mqtask server" (copied
+		// from mqtask.go); this component is the scheduler.
+		log.Fatal(fmt.Errorf("failed to start scheduler server, error: %v", err))
+	}
+}
+
+// Stop stops the server.
+// It shuts down the asynq scheduler.
+func (s *SchedulerTask) Stop() {
+	s.svcCtx.AsynqScheduler.Shutdown()
+}

+ 5 - 0
internal/mqs/amq/types/pattern/pattern.go

@@ -0,0 +1,5 @@
+// Package pattern defines all the patterns used in tasks which used for Differentiating tasks
+package pattern
+
+// RecordHelloWorld identifies the demo hello_world task.
+const RecordHelloWorld = "hello_world"
+// RecordSayMorning identifies the say_morning task handled by the wxhook handler.
+const RecordSayMorning = "say_morning"

+ 14 - 0
internal/mqs/amq/types/payload/payload.go

@@ -0,0 +1,14 @@
+// Package payload defines all the payload structures used in tasks
+package payload
+
+// HelloWorldPayload is the JSON payload of the hello_world task.
+type HelloWorldPayload struct {
+	Name string `json:"name"`
+}
+
+// SayMorningPayload is one recipient entry in the say_morning task's
+// JSON payload: Ip/Port locate the hook endpoint, Wxid/Msg name the
+// target and the message text.
+type SayMorningPayload struct {
+	Ip       string `json:"ip"`
+	Port     string `json:"port"`
+	Wxid     string `json:"wxid"`
+	Nickname string `json:"nickname"`
+	Msg      string `json:"msg"`
+}

+ 58 - 0
internal/mqs/amq/types/periodicconfig/provider.go

@@ -0,0 +1,58 @@
+// Copyright 2023 The Ryan SU Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package periodicconfig
+
+import (
+	"context"
+	"fmt"
+	"github.com/zeromicro/go-zero/core/logx"
+
+	"github.com/hibiken/asynq"
+	"github.com/suyuan32/simple-admin-common/enum/common"
+	"github.com/suyuan32/simple-admin-job/ent"
+	"github.com/suyuan32/simple-admin-job/ent/task"
+)
+
// EntConfigProvider supplies periodic task configs to asynq's
// PeriodicTaskManager, sourcing them from the task table via ent.
type EntConfigProvider struct {
	DB *ent.Client // ent client used to query enabled tasks
}
+
+func NewEntConfigProvider(db *ent.Client) *EntConfigProvider {
+	return &EntConfigProvider{
+		db,
+	}
+}
+
+func (e *EntConfigProvider) GetConfigs() ([]*asynq.PeriodicTaskConfig, error) {
+	configData, err := e.DB.Task.Query().Where(task.StatusEQ(common.StatusNormal)).All(context.Background())
+	if err != nil {
+		fmt.Printf("database error: %s, make sure the database configuration is correct and database has been initialized \n", err.Error())
+		logx.Errorw("database error", logx.Field("detail", err.Error()),
+			logx.Field("recommend", "you maybe need to  initialize the database"))
+		return nil, nil
+	}
+
+	var result []*asynq.PeriodicTaskConfig
+
+	for _, v := range configData {
+		result = append(result, &asynq.PeriodicTaskConfig{
+			Cronspec: v.CronExpression,
+			Task:     asynq.NewTask(v.Pattern, []byte(v.Payload)),
+			Opts:     nil,
+		})
+	}
+
+	return result, nil
+}

+ 82 - 0
internal/server/job_server.go

@@ -0,0 +1,82 @@
+// Code generated by goctl. DO NOT EDIT.
+// Source: job.proto
+
+package server
+
+import (
+	"context"
+
+	"github.com/suyuan32/simple-admin-job/internal/logic/base"
+	"github.com/suyuan32/simple-admin-job/internal/logic/task"
+	"github.com/suyuan32/simple-admin-job/internal/logic/tasklog"
+	"github.com/suyuan32/simple-admin-job/internal/svc"
+	"github.com/suyuan32/simple-admin-job/types/job"
+)
+
// JobServer implements the Job gRPC service by delegating to the logic layer.
type JobServer struct {
	svcCtx *svc.ServiceContext
	job.UnimplementedJobServer
}
+
// NewJobServer creates the gRPC server bound to the shared service context.
func NewJobServer(svcCtx *svc.ServiceContext) *JobServer {
	return &JobServer{
		svcCtx: svcCtx,
	}
}
+
// InitDatabase delegates database initialization to the base logic layer.
func (s *JobServer) InitDatabase(ctx context.Context, in *job.Empty) (*job.BaseResp, error) {
	l := base.NewInitDatabaseLogic(ctx, s.svcCtx)
	return l.InitDatabase(in)
}
+
+// Task management
// CreateTask delegates task creation to the task logic layer.
func (s *JobServer) CreateTask(ctx context.Context, in *job.TaskInfo) (*job.BaseIDResp, error) {
	l := task.NewCreateTaskLogic(ctx, s.svcCtx)
	return l.CreateTask(in)
}
+
// UpdateTask delegates task updates to the task logic layer.
func (s *JobServer) UpdateTask(ctx context.Context, in *job.TaskInfo) (*job.BaseResp, error) {
	l := task.NewUpdateTaskLogic(ctx, s.svcCtx)
	return l.UpdateTask(in)
}
+
// GetTaskList delegates paginated task listing to the task logic layer.
func (s *JobServer) GetTaskList(ctx context.Context, in *job.TaskListReq) (*job.TaskListResp, error) {
	l := task.NewGetTaskListLogic(ctx, s.svcCtx)
	return l.GetTaskList(in)
}
+
// GetTaskById delegates single-task lookup to the task logic layer.
func (s *JobServer) GetTaskById(ctx context.Context, in *job.IDReq) (*job.TaskInfo, error) {
	l := task.NewGetTaskByIdLogic(ctx, s.svcCtx)
	return l.GetTaskById(in)
}
+
// DeleteTask delegates bulk task deletion to the task logic layer.
func (s *JobServer) DeleteTask(ctx context.Context, in *job.IDsReq) (*job.BaseResp, error) {
	l := task.NewDeleteTaskLogic(ctx, s.svcCtx)
	return l.DeleteTask(in)
}
+
+// TaskLog management
// CreateTaskLog delegates task-log creation to the tasklog logic layer.
func (s *JobServer) CreateTaskLog(ctx context.Context, in *job.TaskLogInfo) (*job.BaseIDResp, error) {
	l := tasklog.NewCreateTaskLogLogic(ctx, s.svcCtx)
	return l.CreateTaskLog(in)
}
+
// UpdateTaskLog delegates task-log updates to the tasklog logic layer.
func (s *JobServer) UpdateTaskLog(ctx context.Context, in *job.TaskLogInfo) (*job.BaseResp, error) {
	l := tasklog.NewUpdateTaskLogLogic(ctx, s.svcCtx)
	return l.UpdateTaskLog(in)
}
+
// GetTaskLogList delegates paginated task-log listing to the tasklog logic layer.
func (s *JobServer) GetTaskLogList(ctx context.Context, in *job.TaskLogListReq) (*job.TaskLogListResp, error) {
	l := tasklog.NewGetTaskLogListLogic(ctx, s.svcCtx)
	return l.GetTaskLogList(in)
}
+
// GetTaskLogById delegates single task-log lookup to the tasklog logic layer.
func (s *JobServer) GetTaskLogById(ctx context.Context, in *job.IDReq) (*job.TaskLogInfo, error) {
	l := tasklog.NewGetTaskLogByIdLogic(ctx, s.svcCtx)
	return l.GetTaskLogById(in)
}
+
// DeleteTaskLog delegates bulk task-log deletion to the tasklog logic layer.
func (s *JobServer) DeleteTaskLog(ctx context.Context, in *job.IDsReq) (*job.BaseResp, error) {
	l := tasklog.NewDeleteTaskLogLogic(ctx, s.svcCtx)
	return l.DeleteTaskLog(in)
}

+ 51 - 0
internal/svc/service_context.go

@@ -0,0 +1,51 @@
+// Copyright 2023 The Ryan SU Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package svc
+
+import (
+	"github.com/hibiken/asynq"
+	"github.com/redis/go-redis/v9"
+	"github.com/zeromicro/go-zero/core/logx"
+
+	"github.com/suyuan32/simple-admin-job/ent"
+	"github.com/suyuan32/simple-admin-job/internal/config"
+	"github.com/suyuan32/simple-admin-job/internal/mqs/amq/types/periodicconfig"
+)
+
// ServiceContext bundles the dependencies shared by every logic layer.
type ServiceContext struct {
	Config         config.Config
	DB             *ent.Client                // ent ORM client (built in NewServiceContext)
	Redis          redis.UniversalClient      // redis connection
	AsynqServer    *asynq.Server              // asynq worker server
	AsynqScheduler *asynq.Scheduler           // asynq cron scheduler
	AsynqPTM       *asynq.PeriodicTaskManager // periodic tasks provided from the task table
}
+
// NewServiceContext wires up the ent client, redis connection and the three
// asynq components from the loaded configuration.
func NewServiceContext(c config.Config) *ServiceContext {
	// NOTE(review): ent.Debug() enables query debug logging unconditionally —
	// consider gating it on the service mode before production use.
	db := ent.NewClient(
		ent.Log(logx.Info), // logger
		ent.Driver(c.DatabaseConf.NewNoCacheDriver()),
		ent.Debug(), // debug mode
	)

	return &ServiceContext{
		Config:         c,
		DB:             db,
		AsynqServer:    c.AsynqConf.WithOriginalRedisConf(c.RedisConf).NewServer(),
		AsynqScheduler: c.AsynqConf.NewScheduler(),
		AsynqPTM:       c.AsynqConf.NewPeriodicTaskManager(periodicconfig.NewEntConfigProvider(db)),
		Redis:          c.RedisConf.MustNewUniversalRedis(),
	}
}

+ 35 - 0
internal/utils/dberrorhandler/error_handler.go

@@ -0,0 +1,35 @@
+package dberrorhandler
+
+import (
+	"github.com/zeromicro/go-zero/core/errorx"
+	"github.com/zeromicro/go-zero/core/logx"
+
+	"github.com/suyuan32/simple-admin-common/i18n"
+	"github.com/suyuan32/simple-admin-common/msg/logmsg"
+
+	"github.com/suyuan32/simple-admin-job/ent"
+)
+
+// DefaultEntError returns errors dealing with default functions.
+func DefaultEntError(logger logx.Logger, err error, detail any) error {
+	if err != nil {
+		switch {
+		case ent.IsNotFound(err):
+			logger.Errorw(err.Error(), logx.Field("detail", detail))
+			return errorx.NewInvalidArgumentError(i18n.TargetNotFound)
+		case ent.IsConstraintError(err):
+			logger.Errorw(err.Error(), logx.Field("detail", detail))
+			return errorx.NewInvalidArgumentError(i18n.ConstraintError)
+		case ent.IsValidationError(err):
+			logger.Errorw(err.Error(), logx.Field("detail", detail))
+			return errorx.NewInvalidArgumentError(i18n.ValidationError)
+		case ent.IsNotSingular(err):
+			logger.Errorw(err.Error(), logx.Field("detail", detail))
+			return errorx.NewInvalidArgumentError(i18n.NotSingularError)
+		default:
+			logger.Errorw(logmsg.DatabaseError, logx.Field("detail", err.Error()))
+			return errorx.NewInternalError(i18n.DatabaseError)
+		}
+	}
+	return err
+}

+ 37 - 0
internal/utils/entx/ent_tx.go

@@ -0,0 +1,37 @@
+package entx
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/zeromicro/go-zero/core/logx"
+
+	"github.com/suyuan32/simple-admin-job/ent"
+)
+
+// WithTx uses transaction in ent.
+func WithTx(ctx context.Context, client *ent.Client, fn func(tx *ent.Tx) error) error {
+	tx, err := client.Tx(ctx)
+	if err != nil {
+		logx.Errorw("failed to start transaction", logx.Field("detail", err.Error()))
+		return err
+	}
+	defer func() {
+		if v := recover(); v != nil {
+			tx.Rollback()
+			panic(v)
+		}
+	}()
+	if err := fn(tx); err != nil {
+		if rollBackErr := tx.Rollback(); rollBackErr != nil {
+			err = fmt.Errorf("%w: rolling back transaction: %v", err, rollBackErr)
+		}
+		logx.Errorw("errors occur in transaction", logx.Field("detail", err.Error()))
+		return err
+	}
+	if err := tx.Commit(); err != nil {
+		logx.Errorw("failed to commit transaction", logx.Field("detail", err.Error()))
+		return err
+	}
+	return nil
+}

+ 75 - 0
job.go

@@ -0,0 +1,75 @@
+// Copyright 2023 The Ryan SU Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"flag"
+	"fmt"
+
+	"github.com/zeromicro/go-zero/core/conf"
+	"github.com/zeromicro/go-zero/core/logx"
+	"github.com/zeromicro/go-zero/core/service"
+	"github.com/zeromicro/go-zero/zrpc"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/reflection"
+
+	"github.com/suyuan32/simple-admin-job/internal/config"
+	"github.com/suyuan32/simple-admin-job/internal/mqs/amq/task/dynamicperiodictask"
+	"github.com/suyuan32/simple-admin-job/internal/mqs/amq/task/mqtask"
+	"github.com/suyuan32/simple-admin-job/internal/mqs/amq/task/scheduletask"
+	"github.com/suyuan32/simple-admin-job/internal/server"
+	"github.com/suyuan32/simple-admin-job/internal/svc"
+	"github.com/suyuan32/simple-admin-job/types/job"
+)
+
// configFile is the service configuration path, overridable with -f.
var configFile = flag.String("f", "etc/job.yaml", "the config file")
+
+func main() {
+	flag.Parse()
+
+	var c config.Config
+	conf.MustLoad(*configFile, &c)
+	ctx := svc.NewServiceContext(c)
+
+	s := zrpc.MustNewServer(c.RpcServerConf, func(grpcServer *grpc.Server) {
+		job.RegisterJobServer(grpcServer, server.NewJobServer(ctx))
+
+		if c.Mode == service.DevMode || c.Mode == service.TestMode {
+			reflection.Register(grpcServer)
+		}
+	})
+
+	go func() {
+		s.Start()
+	}()
+
+	serviceGroup := service.NewServiceGroup()
+	defer func() {
+		serviceGroup.Stop()
+		logx.Close()
+	}()
+
+	serviceGroup.Add(mqtask.NewMQTask(ctx))
+	if c.TaskConf.EnableDPTask {
+		serviceGroup.Add(dynamicperiodictask.NewDPTask(ctx))
+	}
+
+	if c.TaskConf.EnableScheduledTask {
+		serviceGroup.Add(scheduletask.NewSchedulerTask(ctx))
+	}
+
+	fmt.Printf("Starting rpc server at %s...\n", c.ListenOn)
+	serviceGroup.Start()
+}

+ 115 - 0
job.proto

@@ -0,0 +1,115 @@
+syntax = "proto3";
+
+package job;
+option go_package="./job";
+
// IDsReq carries a batch of numeric ids, e.g. for bulk deletion.
message IDsReq {
  repeated uint64 ids = 1;
}
+
// BaseResp is the generic response carrying only a result message.
message BaseResp {
  string msg = 1;
}
+
// PageInfoReq is the generic pagination request.
message PageInfoReq {
  uint64 page = 1;
  uint64 page_size = 2;
}
+
// TaskLogListResp is one page of task logs plus the total row count.
message TaskLogListResp {
  uint64 total = 1;
  repeated TaskLogInfo data = 2;
}
+
// IDReq addresses a single entity by numeric id.
message IDReq {
  uint64 id = 1;
}
+
// UUIDReq addresses a single entity by string (UUID) id.
message UUIDReq {
  string id = 1;
}
+
// BaseIDResp returns the numeric id of a created/affected entity plus a message.
message BaseIDResp {
  uint64 id = 1;
  string msg = 2;
}
+
// TaskListResp is one page of tasks plus the total row count.
message TaskListResp {
  uint64 total = 1;
  repeated TaskInfo data = 2;
}
+
//  Empty is a placeholder for RPCs that take or return nothing.
message Empty {}
+
// TaskInfo describes a scheduled task definition. All fields are optional so
// the same message serves create, update and read responses.
message TaskInfo {
  optional uint64 id = 1;
  optional int64 created_at = 2;       // timestamp — presumably unix; confirm with server logic
  optional int64 updated_at = 3;
  optional uint32 status = 4;          // task status code
  optional string name = 5;
  optional string task_group = 6;
  optional string cron_expression = 7; // cron spec consumed by the scheduler
  optional string pattern = 8;         // task pattern used to route to a handler
  optional string payload = 9;         // JSON payload passed to the handler
}
+
// TaskLogInfo records one execution of a task, with its timing and result.
message TaskLogInfo {
  optional uint64 id = 1;
  optional int64 created_at = 2;
  optional int64 updated_at = 3;
  optional int64 started_at = 4;
  optional int64 finished_at = 5;
  optional uint32 result = 6; // execution result code
}
+
// UUIDsReq carries a batch of string (UUID) ids.
message UUIDsReq {
  repeated string ids = 1;
}
+
// BaseUUIDResp returns the string (UUID) id of an affected entity plus a message.
message BaseUUIDResp {
  string id = 1;
  string msg = 2;
}
+
// TaskListReq pages through tasks, optionally filtered by name and group.
message TaskListReq {
  uint64 page = 1;
  uint64 page_size = 2;
  optional string name = 3;
  optional string task_group = 4;
}
+
// TaskLogListReq pages through task logs, optionally filtered by task and result.
message TaskLogListReq {
  uint64 page = 1;
  uint64 page_size = 2;
  optional uint64 task_id = 3;
  optional uint32 result = 4;
}
+
// Job exposes database initialization plus CRUD for tasks and task logs.
service Job {
  //  group: base
  rpc initDatabase(Empty) returns (BaseResp);
  //  Task management
  //  group: task
  rpc createTask(TaskInfo) returns (BaseIDResp);
  //  group: task
  rpc updateTask(TaskInfo) returns (BaseResp);
  //  group: task
  rpc getTaskList(TaskListReq) returns (TaskListResp);
  //  group: task
  rpc getTaskById(IDReq) returns (TaskInfo);
  //  group: task
  rpc deleteTask(IDsReq) returns (BaseResp);
  //  TaskLog management
  //  group: tasklog
  rpc createTaskLog(TaskLogInfo) returns (BaseIDResp);
  //  group: tasklog
  rpc updateTaskLog(TaskLogInfo) returns (BaseResp);
  //  group: tasklog
  rpc getTaskLogList(TaskLogListReq) returns (TaskLogListResp);
  //  group: tasklog
  rpc getTaskLogById(IDReq) returns (TaskLogInfo);
  //  group: tasklog
  rpc deleteTaskLog(IDsReq) returns (BaseResp);
}
+

+ 114 - 0
jobclient/job.go

@@ -0,0 +1,114 @@
+// Code generated by goctl. DO NOT EDIT.
+// Source: job.proto
+
+package jobclient
+
+import (
+	"context"
+
+	"github.com/suyuan32/simple-admin-job/types/job"
+
+	"github.com/zeromicro/go-zero/zrpc"
+	"google.golang.org/grpc"
+)
+
// Type aliases re-export the generated proto messages so callers of this
// client package do not have to import types/job directly.
type (
	BaseIDResp      = job.BaseIDResp
	BaseResp        = job.BaseResp
	BaseUUIDResp    = job.BaseUUIDResp
	Empty           = job.Empty
	IDReq           = job.IDReq
	IDsReq          = job.IDsReq
	PageInfoReq     = job.PageInfoReq
	TaskInfo        = job.TaskInfo
	TaskListReq     = job.TaskListReq
	TaskListResp    = job.TaskListResp
	TaskLogInfo     = job.TaskLogInfo
	TaskLogListReq  = job.TaskLogListReq
	TaskLogListResp = job.TaskLogListResp
	UUIDReq         = job.UUIDReq
	UUIDsReq        = job.UUIDsReq

	// Job is the client-side contract of the Job RPC service.
	Job interface {
		InitDatabase(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*BaseResp, error)
		// Task management
		CreateTask(ctx context.Context, in *TaskInfo, opts ...grpc.CallOption) (*BaseIDResp, error)
		UpdateTask(ctx context.Context, in *TaskInfo, opts ...grpc.CallOption) (*BaseResp, error)
		GetTaskList(ctx context.Context, in *TaskListReq, opts ...grpc.CallOption) (*TaskListResp, error)
		GetTaskById(ctx context.Context, in *IDReq, opts ...grpc.CallOption) (*TaskInfo, error)
		DeleteTask(ctx context.Context, in *IDsReq, opts ...grpc.CallOption) (*BaseResp, error)
		// TaskLog management
		CreateTaskLog(ctx context.Context, in *TaskLogInfo, opts ...grpc.CallOption) (*BaseIDResp, error)
		UpdateTaskLog(ctx context.Context, in *TaskLogInfo, opts ...grpc.CallOption) (*BaseResp, error)
		GetTaskLogList(ctx context.Context, in *TaskLogListReq, opts ...grpc.CallOption) (*TaskLogListResp, error)
		GetTaskLogById(ctx context.Context, in *IDReq, opts ...grpc.CallOption) (*TaskLogInfo, error)
		DeleteTaskLog(ctx context.Context, in *IDsReq, opts ...grpc.CallOption) (*BaseResp, error)
	}

	// defaultJob implements Job on top of a zrpc client connection.
	defaultJob struct {
		cli zrpc.Client
	}
)
+
// NewJob wraps a zrpc client in the Job service client interface.
func NewJob(cli zrpc.Client) Job {
	return &defaultJob{
		cli: cli,
	}
}
+
// InitDatabase proxies the initDatabase RPC over the shared connection.
func (m *defaultJob) InitDatabase(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*BaseResp, error) {
	client := job.NewJobClient(m.cli.Conn())
	return client.InitDatabase(ctx, in, opts...)
}
+
+// Task management
// CreateTask proxies the createTask RPC.
func (m *defaultJob) CreateTask(ctx context.Context, in *TaskInfo, opts ...grpc.CallOption) (*BaseIDResp, error) {
	client := job.NewJobClient(m.cli.Conn())
	return client.CreateTask(ctx, in, opts...)
}
+
// UpdateTask proxies the updateTask RPC.
func (m *defaultJob) UpdateTask(ctx context.Context, in *TaskInfo, opts ...grpc.CallOption) (*BaseResp, error) {
	client := job.NewJobClient(m.cli.Conn())
	return client.UpdateTask(ctx, in, opts...)
}
+
// GetTaskList proxies the getTaskList RPC.
func (m *defaultJob) GetTaskList(ctx context.Context, in *TaskListReq, opts ...grpc.CallOption) (*TaskListResp, error) {
	client := job.NewJobClient(m.cli.Conn())
	return client.GetTaskList(ctx, in, opts...)
}
+
// GetTaskById proxies the getTaskById RPC.
func (m *defaultJob) GetTaskById(ctx context.Context, in *IDReq, opts ...grpc.CallOption) (*TaskInfo, error) {
	client := job.NewJobClient(m.cli.Conn())
	return client.GetTaskById(ctx, in, opts...)
}
+
// DeleteTask proxies the deleteTask RPC.
func (m *defaultJob) DeleteTask(ctx context.Context, in *IDsReq, opts ...grpc.CallOption) (*BaseResp, error) {
	client := job.NewJobClient(m.cli.Conn())
	return client.DeleteTask(ctx, in, opts...)
}
+
+// TaskLog management
// CreateTaskLog proxies the createTaskLog RPC.
func (m *defaultJob) CreateTaskLog(ctx context.Context, in *TaskLogInfo, opts ...grpc.CallOption) (*BaseIDResp, error) {
	client := job.NewJobClient(m.cli.Conn())
	return client.CreateTaskLog(ctx, in, opts...)
}
+
// UpdateTaskLog proxies the updateTaskLog RPC.
func (m *defaultJob) UpdateTaskLog(ctx context.Context, in *TaskLogInfo, opts ...grpc.CallOption) (*BaseResp, error) {
	client := job.NewJobClient(m.cli.Conn())
	return client.UpdateTaskLog(ctx, in, opts...)
}
+
// GetTaskLogList proxies the getTaskLogList RPC.
func (m *defaultJob) GetTaskLogList(ctx context.Context, in *TaskLogListReq, opts ...grpc.CallOption) (*TaskLogListResp, error) {
	client := job.NewJobClient(m.cli.Conn())
	return client.GetTaskLogList(ctx, in, opts...)
}
+
// GetTaskLogById proxies the getTaskLogById RPC.
func (m *defaultJob) GetTaskLogById(ctx context.Context, in *IDReq, opts ...grpc.CallOption) (*TaskLogInfo, error) {
	client := job.NewJobClient(m.cli.Conn())
	return client.GetTaskLogById(ctx, in, opts...)
}
+
// DeleteTaskLog proxies the deleteTaskLog RPC.
func (m *defaultJob) DeleteTaskLog(ctx context.Context, in *IDsReq, opts ...grpc.CallOption) (*BaseResp, error) {
	client := job.NewJobClient(m.cli.Conn())
	return client.DeleteTaskLog(ctx, in, opts...)
}

+ 1316 - 0
types/job/job.pb.go

@@ -0,0 +1,1316 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.32.0
+// 	protoc        v4.25.2
+// source: job.proto
+
+package job
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
// IDsReq carries a batch of numeric ids (see job.proto for the source definition).
type IDsReq struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Ids []uint64 `protobuf:"varint,1,rep,packed,name=ids,proto3" json:"ids"`
}
+
+func (x *IDsReq) Reset() {
+	*x = IDsReq{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_job_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *IDsReq) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*IDsReq) ProtoMessage() {}
+
+func (x *IDsReq) ProtoReflect() protoreflect.Message {
+	mi := &file_job_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use IDsReq.ProtoReflect.Descriptor instead.
+func (*IDsReq) Descriptor() ([]byte, []int) {
+	return file_job_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *IDsReq) GetIds() []uint64 {
+	if x != nil {
+		return x.Ids
+	}
+	return nil
+}
+
// BaseResp is the generic response carrying only a result message.
type BaseResp struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Msg string `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg"`
}
+
+func (x *BaseResp) Reset() {
+	*x = BaseResp{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_job_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BaseResp) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BaseResp) ProtoMessage() {}
+
+func (x *BaseResp) ProtoReflect() protoreflect.Message {
+	mi := &file_job_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BaseResp.ProtoReflect.Descriptor instead.
+func (*BaseResp) Descriptor() ([]byte, []int) {
+	return file_job_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *BaseResp) GetMsg() string {
+	if x != nil {
+		return x.Msg
+	}
+	return ""
+}
+
// PageInfoReq is the generic pagination request.
type PageInfoReq struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Page     uint64 `protobuf:"varint,1,opt,name=page,proto3" json:"page"`
	PageSize uint64 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size"`
}
+
+func (x *PageInfoReq) Reset() {
+	*x = PageInfoReq{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_job_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *PageInfoReq) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PageInfoReq) ProtoMessage() {}
+
+func (x *PageInfoReq) ProtoReflect() protoreflect.Message {
+	mi := &file_job_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use PageInfoReq.ProtoReflect.Descriptor instead.
+func (*PageInfoReq) Descriptor() ([]byte, []int) {
+	return file_job_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *PageInfoReq) GetPage() uint64 {
+	if x != nil {
+		return x.Page
+	}
+	return 0
+}
+
+func (x *PageInfoReq) GetPageSize() uint64 {
+	if x != nil {
+		return x.PageSize
+	}
+	return 0
+}
+
// TaskLogListResp is one page of task logs plus the total row count.
type TaskLogListResp struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Total uint64         `protobuf:"varint,1,opt,name=total,proto3" json:"total"`
	Data  []*TaskLogInfo `protobuf:"bytes,2,rep,name=data,proto3" json:"data"`
}
+
+func (x *TaskLogListResp) Reset() {
+	*x = TaskLogListResp{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_job_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *TaskLogListResp) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TaskLogListResp) ProtoMessage() {}
+
+func (x *TaskLogListResp) ProtoReflect() protoreflect.Message {
+	mi := &file_job_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use TaskLogListResp.ProtoReflect.Descriptor instead.
+func (*TaskLogListResp) Descriptor() ([]byte, []int) {
+	return file_job_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *TaskLogListResp) GetTotal() uint64 {
+	if x != nil {
+		return x.Total
+	}
+	return 0
+}
+
+func (x *TaskLogListResp) GetData() []*TaskLogInfo {
+	if x != nil {
+		return x.Data
+	}
+	return nil
+}
+
// IDReq addresses a single entity by numeric id.
type IDReq struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id"`
}
+
+func (x *IDReq) Reset() {
+	*x = IDReq{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_job_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *IDReq) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*IDReq) ProtoMessage() {}
+
+func (x *IDReq) ProtoReflect() protoreflect.Message {
+	mi := &file_job_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use IDReq.ProtoReflect.Descriptor instead.
+func (*IDReq) Descriptor() ([]byte, []int) {
+	return file_job_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *IDReq) GetId() uint64 {
+	if x != nil {
+		return x.Id
+	}
+	return 0
+}
+
// UUIDReq addresses a single entity by string (UUID) id.
type UUIDReq struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id"`
}
+
+func (x *UUIDReq) Reset() {
+	*x = UUIDReq{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_job_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *UUIDReq) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UUIDReq) ProtoMessage() {}
+
+func (x *UUIDReq) ProtoReflect() protoreflect.Message {
+	mi := &file_job_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use UUIDReq.ProtoReflect.Descriptor instead.
+func (*UUIDReq) Descriptor() ([]byte, []int) {
+	return file_job_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *UUIDReq) GetId() string {
+	if x != nil {
+		return x.Id
+	}
+	return ""
+}
+
// BaseIDResp returns the numeric id of an affected entity plus a message.
type BaseIDResp struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Id  uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id"`
	Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg"`
}
+
+func (x *BaseIDResp) Reset() {
+	*x = BaseIDResp{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_job_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BaseIDResp) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BaseIDResp) ProtoMessage() {}
+
+func (x *BaseIDResp) ProtoReflect() protoreflect.Message {
+	mi := &file_job_proto_msgTypes[6]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BaseIDResp.ProtoReflect.Descriptor instead.
+func (*BaseIDResp) Descriptor() ([]byte, []int) {
+	return file_job_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *BaseIDResp) GetId() uint64 {
+	if x != nil {
+		return x.Id
+	}
+	return 0
+}
+
+func (x *BaseIDResp) GetMsg() string {
+	if x != nil {
+		return x.Msg
+	}
+	return ""
+}
+
// TaskListResp is one page of tasks plus the total row count.
type TaskListResp struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Total uint64      `protobuf:"varint,1,opt,name=total,proto3" json:"total"`
	Data  []*TaskInfo `protobuf:"bytes,2,rep,name=data,proto3" json:"data"`
}
+
+func (x *TaskListResp) Reset() {
+	*x = TaskListResp{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_job_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *TaskListResp) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TaskListResp) ProtoMessage() {}
+
+func (x *TaskListResp) ProtoReflect() protoreflect.Message {
+	mi := &file_job_proto_msgTypes[7]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use TaskListResp.ProtoReflect.Descriptor instead.
+func (*TaskListResp) Descriptor() ([]byte, []int) {
+	return file_job_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *TaskListResp) GetTotal() uint64 {
+	if x != nil {
+		return x.Total
+	}
+	return 0
+}
+
+func (x *TaskListResp) GetData() []*TaskInfo {
+	if x != nil {
+		return x.Data
+	}
+	return nil
+}
+
// Empty is a placeholder for RPCs that take or return nothing.
type Empty struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
}
+
+func (x *Empty) Reset() {
+	*x = Empty{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_job_proto_msgTypes[8]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Empty) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Empty) ProtoMessage() {}
+
+func (x *Empty) ProtoReflect() protoreflect.Message {
+	mi := &file_job_proto_msgTypes[8]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Empty.ProtoReflect.Descriptor instead.
+func (*Empty) Descriptor() ([]byte, []int) {
+	return file_job_proto_rawDescGZIP(), []int{8}
+}
+
// TaskInfo mirrors the TaskInfo proto message: a scheduled task definition.
// Every field is a pointer (proto3 optional) so absent values can be
// distinguished from zero values on partial updates.
type TaskInfo struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Id             *uint64 `protobuf:"varint,1,opt,name=id,proto3,oneof" json:"id"`
	CreatedAt      *int64  `protobuf:"varint,2,opt,name=created_at,json=createdAt,proto3,oneof" json:"created_at"`
	UpdatedAt      *int64  `protobuf:"varint,3,opt,name=updated_at,json=updatedAt,proto3,oneof" json:"updated_at"`
	Status         *uint32 `protobuf:"varint,4,opt,name=status,proto3,oneof" json:"status"`
	Name           *string `protobuf:"bytes,5,opt,name=name,proto3,oneof" json:"name"`
	TaskGroup      *string `protobuf:"bytes,6,opt,name=task_group,json=taskGroup,proto3,oneof" json:"task_group"`
	CronExpression *string `protobuf:"bytes,7,opt,name=cron_expression,json=cronExpression,proto3,oneof" json:"cron_expression"`
	Pattern        *string `protobuf:"bytes,8,opt,name=pattern,proto3,oneof" json:"pattern"`
	Payload        *string `protobuf:"bytes,9,opt,name=payload,proto3,oneof" json:"payload"`
}
+
+func (x *TaskInfo) Reset() {
+	*x = TaskInfo{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_job_proto_msgTypes[9]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *TaskInfo) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TaskInfo) ProtoMessage() {}
+
+func (x *TaskInfo) ProtoReflect() protoreflect.Message {
+	mi := &file_job_proto_msgTypes[9]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use TaskInfo.ProtoReflect.Descriptor instead.
+func (*TaskInfo) Descriptor() ([]byte, []int) {
+	return file_job_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *TaskInfo) GetId() uint64 {
+	if x != nil && x.Id != nil {
+		return *x.Id
+	}
+	return 0
+}
+
+func (x *TaskInfo) GetCreatedAt() int64 {
+	if x != nil && x.CreatedAt != nil {
+		return *x.CreatedAt
+	}
+	return 0
+}
+
+func (x *TaskInfo) GetUpdatedAt() int64 {
+	if x != nil && x.UpdatedAt != nil {
+		return *x.UpdatedAt
+	}
+	return 0
+}
+
+func (x *TaskInfo) GetStatus() uint32 {
+	if x != nil && x.Status != nil {
+		return *x.Status
+	}
+	return 0
+}
+
+func (x *TaskInfo) GetName() string {
+	if x != nil && x.Name != nil {
+		return *x.Name
+	}
+	return ""
+}
+
+func (x *TaskInfo) GetTaskGroup() string {
+	if x != nil && x.TaskGroup != nil {
+		return *x.TaskGroup
+	}
+	return ""
+}
+
+func (x *TaskInfo) GetCronExpression() string {
+	if x != nil && x.CronExpression != nil {
+		return *x.CronExpression
+	}
+	return ""
+}
+
+func (x *TaskInfo) GetPattern() string {
+	if x != nil && x.Pattern != nil {
+		return *x.Pattern
+	}
+	return ""
+}
+
+func (x *TaskInfo) GetPayload() string {
+	if x != nil && x.Payload != nil {
+		return *x.Payload
+	}
+	return ""
+}
+
+type TaskLogInfo struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Id         *uint64 `protobuf:"varint,1,opt,name=id,proto3,oneof" json:"id"`
+	CreatedAt  *int64  `protobuf:"varint,2,opt,name=created_at,json=createdAt,proto3,oneof" json:"created_at"`
+	UpdatedAt  *int64  `protobuf:"varint,3,opt,name=updated_at,json=updatedAt,proto3,oneof" json:"updated_at"`
+	StartedAt  *int64  `protobuf:"varint,4,opt,name=started_at,json=startedAt,proto3,oneof" json:"started_at"`
+	FinishedAt *int64  `protobuf:"varint,5,opt,name=finished_at,json=finishedAt,proto3,oneof" json:"finished_at"`
+	Result     *uint32 `protobuf:"varint,6,opt,name=result,proto3,oneof" json:"result"`
+}
+
+func (x *TaskLogInfo) Reset() {
+	*x = TaskLogInfo{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_job_proto_msgTypes[10]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *TaskLogInfo) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TaskLogInfo) ProtoMessage() {}
+
+func (x *TaskLogInfo) ProtoReflect() protoreflect.Message {
+	mi := &file_job_proto_msgTypes[10]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use TaskLogInfo.ProtoReflect.Descriptor instead.
+func (*TaskLogInfo) Descriptor() ([]byte, []int) {
+	return file_job_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *TaskLogInfo) GetId() uint64 {
+	if x != nil && x.Id != nil {
+		return *x.Id
+	}
+	return 0
+}
+
+func (x *TaskLogInfo) GetCreatedAt() int64 {
+	if x != nil && x.CreatedAt != nil {
+		return *x.CreatedAt
+	}
+	return 0
+}
+
+func (x *TaskLogInfo) GetUpdatedAt() int64 {
+	if x != nil && x.UpdatedAt != nil {
+		return *x.UpdatedAt
+	}
+	return 0
+}
+
+func (x *TaskLogInfo) GetStartedAt() int64 {
+	if x != nil && x.StartedAt != nil {
+		return *x.StartedAt
+	}
+	return 0
+}
+
+func (x *TaskLogInfo) GetFinishedAt() int64 {
+	if x != nil && x.FinishedAt != nil {
+		return *x.FinishedAt
+	}
+	return 0
+}
+
+func (x *TaskLogInfo) GetResult() uint32 {
+	if x != nil && x.Result != nil {
+		return *x.Result
+	}
+	return 0
+}
+
+type UUIDsReq struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Ids []string `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids"`
+}
+
+func (x *UUIDsReq) Reset() {
+	*x = UUIDsReq{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_job_proto_msgTypes[11]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *UUIDsReq) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UUIDsReq) ProtoMessage() {}
+
+func (x *UUIDsReq) ProtoReflect() protoreflect.Message {
+	mi := &file_job_proto_msgTypes[11]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use UUIDsReq.ProtoReflect.Descriptor instead.
+func (*UUIDsReq) Descriptor() ([]byte, []int) {
+	return file_job_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *UUIDsReq) GetIds() []string {
+	if x != nil {
+		return x.Ids
+	}
+	return nil
+}
+
+type BaseUUIDResp struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Id  string `protobuf:"bytes,1,opt,name=id,proto3" json:"id"`
+	Msg string `protobuf:"bytes,2,opt,name=msg,proto3" json:"msg"`
+}
+
+func (x *BaseUUIDResp) Reset() {
+	*x = BaseUUIDResp{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_job_proto_msgTypes[12]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BaseUUIDResp) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BaseUUIDResp) ProtoMessage() {}
+
+func (x *BaseUUIDResp) ProtoReflect() protoreflect.Message {
+	mi := &file_job_proto_msgTypes[12]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BaseUUIDResp.ProtoReflect.Descriptor instead.
+func (*BaseUUIDResp) Descriptor() ([]byte, []int) {
+	return file_job_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *BaseUUIDResp) GetId() string {
+	if x != nil {
+		return x.Id
+	}
+	return ""
+}
+
+func (x *BaseUUIDResp) GetMsg() string {
+	if x != nil {
+		return x.Msg
+	}
+	return ""
+}
+
+type TaskListReq struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Page      uint64  `protobuf:"varint,1,opt,name=page,proto3" json:"page"`
+	PageSize  uint64  `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size"`
+	Name      *string `protobuf:"bytes,3,opt,name=name,proto3,oneof" json:"name"`
+	TaskGroup *string `protobuf:"bytes,4,opt,name=task_group,json=taskGroup,proto3,oneof" json:"task_group"`
+}
+
+func (x *TaskListReq) Reset() {
+	*x = TaskListReq{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_job_proto_msgTypes[13]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *TaskListReq) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TaskListReq) ProtoMessage() {}
+
+func (x *TaskListReq) ProtoReflect() protoreflect.Message {
+	mi := &file_job_proto_msgTypes[13]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use TaskListReq.ProtoReflect.Descriptor instead.
+func (*TaskListReq) Descriptor() ([]byte, []int) {
+	return file_job_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *TaskListReq) GetPage() uint64 {
+	if x != nil {
+		return x.Page
+	}
+	return 0
+}
+
+func (x *TaskListReq) GetPageSize() uint64 {
+	if x != nil {
+		return x.PageSize
+	}
+	return 0
+}
+
+func (x *TaskListReq) GetName() string {
+	if x != nil && x.Name != nil {
+		return *x.Name
+	}
+	return ""
+}
+
+func (x *TaskListReq) GetTaskGroup() string {
+	if x != nil && x.TaskGroup != nil {
+		return *x.TaskGroup
+	}
+	return ""
+}
+
+type TaskLogListReq struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Page     uint64  `protobuf:"varint,1,opt,name=page,proto3" json:"page"`
+	PageSize uint64  `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size"`
+	TaskId   *uint64 `protobuf:"varint,3,opt,name=task_id,json=taskId,proto3,oneof" json:"task_id"`
+	Result   *uint32 `protobuf:"varint,4,opt,name=result,proto3,oneof" json:"result"`
+}
+
+func (x *TaskLogListReq) Reset() {
+	*x = TaskLogListReq{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_job_proto_msgTypes[14]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *TaskLogListReq) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TaskLogListReq) ProtoMessage() {}
+
+func (x *TaskLogListReq) ProtoReflect() protoreflect.Message {
+	mi := &file_job_proto_msgTypes[14]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use TaskLogListReq.ProtoReflect.Descriptor instead.
+func (*TaskLogListReq) Descriptor() ([]byte, []int) {
+	return file_job_proto_rawDescGZIP(), []int{14}
+}
+
+func (x *TaskLogListReq) GetPage() uint64 {
+	if x != nil {
+		return x.Page
+	}
+	return 0
+}
+
+func (x *TaskLogListReq) GetPageSize() uint64 {
+	if x != nil {
+		return x.PageSize
+	}
+	return 0
+}
+
+func (x *TaskLogListReq) GetTaskId() uint64 {
+	if x != nil && x.TaskId != nil {
+		return *x.TaskId
+	}
+	return 0
+}
+
+func (x *TaskLogListReq) GetResult() uint32 {
+	if x != nil && x.Result != nil {
+		return *x.Result
+	}
+	return 0
+}
+
+var File_job_proto protoreflect.FileDescriptor
+
+var file_job_proto_rawDesc = []byte{
+	0x0a, 0x09, 0x6a, 0x6f, 0x62, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x03, 0x6a, 0x6f, 0x62,
+	0x22, 0x1a, 0x0a, 0x06, 0x49, 0x44, 0x73, 0x52, 0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64,
+	0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x04, 0x52, 0x03, 0x69, 0x64, 0x73, 0x22, 0x1c, 0x0a, 0x08,
+	0x42, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x73, 0x67, 0x18,
+	0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6d, 0x73, 0x67, 0x22, 0x3e, 0x0a, 0x0b, 0x50, 0x61,
+	0x67, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x67,
+	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x70, 0x61, 0x67, 0x65, 0x12, 0x1b, 0x0a,
+	0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04,
+	0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x4d, 0x0a, 0x0f, 0x54, 0x61,
+	0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x12, 0x14, 0x0a,
+	0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x74, 0x6f,
+	0x74, 0x61, 0x6c, 0x12, 0x24, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28,
+	0x0b, 0x32, 0x10, 0x2e, 0x6a, 0x6f, 0x62, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x49,
+	0x6e, 0x66, 0x6f, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x17, 0x0a, 0x05, 0x49, 0x44, 0x52,
+	0x65, 0x71, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02,
+	0x69, 0x64, 0x22, 0x19, 0x0a, 0x07, 0x55, 0x55, 0x49, 0x44, 0x52, 0x65, 0x71, 0x12, 0x0e, 0x0a,
+	0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x2e, 0x0a,
+	0x0a, 0x42, 0x61, 0x73, 0x65, 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69,
+	0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6d,
+	0x73, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6d, 0x73, 0x67, 0x22, 0x47, 0x0a,
+	0x0c, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x12, 0x14, 0x0a,
+	0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x74, 0x6f,
+	0x74, 0x61, 0x6c, 0x12, 0x21, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28,
+	0x0b, 0x32, 0x0d, 0x2e, 0x6a, 0x6f, 0x62, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f,
+	0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22,
+	0xa1, 0x03, 0x0a, 0x08, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x13, 0x0a, 0x02,
+	0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x02, 0x69, 0x64, 0x88, 0x01,
+	0x01, 0x12, 0x22, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18,
+	0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
+	0x41, 0x74, 0x88, 0x01, 0x01, 0x12, 0x22, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64,
+	0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x09, 0x75, 0x70, 0x64,
+	0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x88, 0x01, 0x01, 0x12, 0x1b, 0x0a, 0x06, 0x73, 0x74, 0x61,
+	0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x03, 0x52, 0x06, 0x73, 0x74, 0x61,
+	0x74, 0x75, 0x73, 0x88, 0x01, 0x01, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05,
+	0x20, 0x01, 0x28, 0x09, 0x48, 0x04, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12,
+	0x22, 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x06, 0x20,
+	0x01, 0x28, 0x09, 0x48, 0x05, 0x52, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x47, 0x72, 0x6f, 0x75, 0x70,
+	0x88, 0x01, 0x01, 0x12, 0x2c, 0x0a, 0x0f, 0x63, 0x72, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x72,
+	0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x06, 0x52, 0x0e,
+	0x63, 0x72, 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01,
+	0x01, 0x12, 0x1d, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x08, 0x20, 0x01,
+	0x28, 0x09, 0x48, 0x07, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x88, 0x01, 0x01,
+	0x12, 0x1d, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28,
+	0x09, 0x48, 0x08, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x88, 0x01, 0x01, 0x42,
+	0x05, 0x0a, 0x03, 0x5f, 0x69, 0x64, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74,
+	0x65, 0x64, 0x5f, 0x61, 0x74, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
+	0x64, 0x5f, 0x61, 0x74, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42,
+	0x07, 0x0a, 0x05, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x74, 0x61, 0x73,
+	0x6b, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x63, 0x72, 0x6f, 0x6e,
+	0x5f, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x0a, 0x0a, 0x08, 0x5f,
+	0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x70, 0x61, 0x79, 0x6c,
+	0x6f, 0x61, 0x64, 0x22, 0xa0, 0x02, 0x0a, 0x0b, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x49,
+	0x6e, 0x66, 0x6f, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x48,
+	0x00, 0x52, 0x02, 0x69, 0x64, 0x88, 0x01, 0x01, 0x12, 0x22, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61,
+	0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x09,
+	0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x88, 0x01, 0x01, 0x12, 0x22, 0x0a, 0x0a,
+	0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03,
+	0x48, 0x02, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x88, 0x01, 0x01,
+	0x12, 0x22, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04,
+	0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x41,
+	0x74, 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64,
+	0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x0a, 0x66, 0x69, 0x6e,
+	0x69, 0x73, 0x68, 0x65, 0x64, 0x41, 0x74, 0x88, 0x01, 0x01, 0x12, 0x1b, 0x0a, 0x06, 0x72, 0x65,
+	0x73, 0x75, 0x6c, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x05, 0x52, 0x06, 0x72, 0x65,
+	0x73, 0x75, 0x6c, 0x74, 0x88, 0x01, 0x01, 0x42, 0x05, 0x0a, 0x03, 0x5f, 0x69, 0x64, 0x42, 0x0d,
+	0x0a, 0x0b, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x42, 0x0d, 0x0a,
+	0x0b, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x42, 0x0d, 0x0a, 0x0b,
+	0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x42, 0x0e, 0x0a, 0x0c, 0x5f,
+	0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x42, 0x09, 0x0a, 0x07, 0x5f,
+	0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x1c, 0x0a, 0x08, 0x55, 0x55, 0x49, 0x44, 0x73, 0x52,
+	0x65, 0x71, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
+	0x03, 0x69, 0x64, 0x73, 0x22, 0x30, 0x0a, 0x0c, 0x42, 0x61, 0x73, 0x65, 0x55, 0x55, 0x49, 0x44,
+	0x52, 0x65, 0x73, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x73, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x03, 0x6d, 0x73, 0x67, 0x22, 0x93, 0x01, 0x0a, 0x0b, 0x54, 0x61, 0x73, 0x6b, 0x4c,
+	0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x67, 0x65, 0x18, 0x01,
+	0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x70, 0x61, 0x67, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61,
+	0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70,
+	0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+	0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x88, 0x01, 0x01,
+	0x12, 0x22, 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x04,
+	0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x47, 0x72, 0x6f, 0x75,
+	0x70, 0x88, 0x01, 0x01, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x42, 0x0d, 0x0a,
+	0x0b, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x93, 0x01, 0x0a,
+	0x0e, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x12,
+	0x12, 0x0a, 0x04, 0x70, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x70,
+	0x61, 0x67, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65,
+	0x12, 0x1c, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28,
+	0x04, 0x48, 0x00, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x88, 0x01, 0x01, 0x12, 0x1b,
+	0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x01,
+	0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x88, 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f,
+	0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x72, 0x65, 0x73, 0x75,
+	0x6c, 0x74, 0x32, 0x92, 0x04, 0x0a, 0x03, 0x4a, 0x6f, 0x62, 0x12, 0x29, 0x0a, 0x0c, 0x69, 0x6e,
+	0x69, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x0a, 0x2e, 0x6a, 0x6f, 0x62,
+	0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x0d, 0x2e, 0x6a, 0x6f, 0x62, 0x2e, 0x42, 0x61, 0x73,
+	0x65, 0x52, 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54,
+	0x61, 0x73, 0x6b, 0x12, 0x0d, 0x2e, 0x6a, 0x6f, 0x62, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x6e,
+	0x66, 0x6f, 0x1a, 0x0f, 0x2e, 0x6a, 0x6f, 0x62, 0x2e, 0x42, 0x61, 0x73, 0x65, 0x49, 0x44, 0x52,
+	0x65, 0x73, 0x70, 0x12, 0x2a, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73,
+	0x6b, 0x12, 0x0d, 0x2e, 0x6a, 0x6f, 0x62, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f,
+	0x1a, 0x0d, 0x2e, 0x6a, 0x6f, 0x62, 0x2e, 0x42, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x12,
+	0x32, 0x0a, 0x0b, 0x67, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x10,
+	0x2e, 0x6a, 0x6f, 0x62, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71,
+	0x1a, 0x11, 0x2e, 0x6a, 0x6f, 0x62, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x69, 0x73, 0x74, 0x52,
+	0x65, 0x73, 0x70, 0x12, 0x28, 0x0a, 0x0b, 0x67, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x42, 0x79,
+	0x49, 0x64, 0x12, 0x0a, 0x2e, 0x6a, 0x6f, 0x62, 0x2e, 0x49, 0x44, 0x52, 0x65, 0x71, 0x1a, 0x0d,
+	0x2e, 0x6a, 0x6f, 0x62, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x28, 0x0a,
+	0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x0b, 0x2e, 0x6a, 0x6f,
+	0x62, 0x2e, 0x49, 0x44, 0x73, 0x52, 0x65, 0x71, 0x1a, 0x0d, 0x2e, 0x6a, 0x6f, 0x62, 0x2e, 0x42,
+	0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x12, 0x32, 0x0a, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74,
+	0x65, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x12, 0x10, 0x2e, 0x6a, 0x6f, 0x62, 0x2e, 0x54,
+	0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x1a, 0x0f, 0x2e, 0x6a, 0x6f, 0x62,
+	0x2e, 0x42, 0x61, 0x73, 0x65, 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x12, 0x30, 0x0a, 0x0d, 0x75,
+	0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x12, 0x10, 0x2e, 0x6a,
+	0x6f, 0x62, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x1a, 0x0d,
+	0x2e, 0x6a, 0x6f, 0x62, 0x2e, 0x42, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x12, 0x3b, 0x0a,
+	0x0e, 0x67, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x12,
+	0x13, 0x2e, 0x6a, 0x6f, 0x62, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x4c, 0x69, 0x73,
+	0x74, 0x52, 0x65, 0x71, 0x1a, 0x14, 0x2e, 0x6a, 0x6f, 0x62, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4c,
+	0x6f, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x12, 0x2e, 0x0a, 0x0e, 0x67, 0x65,
+	0x74, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x42, 0x79, 0x49, 0x64, 0x12, 0x0a, 0x2e, 0x6a,
+	0x6f, 0x62, 0x2e, 0x49, 0x44, 0x52, 0x65, 0x71, 0x1a, 0x10, 0x2e, 0x6a, 0x6f, 0x62, 0x2e, 0x54,
+	0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2b, 0x0a, 0x0d, 0x64, 0x65,
+	0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x12, 0x0b, 0x2e, 0x6a, 0x6f,
+	0x62, 0x2e, 0x49, 0x44, 0x73, 0x52, 0x65, 0x71, 0x1a, 0x0d, 0x2e, 0x6a, 0x6f, 0x62, 0x2e, 0x42,
+	0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x42, 0x07, 0x5a, 0x05, 0x2e, 0x2f, 0x6a, 0x6f, 0x62,
+	0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_job_proto_rawDescOnce sync.Once
+	file_job_proto_rawDescData = file_job_proto_rawDesc
+)
+
+func file_job_proto_rawDescGZIP() []byte {
+	file_job_proto_rawDescOnce.Do(func() {
+		file_job_proto_rawDescData = protoimpl.X.CompressGZIP(file_job_proto_rawDescData)
+	})
+	return file_job_proto_rawDescData
+}
+
+var file_job_proto_msgTypes = make([]protoimpl.MessageInfo, 15)
+var file_job_proto_goTypes = []interface{}{
+	(*IDsReq)(nil),          // 0: job.IDsReq
+	(*BaseResp)(nil),        // 1: job.BaseResp
+	(*PageInfoReq)(nil),     // 2: job.PageInfoReq
+	(*TaskLogListResp)(nil), // 3: job.TaskLogListResp
+	(*IDReq)(nil),           // 4: job.IDReq
+	(*UUIDReq)(nil),         // 5: job.UUIDReq
+	(*BaseIDResp)(nil),      // 6: job.BaseIDResp
+	(*TaskListResp)(nil),    // 7: job.TaskListResp
+	(*Empty)(nil),           // 8: job.Empty
+	(*TaskInfo)(nil),        // 9: job.TaskInfo
+	(*TaskLogInfo)(nil),     // 10: job.TaskLogInfo
+	(*UUIDsReq)(nil),        // 11: job.UUIDsReq
+	(*BaseUUIDResp)(nil),    // 12: job.BaseUUIDResp
+	(*TaskListReq)(nil),     // 13: job.TaskListReq
+	(*TaskLogListReq)(nil),  // 14: job.TaskLogListReq
+}
+var file_job_proto_depIdxs = []int32{
+	10, // 0: job.TaskLogListResp.data:type_name -> job.TaskLogInfo
+	9,  // 1: job.TaskListResp.data:type_name -> job.TaskInfo
+	8,  // 2: job.Job.initDatabase:input_type -> job.Empty
+	9,  // 3: job.Job.createTask:input_type -> job.TaskInfo
+	9,  // 4: job.Job.updateTask:input_type -> job.TaskInfo
+	13, // 5: job.Job.getTaskList:input_type -> job.TaskListReq
+	4,  // 6: job.Job.getTaskById:input_type -> job.IDReq
+	0,  // 7: job.Job.deleteTask:input_type -> job.IDsReq
+	10, // 8: job.Job.createTaskLog:input_type -> job.TaskLogInfo
+	10, // 9: job.Job.updateTaskLog:input_type -> job.TaskLogInfo
+	14, // 10: job.Job.getTaskLogList:input_type -> job.TaskLogListReq
+	4,  // 11: job.Job.getTaskLogById:input_type -> job.IDReq
+	0,  // 12: job.Job.deleteTaskLog:input_type -> job.IDsReq
+	1,  // 13: job.Job.initDatabase:output_type -> job.BaseResp
+	6,  // 14: job.Job.createTask:output_type -> job.BaseIDResp
+	1,  // 15: job.Job.updateTask:output_type -> job.BaseResp
+	7,  // 16: job.Job.getTaskList:output_type -> job.TaskListResp
+	9,  // 17: job.Job.getTaskById:output_type -> job.TaskInfo
+	1,  // 18: job.Job.deleteTask:output_type -> job.BaseResp
+	6,  // 19: job.Job.createTaskLog:output_type -> job.BaseIDResp
+	1,  // 20: job.Job.updateTaskLog:output_type -> job.BaseResp
+	3,  // 21: job.Job.getTaskLogList:output_type -> job.TaskLogListResp
+	10, // 22: job.Job.getTaskLogById:output_type -> job.TaskLogInfo
+	1,  // 23: job.Job.deleteTaskLog:output_type -> job.BaseResp
+	13, // [13:24] is the sub-list for method output_type
+	2,  // [2:13] is the sub-list for method input_type
+	2,  // [2:2] is the sub-list for extension type_name
+	2,  // [2:2] is the sub-list for extension extendee
+	0,  // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_job_proto_init() }
+func file_job_proto_init() {
+	if File_job_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_job_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*IDsReq); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_job_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*BaseResp); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_job_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*PageInfoReq); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_job_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*TaskLogListResp); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_job_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*IDReq); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_job_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*UUIDReq); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_job_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*BaseIDResp); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_job_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*TaskListResp); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_job_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Empty); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_job_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*TaskInfo); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_job_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*TaskLogInfo); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_job_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*UUIDsReq); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_job_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*BaseUUIDResp); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_job_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*TaskListReq); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_job_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*TaskLogListReq); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	file_job_proto_msgTypes[9].OneofWrappers = []interface{}{}
+	file_job_proto_msgTypes[10].OneofWrappers = []interface{}{}
+	file_job_proto_msgTypes[13].OneofWrappers = []interface{}{}
+	file_job_proto_msgTypes[14].OneofWrappers = []interface{}{}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_job_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   15,
+			NumExtensions: 0,
+			NumServices:   1,
+		},
+		GoTypes:           file_job_proto_goTypes,
+		DependencyIndexes: file_job_proto_depIdxs,
+		MessageInfos:      file_job_proto_msgTypes,
+	}.Build()
+	File_job_proto = out.File
+	file_job_proto_rawDesc = nil
+	file_job_proto_goTypes = nil
+	file_job_proto_depIdxs = nil
+}

+ 505 - 0
types/job/job_grpc.pb.go

@@ -0,0 +1,505 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.3.0
+// - protoc             v4.25.2
+// source: job.proto
+
+package job
+
+import (
+	context "context"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+// Full gRPC method names ("/<package>.<Service>/<method>") for the Job
+// service, shared by the client stubs and server handlers in this file.
+// Note the method parts are lowerCamel, mirroring the rpc names in job.proto.
+const (
+	Job_InitDatabase_FullMethodName   = "/job.Job/initDatabase"
+	Job_CreateTask_FullMethodName     = "/job.Job/createTask"
+	Job_UpdateTask_FullMethodName     = "/job.Job/updateTask"
+	Job_GetTaskList_FullMethodName    = "/job.Job/getTaskList"
+	Job_GetTaskById_FullMethodName    = "/job.Job/getTaskById"
+	Job_DeleteTask_FullMethodName     = "/job.Job/deleteTask"
+	Job_CreateTaskLog_FullMethodName  = "/job.Job/createTaskLog"
+	Job_UpdateTaskLog_FullMethodName  = "/job.Job/updateTaskLog"
+	Job_GetTaskLogList_FullMethodName = "/job.Job/getTaskLogList"
+	Job_GetTaskLogById_FullMethodName = "/job.Job/getTaskLogById"
+	Job_DeleteTaskLog_FullMethodName  = "/job.Job/deleteTaskLog"
+)
+
+// JobClient is the client API for Job service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type JobClient interface {
+	//  group: base
+	InitDatabase(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*BaseResp, error)
+	// Task management
+	// group: task
+	CreateTask(ctx context.Context, in *TaskInfo, opts ...grpc.CallOption) (*BaseIDResp, error)
+	// group: task
+	UpdateTask(ctx context.Context, in *TaskInfo, opts ...grpc.CallOption) (*BaseResp, error)
+	// group: task
+	GetTaskList(ctx context.Context, in *TaskListReq, opts ...grpc.CallOption) (*TaskListResp, error)
+	// group: task
+	GetTaskById(ctx context.Context, in *IDReq, opts ...grpc.CallOption) (*TaskInfo, error)
+	// group: task
+	DeleteTask(ctx context.Context, in *IDsReq, opts ...grpc.CallOption) (*BaseResp, error)
+	// TaskLog management
+	// group: tasklog
+	CreateTaskLog(ctx context.Context, in *TaskLogInfo, opts ...grpc.CallOption) (*BaseIDResp, error)
+	// group: tasklog
+	UpdateTaskLog(ctx context.Context, in *TaskLogInfo, opts ...grpc.CallOption) (*BaseResp, error)
+	// group: tasklog
+	GetTaskLogList(ctx context.Context, in *TaskLogListReq, opts ...grpc.CallOption) (*TaskLogListResp, error)
+	// group: tasklog
+	GetTaskLogById(ctx context.Context, in *IDReq, opts ...grpc.CallOption) (*TaskLogInfo, error)
+	// group: tasklog
+	DeleteTaskLog(ctx context.Context, in *IDsReq, opts ...grpc.CallOption) (*BaseResp, error)
+}
+
+// jobClient is the generated concrete implementation of JobClient; it holds
+// the connection interface every stub method issues its RPC on.
+type jobClient struct {
+	cc grpc.ClientConnInterface
+}
+
+// NewJobClient wraps an existing client connection in a JobClient stub.
+func NewJobClient(cc grpc.ClientConnInterface) JobClient {
+	return &jobClient{cc}
+}
+
+// Each stub below follows the same generated unary-RPC pattern: allocate the
+// typed response message, invoke the RPC by its full method name over c.cc,
+// and return the response only when the invocation reported no error.
+func (c *jobClient) InitDatabase(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*BaseResp, error) {
+	out := new(BaseResp)
+	err := c.cc.Invoke(ctx, Job_InitDatabase_FullMethodName, in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *jobClient) CreateTask(ctx context.Context, in *TaskInfo, opts ...grpc.CallOption) (*BaseIDResp, error) {
+	out := new(BaseIDResp)
+	err := c.cc.Invoke(ctx, Job_CreateTask_FullMethodName, in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *jobClient) UpdateTask(ctx context.Context, in *TaskInfo, opts ...grpc.CallOption) (*BaseResp, error) {
+	out := new(BaseResp)
+	err := c.cc.Invoke(ctx, Job_UpdateTask_FullMethodName, in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *jobClient) GetTaskList(ctx context.Context, in *TaskListReq, opts ...grpc.CallOption) (*TaskListResp, error) {
+	out := new(TaskListResp)
+	err := c.cc.Invoke(ctx, Job_GetTaskList_FullMethodName, in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *jobClient) GetTaskById(ctx context.Context, in *IDReq, opts ...grpc.CallOption) (*TaskInfo, error) {
+	out := new(TaskInfo)
+	err := c.cc.Invoke(ctx, Job_GetTaskById_FullMethodName, in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *jobClient) DeleteTask(ctx context.Context, in *IDsReq, opts ...grpc.CallOption) (*BaseResp, error) {
+	out := new(BaseResp)
+	err := c.cc.Invoke(ctx, Job_DeleteTask_FullMethodName, in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *jobClient) CreateTaskLog(ctx context.Context, in *TaskLogInfo, opts ...grpc.CallOption) (*BaseIDResp, error) {
+	out := new(BaseIDResp)
+	err := c.cc.Invoke(ctx, Job_CreateTaskLog_FullMethodName, in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *jobClient) UpdateTaskLog(ctx context.Context, in *TaskLogInfo, opts ...grpc.CallOption) (*BaseResp, error) {
+	out := new(BaseResp)
+	err := c.cc.Invoke(ctx, Job_UpdateTaskLog_FullMethodName, in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *jobClient) GetTaskLogList(ctx context.Context, in *TaskLogListReq, opts ...grpc.CallOption) (*TaskLogListResp, error) {
+	out := new(TaskLogListResp)
+	err := c.cc.Invoke(ctx, Job_GetTaskLogList_FullMethodName, in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *jobClient) GetTaskLogById(ctx context.Context, in *IDReq, opts ...grpc.CallOption) (*TaskLogInfo, error) {
+	out := new(TaskLogInfo)
+	err := c.cc.Invoke(ctx, Job_GetTaskLogById_FullMethodName, in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *jobClient) DeleteTaskLog(ctx context.Context, in *IDsReq, opts ...grpc.CallOption) (*BaseResp, error) {
+	out := new(BaseResp)
+	err := c.cc.Invoke(ctx, Job_DeleteTaskLog_FullMethodName, in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// JobServer is the server API for Job service.
+// All implementations must embed UnimplementedJobServer
+// for forward compatibility
+type JobServer interface {
+	//  group: base
+	InitDatabase(context.Context, *Empty) (*BaseResp, error)
+	// Task management
+	// group: task
+	CreateTask(context.Context, *TaskInfo) (*BaseIDResp, error)
+	// group: task
+	UpdateTask(context.Context, *TaskInfo) (*BaseResp, error)
+	// group: task
+	GetTaskList(context.Context, *TaskListReq) (*TaskListResp, error)
+	// group: task
+	GetTaskById(context.Context, *IDReq) (*TaskInfo, error)
+	// group: task
+	DeleteTask(context.Context, *IDsReq) (*BaseResp, error)
+	// TaskLog management
+	// group: tasklog
+	CreateTaskLog(context.Context, *TaskLogInfo) (*BaseIDResp, error)
+	// group: tasklog
+	UpdateTaskLog(context.Context, *TaskLogInfo) (*BaseResp, error)
+	// group: tasklog
+	GetTaskLogList(context.Context, *TaskLogListReq) (*TaskLogListResp, error)
+	// group: tasklog
+	GetTaskLogById(context.Context, *IDReq) (*TaskLogInfo, error)
+	// group: tasklog
+	DeleteTaskLog(context.Context, *IDsReq) (*BaseResp, error)
+	mustEmbedUnimplementedJobServer()
+}
+
+// UnimplementedJobServer must be embedded to have forward compatible implementations.
+// Every method below returns codes.Unimplemented, so an embedding server that
+// has not yet overridden a method still satisfies JobServer and fails gracefully.
+type UnimplementedJobServer struct {
+}
+
+func (UnimplementedJobServer) InitDatabase(context.Context, *Empty) (*BaseResp, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method InitDatabase not implemented")
+}
+func (UnimplementedJobServer) CreateTask(context.Context, *TaskInfo) (*BaseIDResp, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method CreateTask not implemented")
+}
+func (UnimplementedJobServer) UpdateTask(context.Context, *TaskInfo) (*BaseResp, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UpdateTask not implemented")
+}
+func (UnimplementedJobServer) GetTaskList(context.Context, *TaskListReq) (*TaskListResp, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetTaskList not implemented")
+}
+func (UnimplementedJobServer) GetTaskById(context.Context, *IDReq) (*TaskInfo, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetTaskById not implemented")
+}
+func (UnimplementedJobServer) DeleteTask(context.Context, *IDsReq) (*BaseResp, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DeleteTask not implemented")
+}
+func (UnimplementedJobServer) CreateTaskLog(context.Context, *TaskLogInfo) (*BaseIDResp, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method CreateTaskLog not implemented")
+}
+func (UnimplementedJobServer) UpdateTaskLog(context.Context, *TaskLogInfo) (*BaseResp, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UpdateTaskLog not implemented")
+}
+func (UnimplementedJobServer) GetTaskLogList(context.Context, *TaskLogListReq) (*TaskLogListResp, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetTaskLogList not implemented")
+}
+func (UnimplementedJobServer) GetTaskLogById(context.Context, *IDReq) (*TaskLogInfo, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetTaskLogById not implemented")
+}
+func (UnimplementedJobServer) DeleteTaskLog(context.Context, *IDsReq) (*BaseResp, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DeleteTaskLog not implemented")
+}
+func (UnimplementedJobServer) mustEmbedUnimplementedJobServer() {}
+
+// UnsafeJobServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to JobServer will
+// result in compilation errors.
+type UnsafeJobServer interface {
+	mustEmbedUnimplementedJobServer()
+}
+
+// RegisterJobServer registers srv's handlers with registrar s under the
+// Job_ServiceDesc service descriptor defined at the bottom of this file.
+func RegisterJobServer(s grpc.ServiceRegistrar, srv JobServer) {
+	s.RegisterService(&Job_ServiceDesc, srv)
+}
+
+// Each _Job_*_Handler below follows the same generated pattern: decode the
+// wire request into its concrete message type, then either call the server
+// implementation directly (no interceptor configured) or route the call
+// through the unary interceptor with the matching UnaryServerInfo.
+func _Job_InitDatabase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(JobServer).InitDatabase(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: Job_InitDatabase_FullMethodName,
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(JobServer).InitDatabase(ctx, req.(*Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Job_CreateTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(TaskInfo)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(JobServer).CreateTask(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: Job_CreateTask_FullMethodName,
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(JobServer).CreateTask(ctx, req.(*TaskInfo))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Job_UpdateTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(TaskInfo)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(JobServer).UpdateTask(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: Job_UpdateTask_FullMethodName,
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(JobServer).UpdateTask(ctx, req.(*TaskInfo))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Job_GetTaskList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(TaskListReq)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(JobServer).GetTaskList(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: Job_GetTaskList_FullMethodName,
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(JobServer).GetTaskList(ctx, req.(*TaskListReq))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Job_GetTaskById_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(IDReq)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(JobServer).GetTaskById(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: Job_GetTaskById_FullMethodName,
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(JobServer).GetTaskById(ctx, req.(*IDReq))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Job_DeleteTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(IDsReq)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(JobServer).DeleteTask(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: Job_DeleteTask_FullMethodName,
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(JobServer).DeleteTask(ctx, req.(*IDsReq))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Job_CreateTaskLog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(TaskLogInfo)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(JobServer).CreateTaskLog(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: Job_CreateTaskLog_FullMethodName,
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(JobServer).CreateTaskLog(ctx, req.(*TaskLogInfo))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Job_UpdateTaskLog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(TaskLogInfo)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(JobServer).UpdateTaskLog(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: Job_UpdateTaskLog_FullMethodName,
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(JobServer).UpdateTaskLog(ctx, req.(*TaskLogInfo))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Job_GetTaskLogList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(TaskLogListReq)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(JobServer).GetTaskLogList(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: Job_GetTaskLogList_FullMethodName,
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(JobServer).GetTaskLogList(ctx, req.(*TaskLogListReq))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Job_GetTaskLogById_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(IDReq)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(JobServer).GetTaskLogById(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: Job_GetTaskLogById_FullMethodName,
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(JobServer).GetTaskLogById(ctx, req.(*IDReq))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Job_DeleteTaskLog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(IDsReq)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(JobServer).DeleteTaskLog(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: Job_DeleteTaskLog_FullMethodName,
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(JobServer).DeleteTaskLog(ctx, req.(*IDsReq))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+// Job_ServiceDesc is the grpc.ServiceDesc for Job service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+// MethodName entries are the short (lowerCamel) rpc names; the service has
+// no streaming methods, so Streams is empty.
+var Job_ServiceDesc = grpc.ServiceDesc{
+	ServiceName: "job.Job",
+	HandlerType: (*JobServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "initDatabase",
+			Handler:    _Job_InitDatabase_Handler,
+		},
+		{
+			MethodName: "createTask",
+			Handler:    _Job_CreateTask_Handler,
+		},
+		{
+			MethodName: "updateTask",
+			Handler:    _Job_UpdateTask_Handler,
+		},
+		{
+			MethodName: "getTaskList",
+			Handler:    _Job_GetTaskList_Handler,
+		},
+		{
+			MethodName: "getTaskById",
+			Handler:    _Job_GetTaskById_Handler,
+		},
+		{
+			MethodName: "deleteTask",
+			Handler:    _Job_DeleteTask_Handler,
+		},
+		{
+			MethodName: "createTaskLog",
+			Handler:    _Job_CreateTaskLog_Handler,
+		},
+		{
+			MethodName: "updateTaskLog",
+			Handler:    _Job_UpdateTaskLog_Handler,
+		},
+		{
+			MethodName: "getTaskLogList",
+			Handler:    _Job_GetTaskLogList_Handler,
+		},
+		{
+			MethodName: "getTaskLogById",
+			Handler:    _Job_GetTaskLogById_Handler,
+		},
+		{
+			MethodName: "deleteTaskLog",
+			Handler:    _Job_DeleteTaskLog_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "job.proto",
+}