// Code generated by ent, DO NOT EDIT.

package ent

import (
	"context"
	"fmt"
	"math"
	"wechat-api/ent/predicate"
	"wechat-api/ent/usagestatistichour"

	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
	"entgo.io/ent/schema/field"
)

// UsageStatisticHourQuery is the builder for querying UsageStatisticHour entities.
type UsageStatisticHourQuery struct {
	config
	ctx        *QueryContext
	order      []usagestatistichour.OrderOption
	inters     []Interceptor
	predicates []predicate.UsageStatisticHour
	// intermediate query (i.e. traversal path).
	sql  *sql.Selector
	path func(context.Context) (*sql.Selector, error)
}

// Where adds a new predicate for the UsageStatisticHourQuery builder.
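//
// Example (illustrative sketch; "client", "ctx", "since" and the CreatedAtGT/IDGT
// predicate helpers are assumed here, as ent typically generates them):
//
//	hours, err := client.UsageStatisticHour.Query().
//		Where(
//			usagestatistichour.CreatedAtGT(since),
//			usagestatistichour.IDGT(0),
//		).
//		All(ctx)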
func (ushq *UsageStatisticHourQuery) Where(ps ...predicate.UsageStatisticHour) *UsageStatisticHourQuery {
	ushq.predicates = append(ushq.predicates, ps...)
	return ushq
}

// Limit the number of records to be returned by this query.
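//
// Example (illustrative page query; "client" and "ctx" are assumed to be in scope,
// and ByID is the order option ent typically generates for the id field):
//
//	page, err := client.UsageStatisticHour.Query().
//		Order(usagestatistichour.ByID()).
//		Offset(20).
//		Limit(10).
//		All(ctx)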
func (ushq *UsageStatisticHourQuery) Limit(limit int) *UsageStatisticHourQuery {
	ushq.ctx.Limit = &limit
	return ushq
}

// Offset to start from.
func (ushq *UsageStatisticHourQuery) Offset(offset int) *UsageStatisticHourQuery {
	ushq.ctx.Offset = &offset
	return ushq
}

// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (ushq *UsageStatisticHourQuery) Unique(unique bool) *UsageStatisticHourQuery {
	ushq.ctx.Unique = &unique
	return ushq
}

// Order specifies how the records should be ordered.
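//
// Example (illustrative; ByCreatedAt and sql.OrderDesc are the order helpers ent
// typically generates/exposes, assumed available here):
//
//	client.UsageStatisticHour.Query().
//		Order(usagestatistichour.ByCreatedAt(sql.OrderDesc())).
//		All(ctx)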
func (ushq *UsageStatisticHourQuery) Order(o ...usagestatistichour.OrderOption) *UsageStatisticHourQuery {
	ushq.order = append(ushq.order, o...)
	return ushq
}

// First returns the first UsageStatisticHour entity from the query.
// Returns a *NotFoundError when no UsageStatisticHour was found.
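//
// Example (illustrative; fetches the most recent row, assuming the ByCreatedAt
// order option is generated for this schema):
//
//	latest, err := client.UsageStatisticHour.Query().
//		Order(usagestatistichour.ByCreatedAt(sql.OrderDesc())).
//		First(ctx)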
func (ushq *UsageStatisticHourQuery) First(ctx context.Context) (*UsageStatisticHour, error) {
	nodes, err := ushq.Limit(1).All(setContextOp(ctx, ushq.ctx, "First"))
	if err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nil, &NotFoundError{usagestatistichour.Label}
	}
	return nodes[0], nil
}

// FirstX is like First, but panics if an error occurs.
func (ushq *UsageStatisticHourQuery) FirstX(ctx context.Context) *UsageStatisticHour {
	node, err := ushq.First(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return node
}

// FirstID returns the first UsageStatisticHour ID from the query.
// Returns a *NotFoundError when no UsageStatisticHour ID was found.
func (ushq *UsageStatisticHourQuery) FirstID(ctx context.Context) (id uint64, err error) {
	var ids []uint64
	if ids, err = ushq.Limit(1).IDs(setContextOp(ctx, ushq.ctx, "FirstID")); err != nil {
		return
	}
	if len(ids) == 0 {
		err = &NotFoundError{usagestatistichour.Label}
		return
	}
	return ids[0], nil
}

// FirstIDX is like FirstID, but panics if an error occurs.
func (ushq *UsageStatisticHourQuery) FirstIDX(ctx context.Context) uint64 {
	id, err := ushq.FirstID(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return id
}

// Only returns a single UsageStatisticHour entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one UsageStatisticHour entity is found.
// Returns a *NotFoundError when no UsageStatisticHour entities are found.
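//
// Example (illustrative; "id" is an assumed uint64 in scope):
//
//	ush, err := client.UsageStatisticHour.Query().
//		Where(usagestatistichour.IDEQ(id)).
//		Only(ctx)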
func (ushq *UsageStatisticHourQuery) Only(ctx context.Context) (*UsageStatisticHour, error) {
	nodes, err := ushq.Limit(2).All(setContextOp(ctx, ushq.ctx, "Only"))
	if err != nil {
		return nil, err
	}
	switch len(nodes) {
	case 1:
		return nodes[0], nil
	case 0:
		return nil, &NotFoundError{usagestatistichour.Label}
	default:
		return nil, &NotSingularError{usagestatistichour.Label}
	}
}

// OnlyX is like Only, but panics if an error occurs.
func (ushq *UsageStatisticHourQuery) OnlyX(ctx context.Context) *UsageStatisticHour {
	node, err := ushq.Only(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// OnlyID is like Only, but returns the only UsageStatisticHour ID in the query.
// Returns a *NotSingularError when more than one UsageStatisticHour ID is found.
// Returns a *NotFoundError when no entities are found.
func (ushq *UsageStatisticHourQuery) OnlyID(ctx context.Context) (id uint64, err error) {
	var ids []uint64
	if ids, err = ushq.Limit(2).IDs(setContextOp(ctx, ushq.ctx, "OnlyID")); err != nil {
		return
	}
	switch len(ids) {
	case 1:
		id = ids[0]
	case 0:
		err = &NotFoundError{usagestatistichour.Label}
	default:
		err = &NotSingularError{usagestatistichour.Label}
	}
	return
}

// OnlyIDX is like OnlyID, but panics if an error occurs.
func (ushq *UsageStatisticHourQuery) OnlyIDX(ctx context.Context) uint64 {
	id, err := ushq.OnlyID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}

// All executes the query and returns a list of UsageStatisticHours.
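//
// Example (illustrative; "client" and "ctx" are assumed to be in scope):
//
//	hours, err := client.UsageStatisticHour.Query().
//		Order(usagestatistichour.ByID()).
//		All(ctx)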
func (ushq *UsageStatisticHourQuery) All(ctx context.Context) ([]*UsageStatisticHour, error) {
	ctx = setContextOp(ctx, ushq.ctx, "All")
	if err := ushq.prepareQuery(ctx); err != nil {
		return nil, err
	}
	qr := querierAll[[]*UsageStatisticHour, *UsageStatisticHourQuery]()
	return withInterceptors[[]*UsageStatisticHour](ctx, ushq, qr, ushq.inters)
}

// AllX is like All, but panics if an error occurs.
func (ushq *UsageStatisticHourQuery) AllX(ctx context.Context) []*UsageStatisticHour {
	nodes, err := ushq.All(ctx)
	if err != nil {
		panic(err)
	}
	return nodes
}

// IDs executes the query and returns a list of UsageStatisticHour IDs.
func (ushq *UsageStatisticHourQuery) IDs(ctx context.Context) (ids []uint64, err error) {
	if ushq.ctx.Unique == nil && ushq.path != nil {
		ushq.Unique(true)
	}
	ctx = setContextOp(ctx, ushq.ctx, "IDs")
	if err = ushq.Select(usagestatistichour.FieldID).Scan(ctx, &ids); err != nil {
		return nil, err
	}
	return ids, nil
}

// IDsX is like IDs, but panics if an error occurs.
func (ushq *UsageStatisticHourQuery) IDsX(ctx context.Context) []uint64 {
	ids, err := ushq.IDs(ctx)
	if err != nil {
		panic(err)
	}
	return ids
}

// Count returns the count of the given query.
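//
// Example (illustrative; "since" is an assumed time.Time value):
//
//	n, err := client.UsageStatisticHour.Query().
//		Where(usagestatistichour.CreatedAtGT(since)).
//		Count(ctx)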
func (ushq *UsageStatisticHourQuery) Count(ctx context.Context) (int, error) {
	ctx = setContextOp(ctx, ushq.ctx, "Count")
	if err := ushq.prepareQuery(ctx); err != nil {
		return 0, err
	}
	return withInterceptors[int](ctx, ushq, querierCount[*UsageStatisticHourQuery](), ushq.inters)
}

// CountX is like Count, but panics if an error occurs.
func (ushq *UsageStatisticHourQuery) CountX(ctx context.Context) int {
	count, err := ushq.Count(ctx)
	if err != nil {
		panic(err)
	}
	return count
}

// Exist returns true if the query has elements in the graph.
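//
// Example (illustrative; "id" is an assumed uint64 in scope):
//
//	exists, err := client.UsageStatisticHour.Query().
//		Where(usagestatistichour.IDEQ(id)).
//		Exist(ctx)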
func (ushq *UsageStatisticHourQuery) Exist(ctx context.Context) (bool, error) {
	ctx = setContextOp(ctx, ushq.ctx, "Exist")
	switch _, err := ushq.FirstID(ctx); {
	case IsNotFound(err):
		return false, nil
	case err != nil:
		return false, fmt.Errorf("ent: check existence: %w", err)
	default:
		return true, nil
	}
}

// ExistX is like Exist, but panics if an error occurs.
func (ushq *UsageStatisticHourQuery) ExistX(ctx context.Context) bool {
	exist, err := ushq.Exist(ctx)
	if err != nil {
		panic(err)
	}
	return exist
}

// Clone returns a duplicate of the UsageStatisticHourQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
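//
// Example (illustrative; a shared base query reused for a count and a page):
//
//	base := client.UsageStatisticHour.Query().
//		Where(usagestatistichour.CreatedAtGT(since))
//	total, _ := base.Clone().Count(ctx)
//	firstPage, _ := base.Clone().Limit(20).All(ctx)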
func (ushq *UsageStatisticHourQuery) Clone() *UsageStatisticHourQuery {
	if ushq == nil {
		return nil
	}
	return &UsageStatisticHourQuery{
		config:     ushq.config,
		ctx:        ushq.ctx.Clone(),
		order:      append([]usagestatistichour.OrderOption{}, ushq.order...),
		inters:     append([]Interceptor{}, ushq.inters...),
		predicates: append([]predicate.UsageStatisticHour{}, ushq.predicates...),
		// clone intermediate query.
		sql:  ushq.sql.Clone(),
		path: ushq.path,
	}
}

// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//		Count int `json:"count,omitempty"`
//	}
//
//	client.UsageStatisticHour.Query().
//		GroupBy(usagestatistichour.FieldCreatedAt).
//		Aggregate(ent.Count()).
//		Scan(ctx, &v)
func (ushq *UsageStatisticHourQuery) GroupBy(field string, fields ...string) *UsageStatisticHourGroupBy {
	ushq.ctx.Fields = append([]string{field}, fields...)
	grbuild := &UsageStatisticHourGroupBy{build: ushq}
	grbuild.flds = &ushq.ctx.Fields
	grbuild.label = usagestatistichour.Label
	grbuild.scan = grbuild.Scan
	return grbuild
}

// Select allows selecting one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//	}
//
//	client.UsageStatisticHour.Query().
//		Select(usagestatistichour.FieldCreatedAt).
//		Scan(ctx, &v)
func (ushq *UsageStatisticHourQuery) Select(fields ...string) *UsageStatisticHourSelect {
	ushq.ctx.Fields = append(ushq.ctx.Fields, fields...)
	sbuild := &UsageStatisticHourSelect{UsageStatisticHourQuery: ushq}
	sbuild.label = usagestatistichour.Label
	sbuild.flds, sbuild.scan = &ushq.ctx.Fields, sbuild.Scan
	return sbuild
}

// Aggregate returns a UsageStatisticHourSelect configured with the given aggregations.
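//
// Example (illustrative; mirrors the GroupBy example above, but aggregates over
// the whole result set; other generated helpers such as ent.Max can typically be
// combined the same way):
//
//	var v []struct {
//		Count int `json:"count"`
//	}
//	client.UsageStatisticHour.Query().
//		Aggregate(ent.Count()).
//		Scan(ctx, &v)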
func (ushq *UsageStatisticHourQuery) Aggregate(fns ...AggregateFunc) *UsageStatisticHourSelect {
	return ushq.Select().Aggregate(fns...)
}

func (ushq *UsageStatisticHourQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range ushq.inters {
		if inter == nil {
			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
		}
		if trv, ok := inter.(Traverser); ok {
			if err := trv.Traverse(ctx, ushq); err != nil {
				return err
			}
		}
	}
	for _, f := range ushq.ctx.Fields {
		if !usagestatistichour.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
		}
	}
	if ushq.path != nil {
		prev, err := ushq.path(ctx)
		if err != nil {
			return err
		}
		ushq.sql = prev
	}
	return nil
}

func (ushq *UsageStatisticHourQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*UsageStatisticHour, error) {
	var (
		nodes = []*UsageStatisticHour{}
		_spec = ushq.querySpec()
	)
	_spec.ScanValues = func(columns []string) ([]any, error) {
		return (*UsageStatisticHour).scanValues(nil, columns)
	}
	_spec.Assign = func(columns []string, values []any) error {
		node := &UsageStatisticHour{config: ushq.config}
		nodes = append(nodes, node)
		return node.assignValues(columns, values)
	}
	for i := range hooks {
		hooks[i](ctx, _spec)
	}
	if err := sqlgraph.QueryNodes(ctx, ushq.driver, _spec); err != nil {
		return nil, err
	}
	return nodes, nil
}

func (ushq *UsageStatisticHourQuery) sqlCount(ctx context.Context) (int, error) {
	_spec := ushq.querySpec()
	_spec.Node.Columns = ushq.ctx.Fields
	if len(ushq.ctx.Fields) > 0 {
		_spec.Unique = ushq.ctx.Unique != nil && *ushq.ctx.Unique
	}
	return sqlgraph.CountNodes(ctx, ushq.driver, _spec)
}

func (ushq *UsageStatisticHourQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := sqlgraph.NewQuerySpec(usagestatistichour.Table, usagestatistichour.Columns, sqlgraph.NewFieldSpec(usagestatistichour.FieldID, field.TypeUint64))
	_spec.From = ushq.sql
	if unique := ushq.ctx.Unique; unique != nil {
		_spec.Unique = *unique
	} else if ushq.path != nil {
		_spec.Unique = true
	}
	if fields := ushq.ctx.Fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, usagestatistichour.FieldID)
		for i := range fields {
			if fields[i] != usagestatistichour.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
			}
		}
	}
	if ps := ushq.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := ushq.ctx.Limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := ushq.ctx.Offset; offset != nil {
		_spec.Offset = *offset
	}
	if ps := ushq.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}

func (ushq *UsageStatisticHourQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(ushq.driver.Dialect())
	t1 := builder.Table(usagestatistichour.Table)
	columns := ushq.ctx.Fields
	if len(columns) == 0 {
		columns = usagestatistichour.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	if ushq.sql != nil {
		selector = ushq.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if ushq.ctx.Unique != nil && *ushq.ctx.Unique {
		selector.Distinct()
	}
	for _, p := range ushq.predicates {
		p(selector)
	}
	for _, p := range ushq.order {
		p(selector)
	}
	if offset := ushq.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := ushq.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}

// UsageStatisticHourGroupBy is the group-by builder for UsageStatisticHour entities.
type UsageStatisticHourGroupBy struct {
	selector
	build *UsageStatisticHourQuery
}

// Aggregate adds the given aggregation functions to the group-by query.
func (ushgb *UsageStatisticHourGroupBy) Aggregate(fns ...AggregateFunc) *UsageStatisticHourGroupBy {
	ushgb.fns = append(ushgb.fns, fns...)
	return ushgb
}

// Scan applies the selector query and scans the result into the given value.
func (ushgb *UsageStatisticHourGroupBy) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, ushgb.build.ctx, "GroupBy")
	if err := ushgb.build.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*UsageStatisticHourQuery, *UsageStatisticHourGroupBy](ctx, ushgb.build, ushgb, ushgb.build.inters, v)
}

func (ushgb *UsageStatisticHourGroupBy) sqlScan(ctx context.Context, root *UsageStatisticHourQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	aggregation := make([]string, 0, len(ushgb.fns))
	for _, fn := range ushgb.fns {
		aggregation = append(aggregation, fn(selector))
	}
	if len(selector.SelectedColumns()) == 0 {
		columns := make([]string, 0, len(*ushgb.flds)+len(ushgb.fns))
		for _, f := range *ushgb.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*ushgb.flds...)...)
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := ushgb.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}

// UsageStatisticHourSelect is the builder for selecting fields of UsageStatisticHour entities.
type UsageStatisticHourSelect struct {
	*UsageStatisticHourQuery
	selector
}

// Aggregate adds the given aggregation functions to the selector query.
func (ushs *UsageStatisticHourSelect) Aggregate(fns ...AggregateFunc) *UsageStatisticHourSelect {
	ushs.fns = append(ushs.fns, fns...)
	return ushs
}

// Scan applies the selector query and scans the result into the given value.
func (ushs *UsageStatisticHourSelect) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, ushs.ctx, "Select")
	if err := ushs.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*UsageStatisticHourQuery, *UsageStatisticHourSelect](ctx, ushs.UsageStatisticHourQuery, ushs, ushs.inters, v)
}

func (ushs *UsageStatisticHourSelect) sqlScan(ctx context.Context, root *UsageStatisticHourQuery, v any) error {
	selector := root.sqlQuery(ctx)
	aggregation := make([]string, 0, len(ushs.fns))
	for _, fn := range ushs.fns {
		aggregation = append(aggregation, fn(selector))
	}
	switch n := len(*ushs.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := ushs.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}