re-generate ent code (#2844)

Authored by mmetc on 2024-02-14 11:19:13 +01:00; committed by GitHub
parent 45571cea08
commit 2bbf0b4762
58 changed files with 4032 additions and 8015 deletions

go.sum

@ -542,6 +542,8 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec=


@ -7,6 +7,7 @@ import (
"strings"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/alert"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
@ -67,6 +68,7 @@ type Alert struct {
// The values are being populated by the AlertQuery when eager-loading is set.
Edges AlertEdges `json:"edges"`
machine_alerts *int
selectValues sql.SelectValues
}
// AlertEdges holds the relations/edges for other nodes in the graph.
@ -142,7 +144,7 @@ func (*Alert) scanValues(columns []string) ([]any, error) {
case alert.ForeignKeys[0]: // machine_alerts
values[i] = new(sql.NullInt64)
default:
return nil, fmt.Errorf("unexpected column %q for type Alert", columns[i])
values[i] = new(sql.UnknownType)
}
}
return values, nil
@ -309,36 +311,44 @@ func (a *Alert) assignValues(columns []string, values []any) error {
a.machine_alerts = new(int)
*a.machine_alerts = int(value.Int64)
}
default:
a.selectValues.Set(columns[i], values[i])
}
}
return nil
}
// Value returns the ent.Value that was dynamically selected and assigned to the Alert.
// This includes values selected through modifiers, order, etc.
func (a *Alert) Value(name string) (ent.Value, error) {
return a.selectValues.Get(name)
}
// QueryOwner queries the "owner" edge of the Alert entity.
func (a *Alert) QueryOwner() *MachineQuery {
return (&AlertClient{config: a.config}).QueryOwner(a)
return NewAlertClient(a.config).QueryOwner(a)
}
// QueryDecisions queries the "decisions" edge of the Alert entity.
func (a *Alert) QueryDecisions() *DecisionQuery {
return (&AlertClient{config: a.config}).QueryDecisions(a)
return NewAlertClient(a.config).QueryDecisions(a)
}
// QueryEvents queries the "events" edge of the Alert entity.
func (a *Alert) QueryEvents() *EventQuery {
return (&AlertClient{config: a.config}).QueryEvents(a)
return NewAlertClient(a.config).QueryEvents(a)
}
// QueryMetas queries the "metas" edge of the Alert entity.
func (a *Alert) QueryMetas() *MetaQuery {
return (&AlertClient{config: a.config}).QueryMetas(a)
return NewAlertClient(a.config).QueryMetas(a)
}
// Update returns a builder for updating this Alert.
// Note that you need to call Alert.Unwrap() before calling this method if this Alert
// was returned from a transaction, and the transaction was committed or rolled back.
func (a *Alert) Update() *AlertUpdateOne {
return (&AlertClient{config: a.config}).UpdateOne(a)
return NewAlertClient(a.config).UpdateOne(a)
}
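The edge and update helpers above now delegate to NewAlertClient instead of building an AlertClient literal by hand; the caller-facing API is unchanged. Below is a minimal usage sketch, not part of the diff, assuming an already-loaded *ent.Alert and a context (package and function names are illustrative only):

package entexamples // illustrative package, not part of this commit

import (
    "context"

    "github.com/crowdsecurity/crowdsec/pkg/database/ent"
)

// traverseAndUpdateAlert walks the "decisions" edge of a loaded alert and then
// flags the alert as simulated through the regenerated Update builder.
func traverseAndUpdateAlert(ctx context.Context, a *ent.Alert) error {
    decisions, err := a.QueryDecisions().All(ctx)
    if err != nil {
        return err
    }
    _ = decisions

    _, err = a.Update().SetSimulated(true).Save(ctx)
    return err
}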
// Unwrap unwraps the Alert entity that was returned from a transaction after it was closed,
@ -435,9 +445,3 @@ func (a *Alert) String() string {
// Alerts is a parsable slice of Alert.
type Alerts []*Alert
func (a Alerts) config(cfg config) {
for _i := range a {
a[_i].config = cfg
}
}


@ -4,6 +4,9 @@ package alert
import (
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
)
const (
@ -168,3 +171,203 @@ var (
// DefaultSimulated holds the default value on creation for the "simulated" field.
DefaultSimulated bool
)
// OrderOption defines the ordering options for the Alert queries.
type OrderOption func(*sql.Selector)
// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldID, opts...).ToFunc()
}
// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}
// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}
// ByScenario orders the results by the scenario field.
func ByScenario(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldScenario, opts...).ToFunc()
}
// ByBucketId orders the results by the bucketId field.
func ByBucketId(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldBucketId, opts...).ToFunc()
}
// ByMessage orders the results by the message field.
func ByMessage(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldMessage, opts...).ToFunc()
}
// ByEventsCountField orders the results by the eventsCount field.
func ByEventsCountField(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldEventsCount, opts...).ToFunc()
}
// ByStartedAt orders the results by the startedAt field.
func ByStartedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldStartedAt, opts...).ToFunc()
}
// ByStoppedAt orders the results by the stoppedAt field.
func ByStoppedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldStoppedAt, opts...).ToFunc()
}
// BySourceIp orders the results by the sourceIp field.
func BySourceIp(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldSourceIp, opts...).ToFunc()
}
// BySourceRange orders the results by the sourceRange field.
func BySourceRange(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldSourceRange, opts...).ToFunc()
}
// BySourceAsNumber orders the results by the sourceAsNumber field.
func BySourceAsNumber(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldSourceAsNumber, opts...).ToFunc()
}
// BySourceAsName orders the results by the sourceAsName field.
func BySourceAsName(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldSourceAsName, opts...).ToFunc()
}
// BySourceCountry orders the results by the sourceCountry field.
func BySourceCountry(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldSourceCountry, opts...).ToFunc()
}
// BySourceLatitude orders the results by the sourceLatitude field.
func BySourceLatitude(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldSourceLatitude, opts...).ToFunc()
}
// BySourceLongitude orders the results by the sourceLongitude field.
func BySourceLongitude(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldSourceLongitude, opts...).ToFunc()
}
// BySourceScope orders the results by the sourceScope field.
func BySourceScope(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldSourceScope, opts...).ToFunc()
}
// BySourceValue orders the results by the sourceValue field.
func BySourceValue(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldSourceValue, opts...).ToFunc()
}
// ByCapacity orders the results by the capacity field.
func ByCapacity(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldCapacity, opts...).ToFunc()
}
// ByLeakSpeed orders the results by the leakSpeed field.
func ByLeakSpeed(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldLeakSpeed, opts...).ToFunc()
}
// ByScenarioVersion orders the results by the scenarioVersion field.
func ByScenarioVersion(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldScenarioVersion, opts...).ToFunc()
}
// ByScenarioHash orders the results by the scenarioHash field.
func ByScenarioHash(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldScenarioHash, opts...).ToFunc()
}
// BySimulated orders the results by the simulated field.
func BySimulated(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldSimulated, opts...).ToFunc()
}
// ByUUID orders the results by the uuid field.
func ByUUID(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldUUID, opts...).ToFunc()
}
// ByOwnerField orders the results by owner field.
func ByOwnerField(field string, opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newOwnerStep(), sql.OrderByField(field, opts...))
}
}
// ByDecisionsCount orders the results by decisions count.
func ByDecisionsCount(opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborsCount(s, newDecisionsStep(), opts...)
}
}
// ByDecisions orders the results by decisions terms.
func ByDecisions(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newDecisionsStep(), append([]sql.OrderTerm{term}, terms...)...)
}
}
// ByEventsCount orders the results by events count.
func ByEventsCount(opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborsCount(s, newEventsStep(), opts...)
}
}
// ByEvents orders the results by events terms.
func ByEvents(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newEventsStep(), append([]sql.OrderTerm{term}, terms...)...)
}
}
// ByMetasCount orders the results by metas count.
func ByMetasCount(opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborsCount(s, newMetasStep(), opts...)
}
}
// ByMetas orders the results by metas terms.
func ByMetas(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newMetasStep(), append([]sql.OrderTerm{term}, terms...)...)
}
}
func newOwnerStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(OwnerInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),
)
}
func newDecisionsStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(DecisionsInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, DecisionsTable, DecisionsColumn),
)
}
func newEventsStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(EventsInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, EventsTable, EventsColumn),
)
}
func newMetasStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(MetasInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, MetasTable, MetasColumn),
)
}
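Together with the new Value method on Alert, these OrderOption helpers allow ordering by related-row counts and reading the selected count back. A sketch of that pattern follows; it assumes entgo's sql.OrderDesc/sql.OrderSelectAs order-term options and an initialized client, none of which appear in this diff, and the alias and function names are illustrative:

package entexamples // illustrative package, not part of this commit

import (
    "context"

    "entgo.io/ent/dialect/sql"

    "github.com/crowdsecurity/crowdsec/pkg/database/ent"
    "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert"
)

// alertsByDecisionCount orders alerts by the number of attached decisions and
// reads the aliased count back through Alert.Value (backed by selectValues).
func alertsByDecisionCount(ctx context.Context, client *ent.Client) error {
    const as = "decisions_count" // alias for the selected count column

    alerts, err := client.Alert.Query().
        Order(
            alert.ByDecisionsCount(sql.OrderDesc(), sql.OrderSelectAs(as)),
            alert.ByCreatedAt(sql.OrderDesc()),
        ).
        All(ctx)
    if err != nil {
        return err
    }
    for _, a := range alerts {
        // The alias is not a schema column, so it is captured by
        // selectValues and exposed through Value.
        if _, err := a.Value(as); err != nil {
            return err
        }
    }
    return nil
}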

File diff suppressed because it is too large.


@ -409,50 +409,8 @@ func (ac *AlertCreate) Mutation() *AlertMutation {
// Save creates the Alert in the database.
func (ac *AlertCreate) Save(ctx context.Context) (*Alert, error) {
var (
err error
node *Alert
)
ac.defaults()
if len(ac.hooks) == 0 {
if err = ac.check(); err != nil {
return nil, err
}
node, err = ac.sqlSave(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*AlertMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
if err = ac.check(); err != nil {
return nil, err
}
ac.mutation = mutation
if node, err = ac.sqlSave(ctx); err != nil {
return nil, err
}
mutation.id = &node.ID
mutation.done = true
return node, err
})
for i := len(ac.hooks) - 1; i >= 0; i-- {
if ac.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = ac.hooks[i](mut)
}
v, err := mut.Mutate(ctx, ac.mutation)
if err != nil {
return nil, err
}
nv, ok := v.(*Alert)
if !ok {
return nil, fmt.Errorf("unexpected node type %T returned from AlertMutation", v)
}
node = nv
}
return node, err
return withHooks(ctx, ac.sqlSave, ac.mutation, ac.hooks)
}
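Save now funnels through the shared withHooks helper rather than the hand-rolled mutator chain, so hooks registered on the client are still applied around sqlSave. A minimal sketch of registering such a hook; it assumes the generated hook package and client wiring, which are not part of this diff:

package entexamples // illustrative package, not part of this commit

import (
    "context"

    "github.com/crowdsecurity/crowdsec/pkg/database/ent"
    "github.com/crowdsecurity/crowdsec/pkg/database/ent/hook"
)

// registerAlertHook attaches a mutation hook; withHooks wraps it around every
// Alert create/update/delete executed through this client.
func registerAlertHook(client *ent.Client) {
    client.Alert.Use(func(next ent.Mutator) ent.Mutator {
        return hook.AlertFunc(func(ctx context.Context, m *ent.AlertMutation) (ent.Value, error) {
            // Inspect or adjust the mutation here before it is persisted.
            return next.Mutate(ctx, m)
        })
    })
}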
// SaveX calls Save and panics if Save returns an error.
@ -525,6 +483,9 @@ func (ac *AlertCreate) check() error {
}
func (ac *AlertCreate) sqlSave(ctx context.Context) (*Alert, error) {
if err := ac.check(); err != nil {
return nil, err
}
_node, _spec := ac.createSpec()
if err := sqlgraph.CreateNode(ctx, ac.driver, _spec); err != nil {
if sqlgraph.IsConstraintError(err) {
@ -534,202 +495,106 @@ func (ac *AlertCreate) sqlSave(ctx context.Context) (*Alert, error) {
}
id := _spec.ID.Value.(int64)
_node.ID = int(id)
ac.mutation.id = &_node.ID
ac.mutation.done = true
return _node, nil
}
func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) {
var (
_node = &Alert{config: ac.config}
_spec = &sqlgraph.CreateSpec{
Table: alert.Table,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: alert.FieldID,
},
}
_spec = sqlgraph.NewCreateSpec(alert.Table, sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt))
)
if value, ok := ac.mutation.CreatedAt(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: alert.FieldCreatedAt,
})
_spec.SetField(alert.FieldCreatedAt, field.TypeTime, value)
_node.CreatedAt = &value
}
if value, ok := ac.mutation.UpdatedAt(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: alert.FieldUpdatedAt,
})
_spec.SetField(alert.FieldUpdatedAt, field.TypeTime, value)
_node.UpdatedAt = &value
}
if value, ok := ac.mutation.Scenario(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: alert.FieldScenario,
})
_spec.SetField(alert.FieldScenario, field.TypeString, value)
_node.Scenario = value
}
if value, ok := ac.mutation.BucketId(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: alert.FieldBucketId,
})
_spec.SetField(alert.FieldBucketId, field.TypeString, value)
_node.BucketId = value
}
if value, ok := ac.mutation.Message(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: alert.FieldMessage,
})
_spec.SetField(alert.FieldMessage, field.TypeString, value)
_node.Message = value
}
if value, ok := ac.mutation.EventsCount(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeInt32,
Value: value,
Column: alert.FieldEventsCount,
})
_spec.SetField(alert.FieldEventsCount, field.TypeInt32, value)
_node.EventsCount = value
}
if value, ok := ac.mutation.StartedAt(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: alert.FieldStartedAt,
})
_spec.SetField(alert.FieldStartedAt, field.TypeTime, value)
_node.StartedAt = value
}
if value, ok := ac.mutation.StoppedAt(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: alert.FieldStoppedAt,
})
_spec.SetField(alert.FieldStoppedAt, field.TypeTime, value)
_node.StoppedAt = value
}
if value, ok := ac.mutation.SourceIp(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: alert.FieldSourceIp,
})
_spec.SetField(alert.FieldSourceIp, field.TypeString, value)
_node.SourceIp = value
}
if value, ok := ac.mutation.SourceRange(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: alert.FieldSourceRange,
})
_spec.SetField(alert.FieldSourceRange, field.TypeString, value)
_node.SourceRange = value
}
if value, ok := ac.mutation.SourceAsNumber(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: alert.FieldSourceAsNumber,
})
_spec.SetField(alert.FieldSourceAsNumber, field.TypeString, value)
_node.SourceAsNumber = value
}
if value, ok := ac.mutation.SourceAsName(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: alert.FieldSourceAsName,
})
_spec.SetField(alert.FieldSourceAsName, field.TypeString, value)
_node.SourceAsName = value
}
if value, ok := ac.mutation.SourceCountry(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: alert.FieldSourceCountry,
})
_spec.SetField(alert.FieldSourceCountry, field.TypeString, value)
_node.SourceCountry = value
}
if value, ok := ac.mutation.SourceLatitude(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeFloat32,
Value: value,
Column: alert.FieldSourceLatitude,
})
_spec.SetField(alert.FieldSourceLatitude, field.TypeFloat32, value)
_node.SourceLatitude = value
}
if value, ok := ac.mutation.SourceLongitude(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeFloat32,
Value: value,
Column: alert.FieldSourceLongitude,
})
_spec.SetField(alert.FieldSourceLongitude, field.TypeFloat32, value)
_node.SourceLongitude = value
}
if value, ok := ac.mutation.SourceScope(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: alert.FieldSourceScope,
})
_spec.SetField(alert.FieldSourceScope, field.TypeString, value)
_node.SourceScope = value
}
if value, ok := ac.mutation.SourceValue(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: alert.FieldSourceValue,
})
_spec.SetField(alert.FieldSourceValue, field.TypeString, value)
_node.SourceValue = value
}
if value, ok := ac.mutation.Capacity(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeInt32,
Value: value,
Column: alert.FieldCapacity,
})
_spec.SetField(alert.FieldCapacity, field.TypeInt32, value)
_node.Capacity = value
}
if value, ok := ac.mutation.LeakSpeed(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: alert.FieldLeakSpeed,
})
_spec.SetField(alert.FieldLeakSpeed, field.TypeString, value)
_node.LeakSpeed = value
}
if value, ok := ac.mutation.ScenarioVersion(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: alert.FieldScenarioVersion,
})
_spec.SetField(alert.FieldScenarioVersion, field.TypeString, value)
_node.ScenarioVersion = value
}
if value, ok := ac.mutation.ScenarioHash(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: alert.FieldScenarioHash,
})
_spec.SetField(alert.FieldScenarioHash, field.TypeString, value)
_node.ScenarioHash = value
}
if value, ok := ac.mutation.Simulated(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeBool,
Value: value,
Column: alert.FieldSimulated,
})
_spec.SetField(alert.FieldSimulated, field.TypeBool, value)
_node.Simulated = value
}
if value, ok := ac.mutation.UUID(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: alert.FieldUUID,
})
_spec.SetField(alert.FieldUUID, field.TypeString, value)
_node.UUID = value
}
if nodes := ac.mutation.OwnerIDs(); len(nodes) > 0 {
@ -740,10 +605,7 @@ func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) {
Columns: []string{alert.OwnerColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: machine.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(machine.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
@ -760,10 +622,7 @@ func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) {
Columns: []string{alert.DecisionsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: decision.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
@ -779,10 +638,7 @@ func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) {
Columns: []string{alert.EventsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: event.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
@ -798,10 +654,7 @@ func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) {
Columns: []string{alert.MetasColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: meta.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
@ -815,11 +668,15 @@ func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) {
// AlertCreateBulk is the builder for creating many Alert entities in bulk.
type AlertCreateBulk struct {
config
err error
builders []*AlertCreate
}
// Save creates the Alert entities in the database.
func (acb *AlertCreateBulk) Save(ctx context.Context) ([]*Alert, error) {
if acb.err != nil {
return nil, acb.err
}
specs := make([]*sqlgraph.CreateSpec, len(acb.builders))
nodes := make([]*Alert, len(acb.builders))
mutators := make([]Mutator, len(acb.builders))
@ -836,8 +693,8 @@ func (acb *AlertCreateBulk) Save(ctx context.Context) ([]*Alert, error) {
return nil, err
}
builder.mutation = mutation
nodes[i], specs[i] = builder.createSpec()
var err error
nodes[i], specs[i] = builder.createSpec()
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, acb.builders[i+1].mutation)
} else {


@ -4,7 +4,6 @@ package ent
import (
"context"
"fmt"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
@ -28,34 +27,7 @@ func (ad *AlertDelete) Where(ps ...predicate.Alert) *AlertDelete {
// Exec executes the deletion query and returns how many vertices were deleted.
func (ad *AlertDelete) Exec(ctx context.Context) (int, error) {
var (
err error
affected int
)
if len(ad.hooks) == 0 {
affected, err = ad.sqlExec(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*AlertMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
ad.mutation = mutation
affected, err = ad.sqlExec(ctx)
mutation.done = true
return affected, err
})
for i := len(ad.hooks) - 1; i >= 0; i-- {
if ad.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = ad.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, ad.mutation); err != nil {
return 0, err
}
}
return affected, err
return withHooks(ctx, ad.sqlExec, ad.mutation, ad.hooks)
}
// ExecX is like Exec, but panics if an error occurs.
@ -68,15 +40,7 @@ func (ad *AlertDelete) ExecX(ctx context.Context) int {
}
func (ad *AlertDelete) sqlExec(ctx context.Context) (int, error) {
_spec := &sqlgraph.DeleteSpec{
Node: &sqlgraph.NodeSpec{
Table: alert.Table,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: alert.FieldID,
},
},
}
_spec := sqlgraph.NewDeleteSpec(alert.Table, sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt))
if ps := ad.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@ -88,6 +52,7 @@ func (ad *AlertDelete) sqlExec(ctx context.Context) (int, error) {
if err != nil && sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
ad.mutation.done = true
return affected, err
}
@ -96,6 +61,12 @@ type AlertDeleteOne struct {
ad *AlertDelete
}
// Where appends a list predicates to the AlertDelete builder.
func (ado *AlertDeleteOne) Where(ps ...predicate.Alert) *AlertDeleteOne {
ado.ad.mutation.Where(ps...)
return ado
}
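AlertDeleteOne now exposes Where, so a delete-by-id can carry additional predicates. A small sketch, assuming the generated DeleteOneID helper and predicates defined elsewhere in this package (names below are illustrative):

package entexamples // illustrative package, not part of this commit

import (
    "context"

    "github.com/crowdsecurity/crowdsec/pkg/database/ent"
    "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert"
)

// deleteSimulatedAlert deletes one alert by id, but only if it is simulated;
// a non-matching row surfaces as a NotFoundError.
func deleteSimulatedAlert(ctx context.Context, client *ent.Client, id int) error {
    err := client.Alert.DeleteOneID(id).
        Where(alert.Simulated(true)).
        Exec(ctx)
    if ent.IsNotFound(err) {
        return nil // no such alert, or it is not simulated
    }
    return err
}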
// Exec executes the deletion query.
func (ado *AlertDeleteOne) Exec(ctx context.Context) error {
n, err := ado.ad.Exec(ctx)
@ -111,5 +82,7 @@ func (ado *AlertDeleteOne) Exec(ctx context.Context) error {
// ExecX is like Exec, but panics if an error occurs.
func (ado *AlertDeleteOne) ExecX(ctx context.Context) {
ado.ad.ExecX(ctx)
if err := ado.Exec(ctx); err != nil {
panic(err)
}
}


@ -22,11 +22,9 @@ import (
// AlertQuery is the builder for querying Alert entities.
type AlertQuery struct {
config
limit *int
offset *int
unique *bool
order []OrderFunc
fields []string
ctx *QueryContext
order []alert.OrderOption
inters []Interceptor
predicates []predicate.Alert
withOwner *MachineQuery
withDecisions *DecisionQuery
@ -44,34 +42,34 @@ func (aq *AlertQuery) Where(ps ...predicate.Alert) *AlertQuery {
return aq
}
// Limit adds a limit step to the query.
// Limit the number of records to be returned by this query.
func (aq *AlertQuery) Limit(limit int) *AlertQuery {
aq.limit = &limit
aq.ctx.Limit = &limit
return aq
}
// Offset adds an offset step to the query.
// Offset to start from.
func (aq *AlertQuery) Offset(offset int) *AlertQuery {
aq.offset = &offset
aq.ctx.Offset = &offset
return aq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (aq *AlertQuery) Unique(unique bool) *AlertQuery {
aq.unique = &unique
aq.ctx.Unique = &unique
return aq
}
// Order adds an order step to the query.
func (aq *AlertQuery) Order(o ...OrderFunc) *AlertQuery {
// Order specifies how the records should be ordered.
func (aq *AlertQuery) Order(o ...alert.OrderOption) *AlertQuery {
aq.order = append(aq.order, o...)
return aq
}
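Limit, Offset and Unique are now stored on the shared QueryContext, and Order takes the typed alert.OrderOption instead of the old OrderFunc. A pagination sketch under the new signatures, assuming an initialized client (names are illustrative):

package entexamples // illustrative package, not part of this commit

import (
    "context"

    "entgo.io/ent/dialect/sql"

    "github.com/crowdsecurity/crowdsec/pkg/database/ent"
    "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert"
)

// pageAlerts returns one page of alerts, newest first, using the typed
// ordering options introduced by this regeneration.
func pageAlerts(ctx context.Context, client *ent.Client, page, size int) ([]*ent.Alert, error) {
    return client.Alert.Query().
        Order(
            alert.ByCreatedAt(sql.OrderDesc()),
            alert.ByID(), // stable tie-breaker
        ).
        Offset(page * size).
        Limit(size).
        All(ctx)
}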
// QueryOwner chains the current query on the "owner" edge.
func (aq *AlertQuery) QueryOwner() *MachineQuery {
query := &MachineQuery{config: aq.config}
query := (&MachineClient{config: aq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := aq.prepareQuery(ctx); err != nil {
return nil, err
@ -93,7 +91,7 @@ func (aq *AlertQuery) QueryOwner() *MachineQuery {
// QueryDecisions chains the current query on the "decisions" edge.
func (aq *AlertQuery) QueryDecisions() *DecisionQuery {
query := &DecisionQuery{config: aq.config}
query := (&DecisionClient{config: aq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := aq.prepareQuery(ctx); err != nil {
return nil, err
@ -115,7 +113,7 @@ func (aq *AlertQuery) QueryDecisions() *DecisionQuery {
// QueryEvents chains the current query on the "events" edge.
func (aq *AlertQuery) QueryEvents() *EventQuery {
query := &EventQuery{config: aq.config}
query := (&EventClient{config: aq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := aq.prepareQuery(ctx); err != nil {
return nil, err
@ -137,7 +135,7 @@ func (aq *AlertQuery) QueryEvents() *EventQuery {
// QueryMetas chains the current query on the "metas" edge.
func (aq *AlertQuery) QueryMetas() *MetaQuery {
query := &MetaQuery{config: aq.config}
query := (&MetaClient{config: aq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := aq.prepareQuery(ctx); err != nil {
return nil, err
@ -160,7 +158,7 @@ func (aq *AlertQuery) QueryMetas() *MetaQuery {
// First returns the first Alert entity from the query.
// Returns a *NotFoundError when no Alert was found.
func (aq *AlertQuery) First(ctx context.Context) (*Alert, error) {
nodes, err := aq.Limit(1).All(ctx)
nodes, err := aq.Limit(1).All(setContextOp(ctx, aq.ctx, "First"))
if err != nil {
return nil, err
}
@ -183,7 +181,7 @@ func (aq *AlertQuery) FirstX(ctx context.Context) *Alert {
// Returns a *NotFoundError when no Alert ID was found.
func (aq *AlertQuery) FirstID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = aq.Limit(1).IDs(ctx); err != nil {
if ids, err = aq.Limit(1).IDs(setContextOp(ctx, aq.ctx, "FirstID")); err != nil {
return
}
if len(ids) == 0 {
@ -206,7 +204,7 @@ func (aq *AlertQuery) FirstIDX(ctx context.Context) int {
// Returns a *NotSingularError when more than one Alert entity is found.
// Returns a *NotFoundError when no Alert entities are found.
func (aq *AlertQuery) Only(ctx context.Context) (*Alert, error) {
nodes, err := aq.Limit(2).All(ctx)
nodes, err := aq.Limit(2).All(setContextOp(ctx, aq.ctx, "Only"))
if err != nil {
return nil, err
}
@ -234,7 +232,7 @@ func (aq *AlertQuery) OnlyX(ctx context.Context) *Alert {
// Returns a *NotFoundError when no entities are found.
func (aq *AlertQuery) OnlyID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = aq.Limit(2).IDs(ctx); err != nil {
if ids, err = aq.Limit(2).IDs(setContextOp(ctx, aq.ctx, "OnlyID")); err != nil {
return
}
switch len(ids) {
@ -259,10 +257,12 @@ func (aq *AlertQuery) OnlyIDX(ctx context.Context) int {
// All executes the query and returns a list of Alerts.
func (aq *AlertQuery) All(ctx context.Context) ([]*Alert, error) {
ctx = setContextOp(ctx, aq.ctx, "All")
if err := aq.prepareQuery(ctx); err != nil {
return nil, err
}
return aq.sqlAll(ctx)
qr := querierAll[[]*Alert, *AlertQuery]()
return withInterceptors[[]*Alert](ctx, aq, qr, aq.inters)
}
// AllX is like All, but panics if an error occurs.
@ -275,9 +275,12 @@ func (aq *AlertQuery) AllX(ctx context.Context) []*Alert {
}
// IDs executes the query and returns a list of Alert IDs.
func (aq *AlertQuery) IDs(ctx context.Context) ([]int, error) {
var ids []int
if err := aq.Select(alert.FieldID).Scan(ctx, &ids); err != nil {
func (aq *AlertQuery) IDs(ctx context.Context) (ids []int, err error) {
if aq.ctx.Unique == nil && aq.path != nil {
aq.Unique(true)
}
ctx = setContextOp(ctx, aq.ctx, "IDs")
if err = aq.Select(alert.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
@ -294,10 +297,11 @@ func (aq *AlertQuery) IDsX(ctx context.Context) []int {
// Count returns the count of the given query.
func (aq *AlertQuery) Count(ctx context.Context) (int, error) {
ctx = setContextOp(ctx, aq.ctx, "Count")
if err := aq.prepareQuery(ctx); err != nil {
return 0, err
}
return aq.sqlCount(ctx)
return withInterceptors[int](ctx, aq, querierCount[*AlertQuery](), aq.inters)
}
// CountX is like Count, but panics if an error occurs.
@ -311,10 +315,15 @@ func (aq *AlertQuery) CountX(ctx context.Context) int {
// Exist returns true if the query has elements in the graph.
func (aq *AlertQuery) Exist(ctx context.Context) (bool, error) {
if err := aq.prepareQuery(ctx); err != nil {
return false, err
ctx = setContextOp(ctx, aq.ctx, "Exist")
switch _, err := aq.FirstID(ctx); {
case IsNotFound(err):
return false, nil
case err != nil:
return false, fmt.Errorf("ent: check existence: %w", err)
default:
return true, nil
}
return aq.sqlExist(ctx)
}
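Exist is now built on FirstID instead of a dedicated sqlExist query; callers are unaffected. A short sketch, assuming the generated ScenarioEQ predicate and an initialized client:

package entexamples // illustrative package, not part of this commit

import (
    "context"

    "github.com/crowdsecurity/crowdsec/pkg/database/ent"
    "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert"
)

// hasAlertsForScenario reports whether any alert matches the given scenario.
func hasAlertsForScenario(ctx context.Context, client *ent.Client, scenario string) (bool, error) {
    return client.Alert.Query().
        Where(alert.ScenarioEQ(scenario)).
        Exist(ctx)
}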
// ExistX is like Exist, but panics if an error occurs.
@ -334,25 +343,24 @@ func (aq *AlertQuery) Clone() *AlertQuery {
}
return &AlertQuery{
config: aq.config,
limit: aq.limit,
offset: aq.offset,
order: append([]OrderFunc{}, aq.order...),
ctx: aq.ctx.Clone(),
order: append([]alert.OrderOption{}, aq.order...),
inters: append([]Interceptor{}, aq.inters...),
predicates: append([]predicate.Alert{}, aq.predicates...),
withOwner: aq.withOwner.Clone(),
withDecisions: aq.withDecisions.Clone(),
withEvents: aq.withEvents.Clone(),
withMetas: aq.withMetas.Clone(),
// clone intermediate query.
sql: aq.sql.Clone(),
path: aq.path,
unique: aq.unique,
sql: aq.sql.Clone(),
path: aq.path,
}
}
// WithOwner tells the query-builder to eager-load the nodes that are connected to
// the "owner" edge. The optional arguments are used to configure the query builder of the edge.
func (aq *AlertQuery) WithOwner(opts ...func(*MachineQuery)) *AlertQuery {
query := &MachineQuery{config: aq.config}
query := (&MachineClient{config: aq.config}).Query()
for _, opt := range opts {
opt(query)
}
@ -363,7 +371,7 @@ func (aq *AlertQuery) WithOwner(opts ...func(*MachineQuery)) *AlertQuery {
// WithDecisions tells the query-builder to eager-load the nodes that are connected to
// the "decisions" edge. The optional arguments are used to configure the query builder of the edge.
func (aq *AlertQuery) WithDecisions(opts ...func(*DecisionQuery)) *AlertQuery {
query := &DecisionQuery{config: aq.config}
query := (&DecisionClient{config: aq.config}).Query()
for _, opt := range opts {
opt(query)
}
@ -374,7 +382,7 @@ func (aq *AlertQuery) WithDecisions(opts ...func(*DecisionQuery)) *AlertQuery {
// WithEvents tells the query-builder to eager-load the nodes that are connected to
// the "events" edge. The optional arguments are used to configure the query builder of the edge.
func (aq *AlertQuery) WithEvents(opts ...func(*EventQuery)) *AlertQuery {
query := &EventQuery{config: aq.config}
query := (&EventClient{config: aq.config}).Query()
for _, opt := range opts {
opt(query)
}
@ -385,7 +393,7 @@ func (aq *AlertQuery) WithEvents(opts ...func(*EventQuery)) *AlertQuery {
// WithMetas tells the query-builder to eager-load the nodes that are connected to
// the "metas" edge. The optional arguments are used to configure the query builder of the edge.
func (aq *AlertQuery) WithMetas(opts ...func(*MetaQuery)) *AlertQuery {
query := &MetaQuery{config: aq.config}
query := (&MetaClient{config: aq.config}).Query()
for _, opt := range opts {
opt(query)
}
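The With* builders now obtain their edge queries from the generated clients, but eager loading is requested exactly as before. A minimal sketch, assuming an initialized client; the Edges field names follow ent's standard generation:

package entexamples // illustrative package, not part of this commit

import (
    "context"

    "github.com/crowdsecurity/crowdsec/pkg/database/ent"
)

// loadAlertsWithEdges eager-loads the owner machine plus decisions, events
// and metas alongside the alerts.
func loadAlertsWithEdges(ctx context.Context, client *ent.Client) error {
    alerts, err := client.Alert.Query().
        WithOwner().
        WithDecisions().
        WithEvents().
        WithMetas().
        All(ctx)
    if err != nil {
        return err
    }
    for _, a := range alerts {
        _ = a.Edges.Owner     // *ent.Machine, nil when the alert has no owner
        _ = a.Edges.Decisions // []*ent.Decision filled by the eager load
    }
    return nil
}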
@ -408,16 +416,11 @@ func (aq *AlertQuery) WithMetas(opts ...func(*MetaQuery)) *AlertQuery {
// Aggregate(ent.Count()).
// Scan(ctx, &v)
func (aq *AlertQuery) GroupBy(field string, fields ...string) *AlertGroupBy {
grbuild := &AlertGroupBy{config: aq.config}
grbuild.fields = append([]string{field}, fields...)
grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) {
if err := aq.prepareQuery(ctx); err != nil {
return nil, err
}
return aq.sqlQuery(ctx), nil
}
aq.ctx.Fields = append([]string{field}, fields...)
grbuild := &AlertGroupBy{build: aq}
grbuild.flds = &aq.ctx.Fields
grbuild.label = alert.Label
grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan
grbuild.scan = grbuild.Scan
return grbuild
}
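The group-by builder now keeps a back-reference to its AlertQuery instead of copying fields and a traversal path; the Scan-facing API is unchanged. A sketch matching the pattern in the doc comment above, assuming an initialized client:

package entexamples // illustrative package, not part of this commit

import (
    "context"

    "github.com/crowdsecurity/crowdsec/pkg/database/ent"
    "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert"
)

// countAlertsPerScenario groups alerts by scenario and counts each group.
func countAlertsPerScenario(ctx context.Context, client *ent.Client) error {
    var rows []struct {
        Scenario string `json:"scenario"`
        Count    int    `json:"count"`
    }
    return client.Alert.Query().
        GroupBy(alert.FieldScenario).
        Aggregate(ent.Count()).
        Scan(ctx, &rows)
}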
@ -434,15 +437,30 @@ func (aq *AlertQuery) GroupBy(field string, fields ...string) *AlertGroupBy {
// Select(alert.FieldCreatedAt).
// Scan(ctx, &v)
func (aq *AlertQuery) Select(fields ...string) *AlertSelect {
aq.fields = append(aq.fields, fields...)
selbuild := &AlertSelect{AlertQuery: aq}
selbuild.label = alert.Label
selbuild.flds, selbuild.scan = &aq.fields, selbuild.Scan
return selbuild
aq.ctx.Fields = append(aq.ctx.Fields, fields...)
sbuild := &AlertSelect{AlertQuery: aq}
sbuild.label = alert.Label
sbuild.flds, sbuild.scan = &aq.ctx.Fields, sbuild.Scan
return sbuild
}
// Aggregate returns a AlertSelect configured with the given aggregations.
func (aq *AlertQuery) Aggregate(fns ...AggregateFunc) *AlertSelect {
return aq.Select().Aggregate(fns...)
}
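Aggregate is a new shortcut that attaches aggregation functions to a plain Select. A sketch counting non-simulated alerts; it assumes the generated SimulatedEQ predicate and the Int scan helper on the selector, neither of which appears in this diff:

package entexamples // illustrative package, not part of this commit

import (
    "context"

    "github.com/crowdsecurity/crowdsec/pkg/database/ent"
    "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert"
)

// countRealAlerts counts alerts that are not simulated via the new
// Query().Aggregate() shortcut.
func countRealAlerts(ctx context.Context, client *ent.Client) (int, error) {
    return client.Alert.Query().
        Where(alert.SimulatedEQ(false)).
        Aggregate(ent.Count()).
        Int(ctx)
}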
func (aq *AlertQuery) prepareQuery(ctx context.Context) error {
for _, f := range aq.fields {
for _, inter := range aq.inters {
if inter == nil {
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
}
if trv, ok := inter.(Traverser); ok {
if err := trv.Traverse(ctx, aq); err != nil {
return err
}
}
}
for _, f := range aq.ctx.Fields {
if !alert.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
@ -536,6 +554,9 @@ func (aq *AlertQuery) loadOwner(ctx context.Context, query *MachineQuery, nodes
}
nodeids[fk] = append(nodeids[fk], nodes[i])
}
if len(ids) == 0 {
return nil
}
query.Where(machine.IDIn(ids...))
neighbors, err := query.All(ctx)
if err != nil {
@ -562,8 +583,11 @@ func (aq *AlertQuery) loadDecisions(ctx context.Context, query *DecisionQuery, n
init(nodes[i])
}
}
if len(query.ctx.Fields) > 0 {
query.ctx.AppendFieldOnce(decision.FieldAlertDecisions)
}
query.Where(predicate.Decision(func(s *sql.Selector) {
s.Where(sql.InValues(alert.DecisionsColumn, fks...))
s.Where(sql.InValues(s.C(alert.DecisionsColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@ -573,7 +597,7 @@ func (aq *AlertQuery) loadDecisions(ctx context.Context, query *DecisionQuery, n
fk := n.AlertDecisions
node, ok := nodeids[fk]
if !ok {
return fmt.Errorf(`unexpected foreign-key "alert_decisions" returned %v for node %v`, fk, n.ID)
return fmt.Errorf(`unexpected referenced foreign-key "alert_decisions" returned %v for node %v`, fk, n.ID)
}
assign(node, n)
}
@ -589,8 +613,11 @@ func (aq *AlertQuery) loadEvents(ctx context.Context, query *EventQuery, nodes [
init(nodes[i])
}
}
if len(query.ctx.Fields) > 0 {
query.ctx.AppendFieldOnce(event.FieldAlertEvents)
}
query.Where(predicate.Event(func(s *sql.Selector) {
s.Where(sql.InValues(alert.EventsColumn, fks...))
s.Where(sql.InValues(s.C(alert.EventsColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@ -600,7 +627,7 @@ func (aq *AlertQuery) loadEvents(ctx context.Context, query *EventQuery, nodes [
fk := n.AlertEvents
node, ok := nodeids[fk]
if !ok {
return fmt.Errorf(`unexpected foreign-key "alert_events" returned %v for node %v`, fk, n.ID)
return fmt.Errorf(`unexpected referenced foreign-key "alert_events" returned %v for node %v`, fk, n.ID)
}
assign(node, n)
}
@ -616,8 +643,11 @@ func (aq *AlertQuery) loadMetas(ctx context.Context, query *MetaQuery, nodes []*
init(nodes[i])
}
}
if len(query.ctx.Fields) > 0 {
query.ctx.AppendFieldOnce(meta.FieldAlertMetas)
}
query.Where(predicate.Meta(func(s *sql.Selector) {
s.Where(sql.InValues(alert.MetasColumn, fks...))
s.Where(sql.InValues(s.C(alert.MetasColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@ -627,7 +657,7 @@ func (aq *AlertQuery) loadMetas(ctx context.Context, query *MetaQuery, nodes []*
fk := n.AlertMetas
node, ok := nodeids[fk]
if !ok {
return fmt.Errorf(`unexpected foreign-key "alert_metas" returned %v for node %v`, fk, n.ID)
return fmt.Errorf(`unexpected referenced foreign-key "alert_metas" returned %v for node %v`, fk, n.ID)
}
assign(node, n)
}
@ -636,41 +666,22 @@ func (aq *AlertQuery) loadMetas(ctx context.Context, query *MetaQuery, nodes []*
func (aq *AlertQuery) sqlCount(ctx context.Context) (int, error) {
_spec := aq.querySpec()
_spec.Node.Columns = aq.fields
if len(aq.fields) > 0 {
_spec.Unique = aq.unique != nil && *aq.unique
_spec.Node.Columns = aq.ctx.Fields
if len(aq.ctx.Fields) > 0 {
_spec.Unique = aq.ctx.Unique != nil && *aq.ctx.Unique
}
return sqlgraph.CountNodes(ctx, aq.driver, _spec)
}
func (aq *AlertQuery) sqlExist(ctx context.Context) (bool, error) {
switch _, err := aq.FirstID(ctx); {
case IsNotFound(err):
return false, nil
case err != nil:
return false, fmt.Errorf("ent: check existence: %w", err)
default:
return true, nil
}
}
func (aq *AlertQuery) querySpec() *sqlgraph.QuerySpec {
_spec := &sqlgraph.QuerySpec{
Node: &sqlgraph.NodeSpec{
Table: alert.Table,
Columns: alert.Columns,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: alert.FieldID,
},
},
From: aq.sql,
Unique: true,
}
if unique := aq.unique; unique != nil {
_spec := sqlgraph.NewQuerySpec(alert.Table, alert.Columns, sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt))
_spec.From = aq.sql
if unique := aq.ctx.Unique; unique != nil {
_spec.Unique = *unique
} else if aq.path != nil {
_spec.Unique = true
}
if fields := aq.fields; len(fields) > 0 {
if fields := aq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, alert.FieldID)
for i := range fields {
@ -686,10 +697,10 @@ func (aq *AlertQuery) querySpec() *sqlgraph.QuerySpec {
}
}
}
if limit := aq.limit; limit != nil {
if limit := aq.ctx.Limit; limit != nil {
_spec.Limit = *limit
}
if offset := aq.offset; offset != nil {
if offset := aq.ctx.Offset; offset != nil {
_spec.Offset = *offset
}
if ps := aq.order; len(ps) > 0 {
@ -705,7 +716,7 @@ func (aq *AlertQuery) querySpec() *sqlgraph.QuerySpec {
func (aq *AlertQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(aq.driver.Dialect())
t1 := builder.Table(alert.Table)
columns := aq.fields
columns := aq.ctx.Fields
if len(columns) == 0 {
columns = alert.Columns
}
@ -714,7 +725,7 @@ func (aq *AlertQuery) sqlQuery(ctx context.Context) *sql.Selector {
selector = aq.sql
selector.Select(selector.Columns(columns...)...)
}
if aq.unique != nil && *aq.unique {
if aq.ctx.Unique != nil && *aq.ctx.Unique {
selector.Distinct()
}
for _, p := range aq.predicates {
@ -723,12 +734,12 @@ func (aq *AlertQuery) sqlQuery(ctx context.Context) *sql.Selector {
for _, p := range aq.order {
p(selector)
}
if offset := aq.offset; offset != nil {
if offset := aq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start
// with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32)
}
if limit := aq.limit; limit != nil {
if limit := aq.ctx.Limit; limit != nil {
selector.Limit(*limit)
}
return selector
@ -736,13 +747,8 @@ func (aq *AlertQuery) sqlQuery(ctx context.Context) *sql.Selector {
// AlertGroupBy is the group-by builder for Alert entities.
type AlertGroupBy struct {
config
selector
fields []string
fns []AggregateFunc
// intermediate query (i.e. traversal path).
sql *sql.Selector
path func(context.Context) (*sql.Selector, error)
build *AlertQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
@ -751,74 +757,77 @@ func (agb *AlertGroupBy) Aggregate(fns ...AggregateFunc) *AlertGroupBy {
return agb
}
// Scan applies the group-by query and scans the result into the given value.
// Scan applies the selector query and scans the result into the given value.
func (agb *AlertGroupBy) Scan(ctx context.Context, v any) error {
query, err := agb.path(ctx)
if err != nil {
ctx = setContextOp(ctx, agb.build.ctx, "GroupBy")
if err := agb.build.prepareQuery(ctx); err != nil {
return err
}
agb.sql = query
return agb.sqlScan(ctx, v)
return scanWithInterceptors[*AlertQuery, *AlertGroupBy](ctx, agb.build, agb, agb.build.inters, v)
}
func (agb *AlertGroupBy) sqlScan(ctx context.Context, v any) error {
for _, f := range agb.fields {
if !alert.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
}
}
selector := agb.sqlQuery()
if err := selector.Err(); err != nil {
return err
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := agb.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
func (agb *AlertGroupBy) sqlQuery() *sql.Selector {
selector := agb.sql.Select()
func (agb *AlertGroupBy) sqlScan(ctx context.Context, root *AlertQuery, v any) error {
selector := root.sqlQuery(ctx).Select()
aggregation := make([]string, 0, len(agb.fns))
for _, fn := range agb.fns {
aggregation = append(aggregation, fn(selector))
}
// If no columns were selected in a custom aggregation function, the default
// selection is the fields used for "group-by", and the aggregation functions.
if len(selector.SelectedColumns()) == 0 {
columns := make([]string, 0, len(agb.fields)+len(agb.fns))
for _, f := range agb.fields {
columns := make([]string, 0, len(*agb.flds)+len(agb.fns))
for _, f := range *agb.flds {
columns = append(columns, selector.C(f))
}
columns = append(columns, aggregation...)
selector.Select(columns...)
}
return selector.GroupBy(selector.Columns(agb.fields...)...)
selector.GroupBy(selector.Columns(*agb.flds...)...)
if err := selector.Err(); err != nil {
return err
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := agb.build.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
// AlertSelect is the builder for selecting fields of Alert entities.
type AlertSelect struct {
*AlertQuery
selector
// intermediate query (i.e. traversal path).
sql *sql.Selector
}
// Aggregate adds the given aggregation functions to the selector query.
func (as *AlertSelect) Aggregate(fns ...AggregateFunc) *AlertSelect {
as.fns = append(as.fns, fns...)
return as
}
// Scan applies the selector query and scans the result into the given value.
func (as *AlertSelect) Scan(ctx context.Context, v any) error {
ctx = setContextOp(ctx, as.ctx, "Select")
if err := as.prepareQuery(ctx); err != nil {
return err
}
as.sql = as.AlertQuery.sqlQuery(ctx)
return as.sqlScan(ctx, v)
return scanWithInterceptors[*AlertQuery, *AlertSelect](ctx, as.AlertQuery, as, as.inters, v)
}
func (as *AlertSelect) sqlScan(ctx context.Context, v any) error {
func (as *AlertSelect) sqlScan(ctx context.Context, root *AlertQuery, v any) error {
selector := root.sqlQuery(ctx)
aggregation := make([]string, 0, len(as.fns))
for _, fn := range as.fns {
aggregation = append(aggregation, fn(selector))
}
switch n := len(*as.selector.flds); {
case n == 0 && len(aggregation) > 0:
selector.Select(aggregation...)
case n != 0 && len(aggregation) > 0:
selector.AppendSelect(aggregation...)
}
rows := &sql.Rows{}
query, args := as.sql.Query()
query, args := selector.Query()
if err := as.driver.Query(ctx, query, args, rows); err != nil {
return err
}

File diff suppressed because it is too large.


@ -7,6 +7,7 @@ import (
"strings"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer"
)
@ -37,7 +38,8 @@ type Bouncer struct {
// LastPull holds the value of the "last_pull" field.
LastPull time.Time `json:"last_pull"`
// AuthType holds the value of the "auth_type" field.
AuthType string `json:"auth_type"`
AuthType string `json:"auth_type"`
selectValues sql.SelectValues
}
// scanValues returns the types for scanning values from sql.Rows.
@ -54,7 +56,7 @@ func (*Bouncer) scanValues(columns []string) ([]any, error) {
case bouncer.FieldCreatedAt, bouncer.FieldUpdatedAt, bouncer.FieldUntil, bouncer.FieldLastPull:
values[i] = new(sql.NullTime)
default:
return nil, fmt.Errorf("unexpected column %q for type Bouncer", columns[i])
values[i] = new(sql.UnknownType)
}
}
return values, nil
@ -142,16 +144,24 @@ func (b *Bouncer) assignValues(columns []string, values []any) error {
} else if value.Valid {
b.AuthType = value.String
}
default:
b.selectValues.Set(columns[i], values[i])
}
}
return nil
}
// Value returns the ent.Value that was dynamically selected and assigned to the Bouncer.
// This includes values selected through modifiers, order, etc.
func (b *Bouncer) Value(name string) (ent.Value, error) {
return b.selectValues.Get(name)
}
// Update returns a builder for updating this Bouncer.
// Note that you need to call Bouncer.Unwrap() before calling this method if this Bouncer
// was returned from a transaction, and the transaction was committed or rolled back.
func (b *Bouncer) Update() *BouncerUpdateOne {
return (&BouncerClient{config: b.config}).UpdateOne(b)
return NewBouncerClient(b.config).UpdateOne(b)
}
// Unwrap unwraps the Bouncer entity that was returned from a transaction after it was closed,
@ -212,9 +222,3 @@ func (b *Bouncer) String() string {
// Bouncers is a parsable slice of Bouncer.
type Bouncers []*Bouncer
func (b Bouncers) config(cfg config) {
for _i := range b {
b[_i].config = cfg
}
}


@ -4,6 +4,8 @@ package bouncer
import (
"time"
"entgo.io/ent/dialect/sql"
)
const (
@ -81,3 +83,66 @@ var (
// DefaultAuthType holds the default value on creation for the "auth_type" field.
DefaultAuthType string
)
// OrderOption defines the ordering options for the Bouncer queries.
type OrderOption func(*sql.Selector)
// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldID, opts...).ToFunc()
}
// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}
// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}
// ByName orders the results by the name field.
func ByName(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldName, opts...).ToFunc()
}
// ByAPIKey orders the results by the api_key field.
func ByAPIKey(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldAPIKey, opts...).ToFunc()
}
// ByRevoked orders the results by the revoked field.
func ByRevoked(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldRevoked, opts...).ToFunc()
}
// ByIPAddress orders the results by the ip_address field.
func ByIPAddress(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldIPAddress, opts...).ToFunc()
}
// ByType orders the results by the type field.
func ByType(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldType, opts...).ToFunc()
}
// ByVersion orders the results by the version field.
func ByVersion(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldVersion, opts...).ToFunc()
}
// ByUntil orders the results by the until field.
func ByUntil(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldUntil, opts...).ToFunc()
}
// ByLastPull orders the results by the last_pull field.
func ByLastPull(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldLastPull, opts...).ToFunc()
}
// ByAuthType orders the results by the auth_type field.
func ByAuthType(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldAuthType, opts...).ToFunc()
}
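The bouncer package gains the same typed OrderOption helpers as alert. A minimal sketch listing bouncers by most recent pull, assuming an initialized client:

package entexamples // illustrative package, not part of this commit

import (
    "context"

    "entgo.io/ent/dialect/sql"

    "github.com/crowdsecurity/crowdsec/pkg/database/ent"
    "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer"
)

// bouncersByLastPull lists bouncers ordered by their last pull, newest first,
// with the name as a tie-breaker.
func bouncersByLastPull(ctx context.Context, client *ent.Client) ([]*ent.Bouncer, error) {
    return client.Bouncer.Query().
        Order(
            bouncer.ByLastPull(sql.OrderDesc()),
            bouncer.ByName(),
        ).
        All(ctx)
}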

File diff suppressed because it is too large.


@ -157,50 +157,8 @@ func (bc *BouncerCreate) Mutation() *BouncerMutation {
// Save creates the Bouncer in the database.
func (bc *BouncerCreate) Save(ctx context.Context) (*Bouncer, error) {
var (
err error
node *Bouncer
)
bc.defaults()
if len(bc.hooks) == 0 {
if err = bc.check(); err != nil {
return nil, err
}
node, err = bc.sqlSave(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*BouncerMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
if err = bc.check(); err != nil {
return nil, err
}
bc.mutation = mutation
if node, err = bc.sqlSave(ctx); err != nil {
return nil, err
}
mutation.id = &node.ID
mutation.done = true
return node, err
})
for i := len(bc.hooks) - 1; i >= 0; i-- {
if bc.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = bc.hooks[i](mut)
}
v, err := mut.Mutate(ctx, bc.mutation)
if err != nil {
return nil, err
}
nv, ok := v.(*Bouncer)
if !ok {
return nil, fmt.Errorf("unexpected node type %T returned from BouncerMutation", v)
}
node = nv
}
return node, err
return withHooks(ctx, bc.sqlSave, bc.mutation, bc.hooks)
}
// SaveX calls Save and panics if Save returns an error.
@ -274,6 +232,9 @@ func (bc *BouncerCreate) check() error {
}
func (bc *BouncerCreate) sqlSave(ctx context.Context) (*Bouncer, error) {
if err := bc.check(); err != nil {
return nil, err
}
_node, _spec := bc.createSpec()
if err := sqlgraph.CreateNode(ctx, bc.driver, _spec); err != nil {
if sqlgraph.IsConstraintError(err) {
@ -283,106 +244,58 @@ func (bc *BouncerCreate) sqlSave(ctx context.Context) (*Bouncer, error) {
}
id := _spec.ID.Value.(int64)
_node.ID = int(id)
bc.mutation.id = &_node.ID
bc.mutation.done = true
return _node, nil
}
func (bc *BouncerCreate) createSpec() (*Bouncer, *sqlgraph.CreateSpec) {
var (
_node = &Bouncer{config: bc.config}
_spec = &sqlgraph.CreateSpec{
Table: bouncer.Table,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: bouncer.FieldID,
},
}
_spec = sqlgraph.NewCreateSpec(bouncer.Table, sqlgraph.NewFieldSpec(bouncer.FieldID, field.TypeInt))
)
if value, ok := bc.mutation.CreatedAt(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: bouncer.FieldCreatedAt,
})
_spec.SetField(bouncer.FieldCreatedAt, field.TypeTime, value)
_node.CreatedAt = &value
}
if value, ok := bc.mutation.UpdatedAt(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: bouncer.FieldUpdatedAt,
})
_spec.SetField(bouncer.FieldUpdatedAt, field.TypeTime, value)
_node.UpdatedAt = &value
}
if value, ok := bc.mutation.Name(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: bouncer.FieldName,
})
_spec.SetField(bouncer.FieldName, field.TypeString, value)
_node.Name = value
}
if value, ok := bc.mutation.APIKey(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: bouncer.FieldAPIKey,
})
_spec.SetField(bouncer.FieldAPIKey, field.TypeString, value)
_node.APIKey = value
}
if value, ok := bc.mutation.Revoked(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeBool,
Value: value,
Column: bouncer.FieldRevoked,
})
_spec.SetField(bouncer.FieldRevoked, field.TypeBool, value)
_node.Revoked = value
}
if value, ok := bc.mutation.IPAddress(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: bouncer.FieldIPAddress,
})
_spec.SetField(bouncer.FieldIPAddress, field.TypeString, value)
_node.IPAddress = value
}
if value, ok := bc.mutation.GetType(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: bouncer.FieldType,
})
_spec.SetField(bouncer.FieldType, field.TypeString, value)
_node.Type = value
}
if value, ok := bc.mutation.Version(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: bouncer.FieldVersion,
})
_spec.SetField(bouncer.FieldVersion, field.TypeString, value)
_node.Version = value
}
if value, ok := bc.mutation.Until(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: bouncer.FieldUntil,
})
_spec.SetField(bouncer.FieldUntil, field.TypeTime, value)
_node.Until = value
}
if value, ok := bc.mutation.LastPull(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: bouncer.FieldLastPull,
})
_spec.SetField(bouncer.FieldLastPull, field.TypeTime, value)
_node.LastPull = value
}
if value, ok := bc.mutation.AuthType(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: bouncer.FieldAuthType,
})
_spec.SetField(bouncer.FieldAuthType, field.TypeString, value)
_node.AuthType = value
}
return _node, _spec
@ -391,11 +304,15 @@ func (bc *BouncerCreate) createSpec() (*Bouncer, *sqlgraph.CreateSpec) {
// BouncerCreateBulk is the builder for creating many Bouncer entities in bulk.
type BouncerCreateBulk struct {
config
err error
builders []*BouncerCreate
}
// Save creates the Bouncer entities in the database.
func (bcb *BouncerCreateBulk) Save(ctx context.Context) ([]*Bouncer, error) {
if bcb.err != nil {
return nil, bcb.err
}
specs := make([]*sqlgraph.CreateSpec, len(bcb.builders))
nodes := make([]*Bouncer, len(bcb.builders))
mutators := make([]Mutator, len(bcb.builders))
@ -412,8 +329,8 @@ func (bcb *BouncerCreateBulk) Save(ctx context.Context) ([]*Bouncer, error) {
return nil, err
}
builder.mutation = mutation
nodes[i], specs[i] = builder.createSpec()
var err error
nodes[i], specs[i] = builder.createSpec()
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, bcb.builders[i+1].mutation)
} else {


@ -4,7 +4,6 @@ package ent
import (
"context"
"fmt"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
@ -28,34 +27,7 @@ func (bd *BouncerDelete) Where(ps ...predicate.Bouncer) *BouncerDelete {
// Exec executes the deletion query and returns how many vertices were deleted.
func (bd *BouncerDelete) Exec(ctx context.Context) (int, error) {
var (
err error
affected int
)
if len(bd.hooks) == 0 {
affected, err = bd.sqlExec(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*BouncerMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
bd.mutation = mutation
affected, err = bd.sqlExec(ctx)
mutation.done = true
return affected, err
})
for i := len(bd.hooks) - 1; i >= 0; i-- {
if bd.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = bd.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, bd.mutation); err != nil {
return 0, err
}
}
return affected, err
return withHooks(ctx, bd.sqlExec, bd.mutation, bd.hooks)
}
// ExecX is like Exec, but panics if an error occurs.
@ -68,15 +40,7 @@ func (bd *BouncerDelete) ExecX(ctx context.Context) int {
}
func (bd *BouncerDelete) sqlExec(ctx context.Context) (int, error) {
_spec := &sqlgraph.DeleteSpec{
Node: &sqlgraph.NodeSpec{
Table: bouncer.Table,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: bouncer.FieldID,
},
},
}
_spec := sqlgraph.NewDeleteSpec(bouncer.Table, sqlgraph.NewFieldSpec(bouncer.FieldID, field.TypeInt))
if ps := bd.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@ -88,6 +52,7 @@ func (bd *BouncerDelete) sqlExec(ctx context.Context) (int, error) {
if err != nil && sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
bd.mutation.done = true
return affected, err
}
@ -96,6 +61,12 @@ type BouncerDeleteOne struct {
bd *BouncerDelete
}
// Where appends a list of predicates to the BouncerDelete builder.
func (bdo *BouncerDeleteOne) Where(ps ...predicate.Bouncer) *BouncerDeleteOne {
bdo.bd.mutation.Where(ps...)
return bdo
}
// Exec executes the deletion query.
func (bdo *BouncerDeleteOne) Exec(ctx context.Context) error {
n, err := bdo.bd.Exec(ctx)
@ -111,5 +82,7 @@ func (bdo *BouncerDeleteOne) Exec(ctx context.Context) error {
// ExecX is like Exec, but panics if an error occurs.
func (bdo *BouncerDeleteOne) ExecX(ctx context.Context) {
bdo.bd.ExecX(ctx)
if err := bdo.Exec(ctx); err != nil {
panic(err)
}
}
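// Illustrative sketch, not part of the regenerated file: using the delete
// builders above. client and ctx are assumed to exist; NameEQ is the predicate
// name expected from the generated bouncer package.
func exampleDeleteBouncers(ctx context.Context, client *Client) error {
	// Delete a single record by primary key; Exec returns a *NotFoundError
	// (matched by IsNotFound) when no such row exists.
	if err := client.Bouncer.DeleteOneID(42).Exec(ctx); err != nil && !IsNotFound(err) {
		return err
	}
	// Bulk delete with a predicate; Exec reports how many rows were removed.
	n, err := client.Bouncer.Delete().
		Where(bouncer.NameEQ("obsolete-bouncer")).
		Exec(ctx)
	if err != nil {
		return err
	}
	_ = n // number of deleted rows
	return nil
}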

View file

@ -17,11 +17,9 @@ import (
// BouncerQuery is the builder for querying Bouncer entities.
type BouncerQuery struct {
config
limit *int
offset *int
unique *bool
order []OrderFunc
fields []string
ctx *QueryContext
order []bouncer.OrderOption
inters []Interceptor
predicates []predicate.Bouncer
// intermediate query (i.e. traversal path).
sql *sql.Selector
@ -34,27 +32,27 @@ func (bq *BouncerQuery) Where(ps ...predicate.Bouncer) *BouncerQuery {
return bq
}
// Limit adds a limit step to the query.
// Limit the number of records to be returned by this query.
func (bq *BouncerQuery) Limit(limit int) *BouncerQuery {
bq.limit = &limit
bq.ctx.Limit = &limit
return bq
}
// Offset adds an offset step to the query.
// Offset to start from.
func (bq *BouncerQuery) Offset(offset int) *BouncerQuery {
bq.offset = &offset
bq.ctx.Offset = &offset
return bq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (bq *BouncerQuery) Unique(unique bool) *BouncerQuery {
bq.unique = &unique
bq.ctx.Unique = &unique
return bq
}
// Order adds an order step to the query.
func (bq *BouncerQuery) Order(o ...OrderFunc) *BouncerQuery {
// Order specifies how the records should be ordered.
func (bq *BouncerQuery) Order(o ...bouncer.OrderOption) *BouncerQuery {
bq.order = append(bq.order, o...)
return bq
}
@ -62,7 +60,7 @@ func (bq *BouncerQuery) Order(o ...OrderFunc) *BouncerQuery {
// First returns the first Bouncer entity from the query.
// Returns a *NotFoundError when no Bouncer was found.
func (bq *BouncerQuery) First(ctx context.Context) (*Bouncer, error) {
nodes, err := bq.Limit(1).All(ctx)
nodes, err := bq.Limit(1).All(setContextOp(ctx, bq.ctx, "First"))
if err != nil {
return nil, err
}
@ -85,7 +83,7 @@ func (bq *BouncerQuery) FirstX(ctx context.Context) *Bouncer {
// Returns a *NotFoundError when no Bouncer ID was found.
func (bq *BouncerQuery) FirstID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = bq.Limit(1).IDs(ctx); err != nil {
if ids, err = bq.Limit(1).IDs(setContextOp(ctx, bq.ctx, "FirstID")); err != nil {
return
}
if len(ids) == 0 {
@ -108,7 +106,7 @@ func (bq *BouncerQuery) FirstIDX(ctx context.Context) int {
// Returns a *NotSingularError when more than one Bouncer entity is found.
// Returns a *NotFoundError when no Bouncer entities are found.
func (bq *BouncerQuery) Only(ctx context.Context) (*Bouncer, error) {
nodes, err := bq.Limit(2).All(ctx)
nodes, err := bq.Limit(2).All(setContextOp(ctx, bq.ctx, "Only"))
if err != nil {
return nil, err
}
@ -136,7 +134,7 @@ func (bq *BouncerQuery) OnlyX(ctx context.Context) *Bouncer {
// Returns a *NotFoundError when no entities are found.
func (bq *BouncerQuery) OnlyID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = bq.Limit(2).IDs(ctx); err != nil {
if ids, err = bq.Limit(2).IDs(setContextOp(ctx, bq.ctx, "OnlyID")); err != nil {
return
}
switch len(ids) {
@ -161,10 +159,12 @@ func (bq *BouncerQuery) OnlyIDX(ctx context.Context) int {
// All executes the query and returns a list of Bouncers.
func (bq *BouncerQuery) All(ctx context.Context) ([]*Bouncer, error) {
ctx = setContextOp(ctx, bq.ctx, "All")
if err := bq.prepareQuery(ctx); err != nil {
return nil, err
}
return bq.sqlAll(ctx)
qr := querierAll[[]*Bouncer, *BouncerQuery]()
return withInterceptors[[]*Bouncer](ctx, bq, qr, bq.inters)
}
// AllX is like All, but panics if an error occurs.
@ -177,9 +177,12 @@ func (bq *BouncerQuery) AllX(ctx context.Context) []*Bouncer {
}
// IDs executes the query and returns a list of Bouncer IDs.
func (bq *BouncerQuery) IDs(ctx context.Context) ([]int, error) {
var ids []int
if err := bq.Select(bouncer.FieldID).Scan(ctx, &ids); err != nil {
func (bq *BouncerQuery) IDs(ctx context.Context) (ids []int, err error) {
if bq.ctx.Unique == nil && bq.path != nil {
bq.Unique(true)
}
ctx = setContextOp(ctx, bq.ctx, "IDs")
if err = bq.Select(bouncer.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
@ -196,10 +199,11 @@ func (bq *BouncerQuery) IDsX(ctx context.Context) []int {
// Count returns the count of the given query.
func (bq *BouncerQuery) Count(ctx context.Context) (int, error) {
ctx = setContextOp(ctx, bq.ctx, "Count")
if err := bq.prepareQuery(ctx); err != nil {
return 0, err
}
return bq.sqlCount(ctx)
return withInterceptors[int](ctx, bq, querierCount[*BouncerQuery](), bq.inters)
}
// CountX is like Count, but panics if an error occurs.
@ -213,10 +217,15 @@ func (bq *BouncerQuery) CountX(ctx context.Context) int {
// Exist returns true if the query has elements in the graph.
func (bq *BouncerQuery) Exist(ctx context.Context) (bool, error) {
if err := bq.prepareQuery(ctx); err != nil {
return false, err
ctx = setContextOp(ctx, bq.ctx, "Exist")
switch _, err := bq.FirstID(ctx); {
case IsNotFound(err):
return false, nil
case err != nil:
return false, fmt.Errorf("ent: check existence: %w", err)
default:
return true, nil
}
return bq.sqlExist(ctx)
}
// ExistX is like Exist, but panics if an error occurs.
@ -236,14 +245,13 @@ func (bq *BouncerQuery) Clone() *BouncerQuery {
}
return &BouncerQuery{
config: bq.config,
limit: bq.limit,
offset: bq.offset,
order: append([]OrderFunc{}, bq.order...),
ctx: bq.ctx.Clone(),
order: append([]bouncer.OrderOption{}, bq.order...),
inters: append([]Interceptor{}, bq.inters...),
predicates: append([]predicate.Bouncer{}, bq.predicates...),
// clone intermediate query.
sql: bq.sql.Clone(),
path: bq.path,
unique: bq.unique,
sql: bq.sql.Clone(),
path: bq.path,
}
}
@ -262,16 +270,11 @@ func (bq *BouncerQuery) Clone() *BouncerQuery {
// Aggregate(ent.Count()).
// Scan(ctx, &v)
func (bq *BouncerQuery) GroupBy(field string, fields ...string) *BouncerGroupBy {
grbuild := &BouncerGroupBy{config: bq.config}
grbuild.fields = append([]string{field}, fields...)
grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) {
if err := bq.prepareQuery(ctx); err != nil {
return nil, err
}
return bq.sqlQuery(ctx), nil
}
bq.ctx.Fields = append([]string{field}, fields...)
grbuild := &BouncerGroupBy{build: bq}
grbuild.flds = &bq.ctx.Fields
grbuild.label = bouncer.Label
grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan
grbuild.scan = grbuild.Scan
return grbuild
}
@ -288,15 +291,30 @@ func (bq *BouncerQuery) GroupBy(field string, fields ...string) *BouncerGroupBy
// Select(bouncer.FieldCreatedAt).
// Scan(ctx, &v)
func (bq *BouncerQuery) Select(fields ...string) *BouncerSelect {
bq.fields = append(bq.fields, fields...)
selbuild := &BouncerSelect{BouncerQuery: bq}
selbuild.label = bouncer.Label
selbuild.flds, selbuild.scan = &bq.fields, selbuild.Scan
return selbuild
bq.ctx.Fields = append(bq.ctx.Fields, fields...)
sbuild := &BouncerSelect{BouncerQuery: bq}
sbuild.label = bouncer.Label
sbuild.flds, sbuild.scan = &bq.ctx.Fields, sbuild.Scan
return sbuild
}
// Aggregate returns a BouncerSelect configured with the given aggregations.
func (bq *BouncerQuery) Aggregate(fns ...AggregateFunc) *BouncerSelect {
return bq.Select().Aggregate(fns...)
}
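// Illustrative sketch, not part of the regenerated file: the GroupBy and
// Aggregate builders above, counting bouncers per auth type. FieldAuthType is
// taken from the bouncer schema visible in this diff; the result struct and
// its tags are assumptions for illustration.
func exampleCountBouncersByAuthType(ctx context.Context, client *Client) error {
	var rows []struct {
		AuthType string `json:"auth_type"`
		Count    int    `json:"count"`
	}
	// Count() is the generated aggregation helper; Scan fills one row per group.
	return client.Bouncer.Query().
		GroupBy(bouncer.FieldAuthType).
		Aggregate(Count()).
		Scan(ctx, &rows)
}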
func (bq *BouncerQuery) prepareQuery(ctx context.Context) error {
for _, f := range bq.fields {
for _, inter := range bq.inters {
if inter == nil {
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
}
if trv, ok := inter.(Traverser); ok {
if err := trv.Traverse(ctx, bq); err != nil {
return err
}
}
}
for _, f := range bq.ctx.Fields {
if !bouncer.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
@ -338,41 +356,22 @@ func (bq *BouncerQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Boun
func (bq *BouncerQuery) sqlCount(ctx context.Context) (int, error) {
_spec := bq.querySpec()
_spec.Node.Columns = bq.fields
if len(bq.fields) > 0 {
_spec.Unique = bq.unique != nil && *bq.unique
_spec.Node.Columns = bq.ctx.Fields
if len(bq.ctx.Fields) > 0 {
_spec.Unique = bq.ctx.Unique != nil && *bq.ctx.Unique
}
return sqlgraph.CountNodes(ctx, bq.driver, _spec)
}
func (bq *BouncerQuery) sqlExist(ctx context.Context) (bool, error) {
switch _, err := bq.FirstID(ctx); {
case IsNotFound(err):
return false, nil
case err != nil:
return false, fmt.Errorf("ent: check existence: %w", err)
default:
return true, nil
}
}
func (bq *BouncerQuery) querySpec() *sqlgraph.QuerySpec {
_spec := &sqlgraph.QuerySpec{
Node: &sqlgraph.NodeSpec{
Table: bouncer.Table,
Columns: bouncer.Columns,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: bouncer.FieldID,
},
},
From: bq.sql,
Unique: true,
}
if unique := bq.unique; unique != nil {
_spec := sqlgraph.NewQuerySpec(bouncer.Table, bouncer.Columns, sqlgraph.NewFieldSpec(bouncer.FieldID, field.TypeInt))
_spec.From = bq.sql
if unique := bq.ctx.Unique; unique != nil {
_spec.Unique = *unique
} else if bq.path != nil {
_spec.Unique = true
}
if fields := bq.fields; len(fields) > 0 {
if fields := bq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, bouncer.FieldID)
for i := range fields {
@ -388,10 +387,10 @@ func (bq *BouncerQuery) querySpec() *sqlgraph.QuerySpec {
}
}
}
if limit := bq.limit; limit != nil {
if limit := bq.ctx.Limit; limit != nil {
_spec.Limit = *limit
}
if offset := bq.offset; offset != nil {
if offset := bq.ctx.Offset; offset != nil {
_spec.Offset = *offset
}
if ps := bq.order; len(ps) > 0 {
@ -407,7 +406,7 @@ func (bq *BouncerQuery) querySpec() *sqlgraph.QuerySpec {
func (bq *BouncerQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(bq.driver.Dialect())
t1 := builder.Table(bouncer.Table)
columns := bq.fields
columns := bq.ctx.Fields
if len(columns) == 0 {
columns = bouncer.Columns
}
@ -416,7 +415,7 @@ func (bq *BouncerQuery) sqlQuery(ctx context.Context) *sql.Selector {
selector = bq.sql
selector.Select(selector.Columns(columns...)...)
}
if bq.unique != nil && *bq.unique {
if bq.ctx.Unique != nil && *bq.ctx.Unique {
selector.Distinct()
}
for _, p := range bq.predicates {
@ -425,12 +424,12 @@ func (bq *BouncerQuery) sqlQuery(ctx context.Context) *sql.Selector {
for _, p := range bq.order {
p(selector)
}
if offset := bq.offset; offset != nil {
if offset := bq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start
// with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32)
}
if limit := bq.limit; limit != nil {
if limit := bq.ctx.Limit; limit != nil {
selector.Limit(*limit)
}
return selector
@ -438,13 +437,8 @@ func (bq *BouncerQuery) sqlQuery(ctx context.Context) *sql.Selector {
// BouncerGroupBy is the group-by builder for Bouncer entities.
type BouncerGroupBy struct {
config
selector
fields []string
fns []AggregateFunc
// intermediate query (i.e. traversal path).
sql *sql.Selector
path func(context.Context) (*sql.Selector, error)
build *BouncerQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
@ -453,74 +447,77 @@ func (bgb *BouncerGroupBy) Aggregate(fns ...AggregateFunc) *BouncerGroupBy {
return bgb
}
// Scan applies the group-by query and scans the result into the given value.
// Scan applies the selector query and scans the result into the given value.
func (bgb *BouncerGroupBy) Scan(ctx context.Context, v any) error {
query, err := bgb.path(ctx)
if err != nil {
ctx = setContextOp(ctx, bgb.build.ctx, "GroupBy")
if err := bgb.build.prepareQuery(ctx); err != nil {
return err
}
bgb.sql = query
return bgb.sqlScan(ctx, v)
return scanWithInterceptors[*BouncerQuery, *BouncerGroupBy](ctx, bgb.build, bgb, bgb.build.inters, v)
}
func (bgb *BouncerGroupBy) sqlScan(ctx context.Context, v any) error {
for _, f := range bgb.fields {
if !bouncer.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
}
}
selector := bgb.sqlQuery()
if err := selector.Err(); err != nil {
return err
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := bgb.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
func (bgb *BouncerGroupBy) sqlQuery() *sql.Selector {
selector := bgb.sql.Select()
func (bgb *BouncerGroupBy) sqlScan(ctx context.Context, root *BouncerQuery, v any) error {
selector := root.sqlQuery(ctx).Select()
aggregation := make([]string, 0, len(bgb.fns))
for _, fn := range bgb.fns {
aggregation = append(aggregation, fn(selector))
}
// If no columns were selected in a custom aggregation function, the default
// selection is the fields used for "group-by", and the aggregation functions.
if len(selector.SelectedColumns()) == 0 {
columns := make([]string, 0, len(bgb.fields)+len(bgb.fns))
for _, f := range bgb.fields {
columns := make([]string, 0, len(*bgb.flds)+len(bgb.fns))
for _, f := range *bgb.flds {
columns = append(columns, selector.C(f))
}
columns = append(columns, aggregation...)
selector.Select(columns...)
}
return selector.GroupBy(selector.Columns(bgb.fields...)...)
selector.GroupBy(selector.Columns(*bgb.flds...)...)
if err := selector.Err(); err != nil {
return err
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := bgb.build.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
// BouncerSelect is the builder for selecting fields of Bouncer entities.
type BouncerSelect struct {
*BouncerQuery
selector
// intermediate query (i.e. traversal path).
sql *sql.Selector
}
// Aggregate adds the given aggregation functions to the selector query.
func (bs *BouncerSelect) Aggregate(fns ...AggregateFunc) *BouncerSelect {
bs.fns = append(bs.fns, fns...)
return bs
}
// Scan applies the selector query and scans the result into the given value.
func (bs *BouncerSelect) Scan(ctx context.Context, v any) error {
ctx = setContextOp(ctx, bs.ctx, "Select")
if err := bs.prepareQuery(ctx); err != nil {
return err
}
bs.sql = bs.BouncerQuery.sqlQuery(ctx)
return bs.sqlScan(ctx, v)
return scanWithInterceptors[*BouncerQuery, *BouncerSelect](ctx, bs.BouncerQuery, bs, bs.inters, v)
}
func (bs *BouncerSelect) sqlScan(ctx context.Context, v any) error {
func (bs *BouncerSelect) sqlScan(ctx context.Context, root *BouncerQuery, v any) error {
selector := root.sqlQuery(ctx)
aggregation := make([]string, 0, len(bs.fns))
for _, fn := range bs.fns {
aggregation = append(aggregation, fn(selector))
}
switch n := len(*bs.selector.flds); {
case n == 0 && len(aggregation) > 0:
selector.Select(aggregation...)
case n != 0 && len(aggregation) > 0:
selector.AppendSelect(aggregation...)
}
rows := &sql.Rows{}
query, args := bs.sql.Query()
query, args := selector.Query()
if err := bs.driver.Query(ctx, query, args, rows); err != nil {
return err
}

View file

@ -185,35 +185,8 @@ func (bu *BouncerUpdate) Mutation() *BouncerMutation {
// Save executes the query and returns the number of nodes affected by the update operation.
func (bu *BouncerUpdate) Save(ctx context.Context) (int, error) {
var (
err error
affected int
)
bu.defaults()
if len(bu.hooks) == 0 {
affected, err = bu.sqlSave(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*BouncerMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
bu.mutation = mutation
affected, err = bu.sqlSave(ctx)
mutation.done = true
return affected, err
})
for i := len(bu.hooks) - 1; i >= 0; i-- {
if bu.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = bu.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, bu.mutation); err != nil {
return 0, err
}
}
return affected, err
return withHooks(ctx, bu.sqlSave, bu.mutation, bu.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@ -251,16 +224,7 @@ func (bu *BouncerUpdate) defaults() {
}
func (bu *BouncerUpdate) sqlSave(ctx context.Context) (n int, err error) {
_spec := &sqlgraph.UpdateSpec{
Node: &sqlgraph.NodeSpec{
Table: bouncer.Table,
Columns: bouncer.Columns,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: bouncer.FieldID,
},
},
}
_spec := sqlgraph.NewUpdateSpec(bouncer.Table, bouncer.Columns, sqlgraph.NewFieldSpec(bouncer.FieldID, field.TypeInt))
if ps := bu.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@ -269,117 +233,55 @@ func (bu *BouncerUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
}
if value, ok := bu.mutation.CreatedAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: bouncer.FieldCreatedAt,
})
_spec.SetField(bouncer.FieldCreatedAt, field.TypeTime, value)
}
if bu.mutation.CreatedAtCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: bouncer.FieldCreatedAt,
})
_spec.ClearField(bouncer.FieldCreatedAt, field.TypeTime)
}
if value, ok := bu.mutation.UpdatedAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: bouncer.FieldUpdatedAt,
})
_spec.SetField(bouncer.FieldUpdatedAt, field.TypeTime, value)
}
if bu.mutation.UpdatedAtCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: bouncer.FieldUpdatedAt,
})
_spec.ClearField(bouncer.FieldUpdatedAt, field.TypeTime)
}
if value, ok := bu.mutation.Name(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: bouncer.FieldName,
})
_spec.SetField(bouncer.FieldName, field.TypeString, value)
}
if value, ok := bu.mutation.APIKey(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: bouncer.FieldAPIKey,
})
_spec.SetField(bouncer.FieldAPIKey, field.TypeString, value)
}
if value, ok := bu.mutation.Revoked(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeBool,
Value: value,
Column: bouncer.FieldRevoked,
})
_spec.SetField(bouncer.FieldRevoked, field.TypeBool, value)
}
if value, ok := bu.mutation.IPAddress(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: bouncer.FieldIPAddress,
})
_spec.SetField(bouncer.FieldIPAddress, field.TypeString, value)
}
if bu.mutation.IPAddressCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeString,
Column: bouncer.FieldIPAddress,
})
_spec.ClearField(bouncer.FieldIPAddress, field.TypeString)
}
if value, ok := bu.mutation.GetType(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: bouncer.FieldType,
})
_spec.SetField(bouncer.FieldType, field.TypeString, value)
}
if bu.mutation.TypeCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeString,
Column: bouncer.FieldType,
})
_spec.ClearField(bouncer.FieldType, field.TypeString)
}
if value, ok := bu.mutation.Version(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: bouncer.FieldVersion,
})
_spec.SetField(bouncer.FieldVersion, field.TypeString, value)
}
if bu.mutation.VersionCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeString,
Column: bouncer.FieldVersion,
})
_spec.ClearField(bouncer.FieldVersion, field.TypeString)
}
if value, ok := bu.mutation.Until(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: bouncer.FieldUntil,
})
_spec.SetField(bouncer.FieldUntil, field.TypeTime, value)
}
if bu.mutation.UntilCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: bouncer.FieldUntil,
})
_spec.ClearField(bouncer.FieldUntil, field.TypeTime)
}
if value, ok := bu.mutation.LastPull(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: bouncer.FieldLastPull,
})
_spec.SetField(bouncer.FieldLastPull, field.TypeTime, value)
}
if value, ok := bu.mutation.AuthType(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: bouncer.FieldAuthType,
})
_spec.SetField(bouncer.FieldAuthType, field.TypeString, value)
}
if n, err = sqlgraph.UpdateNodes(ctx, bu.driver, _spec); err != nil {
if _, ok := err.(*sqlgraph.NotFoundError); ok {
@ -389,6 +291,7 @@ func (bu *BouncerUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
return 0, err
}
bu.mutation.done = true
return n, nil
}
@ -555,6 +458,12 @@ func (buo *BouncerUpdateOne) Mutation() *BouncerMutation {
return buo.mutation
}
// Where appends a list of predicates to the BouncerUpdateOne builder.
func (buo *BouncerUpdateOne) Where(ps ...predicate.Bouncer) *BouncerUpdateOne {
buo.mutation.Where(ps...)
return buo
}
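// Illustrative sketch, not part of the regenerated file: the new Where method
// turns UpdateOneID into a conditional update. The Revoked predicate and the
// SetRevoked setter are assumptions based on the bouncer schema in this diff.
func exampleRevokeBouncer(ctx context.Context, client *Client, id int) (*Bouncer, error) {
	// Save returns a *NotFoundError if the id exists but the extra predicate
	// filters the row out.
	return client.Bouncer.
		UpdateOneID(id).
		Where(bouncer.Revoked(false)).
		SetRevoked(true).
		Save(ctx)
}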
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (buo *BouncerUpdateOne) Select(field string, fields ...string) *BouncerUpdateOne {
@ -564,41 +473,8 @@ func (buo *BouncerUpdateOne) Select(field string, fields ...string) *BouncerUpda
// Save executes the query and returns the updated Bouncer entity.
func (buo *BouncerUpdateOne) Save(ctx context.Context) (*Bouncer, error) {
var (
err error
node *Bouncer
)
buo.defaults()
if len(buo.hooks) == 0 {
node, err = buo.sqlSave(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*BouncerMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
buo.mutation = mutation
node, err = buo.sqlSave(ctx)
mutation.done = true
return node, err
})
for i := len(buo.hooks) - 1; i >= 0; i-- {
if buo.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = buo.hooks[i](mut)
}
v, err := mut.Mutate(ctx, buo.mutation)
if err != nil {
return nil, err
}
nv, ok := v.(*Bouncer)
if !ok {
return nil, fmt.Errorf("unexpected node type %T returned from BouncerMutation", v)
}
node = nv
}
return node, err
return withHooks(ctx, buo.sqlSave, buo.mutation, buo.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@ -636,16 +512,7 @@ func (buo *BouncerUpdateOne) defaults() {
}
func (buo *BouncerUpdateOne) sqlSave(ctx context.Context) (_node *Bouncer, err error) {
_spec := &sqlgraph.UpdateSpec{
Node: &sqlgraph.NodeSpec{
Table: bouncer.Table,
Columns: bouncer.Columns,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: bouncer.FieldID,
},
},
}
_spec := sqlgraph.NewUpdateSpec(bouncer.Table, bouncer.Columns, sqlgraph.NewFieldSpec(bouncer.FieldID, field.TypeInt))
id, ok := buo.mutation.ID()
if !ok {
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Bouncer.id" for update`)}
@ -671,117 +538,55 @@ func (buo *BouncerUpdateOne) sqlSave(ctx context.Context) (_node *Bouncer, err e
}
}
if value, ok := buo.mutation.CreatedAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: bouncer.FieldCreatedAt,
})
_spec.SetField(bouncer.FieldCreatedAt, field.TypeTime, value)
}
if buo.mutation.CreatedAtCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: bouncer.FieldCreatedAt,
})
_spec.ClearField(bouncer.FieldCreatedAt, field.TypeTime)
}
if value, ok := buo.mutation.UpdatedAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: bouncer.FieldUpdatedAt,
})
_spec.SetField(bouncer.FieldUpdatedAt, field.TypeTime, value)
}
if buo.mutation.UpdatedAtCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: bouncer.FieldUpdatedAt,
})
_spec.ClearField(bouncer.FieldUpdatedAt, field.TypeTime)
}
if value, ok := buo.mutation.Name(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: bouncer.FieldName,
})
_spec.SetField(bouncer.FieldName, field.TypeString, value)
}
if value, ok := buo.mutation.APIKey(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: bouncer.FieldAPIKey,
})
_spec.SetField(bouncer.FieldAPIKey, field.TypeString, value)
}
if value, ok := buo.mutation.Revoked(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeBool,
Value: value,
Column: bouncer.FieldRevoked,
})
_spec.SetField(bouncer.FieldRevoked, field.TypeBool, value)
}
if value, ok := buo.mutation.IPAddress(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: bouncer.FieldIPAddress,
})
_spec.SetField(bouncer.FieldIPAddress, field.TypeString, value)
}
if buo.mutation.IPAddressCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeString,
Column: bouncer.FieldIPAddress,
})
_spec.ClearField(bouncer.FieldIPAddress, field.TypeString)
}
if value, ok := buo.mutation.GetType(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: bouncer.FieldType,
})
_spec.SetField(bouncer.FieldType, field.TypeString, value)
}
if buo.mutation.TypeCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeString,
Column: bouncer.FieldType,
})
_spec.ClearField(bouncer.FieldType, field.TypeString)
}
if value, ok := buo.mutation.Version(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: bouncer.FieldVersion,
})
_spec.SetField(bouncer.FieldVersion, field.TypeString, value)
}
if buo.mutation.VersionCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeString,
Column: bouncer.FieldVersion,
})
_spec.ClearField(bouncer.FieldVersion, field.TypeString)
}
if value, ok := buo.mutation.Until(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: bouncer.FieldUntil,
})
_spec.SetField(bouncer.FieldUntil, field.TypeTime, value)
}
if buo.mutation.UntilCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: bouncer.FieldUntil,
})
_spec.ClearField(bouncer.FieldUntil, field.TypeTime)
}
if value, ok := buo.mutation.LastPull(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: bouncer.FieldLastPull,
})
_spec.SetField(bouncer.FieldLastPull, field.TypeTime, value)
}
if value, ok := buo.mutation.AuthType(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: bouncer.FieldAuthType,
})
_spec.SetField(bouncer.FieldAuthType, field.TypeString, value)
}
_node = &Bouncer{config: buo.config}
_spec.Assign = _node.assignValues
@ -794,5 +599,6 @@ func (buo *BouncerUpdateOne) sqlSave(ctx context.Context) (_node *Bouncer, err e
}
return nil, err
}
buo.mutation.done = true
return _node, nil
}

View file

@ -7,9 +7,14 @@ import (
"errors"
"fmt"
"log"
"reflect"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/migrate"
"entgo.io/ent"
"entgo.io/ent/dialect"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/alert"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem"
@ -17,10 +22,6 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/database/ent/event"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/meta"
"entgo.io/ent/dialect"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
)
// Client is the client that holds all ent builders.
@ -46,7 +47,7 @@ type Client struct {
// NewClient creates a new client configured with the given options.
func NewClient(opts ...Option) *Client {
cfg := config{log: log.Println, hooks: &hooks{}}
cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}}
cfg.options(opts...)
client := &Client{config: cfg}
client.init()
@ -64,6 +65,55 @@ func (c *Client) init() {
c.Meta = NewMetaClient(c.config)
}
type (
// config is the configuration for the client and its builder.
config struct {
// driver used for executing database requests.
driver dialect.Driver
// debug enables debug logging.
debug bool
// log is used for logging in debug mode.
log func(...any)
// hooks to execute on mutations.
hooks *hooks
// interceptors to execute on queries.
inters *inters
}
// Option function to configure the client.
Option func(*config)
)
// options applies the options on the config object.
func (c *config) options(opts ...Option) {
for _, opt := range opts {
opt(c)
}
if c.debug {
c.driver = dialect.Debug(c.driver, c.log)
}
}
// Debug enables debug logging on the ent.Driver.
func Debug() Option {
return func(c *config) {
c.debug = true
}
}
// Log sets the logging function for debug mode.
func Log(fn func(...any)) Option {
return func(c *config) {
c.log = fn
}
}
// Driver configures the client driver.
func Driver(driver dialect.Driver) Option {
return func(c *config) {
c.driver = driver
}
}
// Open opens a database/sql.DB specified by the driver name and
// the data source name, and returns a new client attached to it.
// Optional parameters can be added for configuring the client.
@ -80,11 +130,14 @@ func Open(driverName, dataSourceName string, options ...Option) (*Client, error)
}
}
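// Illustrative sketch, not part of the regenerated file: combining Open with
// the options above. The sqlite3 driver name and DSN are illustrative; the
// matching database/sql driver must be imported elsewhere for its side effects.
func exampleOpenClient() (*Client, error) {
	return Open("sqlite3", "file:ent?mode=memory&cache=shared&_fk=1", Debug(), Log(log.Println))
}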
// ErrTxStarted is returned when trying to start a new transaction from a transactional client.
var ErrTxStarted = errors.New("ent: cannot start a transaction within a transaction")
// Tx returns a new transactional client. The provided context
// is used until the transaction is committed or rolled back.
func (c *Client) Tx(ctx context.Context) (*Tx, error) {
if _, ok := c.driver.(*txDriver); ok {
return nil, errors.New("ent: cannot start a transaction within a transaction")
return nil, ErrTxStarted
}
tx, err := newTx(ctx, c.driver)
if err != nil {
@ -156,13 +209,43 @@ func (c *Client) Close() error {
// Use adds the mutation hooks to all the entity clients.
// In order to add hooks to a specific client, call: `client.Node.Use(...)`.
func (c *Client) Use(hooks ...Hook) {
c.Alert.Use(hooks...)
c.Bouncer.Use(hooks...)
c.ConfigItem.Use(hooks...)
c.Decision.Use(hooks...)
c.Event.Use(hooks...)
c.Machine.Use(hooks...)
c.Meta.Use(hooks...)
for _, n := range []interface{ Use(...Hook) }{
c.Alert, c.Bouncer, c.ConfigItem, c.Decision, c.Event, c.Machine, c.Meta,
} {
n.Use(hooks...)
}
}
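// Illustrative sketch, not part of the regenerated file: a logging hook
// registered on every entity client through Use. Mutator, MutateFunc, Mutation
// and Value are the aliases this package already uses elsewhere in the diff.
func exampleRegisterMutationLogger(c *Client) {
	c.Use(func(next Mutator) Mutator {
		return MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
			log.Printf("ent mutation: type=%s op=%v", m.Type(), m.Op())
			return next.Mutate(ctx, m)
		})
	})
}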
// Intercept adds the query interceptors to all the entity clients.
// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`.
func (c *Client) Intercept(interceptors ...Interceptor) {
for _, n := range []interface{ Intercept(...Interceptor) }{
c.Alert, c.Bouncer, c.ConfigItem, c.Decision, c.Event, c.Machine, c.Meta,
} {
n.Intercept(interceptors...)
}
}
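// Illustrative sketch, not part of the regenerated file: a query logger
// registered through Intercept. InterceptFunc, Querier, QuerierFunc and Query
// are re-exported from entgo.io/ent by the generated runtime; treat the exact
// alias names as an assumption of this sketch.
func exampleRegisterQueryLogger(c *Client) {
	c.Intercept(InterceptFunc(func(next Querier) Querier {
		return QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
			log.Printf("ent query: %T", q)
			return next.Query(ctx, q)
		})
	}))
}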
// Mutate implements the ent.Mutator interface.
func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) {
switch m := m.(type) {
case *AlertMutation:
return c.Alert.mutate(ctx, m)
case *BouncerMutation:
return c.Bouncer.mutate(ctx, m)
case *ConfigItemMutation:
return c.ConfigItem.mutate(ctx, m)
case *DecisionMutation:
return c.Decision.mutate(ctx, m)
case *EventMutation:
return c.Event.mutate(ctx, m)
case *MachineMutation:
return c.Machine.mutate(ctx, m)
case *MetaMutation:
return c.Meta.mutate(ctx, m)
default:
return nil, fmt.Errorf("ent: unknown mutation type %T", m)
}
}
// AlertClient is a client for the Alert schema.
@ -181,6 +264,12 @@ func (c *AlertClient) Use(hooks ...Hook) {
c.hooks.Alert = append(c.hooks.Alert, hooks...)
}
// Intercept adds a list of query interceptors to the interceptors stack.
// A call to `Intercept(f, g, h)` is equivalent to `alert.Intercept(f(g(h())))`.
func (c *AlertClient) Intercept(interceptors ...Interceptor) {
c.inters.Alert = append(c.inters.Alert, interceptors...)
}
// Create returns a builder for creating an Alert entity.
func (c *AlertClient) Create() *AlertCreate {
mutation := newAlertMutation(c.config, OpCreate)
@ -192,6 +281,21 @@ func (c *AlertClient) CreateBulk(builders ...*AlertCreate) *AlertCreateBulk {
return &AlertCreateBulk{config: c.config, builders: builders}
}
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
// a builder and applies setFunc on it.
func (c *AlertClient) MapCreateBulk(slice any, setFunc func(*AlertCreate, int)) *AlertCreateBulk {
rv := reflect.ValueOf(slice)
if rv.Kind() != reflect.Slice {
return &AlertCreateBulk{err: fmt.Errorf("calling to AlertClient.MapCreateBulk with wrong type %T, need slice", slice)}
}
builders := make([]*AlertCreate, rv.Len())
for i := 0; i < rv.Len(); i++ {
builders[i] = c.Create()
setFunc(builders[i], i)
}
return &AlertCreateBulk{config: c.config, builders: builders}
}
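// Illustrative sketch, not part of the regenerated file: MapCreateBulk with a
// plain string slice. SetScenario is an assumed setter on AlertCreate; setFunc
// receives the index of the element being mapped.
func exampleBulkInsertAlerts(ctx context.Context, c *AlertClient, scenarios []string) ([]*Alert, error) {
	return c.MapCreateBulk(scenarios, func(ac *AlertCreate, i int) {
		ac.SetScenario(scenarios[i])
	}).Save(ctx)
}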
// Update returns an update builder for Alert.
func (c *AlertClient) Update() *AlertUpdate {
mutation := newAlertMutation(c.config, OpUpdate)
@ -221,7 +325,7 @@ func (c *AlertClient) DeleteOne(a *Alert) *AlertDeleteOne {
return c.DeleteOneID(a.ID)
}
// DeleteOne returns a builder for deleting the given entity by its id.
// DeleteOneID returns a builder for deleting the given entity by its id.
func (c *AlertClient) DeleteOneID(id int) *AlertDeleteOne {
builder := c.Delete().Where(alert.ID(id))
builder.mutation.id = &id
@ -233,6 +337,8 @@ func (c *AlertClient) DeleteOneID(id int) *AlertDeleteOne {
func (c *AlertClient) Query() *AlertQuery {
return &AlertQuery{
config: c.config,
ctx: &QueryContext{Type: TypeAlert},
inters: c.Interceptors(),
}
}
@ -252,8 +358,8 @@ func (c *AlertClient) GetX(ctx context.Context, id int) *Alert {
// QueryOwner queries the owner edge of an Alert.
func (c *AlertClient) QueryOwner(a *Alert) *MachineQuery {
query := &MachineQuery{config: c.config}
query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
query := (&MachineClient{config: c.config}).Query()
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := a.ID
step := sqlgraph.NewStep(
sqlgraph.From(alert.Table, alert.FieldID, id),
@ -268,8 +374,8 @@ func (c *AlertClient) QueryOwner(a *Alert) *MachineQuery {
// QueryDecisions queries the decisions edge of an Alert.
func (c *AlertClient) QueryDecisions(a *Alert) *DecisionQuery {
query := &DecisionQuery{config: c.config}
query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
query := (&DecisionClient{config: c.config}).Query()
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := a.ID
step := sqlgraph.NewStep(
sqlgraph.From(alert.Table, alert.FieldID, id),
@ -284,8 +390,8 @@ func (c *AlertClient) QueryDecisions(a *Alert) *DecisionQuery {
// QueryEvents queries the events edge of an Alert.
func (c *AlertClient) QueryEvents(a *Alert) *EventQuery {
query := &EventQuery{config: c.config}
query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
query := (&EventClient{config: c.config}).Query()
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := a.ID
step := sqlgraph.NewStep(
sqlgraph.From(alert.Table, alert.FieldID, id),
@ -300,8 +406,8 @@ func (c *AlertClient) QueryEvents(a *Alert) *EventQuery {
// QueryMetas queries the metas edge of an Alert.
func (c *AlertClient) QueryMetas(a *Alert) *MetaQuery {
query := &MetaQuery{config: c.config}
query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
query := (&MetaClient{config: c.config}).Query()
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := a.ID
step := sqlgraph.NewStep(
sqlgraph.From(alert.Table, alert.FieldID, id),
@ -319,6 +425,26 @@ func (c *AlertClient) Hooks() []Hook {
return c.hooks.Alert
}
// Interceptors returns the client interceptors.
func (c *AlertClient) Interceptors() []Interceptor {
return c.inters.Alert
}
func (c *AlertClient) mutate(ctx context.Context, m *AlertMutation) (Value, error) {
switch m.Op() {
case OpCreate:
return (&AlertCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpUpdate:
return (&AlertUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpUpdateOne:
return (&AlertUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpDelete, OpDeleteOne:
return (&AlertDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
default:
return nil, fmt.Errorf("ent: unknown Alert mutation op: %q", m.Op())
}
}
// BouncerClient is a client for the Bouncer schema.
type BouncerClient struct {
config
@ -335,6 +461,12 @@ func (c *BouncerClient) Use(hooks ...Hook) {
c.hooks.Bouncer = append(c.hooks.Bouncer, hooks...)
}
// Intercept adds a list of query interceptors to the interceptors stack.
// A call to `Intercept(f, g, h)` is equivalent to `bouncer.Intercept(f(g(h())))`.
func (c *BouncerClient) Intercept(interceptors ...Interceptor) {
c.inters.Bouncer = append(c.inters.Bouncer, interceptors...)
}
// Create returns a builder for creating a Bouncer entity.
func (c *BouncerClient) Create() *BouncerCreate {
mutation := newBouncerMutation(c.config, OpCreate)
@ -346,6 +478,21 @@ func (c *BouncerClient) CreateBulk(builders ...*BouncerCreate) *BouncerCreateBul
return &BouncerCreateBulk{config: c.config, builders: builders}
}
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
// a builder and applies setFunc on it.
func (c *BouncerClient) MapCreateBulk(slice any, setFunc func(*BouncerCreate, int)) *BouncerCreateBulk {
rv := reflect.ValueOf(slice)
if rv.Kind() != reflect.Slice {
return &BouncerCreateBulk{err: fmt.Errorf("calling to BouncerClient.MapCreateBulk with wrong type %T, need slice", slice)}
}
builders := make([]*BouncerCreate, rv.Len())
for i := 0; i < rv.Len(); i++ {
builders[i] = c.Create()
setFunc(builders[i], i)
}
return &BouncerCreateBulk{config: c.config, builders: builders}
}
// Update returns an update builder for Bouncer.
func (c *BouncerClient) Update() *BouncerUpdate {
mutation := newBouncerMutation(c.config, OpUpdate)
@ -375,7 +522,7 @@ func (c *BouncerClient) DeleteOne(b *Bouncer) *BouncerDeleteOne {
return c.DeleteOneID(b.ID)
}
// DeleteOne returns a builder for deleting the given entity by its id.
// DeleteOneID returns a builder for deleting the given entity by its id.
func (c *BouncerClient) DeleteOneID(id int) *BouncerDeleteOne {
builder := c.Delete().Where(bouncer.ID(id))
builder.mutation.id = &id
@ -387,6 +534,8 @@ func (c *BouncerClient) DeleteOneID(id int) *BouncerDeleteOne {
func (c *BouncerClient) Query() *BouncerQuery {
return &BouncerQuery{
config: c.config,
ctx: &QueryContext{Type: TypeBouncer},
inters: c.Interceptors(),
}
}
@ -409,6 +558,26 @@ func (c *BouncerClient) Hooks() []Hook {
return c.hooks.Bouncer
}
// Interceptors returns the client interceptors.
func (c *BouncerClient) Interceptors() []Interceptor {
return c.inters.Bouncer
}
func (c *BouncerClient) mutate(ctx context.Context, m *BouncerMutation) (Value, error) {
switch m.Op() {
case OpCreate:
return (&BouncerCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpUpdate:
return (&BouncerUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpUpdateOne:
return (&BouncerUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpDelete, OpDeleteOne:
return (&BouncerDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
default:
return nil, fmt.Errorf("ent: unknown Bouncer mutation op: %q", m.Op())
}
}
// ConfigItemClient is a client for the ConfigItem schema.
type ConfigItemClient struct {
config
@ -425,6 +594,12 @@ func (c *ConfigItemClient) Use(hooks ...Hook) {
c.hooks.ConfigItem = append(c.hooks.ConfigItem, hooks...)
}
// Intercept adds a list of query interceptors to the interceptors stack.
// A call to `Intercept(f, g, h)` is equivalent to `configitem.Intercept(f(g(h())))`.
func (c *ConfigItemClient) Intercept(interceptors ...Interceptor) {
c.inters.ConfigItem = append(c.inters.ConfigItem, interceptors...)
}
// Create returns a builder for creating a ConfigItem entity.
func (c *ConfigItemClient) Create() *ConfigItemCreate {
mutation := newConfigItemMutation(c.config, OpCreate)
@ -436,6 +611,21 @@ func (c *ConfigItemClient) CreateBulk(builders ...*ConfigItemCreate) *ConfigItem
return &ConfigItemCreateBulk{config: c.config, builders: builders}
}
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
// a builder and applies setFunc on it.
func (c *ConfigItemClient) MapCreateBulk(slice any, setFunc func(*ConfigItemCreate, int)) *ConfigItemCreateBulk {
rv := reflect.ValueOf(slice)
if rv.Kind() != reflect.Slice {
return &ConfigItemCreateBulk{err: fmt.Errorf("calling to ConfigItemClient.MapCreateBulk with wrong type %T, need slice", slice)}
}
builders := make([]*ConfigItemCreate, rv.Len())
for i := 0; i < rv.Len(); i++ {
builders[i] = c.Create()
setFunc(builders[i], i)
}
return &ConfigItemCreateBulk{config: c.config, builders: builders}
}
// Update returns an update builder for ConfigItem.
func (c *ConfigItemClient) Update() *ConfigItemUpdate {
mutation := newConfigItemMutation(c.config, OpUpdate)
@ -465,7 +655,7 @@ func (c *ConfigItemClient) DeleteOne(ci *ConfigItem) *ConfigItemDeleteOne {
return c.DeleteOneID(ci.ID)
}
// DeleteOne returns a builder for deleting the given entity by its id.
// DeleteOneID returns a builder for deleting the given entity by its id.
func (c *ConfigItemClient) DeleteOneID(id int) *ConfigItemDeleteOne {
builder := c.Delete().Where(configitem.ID(id))
builder.mutation.id = &id
@ -477,6 +667,8 @@ func (c *ConfigItemClient) DeleteOneID(id int) *ConfigItemDeleteOne {
func (c *ConfigItemClient) Query() *ConfigItemQuery {
return &ConfigItemQuery{
config: c.config,
ctx: &QueryContext{Type: TypeConfigItem},
inters: c.Interceptors(),
}
}
@ -499,6 +691,26 @@ func (c *ConfigItemClient) Hooks() []Hook {
return c.hooks.ConfigItem
}
// Interceptors returns the client interceptors.
func (c *ConfigItemClient) Interceptors() []Interceptor {
return c.inters.ConfigItem
}
func (c *ConfigItemClient) mutate(ctx context.Context, m *ConfigItemMutation) (Value, error) {
switch m.Op() {
case OpCreate:
return (&ConfigItemCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpUpdate:
return (&ConfigItemUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpUpdateOne:
return (&ConfigItemUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpDelete, OpDeleteOne:
return (&ConfigItemDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
default:
return nil, fmt.Errorf("ent: unknown ConfigItem mutation op: %q", m.Op())
}
}
// DecisionClient is a client for the Decision schema.
type DecisionClient struct {
config
@ -515,6 +727,12 @@ func (c *DecisionClient) Use(hooks ...Hook) {
c.hooks.Decision = append(c.hooks.Decision, hooks...)
}
// Intercept adds a list of query interceptors to the interceptors stack.
// A call to `Intercept(f, g, h)` is equivalent to `decision.Intercept(f(g(h())))`.
func (c *DecisionClient) Intercept(interceptors ...Interceptor) {
c.inters.Decision = append(c.inters.Decision, interceptors...)
}
// Create returns a builder for creating a Decision entity.
func (c *DecisionClient) Create() *DecisionCreate {
mutation := newDecisionMutation(c.config, OpCreate)
@ -526,6 +744,21 @@ func (c *DecisionClient) CreateBulk(builders ...*DecisionCreate) *DecisionCreate
return &DecisionCreateBulk{config: c.config, builders: builders}
}
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
// a builder and applies setFunc on it.
func (c *DecisionClient) MapCreateBulk(slice any, setFunc func(*DecisionCreate, int)) *DecisionCreateBulk {
rv := reflect.ValueOf(slice)
if rv.Kind() != reflect.Slice {
return &DecisionCreateBulk{err: fmt.Errorf("calling to DecisionClient.MapCreateBulk with wrong type %T, need slice", slice)}
}
builders := make([]*DecisionCreate, rv.Len())
for i := 0; i < rv.Len(); i++ {
builders[i] = c.Create()
setFunc(builders[i], i)
}
return &DecisionCreateBulk{config: c.config, builders: builders}
}
// Update returns an update builder for Decision.
func (c *DecisionClient) Update() *DecisionUpdate {
mutation := newDecisionMutation(c.config, OpUpdate)
@ -555,7 +788,7 @@ func (c *DecisionClient) DeleteOne(d *Decision) *DecisionDeleteOne {
return c.DeleteOneID(d.ID)
}
// DeleteOne returns a builder for deleting the given entity by its id.
// DeleteOneID returns a builder for deleting the given entity by its id.
func (c *DecisionClient) DeleteOneID(id int) *DecisionDeleteOne {
builder := c.Delete().Where(decision.ID(id))
builder.mutation.id = &id
@ -567,6 +800,8 @@ func (c *DecisionClient) DeleteOneID(id int) *DecisionDeleteOne {
func (c *DecisionClient) Query() *DecisionQuery {
return &DecisionQuery{
config: c.config,
ctx: &QueryContext{Type: TypeDecision},
inters: c.Interceptors(),
}
}
@ -586,8 +821,8 @@ func (c *DecisionClient) GetX(ctx context.Context, id int) *Decision {
// QueryOwner queries the owner edge of a Decision.
func (c *DecisionClient) QueryOwner(d *Decision) *AlertQuery {
query := &AlertQuery{config: c.config}
query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
query := (&AlertClient{config: c.config}).Query()
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := d.ID
step := sqlgraph.NewStep(
sqlgraph.From(decision.Table, decision.FieldID, id),
@ -605,6 +840,26 @@ func (c *DecisionClient) Hooks() []Hook {
return c.hooks.Decision
}
// Interceptors returns the client interceptors.
func (c *DecisionClient) Interceptors() []Interceptor {
return c.inters.Decision
}
func (c *DecisionClient) mutate(ctx context.Context, m *DecisionMutation) (Value, error) {
switch m.Op() {
case OpCreate:
return (&DecisionCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpUpdate:
return (&DecisionUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpUpdateOne:
return (&DecisionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpDelete, OpDeleteOne:
return (&DecisionDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
default:
return nil, fmt.Errorf("ent: unknown Decision mutation op: %q", m.Op())
}
}
// EventClient is a client for the Event schema.
type EventClient struct {
config
@ -621,6 +876,12 @@ func (c *EventClient) Use(hooks ...Hook) {
c.hooks.Event = append(c.hooks.Event, hooks...)
}
// Intercept adds a list of query interceptors to the interceptors stack.
// A call to `Intercept(f, g, h)` is equivalent to `event.Intercept(f(g(h())))`.
func (c *EventClient) Intercept(interceptors ...Interceptor) {
c.inters.Event = append(c.inters.Event, interceptors...)
}
// Create returns a builder for creating an Event entity.
func (c *EventClient) Create() *EventCreate {
mutation := newEventMutation(c.config, OpCreate)
@ -632,6 +893,21 @@ func (c *EventClient) CreateBulk(builders ...*EventCreate) *EventCreateBulk {
return &EventCreateBulk{config: c.config, builders: builders}
}
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
// a builder and applies setFunc on it.
func (c *EventClient) MapCreateBulk(slice any, setFunc func(*EventCreate, int)) *EventCreateBulk {
rv := reflect.ValueOf(slice)
if rv.Kind() != reflect.Slice {
return &EventCreateBulk{err: fmt.Errorf("calling to EventClient.MapCreateBulk with wrong type %T, need slice", slice)}
}
builders := make([]*EventCreate, rv.Len())
for i := 0; i < rv.Len(); i++ {
builders[i] = c.Create()
setFunc(builders[i], i)
}
return &EventCreateBulk{config: c.config, builders: builders}
}
// Update returns an update builder for Event.
func (c *EventClient) Update() *EventUpdate {
mutation := newEventMutation(c.config, OpUpdate)
@ -661,7 +937,7 @@ func (c *EventClient) DeleteOne(e *Event) *EventDeleteOne {
return c.DeleteOneID(e.ID)
}
// DeleteOne returns a builder for deleting the given entity by its id.
// DeleteOneID returns a builder for deleting the given entity by its id.
func (c *EventClient) DeleteOneID(id int) *EventDeleteOne {
builder := c.Delete().Where(event.ID(id))
builder.mutation.id = &id
@ -673,6 +949,8 @@ func (c *EventClient) DeleteOneID(id int) *EventDeleteOne {
func (c *EventClient) Query() *EventQuery {
return &EventQuery{
config: c.config,
ctx: &QueryContext{Type: TypeEvent},
inters: c.Interceptors(),
}
}
@ -692,8 +970,8 @@ func (c *EventClient) GetX(ctx context.Context, id int) *Event {
// QueryOwner queries the owner edge of an Event.
func (c *EventClient) QueryOwner(e *Event) *AlertQuery {
query := &AlertQuery{config: c.config}
query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
query := (&AlertClient{config: c.config}).Query()
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := e.ID
step := sqlgraph.NewStep(
sqlgraph.From(event.Table, event.FieldID, id),
@ -711,6 +989,26 @@ func (c *EventClient) Hooks() []Hook {
return c.hooks.Event
}
// Interceptors returns the client interceptors.
func (c *EventClient) Interceptors() []Interceptor {
return c.inters.Event
}
func (c *EventClient) mutate(ctx context.Context, m *EventMutation) (Value, error) {
switch m.Op() {
case OpCreate:
return (&EventCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpUpdate:
return (&EventUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpUpdateOne:
return (&EventUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpDelete, OpDeleteOne:
return (&EventDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
default:
return nil, fmt.Errorf("ent: unknown Event mutation op: %q", m.Op())
}
}
// MachineClient is a client for the Machine schema.
type MachineClient struct {
config
@ -727,6 +1025,12 @@ func (c *MachineClient) Use(hooks ...Hook) {
c.hooks.Machine = append(c.hooks.Machine, hooks...)
}
// Intercept adds a list of query interceptors to the interceptors stack.
// A call to `Intercept(f, g, h)` is equivalent to `machine.Intercept(f(g(h())))`.
func (c *MachineClient) Intercept(interceptors ...Interceptor) {
c.inters.Machine = append(c.inters.Machine, interceptors...)
}
// Create returns a builder for creating a Machine entity.
func (c *MachineClient) Create() *MachineCreate {
mutation := newMachineMutation(c.config, OpCreate)
@ -738,6 +1042,21 @@ func (c *MachineClient) CreateBulk(builders ...*MachineCreate) *MachineCreateBul
return &MachineCreateBulk{config: c.config, builders: builders}
}
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
// a builder and applies setFunc on it.
func (c *MachineClient) MapCreateBulk(slice any, setFunc func(*MachineCreate, int)) *MachineCreateBulk {
rv := reflect.ValueOf(slice)
if rv.Kind() != reflect.Slice {
return &MachineCreateBulk{err: fmt.Errorf("calling to MachineClient.MapCreateBulk with wrong type %T, need slice", slice)}
}
builders := make([]*MachineCreate, rv.Len())
for i := 0; i < rv.Len(); i++ {
builders[i] = c.Create()
setFunc(builders[i], i)
}
return &MachineCreateBulk{config: c.config, builders: builders}
}
// Update returns an update builder for Machine.
func (c *MachineClient) Update() *MachineUpdate {
mutation := newMachineMutation(c.config, OpUpdate)
@ -767,7 +1086,7 @@ func (c *MachineClient) DeleteOne(m *Machine) *MachineDeleteOne {
return c.DeleteOneID(m.ID)
}
// DeleteOne returns a builder for deleting the given entity by its id.
// DeleteOneID returns a builder for deleting the given entity by its id.
func (c *MachineClient) DeleteOneID(id int) *MachineDeleteOne {
builder := c.Delete().Where(machine.ID(id))
builder.mutation.id = &id
@ -779,6 +1098,8 @@ func (c *MachineClient) DeleteOneID(id int) *MachineDeleteOne {
func (c *MachineClient) Query() *MachineQuery {
return &MachineQuery{
config: c.config,
ctx: &QueryContext{Type: TypeMachine},
inters: c.Interceptors(),
}
}
@ -798,8 +1119,8 @@ func (c *MachineClient) GetX(ctx context.Context, id int) *Machine {
// QueryAlerts queries the alerts edge of a Machine.
func (c *MachineClient) QueryAlerts(m *Machine) *AlertQuery {
query := &AlertQuery{config: c.config}
query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
query := (&AlertClient{config: c.config}).Query()
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := m.ID
step := sqlgraph.NewStep(
sqlgraph.From(machine.Table, machine.FieldID, id),
@ -817,6 +1138,26 @@ func (c *MachineClient) Hooks() []Hook {
return c.hooks.Machine
}
// Interceptors returns the client interceptors.
func (c *MachineClient) Interceptors() []Interceptor {
return c.inters.Machine
}
func (c *MachineClient) mutate(ctx context.Context, m *MachineMutation) (Value, error) {
switch m.Op() {
case OpCreate:
return (&MachineCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpUpdate:
return (&MachineUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpUpdateOne:
return (&MachineUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpDelete, OpDeleteOne:
return (&MachineDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
default:
return nil, fmt.Errorf("ent: unknown Machine mutation op: %q", m.Op())
}
}
// MetaClient is a client for the Meta schema.
type MetaClient struct {
config
@ -833,6 +1174,12 @@ func (c *MetaClient) Use(hooks ...Hook) {
c.hooks.Meta = append(c.hooks.Meta, hooks...)
}
// Intercept adds a list of query interceptors to the interceptors stack.
// A call to `Intercept(f, g, h)` is equivalent to `meta.Intercept(f(g(h())))`.
func (c *MetaClient) Intercept(interceptors ...Interceptor) {
c.inters.Meta = append(c.inters.Meta, interceptors...)
}
// Create returns a builder for creating a Meta entity.
func (c *MetaClient) Create() *MetaCreate {
mutation := newMetaMutation(c.config, OpCreate)
@ -844,6 +1191,21 @@ func (c *MetaClient) CreateBulk(builders ...*MetaCreate) *MetaCreateBulk {
return &MetaCreateBulk{config: c.config, builders: builders}
}
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
// a builder and applies setFunc on it.
func (c *MetaClient) MapCreateBulk(slice any, setFunc func(*MetaCreate, int)) *MetaCreateBulk {
rv := reflect.ValueOf(slice)
if rv.Kind() != reflect.Slice {
return &MetaCreateBulk{err: fmt.Errorf("calling to MetaClient.MapCreateBulk with wrong type %T, need slice", slice)}
}
builders := make([]*MetaCreate, rv.Len())
for i := 0; i < rv.Len(); i++ {
builders[i] = c.Create()
setFunc(builders[i], i)
}
return &MetaCreateBulk{config: c.config, builders: builders}
}
// Update returns an update builder for Meta.
func (c *MetaClient) Update() *MetaUpdate {
mutation := newMetaMutation(c.config, OpUpdate)
@ -873,7 +1235,7 @@ func (c *MetaClient) DeleteOne(m *Meta) *MetaDeleteOne {
return c.DeleteOneID(m.ID)
}
// DeleteOne returns a builder for deleting the given entity by its id.
// DeleteOneID returns a builder for deleting the given entity by its id.
func (c *MetaClient) DeleteOneID(id int) *MetaDeleteOne {
builder := c.Delete().Where(meta.ID(id))
builder.mutation.id = &id
@ -885,6 +1247,8 @@ func (c *MetaClient) DeleteOneID(id int) *MetaDeleteOne {
func (c *MetaClient) Query() *MetaQuery {
return &MetaQuery{
config: c.config,
ctx: &QueryContext{Type: TypeMeta},
inters: c.Interceptors(),
}
}
@ -904,8 +1268,8 @@ func (c *MetaClient) GetX(ctx context.Context, id int) *Meta {
// QueryOwner queries the owner edge of a Meta.
func (c *MetaClient) QueryOwner(m *Meta) *AlertQuery {
query := &AlertQuery{config: c.config}
query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) {
query := (&AlertClient{config: c.config}).Query()
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
id := m.ID
step := sqlgraph.NewStep(
sqlgraph.From(meta.Table, meta.FieldID, id),
@ -922,3 +1286,33 @@ func (c *MetaClient) QueryOwner(m *Meta) *AlertQuery {
func (c *MetaClient) Hooks() []Hook {
return c.hooks.Meta
}
// Interceptors returns the client interceptors.
func (c *MetaClient) Interceptors() []Interceptor {
return c.inters.Meta
}
func (c *MetaClient) mutate(ctx context.Context, m *MetaMutation) (Value, error) {
switch m.Op() {
case OpCreate:
return (&MetaCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpUpdate:
return (&MetaUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpUpdateOne:
return (&MetaUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
case OpDelete, OpDeleteOne:
return (&MetaDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
default:
return nil, fmt.Errorf("ent: unknown Meta mutation op: %q", m.Op())
}
}
// hooks and interceptors per client, for fast access.
type (
hooks struct {
Alert, Bouncer, ConfigItem, Decision, Event, Machine, Meta []ent.Hook
}
inters struct {
Alert, Bouncer, ConfigItem, Decision, Event, Machine, Meta []ent.Interceptor
}
)
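For orientation, the per-entity Use and Intercept methods above append into these per-client slices. A hypothetical registration from calling code (assumes the generated package is imported as ent and context is in scope):
// Hypothetical audit hook; every Alert mutation is routed through it.
func registerAlertAudit(client *ent.Client) {
	client.Alert.Use(func(next ent.Mutator) ent.Mutator {
		return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
			// m.Op() tells whether this is a create, update or delete.
			return next.Mutate(ctx, m)
		})
	})
}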

View file

@ -1,65 +0,0 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"entgo.io/ent"
"entgo.io/ent/dialect"
)
// Option function to configure the client.
type Option func(*config)
// Config is the configuration for the client and its builder.
type config struct {
// driver used for executing database requests.
driver dialect.Driver
// debug enable a debug logging.
debug bool
// log used for logging on debug mode.
log func(...any)
// hooks to execute on mutations.
hooks *hooks
}
// hooks per client, for fast access.
type hooks struct {
Alert []ent.Hook
Bouncer []ent.Hook
ConfigItem []ent.Hook
Decision []ent.Hook
Event []ent.Hook
Machine []ent.Hook
Meta []ent.Hook
}
// Options applies the options on the config object.
func (c *config) options(opts ...Option) {
for _, opt := range opts {
opt(c)
}
if c.debug {
c.driver = dialect.Debug(c.driver, c.log)
}
}
// Debug enables debug logging on the ent.Driver.
func Debug() Option {
return func(c *config) {
c.debug = true
}
}
// Log sets the logging function for debug mode.
func Log(fn func(...any)) Option {
return func(c *config) {
c.log = fn
}
}
// Driver configures the client driver.
func Driver(driver dialect.Driver) Option {
return func(c *config) {
c.driver = driver
}
}

View file

@ -7,6 +7,7 @@ import (
"strings"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem"
)
@ -23,7 +24,8 @@ type ConfigItem struct {
// Name holds the value of the "name" field.
Name string `json:"name"`
// Value holds the value of the "value" field.
Value string `json:"value"`
Value string `json:"value"`
selectValues sql.SelectValues
}
// scanValues returns the types for scanning values from sql.Rows.
@ -38,7 +40,7 @@ func (*ConfigItem) scanValues(columns []string) ([]any, error) {
case configitem.FieldCreatedAt, configitem.FieldUpdatedAt:
values[i] = new(sql.NullTime)
default:
return nil, fmt.Errorf("unexpected column %q for type ConfigItem", columns[i])
values[i] = new(sql.UnknownType)
}
}
return values, nil
@ -84,16 +86,24 @@ func (ci *ConfigItem) assignValues(columns []string, values []any) error {
} else if value.Valid {
ci.Value = value.String
}
default:
ci.selectValues.Set(columns[i], values[i])
}
}
return nil
}
// GetValue returns the ent.Value that was dynamically selected and assigned to the ConfigItem.
// This includes values selected through modifiers, order, etc.
func (ci *ConfigItem) GetValue(name string) (ent.Value, error) {
return ci.selectValues.Get(name)
}
// Update returns a builder for updating this ConfigItem.
// Note that you need to call ConfigItem.Unwrap() before calling this method if this ConfigItem
// was returned from a transaction, and the transaction was committed or rolled back.
func (ci *ConfigItem) Update() *ConfigItemUpdateOne {
return (&ConfigItemClient{config: ci.config}).UpdateOne(ci)
return NewConfigItemClient(ci.config).UpdateOne(ci)
}
// Unwrap unwraps the ConfigItem entity that was returned from a transaction after it was closed,
@ -133,9 +143,3 @@ func (ci *ConfigItem) String() string {
// ConfigItems is a parsable slice of ConfigItem.
type ConfigItems []*ConfigItem
func (ci ConfigItems) config(cfg config) {
for _i := range ci {
ci[_i].config = cfg
}
}

View file

@ -4,6 +4,8 @@ package configitem
import (
"time"
"entgo.io/ent/dialect/sql"
)
const (
@ -52,3 +54,31 @@ var (
// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
UpdateDefaultUpdatedAt func() time.Time
)
// OrderOption defines the ordering options for the ConfigItem queries.
type OrderOption func(*sql.Selector)
// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldID, opts...).ToFunc()
}
// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}
// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}
// ByName orders the results by the name field.
func ByName(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldName, opts...).ToFunc()
}
// ByValue orders the results by the value field.
func ByValue(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldValue, opts...).ToFunc()
}
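A brief caller-side sketch of the new OrderOption helpers (illustrative, not part of this commit; assumes the generated ent and configitem packages are imported, and "sql" is entgo.io/ent/dialect/sql):
// Hypothetical query ordering config items by name, descending.
func listConfigItemsByName(ctx context.Context, client *ent.Client) ([]*ent.ConfigItem, error) {
	return client.ConfigItem.Query().
		Order(configitem.ByName(sql.OrderDesc())).
		All(ctx)
}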

View file

@ -11,485 +11,310 @@ import (
// ID filters vertices based on their ID field.
func ID(id int) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldID), id))
})
return predicate.ConfigItem(sql.FieldEQ(FieldID, id))
}
// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldID), id))
})
return predicate.ConfigItem(sql.FieldEQ(FieldID, id))
}
// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldID), id))
})
return predicate.ConfigItem(sql.FieldNEQ(FieldID, id))
}
// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
v := make([]any, len(ids))
for i := range v {
v[i] = ids[i]
}
s.Where(sql.In(s.C(FieldID), v...))
})
return predicate.ConfigItem(sql.FieldIn(FieldID, ids...))
}
// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
v := make([]any, len(ids))
for i := range v {
v[i] = ids[i]
}
s.Where(sql.NotIn(s.C(FieldID), v...))
})
return predicate.ConfigItem(sql.FieldNotIn(FieldID, ids...))
}
// IDGT applies the GT predicate on the ID field.
func IDGT(id int) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldID), id))
})
return predicate.ConfigItem(sql.FieldGT(FieldID, id))
}
// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldID), id))
})
return predicate.ConfigItem(sql.FieldGTE(FieldID, id))
}
// IDLT applies the LT predicate on the ID field.
func IDLT(id int) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldID), id))
})
return predicate.ConfigItem(sql.FieldLT(FieldID, id))
}
// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldID), id))
})
return predicate.ConfigItem(sql.FieldLTE(FieldID, id))
}
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldCreatedAt), v))
})
return predicate.ConfigItem(sql.FieldEQ(FieldCreatedAt, v))
}
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
})
return predicate.ConfigItem(sql.FieldEQ(FieldUpdatedAt, v))
}
// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
func Name(v string) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldName), v))
})
return predicate.ConfigItem(sql.FieldEQ(FieldName, v))
}
// Value applies equality check predicate on the "value" field. It's identical to ValueEQ.
func Value(v string) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldValue), v))
})
return predicate.ConfigItem(sql.FieldEQ(FieldValue, v))
}
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldCreatedAt), v))
})
return predicate.ConfigItem(sql.FieldEQ(FieldCreatedAt, v))
}
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldCreatedAt), v))
})
return predicate.ConfigItem(sql.FieldNEQ(FieldCreatedAt, v))
}
// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.ConfigItem {
v := make([]any, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.In(s.C(FieldCreatedAt), v...))
})
return predicate.ConfigItem(sql.FieldIn(FieldCreatedAt, vs...))
}
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.ConfigItem {
v := make([]any, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.NotIn(s.C(FieldCreatedAt), v...))
})
return predicate.ConfigItem(sql.FieldNotIn(FieldCreatedAt, vs...))
}
// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldCreatedAt), v))
})
return predicate.ConfigItem(sql.FieldGT(FieldCreatedAt, v))
}
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldCreatedAt), v))
})
return predicate.ConfigItem(sql.FieldGTE(FieldCreatedAt, v))
}
// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldCreatedAt), v))
})
return predicate.ConfigItem(sql.FieldLT(FieldCreatedAt, v))
}
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldCreatedAt), v))
})
return predicate.ConfigItem(sql.FieldLTE(FieldCreatedAt, v))
}
// CreatedAtIsNil applies the IsNil predicate on the "created_at" field.
func CreatedAtIsNil() predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.IsNull(s.C(FieldCreatedAt)))
})
return predicate.ConfigItem(sql.FieldIsNull(FieldCreatedAt))
}
// CreatedAtNotNil applies the NotNil predicate on the "created_at" field.
func CreatedAtNotNil() predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.NotNull(s.C(FieldCreatedAt)))
})
return predicate.ConfigItem(sql.FieldNotNull(FieldCreatedAt))
}
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
})
return predicate.ConfigItem(sql.FieldEQ(FieldUpdatedAt, v))
}
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldUpdatedAt), v))
})
return predicate.ConfigItem(sql.FieldNEQ(FieldUpdatedAt, v))
}
// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.ConfigItem {
v := make([]any, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.In(s.C(FieldUpdatedAt), v...))
})
return predicate.ConfigItem(sql.FieldIn(FieldUpdatedAt, vs...))
}
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.ConfigItem {
v := make([]any, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...))
})
return predicate.ConfigItem(sql.FieldNotIn(FieldUpdatedAt, vs...))
}
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldUpdatedAt), v))
})
return predicate.ConfigItem(sql.FieldGT(FieldUpdatedAt, v))
}
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldUpdatedAt), v))
})
return predicate.ConfigItem(sql.FieldGTE(FieldUpdatedAt, v))
}
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldUpdatedAt), v))
})
return predicate.ConfigItem(sql.FieldLT(FieldUpdatedAt, v))
}
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldUpdatedAt), v))
})
return predicate.ConfigItem(sql.FieldLTE(FieldUpdatedAt, v))
}
// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field.
func UpdatedAtIsNil() predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.IsNull(s.C(FieldUpdatedAt)))
})
return predicate.ConfigItem(sql.FieldIsNull(FieldUpdatedAt))
}
// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field.
func UpdatedAtNotNil() predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.NotNull(s.C(FieldUpdatedAt)))
})
return predicate.ConfigItem(sql.FieldNotNull(FieldUpdatedAt))
}
// NameEQ applies the EQ predicate on the "name" field.
func NameEQ(v string) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldName), v))
})
return predicate.ConfigItem(sql.FieldEQ(FieldName, v))
}
// NameNEQ applies the NEQ predicate on the "name" field.
func NameNEQ(v string) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldName), v))
})
return predicate.ConfigItem(sql.FieldNEQ(FieldName, v))
}
// NameIn applies the In predicate on the "name" field.
func NameIn(vs ...string) predicate.ConfigItem {
v := make([]any, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.In(s.C(FieldName), v...))
})
return predicate.ConfigItem(sql.FieldIn(FieldName, vs...))
}
// NameNotIn applies the NotIn predicate on the "name" field.
func NameNotIn(vs ...string) predicate.ConfigItem {
v := make([]any, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.NotIn(s.C(FieldName), v...))
})
return predicate.ConfigItem(sql.FieldNotIn(FieldName, vs...))
}
// NameGT applies the GT predicate on the "name" field.
func NameGT(v string) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldName), v))
})
return predicate.ConfigItem(sql.FieldGT(FieldName, v))
}
// NameGTE applies the GTE predicate on the "name" field.
func NameGTE(v string) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldName), v))
})
return predicate.ConfigItem(sql.FieldGTE(FieldName, v))
}
// NameLT applies the LT predicate on the "name" field.
func NameLT(v string) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldName), v))
})
return predicate.ConfigItem(sql.FieldLT(FieldName, v))
}
// NameLTE applies the LTE predicate on the "name" field.
func NameLTE(v string) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldName), v))
})
return predicate.ConfigItem(sql.FieldLTE(FieldName, v))
}
// NameContains applies the Contains predicate on the "name" field.
func NameContains(v string) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.Contains(s.C(FieldName), v))
})
return predicate.ConfigItem(sql.FieldContains(FieldName, v))
}
// NameHasPrefix applies the HasPrefix predicate on the "name" field.
func NameHasPrefix(v string) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.HasPrefix(s.C(FieldName), v))
})
return predicate.ConfigItem(sql.FieldHasPrefix(FieldName, v))
}
// NameHasSuffix applies the HasSuffix predicate on the "name" field.
func NameHasSuffix(v string) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.HasSuffix(s.C(FieldName), v))
})
return predicate.ConfigItem(sql.FieldHasSuffix(FieldName, v))
}
// NameEqualFold applies the EqualFold predicate on the "name" field.
func NameEqualFold(v string) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.EqualFold(s.C(FieldName), v))
})
return predicate.ConfigItem(sql.FieldEqualFold(FieldName, v))
}
// NameContainsFold applies the ContainsFold predicate on the "name" field.
func NameContainsFold(v string) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.ContainsFold(s.C(FieldName), v))
})
return predicate.ConfigItem(sql.FieldContainsFold(FieldName, v))
}
// ValueEQ applies the EQ predicate on the "value" field.
func ValueEQ(v string) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldValue), v))
})
return predicate.ConfigItem(sql.FieldEQ(FieldValue, v))
}
// ValueNEQ applies the NEQ predicate on the "value" field.
func ValueNEQ(v string) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldValue), v))
})
return predicate.ConfigItem(sql.FieldNEQ(FieldValue, v))
}
// ValueIn applies the In predicate on the "value" field.
func ValueIn(vs ...string) predicate.ConfigItem {
v := make([]any, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.In(s.C(FieldValue), v...))
})
return predicate.ConfigItem(sql.FieldIn(FieldValue, vs...))
}
// ValueNotIn applies the NotIn predicate on the "value" field.
func ValueNotIn(vs ...string) predicate.ConfigItem {
v := make([]any, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.NotIn(s.C(FieldValue), v...))
})
return predicate.ConfigItem(sql.FieldNotIn(FieldValue, vs...))
}
// ValueGT applies the GT predicate on the "value" field.
func ValueGT(v string) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldValue), v))
})
return predicate.ConfigItem(sql.FieldGT(FieldValue, v))
}
// ValueGTE applies the GTE predicate on the "value" field.
func ValueGTE(v string) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldValue), v))
})
return predicate.ConfigItem(sql.FieldGTE(FieldValue, v))
}
// ValueLT applies the LT predicate on the "value" field.
func ValueLT(v string) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldValue), v))
})
return predicate.ConfigItem(sql.FieldLT(FieldValue, v))
}
// ValueLTE applies the LTE predicate on the "value" field.
func ValueLTE(v string) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldValue), v))
})
return predicate.ConfigItem(sql.FieldLTE(FieldValue, v))
}
// ValueContains applies the Contains predicate on the "value" field.
func ValueContains(v string) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.Contains(s.C(FieldValue), v))
})
return predicate.ConfigItem(sql.FieldContains(FieldValue, v))
}
// ValueHasPrefix applies the HasPrefix predicate on the "value" field.
func ValueHasPrefix(v string) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.HasPrefix(s.C(FieldValue), v))
})
return predicate.ConfigItem(sql.FieldHasPrefix(FieldValue, v))
}
// ValueHasSuffix applies the HasSuffix predicate on the "value" field.
func ValueHasSuffix(v string) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.HasSuffix(s.C(FieldValue), v))
})
return predicate.ConfigItem(sql.FieldHasSuffix(FieldValue, v))
}
// ValueEqualFold applies the EqualFold predicate on the "value" field.
func ValueEqualFold(v string) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.EqualFold(s.C(FieldValue), v))
})
return predicate.ConfigItem(sql.FieldEqualFold(FieldValue, v))
}
// ValueContainsFold applies the ContainsFold predicate on the "value" field.
func ValueContainsFold(v string) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s.Where(sql.ContainsFold(s.C(FieldValue), v))
})
return predicate.ConfigItem(sql.FieldContainsFold(FieldValue, v))
}
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.ConfigItem) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s1 := s.Clone().SetP(nil)
for _, p := range predicates {
p(s1)
}
s.Where(s1.P())
})
return predicate.ConfigItem(sql.AndPredicates(predicates...))
}
// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.ConfigItem) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
s1 := s.Clone().SetP(nil)
for i, p := range predicates {
if i > 0 {
s1.Or()
}
p(s1)
}
s.Where(s1.P())
})
return predicate.ConfigItem(sql.OrPredicates(predicates...))
}
// Not applies the not operator on the given predicate.
func Not(p predicate.ConfigItem) predicate.ConfigItem {
return predicate.ConfigItem(func(s *sql.Selector) {
p(s.Not())
})
return predicate.ConfigItem(sql.NotPredicates(p))
}
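The predicate helpers now delegate to the sql.Field* and sql.*Predicates shortcuts, but caller-side composition is unchanged. A small sketch (not part of this commit; the field values are hypothetical):
// Hypothetical filter combining the regenerated predicates.
func findConfigItems(ctx context.Context, client *ent.Client) ([]*ent.ConfigItem, error) {
	return client.ConfigItem.Query().
		Where(configitem.Or(
			configitem.NameEQ("blocklist"),
			configitem.NameHasPrefix("crowdsec_"),
		)).
		All(ctx)
}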

View file

@ -67,50 +67,8 @@ func (cic *ConfigItemCreate) Mutation() *ConfigItemMutation {
// Save creates the ConfigItem in the database.
func (cic *ConfigItemCreate) Save(ctx context.Context) (*ConfigItem, error) {
var (
err error
node *ConfigItem
)
cic.defaults()
if len(cic.hooks) == 0 {
if err = cic.check(); err != nil {
return nil, err
}
node, err = cic.sqlSave(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*ConfigItemMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
if err = cic.check(); err != nil {
return nil, err
}
cic.mutation = mutation
if node, err = cic.sqlSave(ctx); err != nil {
return nil, err
}
mutation.id = &node.ID
mutation.done = true
return node, err
})
for i := len(cic.hooks) - 1; i >= 0; i-- {
if cic.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = cic.hooks[i](mut)
}
v, err := mut.Mutate(ctx, cic.mutation)
if err != nil {
return nil, err
}
nv, ok := v.(*ConfigItem)
if !ok {
return nil, fmt.Errorf("unexpected node type %T returned from ConfigItemMutation", v)
}
node = nv
}
return node, err
return withHooks(ctx, cic.sqlSave, cic.mutation, cic.hooks)
}
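With the inline hook-dispatch loop folded into withHooks, the caller-facing Save path is unchanged. A small sketch (not part of this commit; the name/value pair is hypothetical):
// Hypothetical create call; behavior is identical before and after the refactor.
func saveConfigItem(ctx context.Context, client *ent.Client) (*ent.ConfigItem, error) {
	return client.ConfigItem.Create().
		SetName("console_token").
		SetValue("example-value").
		Save(ctx)
}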
// SaveX calls Save and panics if Save returns an error.
@ -159,6 +117,9 @@ func (cic *ConfigItemCreate) check() error {
}
func (cic *ConfigItemCreate) sqlSave(ctx context.Context) (*ConfigItem, error) {
if err := cic.check(); err != nil {
return nil, err
}
_node, _spec := cic.createSpec()
if err := sqlgraph.CreateNode(ctx, cic.driver, _spec); err != nil {
if sqlgraph.IsConstraintError(err) {
@ -168,50 +129,30 @@ func (cic *ConfigItemCreate) sqlSave(ctx context.Context) (*ConfigItem, error) {
}
id := _spec.ID.Value.(int64)
_node.ID = int(id)
cic.mutation.id = &_node.ID
cic.mutation.done = true
return _node, nil
}
func (cic *ConfigItemCreate) createSpec() (*ConfigItem, *sqlgraph.CreateSpec) {
var (
_node = &ConfigItem{config: cic.config}
_spec = &sqlgraph.CreateSpec{
Table: configitem.Table,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: configitem.FieldID,
},
}
_spec = sqlgraph.NewCreateSpec(configitem.Table, sqlgraph.NewFieldSpec(configitem.FieldID, field.TypeInt))
)
if value, ok := cic.mutation.CreatedAt(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: configitem.FieldCreatedAt,
})
_spec.SetField(configitem.FieldCreatedAt, field.TypeTime, value)
_node.CreatedAt = &value
}
if value, ok := cic.mutation.UpdatedAt(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: configitem.FieldUpdatedAt,
})
_spec.SetField(configitem.FieldUpdatedAt, field.TypeTime, value)
_node.UpdatedAt = &value
}
if value, ok := cic.mutation.Name(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: configitem.FieldName,
})
_spec.SetField(configitem.FieldName, field.TypeString, value)
_node.Name = value
}
if value, ok := cic.mutation.Value(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: configitem.FieldValue,
})
_spec.SetField(configitem.FieldValue, field.TypeString, value)
_node.Value = value
}
return _node, _spec
@ -220,11 +161,15 @@ func (cic *ConfigItemCreate) createSpec() (*ConfigItem, *sqlgraph.CreateSpec) {
// ConfigItemCreateBulk is the builder for creating many ConfigItem entities in bulk.
type ConfigItemCreateBulk struct {
config
err error
builders []*ConfigItemCreate
}
// Save creates the ConfigItem entities in the database.
func (cicb *ConfigItemCreateBulk) Save(ctx context.Context) ([]*ConfigItem, error) {
if cicb.err != nil {
return nil, cicb.err
}
specs := make([]*sqlgraph.CreateSpec, len(cicb.builders))
nodes := make([]*ConfigItem, len(cicb.builders))
mutators := make([]Mutator, len(cicb.builders))
@ -241,8 +186,8 @@ func (cicb *ConfigItemCreateBulk) Save(ctx context.Context) ([]*ConfigItem, erro
return nil, err
}
builder.mutation = mutation
nodes[i], specs[i] = builder.createSpec()
var err error
nodes[i], specs[i] = builder.createSpec()
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, cicb.builders[i+1].mutation)
} else {

View file

@ -4,7 +4,6 @@ package ent
import (
"context"
"fmt"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
@ -28,34 +27,7 @@ func (cid *ConfigItemDelete) Where(ps ...predicate.ConfigItem) *ConfigItemDelete
// Exec executes the deletion query and returns how many vertices were deleted.
func (cid *ConfigItemDelete) Exec(ctx context.Context) (int, error) {
var (
err error
affected int
)
if len(cid.hooks) == 0 {
affected, err = cid.sqlExec(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*ConfigItemMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
cid.mutation = mutation
affected, err = cid.sqlExec(ctx)
mutation.done = true
return affected, err
})
for i := len(cid.hooks) - 1; i >= 0; i-- {
if cid.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = cid.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, cid.mutation); err != nil {
return 0, err
}
}
return affected, err
return withHooks(ctx, cid.sqlExec, cid.mutation, cid.hooks)
}
// ExecX is like Exec, but panics if an error occurs.
@ -68,15 +40,7 @@ func (cid *ConfigItemDelete) ExecX(ctx context.Context) int {
}
func (cid *ConfigItemDelete) sqlExec(ctx context.Context) (int, error) {
_spec := &sqlgraph.DeleteSpec{
Node: &sqlgraph.NodeSpec{
Table: configitem.Table,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: configitem.FieldID,
},
},
}
_spec := sqlgraph.NewDeleteSpec(configitem.Table, sqlgraph.NewFieldSpec(configitem.FieldID, field.TypeInt))
if ps := cid.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@ -88,6 +52,7 @@ func (cid *ConfigItemDelete) sqlExec(ctx context.Context) (int, error) {
if err != nil && sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
cid.mutation.done = true
return affected, err
}
@ -96,6 +61,12 @@ type ConfigItemDeleteOne struct {
cid *ConfigItemDelete
}
// Where appends a list of predicates to the ConfigItemDelete builder.
func (cido *ConfigItemDeleteOne) Where(ps ...predicate.ConfigItem) *ConfigItemDeleteOne {
cido.cid.mutation.Where(ps...)
return cido
}
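A short sketch of the new Where support on the delete-one builder (illustrative, not part of this commit; id and expectedName come from the caller):
// Hypothetical conditional delete; Exec returns a NotFoundError if nothing matched.
func deleteConfigItemIfNamed(ctx context.Context, client *ent.Client, id int, expectedName string) error {
	return client.ConfigItem.DeleteOneID(id).
		Where(configitem.NameEQ(expectedName)).
		Exec(ctx)
}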
// Exec executes the deletion query.
func (cido *ConfigItemDeleteOne) Exec(ctx context.Context) error {
n, err := cido.cid.Exec(ctx)
@ -111,5 +82,7 @@ func (cido *ConfigItemDeleteOne) Exec(ctx context.Context) error {
// ExecX is like Exec, but panics if an error occurs.
func (cido *ConfigItemDeleteOne) ExecX(ctx context.Context) {
cido.cid.ExecX(ctx)
if err := cido.Exec(ctx); err != nil {
panic(err)
}
}

View file

@ -17,11 +17,9 @@ import (
// ConfigItemQuery is the builder for querying ConfigItem entities.
type ConfigItemQuery struct {
config
limit *int
offset *int
unique *bool
order []OrderFunc
fields []string
ctx *QueryContext
order []configitem.OrderOption
inters []Interceptor
predicates []predicate.ConfigItem
// intermediate query (i.e. traversal path).
sql *sql.Selector
@ -34,27 +32,27 @@ func (ciq *ConfigItemQuery) Where(ps ...predicate.ConfigItem) *ConfigItemQuery {
return ciq
}
// Limit adds a limit step to the query.
// Limit the number of records to be returned by this query.
func (ciq *ConfigItemQuery) Limit(limit int) *ConfigItemQuery {
ciq.limit = &limit
ciq.ctx.Limit = &limit
return ciq
}
// Offset adds an offset step to the query.
// Offset to start from.
func (ciq *ConfigItemQuery) Offset(offset int) *ConfigItemQuery {
ciq.offset = &offset
ciq.ctx.Offset = &offset
return ciq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (ciq *ConfigItemQuery) Unique(unique bool) *ConfigItemQuery {
ciq.unique = &unique
ciq.ctx.Unique = &unique
return ciq
}
// Order adds an order step to the query.
func (ciq *ConfigItemQuery) Order(o ...OrderFunc) *ConfigItemQuery {
// Order specifies how the records should be ordered.
func (ciq *ConfigItemQuery) Order(o ...configitem.OrderOption) *ConfigItemQuery {
ciq.order = append(ciq.order, o...)
return ciq
}
@ -62,7 +60,7 @@ func (ciq *ConfigItemQuery) Order(o ...OrderFunc) *ConfigItemQuery {
// First returns the first ConfigItem entity from the query.
// Returns a *NotFoundError when no ConfigItem was found.
func (ciq *ConfigItemQuery) First(ctx context.Context) (*ConfigItem, error) {
nodes, err := ciq.Limit(1).All(ctx)
nodes, err := ciq.Limit(1).All(setContextOp(ctx, ciq.ctx, "First"))
if err != nil {
return nil, err
}
@ -85,7 +83,7 @@ func (ciq *ConfigItemQuery) FirstX(ctx context.Context) *ConfigItem {
// Returns a *NotFoundError when no ConfigItem ID was found.
func (ciq *ConfigItemQuery) FirstID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = ciq.Limit(1).IDs(ctx); err != nil {
if ids, err = ciq.Limit(1).IDs(setContextOp(ctx, ciq.ctx, "FirstID")); err != nil {
return
}
if len(ids) == 0 {
@ -108,7 +106,7 @@ func (ciq *ConfigItemQuery) FirstIDX(ctx context.Context) int {
// Returns a *NotSingularError when more than one ConfigItem entity is found.
// Returns a *NotFoundError when no ConfigItem entities are found.
func (ciq *ConfigItemQuery) Only(ctx context.Context) (*ConfigItem, error) {
nodes, err := ciq.Limit(2).All(ctx)
nodes, err := ciq.Limit(2).All(setContextOp(ctx, ciq.ctx, "Only"))
if err != nil {
return nil, err
}
@ -136,7 +134,7 @@ func (ciq *ConfigItemQuery) OnlyX(ctx context.Context) *ConfigItem {
// Returns a *NotFoundError when no entities are found.
func (ciq *ConfigItemQuery) OnlyID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = ciq.Limit(2).IDs(ctx); err != nil {
if ids, err = ciq.Limit(2).IDs(setContextOp(ctx, ciq.ctx, "OnlyID")); err != nil {
return
}
switch len(ids) {
@ -161,10 +159,12 @@ func (ciq *ConfigItemQuery) OnlyIDX(ctx context.Context) int {
// All executes the query and returns a list of ConfigItems.
func (ciq *ConfigItemQuery) All(ctx context.Context) ([]*ConfigItem, error) {
ctx = setContextOp(ctx, ciq.ctx, "All")
if err := ciq.prepareQuery(ctx); err != nil {
return nil, err
}
return ciq.sqlAll(ctx)
qr := querierAll[[]*ConfigItem, *ConfigItemQuery]()
return withInterceptors[[]*ConfigItem](ctx, ciq, qr, ciq.inters)
}
// AllX is like All, but panics if an error occurs.
@ -177,9 +177,12 @@ func (ciq *ConfigItemQuery) AllX(ctx context.Context) []*ConfigItem {
}
// IDs executes the query and returns a list of ConfigItem IDs.
func (ciq *ConfigItemQuery) IDs(ctx context.Context) ([]int, error) {
var ids []int
if err := ciq.Select(configitem.FieldID).Scan(ctx, &ids); err != nil {
func (ciq *ConfigItemQuery) IDs(ctx context.Context) (ids []int, err error) {
if ciq.ctx.Unique == nil && ciq.path != nil {
ciq.Unique(true)
}
ctx = setContextOp(ctx, ciq.ctx, "IDs")
if err = ciq.Select(configitem.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
@ -196,10 +199,11 @@ func (ciq *ConfigItemQuery) IDsX(ctx context.Context) []int {
// Count returns the count of the given query.
func (ciq *ConfigItemQuery) Count(ctx context.Context) (int, error) {
ctx = setContextOp(ctx, ciq.ctx, "Count")
if err := ciq.prepareQuery(ctx); err != nil {
return 0, err
}
return ciq.sqlCount(ctx)
return withInterceptors[int](ctx, ciq, querierCount[*ConfigItemQuery](), ciq.inters)
}
// CountX is like Count, but panics if an error occurs.
@ -213,10 +217,15 @@ func (ciq *ConfigItemQuery) CountX(ctx context.Context) int {
// Exist returns true if the query has elements in the graph.
func (ciq *ConfigItemQuery) Exist(ctx context.Context) (bool, error) {
if err := ciq.prepareQuery(ctx); err != nil {
return false, err
ctx = setContextOp(ctx, ciq.ctx, "Exist")
switch _, err := ciq.FirstID(ctx); {
case IsNotFound(err):
return false, nil
case err != nil:
return false, fmt.Errorf("ent: check existence: %w", err)
default:
return true, nil
}
return ciq.sqlExist(ctx)
}
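Exist is now built on FirstID instead of a dedicated sqlExist query; the caller-side API is unchanged, for example:
// Hypothetical existence check; name comes from the caller.
func configItemExists(ctx context.Context, client *ent.Client, name string) (bool, error) {
	return client.ConfigItem.Query().
		Where(configitem.NameEQ(name)).
		Exist(ctx)
}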
// ExistX is like Exist, but panics if an error occurs.
@ -236,14 +245,13 @@ func (ciq *ConfigItemQuery) Clone() *ConfigItemQuery {
}
return &ConfigItemQuery{
config: ciq.config,
limit: ciq.limit,
offset: ciq.offset,
order: append([]OrderFunc{}, ciq.order...),
ctx: ciq.ctx.Clone(),
order: append([]configitem.OrderOption{}, ciq.order...),
inters: append([]Interceptor{}, ciq.inters...),
predicates: append([]predicate.ConfigItem{}, ciq.predicates...),
// clone intermediate query.
sql: ciq.sql.Clone(),
path: ciq.path,
unique: ciq.unique,
sql: ciq.sql.Clone(),
path: ciq.path,
}
}
@ -262,16 +270,11 @@ func (ciq *ConfigItemQuery) Clone() *ConfigItemQuery {
// Aggregate(ent.Count()).
// Scan(ctx, &v)
func (ciq *ConfigItemQuery) GroupBy(field string, fields ...string) *ConfigItemGroupBy {
grbuild := &ConfigItemGroupBy{config: ciq.config}
grbuild.fields = append([]string{field}, fields...)
grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) {
if err := ciq.prepareQuery(ctx); err != nil {
return nil, err
}
return ciq.sqlQuery(ctx), nil
}
ciq.ctx.Fields = append([]string{field}, fields...)
grbuild := &ConfigItemGroupBy{build: ciq}
grbuild.flds = &ciq.ctx.Fields
grbuild.label = configitem.Label
grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan
grbuild.scan = grbuild.Scan
return grbuild
}
@ -288,15 +291,30 @@ func (ciq *ConfigItemQuery) GroupBy(field string, fields ...string) *ConfigItemG
// Select(configitem.FieldCreatedAt).
// Scan(ctx, &v)
func (ciq *ConfigItemQuery) Select(fields ...string) *ConfigItemSelect {
ciq.fields = append(ciq.fields, fields...)
selbuild := &ConfigItemSelect{ConfigItemQuery: ciq}
selbuild.label = configitem.Label
selbuild.flds, selbuild.scan = &ciq.fields, selbuild.Scan
return selbuild
ciq.ctx.Fields = append(ciq.ctx.Fields, fields...)
sbuild := &ConfigItemSelect{ConfigItemQuery: ciq}
sbuild.label = configitem.Label
sbuild.flds, sbuild.scan = &ciq.ctx.Fields, sbuild.Scan
return sbuild
}
// Aggregate returns a ConfigItemSelect configured with the given aggregations.
func (ciq *ConfigItemQuery) Aggregate(fns ...AggregateFunc) *ConfigItemSelect {
return ciq.Select().Aggregate(fns...)
}
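A brief sketch of the new Aggregate shortcut (illustrative, not part of this commit; Count here is the aggregate function generated in the ent package):
// Hypothetical count using the aggregate path instead of Count(ctx).
func countConfigItems(ctx context.Context, client *ent.Client) (int, error) {
	return client.ConfigItem.Query().
		Aggregate(ent.Count()).
		Int(ctx)
}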
func (ciq *ConfigItemQuery) prepareQuery(ctx context.Context) error {
for _, f := range ciq.fields {
for _, inter := range ciq.inters {
if inter == nil {
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
}
if trv, ok := inter.(Traverser); ok {
if err := trv.Traverse(ctx, ciq); err != nil {
return err
}
}
}
for _, f := range ciq.ctx.Fields {
if !configitem.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
@ -338,41 +356,22 @@ func (ciq *ConfigItemQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*
func (ciq *ConfigItemQuery) sqlCount(ctx context.Context) (int, error) {
_spec := ciq.querySpec()
_spec.Node.Columns = ciq.fields
if len(ciq.fields) > 0 {
_spec.Unique = ciq.unique != nil && *ciq.unique
_spec.Node.Columns = ciq.ctx.Fields
if len(ciq.ctx.Fields) > 0 {
_spec.Unique = ciq.ctx.Unique != nil && *ciq.ctx.Unique
}
return sqlgraph.CountNodes(ctx, ciq.driver, _spec)
}
func (ciq *ConfigItemQuery) sqlExist(ctx context.Context) (bool, error) {
switch _, err := ciq.FirstID(ctx); {
case IsNotFound(err):
return false, nil
case err != nil:
return false, fmt.Errorf("ent: check existence: %w", err)
default:
return true, nil
}
}
func (ciq *ConfigItemQuery) querySpec() *sqlgraph.QuerySpec {
_spec := &sqlgraph.QuerySpec{
Node: &sqlgraph.NodeSpec{
Table: configitem.Table,
Columns: configitem.Columns,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: configitem.FieldID,
},
},
From: ciq.sql,
Unique: true,
}
if unique := ciq.unique; unique != nil {
_spec := sqlgraph.NewQuerySpec(configitem.Table, configitem.Columns, sqlgraph.NewFieldSpec(configitem.FieldID, field.TypeInt))
_spec.From = ciq.sql
if unique := ciq.ctx.Unique; unique != nil {
_spec.Unique = *unique
} else if ciq.path != nil {
_spec.Unique = true
}
if fields := ciq.fields; len(fields) > 0 {
if fields := ciq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, configitem.FieldID)
for i := range fields {
@ -388,10 +387,10 @@ func (ciq *ConfigItemQuery) querySpec() *sqlgraph.QuerySpec {
}
}
}
if limit := ciq.limit; limit != nil {
if limit := ciq.ctx.Limit; limit != nil {
_spec.Limit = *limit
}
if offset := ciq.offset; offset != nil {
if offset := ciq.ctx.Offset; offset != nil {
_spec.Offset = *offset
}
if ps := ciq.order; len(ps) > 0 {
@ -407,7 +406,7 @@ func (ciq *ConfigItemQuery) querySpec() *sqlgraph.QuerySpec {
func (ciq *ConfigItemQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(ciq.driver.Dialect())
t1 := builder.Table(configitem.Table)
columns := ciq.fields
columns := ciq.ctx.Fields
if len(columns) == 0 {
columns = configitem.Columns
}
@ -416,7 +415,7 @@ func (ciq *ConfigItemQuery) sqlQuery(ctx context.Context) *sql.Selector {
selector = ciq.sql
selector.Select(selector.Columns(columns...)...)
}
if ciq.unique != nil && *ciq.unique {
if ciq.ctx.Unique != nil && *ciq.ctx.Unique {
selector.Distinct()
}
for _, p := range ciq.predicates {
@ -425,12 +424,12 @@ func (ciq *ConfigItemQuery) sqlQuery(ctx context.Context) *sql.Selector {
for _, p := range ciq.order {
p(selector)
}
if offset := ciq.offset; offset != nil {
if offset := ciq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start
// with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32)
}
if limit := ciq.limit; limit != nil {
if limit := ciq.ctx.Limit; limit != nil {
selector.Limit(*limit)
}
return selector
@ -438,13 +437,8 @@ func (ciq *ConfigItemQuery) sqlQuery(ctx context.Context) *sql.Selector {
// ConfigItemGroupBy is the group-by builder for ConfigItem entities.
type ConfigItemGroupBy struct {
config
selector
fields []string
fns []AggregateFunc
// intermediate query (i.e. traversal path).
sql *sql.Selector
path func(context.Context) (*sql.Selector, error)
build *ConfigItemQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
@ -453,74 +447,77 @@ func (cigb *ConfigItemGroupBy) Aggregate(fns ...AggregateFunc) *ConfigItemGroupB
return cigb
}
// Scan applies the group-by query and scans the result into the given value.
// Scan applies the selector query and scans the result into the given value.
func (cigb *ConfigItemGroupBy) Scan(ctx context.Context, v any) error {
query, err := cigb.path(ctx)
if err != nil {
ctx = setContextOp(ctx, cigb.build.ctx, "GroupBy")
if err := cigb.build.prepareQuery(ctx); err != nil {
return err
}
cigb.sql = query
return cigb.sqlScan(ctx, v)
return scanWithInterceptors[*ConfigItemQuery, *ConfigItemGroupBy](ctx, cigb.build, cigb, cigb.build.inters, v)
}
func (cigb *ConfigItemGroupBy) sqlScan(ctx context.Context, v any) error {
for _, f := range cigb.fields {
if !configitem.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
}
}
selector := cigb.sqlQuery()
if err := selector.Err(); err != nil {
return err
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := cigb.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
func (cigb *ConfigItemGroupBy) sqlQuery() *sql.Selector {
selector := cigb.sql.Select()
func (cigb *ConfigItemGroupBy) sqlScan(ctx context.Context, root *ConfigItemQuery, v any) error {
selector := root.sqlQuery(ctx).Select()
aggregation := make([]string, 0, len(cigb.fns))
for _, fn := range cigb.fns {
aggregation = append(aggregation, fn(selector))
}
// If no columns were selected in a custom aggregation function, the default
// selection is the fields used for "group-by", and the aggregation functions.
if len(selector.SelectedColumns()) == 0 {
columns := make([]string, 0, len(cigb.fields)+len(cigb.fns))
for _, f := range cigb.fields {
columns := make([]string, 0, len(*cigb.flds)+len(cigb.fns))
for _, f := range *cigb.flds {
columns = append(columns, selector.C(f))
}
columns = append(columns, aggregation...)
selector.Select(columns...)
}
return selector.GroupBy(selector.Columns(cigb.fields...)...)
selector.GroupBy(selector.Columns(*cigb.flds...)...)
if err := selector.Err(); err != nil {
return err
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := cigb.build.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
// ConfigItemSelect is the builder for selecting fields of ConfigItem entities.
type ConfigItemSelect struct {
*ConfigItemQuery
selector
// intermediate query (i.e. traversal path).
sql *sql.Selector
}
// Aggregate adds the given aggregation functions to the selector query.
func (cis *ConfigItemSelect) Aggregate(fns ...AggregateFunc) *ConfigItemSelect {
cis.fns = append(cis.fns, fns...)
return cis
}
// Scan applies the selector query and scans the result into the given value.
func (cis *ConfigItemSelect) Scan(ctx context.Context, v any) error {
ctx = setContextOp(ctx, cis.ctx, "Select")
if err := cis.prepareQuery(ctx); err != nil {
return err
}
cis.sql = cis.ConfigItemQuery.sqlQuery(ctx)
return cis.sqlScan(ctx, v)
return scanWithInterceptors[*ConfigItemQuery, *ConfigItemSelect](ctx, cis.ConfigItemQuery, cis, cis.inters, v)
}
func (cis *ConfigItemSelect) sqlScan(ctx context.Context, v any) error {
func (cis *ConfigItemSelect) sqlScan(ctx context.Context, root *ConfigItemQuery, v any) error {
selector := root.sqlQuery(ctx)
aggregation := make([]string, 0, len(cis.fns))
for _, fn := range cis.fns {
aggregation = append(aggregation, fn(selector))
}
switch n := len(*cis.selector.flds); {
case n == 0 && len(aggregation) > 0:
selector.Select(aggregation...)
case n != 0 && len(aggregation) > 0:
selector.AppendSelect(aggregation...)
}
rows := &sql.Rows{}
query, args := cis.sql.Query()
query, args := selector.Query()
if err := cis.driver.Query(ctx, query, args, rows); err != nil {
return err
}

View file

@ -71,35 +71,8 @@ func (ciu *ConfigItemUpdate) Mutation() *ConfigItemMutation {
// Save executes the query and returns the number of nodes affected by the update operation.
func (ciu *ConfigItemUpdate) Save(ctx context.Context) (int, error) {
var (
err error
affected int
)
ciu.defaults()
if len(ciu.hooks) == 0 {
affected, err = ciu.sqlSave(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*ConfigItemMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
ciu.mutation = mutation
affected, err = ciu.sqlSave(ctx)
mutation.done = true
return affected, err
})
for i := len(ciu.hooks) - 1; i >= 0; i-- {
if ciu.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = ciu.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, ciu.mutation); err != nil {
return 0, err
}
}
return affected, err
return withHooks(ctx, ciu.sqlSave, ciu.mutation, ciu.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@ -137,16 +110,7 @@ func (ciu *ConfigItemUpdate) defaults() {
}
func (ciu *ConfigItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
_spec := &sqlgraph.UpdateSpec{
Node: &sqlgraph.NodeSpec{
Table: configitem.Table,
Columns: configitem.Columns,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: configitem.FieldID,
},
},
}
_spec := sqlgraph.NewUpdateSpec(configitem.Table, configitem.Columns, sqlgraph.NewFieldSpec(configitem.FieldID, field.TypeInt))
if ps := ciu.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@ -155,44 +119,22 @@ func (ciu *ConfigItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
}
if value, ok := ciu.mutation.CreatedAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: configitem.FieldCreatedAt,
})
_spec.SetField(configitem.FieldCreatedAt, field.TypeTime, value)
}
if ciu.mutation.CreatedAtCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: configitem.FieldCreatedAt,
})
_spec.ClearField(configitem.FieldCreatedAt, field.TypeTime)
}
if value, ok := ciu.mutation.UpdatedAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: configitem.FieldUpdatedAt,
})
_spec.SetField(configitem.FieldUpdatedAt, field.TypeTime, value)
}
if ciu.mutation.UpdatedAtCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: configitem.FieldUpdatedAt,
})
_spec.ClearField(configitem.FieldUpdatedAt, field.TypeTime)
}
if value, ok := ciu.mutation.Name(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: configitem.FieldName,
})
_spec.SetField(configitem.FieldName, field.TypeString, value)
}
if value, ok := ciu.mutation.Value(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: configitem.FieldValue,
})
_spec.SetField(configitem.FieldValue, field.TypeString, value)
}
if n, err = sqlgraph.UpdateNodes(ctx, ciu.driver, _spec); err != nil {
if _, ok := err.(*sqlgraph.NotFoundError); ok {
@ -202,6 +144,7 @@ func (ciu *ConfigItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
return 0, err
}
ciu.mutation.done = true
return n, nil
}
@ -254,6 +197,12 @@ func (ciuo *ConfigItemUpdateOne) Mutation() *ConfigItemMutation {
return ciuo.mutation
}
// Where appends a list of predicates to the ConfigItemUpdate builder.
func (ciuo *ConfigItemUpdateOne) Where(ps ...predicate.ConfigItem) *ConfigItemUpdateOne {
ciuo.mutation.Where(ps...)
return ciuo
}
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (ciuo *ConfigItemUpdateOne) Select(field string, fields ...string) *ConfigItemUpdateOne {
@ -263,41 +212,8 @@ func (ciuo *ConfigItemUpdateOne) Select(field string, fields ...string) *ConfigI
// Save executes the query and returns the updated ConfigItem entity.
func (ciuo *ConfigItemUpdateOne) Save(ctx context.Context) (*ConfigItem, error) {
var (
err error
node *ConfigItem
)
ciuo.defaults()
if len(ciuo.hooks) == 0 {
node, err = ciuo.sqlSave(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*ConfigItemMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
ciuo.mutation = mutation
node, err = ciuo.sqlSave(ctx)
mutation.done = true
return node, err
})
for i := len(ciuo.hooks) - 1; i >= 0; i-- {
if ciuo.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = ciuo.hooks[i](mut)
}
v, err := mut.Mutate(ctx, ciuo.mutation)
if err != nil {
return nil, err
}
nv, ok := v.(*ConfigItem)
if !ok {
return nil, fmt.Errorf("unexpected node type %T returned from ConfigItemMutation", v)
}
node = nv
}
return node, err
return withHooks(ctx, ciuo.sqlSave, ciuo.mutation, ciuo.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@ -335,16 +251,7 @@ func (ciuo *ConfigItemUpdateOne) defaults() {
}
func (ciuo *ConfigItemUpdateOne) sqlSave(ctx context.Context) (_node *ConfigItem, err error) {
_spec := &sqlgraph.UpdateSpec{
Node: &sqlgraph.NodeSpec{
Table: configitem.Table,
Columns: configitem.Columns,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: configitem.FieldID,
},
},
}
_spec := sqlgraph.NewUpdateSpec(configitem.Table, configitem.Columns, sqlgraph.NewFieldSpec(configitem.FieldID, field.TypeInt))
id, ok := ciuo.mutation.ID()
if !ok {
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "ConfigItem.id" for update`)}
@ -370,44 +277,22 @@ func (ciuo *ConfigItemUpdateOne) sqlSave(ctx context.Context) (_node *ConfigItem
}
}
if value, ok := ciuo.mutation.CreatedAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: configitem.FieldCreatedAt,
})
_spec.SetField(configitem.FieldCreatedAt, field.TypeTime, value)
}
if ciuo.mutation.CreatedAtCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: configitem.FieldCreatedAt,
})
_spec.ClearField(configitem.FieldCreatedAt, field.TypeTime)
}
if value, ok := ciuo.mutation.UpdatedAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: configitem.FieldUpdatedAt,
})
_spec.SetField(configitem.FieldUpdatedAt, field.TypeTime, value)
}
if ciuo.mutation.UpdatedAtCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: configitem.FieldUpdatedAt,
})
_spec.ClearField(configitem.FieldUpdatedAt, field.TypeTime)
}
if value, ok := ciuo.mutation.Name(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: configitem.FieldName,
})
_spec.SetField(configitem.FieldName, field.TypeString, value)
}
if value, ok := ciuo.mutation.Value(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: configitem.FieldValue,
})
_spec.SetField(configitem.FieldValue, field.TypeString, value)
}
_node = &ConfigItem{config: ciuo.config}
_spec.Assign = _node.assignValues
@ -420,5 +305,6 @@ func (ciuo *ConfigItemUpdateOne) sqlSave(ctx context.Context) (_node *ConfigItem
}
return nil, err
}
ciuo.mutation.done = true
return _node, nil
}

View file

@ -1,33 +0,0 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
)
type clientCtxKey struct{}
// FromContext returns a Client stored inside a context, or nil if there isn't one.
func FromContext(ctx context.Context) *Client {
c, _ := ctx.Value(clientCtxKey{}).(*Client)
return c
}
// NewContext returns a new context with the given Client attached.
func NewContext(parent context.Context, c *Client) context.Context {
return context.WithValue(parent, clientCtxKey{}, c)
}
type txCtxKey struct{}
// TxFromContext returns a Tx stored inside a context, or nil if there isn't one.
func TxFromContext(ctx context.Context) *Tx {
tx, _ := ctx.Value(txCtxKey{}).(*Tx)
return tx
}
// NewTxContext returns a new context with the given Tx attached.
func NewTxContext(parent context.Context, tx *Tx) context.Context {
return context.WithValue(parent, txCtxKey{}, tx)
}

View file

@ -7,6 +7,7 @@ import (
"strings"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/alert"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/decision"
@ -51,7 +52,8 @@ type Decision struct {
AlertDecisions int `json:"alert_decisions,omitempty"`
// Edges holds the relations/edges for other nodes in the graph.
// The values are being populated by the DecisionQuery when eager-loading is set.
Edges DecisionEdges `json:"edges"`
Edges DecisionEdges `json:"edges"`
selectValues sql.SelectValues
}
// DecisionEdges holds the relations/edges for other nodes in the graph.
@ -90,7 +92,7 @@ func (*Decision) scanValues(columns []string) ([]any, error) {
case decision.FieldCreatedAt, decision.FieldUpdatedAt, decision.FieldUntil:
values[i] = new(sql.NullTime)
default:
return nil, fmt.Errorf("unexpected column %q for type Decision", columns[i])
values[i] = new(sql.UnknownType)
}
}
return values, nil
@ -209,21 +211,29 @@ func (d *Decision) assignValues(columns []string, values []any) error {
} else if value.Valid {
d.AlertDecisions = int(value.Int64)
}
default:
d.selectValues.Set(columns[i], values[i])
}
}
return nil
}
// GetValue returns the ent.Value that was dynamically selected and assigned to the Decision.
// This includes values selected through modifiers, order, etc.
func (d *Decision) GetValue(name string) (ent.Value, error) {
return d.selectValues.Get(name)
}
// QueryOwner queries the "owner" edge of the Decision entity.
func (d *Decision) QueryOwner() *AlertQuery {
return (&DecisionClient{config: d.config}).QueryOwner(d)
return NewDecisionClient(d.config).QueryOwner(d)
}
// Update returns a builder for updating this Decision.
// Note that you need to call Decision.Unwrap() before calling this method if this Decision
// was returned from a transaction, and the transaction was committed or rolled back.
func (d *Decision) Update() *DecisionUpdateOne {
return (&DecisionClient{config: d.config}).UpdateOne(d)
return NewDecisionClient(d.config).UpdateOne(d)
}
// Unwrap unwraps the Decision entity that was returned from a transaction after it was closed,
@ -301,9 +311,3 @@ func (d *Decision) String() string {
// Decisions is a parsable slice of Decision.
type Decisions []*Decision
func (d Decisions) config(cfg config) {
for _i := range d {
d[_i].config = cfg
}
}

View file

@ -4,6 +4,9 @@ package decision
import (
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
)
const (
@ -99,3 +102,105 @@ var (
// DefaultSimulated holds the default value on creation for the "simulated" field.
DefaultSimulated bool
)
// OrderOption defines the ordering options for the Decision queries.
type OrderOption func(*sql.Selector)
// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldID, opts...).ToFunc()
}
// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}
// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}
// ByUntil orders the results by the until field.
func ByUntil(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldUntil, opts...).ToFunc()
}
// ByScenario orders the results by the scenario field.
func ByScenario(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldScenario, opts...).ToFunc()
}
// ByType orders the results by the type field.
func ByType(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldType, opts...).ToFunc()
}
// ByStartIP orders the results by the start_ip field.
func ByStartIP(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldStartIP, opts...).ToFunc()
}
// ByEndIP orders the results by the end_ip field.
func ByEndIP(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldEndIP, opts...).ToFunc()
}
// ByStartSuffix orders the results by the start_suffix field.
func ByStartSuffix(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldStartSuffix, opts...).ToFunc()
}
// ByEndSuffix orders the results by the end_suffix field.
func ByEndSuffix(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldEndSuffix, opts...).ToFunc()
}
// ByIPSize orders the results by the ip_size field.
func ByIPSize(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldIPSize, opts...).ToFunc()
}
// ByScope orders the results by the scope field.
func ByScope(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldScope, opts...).ToFunc()
}
// ByValue orders the results by the value field.
func ByValue(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldValue, opts...).ToFunc()
}
// ByOrigin orders the results by the origin field.
func ByOrigin(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldOrigin, opts...).ToFunc()
}
// BySimulated orders the results by the simulated field.
func BySimulated(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldSimulated, opts...).ToFunc()
}
// ByUUID orders the results by the uuid field.
func ByUUID(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldUUID, opts...).ToFunc()
}
// ByAlertDecisions orders the results by the alert_decisions field.
func ByAlertDecisions(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldAlertDecisions, opts...).ToFunc()
}
// ByOwnerField orders the results by owner field.
func ByOwnerField(field string, opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newOwnerStep(), sql.OrderByField(field, opts...))
}
}
func newOwnerStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(OwnerInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),
)
}
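These OrderOption helpers replace the old OrderFunc values in the query builders. A minimal usage sketch, assuming a generated Client named client, a ctx, and the entgo.io/ent/dialect/sql package imported as sql (all illustrative, not part of this diff):

decisions, err := client.Decision.Query().
	Order(decision.ByCreatedAt(sql.OrderDesc()), decision.ByValue()).
	Limit(10).
	All(ctx)
if err != nil {
	// handle the query error
}
_ = decisions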

File diff suppressed because it is too large Load diff
File diff suppressed because it is too large

View file

@ -231,50 +231,8 @@ func (dc *DecisionCreate) Mutation() *DecisionMutation {
// Save creates the Decision in the database.
func (dc *DecisionCreate) Save(ctx context.Context) (*Decision, error) {
var (
err error
node *Decision
)
dc.defaults()
if len(dc.hooks) == 0 {
if err = dc.check(); err != nil {
return nil, err
}
node, err = dc.sqlSave(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*DecisionMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
if err = dc.check(); err != nil {
return nil, err
}
dc.mutation = mutation
if node, err = dc.sqlSave(ctx); err != nil {
return nil, err
}
mutation.id = &node.ID
mutation.done = true
return node, err
})
for i := len(dc.hooks) - 1; i >= 0; i-- {
if dc.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = dc.hooks[i](mut)
}
v, err := mut.Mutate(ctx, dc.mutation)
if err != nil {
return nil, err
}
nv, ok := v.(*Decision)
if !ok {
return nil, fmt.Errorf("unexpected node type %T returned from DecisionMutation", v)
}
node = nv
}
return node, err
return withHooks(ctx, dc.sqlSave, dc.mutation, dc.hooks)
}
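Save now delegates to the shared withHooks helper instead of inlining the hook chain, so calling code is unchanged. A hedged sketch with illustrative field values (client and ctx assumed):

d, err := client.Decision.Create().
	SetScenario("crowdsecurity/ssh-bf").
	SetType("ban").
	SetScope("Ip").
	SetValue("192.0.2.1").
	SetOrigin("cscli").
	SetUntil(time.Now().Add(4 * time.Hour)).
	Save(ctx)
if err != nil {
	// handle the create error
}
_ = d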
// SaveX calls Save and panics if Save returns an error.
@ -339,6 +297,9 @@ func (dc *DecisionCreate) check() error {
}
func (dc *DecisionCreate) sqlSave(ctx context.Context) (*Decision, error) {
if err := dc.check(); err != nil {
return nil, err
}
_node, _spec := dc.createSpec()
if err := sqlgraph.CreateNode(ctx, dc.driver, _spec); err != nil {
if sqlgraph.IsConstraintError(err) {
@ -348,138 +309,74 @@ func (dc *DecisionCreate) sqlSave(ctx context.Context) (*Decision, error) {
}
id := _spec.ID.Value.(int64)
_node.ID = int(id)
dc.mutation.id = &_node.ID
dc.mutation.done = true
return _node, nil
}
func (dc *DecisionCreate) createSpec() (*Decision, *sqlgraph.CreateSpec) {
var (
_node = &Decision{config: dc.config}
_spec = &sqlgraph.CreateSpec{
Table: decision.Table,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: decision.FieldID,
},
}
_spec = sqlgraph.NewCreateSpec(decision.Table, sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt))
)
if value, ok := dc.mutation.CreatedAt(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: decision.FieldCreatedAt,
})
_spec.SetField(decision.FieldCreatedAt, field.TypeTime, value)
_node.CreatedAt = &value
}
if value, ok := dc.mutation.UpdatedAt(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: decision.FieldUpdatedAt,
})
_spec.SetField(decision.FieldUpdatedAt, field.TypeTime, value)
_node.UpdatedAt = &value
}
if value, ok := dc.mutation.Until(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: decision.FieldUntil,
})
_spec.SetField(decision.FieldUntil, field.TypeTime, value)
_node.Until = &value
}
if value, ok := dc.mutation.Scenario(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: decision.FieldScenario,
})
_spec.SetField(decision.FieldScenario, field.TypeString, value)
_node.Scenario = value
}
if value, ok := dc.mutation.GetType(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: decision.FieldType,
})
_spec.SetField(decision.FieldType, field.TypeString, value)
_node.Type = value
}
if value, ok := dc.mutation.StartIP(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Value: value,
Column: decision.FieldStartIP,
})
_spec.SetField(decision.FieldStartIP, field.TypeInt64, value)
_node.StartIP = value
}
if value, ok := dc.mutation.EndIP(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Value: value,
Column: decision.FieldEndIP,
})
_spec.SetField(decision.FieldEndIP, field.TypeInt64, value)
_node.EndIP = value
}
if value, ok := dc.mutation.StartSuffix(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Value: value,
Column: decision.FieldStartSuffix,
})
_spec.SetField(decision.FieldStartSuffix, field.TypeInt64, value)
_node.StartSuffix = value
}
if value, ok := dc.mutation.EndSuffix(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Value: value,
Column: decision.FieldEndSuffix,
})
_spec.SetField(decision.FieldEndSuffix, field.TypeInt64, value)
_node.EndSuffix = value
}
if value, ok := dc.mutation.IPSize(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Value: value,
Column: decision.FieldIPSize,
})
_spec.SetField(decision.FieldIPSize, field.TypeInt64, value)
_node.IPSize = value
}
if value, ok := dc.mutation.Scope(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: decision.FieldScope,
})
_spec.SetField(decision.FieldScope, field.TypeString, value)
_node.Scope = value
}
if value, ok := dc.mutation.Value(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: decision.FieldValue,
})
_spec.SetField(decision.FieldValue, field.TypeString, value)
_node.Value = value
}
if value, ok := dc.mutation.Origin(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: decision.FieldOrigin,
})
_spec.SetField(decision.FieldOrigin, field.TypeString, value)
_node.Origin = value
}
if value, ok := dc.mutation.Simulated(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeBool,
Value: value,
Column: decision.FieldSimulated,
})
_spec.SetField(decision.FieldSimulated, field.TypeBool, value)
_node.Simulated = value
}
if value, ok := dc.mutation.UUID(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: decision.FieldUUID,
})
_spec.SetField(decision.FieldUUID, field.TypeString, value)
_node.UUID = value
}
if nodes := dc.mutation.OwnerIDs(); len(nodes) > 0 {
@ -490,10 +387,7 @@ func (dc *DecisionCreate) createSpec() (*Decision, *sqlgraph.CreateSpec) {
Columns: []string{decision.OwnerColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: alert.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
@ -508,11 +402,15 @@ func (dc *DecisionCreate) createSpec() (*Decision, *sqlgraph.CreateSpec) {
// DecisionCreateBulk is the builder for creating many Decision entities in bulk.
type DecisionCreateBulk struct {
config
err error
builders []*DecisionCreate
}
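The new err field lets Save short-circuit when a builder was constructed with an error. Bulk usage itself is unchanged; a sketch assuming a slice of IP strings named ips:

builders := make([]*ent.DecisionCreate, 0, len(ips))
for _, ip := range ips {
	builders = append(builders, client.Decision.Create().
		SetScenario("manual").
		SetType("ban").
		SetScope("Ip").
		SetValue(ip))
}
created, err := client.Decision.CreateBulk(builders...).Save(ctx)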
// Save creates the Decision entities in the database.
func (dcb *DecisionCreateBulk) Save(ctx context.Context) ([]*Decision, error) {
if dcb.err != nil {
return nil, dcb.err
}
specs := make([]*sqlgraph.CreateSpec, len(dcb.builders))
nodes := make([]*Decision, len(dcb.builders))
mutators := make([]Mutator, len(dcb.builders))
@ -529,8 +427,8 @@ func (dcb *DecisionCreateBulk) Save(ctx context.Context) ([]*Decision, error) {
return nil, err
}
builder.mutation = mutation
nodes[i], specs[i] = builder.createSpec()
var err error
nodes[i], specs[i] = builder.createSpec()
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, dcb.builders[i+1].mutation)
} else {

View file

@ -4,7 +4,6 @@ package ent
import (
"context"
"fmt"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
@ -28,34 +27,7 @@ func (dd *DecisionDelete) Where(ps ...predicate.Decision) *DecisionDelete {
// Exec executes the deletion query and returns how many vertices were deleted.
func (dd *DecisionDelete) Exec(ctx context.Context) (int, error) {
var (
err error
affected int
)
if len(dd.hooks) == 0 {
affected, err = dd.sqlExec(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*DecisionMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
dd.mutation = mutation
affected, err = dd.sqlExec(ctx)
mutation.done = true
return affected, err
})
for i := len(dd.hooks) - 1; i >= 0; i-- {
if dd.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = dd.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, dd.mutation); err != nil {
return 0, err
}
}
return affected, err
return withHooks(ctx, dd.sqlExec, dd.mutation, dd.hooks)
}
// ExecX is like Exec, but panics if an error occurs.
@ -68,15 +40,7 @@ func (dd *DecisionDelete) ExecX(ctx context.Context) int {
}
func (dd *DecisionDelete) sqlExec(ctx context.Context) (int, error) {
_spec := &sqlgraph.DeleteSpec{
Node: &sqlgraph.NodeSpec{
Table: decision.Table,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: decision.FieldID,
},
},
}
_spec := sqlgraph.NewDeleteSpec(decision.Table, sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt))
if ps := dd.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@ -88,6 +52,7 @@ func (dd *DecisionDelete) sqlExec(ctx context.Context) (int, error) {
if err != nil && sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
dd.mutation.done = true
return affected, err
}
@ -96,6 +61,12 @@ type DecisionDeleteOne struct {
dd *DecisionDelete
}
// Where appends a list of predicates to the DecisionDelete builder.
func (ddo *DecisionDeleteOne) Where(ps ...predicate.Decision) *DecisionDeleteOne {
ddo.dd.mutation.Where(ps...)
return ddo
}
// Exec executes the deletion query.
func (ddo *DecisionDeleteOne) Exec(ctx context.Context) error {
n, err := ddo.dd.Exec(ctx)
@ -111,5 +82,7 @@ func (ddo *DecisionDeleteOne) Exec(ctx context.Context) error {
// ExecX is like Exec, but panics if an error occurs.
func (ddo *DecisionDeleteOne) ExecX(ctx context.Context) {
ddo.dd.ExecX(ctx)
if err := ddo.Exec(ctx); err != nil {
panic(err)
}
}
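DecisionDeleteOne now exposes Where and reuses Exec from the embedded DecisionDelete. A short sketch of both paths, with predicate helpers assumed from the decision package and id/ctx illustrative:

// Bulk delete: remove expired decisions.
n, err := client.Decision.Delete().
	Where(decision.UntilLT(time.Now())).
	Exec(ctx)

// Single delete by ID.
err = client.Decision.DeleteOneID(id).Exec(ctx)
_ = n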

View file

@ -18,11 +18,9 @@ import (
// DecisionQuery is the builder for querying Decision entities.
type DecisionQuery struct {
config
limit *int
offset *int
unique *bool
order []OrderFunc
fields []string
ctx *QueryContext
order []decision.OrderOption
inters []Interceptor
predicates []predicate.Decision
withOwner *AlertQuery
// intermediate query (i.e. traversal path).
@ -36,34 +34,34 @@ func (dq *DecisionQuery) Where(ps ...predicate.Decision) *DecisionQuery {
return dq
}
// Limit adds a limit step to the query.
// Limit the number of records to be returned by this query.
func (dq *DecisionQuery) Limit(limit int) *DecisionQuery {
dq.limit = &limit
dq.ctx.Limit = &limit
return dq
}
// Offset adds an offset step to the query.
// Offset to start from.
func (dq *DecisionQuery) Offset(offset int) *DecisionQuery {
dq.offset = &offset
dq.ctx.Offset = &offset
return dq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (dq *DecisionQuery) Unique(unique bool) *DecisionQuery {
dq.unique = &unique
dq.ctx.Unique = &unique
return dq
}
// Order adds an order step to the query.
func (dq *DecisionQuery) Order(o ...OrderFunc) *DecisionQuery {
// Order specifies how the records should be ordered.
func (dq *DecisionQuery) Order(o ...decision.OrderOption) *DecisionQuery {
dq.order = append(dq.order, o...)
return dq
}
// QueryOwner chains the current query on the "owner" edge.
func (dq *DecisionQuery) QueryOwner() *AlertQuery {
query := &AlertQuery{config: dq.config}
query := (&AlertClient{config: dq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := dq.prepareQuery(ctx); err != nil {
return nil, err
@ -86,7 +84,7 @@ func (dq *DecisionQuery) QueryOwner() *AlertQuery {
// First returns the first Decision entity from the query.
// Returns a *NotFoundError when no Decision was found.
func (dq *DecisionQuery) First(ctx context.Context) (*Decision, error) {
nodes, err := dq.Limit(1).All(ctx)
nodes, err := dq.Limit(1).All(setContextOp(ctx, dq.ctx, "First"))
if err != nil {
return nil, err
}
@ -109,7 +107,7 @@ func (dq *DecisionQuery) FirstX(ctx context.Context) *Decision {
// Returns a *NotFoundError when no Decision ID was found.
func (dq *DecisionQuery) FirstID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = dq.Limit(1).IDs(ctx); err != nil {
if ids, err = dq.Limit(1).IDs(setContextOp(ctx, dq.ctx, "FirstID")); err != nil {
return
}
if len(ids) == 0 {
@ -132,7 +130,7 @@ func (dq *DecisionQuery) FirstIDX(ctx context.Context) int {
// Returns a *NotSingularError when more than one Decision entity is found.
// Returns a *NotFoundError when no Decision entities are found.
func (dq *DecisionQuery) Only(ctx context.Context) (*Decision, error) {
nodes, err := dq.Limit(2).All(ctx)
nodes, err := dq.Limit(2).All(setContextOp(ctx, dq.ctx, "Only"))
if err != nil {
return nil, err
}
@ -160,7 +158,7 @@ func (dq *DecisionQuery) OnlyX(ctx context.Context) *Decision {
// Returns a *NotFoundError when no entities are found.
func (dq *DecisionQuery) OnlyID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = dq.Limit(2).IDs(ctx); err != nil {
if ids, err = dq.Limit(2).IDs(setContextOp(ctx, dq.ctx, "OnlyID")); err != nil {
return
}
switch len(ids) {
@ -185,10 +183,12 @@ func (dq *DecisionQuery) OnlyIDX(ctx context.Context) int {
// All executes the query and returns a list of Decisions.
func (dq *DecisionQuery) All(ctx context.Context) ([]*Decision, error) {
ctx = setContextOp(ctx, dq.ctx, "All")
if err := dq.prepareQuery(ctx); err != nil {
return nil, err
}
return dq.sqlAll(ctx)
qr := querierAll[[]*Decision, *DecisionQuery]()
return withInterceptors[[]*Decision](ctx, dq, qr, dq.inters)
}
// AllX is like All, but panics if an error occurs.
@ -201,9 +201,12 @@ func (dq *DecisionQuery) AllX(ctx context.Context) []*Decision {
}
// IDs executes the query and returns a list of Decision IDs.
func (dq *DecisionQuery) IDs(ctx context.Context) ([]int, error) {
var ids []int
if err := dq.Select(decision.FieldID).Scan(ctx, &ids); err != nil {
func (dq *DecisionQuery) IDs(ctx context.Context) (ids []int, err error) {
if dq.ctx.Unique == nil && dq.path != nil {
dq.Unique(true)
}
ctx = setContextOp(ctx, dq.ctx, "IDs")
if err = dq.Select(decision.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
@ -220,10 +223,11 @@ func (dq *DecisionQuery) IDsX(ctx context.Context) []int {
// Count returns the count of the given query.
func (dq *DecisionQuery) Count(ctx context.Context) (int, error) {
ctx = setContextOp(ctx, dq.ctx, "Count")
if err := dq.prepareQuery(ctx); err != nil {
return 0, err
}
return dq.sqlCount(ctx)
return withInterceptors[int](ctx, dq, querierCount[*DecisionQuery](), dq.inters)
}
// CountX is like Count, but panics if an error occurs.
@ -237,10 +241,15 @@ func (dq *DecisionQuery) CountX(ctx context.Context) int {
// Exist returns true if the query has elements in the graph.
func (dq *DecisionQuery) Exist(ctx context.Context) (bool, error) {
if err := dq.prepareQuery(ctx); err != nil {
return false, err
ctx = setContextOp(ctx, dq.ctx, "Exist")
switch _, err := dq.FirstID(ctx); {
case IsNotFound(err):
return false, nil
case err != nil:
return false, fmt.Errorf("ent: check existence: %w", err)
default:
return true, nil
}
return dq.sqlExist(ctx)
}
// ExistX is like Exist, but panics if an error occurs.
@ -260,22 +269,21 @@ func (dq *DecisionQuery) Clone() *DecisionQuery {
}
return &DecisionQuery{
config: dq.config,
limit: dq.limit,
offset: dq.offset,
order: append([]OrderFunc{}, dq.order...),
ctx: dq.ctx.Clone(),
order: append([]decision.OrderOption{}, dq.order...),
inters: append([]Interceptor{}, dq.inters...),
predicates: append([]predicate.Decision{}, dq.predicates...),
withOwner: dq.withOwner.Clone(),
// clone intermediate query.
sql: dq.sql.Clone(),
path: dq.path,
unique: dq.unique,
sql: dq.sql.Clone(),
path: dq.path,
}
}
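Clone now copies the shared QueryContext instead of the individual limit/offset/unique fields; the builder-reuse pattern it enables is unchanged. For example (sketch):

base := client.Decision.Query().Where(decision.SimulatedEQ(false))
active, err := base.Clone().
	Where(decision.UntilGT(time.Now())).
	All(ctx)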
// WithOwner tells the query-builder to eager-load the nodes that are connected to
// the "owner" edge. The optional arguments are used to configure the query builder of the edge.
func (dq *DecisionQuery) WithOwner(opts ...func(*AlertQuery)) *DecisionQuery {
query := &AlertQuery{config: dq.config}
query := (&AlertClient{config: dq.config}).Query()
for _, opt := range opts {
opt(query)
}
@ -298,16 +306,11 @@ func (dq *DecisionQuery) WithOwner(opts ...func(*AlertQuery)) *DecisionQuery {
// Aggregate(ent.Count()).
// Scan(ctx, &v)
func (dq *DecisionQuery) GroupBy(field string, fields ...string) *DecisionGroupBy {
grbuild := &DecisionGroupBy{config: dq.config}
grbuild.fields = append([]string{field}, fields...)
grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) {
if err := dq.prepareQuery(ctx); err != nil {
return nil, err
}
return dq.sqlQuery(ctx), nil
}
dq.ctx.Fields = append([]string{field}, fields...)
grbuild := &DecisionGroupBy{build: dq}
grbuild.flds = &dq.ctx.Fields
grbuild.label = decision.Label
grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan
grbuild.scan = grbuild.Scan
return grbuild
}
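GroupBy now records the selected fields on the query context and hands the whole query to DecisionGroupBy. The calling pattern from the doc comment still applies; spelled out as a sketch (column names from the decision package, client and ctx assumed):

var rows []struct {
	Scenario string `json:"scenario"`
	Count    int    `json:"count"`
}
err := client.Decision.Query().
	GroupBy(decision.FieldScenario).
	Aggregate(ent.Count()).
	Scan(ctx, &rows)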
@ -324,15 +327,30 @@ func (dq *DecisionQuery) GroupBy(field string, fields ...string) *DecisionGroupB
// Select(decision.FieldCreatedAt).
// Scan(ctx, &v)
func (dq *DecisionQuery) Select(fields ...string) *DecisionSelect {
dq.fields = append(dq.fields, fields...)
selbuild := &DecisionSelect{DecisionQuery: dq}
selbuild.label = decision.Label
selbuild.flds, selbuild.scan = &dq.fields, selbuild.Scan
return selbuild
dq.ctx.Fields = append(dq.ctx.Fields, fields...)
sbuild := &DecisionSelect{DecisionQuery: dq}
sbuild.label = decision.Label
sbuild.flds, sbuild.scan = &dq.ctx.Fields, sbuild.Scan
return sbuild
}
// Aggregate returns a DecisionSelect configured with the given aggregations.
func (dq *DecisionQuery) Aggregate(fns ...AggregateFunc) *DecisionSelect {
return dq.Select().Aggregate(fns...)
}
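Aggregate is a new convenience that wraps Select with aggregations only. For a plain count, the call reads (sketch):

total, err := client.Decision.Query().
	Where(decision.OriginEQ("cscli")).
	Aggregate(ent.Count()).
	Int(ctx)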
func (dq *DecisionQuery) prepareQuery(ctx context.Context) error {
for _, f := range dq.fields {
for _, inter := range dq.inters {
if inter == nil {
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
}
if trv, ok := inter.(Traverser); ok {
if err := trv.Traverse(ctx, dq); err != nil {
return err
}
}
}
for _, f := range dq.ctx.Fields {
if !decision.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
@ -392,6 +410,9 @@ func (dq *DecisionQuery) loadOwner(ctx context.Context, query *AlertQuery, nodes
}
nodeids[fk] = append(nodeids[fk], nodes[i])
}
if len(ids) == 0 {
return nil
}
query.Where(alert.IDIn(ids...))
neighbors, err := query.All(ctx)
if err != nil {
@ -411,41 +432,22 @@ func (dq *DecisionQuery) loadOwner(ctx context.Context, query *AlertQuery, nodes
func (dq *DecisionQuery) sqlCount(ctx context.Context) (int, error) {
_spec := dq.querySpec()
_spec.Node.Columns = dq.fields
if len(dq.fields) > 0 {
_spec.Unique = dq.unique != nil && *dq.unique
_spec.Node.Columns = dq.ctx.Fields
if len(dq.ctx.Fields) > 0 {
_spec.Unique = dq.ctx.Unique != nil && *dq.ctx.Unique
}
return sqlgraph.CountNodes(ctx, dq.driver, _spec)
}
func (dq *DecisionQuery) sqlExist(ctx context.Context) (bool, error) {
switch _, err := dq.FirstID(ctx); {
case IsNotFound(err):
return false, nil
case err != nil:
return false, fmt.Errorf("ent: check existence: %w", err)
default:
return true, nil
}
}
func (dq *DecisionQuery) querySpec() *sqlgraph.QuerySpec {
_spec := &sqlgraph.QuerySpec{
Node: &sqlgraph.NodeSpec{
Table: decision.Table,
Columns: decision.Columns,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: decision.FieldID,
},
},
From: dq.sql,
Unique: true,
}
if unique := dq.unique; unique != nil {
_spec := sqlgraph.NewQuerySpec(decision.Table, decision.Columns, sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt))
_spec.From = dq.sql
if unique := dq.ctx.Unique; unique != nil {
_spec.Unique = *unique
} else if dq.path != nil {
_spec.Unique = true
}
if fields := dq.fields; len(fields) > 0 {
if fields := dq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, decision.FieldID)
for i := range fields {
@ -453,6 +455,9 @@ func (dq *DecisionQuery) querySpec() *sqlgraph.QuerySpec {
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
}
}
if dq.withOwner != nil {
_spec.Node.AddColumnOnce(decision.FieldAlertDecisions)
}
}
if ps := dq.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
@ -461,10 +466,10 @@ func (dq *DecisionQuery) querySpec() *sqlgraph.QuerySpec {
}
}
}
if limit := dq.limit; limit != nil {
if limit := dq.ctx.Limit; limit != nil {
_spec.Limit = *limit
}
if offset := dq.offset; offset != nil {
if offset := dq.ctx.Offset; offset != nil {
_spec.Offset = *offset
}
if ps := dq.order; len(ps) > 0 {
@ -480,7 +485,7 @@ func (dq *DecisionQuery) querySpec() *sqlgraph.QuerySpec {
func (dq *DecisionQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(dq.driver.Dialect())
t1 := builder.Table(decision.Table)
columns := dq.fields
columns := dq.ctx.Fields
if len(columns) == 0 {
columns = decision.Columns
}
@ -489,7 +494,7 @@ func (dq *DecisionQuery) sqlQuery(ctx context.Context) *sql.Selector {
selector = dq.sql
selector.Select(selector.Columns(columns...)...)
}
if dq.unique != nil && *dq.unique {
if dq.ctx.Unique != nil && *dq.ctx.Unique {
selector.Distinct()
}
for _, p := range dq.predicates {
@ -498,12 +503,12 @@ func (dq *DecisionQuery) sqlQuery(ctx context.Context) *sql.Selector {
for _, p := range dq.order {
p(selector)
}
if offset := dq.offset; offset != nil {
if offset := dq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start
// with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32)
}
if limit := dq.limit; limit != nil {
if limit := dq.ctx.Limit; limit != nil {
selector.Limit(*limit)
}
return selector
@ -511,13 +516,8 @@ func (dq *DecisionQuery) sqlQuery(ctx context.Context) *sql.Selector {
// DecisionGroupBy is the group-by builder for Decision entities.
type DecisionGroupBy struct {
config
selector
fields []string
fns []AggregateFunc
// intermediate query (i.e. traversal path).
sql *sql.Selector
path func(context.Context) (*sql.Selector, error)
build *DecisionQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
@ -526,74 +526,77 @@ func (dgb *DecisionGroupBy) Aggregate(fns ...AggregateFunc) *DecisionGroupBy {
return dgb
}
// Scan applies the group-by query and scans the result into the given value.
// Scan applies the selector query and scans the result into the given value.
func (dgb *DecisionGroupBy) Scan(ctx context.Context, v any) error {
query, err := dgb.path(ctx)
if err != nil {
ctx = setContextOp(ctx, dgb.build.ctx, "GroupBy")
if err := dgb.build.prepareQuery(ctx); err != nil {
return err
}
dgb.sql = query
return dgb.sqlScan(ctx, v)
return scanWithInterceptors[*DecisionQuery, *DecisionGroupBy](ctx, dgb.build, dgb, dgb.build.inters, v)
}
func (dgb *DecisionGroupBy) sqlScan(ctx context.Context, v any) error {
for _, f := range dgb.fields {
if !decision.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
}
}
selector := dgb.sqlQuery()
if err := selector.Err(); err != nil {
return err
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := dgb.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
func (dgb *DecisionGroupBy) sqlQuery() *sql.Selector {
selector := dgb.sql.Select()
func (dgb *DecisionGroupBy) sqlScan(ctx context.Context, root *DecisionQuery, v any) error {
selector := root.sqlQuery(ctx).Select()
aggregation := make([]string, 0, len(dgb.fns))
for _, fn := range dgb.fns {
aggregation = append(aggregation, fn(selector))
}
// If no columns were selected in a custom aggregation function, the default
// selection is the fields used for "group-by", and the aggregation functions.
if len(selector.SelectedColumns()) == 0 {
columns := make([]string, 0, len(dgb.fields)+len(dgb.fns))
for _, f := range dgb.fields {
columns := make([]string, 0, len(*dgb.flds)+len(dgb.fns))
for _, f := range *dgb.flds {
columns = append(columns, selector.C(f))
}
columns = append(columns, aggregation...)
selector.Select(columns...)
}
return selector.GroupBy(selector.Columns(dgb.fields...)...)
selector.GroupBy(selector.Columns(*dgb.flds...)...)
if err := selector.Err(); err != nil {
return err
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := dgb.build.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
// DecisionSelect is the builder for selecting fields of Decision entities.
type DecisionSelect struct {
*DecisionQuery
selector
// intermediate query (i.e. traversal path).
sql *sql.Selector
}
// Aggregate adds the given aggregation functions to the selector query.
func (ds *DecisionSelect) Aggregate(fns ...AggregateFunc) *DecisionSelect {
ds.fns = append(ds.fns, fns...)
return ds
}
// Scan applies the selector query and scans the result into the given value.
func (ds *DecisionSelect) Scan(ctx context.Context, v any) error {
ctx = setContextOp(ctx, ds.ctx, "Select")
if err := ds.prepareQuery(ctx); err != nil {
return err
}
ds.sql = ds.DecisionQuery.sqlQuery(ctx)
return ds.sqlScan(ctx, v)
return scanWithInterceptors[*DecisionQuery, *DecisionSelect](ctx, ds.DecisionQuery, ds, ds.inters, v)
}
func (ds *DecisionSelect) sqlScan(ctx context.Context, v any) error {
func (ds *DecisionSelect) sqlScan(ctx context.Context, root *DecisionQuery, v any) error {
selector := root.sqlQuery(ctx)
aggregation := make([]string, 0, len(ds.fns))
for _, fn := range ds.fns {
aggregation = append(aggregation, fn(selector))
}
switch n := len(*ds.selector.flds); {
case n == 0 && len(aggregation) > 0:
selector.Select(aggregation...)
case n != 0 && len(aggregation) > 0:
selector.AppendSelect(aggregation...)
}
rows := &sql.Rows{}
query, args := ds.sql.Query()
query, args := selector.Query()
if err := ds.driver.Query(ctx, query, args, rows); err != nil {
return err
}

View file

@ -324,35 +324,8 @@ func (du *DecisionUpdate) ClearOwner() *DecisionUpdate {
// Save executes the query and returns the number of nodes affected by the update operation.
func (du *DecisionUpdate) Save(ctx context.Context) (int, error) {
var (
err error
affected int
)
du.defaults()
if len(du.hooks) == 0 {
affected, err = du.sqlSave(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*DecisionMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
du.mutation = mutation
affected, err = du.sqlSave(ctx)
mutation.done = true
return affected, err
})
for i := len(du.hooks) - 1; i >= 0; i-- {
if du.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = du.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, du.mutation); err != nil {
return 0, err
}
}
return affected, err
return withHooks(ctx, du.sqlSave, du.mutation, du.hooks)
}
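As with create and delete, the hook chain now lives in withHooks. A typical bulk update remains (sketch, predicate and setter names from the generated packages):

n, err := client.Decision.Update().
	Where(decision.ScopeEQ("Ip"), decision.ValueEQ("192.0.2.1")).
	SetSimulated(true).
	Save(ctx)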
// SaveX is like Save, but panics if an error occurs.
@ -390,16 +363,7 @@ func (du *DecisionUpdate) defaults() {
}
func (du *DecisionUpdate) sqlSave(ctx context.Context) (n int, err error) {
_spec := &sqlgraph.UpdateSpec{
Node: &sqlgraph.NodeSpec{
Table: decision.Table,
Columns: decision.Columns,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: decision.FieldID,
},
},
}
_spec := sqlgraph.NewUpdateSpec(decision.Table, decision.Columns, sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt))
if ps := du.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@ -408,198 +372,91 @@ func (du *DecisionUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
}
if value, ok := du.mutation.CreatedAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: decision.FieldCreatedAt,
})
_spec.SetField(decision.FieldCreatedAt, field.TypeTime, value)
}
if du.mutation.CreatedAtCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: decision.FieldCreatedAt,
})
_spec.ClearField(decision.FieldCreatedAt, field.TypeTime)
}
if value, ok := du.mutation.UpdatedAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: decision.FieldUpdatedAt,
})
_spec.SetField(decision.FieldUpdatedAt, field.TypeTime, value)
}
if du.mutation.UpdatedAtCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: decision.FieldUpdatedAt,
})
_spec.ClearField(decision.FieldUpdatedAt, field.TypeTime)
}
if value, ok := du.mutation.Until(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: decision.FieldUntil,
})
_spec.SetField(decision.FieldUntil, field.TypeTime, value)
}
if du.mutation.UntilCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: decision.FieldUntil,
})
_spec.ClearField(decision.FieldUntil, field.TypeTime)
}
if value, ok := du.mutation.Scenario(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: decision.FieldScenario,
})
_spec.SetField(decision.FieldScenario, field.TypeString, value)
}
if value, ok := du.mutation.GetType(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: decision.FieldType,
})
_spec.SetField(decision.FieldType, field.TypeString, value)
}
if value, ok := du.mutation.StartIP(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Value: value,
Column: decision.FieldStartIP,
})
_spec.SetField(decision.FieldStartIP, field.TypeInt64, value)
}
if value, ok := du.mutation.AddedStartIP(); ok {
_spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Value: value,
Column: decision.FieldStartIP,
})
_spec.AddField(decision.FieldStartIP, field.TypeInt64, value)
}
if du.mutation.StartIPCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Column: decision.FieldStartIP,
})
_spec.ClearField(decision.FieldStartIP, field.TypeInt64)
}
if value, ok := du.mutation.EndIP(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Value: value,
Column: decision.FieldEndIP,
})
_spec.SetField(decision.FieldEndIP, field.TypeInt64, value)
}
if value, ok := du.mutation.AddedEndIP(); ok {
_spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Value: value,
Column: decision.FieldEndIP,
})
_spec.AddField(decision.FieldEndIP, field.TypeInt64, value)
}
if du.mutation.EndIPCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Column: decision.FieldEndIP,
})
_spec.ClearField(decision.FieldEndIP, field.TypeInt64)
}
if value, ok := du.mutation.StartSuffix(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Value: value,
Column: decision.FieldStartSuffix,
})
_spec.SetField(decision.FieldStartSuffix, field.TypeInt64, value)
}
if value, ok := du.mutation.AddedStartSuffix(); ok {
_spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Value: value,
Column: decision.FieldStartSuffix,
})
_spec.AddField(decision.FieldStartSuffix, field.TypeInt64, value)
}
if du.mutation.StartSuffixCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Column: decision.FieldStartSuffix,
})
_spec.ClearField(decision.FieldStartSuffix, field.TypeInt64)
}
if value, ok := du.mutation.EndSuffix(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Value: value,
Column: decision.FieldEndSuffix,
})
_spec.SetField(decision.FieldEndSuffix, field.TypeInt64, value)
}
if value, ok := du.mutation.AddedEndSuffix(); ok {
_spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Value: value,
Column: decision.FieldEndSuffix,
})
_spec.AddField(decision.FieldEndSuffix, field.TypeInt64, value)
}
if du.mutation.EndSuffixCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Column: decision.FieldEndSuffix,
})
_spec.ClearField(decision.FieldEndSuffix, field.TypeInt64)
}
if value, ok := du.mutation.IPSize(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Value: value,
Column: decision.FieldIPSize,
})
_spec.SetField(decision.FieldIPSize, field.TypeInt64, value)
}
if value, ok := du.mutation.AddedIPSize(); ok {
_spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Value: value,
Column: decision.FieldIPSize,
})
_spec.AddField(decision.FieldIPSize, field.TypeInt64, value)
}
if du.mutation.IPSizeCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Column: decision.FieldIPSize,
})
_spec.ClearField(decision.FieldIPSize, field.TypeInt64)
}
if value, ok := du.mutation.Scope(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: decision.FieldScope,
})
_spec.SetField(decision.FieldScope, field.TypeString, value)
}
if value, ok := du.mutation.Value(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: decision.FieldValue,
})
_spec.SetField(decision.FieldValue, field.TypeString, value)
}
if value, ok := du.mutation.Origin(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: decision.FieldOrigin,
})
_spec.SetField(decision.FieldOrigin, field.TypeString, value)
}
if value, ok := du.mutation.Simulated(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeBool,
Value: value,
Column: decision.FieldSimulated,
})
_spec.SetField(decision.FieldSimulated, field.TypeBool, value)
}
if value, ok := du.mutation.UUID(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: decision.FieldUUID,
})
_spec.SetField(decision.FieldUUID, field.TypeString, value)
}
if du.mutation.UUIDCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeString,
Column: decision.FieldUUID,
})
_spec.ClearField(decision.FieldUUID, field.TypeString)
}
if du.mutation.OwnerCleared() {
edge := &sqlgraph.EdgeSpec{
@ -609,10 +466,7 @@ func (du *DecisionUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{decision.OwnerColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: alert.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@ -625,10 +479,7 @@ func (du *DecisionUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{decision.OwnerColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: alert.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
@ -644,6 +495,7 @@ func (du *DecisionUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
return 0, err
}
du.mutation.done = true
return n, nil
}
@ -948,6 +800,12 @@ func (duo *DecisionUpdateOne) ClearOwner() *DecisionUpdateOne {
return duo
}
// Where appends a list of predicates to the DecisionUpdateOne builder.
func (duo *DecisionUpdateOne) Where(ps ...predicate.Decision) *DecisionUpdateOne {
duo.mutation.Where(ps...)
return duo
}
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (duo *DecisionUpdateOne) Select(field string, fields ...string) *DecisionUpdateOne {
@ -957,41 +815,8 @@ func (duo *DecisionUpdateOne) Select(field string, fields ...string) *DecisionUp
// Save executes the query and returns the updated Decision entity.
func (duo *DecisionUpdateOne) Save(ctx context.Context) (*Decision, error) {
var (
err error
node *Decision
)
duo.defaults()
if len(duo.hooks) == 0 {
node, err = duo.sqlSave(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*DecisionMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
duo.mutation = mutation
node, err = duo.sqlSave(ctx)
mutation.done = true
return node, err
})
for i := len(duo.hooks) - 1; i >= 0; i-- {
if duo.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = duo.hooks[i](mut)
}
v, err := mut.Mutate(ctx, duo.mutation)
if err != nil {
return nil, err
}
nv, ok := v.(*Decision)
if !ok {
return nil, fmt.Errorf("unexpected node type %T returned from DecisionMutation", v)
}
node = nv
}
return node, err
return withHooks(ctx, duo.sqlSave, duo.mutation, duo.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@ -1029,16 +854,7 @@ func (duo *DecisionUpdateOne) defaults() {
}
func (duo *DecisionUpdateOne) sqlSave(ctx context.Context) (_node *Decision, err error) {
_spec := &sqlgraph.UpdateSpec{
Node: &sqlgraph.NodeSpec{
Table: decision.Table,
Columns: decision.Columns,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: decision.FieldID,
},
},
}
_spec := sqlgraph.NewUpdateSpec(decision.Table, decision.Columns, sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt))
id, ok := duo.mutation.ID()
if !ok {
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Decision.id" for update`)}
@ -1064,198 +880,91 @@ func (duo *DecisionUpdateOne) sqlSave(ctx context.Context) (_node *Decision, err
}
}
if value, ok := duo.mutation.CreatedAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: decision.FieldCreatedAt,
})
_spec.SetField(decision.FieldCreatedAt, field.TypeTime, value)
}
if duo.mutation.CreatedAtCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: decision.FieldCreatedAt,
})
_spec.ClearField(decision.FieldCreatedAt, field.TypeTime)
}
if value, ok := duo.mutation.UpdatedAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: decision.FieldUpdatedAt,
})
_spec.SetField(decision.FieldUpdatedAt, field.TypeTime, value)
}
if duo.mutation.UpdatedAtCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: decision.FieldUpdatedAt,
})
_spec.ClearField(decision.FieldUpdatedAt, field.TypeTime)
}
if value, ok := duo.mutation.Until(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: decision.FieldUntil,
})
_spec.SetField(decision.FieldUntil, field.TypeTime, value)
}
if duo.mutation.UntilCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: decision.FieldUntil,
})
_spec.ClearField(decision.FieldUntil, field.TypeTime)
}
if value, ok := duo.mutation.Scenario(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: decision.FieldScenario,
})
_spec.SetField(decision.FieldScenario, field.TypeString, value)
}
if value, ok := duo.mutation.GetType(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: decision.FieldType,
})
_spec.SetField(decision.FieldType, field.TypeString, value)
}
if value, ok := duo.mutation.StartIP(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Value: value,
Column: decision.FieldStartIP,
})
_spec.SetField(decision.FieldStartIP, field.TypeInt64, value)
}
if value, ok := duo.mutation.AddedStartIP(); ok {
_spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Value: value,
Column: decision.FieldStartIP,
})
_spec.AddField(decision.FieldStartIP, field.TypeInt64, value)
}
if duo.mutation.StartIPCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Column: decision.FieldStartIP,
})
_spec.ClearField(decision.FieldStartIP, field.TypeInt64)
}
if value, ok := duo.mutation.EndIP(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Value: value,
Column: decision.FieldEndIP,
})
_spec.SetField(decision.FieldEndIP, field.TypeInt64, value)
}
if value, ok := duo.mutation.AddedEndIP(); ok {
_spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Value: value,
Column: decision.FieldEndIP,
})
_spec.AddField(decision.FieldEndIP, field.TypeInt64, value)
}
if duo.mutation.EndIPCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Column: decision.FieldEndIP,
})
_spec.ClearField(decision.FieldEndIP, field.TypeInt64)
}
if value, ok := duo.mutation.StartSuffix(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Value: value,
Column: decision.FieldStartSuffix,
})
_spec.SetField(decision.FieldStartSuffix, field.TypeInt64, value)
}
if value, ok := duo.mutation.AddedStartSuffix(); ok {
_spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Value: value,
Column: decision.FieldStartSuffix,
})
_spec.AddField(decision.FieldStartSuffix, field.TypeInt64, value)
}
if duo.mutation.StartSuffixCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Column: decision.FieldStartSuffix,
})
_spec.ClearField(decision.FieldStartSuffix, field.TypeInt64)
}
if value, ok := duo.mutation.EndSuffix(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Value: value,
Column: decision.FieldEndSuffix,
})
_spec.SetField(decision.FieldEndSuffix, field.TypeInt64, value)
}
if value, ok := duo.mutation.AddedEndSuffix(); ok {
_spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Value: value,
Column: decision.FieldEndSuffix,
})
_spec.AddField(decision.FieldEndSuffix, field.TypeInt64, value)
}
if duo.mutation.EndSuffixCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Column: decision.FieldEndSuffix,
})
_spec.ClearField(decision.FieldEndSuffix, field.TypeInt64)
}
if value, ok := duo.mutation.IPSize(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Value: value,
Column: decision.FieldIPSize,
})
_spec.SetField(decision.FieldIPSize, field.TypeInt64, value)
}
if value, ok := duo.mutation.AddedIPSize(); ok {
_spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Value: value,
Column: decision.FieldIPSize,
})
_spec.AddField(decision.FieldIPSize, field.TypeInt64, value)
}
if duo.mutation.IPSizeCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeInt64,
Column: decision.FieldIPSize,
})
_spec.ClearField(decision.FieldIPSize, field.TypeInt64)
}
if value, ok := duo.mutation.Scope(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: decision.FieldScope,
})
_spec.SetField(decision.FieldScope, field.TypeString, value)
}
if value, ok := duo.mutation.Value(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: decision.FieldValue,
})
_spec.SetField(decision.FieldValue, field.TypeString, value)
}
if value, ok := duo.mutation.Origin(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: decision.FieldOrigin,
})
_spec.SetField(decision.FieldOrigin, field.TypeString, value)
}
if value, ok := duo.mutation.Simulated(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeBool,
Value: value,
Column: decision.FieldSimulated,
})
_spec.SetField(decision.FieldSimulated, field.TypeBool, value)
}
if value, ok := duo.mutation.UUID(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: decision.FieldUUID,
})
_spec.SetField(decision.FieldUUID, field.TypeString, value)
}
if duo.mutation.UUIDCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeString,
Column: decision.FieldUUID,
})
_spec.ClearField(decision.FieldUUID, field.TypeString)
}
if duo.mutation.OwnerCleared() {
edge := &sqlgraph.EdgeSpec{
@ -1265,10 +974,7 @@ func (duo *DecisionUpdateOne) sqlSave(ctx context.Context) (_node *Decision, err
Columns: []string{decision.OwnerColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: alert.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@ -1281,10 +987,7 @@ func (duo *DecisionUpdateOne) sqlSave(ctx context.Context) (_node *Decision, err
Columns: []string{decision.OwnerColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: alert.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
@ -1303,5 +1006,6 @@ func (duo *DecisionUpdateOne) sqlSave(ctx context.Context) (_node *Decision, err
}
return nil, err
}
duo.mutation.done = true
return _node, nil
}

View file

@ -6,6 +6,8 @@ import (
"context"
"errors"
"fmt"
"reflect"
"sync"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
@ -21,50 +23,79 @@ import (
// ent aliases to avoid import conflicts in user's code.
type (
Op = ent.Op
Hook = ent.Hook
Value = ent.Value
Query = ent.Query
Policy = ent.Policy
Mutator = ent.Mutator
Mutation = ent.Mutation
MutateFunc = ent.MutateFunc
Op = ent.Op
Hook = ent.Hook
Value = ent.Value
Query = ent.Query
QueryContext = ent.QueryContext
Querier = ent.Querier
QuerierFunc = ent.QuerierFunc
Interceptor = ent.Interceptor
InterceptFunc = ent.InterceptFunc
Traverser = ent.Traverser
TraverseFunc = ent.TraverseFunc
Policy = ent.Policy
Mutator = ent.Mutator
Mutation = ent.Mutation
MutateFunc = ent.MutateFunc
)
type clientCtxKey struct{}
// FromContext returns a Client stored inside a context, or nil if there isn't one.
func FromContext(ctx context.Context) *Client {
c, _ := ctx.Value(clientCtxKey{}).(*Client)
return c
}
// NewContext returns a new context with the given Client attached.
func NewContext(parent context.Context, c *Client) context.Context {
return context.WithValue(parent, clientCtxKey{}, c)
}
type txCtxKey struct{}
// TxFromContext returns a Tx stored inside a context, or nil if there isn't one.
func TxFromContext(ctx context.Context) *Tx {
tx, _ := ctx.Value(txCtxKey{}).(*Tx)
return tx
}
// NewTxContext returns a new context with the given Tx attached.
func NewTxContext(parent context.Context, tx *Tx) context.Context {
return context.WithValue(parent, txCtxKey{}, tx)
}
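These context helpers are useful when only a context.Context is available, for example inside hooks or transaction callbacks. A sketch (client and ctx assumed):

ctx = ent.NewContext(ctx, client)

// Later, in code that only receives a context (e.g. a hook):
if c := ent.FromContext(ctx); c != nil {
	n, _ := c.Decision.Query().Count(ctx)
	_ = n // use the client as usual
}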
// OrderFunc applies an ordering on the sql selector.
// Deprecated: Use Asc/Desc functions or the package builders instead.
type OrderFunc func(*sql.Selector)
// columnChecker returns a function indicates if the column exists in the given column.
func columnChecker(table string) func(string) error {
checks := map[string]func(string) bool{
alert.Table: alert.ValidColumn,
bouncer.Table: bouncer.ValidColumn,
configitem.Table: configitem.ValidColumn,
decision.Table: decision.ValidColumn,
event.Table: event.ValidColumn,
machine.Table: machine.ValidColumn,
meta.Table: meta.ValidColumn,
}
check, ok := checks[table]
if !ok {
return func(string) error {
return fmt.Errorf("unknown table %q", table)
}
}
return func(column string) error {
if !check(column) {
return fmt.Errorf("unknown column %q for table %q", column, table)
}
return nil
}
var (
initCheck sync.Once
columnCheck sql.ColumnCheck
)
// checkColumn checks if the column exists in the given table.
func checkColumn(table, column string) error {
initCheck.Do(func() {
columnCheck = sql.NewColumnCheck(map[string]func(string) bool{
alert.Table: alert.ValidColumn,
bouncer.Table: bouncer.ValidColumn,
configitem.Table: configitem.ValidColumn,
decision.Table: decision.ValidColumn,
event.Table: event.ValidColumn,
machine.Table: machine.ValidColumn,
meta.Table: meta.ValidColumn,
})
})
return columnCheck(table, column)
}
// Asc applies the given fields in ASC order.
func Asc(fields ...string) OrderFunc {
func Asc(fields ...string) func(*sql.Selector) {
return func(s *sql.Selector) {
check := columnChecker(s.TableName())
for _, f := range fields {
if err := check(f); err != nil {
if err := checkColumn(s.TableName(), f); err != nil {
s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
}
s.OrderBy(sql.Asc(s.C(f)))
@ -73,11 +104,10 @@ func Asc(fields ...string) OrderFunc {
}
// Desc applies the given fields in DESC order.
func Desc(fields ...string) OrderFunc {
func Desc(fields ...string) func(*sql.Selector) {
return func(s *sql.Selector) {
check := columnChecker(s.TableName())
for _, f := range fields {
if err := check(f); err != nil {
if err := checkColumn(s.TableName(), f); err != nil {
s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
}
s.OrderBy(sql.Desc(s.C(f)))
@ -109,8 +139,7 @@ func Count() AggregateFunc {
// Max applies the "max" aggregation function on the given field of each group.
func Max(field string) AggregateFunc {
return func(s *sql.Selector) string {
check := columnChecker(s.TableName())
if err := check(field); err != nil {
if err := checkColumn(s.TableName(), field); err != nil {
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
return ""
}
@ -121,8 +150,7 @@ func Max(field string) AggregateFunc {
// Mean applies the "mean" aggregation function on the given field of each group.
func Mean(field string) AggregateFunc {
return func(s *sql.Selector) string {
check := columnChecker(s.TableName())
if err := check(field); err != nil {
if err := checkColumn(s.TableName(), field); err != nil {
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
return ""
}
@ -133,8 +161,7 @@ func Mean(field string) AggregateFunc {
// Min applies the "min" aggregation function on the given field of each group.
func Min(field string) AggregateFunc {
return func(s *sql.Selector) string {
check := columnChecker(s.TableName())
if err := check(field); err != nil {
if err := checkColumn(s.TableName(), field); err != nil {
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
return ""
}
@ -145,8 +172,7 @@ func Min(field string) AggregateFunc {
// Sum applies the "sum" aggregation function on the given field of each group.
func Sum(field string) AggregateFunc {
return func(s *sql.Selector) string {
check := columnChecker(s.TableName())
if err := check(field); err != nil {
if err := checkColumn(s.TableName(), field); err != nil {
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
return ""
}
@ -275,6 +301,7 @@ func IsConstraintError(err error) bool {
type selector struct {
label string
flds *[]string
fns []AggregateFunc
scan func(context.Context, any) error
}
@ -473,5 +500,121 @@ func (s *selector) BoolX(ctx context.Context) bool {
return v
}
// withHooks invokes the builder operation with the given hooks, if any.
func withHooks[V Value, M any, PM interface {
*M
Mutation
}](ctx context.Context, exec func(context.Context) (V, error), mutation PM, hooks []Hook) (value V, err error) {
if len(hooks) == 0 {
return exec(ctx)
}
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutationT, ok := any(m).(PM)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
// Set the mutation to the builder.
*mutation = *mutationT
return exec(ctx)
})
for i := len(hooks) - 1; i >= 0; i-- {
if hooks[i] == nil {
return value, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = hooks[i](mut)
}
v, err := mut.Mutate(ctx, mutation)
if err != nil {
return value, err
}
nv, ok := v.(V)
if !ok {
return value, fmt.Errorf("unexpected node type %T returned from %T", v, mutation)
}
return nv, nil
}
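withHooks centralizes the mutation/hook plumbing that every builder used to inline. Hooks themselves are registered as before; a hedged sketch of a logging hook (the log and time imports are illustrative):

client.Decision.Use(func(next ent.Mutator) ent.Mutator {
	return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
		start := time.Now()
		v, err := next.Mutate(ctx, m)
		log.Printf("%s mutation on %s took %s", m.Op(), m.Type(), time.Since(start))
		return v, err
	})
})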
// setContextOp returns a new context with the given QueryContext attached (including its op) in case it does not exist.
func setContextOp(ctx context.Context, qc *QueryContext, op string) context.Context {
if ent.QueryFromContext(ctx) == nil {
qc.Op = op
ctx = ent.NewQueryContext(ctx, qc)
}
return ctx
}
func querierAll[V Value, Q interface {
sqlAll(context.Context, ...queryHook) (V, error)
}]() Querier {
return QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
query, ok := q.(Q)
if !ok {
return nil, fmt.Errorf("unexpected query type %T", q)
}
return query.sqlAll(ctx)
})
}
func querierCount[Q interface {
sqlCount(context.Context) (int, error)
}]() Querier {
return QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
query, ok := q.(Q)
if !ok {
return nil, fmt.Errorf("unexpected query type %T", q)
}
return query.sqlCount(ctx)
})
}
func withInterceptors[V Value](ctx context.Context, q Query, qr Querier, inters []Interceptor) (v V, err error) {
for i := len(inters) - 1; i >= 0; i-- {
qr = inters[i].Intercept(qr)
}
rv, err := qr.Query(ctx, q)
if err != nil {
return v, err
}
vt, ok := rv.(V)
if !ok {
return v, fmt.Errorf("unexpected type %T returned from %T. expected type: %T", vt, q, v)
}
return vt, nil
}
func scanWithInterceptors[Q1 ent.Query, Q2 interface {
sqlScan(context.Context, Q1, any) error
}](ctx context.Context, rootQuery Q1, selectOrGroup Q2, inters []Interceptor, v any) error {
rv := reflect.ValueOf(v)
var qr Querier = QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
query, ok := q.(Q1)
if !ok {
return nil, fmt.Errorf("unexpected query type %T", q)
}
if err := selectOrGroup.sqlScan(ctx, query, v); err != nil {
return nil, err
}
if k := rv.Kind(); k == reflect.Pointer && rv.Elem().CanInterface() {
return rv.Elem().Interface(), nil
}
return v, nil
})
for i := len(inters) - 1; i >= 0; i-- {
qr = inters[i].Intercept(qr)
}
vv, err := qr.Query(ctx, rootQuery)
if err != nil {
return err
}
switch rv2 := reflect.ValueOf(vv); {
case rv.IsNil(), rv2.IsNil(), rv.Kind() != reflect.Pointer:
case rv.Type() == rv2.Type():
rv.Elem().Set(rv2.Elem())
case rv.Elem().Type() == rv2.Type():
rv.Elem().Set(rv2)
}
return nil
}
// queryHook describes an internal hook for the different sqlAll methods.
type queryHook func(context.Context, *sqlgraph.QuerySpec)
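
A hedged sketch of what ultimately feeds the interceptor plumbing above (withInterceptors and scanWithInterceptors); the `client` value, the timing, and the log call are assumptions, not part of this commit. A client-level interceptor wraps every Querier the helpers build:

// Assumed imports: "context", "log", "time", and the generated package as `ent`.
client.Intercept(ent.InterceptFunc(func(next ent.Querier) ent.Querier {
	return ent.QuerierFunc(func(ctx context.Context, q ent.Query) (ent.Value, error) {
		start := time.Now()
		v, err := next.Query(ctx, q)
		log.Printf("query finished in %s", time.Since(start)) // cross-cutting timing for all queries
		return v, err
	})
}))
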

View file

@ -7,6 +7,7 @@ import (
"strings"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/alert"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/event"
@ -29,7 +30,8 @@ type Event struct {
AlertEvents int `json:"alert_events,omitempty"`
// Edges holds the relations/edges for other nodes in the graph.
// The values are being populated by the EventQuery when eager-loading is set.
Edges EventEdges `json:"edges"`
Edges EventEdges `json:"edges"`
selectValues sql.SelectValues
}
// EventEdges holds the relations/edges for other nodes in the graph.
@ -66,7 +68,7 @@ func (*Event) scanValues(columns []string) ([]any, error) {
case event.FieldCreatedAt, event.FieldUpdatedAt, event.FieldTime:
values[i] = new(sql.NullTime)
default:
return nil, fmt.Errorf("unexpected column %q for type Event", columns[i])
values[i] = new(sql.UnknownType)
}
}
return values, nil
@ -118,21 +120,29 @@ func (e *Event) assignValues(columns []string, values []any) error {
} else if value.Valid {
e.AlertEvents = int(value.Int64)
}
default:
e.selectValues.Set(columns[i], values[i])
}
}
return nil
}
// Value returns the ent.Value that was dynamically selected and assigned to the Event.
// This includes values selected through modifiers, order, etc.
func (e *Event) Value(name string) (ent.Value, error) {
return e.selectValues.Get(name)
}
// QueryOwner queries the "owner" edge of the Event entity.
func (e *Event) QueryOwner() *AlertQuery {
return (&EventClient{config: e.config}).QueryOwner(e)
return NewEventClient(e.config).QueryOwner(e)
}
// Update returns a builder for updating this Event.
// Note that you need to call Event.Unwrap() before calling this method if this Event
// was returned from a transaction, and the transaction was committed or rolled back.
func (e *Event) Update() *EventUpdateOne {
return (&EventClient{config: e.config}).UpdateOne(e)
return NewEventClient(e.config).UpdateOne(e)
}
// Unwrap unwraps the Event entity that was returned from a transaction after it was closed,
@ -175,9 +185,3 @@ func (e *Event) String() string {
// Events is a parsable slice of Event.
type Events []*Event
func (e Events) config(cfg config) {
for _i := range e {
e[_i].config = cfg
}
}

View file

@ -4,6 +4,9 @@ package event
import (
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
)
const (
@ -66,3 +69,50 @@ var (
// SerializedValidator is a validator for the "serialized" field. It is called by the builders before save.
SerializedValidator func(string) error
)
// OrderOption defines the ordering options for the Event queries.
type OrderOption func(*sql.Selector)
// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldID, opts...).ToFunc()
}
// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}
// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}
// ByTime orders the results by the time field.
func ByTime(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldTime, opts...).ToFunc()
}
// BySerialized orders the results by the serialized field.
func BySerialized(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldSerialized, opts...).ToFunc()
}
// ByAlertEvents orders the results by the alert_events field.
func ByAlertEvents(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldAlertEvents, opts...).ToFunc()
}
// ByOwnerField orders the results by owner field.
func ByOwnerField(field string, opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newOwnerStep(), sql.OrderByField(field, opts...))
}
}
func newOwnerStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(OwnerInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),
)
}
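
A hypothetical usage sketch of the new OrderOption helpers, assuming `ctx`, `client`, and the chosen fields (none are from this commit): they replace raw OrderFunc values and can also sort by a field on the owning alert.

// Assumed imports: "entgo.io/ent/dialect/sql" and the generated `event` package.
events, err := client.Event.Query().
	Order(
		event.ByTime(sql.OrderDesc()),    // newest events first
		event.ByOwnerField("created_at"), // then by the owning alert's created_at
	).
	All(ctx)
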

View file

@ -12,477 +12,307 @@ import (
// ID filters vertices based on their ID field.
func ID(id int) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldID), id))
})
return predicate.Event(sql.FieldEQ(FieldID, id))
}
// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldID), id))
})
return predicate.Event(sql.FieldEQ(FieldID, id))
}
// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldID), id))
})
return predicate.Event(sql.FieldNEQ(FieldID, id))
}
// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
v := make([]any, len(ids))
for i := range v {
v[i] = ids[i]
}
s.Where(sql.In(s.C(FieldID), v...))
})
return predicate.Event(sql.FieldIn(FieldID, ids...))
}
// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
v := make([]any, len(ids))
for i := range v {
v[i] = ids[i]
}
s.Where(sql.NotIn(s.C(FieldID), v...))
})
return predicate.Event(sql.FieldNotIn(FieldID, ids...))
}
// IDGT applies the GT predicate on the ID field.
func IDGT(id int) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldID), id))
})
return predicate.Event(sql.FieldGT(FieldID, id))
}
// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldID), id))
})
return predicate.Event(sql.FieldGTE(FieldID, id))
}
// IDLT applies the LT predicate on the ID field.
func IDLT(id int) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldID), id))
})
return predicate.Event(sql.FieldLT(FieldID, id))
}
// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldID), id))
})
return predicate.Event(sql.FieldLTE(FieldID, id))
}
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldCreatedAt), v))
})
return predicate.Event(sql.FieldEQ(FieldCreatedAt, v))
}
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
})
return predicate.Event(sql.FieldEQ(FieldUpdatedAt, v))
}
// Time applies equality check predicate on the "time" field. It's identical to TimeEQ.
func Time(v time.Time) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldTime), v))
})
return predicate.Event(sql.FieldEQ(FieldTime, v))
}
// Serialized applies equality check predicate on the "serialized" field. It's identical to SerializedEQ.
func Serialized(v string) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldSerialized), v))
})
return predicate.Event(sql.FieldEQ(FieldSerialized, v))
}
// AlertEvents applies equality check predicate on the "alert_events" field. It's identical to AlertEventsEQ.
func AlertEvents(v int) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldAlertEvents), v))
})
return predicate.Event(sql.FieldEQ(FieldAlertEvents, v))
}
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldCreatedAt), v))
})
return predicate.Event(sql.FieldEQ(FieldCreatedAt, v))
}
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldCreatedAt), v))
})
return predicate.Event(sql.FieldNEQ(FieldCreatedAt, v))
}
// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.Event {
v := make([]any, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.In(s.C(FieldCreatedAt), v...))
})
return predicate.Event(sql.FieldIn(FieldCreatedAt, vs...))
}
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.Event {
v := make([]any, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.NotIn(s.C(FieldCreatedAt), v...))
})
return predicate.Event(sql.FieldNotIn(FieldCreatedAt, vs...))
}
// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldCreatedAt), v))
})
return predicate.Event(sql.FieldGT(FieldCreatedAt, v))
}
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldCreatedAt), v))
})
return predicate.Event(sql.FieldGTE(FieldCreatedAt, v))
}
// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldCreatedAt), v))
})
return predicate.Event(sql.FieldLT(FieldCreatedAt, v))
}
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldCreatedAt), v))
})
return predicate.Event(sql.FieldLTE(FieldCreatedAt, v))
}
// CreatedAtIsNil applies the IsNil predicate on the "created_at" field.
func CreatedAtIsNil() predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.IsNull(s.C(FieldCreatedAt)))
})
return predicate.Event(sql.FieldIsNull(FieldCreatedAt))
}
// CreatedAtNotNil applies the NotNil predicate on the "created_at" field.
func CreatedAtNotNil() predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.NotNull(s.C(FieldCreatedAt)))
})
return predicate.Event(sql.FieldNotNull(FieldCreatedAt))
}
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
})
return predicate.Event(sql.FieldEQ(FieldUpdatedAt, v))
}
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldUpdatedAt), v))
})
return predicate.Event(sql.FieldNEQ(FieldUpdatedAt, v))
}
// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.Event {
v := make([]any, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.In(s.C(FieldUpdatedAt), v...))
})
return predicate.Event(sql.FieldIn(FieldUpdatedAt, vs...))
}
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.Event {
v := make([]any, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...))
})
return predicate.Event(sql.FieldNotIn(FieldUpdatedAt, vs...))
}
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldUpdatedAt), v))
})
return predicate.Event(sql.FieldGT(FieldUpdatedAt, v))
}
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldUpdatedAt), v))
})
return predicate.Event(sql.FieldGTE(FieldUpdatedAt, v))
}
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldUpdatedAt), v))
})
return predicate.Event(sql.FieldLT(FieldUpdatedAt, v))
}
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldUpdatedAt), v))
})
return predicate.Event(sql.FieldLTE(FieldUpdatedAt, v))
}
// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field.
func UpdatedAtIsNil() predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.IsNull(s.C(FieldUpdatedAt)))
})
return predicate.Event(sql.FieldIsNull(FieldUpdatedAt))
}
// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field.
func UpdatedAtNotNil() predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.NotNull(s.C(FieldUpdatedAt)))
})
return predicate.Event(sql.FieldNotNull(FieldUpdatedAt))
}
// TimeEQ applies the EQ predicate on the "time" field.
func TimeEQ(v time.Time) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldTime), v))
})
return predicate.Event(sql.FieldEQ(FieldTime, v))
}
// TimeNEQ applies the NEQ predicate on the "time" field.
func TimeNEQ(v time.Time) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldTime), v))
})
return predicate.Event(sql.FieldNEQ(FieldTime, v))
}
// TimeIn applies the In predicate on the "time" field.
func TimeIn(vs ...time.Time) predicate.Event {
v := make([]any, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.In(s.C(FieldTime), v...))
})
return predicate.Event(sql.FieldIn(FieldTime, vs...))
}
// TimeNotIn applies the NotIn predicate on the "time" field.
func TimeNotIn(vs ...time.Time) predicate.Event {
v := make([]any, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.NotIn(s.C(FieldTime), v...))
})
return predicate.Event(sql.FieldNotIn(FieldTime, vs...))
}
// TimeGT applies the GT predicate on the "time" field.
func TimeGT(v time.Time) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldTime), v))
})
return predicate.Event(sql.FieldGT(FieldTime, v))
}
// TimeGTE applies the GTE predicate on the "time" field.
func TimeGTE(v time.Time) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldTime), v))
})
return predicate.Event(sql.FieldGTE(FieldTime, v))
}
// TimeLT applies the LT predicate on the "time" field.
func TimeLT(v time.Time) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldTime), v))
})
return predicate.Event(sql.FieldLT(FieldTime, v))
}
// TimeLTE applies the LTE predicate on the "time" field.
func TimeLTE(v time.Time) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldTime), v))
})
return predicate.Event(sql.FieldLTE(FieldTime, v))
}
// SerializedEQ applies the EQ predicate on the "serialized" field.
func SerializedEQ(v string) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldSerialized), v))
})
return predicate.Event(sql.FieldEQ(FieldSerialized, v))
}
// SerializedNEQ applies the NEQ predicate on the "serialized" field.
func SerializedNEQ(v string) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldSerialized), v))
})
return predicate.Event(sql.FieldNEQ(FieldSerialized, v))
}
// SerializedIn applies the In predicate on the "serialized" field.
func SerializedIn(vs ...string) predicate.Event {
v := make([]any, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.In(s.C(FieldSerialized), v...))
})
return predicate.Event(sql.FieldIn(FieldSerialized, vs...))
}
// SerializedNotIn applies the NotIn predicate on the "serialized" field.
func SerializedNotIn(vs ...string) predicate.Event {
v := make([]any, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.NotIn(s.C(FieldSerialized), v...))
})
return predicate.Event(sql.FieldNotIn(FieldSerialized, vs...))
}
// SerializedGT applies the GT predicate on the "serialized" field.
func SerializedGT(v string) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldSerialized), v))
})
return predicate.Event(sql.FieldGT(FieldSerialized, v))
}
// SerializedGTE applies the GTE predicate on the "serialized" field.
func SerializedGTE(v string) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldSerialized), v))
})
return predicate.Event(sql.FieldGTE(FieldSerialized, v))
}
// SerializedLT applies the LT predicate on the "serialized" field.
func SerializedLT(v string) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldSerialized), v))
})
return predicate.Event(sql.FieldLT(FieldSerialized, v))
}
// SerializedLTE applies the LTE predicate on the "serialized" field.
func SerializedLTE(v string) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldSerialized), v))
})
return predicate.Event(sql.FieldLTE(FieldSerialized, v))
}
// SerializedContains applies the Contains predicate on the "serialized" field.
func SerializedContains(v string) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.Contains(s.C(FieldSerialized), v))
})
return predicate.Event(sql.FieldContains(FieldSerialized, v))
}
// SerializedHasPrefix applies the HasPrefix predicate on the "serialized" field.
func SerializedHasPrefix(v string) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.HasPrefix(s.C(FieldSerialized), v))
})
return predicate.Event(sql.FieldHasPrefix(FieldSerialized, v))
}
// SerializedHasSuffix applies the HasSuffix predicate on the "serialized" field.
func SerializedHasSuffix(v string) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.HasSuffix(s.C(FieldSerialized), v))
})
return predicate.Event(sql.FieldHasSuffix(FieldSerialized, v))
}
// SerializedEqualFold applies the EqualFold predicate on the "serialized" field.
func SerializedEqualFold(v string) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.EqualFold(s.C(FieldSerialized), v))
})
return predicate.Event(sql.FieldEqualFold(FieldSerialized, v))
}
// SerializedContainsFold applies the ContainsFold predicate on the "serialized" field.
func SerializedContainsFold(v string) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.ContainsFold(s.C(FieldSerialized), v))
})
return predicate.Event(sql.FieldContainsFold(FieldSerialized, v))
}
// AlertEventsEQ applies the EQ predicate on the "alert_events" field.
func AlertEventsEQ(v int) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldAlertEvents), v))
})
return predicate.Event(sql.FieldEQ(FieldAlertEvents, v))
}
// AlertEventsNEQ applies the NEQ predicate on the "alert_events" field.
func AlertEventsNEQ(v int) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldAlertEvents), v))
})
return predicate.Event(sql.FieldNEQ(FieldAlertEvents, v))
}
// AlertEventsIn applies the In predicate on the "alert_events" field.
func AlertEventsIn(vs ...int) predicate.Event {
v := make([]any, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.In(s.C(FieldAlertEvents), v...))
})
return predicate.Event(sql.FieldIn(FieldAlertEvents, vs...))
}
// AlertEventsNotIn applies the NotIn predicate on the "alert_events" field.
func AlertEventsNotIn(vs ...int) predicate.Event {
v := make([]any, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.NotIn(s.C(FieldAlertEvents), v...))
})
return predicate.Event(sql.FieldNotIn(FieldAlertEvents, vs...))
}
// AlertEventsIsNil applies the IsNil predicate on the "alert_events" field.
func AlertEventsIsNil() predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.IsNull(s.C(FieldAlertEvents)))
})
return predicate.Event(sql.FieldIsNull(FieldAlertEvents))
}
// AlertEventsNotNil applies the NotNil predicate on the "alert_events" field.
func AlertEventsNotNil() predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s.Where(sql.NotNull(s.C(FieldAlertEvents)))
})
return predicate.Event(sql.FieldNotNull(FieldAlertEvents))
}
// HasOwner applies the HasEdge predicate on the "owner" edge.
@ -490,7 +320,6 @@ func HasOwner() predicate.Event {
return predicate.Event(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(OwnerTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),
)
sqlgraph.HasNeighbors(s, step)
@ -500,11 +329,7 @@ func HasOwner() predicate.Event {
// HasOwnerWith applies the HasEdge predicate on the "owner" edge with the given conditions (other predicates).
func HasOwnerWith(preds ...predicate.Alert) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(OwnerInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),
)
step := newOwnerStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@ -515,32 +340,15 @@ func HasOwnerWith(preds ...predicate.Alert) predicate.Event {
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.Event) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s1 := s.Clone().SetP(nil)
for _, p := range predicates {
p(s1)
}
s.Where(s1.P())
})
return predicate.Event(sql.AndPredicates(predicates...))
}
// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.Event) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
s1 := s.Clone().SetP(nil)
for i, p := range predicates {
if i > 0 {
s1.Or()
}
p(s1)
}
s.Where(s1.P())
})
return predicate.Event(sql.OrPredicates(predicates...))
}
// Not applies the not operator on the given predicate.
func Not(p predicate.Event) predicate.Event {
return predicate.Event(func(s *sql.Selector) {
p(s.Not())
})
return predicate.Event(sql.NotPredicates(p))
}
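
The field and group predicates above now delegate to the sql.Field* and sql.*Predicates helpers instead of building selectors by hand; caller-side behavior is unchanged. A hedged usage sketch, where `ctx`, `client`, and `since` are placeholders rather than names from this commit:

// Assumed imports: "context" and the generated `event` package.
recent, err := client.Event.Query().
	Where(event.And(
		event.TimeGTE(since),                // events newer than `since`
		event.SerializedContainsFold("ssh"), // case-insensitive substring match
	)).
	All(ctx)
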

View file

@ -101,50 +101,8 @@ func (ec *EventCreate) Mutation() *EventMutation {
// Save creates the Event in the database.
func (ec *EventCreate) Save(ctx context.Context) (*Event, error) {
var (
err error
node *Event
)
ec.defaults()
if len(ec.hooks) == 0 {
if err = ec.check(); err != nil {
return nil, err
}
node, err = ec.sqlSave(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*EventMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
if err = ec.check(); err != nil {
return nil, err
}
ec.mutation = mutation
if node, err = ec.sqlSave(ctx); err != nil {
return nil, err
}
mutation.id = &node.ID
mutation.done = true
return node, err
})
for i := len(ec.hooks) - 1; i >= 0; i-- {
if ec.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = ec.hooks[i](mut)
}
v, err := mut.Mutate(ctx, ec.mutation)
if err != nil {
return nil, err
}
nv, ok := v.(*Event)
if !ok {
return nil, fmt.Errorf("unexpected node type %T returned from EventMutation", v)
}
node = nv
}
return node, err
return withHooks(ctx, ec.sqlSave, ec.mutation, ec.hooks)
}
// SaveX calls Save and panics if Save returns an error.
@ -198,6 +156,9 @@ func (ec *EventCreate) check() error {
}
func (ec *EventCreate) sqlSave(ctx context.Context) (*Event, error) {
if err := ec.check(); err != nil {
return nil, err
}
_node, _spec := ec.createSpec()
if err := sqlgraph.CreateNode(ctx, ec.driver, _spec); err != nil {
if sqlgraph.IsConstraintError(err) {
@ -207,50 +168,30 @@ func (ec *EventCreate) sqlSave(ctx context.Context) (*Event, error) {
}
id := _spec.ID.Value.(int64)
_node.ID = int(id)
ec.mutation.id = &_node.ID
ec.mutation.done = true
return _node, nil
}
func (ec *EventCreate) createSpec() (*Event, *sqlgraph.CreateSpec) {
var (
_node = &Event{config: ec.config}
_spec = &sqlgraph.CreateSpec{
Table: event.Table,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: event.FieldID,
},
}
_spec = sqlgraph.NewCreateSpec(event.Table, sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt))
)
if value, ok := ec.mutation.CreatedAt(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: event.FieldCreatedAt,
})
_spec.SetField(event.FieldCreatedAt, field.TypeTime, value)
_node.CreatedAt = &value
}
if value, ok := ec.mutation.UpdatedAt(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: event.FieldUpdatedAt,
})
_spec.SetField(event.FieldUpdatedAt, field.TypeTime, value)
_node.UpdatedAt = &value
}
if value, ok := ec.mutation.Time(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: event.FieldTime,
})
_spec.SetField(event.FieldTime, field.TypeTime, value)
_node.Time = value
}
if value, ok := ec.mutation.Serialized(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: event.FieldSerialized,
})
_spec.SetField(event.FieldSerialized, field.TypeString, value)
_node.Serialized = value
}
if nodes := ec.mutation.OwnerIDs(); len(nodes) > 0 {
@ -261,10 +202,7 @@ func (ec *EventCreate) createSpec() (*Event, *sqlgraph.CreateSpec) {
Columns: []string{event.OwnerColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: alert.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
@ -279,11 +217,15 @@ func (ec *EventCreate) createSpec() (*Event, *sqlgraph.CreateSpec) {
// EventCreateBulk is the builder for creating many Event entities in bulk.
type EventCreateBulk struct {
config
err error
builders []*EventCreate
}
// Save creates the Event entities in the database.
func (ecb *EventCreateBulk) Save(ctx context.Context) ([]*Event, error) {
if ecb.err != nil {
return nil, ecb.err
}
specs := make([]*sqlgraph.CreateSpec, len(ecb.builders))
nodes := make([]*Event, len(ecb.builders))
mutators := make([]Mutator, len(ecb.builders))
@ -300,8 +242,8 @@ func (ecb *EventCreateBulk) Save(ctx context.Context) ([]*Event, error) {
return nil, err
}
builder.mutation = mutation
nodes[i], specs[i] = builder.createSpec()
var err error
nodes[i], specs[i] = builder.createSpec()
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, ecb.builders[i+1].mutation)
} else {

View file

@ -4,7 +4,6 @@ package ent
import (
"context"
"fmt"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
@ -28,34 +27,7 @@ func (ed *EventDelete) Where(ps ...predicate.Event) *EventDelete {
// Exec executes the deletion query and returns how many vertices were deleted.
func (ed *EventDelete) Exec(ctx context.Context) (int, error) {
var (
err error
affected int
)
if len(ed.hooks) == 0 {
affected, err = ed.sqlExec(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*EventMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
ed.mutation = mutation
affected, err = ed.sqlExec(ctx)
mutation.done = true
return affected, err
})
for i := len(ed.hooks) - 1; i >= 0; i-- {
if ed.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = ed.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, ed.mutation); err != nil {
return 0, err
}
}
return affected, err
return withHooks(ctx, ed.sqlExec, ed.mutation, ed.hooks)
}
// ExecX is like Exec, but panics if an error occurs.
@ -68,15 +40,7 @@ func (ed *EventDelete) ExecX(ctx context.Context) int {
}
func (ed *EventDelete) sqlExec(ctx context.Context) (int, error) {
_spec := &sqlgraph.DeleteSpec{
Node: &sqlgraph.NodeSpec{
Table: event.Table,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: event.FieldID,
},
},
}
_spec := sqlgraph.NewDeleteSpec(event.Table, sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt))
if ps := ed.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@ -88,6 +52,7 @@ func (ed *EventDelete) sqlExec(ctx context.Context) (int, error) {
if err != nil && sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
ed.mutation.done = true
return affected, err
}
@ -96,6 +61,12 @@ type EventDeleteOne struct {
ed *EventDelete
}
// Where appends a list of predicates to the EventDelete builder.
func (edo *EventDeleteOne) Where(ps ...predicate.Event) *EventDeleteOne {
edo.ed.mutation.Where(ps...)
return edo
}
// Exec executes the deletion query.
func (edo *EventDeleteOne) Exec(ctx context.Context) error {
n, err := edo.ed.Exec(ctx)
@ -111,5 +82,7 @@ func (edo *EventDeleteOne) Exec(ctx context.Context) error {
// ExecX is like Exec, but panics if an error occurs.
func (edo *EventDeleteOne) ExecX(ctx context.Context) {
edo.ed.ExecX(ctx)
if err := edo.Exec(ctx); err != nil {
panic(err)
}
}
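
A hedged sketch of how the delete builders are driven from application code; `ctx`, `client`, and `cutoff` are placeholders, not names from this commit:

// Assumed imports: "log" and the generated `event` package.
n, err := client.Event.Delete().
	Where(event.TimeLT(cutoff)). // drop events older than the cutoff
	Exec(ctx)
if err != nil {
	log.Fatalf("purging events: %v", err)
}
log.Printf("purged %d events", n)
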

View file

@ -18,11 +18,9 @@ import (
// EventQuery is the builder for querying Event entities.
type EventQuery struct {
config
limit *int
offset *int
unique *bool
order []OrderFunc
fields []string
ctx *QueryContext
order []event.OrderOption
inters []Interceptor
predicates []predicate.Event
withOwner *AlertQuery
// intermediate query (i.e. traversal path).
@ -36,34 +34,34 @@ func (eq *EventQuery) Where(ps ...predicate.Event) *EventQuery {
return eq
}
// Limit adds a limit step to the query.
// Limit the number of records to be returned by this query.
func (eq *EventQuery) Limit(limit int) *EventQuery {
eq.limit = &limit
eq.ctx.Limit = &limit
return eq
}
// Offset adds an offset step to the query.
// Offset to start from.
func (eq *EventQuery) Offset(offset int) *EventQuery {
eq.offset = &offset
eq.ctx.Offset = &offset
return eq
}
// Unique configures the query builder to filter duplicate records in the query.
// By default, unique is set to true, and can be disabled using this method.
func (eq *EventQuery) Unique(unique bool) *EventQuery {
eq.unique = &unique
eq.ctx.Unique = &unique
return eq
}
// Order adds an order step to the query.
func (eq *EventQuery) Order(o ...OrderFunc) *EventQuery {
// Order specifies how the records should be ordered.
func (eq *EventQuery) Order(o ...event.OrderOption) *EventQuery {
eq.order = append(eq.order, o...)
return eq
}
// QueryOwner chains the current query on the "owner" edge.
func (eq *EventQuery) QueryOwner() *AlertQuery {
query := &AlertQuery{config: eq.config}
query := (&AlertClient{config: eq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := eq.prepareQuery(ctx); err != nil {
return nil, err
@ -86,7 +84,7 @@ func (eq *EventQuery) QueryOwner() *AlertQuery {
// First returns the first Event entity from the query.
// Returns a *NotFoundError when no Event was found.
func (eq *EventQuery) First(ctx context.Context) (*Event, error) {
nodes, err := eq.Limit(1).All(ctx)
nodes, err := eq.Limit(1).All(setContextOp(ctx, eq.ctx, "First"))
if err != nil {
return nil, err
}
@ -109,7 +107,7 @@ func (eq *EventQuery) FirstX(ctx context.Context) *Event {
// Returns a *NotFoundError when no Event ID was found.
func (eq *EventQuery) FirstID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = eq.Limit(1).IDs(ctx); err != nil {
if ids, err = eq.Limit(1).IDs(setContextOp(ctx, eq.ctx, "FirstID")); err != nil {
return
}
if len(ids) == 0 {
@ -132,7 +130,7 @@ func (eq *EventQuery) FirstIDX(ctx context.Context) int {
// Returns a *NotSingularError when more than one Event entity is found.
// Returns a *NotFoundError when no Event entities are found.
func (eq *EventQuery) Only(ctx context.Context) (*Event, error) {
nodes, err := eq.Limit(2).All(ctx)
nodes, err := eq.Limit(2).All(setContextOp(ctx, eq.ctx, "Only"))
if err != nil {
return nil, err
}
@ -160,7 +158,7 @@ func (eq *EventQuery) OnlyX(ctx context.Context) *Event {
// Returns a *NotFoundError when no entities are found.
func (eq *EventQuery) OnlyID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = eq.Limit(2).IDs(ctx); err != nil {
if ids, err = eq.Limit(2).IDs(setContextOp(ctx, eq.ctx, "OnlyID")); err != nil {
return
}
switch len(ids) {
@ -185,10 +183,12 @@ func (eq *EventQuery) OnlyIDX(ctx context.Context) int {
// All executes the query and returns a list of Events.
func (eq *EventQuery) All(ctx context.Context) ([]*Event, error) {
ctx = setContextOp(ctx, eq.ctx, "All")
if err := eq.prepareQuery(ctx); err != nil {
return nil, err
}
return eq.sqlAll(ctx)
qr := querierAll[[]*Event, *EventQuery]()
return withInterceptors[[]*Event](ctx, eq, qr, eq.inters)
}
// AllX is like All, but panics if an error occurs.
@ -201,9 +201,12 @@ func (eq *EventQuery) AllX(ctx context.Context) []*Event {
}
// IDs executes the query and returns a list of Event IDs.
func (eq *EventQuery) IDs(ctx context.Context) ([]int, error) {
var ids []int
if err := eq.Select(event.FieldID).Scan(ctx, &ids); err != nil {
func (eq *EventQuery) IDs(ctx context.Context) (ids []int, err error) {
if eq.ctx.Unique == nil && eq.path != nil {
eq.Unique(true)
}
ctx = setContextOp(ctx, eq.ctx, "IDs")
if err = eq.Select(event.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
@ -220,10 +223,11 @@ func (eq *EventQuery) IDsX(ctx context.Context) []int {
// Count returns the count of the given query.
func (eq *EventQuery) Count(ctx context.Context) (int, error) {
ctx = setContextOp(ctx, eq.ctx, "Count")
if err := eq.prepareQuery(ctx); err != nil {
return 0, err
}
return eq.sqlCount(ctx)
return withInterceptors[int](ctx, eq, querierCount[*EventQuery](), eq.inters)
}
// CountX is like Count, but panics if an error occurs.
@ -237,10 +241,15 @@ func (eq *EventQuery) CountX(ctx context.Context) int {
// Exist returns true if the query has elements in the graph.
func (eq *EventQuery) Exist(ctx context.Context) (bool, error) {
if err := eq.prepareQuery(ctx); err != nil {
return false, err
ctx = setContextOp(ctx, eq.ctx, "Exist")
switch _, err := eq.FirstID(ctx); {
case IsNotFound(err):
return false, nil
case err != nil:
return false, fmt.Errorf("ent: check existence: %w", err)
default:
return true, nil
}
return eq.sqlExist(ctx)
}
// ExistX is like Exist, but panics if an error occurs.
@ -260,22 +269,21 @@ func (eq *EventQuery) Clone() *EventQuery {
}
return &EventQuery{
config: eq.config,
limit: eq.limit,
offset: eq.offset,
order: append([]OrderFunc{}, eq.order...),
ctx: eq.ctx.Clone(),
order: append([]event.OrderOption{}, eq.order...),
inters: append([]Interceptor{}, eq.inters...),
predicates: append([]predicate.Event{}, eq.predicates...),
withOwner: eq.withOwner.Clone(),
// clone intermediate query.
sql: eq.sql.Clone(),
path: eq.path,
unique: eq.unique,
sql: eq.sql.Clone(),
path: eq.path,
}
}
// WithOwner tells the query-builder to eager-load the nodes that are connected to
// the "owner" edge. The optional arguments are used to configure the query builder of the edge.
func (eq *EventQuery) WithOwner(opts ...func(*AlertQuery)) *EventQuery {
query := &AlertQuery{config: eq.config}
query := (&AlertClient{config: eq.config}).Query()
for _, opt := range opts {
opt(query)
}
@ -298,16 +306,11 @@ func (eq *EventQuery) WithOwner(opts ...func(*AlertQuery)) *EventQuery {
// Aggregate(ent.Count()).
// Scan(ctx, &v)
func (eq *EventQuery) GroupBy(field string, fields ...string) *EventGroupBy {
grbuild := &EventGroupBy{config: eq.config}
grbuild.fields = append([]string{field}, fields...)
grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) {
if err := eq.prepareQuery(ctx); err != nil {
return nil, err
}
return eq.sqlQuery(ctx), nil
}
eq.ctx.Fields = append([]string{field}, fields...)
grbuild := &EventGroupBy{build: eq}
grbuild.flds = &eq.ctx.Fields
grbuild.label = event.Label
grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan
grbuild.scan = grbuild.Scan
return grbuild
}
@ -324,15 +327,30 @@ func (eq *EventQuery) GroupBy(field string, fields ...string) *EventGroupBy {
// Select(event.FieldCreatedAt).
// Scan(ctx, &v)
func (eq *EventQuery) Select(fields ...string) *EventSelect {
eq.fields = append(eq.fields, fields...)
selbuild := &EventSelect{EventQuery: eq}
selbuild.label = event.Label
selbuild.flds, selbuild.scan = &eq.fields, selbuild.Scan
return selbuild
eq.ctx.Fields = append(eq.ctx.Fields, fields...)
sbuild := &EventSelect{EventQuery: eq}
sbuild.label = event.Label
sbuild.flds, sbuild.scan = &eq.ctx.Fields, sbuild.Scan
return sbuild
}
// Aggregate returns an EventSelect configured with the given aggregations.
func (eq *EventQuery) Aggregate(fns ...AggregateFunc) *EventSelect {
return eq.Select().Aggregate(fns...)
}
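
A hedged sketch of the new Aggregate entry point; `ctx` and `client` are placeholders, and Count refers to the aggregate helper generated alongside Mean, Min, and Sum earlier in this diff:

// Count events without materializing them; equivalent to Count(ctx), but via the aggregation API.
total, err := client.Event.Query().
	Aggregate(ent.Count()).
	Int(ctx)
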
func (eq *EventQuery) prepareQuery(ctx context.Context) error {
for _, f := range eq.fields {
for _, inter := range eq.inters {
if inter == nil {
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
}
if trv, ok := inter.(Traverser); ok {
if err := trv.Traverse(ctx, eq); err != nil {
return err
}
}
}
for _, f := range eq.ctx.Fields {
if !event.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
@ -392,6 +410,9 @@ func (eq *EventQuery) loadOwner(ctx context.Context, query *AlertQuery, nodes []
}
nodeids[fk] = append(nodeids[fk], nodes[i])
}
if len(ids) == 0 {
return nil
}
query.Where(alert.IDIn(ids...))
neighbors, err := query.All(ctx)
if err != nil {
@ -411,41 +432,22 @@ func (eq *EventQuery) loadOwner(ctx context.Context, query *AlertQuery, nodes []
func (eq *EventQuery) sqlCount(ctx context.Context) (int, error) {
_spec := eq.querySpec()
_spec.Node.Columns = eq.fields
if len(eq.fields) > 0 {
_spec.Unique = eq.unique != nil && *eq.unique
_spec.Node.Columns = eq.ctx.Fields
if len(eq.ctx.Fields) > 0 {
_spec.Unique = eq.ctx.Unique != nil && *eq.ctx.Unique
}
return sqlgraph.CountNodes(ctx, eq.driver, _spec)
}
func (eq *EventQuery) sqlExist(ctx context.Context) (bool, error) {
switch _, err := eq.FirstID(ctx); {
case IsNotFound(err):
return false, nil
case err != nil:
return false, fmt.Errorf("ent: check existence: %w", err)
default:
return true, nil
}
}
func (eq *EventQuery) querySpec() *sqlgraph.QuerySpec {
_spec := &sqlgraph.QuerySpec{
Node: &sqlgraph.NodeSpec{
Table: event.Table,
Columns: event.Columns,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: event.FieldID,
},
},
From: eq.sql,
Unique: true,
}
if unique := eq.unique; unique != nil {
_spec := sqlgraph.NewQuerySpec(event.Table, event.Columns, sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt))
_spec.From = eq.sql
if unique := eq.ctx.Unique; unique != nil {
_spec.Unique = *unique
} else if eq.path != nil {
_spec.Unique = true
}
if fields := eq.fields; len(fields) > 0 {
if fields := eq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, event.FieldID)
for i := range fields {
@ -453,6 +455,9 @@ func (eq *EventQuery) querySpec() *sqlgraph.QuerySpec {
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
}
}
if eq.withOwner != nil {
_spec.Node.AddColumnOnce(event.FieldAlertEvents)
}
}
if ps := eq.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
@ -461,10 +466,10 @@ func (eq *EventQuery) querySpec() *sqlgraph.QuerySpec {
}
}
}
if limit := eq.limit; limit != nil {
if limit := eq.ctx.Limit; limit != nil {
_spec.Limit = *limit
}
if offset := eq.offset; offset != nil {
if offset := eq.ctx.Offset; offset != nil {
_spec.Offset = *offset
}
if ps := eq.order; len(ps) > 0 {
@ -480,7 +485,7 @@ func (eq *EventQuery) querySpec() *sqlgraph.QuerySpec {
func (eq *EventQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(eq.driver.Dialect())
t1 := builder.Table(event.Table)
columns := eq.fields
columns := eq.ctx.Fields
if len(columns) == 0 {
columns = event.Columns
}
@ -489,7 +494,7 @@ func (eq *EventQuery) sqlQuery(ctx context.Context) *sql.Selector {
selector = eq.sql
selector.Select(selector.Columns(columns...)...)
}
if eq.unique != nil && *eq.unique {
if eq.ctx.Unique != nil && *eq.ctx.Unique {
selector.Distinct()
}
for _, p := range eq.predicates {
@ -498,12 +503,12 @@ func (eq *EventQuery) sqlQuery(ctx context.Context) *sql.Selector {
for _, p := range eq.order {
p(selector)
}
if offset := eq.offset; offset != nil {
if offset := eq.ctx.Offset; offset != nil {
// limit is mandatory for the offset clause. We start
// with a default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32)
}
if limit := eq.limit; limit != nil {
if limit := eq.ctx.Limit; limit != nil {
selector.Limit(*limit)
}
return selector
@ -511,13 +516,8 @@ func (eq *EventQuery) sqlQuery(ctx context.Context) *sql.Selector {
// EventGroupBy is the group-by builder for Event entities.
type EventGroupBy struct {
config
selector
fields []string
fns []AggregateFunc
// intermediate query (i.e. traversal path).
sql *sql.Selector
path func(context.Context) (*sql.Selector, error)
build *EventQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
@ -526,74 +526,77 @@ func (egb *EventGroupBy) Aggregate(fns ...AggregateFunc) *EventGroupBy {
return egb
}
// Scan applies the group-by query and scans the result into the given value.
// Scan applies the selector query and scans the result into the given value.
func (egb *EventGroupBy) Scan(ctx context.Context, v any) error {
query, err := egb.path(ctx)
if err != nil {
ctx = setContextOp(ctx, egb.build.ctx, "GroupBy")
if err := egb.build.prepareQuery(ctx); err != nil {
return err
}
egb.sql = query
return egb.sqlScan(ctx, v)
return scanWithInterceptors[*EventQuery, *EventGroupBy](ctx, egb.build, egb, egb.build.inters, v)
}
func (egb *EventGroupBy) sqlScan(ctx context.Context, v any) error {
for _, f := range egb.fields {
if !event.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
}
}
selector := egb.sqlQuery()
if err := selector.Err(); err != nil {
return err
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := egb.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
func (egb *EventGroupBy) sqlQuery() *sql.Selector {
selector := egb.sql.Select()
func (egb *EventGroupBy) sqlScan(ctx context.Context, root *EventQuery, v any) error {
selector := root.sqlQuery(ctx).Select()
aggregation := make([]string, 0, len(egb.fns))
for _, fn := range egb.fns {
aggregation = append(aggregation, fn(selector))
}
// If no columns were selected in a custom aggregation function, the default
// selection is the fields used for "group-by", and the aggregation functions.
if len(selector.SelectedColumns()) == 0 {
columns := make([]string, 0, len(egb.fields)+len(egb.fns))
for _, f := range egb.fields {
columns := make([]string, 0, len(*egb.flds)+len(egb.fns))
for _, f := range *egb.flds {
columns = append(columns, selector.C(f))
}
columns = append(columns, aggregation...)
selector.Select(columns...)
}
return selector.GroupBy(selector.Columns(egb.fields...)...)
selector.GroupBy(selector.Columns(*egb.flds...)...)
if err := selector.Err(); err != nil {
return err
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := egb.build.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
// EventSelect is the builder for selecting fields of Event entities.
type EventSelect struct {
*EventQuery
selector
// intermediate query (i.e. traversal path).
sql *sql.Selector
}
// Aggregate adds the given aggregation functions to the selector query.
func (es *EventSelect) Aggregate(fns ...AggregateFunc) *EventSelect {
es.fns = append(es.fns, fns...)
return es
}
// Scan applies the selector query and scans the result into the given value.
func (es *EventSelect) Scan(ctx context.Context, v any) error {
ctx = setContextOp(ctx, es.ctx, "Select")
if err := es.prepareQuery(ctx); err != nil {
return err
}
es.sql = es.EventQuery.sqlQuery(ctx)
return es.sqlScan(ctx, v)
return scanWithInterceptors[*EventQuery, *EventSelect](ctx, es.EventQuery, es, es.inters, v)
}
func (es *EventSelect) sqlScan(ctx context.Context, v any) error {
func (es *EventSelect) sqlScan(ctx context.Context, root *EventQuery, v any) error {
selector := root.sqlQuery(ctx)
aggregation := make([]string, 0, len(es.fns))
for _, fn := range es.fns {
aggregation = append(aggregation, fn(selector))
}
switch n := len(*es.selector.flds); {
case n == 0 && len(aggregation) > 0:
selector.Select(aggregation...)
case n != 0 && len(aggregation) > 0:
selector.AppendSelect(aggregation...)
}
rows := &sql.Rows{}
query, args := es.sql.Query()
query, args := selector.Query()
if err := es.driver.Query(ctx, query, args, rows); err != nil {
return err
}

View file

@ -117,41 +117,8 @@ func (eu *EventUpdate) ClearOwner() *EventUpdate {
// Save executes the query and returns the number of nodes affected by the update operation.
func (eu *EventUpdate) Save(ctx context.Context) (int, error) {
var (
err error
affected int
)
eu.defaults()
if len(eu.hooks) == 0 {
if err = eu.check(); err != nil {
return 0, err
}
affected, err = eu.sqlSave(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*EventMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
if err = eu.check(); err != nil {
return 0, err
}
eu.mutation = mutation
affected, err = eu.sqlSave(ctx)
mutation.done = true
return affected, err
})
for i := len(eu.hooks) - 1; i >= 0; i-- {
if eu.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = eu.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, eu.mutation); err != nil {
return 0, err
}
}
return affected, err
return withHooks(ctx, eu.sqlSave, eu.mutation, eu.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@ -199,16 +166,10 @@ func (eu *EventUpdate) check() error {
}
func (eu *EventUpdate) sqlSave(ctx context.Context) (n int, err error) {
_spec := &sqlgraph.UpdateSpec{
Node: &sqlgraph.NodeSpec{
Table: event.Table,
Columns: event.Columns,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: event.FieldID,
},
},
if err := eu.check(); err != nil {
return n, err
}
_spec := sqlgraph.NewUpdateSpec(event.Table, event.Columns, sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt))
if ps := eu.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@ -217,44 +178,22 @@ func (eu *EventUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
}
if value, ok := eu.mutation.CreatedAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: event.FieldCreatedAt,
})
_spec.SetField(event.FieldCreatedAt, field.TypeTime, value)
}
if eu.mutation.CreatedAtCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: event.FieldCreatedAt,
})
_spec.ClearField(event.FieldCreatedAt, field.TypeTime)
}
if value, ok := eu.mutation.UpdatedAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: event.FieldUpdatedAt,
})
_spec.SetField(event.FieldUpdatedAt, field.TypeTime, value)
}
if eu.mutation.UpdatedAtCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: event.FieldUpdatedAt,
})
_spec.ClearField(event.FieldUpdatedAt, field.TypeTime)
}
if value, ok := eu.mutation.Time(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: event.FieldTime,
})
_spec.SetField(event.FieldTime, field.TypeTime, value)
}
if value, ok := eu.mutation.Serialized(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: event.FieldSerialized,
})
_spec.SetField(event.FieldSerialized, field.TypeString, value)
}
if eu.mutation.OwnerCleared() {
edge := &sqlgraph.EdgeSpec{
@ -264,10 +203,7 @@ func (eu *EventUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{event.OwnerColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: alert.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@ -280,10 +216,7 @@ func (eu *EventUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{event.OwnerColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: alert.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
@ -299,6 +232,7 @@ func (eu *EventUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
return 0, err
}
eu.mutation.done = true
return n, nil
}
@ -396,6 +330,12 @@ func (euo *EventUpdateOne) ClearOwner() *EventUpdateOne {
return euo
}
// Where appends a list of predicates to the EventUpdate builder.
func (euo *EventUpdateOne) Where(ps ...predicate.Event) *EventUpdateOne {
euo.mutation.Where(ps...)
return euo
}
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (euo *EventUpdateOne) Select(field string, fields ...string) *EventUpdateOne {
@ -405,47 +345,8 @@ func (euo *EventUpdateOne) Select(field string, fields ...string) *EventUpdateOn
// Save executes the query and returns the updated Event entity.
func (euo *EventUpdateOne) Save(ctx context.Context) (*Event, error) {
var (
err error
node *Event
)
euo.defaults()
if len(euo.hooks) == 0 {
if err = euo.check(); err != nil {
return nil, err
}
node, err = euo.sqlSave(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*EventMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
if err = euo.check(); err != nil {
return nil, err
}
euo.mutation = mutation
node, err = euo.sqlSave(ctx)
mutation.done = true
return node, err
})
for i := len(euo.hooks) - 1; i >= 0; i-- {
if euo.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = euo.hooks[i](mut)
}
v, err := mut.Mutate(ctx, euo.mutation)
if err != nil {
return nil, err
}
nv, ok := v.(*Event)
if !ok {
return nil, fmt.Errorf("unexpected node type %T returned from EventMutation", v)
}
node = nv
}
return node, err
return withHooks(ctx, euo.sqlSave, euo.mutation, euo.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@ -493,16 +394,10 @@ func (euo *EventUpdateOne) check() error {
}
func (euo *EventUpdateOne) sqlSave(ctx context.Context) (_node *Event, err error) {
_spec := &sqlgraph.UpdateSpec{
Node: &sqlgraph.NodeSpec{
Table: event.Table,
Columns: event.Columns,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: event.FieldID,
},
},
if err := euo.check(); err != nil {
return _node, err
}
_spec := sqlgraph.NewUpdateSpec(event.Table, event.Columns, sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt))
id, ok := euo.mutation.ID()
if !ok {
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Event.id" for update`)}
@ -528,44 +423,22 @@ func (euo *EventUpdateOne) sqlSave(ctx context.Context) (_node *Event, err error
}
}
if value, ok := euo.mutation.CreatedAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: event.FieldCreatedAt,
})
_spec.SetField(event.FieldCreatedAt, field.TypeTime, value)
}
if euo.mutation.CreatedAtCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: event.FieldCreatedAt,
})
_spec.ClearField(event.FieldCreatedAt, field.TypeTime)
}
if value, ok := euo.mutation.UpdatedAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: event.FieldUpdatedAt,
})
_spec.SetField(event.FieldUpdatedAt, field.TypeTime, value)
}
if euo.mutation.UpdatedAtCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: event.FieldUpdatedAt,
})
_spec.ClearField(event.FieldUpdatedAt, field.TypeTime)
}
if value, ok := euo.mutation.Time(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: event.FieldTime,
})
_spec.SetField(event.FieldTime, field.TypeTime, value)
}
if value, ok := euo.mutation.Serialized(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: event.FieldSerialized,
})
_spec.SetField(event.FieldSerialized, field.TypeString, value)
}
if euo.mutation.OwnerCleared() {
edge := &sqlgraph.EdgeSpec{
@ -575,10 +448,7 @@ func (euo *EventUpdateOne) sqlSave(ctx context.Context) (_node *Event, err error
Columns: []string{event.OwnerColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: alert.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@ -591,10 +461,7 @@ func (euo *EventUpdateOne) sqlSave(ctx context.Context) (_node *Event, err error
Columns: []string{event.OwnerColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: alert.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
@ -613,5 +480,6 @@ func (euo *EventUpdateOne) sqlSave(ctx context.Context) (_node *Event, err error
}
return nil, err
}
euo.mutation.done = true
return _node, nil
}

View file

@ -15,11 +15,10 @@ type AlertFunc func(context.Context, *ent.AlertMutation) (ent.Value, error)
// Mutate calls f(ctx, m).
func (f AlertFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
mv, ok := m.(*ent.AlertMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AlertMutation", m)
if mv, ok := m.(*ent.AlertMutation); ok {
return f(ctx, mv)
}
return f(ctx, mv)
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AlertMutation", m)
}
// The BouncerFunc type is an adapter to allow the use of ordinary
@ -28,11 +27,10 @@ type BouncerFunc func(context.Context, *ent.BouncerMutation) (ent.Value, error)
// Mutate calls f(ctx, m).
func (f BouncerFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
mv, ok := m.(*ent.BouncerMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.BouncerMutation", m)
if mv, ok := m.(*ent.BouncerMutation); ok {
return f(ctx, mv)
}
return f(ctx, mv)
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.BouncerMutation", m)
}
// The ConfigItemFunc type is an adapter to allow the use of ordinary
@ -41,11 +39,10 @@ type ConfigItemFunc func(context.Context, *ent.ConfigItemMutation) (ent.Value, e
// Mutate calls f(ctx, m).
func (f ConfigItemFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
mv, ok := m.(*ent.ConfigItemMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ConfigItemMutation", m)
if mv, ok := m.(*ent.ConfigItemMutation); ok {
return f(ctx, mv)
}
return f(ctx, mv)
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ConfigItemMutation", m)
}
// The DecisionFunc type is an adapter to allow the use of ordinary
@ -54,11 +51,10 @@ type DecisionFunc func(context.Context, *ent.DecisionMutation) (ent.Value, error
// Mutate calls f(ctx, m).
func (f DecisionFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
mv, ok := m.(*ent.DecisionMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DecisionMutation", m)
if mv, ok := m.(*ent.DecisionMutation); ok {
return f(ctx, mv)
}
return f(ctx, mv)
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DecisionMutation", m)
}
// The EventFunc type is an adapter to allow the use of ordinary
@ -67,11 +63,10 @@ type EventFunc func(context.Context, *ent.EventMutation) (ent.Value, error)
// Mutate calls f(ctx, m).
func (f EventFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
mv, ok := m.(*ent.EventMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.EventMutation", m)
if mv, ok := m.(*ent.EventMutation); ok {
return f(ctx, mv)
}
return f(ctx, mv)
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.EventMutation", m)
}
// The MachineFunc type is an adapter to allow the use of ordinary
@ -80,11 +75,10 @@ type MachineFunc func(context.Context, *ent.MachineMutation) (ent.Value, error)
// Mutate calls f(ctx, m).
func (f MachineFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
mv, ok := m.(*ent.MachineMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MachineMutation", m)
if mv, ok := m.(*ent.MachineMutation); ok {
return f(ctx, mv)
}
return f(ctx, mv)
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MachineMutation", m)
}
// The MetaFunc type is an adapter to allow the use of ordinary
@ -93,11 +87,10 @@ type MetaFunc func(context.Context, *ent.MetaMutation) (ent.Value, error)
// Mutate calls f(ctx, m).
func (f MetaFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
mv, ok := m.(*ent.MetaMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MetaMutation", m)
if mv, ok := m.(*ent.MetaMutation); ok {
return f(ctx, mv)
}
return f(ctx, mv)
return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MetaMutation", m)
}
// Condition is a hook condition function.
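
For context, these generated *Func adapters are what allow ordinary functions to be registered as hooks. A minimal usage sketch, not part of this diff (registerAlertHook and client are illustrative names):

// Assumes: "context", "github.com/crowdsecurity/crowdsec/pkg/database/ent",
// "github.com/crowdsecurity/crowdsec/pkg/database/ent/hook".
func registerAlertHook(client *ent.Client) {
	client.Alert.Use(func(next ent.Mutator) ent.Mutator {
		return hook.AlertFunc(func(ctx context.Context, m *ent.AlertMutation) (ent.Value, error) {
			// Inspect or adjust the mutation here before it executes.
			return next.Mutate(ctx, m)
		})
	})
}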


@ -7,6 +7,7 @@ import (
"strings"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
)
@ -42,7 +43,8 @@ type Machine struct {
AuthType string `json:"auth_type"`
// Edges holds the relations/edges for other nodes in the graph.
// The values are being populated by the MachineQuery when eager-loading is set.
Edges MachineEdges `json:"edges"`
Edges MachineEdges `json:"edges"`
selectValues sql.SelectValues
}
// MachineEdges holds the relations/edges for other nodes in the graph.
@ -77,7 +79,7 @@ func (*Machine) scanValues(columns []string) ([]any, error) {
case machine.FieldCreatedAt, machine.FieldUpdatedAt, machine.FieldLastPush, machine.FieldLastHeartbeat:
values[i] = new(sql.NullTime)
default:
return nil, fmt.Errorf("unexpected column %q for type Machine", columns[i])
values[i] = new(sql.UnknownType)
}
}
return values, nil
@ -173,21 +175,29 @@ func (m *Machine) assignValues(columns []string, values []any) error {
} else if value.Valid {
m.AuthType = value.String
}
default:
m.selectValues.Set(columns[i], values[i])
}
}
return nil
}
// Value returns the ent.Value that was dynamically selected and assigned to the Machine.
// This includes values selected through modifiers, order, etc.
func (m *Machine) Value(name string) (ent.Value, error) {
return m.selectValues.Get(name)
}
// QueryAlerts queries the "alerts" edge of the Machine entity.
func (m *Machine) QueryAlerts() *AlertQuery {
return (&MachineClient{config: m.config}).QueryAlerts(m)
return NewMachineClient(m.config).QueryAlerts(m)
}
// Update returns a builder for updating this Machine.
// Note that you need to call Machine.Unwrap() before calling this method if this Machine
// was returned from a transaction, and the transaction was committed or rolled back.
func (m *Machine) Update() *MachineUpdateOne {
return (&MachineClient{config: m.config}).UpdateOne(m)
return NewMachineClient(m.config).UpdateOne(m)
}
// Unwrap unwraps the Machine entity that was returned from a transaction after it was closed,
@ -254,9 +264,3 @@ func (m *Machine) String() string {
// Machines is a parsable slice of Machine.
type Machines []*Machine
func (m Machines) config(cfg config) {
for _i := range m {
m[_i].config = cfg
}
}


@ -4,6 +4,9 @@ package machine
import (
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
)
const (
@ -99,3 +102,92 @@ var (
// DefaultAuthType holds the default value on creation for the "auth_type" field.
DefaultAuthType string
)
// OrderOption defines the ordering options for the Machine queries.
type OrderOption func(*sql.Selector)
// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldID, opts...).ToFunc()
}
// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}
// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}
// ByLastPush orders the results by the last_push field.
func ByLastPush(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldLastPush, opts...).ToFunc()
}
// ByLastHeartbeat orders the results by the last_heartbeat field.
func ByLastHeartbeat(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldLastHeartbeat, opts...).ToFunc()
}
// ByMachineId orders the results by the machineId field.
func ByMachineId(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldMachineId, opts...).ToFunc()
}
// ByPassword orders the results by the password field.
func ByPassword(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldPassword, opts...).ToFunc()
}
// ByIpAddress orders the results by the ipAddress field.
func ByIpAddress(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldIpAddress, opts...).ToFunc()
}
// ByScenarios orders the results by the scenarios field.
func ByScenarios(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldScenarios, opts...).ToFunc()
}
// ByVersion orders the results by the version field.
func ByVersion(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldVersion, opts...).ToFunc()
}
// ByIsValidated orders the results by the isValidated field.
func ByIsValidated(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldIsValidated, opts...).ToFunc()
}
// ByStatus orders the results by the status field.
func ByStatus(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldStatus, opts...).ToFunc()
}
// ByAuthType orders the results by the auth_type field.
func ByAuthType(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldAuthType, opts...).ToFunc()
}
// ByAlertsCount orders the results by alerts count.
func ByAlertsCount(opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborsCount(s, newAlertsStep(), opts...)
}
}
// ByAlerts orders the results by alerts terms.
func ByAlerts(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newAlertsStep(), append([]sql.OrderTerm{term}, terms...)...)
}
}
func newAlertsStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(AlertsInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, AlertsTable, AlertsColumn),
)
}
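
These OrderOption helpers replace the old OrderFunc-based ordering. A hedged sketch of how they might be used (listMachines is an illustrative name; entsql aliases entgo.io/ent/dialect/sql):

// Assumes: "context", entsql "entgo.io/ent/dialect/sql",
// "github.com/crowdsecurity/crowdsec/pkg/database/ent",
// "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine".
func listMachines(ctx context.Context, client *ent.Client) ([]*ent.Machine, error) {
	return client.Machine.Query().
		Order(
			machine.ByLastHeartbeat(entsql.OrderDesc()), // newest heartbeat first
			machine.ByAlertsCount(),                     // then by number of related alerts
		).
		All(ctx)
}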

File diff suppressed because it is too large.


@ -187,50 +187,8 @@ func (mc *MachineCreate) Mutation() *MachineMutation {
// Save creates the Machine in the database.
func (mc *MachineCreate) Save(ctx context.Context) (*Machine, error) {
var (
err error
node *Machine
)
mc.defaults()
if len(mc.hooks) == 0 {
if err = mc.check(); err != nil {
return nil, err
}
node, err = mc.sqlSave(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*MachineMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
if err = mc.check(); err != nil {
return nil, err
}
mc.mutation = mutation
if node, err = mc.sqlSave(ctx); err != nil {
return nil, err
}
mutation.id = &node.ID
mutation.done = true
return node, err
})
for i := len(mc.hooks) - 1; i >= 0; i-- {
if mc.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = mc.hooks[i](mut)
}
v, err := mut.Mutate(ctx, mc.mutation)
if err != nil {
return nil, err
}
nv, ok := v.(*Machine)
if !ok {
return nil, fmt.Errorf("unexpected node type %T returned from MachineMutation", v)
}
node = nv
}
return node, err
return withHooks(ctx, mc.sqlSave, mc.mutation, mc.hooks)
}
// SaveX calls Save and panics if Save returns an error.
@ -309,6 +267,9 @@ func (mc *MachineCreate) check() error {
}
func (mc *MachineCreate) sqlSave(ctx context.Context) (*Machine, error) {
if err := mc.check(); err != nil {
return nil, err
}
_node, _spec := mc.createSpec()
if err := sqlgraph.CreateNode(ctx, mc.driver, _spec); err != nil {
if sqlgraph.IsConstraintError(err) {
@ -318,114 +279,62 @@ func (mc *MachineCreate) sqlSave(ctx context.Context) (*Machine, error) {
}
id := _spec.ID.Value.(int64)
_node.ID = int(id)
mc.mutation.id = &_node.ID
mc.mutation.done = true
return _node, nil
}
func (mc *MachineCreate) createSpec() (*Machine, *sqlgraph.CreateSpec) {
var (
_node = &Machine{config: mc.config}
_spec = &sqlgraph.CreateSpec{
Table: machine.Table,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: machine.FieldID,
},
}
_spec = sqlgraph.NewCreateSpec(machine.Table, sqlgraph.NewFieldSpec(machine.FieldID, field.TypeInt))
)
if value, ok := mc.mutation.CreatedAt(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: machine.FieldCreatedAt,
})
_spec.SetField(machine.FieldCreatedAt, field.TypeTime, value)
_node.CreatedAt = &value
}
if value, ok := mc.mutation.UpdatedAt(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: machine.FieldUpdatedAt,
})
_spec.SetField(machine.FieldUpdatedAt, field.TypeTime, value)
_node.UpdatedAt = &value
}
if value, ok := mc.mutation.LastPush(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: machine.FieldLastPush,
})
_spec.SetField(machine.FieldLastPush, field.TypeTime, value)
_node.LastPush = &value
}
if value, ok := mc.mutation.LastHeartbeat(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: machine.FieldLastHeartbeat,
})
_spec.SetField(machine.FieldLastHeartbeat, field.TypeTime, value)
_node.LastHeartbeat = &value
}
if value, ok := mc.mutation.MachineId(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: machine.FieldMachineId,
})
_spec.SetField(machine.FieldMachineId, field.TypeString, value)
_node.MachineId = value
}
if value, ok := mc.mutation.Password(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: machine.FieldPassword,
})
_spec.SetField(machine.FieldPassword, field.TypeString, value)
_node.Password = value
}
if value, ok := mc.mutation.IpAddress(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: machine.FieldIpAddress,
})
_spec.SetField(machine.FieldIpAddress, field.TypeString, value)
_node.IpAddress = value
}
if value, ok := mc.mutation.Scenarios(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: machine.FieldScenarios,
})
_spec.SetField(machine.FieldScenarios, field.TypeString, value)
_node.Scenarios = value
}
if value, ok := mc.mutation.Version(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: machine.FieldVersion,
})
_spec.SetField(machine.FieldVersion, field.TypeString, value)
_node.Version = value
}
if value, ok := mc.mutation.IsValidated(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeBool,
Value: value,
Column: machine.FieldIsValidated,
})
_spec.SetField(machine.FieldIsValidated, field.TypeBool, value)
_node.IsValidated = value
}
if value, ok := mc.mutation.Status(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: machine.FieldStatus,
})
_spec.SetField(machine.FieldStatus, field.TypeString, value)
_node.Status = value
}
if value, ok := mc.mutation.AuthType(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: machine.FieldAuthType,
})
_spec.SetField(machine.FieldAuthType, field.TypeString, value)
_node.AuthType = value
}
if nodes := mc.mutation.AlertsIDs(); len(nodes) > 0 {
@ -436,10 +345,7 @@ func (mc *MachineCreate) createSpec() (*Machine, *sqlgraph.CreateSpec) {
Columns: []string{machine.AlertsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: alert.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
@ -453,11 +359,15 @@ func (mc *MachineCreate) createSpec() (*Machine, *sqlgraph.CreateSpec) {
// MachineCreateBulk is the builder for creating many Machine entities in bulk.
type MachineCreateBulk struct {
config
err error
builders []*MachineCreate
}
// Save creates the Machine entities in the database.
func (mcb *MachineCreateBulk) Save(ctx context.Context) ([]*Machine, error) {
if mcb.err != nil {
return nil, mcb.err
}
specs := make([]*sqlgraph.CreateSpec, len(mcb.builders))
nodes := make([]*Machine, len(mcb.builders))
mutators := make([]Mutator, len(mcb.builders))
@ -474,8 +384,8 @@ func (mcb *MachineCreateBulk) Save(ctx context.Context) ([]*Machine, error) {
return nil, err
}
builder.mutation = mutation
nodes[i], specs[i] = builder.createSpec()
var err error
nodes[i], specs[i] = builder.createSpec()
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, mcb.builders[i+1].mutation)
} else {
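
The Save path of the create builder now funnels through the shared withHooks helper, and createSpec uses the sqlgraph.NewCreateSpec/SetField shorthand. From the caller's side nothing changes, as in this hedged sketch (createMachine and the field values are illustrative):

// Assumes: "context", "github.com/crowdsecurity/crowdsec/pkg/database/ent".
func createMachine(ctx context.Context, client *ent.Client) (*ent.Machine, error) {
	return client.Machine.Create().
		SetMachineId("demo-machine"). // illustrative values only
		SetPassword("not-a-real-password").
		SetIpAddress("192.0.2.10").
		Save(ctx)
}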


@ -4,7 +4,6 @@ package ent
import (
"context"
"fmt"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
@ -28,34 +27,7 @@ func (md *MachineDelete) Where(ps ...predicate.Machine) *MachineDelete {
// Exec executes the deletion query and returns how many vertices were deleted.
func (md *MachineDelete) Exec(ctx context.Context) (int, error) {
var (
err error
affected int
)
if len(md.hooks) == 0 {
affected, err = md.sqlExec(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*MachineMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
md.mutation = mutation
affected, err = md.sqlExec(ctx)
mutation.done = true
return affected, err
})
for i := len(md.hooks) - 1; i >= 0; i-- {
if md.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = md.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, md.mutation); err != nil {
return 0, err
}
}
return affected, err
return withHooks(ctx, md.sqlExec, md.mutation, md.hooks)
}
// ExecX is like Exec, but panics if an error occurs.
@ -68,15 +40,7 @@ func (md *MachineDelete) ExecX(ctx context.Context) int {
}
func (md *MachineDelete) sqlExec(ctx context.Context) (int, error) {
_spec := &sqlgraph.DeleteSpec{
Node: &sqlgraph.NodeSpec{
Table: machine.Table,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: machine.FieldID,
},
},
}
_spec := sqlgraph.NewDeleteSpec(machine.Table, sqlgraph.NewFieldSpec(machine.FieldID, field.TypeInt))
if ps := md.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@ -88,6 +52,7 @@ func (md *MachineDelete) sqlExec(ctx context.Context) (int, error) {
if err != nil && sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
md.mutation.done = true
return affected, err
}
@ -96,6 +61,12 @@ type MachineDeleteOne struct {
md *MachineDelete
}
// Where appends a list predicates to the MachineDelete builder.
func (mdo *MachineDeleteOne) Where(ps ...predicate.Machine) *MachineDeleteOne {
mdo.md.mutation.Where(ps...)
return mdo
}
// Exec executes the deletion query.
func (mdo *MachineDeleteOne) Exec(ctx context.Context) error {
n, err := mdo.md.Exec(ctx)
@ -111,5 +82,7 @@ func (mdo *MachineDeleteOne) Exec(ctx context.Context) error {
// ExecX is like Exec, but panics if an error occurs.
func (mdo *MachineDeleteOne) ExecX(ctx context.Context) {
mdo.md.ExecX(ctx)
if err := mdo.Exec(ctx); err != nil {
panic(err)
}
}
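
MachineDeleteOne gains a Where method in this regeneration, so a delete-by-ID can be made conditional. A hedged sketch (deleteUnvalidatedMachine is an illustrative name):

// Assumes: "context", "github.com/crowdsecurity/crowdsec/pkg/database/ent",
// "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine".
func deleteUnvalidatedMachine(ctx context.Context, client *ent.Client, id int) error {
	// Exec returns a *NotFoundError if the row no longer matches the predicate.
	return client.Machine.DeleteOneID(id).
		Where(machine.IsValidated(false)).
		Exec(ctx)
}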


@ -19,11 +19,9 @@ import (
// MachineQuery is the builder for querying Machine entities.
type MachineQuery struct {
config
limit *int
offset *int
unique *bool
order []OrderFunc
fields []string
ctx *QueryContext
order []machine.OrderOption
inters []Interceptor
predicates []predicate.Machine
withAlerts *AlertQuery
// intermediate query (i.e. traversal path).
@ -37,34 +35,34 @@ func (mq *MachineQuery) Where(ps ...predicate.Machine) *MachineQuery {
return mq
}
// Limit adds a limit step to the query.
// Limit the number of records to be returned by this query.
func (mq *MachineQuery) Limit(limit int) *MachineQuery {
mq.limit = &limit
mq.ctx.Limit = &limit
return mq
}
// Offset adds an offset step to the query.
// Offset to start from.
func (mq *MachineQuery) Offset(offset int) *MachineQuery {
mq.offset = &offset
mq.ctx.Offset = &offset
return mq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (mq *MachineQuery) Unique(unique bool) *MachineQuery {
mq.unique = &unique
mq.ctx.Unique = &unique
return mq
}
// Order adds an order step to the query.
func (mq *MachineQuery) Order(o ...OrderFunc) *MachineQuery {
// Order specifies how the records should be ordered.
func (mq *MachineQuery) Order(o ...machine.OrderOption) *MachineQuery {
mq.order = append(mq.order, o...)
return mq
}
// QueryAlerts chains the current query on the "alerts" edge.
func (mq *MachineQuery) QueryAlerts() *AlertQuery {
query := &AlertQuery{config: mq.config}
query := (&AlertClient{config: mq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := mq.prepareQuery(ctx); err != nil {
return nil, err
@ -87,7 +85,7 @@ func (mq *MachineQuery) QueryAlerts() *AlertQuery {
// First returns the first Machine entity from the query.
// Returns a *NotFoundError when no Machine was found.
func (mq *MachineQuery) First(ctx context.Context) (*Machine, error) {
nodes, err := mq.Limit(1).All(ctx)
nodes, err := mq.Limit(1).All(setContextOp(ctx, mq.ctx, "First"))
if err != nil {
return nil, err
}
@ -110,7 +108,7 @@ func (mq *MachineQuery) FirstX(ctx context.Context) *Machine {
// Returns a *NotFoundError when no Machine ID was found.
func (mq *MachineQuery) FirstID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = mq.Limit(1).IDs(ctx); err != nil {
if ids, err = mq.Limit(1).IDs(setContextOp(ctx, mq.ctx, "FirstID")); err != nil {
return
}
if len(ids) == 0 {
@ -133,7 +131,7 @@ func (mq *MachineQuery) FirstIDX(ctx context.Context) int {
// Returns a *NotSingularError when more than one Machine entity is found.
// Returns a *NotFoundError when no Machine entities are found.
func (mq *MachineQuery) Only(ctx context.Context) (*Machine, error) {
nodes, err := mq.Limit(2).All(ctx)
nodes, err := mq.Limit(2).All(setContextOp(ctx, mq.ctx, "Only"))
if err != nil {
return nil, err
}
@ -161,7 +159,7 @@ func (mq *MachineQuery) OnlyX(ctx context.Context) *Machine {
// Returns a *NotFoundError when no entities are found.
func (mq *MachineQuery) OnlyID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = mq.Limit(2).IDs(ctx); err != nil {
if ids, err = mq.Limit(2).IDs(setContextOp(ctx, mq.ctx, "OnlyID")); err != nil {
return
}
switch len(ids) {
@ -186,10 +184,12 @@ func (mq *MachineQuery) OnlyIDX(ctx context.Context) int {
// All executes the query and returns a list of Machines.
func (mq *MachineQuery) All(ctx context.Context) ([]*Machine, error) {
ctx = setContextOp(ctx, mq.ctx, "All")
if err := mq.prepareQuery(ctx); err != nil {
return nil, err
}
return mq.sqlAll(ctx)
qr := querierAll[[]*Machine, *MachineQuery]()
return withInterceptors[[]*Machine](ctx, mq, qr, mq.inters)
}
// AllX is like All, but panics if an error occurs.
@ -202,9 +202,12 @@ func (mq *MachineQuery) AllX(ctx context.Context) []*Machine {
}
// IDs executes the query and returns a list of Machine IDs.
func (mq *MachineQuery) IDs(ctx context.Context) ([]int, error) {
var ids []int
if err := mq.Select(machine.FieldID).Scan(ctx, &ids); err != nil {
func (mq *MachineQuery) IDs(ctx context.Context) (ids []int, err error) {
if mq.ctx.Unique == nil && mq.path != nil {
mq.Unique(true)
}
ctx = setContextOp(ctx, mq.ctx, "IDs")
if err = mq.Select(machine.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
@ -221,10 +224,11 @@ func (mq *MachineQuery) IDsX(ctx context.Context) []int {
// Count returns the count of the given query.
func (mq *MachineQuery) Count(ctx context.Context) (int, error) {
ctx = setContextOp(ctx, mq.ctx, "Count")
if err := mq.prepareQuery(ctx); err != nil {
return 0, err
}
return mq.sqlCount(ctx)
return withInterceptors[int](ctx, mq, querierCount[*MachineQuery](), mq.inters)
}
// CountX is like Count, but panics if an error occurs.
@ -238,10 +242,15 @@ func (mq *MachineQuery) CountX(ctx context.Context) int {
// Exist returns true if the query has elements in the graph.
func (mq *MachineQuery) Exist(ctx context.Context) (bool, error) {
if err := mq.prepareQuery(ctx); err != nil {
return false, err
ctx = setContextOp(ctx, mq.ctx, "Exist")
switch _, err := mq.FirstID(ctx); {
case IsNotFound(err):
return false, nil
case err != nil:
return false, fmt.Errorf("ent: check existence: %w", err)
default:
return true, nil
}
return mq.sqlExist(ctx)
}
// ExistX is like Exist, but panics if an error occurs.
@ -261,22 +270,21 @@ func (mq *MachineQuery) Clone() *MachineQuery {
}
return &MachineQuery{
config: mq.config,
limit: mq.limit,
offset: mq.offset,
order: append([]OrderFunc{}, mq.order...),
ctx: mq.ctx.Clone(),
order: append([]machine.OrderOption{}, mq.order...),
inters: append([]Interceptor{}, mq.inters...),
predicates: append([]predicate.Machine{}, mq.predicates...),
withAlerts: mq.withAlerts.Clone(),
// clone intermediate query.
sql: mq.sql.Clone(),
path: mq.path,
unique: mq.unique,
sql: mq.sql.Clone(),
path: mq.path,
}
}
// WithAlerts tells the query-builder to eager-load the nodes that are connected to
// the "alerts" edge. The optional arguments are used to configure the query builder of the edge.
func (mq *MachineQuery) WithAlerts(opts ...func(*AlertQuery)) *MachineQuery {
query := &AlertQuery{config: mq.config}
query := (&AlertClient{config: mq.config}).Query()
for _, opt := range opts {
opt(query)
}
@ -299,16 +307,11 @@ func (mq *MachineQuery) WithAlerts(opts ...func(*AlertQuery)) *MachineQuery {
// Aggregate(ent.Count()).
// Scan(ctx, &v)
func (mq *MachineQuery) GroupBy(field string, fields ...string) *MachineGroupBy {
grbuild := &MachineGroupBy{config: mq.config}
grbuild.fields = append([]string{field}, fields...)
grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) {
if err := mq.prepareQuery(ctx); err != nil {
return nil, err
}
return mq.sqlQuery(ctx), nil
}
mq.ctx.Fields = append([]string{field}, fields...)
grbuild := &MachineGroupBy{build: mq}
grbuild.flds = &mq.ctx.Fields
grbuild.label = machine.Label
grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan
grbuild.scan = grbuild.Scan
return grbuild
}
@ -325,15 +328,30 @@ func (mq *MachineQuery) GroupBy(field string, fields ...string) *MachineGroupBy
// Select(machine.FieldCreatedAt).
// Scan(ctx, &v)
func (mq *MachineQuery) Select(fields ...string) *MachineSelect {
mq.fields = append(mq.fields, fields...)
selbuild := &MachineSelect{MachineQuery: mq}
selbuild.label = machine.Label
selbuild.flds, selbuild.scan = &mq.fields, selbuild.Scan
return selbuild
mq.ctx.Fields = append(mq.ctx.Fields, fields...)
sbuild := &MachineSelect{MachineQuery: mq}
sbuild.label = machine.Label
sbuild.flds, sbuild.scan = &mq.ctx.Fields, sbuild.Scan
return sbuild
}
// Aggregate returns a MachineSelect configured with the given aggregations.
func (mq *MachineQuery) Aggregate(fns ...AggregateFunc) *MachineSelect {
return mq.Select().Aggregate(fns...)
}
func (mq *MachineQuery) prepareQuery(ctx context.Context) error {
for _, f := range mq.fields {
for _, inter := range mq.inters {
if inter == nil {
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
}
if trv, ok := inter.(Traverser); ok {
if err := trv.Traverse(ctx, mq); err != nil {
return err
}
}
}
for _, f := range mq.ctx.Fields {
if !machine.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
@ -396,7 +414,7 @@ func (mq *MachineQuery) loadAlerts(ctx context.Context, query *AlertQuery, nodes
}
query.withFKs = true
query.Where(predicate.Alert(func(s *sql.Selector) {
s.Where(sql.InValues(machine.AlertsColumn, fks...))
s.Where(sql.InValues(s.C(machine.AlertsColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@ -409,7 +427,7 @@ func (mq *MachineQuery) loadAlerts(ctx context.Context, query *AlertQuery, nodes
}
node, ok := nodeids[*fk]
if !ok {
return fmt.Errorf(`unexpected foreign-key "machine_alerts" returned %v for node %v`, *fk, n.ID)
return fmt.Errorf(`unexpected referenced foreign-key "machine_alerts" returned %v for node %v`, *fk, n.ID)
}
assign(node, n)
}
@ -418,41 +436,22 @@ func (mq *MachineQuery) loadAlerts(ctx context.Context, query *AlertQuery, nodes
func (mq *MachineQuery) sqlCount(ctx context.Context) (int, error) {
_spec := mq.querySpec()
_spec.Node.Columns = mq.fields
if len(mq.fields) > 0 {
_spec.Unique = mq.unique != nil && *mq.unique
_spec.Node.Columns = mq.ctx.Fields
if len(mq.ctx.Fields) > 0 {
_spec.Unique = mq.ctx.Unique != nil && *mq.ctx.Unique
}
return sqlgraph.CountNodes(ctx, mq.driver, _spec)
}
func (mq *MachineQuery) sqlExist(ctx context.Context) (bool, error) {
switch _, err := mq.FirstID(ctx); {
case IsNotFound(err):
return false, nil
case err != nil:
return false, fmt.Errorf("ent: check existence: %w", err)
default:
return true, nil
}
}
func (mq *MachineQuery) querySpec() *sqlgraph.QuerySpec {
_spec := &sqlgraph.QuerySpec{
Node: &sqlgraph.NodeSpec{
Table: machine.Table,
Columns: machine.Columns,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: machine.FieldID,
},
},
From: mq.sql,
Unique: true,
}
if unique := mq.unique; unique != nil {
_spec := sqlgraph.NewQuerySpec(machine.Table, machine.Columns, sqlgraph.NewFieldSpec(machine.FieldID, field.TypeInt))
_spec.From = mq.sql
if unique := mq.ctx.Unique; unique != nil {
_spec.Unique = *unique
} else if mq.path != nil {
_spec.Unique = true
}
if fields := mq.fields; len(fields) > 0 {
if fields := mq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, machine.FieldID)
for i := range fields {
@ -468,10 +467,10 @@ func (mq *MachineQuery) querySpec() *sqlgraph.QuerySpec {
}
}
}
if limit := mq.limit; limit != nil {
if limit := mq.ctx.Limit; limit != nil {
_spec.Limit = *limit
}
if offset := mq.offset; offset != nil {
if offset := mq.ctx.Offset; offset != nil {
_spec.Offset = *offset
}
if ps := mq.order; len(ps) > 0 {
@ -487,7 +486,7 @@ func (mq *MachineQuery) querySpec() *sqlgraph.QuerySpec {
func (mq *MachineQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(mq.driver.Dialect())
t1 := builder.Table(machine.Table)
columns := mq.fields
columns := mq.ctx.Fields
if len(columns) == 0 {
columns = machine.Columns
}
@ -496,7 +495,7 @@ func (mq *MachineQuery) sqlQuery(ctx context.Context) *sql.Selector {
selector = mq.sql
selector.Select(selector.Columns(columns...)...)
}
if mq.unique != nil && *mq.unique {
if mq.ctx.Unique != nil && *mq.ctx.Unique {
selector.Distinct()
}
for _, p := range mq.predicates {
@ -505,12 +504,12 @@ func (mq *MachineQuery) sqlQuery(ctx context.Context) *sql.Selector {
for _, p := range mq.order {
p(selector)
}
if offset := mq.offset; offset != nil {
if offset := mq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start
// with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32)
}
if limit := mq.limit; limit != nil {
if limit := mq.ctx.Limit; limit != nil {
selector.Limit(*limit)
}
return selector
@ -518,13 +517,8 @@ func (mq *MachineQuery) sqlQuery(ctx context.Context) *sql.Selector {
// MachineGroupBy is the group-by builder for Machine entities.
type MachineGroupBy struct {
config
selector
fields []string
fns []AggregateFunc
// intermediate query (i.e. traversal path).
sql *sql.Selector
path func(context.Context) (*sql.Selector, error)
build *MachineQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
@ -533,74 +527,77 @@ func (mgb *MachineGroupBy) Aggregate(fns ...AggregateFunc) *MachineGroupBy {
return mgb
}
// Scan applies the group-by query and scans the result into the given value.
// Scan applies the selector query and scans the result into the given value.
func (mgb *MachineGroupBy) Scan(ctx context.Context, v any) error {
query, err := mgb.path(ctx)
if err != nil {
ctx = setContextOp(ctx, mgb.build.ctx, "GroupBy")
if err := mgb.build.prepareQuery(ctx); err != nil {
return err
}
mgb.sql = query
return mgb.sqlScan(ctx, v)
return scanWithInterceptors[*MachineQuery, *MachineGroupBy](ctx, mgb.build, mgb, mgb.build.inters, v)
}
func (mgb *MachineGroupBy) sqlScan(ctx context.Context, v any) error {
for _, f := range mgb.fields {
if !machine.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
}
}
selector := mgb.sqlQuery()
if err := selector.Err(); err != nil {
return err
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := mgb.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
func (mgb *MachineGroupBy) sqlQuery() *sql.Selector {
selector := mgb.sql.Select()
func (mgb *MachineGroupBy) sqlScan(ctx context.Context, root *MachineQuery, v any) error {
selector := root.sqlQuery(ctx).Select()
aggregation := make([]string, 0, len(mgb.fns))
for _, fn := range mgb.fns {
aggregation = append(aggregation, fn(selector))
}
// If no columns were selected in a custom aggregation function, the default
// selection is the fields used for "group-by", and the aggregation functions.
if len(selector.SelectedColumns()) == 0 {
columns := make([]string, 0, len(mgb.fields)+len(mgb.fns))
for _, f := range mgb.fields {
columns := make([]string, 0, len(*mgb.flds)+len(mgb.fns))
for _, f := range *mgb.flds {
columns = append(columns, selector.C(f))
}
columns = append(columns, aggregation...)
selector.Select(columns...)
}
return selector.GroupBy(selector.Columns(mgb.fields...)...)
selector.GroupBy(selector.Columns(*mgb.flds...)...)
if err := selector.Err(); err != nil {
return err
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := mgb.build.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
// MachineSelect is the builder for selecting fields of Machine entities.
type MachineSelect struct {
*MachineQuery
selector
// intermediate query (i.e. traversal path).
sql *sql.Selector
}
// Aggregate adds the given aggregation functions to the selector query.
func (ms *MachineSelect) Aggregate(fns ...AggregateFunc) *MachineSelect {
ms.fns = append(ms.fns, fns...)
return ms
}
// Scan applies the selector query and scans the result into the given value.
func (ms *MachineSelect) Scan(ctx context.Context, v any) error {
ctx = setContextOp(ctx, ms.ctx, "Select")
if err := ms.prepareQuery(ctx); err != nil {
return err
}
ms.sql = ms.MachineQuery.sqlQuery(ctx)
return ms.sqlScan(ctx, v)
return scanWithInterceptors[*MachineQuery, *MachineSelect](ctx, ms.MachineQuery, ms, ms.inters, v)
}
func (ms *MachineSelect) sqlScan(ctx context.Context, v any) error {
func (ms *MachineSelect) sqlScan(ctx context.Context, root *MachineQuery, v any) error {
selector := root.sqlQuery(ctx)
aggregation := make([]string, 0, len(ms.fns))
for _, fn := range ms.fns {
aggregation = append(aggregation, fn(selector))
}
switch n := len(*ms.selector.flds); {
case n == 0 && len(aggregation) > 0:
selector.Select(aggregation...)
case n != 0 && len(aggregation) > 0:
selector.AppendSelect(aggregation...)
}
rows := &sql.Rows{}
query, args := ms.sql.Query()
query, args := selector.Query()
if err := ms.driver.Query(ctx, query, args, rows); err != nil {
return err
}
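
The GroupBy and Select builders now scan through the interceptor-aware path, but the documented usage pattern is unchanged. A hedged sketch that mirrors the generated doc comments (countMachinesByStatus and statusCount are illustrative names):

// Assumes: "context", "github.com/crowdsecurity/crowdsec/pkg/database/ent",
// "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine".
type statusCount struct {
	Status string `json:"status"`
	Count  int    `json:"count"`
}

func countMachinesByStatus(ctx context.Context, client *ent.Client) ([]statusCount, error) {
	var v []statusCount
	// Group machines by status and count the rows in each group.
	err := client.Machine.Query().
		GroupBy(machine.FieldStatus).
		Aggregate(ent.Count()).
		Scan(ctx, &v)
	return v, err
}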


@ -226,41 +226,8 @@ func (mu *MachineUpdate) RemoveAlerts(a ...*Alert) *MachineUpdate {
// Save executes the query and returns the number of nodes affected by the update operation.
func (mu *MachineUpdate) Save(ctx context.Context) (int, error) {
var (
err error
affected int
)
mu.defaults()
if len(mu.hooks) == 0 {
if err = mu.check(); err != nil {
return 0, err
}
affected, err = mu.sqlSave(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*MachineMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
if err = mu.check(); err != nil {
return 0, err
}
mu.mutation = mutation
affected, err = mu.sqlSave(ctx)
mutation.done = true
return affected, err
})
for i := len(mu.hooks) - 1; i >= 0; i-- {
if mu.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = mu.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, mu.mutation); err != nil {
return 0, err
}
}
return affected, err
return withHooks(ctx, mu.sqlSave, mu.mutation, mu.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@ -316,16 +283,10 @@ func (mu *MachineUpdate) check() error {
}
func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) {
_spec := &sqlgraph.UpdateSpec{
Node: &sqlgraph.NodeSpec{
Table: machine.Table,
Columns: machine.Columns,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: machine.FieldID,
},
},
if err := mu.check(); err != nil {
return n, err
}
_spec := sqlgraph.NewUpdateSpec(machine.Table, machine.Columns, sqlgraph.NewFieldSpec(machine.FieldID, field.TypeInt))
if ps := mu.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@ -334,130 +295,61 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
}
if value, ok := mu.mutation.CreatedAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: machine.FieldCreatedAt,
})
_spec.SetField(machine.FieldCreatedAt, field.TypeTime, value)
}
if mu.mutation.CreatedAtCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: machine.FieldCreatedAt,
})
_spec.ClearField(machine.FieldCreatedAt, field.TypeTime)
}
if value, ok := mu.mutation.UpdatedAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: machine.FieldUpdatedAt,
})
_spec.SetField(machine.FieldUpdatedAt, field.TypeTime, value)
}
if mu.mutation.UpdatedAtCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: machine.FieldUpdatedAt,
})
_spec.ClearField(machine.FieldUpdatedAt, field.TypeTime)
}
if value, ok := mu.mutation.LastPush(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: machine.FieldLastPush,
})
_spec.SetField(machine.FieldLastPush, field.TypeTime, value)
}
if mu.mutation.LastPushCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: machine.FieldLastPush,
})
_spec.ClearField(machine.FieldLastPush, field.TypeTime)
}
if value, ok := mu.mutation.LastHeartbeat(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: machine.FieldLastHeartbeat,
})
_spec.SetField(machine.FieldLastHeartbeat, field.TypeTime, value)
}
if mu.mutation.LastHeartbeatCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: machine.FieldLastHeartbeat,
})
_spec.ClearField(machine.FieldLastHeartbeat, field.TypeTime)
}
if value, ok := mu.mutation.MachineId(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: machine.FieldMachineId,
})
_spec.SetField(machine.FieldMachineId, field.TypeString, value)
}
if value, ok := mu.mutation.Password(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: machine.FieldPassword,
})
_spec.SetField(machine.FieldPassword, field.TypeString, value)
}
if value, ok := mu.mutation.IpAddress(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: machine.FieldIpAddress,
})
_spec.SetField(machine.FieldIpAddress, field.TypeString, value)
}
if value, ok := mu.mutation.Scenarios(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: machine.FieldScenarios,
})
_spec.SetField(machine.FieldScenarios, field.TypeString, value)
}
if mu.mutation.ScenariosCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeString,
Column: machine.FieldScenarios,
})
_spec.ClearField(machine.FieldScenarios, field.TypeString)
}
if value, ok := mu.mutation.Version(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: machine.FieldVersion,
})
_spec.SetField(machine.FieldVersion, field.TypeString, value)
}
if mu.mutation.VersionCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeString,
Column: machine.FieldVersion,
})
_spec.ClearField(machine.FieldVersion, field.TypeString)
}
if value, ok := mu.mutation.IsValidated(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeBool,
Value: value,
Column: machine.FieldIsValidated,
})
_spec.SetField(machine.FieldIsValidated, field.TypeBool, value)
}
if value, ok := mu.mutation.Status(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: machine.FieldStatus,
})
_spec.SetField(machine.FieldStatus, field.TypeString, value)
}
if mu.mutation.StatusCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeString,
Column: machine.FieldStatus,
})
_spec.ClearField(machine.FieldStatus, field.TypeString)
}
if value, ok := mu.mutation.AuthType(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: machine.FieldAuthType,
})
_spec.SetField(machine.FieldAuthType, field.TypeString, value)
}
if mu.mutation.AlertsCleared() {
edge := &sqlgraph.EdgeSpec{
@ -467,10 +359,7 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{machine.AlertsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: alert.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@ -483,10 +372,7 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{machine.AlertsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: alert.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
@ -502,10 +388,7 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{machine.AlertsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: alert.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
@ -521,6 +404,7 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
return 0, err
}
mu.mutation.done = true
return n, nil
}
@ -727,6 +611,12 @@ func (muo *MachineUpdateOne) RemoveAlerts(a ...*Alert) *MachineUpdateOne {
return muo.RemoveAlertIDs(ids...)
}
// Where appends a list predicates to the MachineUpdate builder.
func (muo *MachineUpdateOne) Where(ps ...predicate.Machine) *MachineUpdateOne {
muo.mutation.Where(ps...)
return muo
}
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (muo *MachineUpdateOne) Select(field string, fields ...string) *MachineUpdateOne {
@ -736,47 +626,8 @@ func (muo *MachineUpdateOne) Select(field string, fields ...string) *MachineUpda
// Save executes the query and returns the updated Machine entity.
func (muo *MachineUpdateOne) Save(ctx context.Context) (*Machine, error) {
var (
err error
node *Machine
)
muo.defaults()
if len(muo.hooks) == 0 {
if err = muo.check(); err != nil {
return nil, err
}
node, err = muo.sqlSave(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*MachineMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
if err = muo.check(); err != nil {
return nil, err
}
muo.mutation = mutation
node, err = muo.sqlSave(ctx)
mutation.done = true
return node, err
})
for i := len(muo.hooks) - 1; i >= 0; i-- {
if muo.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = muo.hooks[i](mut)
}
v, err := mut.Mutate(ctx, muo.mutation)
if err != nil {
return nil, err
}
nv, ok := v.(*Machine)
if !ok {
return nil, fmt.Errorf("unexpected node type %T returned from MachineMutation", v)
}
node = nv
}
return node, err
return withHooks(ctx, muo.sqlSave, muo.mutation, muo.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@ -832,16 +683,10 @@ func (muo *MachineUpdateOne) check() error {
}
func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err error) {
_spec := &sqlgraph.UpdateSpec{
Node: &sqlgraph.NodeSpec{
Table: machine.Table,
Columns: machine.Columns,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: machine.FieldID,
},
},
if err := muo.check(); err != nil {
return _node, err
}
_spec := sqlgraph.NewUpdateSpec(machine.Table, machine.Columns, sqlgraph.NewFieldSpec(machine.FieldID, field.TypeInt))
id, ok := muo.mutation.ID()
if !ok {
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Machine.id" for update`)}
@ -867,130 +712,61 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e
}
}
if value, ok := muo.mutation.CreatedAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: machine.FieldCreatedAt,
})
_spec.SetField(machine.FieldCreatedAt, field.TypeTime, value)
}
if muo.mutation.CreatedAtCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: machine.FieldCreatedAt,
})
_spec.ClearField(machine.FieldCreatedAt, field.TypeTime)
}
if value, ok := muo.mutation.UpdatedAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: machine.FieldUpdatedAt,
})
_spec.SetField(machine.FieldUpdatedAt, field.TypeTime, value)
}
if muo.mutation.UpdatedAtCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: machine.FieldUpdatedAt,
})
_spec.ClearField(machine.FieldUpdatedAt, field.TypeTime)
}
if value, ok := muo.mutation.LastPush(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: machine.FieldLastPush,
})
_spec.SetField(machine.FieldLastPush, field.TypeTime, value)
}
if muo.mutation.LastPushCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: machine.FieldLastPush,
})
_spec.ClearField(machine.FieldLastPush, field.TypeTime)
}
if value, ok := muo.mutation.LastHeartbeat(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: machine.FieldLastHeartbeat,
})
_spec.SetField(machine.FieldLastHeartbeat, field.TypeTime, value)
}
if muo.mutation.LastHeartbeatCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: machine.FieldLastHeartbeat,
})
_spec.ClearField(machine.FieldLastHeartbeat, field.TypeTime)
}
if value, ok := muo.mutation.MachineId(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: machine.FieldMachineId,
})
_spec.SetField(machine.FieldMachineId, field.TypeString, value)
}
if value, ok := muo.mutation.Password(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: machine.FieldPassword,
})
_spec.SetField(machine.FieldPassword, field.TypeString, value)
}
if value, ok := muo.mutation.IpAddress(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: machine.FieldIpAddress,
})
_spec.SetField(machine.FieldIpAddress, field.TypeString, value)
}
if value, ok := muo.mutation.Scenarios(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: machine.FieldScenarios,
})
_spec.SetField(machine.FieldScenarios, field.TypeString, value)
}
if muo.mutation.ScenariosCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeString,
Column: machine.FieldScenarios,
})
_spec.ClearField(machine.FieldScenarios, field.TypeString)
}
if value, ok := muo.mutation.Version(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: machine.FieldVersion,
})
_spec.SetField(machine.FieldVersion, field.TypeString, value)
}
if muo.mutation.VersionCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeString,
Column: machine.FieldVersion,
})
_spec.ClearField(machine.FieldVersion, field.TypeString)
}
if value, ok := muo.mutation.IsValidated(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeBool,
Value: value,
Column: machine.FieldIsValidated,
})
_spec.SetField(machine.FieldIsValidated, field.TypeBool, value)
}
if value, ok := muo.mutation.Status(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: machine.FieldStatus,
})
_spec.SetField(machine.FieldStatus, field.TypeString, value)
}
if muo.mutation.StatusCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeString,
Column: machine.FieldStatus,
})
_spec.ClearField(machine.FieldStatus, field.TypeString)
}
if value, ok := muo.mutation.AuthType(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: machine.FieldAuthType,
})
_spec.SetField(machine.FieldAuthType, field.TypeString, value)
}
if muo.mutation.AlertsCleared() {
edge := &sqlgraph.EdgeSpec{
@ -1000,10 +776,7 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e
Columns: []string{machine.AlertsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: alert.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@ -1016,10 +789,7 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e
Columns: []string{machine.AlertsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: alert.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
@ -1035,10 +805,7 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e
Columns: []string{machine.AlertsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: alert.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
@ -1057,5 +824,6 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e
}
return nil, err
}
muo.mutation.done = true
return _node, nil
}
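
MachineUpdateOne also gains Where, which lets an update-by-ID apply only while an extra predicate still holds. A hedged sketch (markValidated is an illustrative name):

// Assumes: "context", "github.com/crowdsecurity/crowdsec/pkg/database/ent",
// "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine".
func markValidated(ctx context.Context, client *ent.Client, id int) (*ent.Machine, error) {
	// If the predicate no longer matches, Save returns a *NotFoundError.
	return client.Machine.UpdateOneID(id).
		Where(machine.IsValidated(false)).
		SetIsValidated(true).
		Save(ctx)
}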


@ -7,6 +7,7 @@ import (
"strings"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/alert"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/meta"
@ -29,7 +30,8 @@ type Meta struct {
AlertMetas int `json:"alert_metas,omitempty"`
// Edges holds the relations/edges for other nodes in the graph.
// The values are being populated by the MetaQuery when eager-loading is set.
Edges MetaEdges `json:"edges"`
Edges MetaEdges `json:"edges"`
selectValues sql.SelectValues
}
// MetaEdges holds the relations/edges for other nodes in the graph.
@ -66,7 +68,7 @@ func (*Meta) scanValues(columns []string) ([]any, error) {
case meta.FieldCreatedAt, meta.FieldUpdatedAt:
values[i] = new(sql.NullTime)
default:
return nil, fmt.Errorf("unexpected column %q for type Meta", columns[i])
values[i] = new(sql.UnknownType)
}
}
return values, nil
@ -118,21 +120,29 @@ func (m *Meta) assignValues(columns []string, values []any) error {
} else if value.Valid {
m.AlertMetas = int(value.Int64)
}
default:
m.selectValues.Set(columns[i], values[i])
}
}
return nil
}
// GetValue returns the ent.Value that was dynamically selected and assigned to the Meta.
// This includes values selected through modifiers, order, etc.
func (m *Meta) GetValue(name string) (ent.Value, error) {
return m.selectValues.Get(name)
}
// QueryOwner queries the "owner" edge of the Meta entity.
func (m *Meta) QueryOwner() *AlertQuery {
return (&MetaClient{config: m.config}).QueryOwner(m)
return NewMetaClient(m.config).QueryOwner(m)
}
// Update returns a builder for updating this Meta.
// Note that you need to call Meta.Unwrap() before calling this method if this Meta
// was returned from a transaction, and the transaction was committed or rolled back.
func (m *Meta) Update() *MetaUpdateOne {
return (&MetaClient{config: m.config}).UpdateOne(m)
return NewMetaClient(m.config).UpdateOne(m)
}
// Unwrap unwraps the Meta entity that was returned from a transaction after it was closed,
@ -175,9 +185,3 @@ func (m *Meta) String() string {
// MetaSlice is a parsable slice of Meta.
type MetaSlice []*Meta
func (m MetaSlice) config(cfg config) {
for _i := range m {
m[_i].config = cfg
}
}


@ -4,6 +4,9 @@ package meta
import (
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
)
const (
@ -66,3 +69,50 @@ var (
// ValueValidator is a validator for the "value" field. It is called by the builders before save.
ValueValidator func(string) error
)
// OrderOption defines the ordering options for the Meta queries.
type OrderOption func(*sql.Selector)
// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldID, opts...).ToFunc()
}
// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}
// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}
// ByKey orders the results by the key field.
func ByKey(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldKey, opts...).ToFunc()
}
// ByValue orders the results by the value field.
func ByValue(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldValue, opts...).ToFunc()
}
// ByAlertMetas orders the results by the alert_metas field.
func ByAlertMetas(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldAlertMetas, opts...).ToFunc()
}
// ByOwnerField orders the results by owner field.
func ByOwnerField(field string, opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newOwnerStep(), sql.OrderByField(field, opts...))
}
}
func newOwnerStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(OwnerInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),
)
}
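
The meta package gets the same ordering helpers, including ordering by a column of the owning Alert. A hedged sketch, assuming MetaQuery.Order was regenerated to accept meta.OrderOption like the Machine query above (latestMetas is an illustrative name; entsql aliases entgo.io/ent/dialect/sql):

// Assumes: "context", entsql "entgo.io/ent/dialect/sql",
// "github.com/crowdsecurity/crowdsec/pkg/database/ent",
// "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert",
// "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta".
func latestMetas(ctx context.Context, client *ent.Client) ([]*ent.Meta, error) {
	return client.Meta.Query().
		Order(
			meta.ByOwnerField(alert.FieldCreatedAt, entsql.OrderDesc()), // newest owning alert first
			meta.ByKey(), // then alphabetically by key
		).
		All(ctx)
}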


@ -12,512 +12,332 @@ import (
// ID filters vertices based on their ID field.
func ID(id int) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldID), id))
})
return predicate.Meta(sql.FieldEQ(FieldID, id))
}
// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldID), id))
})
return predicate.Meta(sql.FieldEQ(FieldID, id))
}
// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldID), id))
})
return predicate.Meta(sql.FieldNEQ(FieldID, id))
}
// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
v := make([]any, len(ids))
for i := range v {
v[i] = ids[i]
}
s.Where(sql.In(s.C(FieldID), v...))
})
return predicate.Meta(sql.FieldIn(FieldID, ids...))
}
// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
v := make([]any, len(ids))
for i := range v {
v[i] = ids[i]
}
s.Where(sql.NotIn(s.C(FieldID), v...))
})
return predicate.Meta(sql.FieldNotIn(FieldID, ids...))
}
// IDGT applies the GT predicate on the ID field.
func IDGT(id int) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldID), id))
})
return predicate.Meta(sql.FieldGT(FieldID, id))
}
// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldID), id))
})
return predicate.Meta(sql.FieldGTE(FieldID, id))
}
// IDLT applies the LT predicate on the ID field.
func IDLT(id int) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldID), id))
})
return predicate.Meta(sql.FieldLT(FieldID, id))
}
// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldID), id))
})
return predicate.Meta(sql.FieldLTE(FieldID, id))
}
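
The predicate bodies are now one-liners over the sql.Field* helpers, but callers combine them exactly as before. A hedged sketch (recentMetas and the key are illustrative):

// Assumes: "context", "time", "github.com/crowdsecurity/crowdsec/pkg/database/ent",
// "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta".
func recentMetas(ctx context.Context, client *ent.Client) ([]*ent.Meta, error) {
	return client.Meta.Query().
		Where(
			meta.KeyEQ("example-key"),                       // illustrative key
			meta.CreatedAtGT(time.Now().Add(-24*time.Hour)), // created in the last day
		).
		All(ctx)
}
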
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldCreatedAt), v))
})
return predicate.Meta(sql.FieldEQ(FieldCreatedAt, v))
}
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
})
return predicate.Meta(sql.FieldEQ(FieldUpdatedAt, v))
}
// Key applies equality check predicate on the "key" field. It's identical to KeyEQ.
func Key(v string) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldKey), v))
})
return predicate.Meta(sql.FieldEQ(FieldKey, v))
}
// Value applies equality check predicate on the "value" field. It's identical to ValueEQ.
func Value(v string) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldValue), v))
})
return predicate.Meta(sql.FieldEQ(FieldValue, v))
}
// AlertMetas applies equality check predicate on the "alert_metas" field. It's identical to AlertMetasEQ.
func AlertMetas(v int) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldAlertMetas), v))
})
return predicate.Meta(sql.FieldEQ(FieldAlertMetas, v))
}
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldCreatedAt), v))
})
return predicate.Meta(sql.FieldEQ(FieldCreatedAt, v))
}
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldCreatedAt), v))
})
return predicate.Meta(sql.FieldNEQ(FieldCreatedAt, v))
}
// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.Meta {
v := make([]any, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.In(s.C(FieldCreatedAt), v...))
})
return predicate.Meta(sql.FieldIn(FieldCreatedAt, vs...))
}
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.Meta {
v := make([]any, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.NotIn(s.C(FieldCreatedAt), v...))
})
return predicate.Meta(sql.FieldNotIn(FieldCreatedAt, vs...))
}
// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldCreatedAt), v))
})
return predicate.Meta(sql.FieldGT(FieldCreatedAt, v))
}
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldCreatedAt), v))
})
return predicate.Meta(sql.FieldGTE(FieldCreatedAt, v))
}
// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldCreatedAt), v))
})
return predicate.Meta(sql.FieldLT(FieldCreatedAt, v))
}
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldCreatedAt), v))
})
return predicate.Meta(sql.FieldLTE(FieldCreatedAt, v))
}
// CreatedAtIsNil applies the IsNil predicate on the "created_at" field.
func CreatedAtIsNil() predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.IsNull(s.C(FieldCreatedAt)))
})
return predicate.Meta(sql.FieldIsNull(FieldCreatedAt))
}
// CreatedAtNotNil applies the NotNil predicate on the "created_at" field.
func CreatedAtNotNil() predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.NotNull(s.C(FieldCreatedAt)))
})
return predicate.Meta(sql.FieldNotNull(FieldCreatedAt))
}
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
})
return predicate.Meta(sql.FieldEQ(FieldUpdatedAt, v))
}
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldUpdatedAt), v))
})
return predicate.Meta(sql.FieldNEQ(FieldUpdatedAt, v))
}
// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.Meta {
v := make([]any, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.In(s.C(FieldUpdatedAt), v...))
})
return predicate.Meta(sql.FieldIn(FieldUpdatedAt, vs...))
}
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.Meta {
v := make([]any, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...))
})
return predicate.Meta(sql.FieldNotIn(FieldUpdatedAt, vs...))
}
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldUpdatedAt), v))
})
return predicate.Meta(sql.FieldGT(FieldUpdatedAt, v))
}
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldUpdatedAt), v))
})
return predicate.Meta(sql.FieldGTE(FieldUpdatedAt, v))
}
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldUpdatedAt), v))
})
return predicate.Meta(sql.FieldLT(FieldUpdatedAt, v))
}
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldUpdatedAt), v))
})
return predicate.Meta(sql.FieldLTE(FieldUpdatedAt, v))
}
// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field.
func UpdatedAtIsNil() predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.IsNull(s.C(FieldUpdatedAt)))
})
return predicate.Meta(sql.FieldIsNull(FieldUpdatedAt))
}
// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field.
func UpdatedAtNotNil() predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.NotNull(s.C(FieldUpdatedAt)))
})
return predicate.Meta(sql.FieldNotNull(FieldUpdatedAt))
}
// KeyEQ applies the EQ predicate on the "key" field.
func KeyEQ(v string) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldKey), v))
})
return predicate.Meta(sql.FieldEQ(FieldKey, v))
}
// KeyNEQ applies the NEQ predicate on the "key" field.
func KeyNEQ(v string) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldKey), v))
})
return predicate.Meta(sql.FieldNEQ(FieldKey, v))
}
// KeyIn applies the In predicate on the "key" field.
func KeyIn(vs ...string) predicate.Meta {
v := make([]any, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.In(s.C(FieldKey), v...))
})
return predicate.Meta(sql.FieldIn(FieldKey, vs...))
}
// KeyNotIn applies the NotIn predicate on the "key" field.
func KeyNotIn(vs ...string) predicate.Meta {
v := make([]any, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.NotIn(s.C(FieldKey), v...))
})
return predicate.Meta(sql.FieldNotIn(FieldKey, vs...))
}
// KeyGT applies the GT predicate on the "key" field.
func KeyGT(v string) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldKey), v))
})
return predicate.Meta(sql.FieldGT(FieldKey, v))
}
// KeyGTE applies the GTE predicate on the "key" field.
func KeyGTE(v string) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldKey), v))
})
return predicate.Meta(sql.FieldGTE(FieldKey, v))
}
// KeyLT applies the LT predicate on the "key" field.
func KeyLT(v string) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldKey), v))
})
return predicate.Meta(sql.FieldLT(FieldKey, v))
}
// KeyLTE applies the LTE predicate on the "key" field.
func KeyLTE(v string) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldKey), v))
})
return predicate.Meta(sql.FieldLTE(FieldKey, v))
}
// KeyContains applies the Contains predicate on the "key" field.
func KeyContains(v string) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.Contains(s.C(FieldKey), v))
})
return predicate.Meta(sql.FieldContains(FieldKey, v))
}
// KeyHasPrefix applies the HasPrefix predicate on the "key" field.
func KeyHasPrefix(v string) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.HasPrefix(s.C(FieldKey), v))
})
return predicate.Meta(sql.FieldHasPrefix(FieldKey, v))
}
// KeyHasSuffix applies the HasSuffix predicate on the "key" field.
func KeyHasSuffix(v string) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.HasSuffix(s.C(FieldKey), v))
})
return predicate.Meta(sql.FieldHasSuffix(FieldKey, v))
}
// KeyEqualFold applies the EqualFold predicate on the "key" field.
func KeyEqualFold(v string) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.EqualFold(s.C(FieldKey), v))
})
return predicate.Meta(sql.FieldEqualFold(FieldKey, v))
}
// KeyContainsFold applies the ContainsFold predicate on the "key" field.
func KeyContainsFold(v string) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.ContainsFold(s.C(FieldKey), v))
})
return predicate.Meta(sql.FieldContainsFold(FieldKey, v))
}
// ValueEQ applies the EQ predicate on the "value" field.
func ValueEQ(v string) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldValue), v))
})
return predicate.Meta(sql.FieldEQ(FieldValue, v))
}
// ValueNEQ applies the NEQ predicate on the "value" field.
func ValueNEQ(v string) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldValue), v))
})
return predicate.Meta(sql.FieldNEQ(FieldValue, v))
}
// ValueIn applies the In predicate on the "value" field.
func ValueIn(vs ...string) predicate.Meta {
v := make([]any, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.In(s.C(FieldValue), v...))
})
return predicate.Meta(sql.FieldIn(FieldValue, vs...))
}
// ValueNotIn applies the NotIn predicate on the "value" field.
func ValueNotIn(vs ...string) predicate.Meta {
v := make([]any, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.NotIn(s.C(FieldValue), v...))
})
return predicate.Meta(sql.FieldNotIn(FieldValue, vs...))
}
// ValueGT applies the GT predicate on the "value" field.
func ValueGT(v string) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldValue), v))
})
return predicate.Meta(sql.FieldGT(FieldValue, v))
}
// ValueGTE applies the GTE predicate on the "value" field.
func ValueGTE(v string) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldValue), v))
})
return predicate.Meta(sql.FieldGTE(FieldValue, v))
}
// ValueLT applies the LT predicate on the "value" field.
func ValueLT(v string) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldValue), v))
})
return predicate.Meta(sql.FieldLT(FieldValue, v))
}
// ValueLTE applies the LTE predicate on the "value" field.
func ValueLTE(v string) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldValue), v))
})
return predicate.Meta(sql.FieldLTE(FieldValue, v))
}
// ValueContains applies the Contains predicate on the "value" field.
func ValueContains(v string) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.Contains(s.C(FieldValue), v))
})
return predicate.Meta(sql.FieldContains(FieldValue, v))
}
// ValueHasPrefix applies the HasPrefix predicate on the "value" field.
func ValueHasPrefix(v string) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.HasPrefix(s.C(FieldValue), v))
})
return predicate.Meta(sql.FieldHasPrefix(FieldValue, v))
}
// ValueHasSuffix applies the HasSuffix predicate on the "value" field.
func ValueHasSuffix(v string) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.HasSuffix(s.C(FieldValue), v))
})
return predicate.Meta(sql.FieldHasSuffix(FieldValue, v))
}
// ValueEqualFold applies the EqualFold predicate on the "value" field.
func ValueEqualFold(v string) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.EqualFold(s.C(FieldValue), v))
})
return predicate.Meta(sql.FieldEqualFold(FieldValue, v))
}
// ValueContainsFold applies the ContainsFold predicate on the "value" field.
func ValueContainsFold(v string) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.ContainsFold(s.C(FieldValue), v))
})
return predicate.Meta(sql.FieldContainsFold(FieldValue, v))
}
// AlertMetasEQ applies the EQ predicate on the "alert_metas" field.
func AlertMetasEQ(v int) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldAlertMetas), v))
})
return predicate.Meta(sql.FieldEQ(FieldAlertMetas, v))
}
// AlertMetasNEQ applies the NEQ predicate on the "alert_metas" field.
func AlertMetasNEQ(v int) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldAlertMetas), v))
})
return predicate.Meta(sql.FieldNEQ(FieldAlertMetas, v))
}
// AlertMetasIn applies the In predicate on the "alert_metas" field.
func AlertMetasIn(vs ...int) predicate.Meta {
v := make([]any, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.In(s.C(FieldAlertMetas), v...))
})
return predicate.Meta(sql.FieldIn(FieldAlertMetas, vs...))
}
// AlertMetasNotIn applies the NotIn predicate on the "alert_metas" field.
func AlertMetasNotIn(vs ...int) predicate.Meta {
v := make([]any, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.NotIn(s.C(FieldAlertMetas), v...))
})
return predicate.Meta(sql.FieldNotIn(FieldAlertMetas, vs...))
}
// AlertMetasIsNil applies the IsNil predicate on the "alert_metas" field.
func AlertMetasIsNil() predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.IsNull(s.C(FieldAlertMetas)))
})
return predicate.Meta(sql.FieldIsNull(FieldAlertMetas))
}
// AlertMetasNotNil applies the NotNil predicate on the "alert_metas" field.
func AlertMetasNotNil() predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s.Where(sql.NotNull(s.C(FieldAlertMetas)))
})
return predicate.Meta(sql.FieldNotNull(FieldAlertMetas))
}
// HasOwner applies the HasEdge predicate on the "owner" edge.
@ -525,7 +345,6 @@ func HasOwner() predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(OwnerTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),
)
sqlgraph.HasNeighbors(s, step)
@ -535,11 +354,7 @@ func HasOwner() predicate.Meta {
// HasOwnerWith applies the HasEdge predicate on the "owner" edge with given conditions (other predicates).
func HasOwnerWith(preds ...predicate.Alert) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(OwnerInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn),
)
step := newOwnerStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@ -550,32 +365,15 @@ func HasOwnerWith(preds ...predicate.Alert) predicate.Meta {
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.Meta) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s1 := s.Clone().SetP(nil)
for _, p := range predicates {
p(s1)
}
s.Where(s1.P())
})
return predicate.Meta(sql.AndPredicates(predicates...))
}
// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.Meta) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
s1 := s.Clone().SetP(nil)
for i, p := range predicates {
if i > 0 {
s1.Or()
}
p(s1)
}
s.Where(s1.P())
})
return predicate.Meta(sql.OrPredicates(predicates...))
}
// Not applies the not operator on the given predicate.
func Not(p predicate.Meta) predicate.Meta {
return predicate.Meta(func(s *sql.Selector) {
p(s.Not())
})
return predicate.Meta(sql.NotPredicates(p))
}
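
The predicate helpers above now delegate to the sql.Field* and sql.AndPredicates/OrPredicates/NotPredicates helpers instead of building each selector inline; the generated API and its behavior are unchanged, so existing call sites compile as-is. A hedged sketch, assuming client and ctx exist:

// Sketch only: call sites keep the same shape after the regeneration.
// HasOwner, Or, KeyEQ and ValueContainsFold are the generated predicates above.
func sourceCountryMetas(ctx context.Context, client *ent.Client) ([]*ent.Meta, error) {
	return client.Meta.Query().
		Where(
			meta.HasOwner(),
			meta.Or(
				meta.KeyEQ("country"),
				meta.ValueContainsFold("fr"),
			),
		).
		All(ctx)
}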


@ -101,50 +101,8 @@ func (mc *MetaCreate) Mutation() *MetaMutation {
// Save creates the Meta in the database.
func (mc *MetaCreate) Save(ctx context.Context) (*Meta, error) {
var (
err error
node *Meta
)
mc.defaults()
if len(mc.hooks) == 0 {
if err = mc.check(); err != nil {
return nil, err
}
node, err = mc.sqlSave(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*MetaMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
if err = mc.check(); err != nil {
return nil, err
}
mc.mutation = mutation
if node, err = mc.sqlSave(ctx); err != nil {
return nil, err
}
mutation.id = &node.ID
mutation.done = true
return node, err
})
for i := len(mc.hooks) - 1; i >= 0; i-- {
if mc.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = mc.hooks[i](mut)
}
v, err := mut.Mutate(ctx, mc.mutation)
if err != nil {
return nil, err
}
nv, ok := v.(*Meta)
if !ok {
return nil, fmt.Errorf("unexpected node type %T returned from MetaMutation", v)
}
node = nv
}
return node, err
return withHooks(ctx, mc.sqlSave, mc.mutation, mc.hooks)
}
// SaveX calls Save and panics if Save returns an error.
@ -198,6 +156,9 @@ func (mc *MetaCreate) check() error {
}
func (mc *MetaCreate) sqlSave(ctx context.Context) (*Meta, error) {
if err := mc.check(); err != nil {
return nil, err
}
_node, _spec := mc.createSpec()
if err := sqlgraph.CreateNode(ctx, mc.driver, _spec); err != nil {
if sqlgraph.IsConstraintError(err) {
@ -207,50 +168,30 @@ func (mc *MetaCreate) sqlSave(ctx context.Context) (*Meta, error) {
}
id := _spec.ID.Value.(int64)
_node.ID = int(id)
mc.mutation.id = &_node.ID
mc.mutation.done = true
return _node, nil
}
func (mc *MetaCreate) createSpec() (*Meta, *sqlgraph.CreateSpec) {
var (
_node = &Meta{config: mc.config}
_spec = &sqlgraph.CreateSpec{
Table: meta.Table,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: meta.FieldID,
},
}
_spec = sqlgraph.NewCreateSpec(meta.Table, sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt))
)
if value, ok := mc.mutation.CreatedAt(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: meta.FieldCreatedAt,
})
_spec.SetField(meta.FieldCreatedAt, field.TypeTime, value)
_node.CreatedAt = &value
}
if value, ok := mc.mutation.UpdatedAt(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: meta.FieldUpdatedAt,
})
_spec.SetField(meta.FieldUpdatedAt, field.TypeTime, value)
_node.UpdatedAt = &value
}
if value, ok := mc.mutation.Key(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: meta.FieldKey,
})
_spec.SetField(meta.FieldKey, field.TypeString, value)
_node.Key = value
}
if value, ok := mc.mutation.Value(); ok {
_spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: meta.FieldValue,
})
_spec.SetField(meta.FieldValue, field.TypeString, value)
_node.Value = value
}
if nodes := mc.mutation.OwnerIDs(); len(nodes) > 0 {
@ -261,10 +202,7 @@ func (mc *MetaCreate) createSpec() (*Meta, *sqlgraph.CreateSpec) {
Columns: []string{meta.OwnerColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: alert.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
@ -279,11 +217,15 @@ func (mc *MetaCreate) createSpec() (*Meta, *sqlgraph.CreateSpec) {
// MetaCreateBulk is the builder for creating many Meta entities in bulk.
type MetaCreateBulk struct {
config
err error
builders []*MetaCreate
}
// Save creates the Meta entities in the database.
func (mcb *MetaCreateBulk) Save(ctx context.Context) ([]*Meta, error) {
if mcb.err != nil {
return nil, mcb.err
}
specs := make([]*sqlgraph.CreateSpec, len(mcb.builders))
nodes := make([]*Meta, len(mcb.builders))
mutators := make([]Mutator, len(mcb.builders))
@ -300,8 +242,8 @@ func (mcb *MetaCreateBulk) Save(ctx context.Context) ([]*Meta, error) {
return nil, err
}
builder.mutation = mutation
nodes[i], specs[i] = builder.createSpec()
var err error
nodes[i], specs[i] = builder.createSpec()
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, mcb.builders[i+1].mutation)
} else {
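
MetaCreate.Save now funnels through the shared withHooks helper, and createSpec builds its spec with sqlgraph.NewCreateSpec and SetField; the public builder API is untouched. A sketch of creating a Meta attached to an existing alert, where the SetKey/SetValue/SetOwnerID setter names are assumed (only the mutation getters appear in this hunk):

// Sketch only: setter names follow the standard ent codegen pattern and are
// assumed here; client and ctx are assumed as well.
func attachMeta(ctx context.Context, client *ent.Client, alertID int) (*ent.Meta, error) {
	return client.Meta.Create().
		SetKey("source_ip").
		SetValue("192.0.2.1").
		SetOwnerID(alertID).
		Save(ctx)
}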


@ -4,7 +4,6 @@ package ent
import (
"context"
"fmt"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
@ -28,34 +27,7 @@ func (md *MetaDelete) Where(ps ...predicate.Meta) *MetaDelete {
// Exec executes the deletion query and returns how many vertices were deleted.
func (md *MetaDelete) Exec(ctx context.Context) (int, error) {
var (
err error
affected int
)
if len(md.hooks) == 0 {
affected, err = md.sqlExec(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*MetaMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
md.mutation = mutation
affected, err = md.sqlExec(ctx)
mutation.done = true
return affected, err
})
for i := len(md.hooks) - 1; i >= 0; i-- {
if md.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = md.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, md.mutation); err != nil {
return 0, err
}
}
return affected, err
return withHooks(ctx, md.sqlExec, md.mutation, md.hooks)
}
// ExecX is like Exec, but panics if an error occurs.
@ -68,15 +40,7 @@ func (md *MetaDelete) ExecX(ctx context.Context) int {
}
func (md *MetaDelete) sqlExec(ctx context.Context) (int, error) {
_spec := &sqlgraph.DeleteSpec{
Node: &sqlgraph.NodeSpec{
Table: meta.Table,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: meta.FieldID,
},
},
}
_spec := sqlgraph.NewDeleteSpec(meta.Table, sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt))
if ps := md.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@ -88,6 +52,7 @@ func (md *MetaDelete) sqlExec(ctx context.Context) (int, error) {
if err != nil && sqlgraph.IsConstraintError(err) {
err = &ConstraintError{msg: err.Error(), wrap: err}
}
md.mutation.done = true
return affected, err
}
@ -96,6 +61,12 @@ type MetaDeleteOne struct {
md *MetaDelete
}
// Where appends a list of predicates to the MetaDelete builder.
func (mdo *MetaDeleteOne) Where(ps ...predicate.Meta) *MetaDeleteOne {
mdo.md.mutation.Where(ps...)
return mdo
}
// Exec executes the deletion query.
func (mdo *MetaDeleteOne) Exec(ctx context.Context) error {
n, err := mdo.md.Exec(ctx)
@ -111,5 +82,7 @@ func (mdo *MetaDeleteOne) Exec(ctx context.Context) error {
// ExecX is like Exec, but panics if an error occurs.
func (mdo *MetaDeleteOne) ExecX(ctx context.Context) {
mdo.md.ExecX(ctx)
if err := mdo.Exec(ctx); err != nil {
panic(err)
}
}
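
MetaDelete.Exec likewise goes through withHooks, and MetaDeleteOne gains its own Where while ExecX now delegates to Exec. A sketch of a bulk prune, assuming client, ctx and a time import:

// Sketch only: bulk-delete Meta rows whose updated_at is older than 30 days.
// meta.UpdatedAtLT is the generated predicate shown earlier in this diff.
func pruneOldMetas(ctx context.Context, client *ent.Client) (int, error) {
	return client.Meta.Delete().
		Where(meta.UpdatedAtLT(time.Now().AddDate(0, 0, -30))).
		Exec(ctx)
}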


@ -18,11 +18,9 @@ import (
// MetaQuery is the builder for querying Meta entities.
type MetaQuery struct {
config
limit *int
offset *int
unique *bool
order []OrderFunc
fields []string
ctx *QueryContext
order []meta.OrderOption
inters []Interceptor
predicates []predicate.Meta
withOwner *AlertQuery
// intermediate query (i.e. traversal path).
@ -36,34 +34,34 @@ func (mq *MetaQuery) Where(ps ...predicate.Meta) *MetaQuery {
return mq
}
// Limit adds a limit step to the query.
// Limit the number of records to be returned by this query.
func (mq *MetaQuery) Limit(limit int) *MetaQuery {
mq.limit = &limit
mq.ctx.Limit = &limit
return mq
}
// Offset adds an offset step to the query.
// Offset to start from.
func (mq *MetaQuery) Offset(offset int) *MetaQuery {
mq.offset = &offset
mq.ctx.Offset = &offset
return mq
}
// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (mq *MetaQuery) Unique(unique bool) *MetaQuery {
mq.unique = &unique
mq.ctx.Unique = &unique
return mq
}
// Order adds an order step to the query.
func (mq *MetaQuery) Order(o ...OrderFunc) *MetaQuery {
// Order specifies how the records should be ordered.
func (mq *MetaQuery) Order(o ...meta.OrderOption) *MetaQuery {
mq.order = append(mq.order, o...)
return mq
}
// QueryOwner chains the current query on the "owner" edge.
func (mq *MetaQuery) QueryOwner() *AlertQuery {
query := &AlertQuery{config: mq.config}
query := (&AlertClient{config: mq.config}).Query()
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
if err := mq.prepareQuery(ctx); err != nil {
return nil, err
@ -86,7 +84,7 @@ func (mq *MetaQuery) QueryOwner() *AlertQuery {
// First returns the first Meta entity from the query.
// Returns a *NotFoundError when no Meta was found.
func (mq *MetaQuery) First(ctx context.Context) (*Meta, error) {
nodes, err := mq.Limit(1).All(ctx)
nodes, err := mq.Limit(1).All(setContextOp(ctx, mq.ctx, "First"))
if err != nil {
return nil, err
}
@ -109,7 +107,7 @@ func (mq *MetaQuery) FirstX(ctx context.Context) *Meta {
// Returns a *NotFoundError when no Meta ID was found.
func (mq *MetaQuery) FirstID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = mq.Limit(1).IDs(ctx); err != nil {
if ids, err = mq.Limit(1).IDs(setContextOp(ctx, mq.ctx, "FirstID")); err != nil {
return
}
if len(ids) == 0 {
@ -132,7 +130,7 @@ func (mq *MetaQuery) FirstIDX(ctx context.Context) int {
// Returns a *NotSingularError when more than one Meta entity is found.
// Returns a *NotFoundError when no Meta entities are found.
func (mq *MetaQuery) Only(ctx context.Context) (*Meta, error) {
nodes, err := mq.Limit(2).All(ctx)
nodes, err := mq.Limit(2).All(setContextOp(ctx, mq.ctx, "Only"))
if err != nil {
return nil, err
}
@ -160,7 +158,7 @@ func (mq *MetaQuery) OnlyX(ctx context.Context) *Meta {
// Returns a *NotFoundError when no entities are found.
func (mq *MetaQuery) OnlyID(ctx context.Context) (id int, err error) {
var ids []int
if ids, err = mq.Limit(2).IDs(ctx); err != nil {
if ids, err = mq.Limit(2).IDs(setContextOp(ctx, mq.ctx, "OnlyID")); err != nil {
return
}
switch len(ids) {
@ -185,10 +183,12 @@ func (mq *MetaQuery) OnlyIDX(ctx context.Context) int {
// All executes the query and returns a list of MetaSlice.
func (mq *MetaQuery) All(ctx context.Context) ([]*Meta, error) {
ctx = setContextOp(ctx, mq.ctx, "All")
if err := mq.prepareQuery(ctx); err != nil {
return nil, err
}
return mq.sqlAll(ctx)
qr := querierAll[[]*Meta, *MetaQuery]()
return withInterceptors[[]*Meta](ctx, mq, qr, mq.inters)
}
// AllX is like All, but panics if an error occurs.
@ -201,9 +201,12 @@ func (mq *MetaQuery) AllX(ctx context.Context) []*Meta {
}
// IDs executes the query and returns a list of Meta IDs.
func (mq *MetaQuery) IDs(ctx context.Context) ([]int, error) {
var ids []int
if err := mq.Select(meta.FieldID).Scan(ctx, &ids); err != nil {
func (mq *MetaQuery) IDs(ctx context.Context) (ids []int, err error) {
if mq.ctx.Unique == nil && mq.path != nil {
mq.Unique(true)
}
ctx = setContextOp(ctx, mq.ctx, "IDs")
if err = mq.Select(meta.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
@ -220,10 +223,11 @@ func (mq *MetaQuery) IDsX(ctx context.Context) []int {
// Count returns the count of the given query.
func (mq *MetaQuery) Count(ctx context.Context) (int, error) {
ctx = setContextOp(ctx, mq.ctx, "Count")
if err := mq.prepareQuery(ctx); err != nil {
return 0, err
}
return mq.sqlCount(ctx)
return withInterceptors[int](ctx, mq, querierCount[*MetaQuery](), mq.inters)
}
// CountX is like Count, but panics if an error occurs.
@ -237,10 +241,15 @@ func (mq *MetaQuery) CountX(ctx context.Context) int {
// Exist returns true if the query has elements in the graph.
func (mq *MetaQuery) Exist(ctx context.Context) (bool, error) {
if err := mq.prepareQuery(ctx); err != nil {
return false, err
ctx = setContextOp(ctx, mq.ctx, "Exist")
switch _, err := mq.FirstID(ctx); {
case IsNotFound(err):
return false, nil
case err != nil:
return false, fmt.Errorf("ent: check existence: %w", err)
default:
return true, nil
}
return mq.sqlExist(ctx)
}
// ExistX is like Exist, but panics if an error occurs.
@ -260,22 +269,21 @@ func (mq *MetaQuery) Clone() *MetaQuery {
}
return &MetaQuery{
config: mq.config,
limit: mq.limit,
offset: mq.offset,
order: append([]OrderFunc{}, mq.order...),
ctx: mq.ctx.Clone(),
order: append([]meta.OrderOption{}, mq.order...),
inters: append([]Interceptor{}, mq.inters...),
predicates: append([]predicate.Meta{}, mq.predicates...),
withOwner: mq.withOwner.Clone(),
// clone intermediate query.
sql: mq.sql.Clone(),
path: mq.path,
unique: mq.unique,
sql: mq.sql.Clone(),
path: mq.path,
}
}
// WithOwner tells the query-builder to eager-load the nodes that are connected to
// the "owner" edge. The optional arguments are used to configure the query builder of the edge.
func (mq *MetaQuery) WithOwner(opts ...func(*AlertQuery)) *MetaQuery {
query := &AlertQuery{config: mq.config}
query := (&AlertClient{config: mq.config}).Query()
for _, opt := range opts {
opt(query)
}
@ -298,16 +306,11 @@ func (mq *MetaQuery) WithOwner(opts ...func(*AlertQuery)) *MetaQuery {
// Aggregate(ent.Count()).
// Scan(ctx, &v)
func (mq *MetaQuery) GroupBy(field string, fields ...string) *MetaGroupBy {
grbuild := &MetaGroupBy{config: mq.config}
grbuild.fields = append([]string{field}, fields...)
grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) {
if err := mq.prepareQuery(ctx); err != nil {
return nil, err
}
return mq.sqlQuery(ctx), nil
}
mq.ctx.Fields = append([]string{field}, fields...)
grbuild := &MetaGroupBy{build: mq}
grbuild.flds = &mq.ctx.Fields
grbuild.label = meta.Label
grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan
grbuild.scan = grbuild.Scan
return grbuild
}
@ -324,15 +327,30 @@ func (mq *MetaQuery) GroupBy(field string, fields ...string) *MetaGroupBy {
// Select(meta.FieldCreatedAt).
// Scan(ctx, &v)
func (mq *MetaQuery) Select(fields ...string) *MetaSelect {
mq.fields = append(mq.fields, fields...)
selbuild := &MetaSelect{MetaQuery: mq}
selbuild.label = meta.Label
selbuild.flds, selbuild.scan = &mq.fields, selbuild.Scan
return selbuild
mq.ctx.Fields = append(mq.ctx.Fields, fields...)
sbuild := &MetaSelect{MetaQuery: mq}
sbuild.label = meta.Label
sbuild.flds, sbuild.scan = &mq.ctx.Fields, sbuild.Scan
return sbuild
}
// Aggregate returns a MetaSelect configured with the given aggregations.
func (mq *MetaQuery) Aggregate(fns ...AggregateFunc) *MetaSelect {
return mq.Select().Aggregate(fns...)
}
func (mq *MetaQuery) prepareQuery(ctx context.Context) error {
for _, f := range mq.fields {
for _, inter := range mq.inters {
if inter == nil {
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
}
if trv, ok := inter.(Traverser); ok {
if err := trv.Traverse(ctx, mq); err != nil {
return err
}
}
}
for _, f := range mq.ctx.Fields {
if !meta.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
}
@ -392,6 +410,9 @@ func (mq *MetaQuery) loadOwner(ctx context.Context, query *AlertQuery, nodes []*
}
nodeids[fk] = append(nodeids[fk], nodes[i])
}
if len(ids) == 0 {
return nil
}
query.Where(alert.IDIn(ids...))
neighbors, err := query.All(ctx)
if err != nil {
@ -411,41 +432,22 @@ func (mq *MetaQuery) loadOwner(ctx context.Context, query *AlertQuery, nodes []*
func (mq *MetaQuery) sqlCount(ctx context.Context) (int, error) {
_spec := mq.querySpec()
_spec.Node.Columns = mq.fields
if len(mq.fields) > 0 {
_spec.Unique = mq.unique != nil && *mq.unique
_spec.Node.Columns = mq.ctx.Fields
if len(mq.ctx.Fields) > 0 {
_spec.Unique = mq.ctx.Unique != nil && *mq.ctx.Unique
}
return sqlgraph.CountNodes(ctx, mq.driver, _spec)
}
func (mq *MetaQuery) sqlExist(ctx context.Context) (bool, error) {
switch _, err := mq.FirstID(ctx); {
case IsNotFound(err):
return false, nil
case err != nil:
return false, fmt.Errorf("ent: check existence: %w", err)
default:
return true, nil
}
}
func (mq *MetaQuery) querySpec() *sqlgraph.QuerySpec {
_spec := &sqlgraph.QuerySpec{
Node: &sqlgraph.NodeSpec{
Table: meta.Table,
Columns: meta.Columns,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: meta.FieldID,
},
},
From: mq.sql,
Unique: true,
}
if unique := mq.unique; unique != nil {
_spec := sqlgraph.NewQuerySpec(meta.Table, meta.Columns, sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt))
_spec.From = mq.sql
if unique := mq.ctx.Unique; unique != nil {
_spec.Unique = *unique
} else if mq.path != nil {
_spec.Unique = true
}
if fields := mq.fields; len(fields) > 0 {
if fields := mq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, meta.FieldID)
for i := range fields {
@ -453,6 +455,9 @@ func (mq *MetaQuery) querySpec() *sqlgraph.QuerySpec {
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
}
}
if mq.withOwner != nil {
_spec.Node.AddColumnOnce(meta.FieldAlertMetas)
}
}
if ps := mq.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
@ -461,10 +466,10 @@ func (mq *MetaQuery) querySpec() *sqlgraph.QuerySpec {
}
}
}
if limit := mq.limit; limit != nil {
if limit := mq.ctx.Limit; limit != nil {
_spec.Limit = *limit
}
if offset := mq.offset; offset != nil {
if offset := mq.ctx.Offset; offset != nil {
_spec.Offset = *offset
}
if ps := mq.order; len(ps) > 0 {
@ -480,7 +485,7 @@ func (mq *MetaQuery) querySpec() *sqlgraph.QuerySpec {
func (mq *MetaQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(mq.driver.Dialect())
t1 := builder.Table(meta.Table)
columns := mq.fields
columns := mq.ctx.Fields
if len(columns) == 0 {
columns = meta.Columns
}
@ -489,7 +494,7 @@ func (mq *MetaQuery) sqlQuery(ctx context.Context) *sql.Selector {
selector = mq.sql
selector.Select(selector.Columns(columns...)...)
}
if mq.unique != nil && *mq.unique {
if mq.ctx.Unique != nil && *mq.ctx.Unique {
selector.Distinct()
}
for _, p := range mq.predicates {
@ -498,12 +503,12 @@ func (mq *MetaQuery) sqlQuery(ctx context.Context) *sql.Selector {
for _, p := range mq.order {
p(selector)
}
if offset := mq.offset; offset != nil {
if offset := mq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start
// with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32)
}
if limit := mq.limit; limit != nil {
if limit := mq.ctx.Limit; limit != nil {
selector.Limit(*limit)
}
return selector
@ -511,13 +516,8 @@ func (mq *MetaQuery) sqlQuery(ctx context.Context) *sql.Selector {
// MetaGroupBy is the group-by builder for Meta entities.
type MetaGroupBy struct {
config
selector
fields []string
fns []AggregateFunc
// intermediate query (i.e. traversal path).
sql *sql.Selector
path func(context.Context) (*sql.Selector, error)
build *MetaQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
@ -526,74 +526,77 @@ func (mgb *MetaGroupBy) Aggregate(fns ...AggregateFunc) *MetaGroupBy {
return mgb
}
// Scan applies the group-by query and scans the result into the given value.
// Scan applies the selector query and scans the result into the given value.
func (mgb *MetaGroupBy) Scan(ctx context.Context, v any) error {
query, err := mgb.path(ctx)
if err != nil {
ctx = setContextOp(ctx, mgb.build.ctx, "GroupBy")
if err := mgb.build.prepareQuery(ctx); err != nil {
return err
}
mgb.sql = query
return mgb.sqlScan(ctx, v)
return scanWithInterceptors[*MetaQuery, *MetaGroupBy](ctx, mgb.build, mgb, mgb.build.inters, v)
}
func (mgb *MetaGroupBy) sqlScan(ctx context.Context, v any) error {
for _, f := range mgb.fields {
if !meta.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)}
}
}
selector := mgb.sqlQuery()
if err := selector.Err(); err != nil {
return err
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := mgb.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
func (mgb *MetaGroupBy) sqlQuery() *sql.Selector {
selector := mgb.sql.Select()
func (mgb *MetaGroupBy) sqlScan(ctx context.Context, root *MetaQuery, v any) error {
selector := root.sqlQuery(ctx).Select()
aggregation := make([]string, 0, len(mgb.fns))
for _, fn := range mgb.fns {
aggregation = append(aggregation, fn(selector))
}
// If no columns were selected in a custom aggregation function, the default
// selection is the fields used for "group-by", and the aggregation functions.
if len(selector.SelectedColumns()) == 0 {
columns := make([]string, 0, len(mgb.fields)+len(mgb.fns))
for _, f := range mgb.fields {
columns := make([]string, 0, len(*mgb.flds)+len(mgb.fns))
for _, f := range *mgb.flds {
columns = append(columns, selector.C(f))
}
columns = append(columns, aggregation...)
selector.Select(columns...)
}
return selector.GroupBy(selector.Columns(mgb.fields...)...)
selector.GroupBy(selector.Columns(*mgb.flds...)...)
if err := selector.Err(); err != nil {
return err
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := mgb.build.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
// MetaSelect is the builder for selecting fields of Meta entities.
type MetaSelect struct {
*MetaQuery
selector
// intermediate query (i.e. traversal path).
sql *sql.Selector
}
// Aggregate adds the given aggregation functions to the selector query.
func (ms *MetaSelect) Aggregate(fns ...AggregateFunc) *MetaSelect {
ms.fns = append(ms.fns, fns...)
return ms
}
// Scan applies the selector query and scans the result into the given value.
func (ms *MetaSelect) Scan(ctx context.Context, v any) error {
ctx = setContextOp(ctx, ms.ctx, "Select")
if err := ms.prepareQuery(ctx); err != nil {
return err
}
ms.sql = ms.MetaQuery.sqlQuery(ctx)
return ms.sqlScan(ctx, v)
return scanWithInterceptors[*MetaQuery, *MetaSelect](ctx, ms.MetaQuery, ms, ms.inters, v)
}
func (ms *MetaSelect) sqlScan(ctx context.Context, v any) error {
func (ms *MetaSelect) sqlScan(ctx context.Context, root *MetaQuery, v any) error {
selector := root.sqlQuery(ctx)
aggregation := make([]string, 0, len(ms.fns))
for _, fn := range ms.fns {
aggregation = append(aggregation, fn(selector))
}
switch n := len(*ms.selector.flds); {
case n == 0 && len(aggregation) > 0:
selector.Select(aggregation...)
case n != 0 && len(aggregation) > 0:
selector.AppendSelect(aggregation...)
}
rows := &sql.Rows{}
query, args := ms.sql.Query()
query, args := selector.Query()
if err := ms.driver.Query(ctx, query, args, rows); err != nil {
return err
}
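
The query builder now keeps limit, offset, uniqueness and selected fields in a QueryContext, and All, Count, GroupBy and Select run through the interceptor-aware helpers; Exist is implemented on top of FirstID. Call sites do not change. A sketch of the grouped-count pattern referenced in the GroupBy doc comment, assuming client and ctx:

// Sketch only: count Meta rows per key using the regenerated group-by path.
func metaKeyCounts(ctx context.Context, client *ent.Client) (map[string]int, error) {
	var rows []struct {
		Key   string `json:"key"`
		Count int    `json:"count"`
	}
	if err := client.Meta.Query().
		GroupBy(meta.FieldKey).
		Aggregate(ent.Count()).
		Scan(ctx, &rows); err != nil {
		return nil, err
	}
	counts := make(map[string]int, len(rows))
	for _, r := range rows {
		counts[r.Key] = r.Count
	}
	return counts, nil
}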


@ -117,41 +117,8 @@ func (mu *MetaUpdate) ClearOwner() *MetaUpdate {
// Save executes the query and returns the number of nodes affected by the update operation.
func (mu *MetaUpdate) Save(ctx context.Context) (int, error) {
var (
err error
affected int
)
mu.defaults()
if len(mu.hooks) == 0 {
if err = mu.check(); err != nil {
return 0, err
}
affected, err = mu.sqlSave(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*MetaMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
if err = mu.check(); err != nil {
return 0, err
}
mu.mutation = mutation
affected, err = mu.sqlSave(ctx)
mutation.done = true
return affected, err
})
for i := len(mu.hooks) - 1; i >= 0; i-- {
if mu.hooks[i] == nil {
return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = mu.hooks[i](mut)
}
if _, err := mut.Mutate(ctx, mu.mutation); err != nil {
return 0, err
}
}
return affected, err
return withHooks(ctx, mu.sqlSave, mu.mutation, mu.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@ -199,16 +166,10 @@ func (mu *MetaUpdate) check() error {
}
func (mu *MetaUpdate) sqlSave(ctx context.Context) (n int, err error) {
_spec := &sqlgraph.UpdateSpec{
Node: &sqlgraph.NodeSpec{
Table: meta.Table,
Columns: meta.Columns,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: meta.FieldID,
},
},
if err := mu.check(); err != nil {
return n, err
}
_spec := sqlgraph.NewUpdateSpec(meta.Table, meta.Columns, sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt))
if ps := mu.mutation.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
@ -217,44 +178,22 @@ func (mu *MetaUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
}
if value, ok := mu.mutation.CreatedAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: meta.FieldCreatedAt,
})
_spec.SetField(meta.FieldCreatedAt, field.TypeTime, value)
}
if mu.mutation.CreatedAtCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: meta.FieldCreatedAt,
})
_spec.ClearField(meta.FieldCreatedAt, field.TypeTime)
}
if value, ok := mu.mutation.UpdatedAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: meta.FieldUpdatedAt,
})
_spec.SetField(meta.FieldUpdatedAt, field.TypeTime, value)
}
if mu.mutation.UpdatedAtCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: meta.FieldUpdatedAt,
})
_spec.ClearField(meta.FieldUpdatedAt, field.TypeTime)
}
if value, ok := mu.mutation.Key(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: meta.FieldKey,
})
_spec.SetField(meta.FieldKey, field.TypeString, value)
}
if value, ok := mu.mutation.Value(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: meta.FieldValue,
})
_spec.SetField(meta.FieldValue, field.TypeString, value)
}
if mu.mutation.OwnerCleared() {
edge := &sqlgraph.EdgeSpec{
@ -264,10 +203,7 @@ func (mu *MetaUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{meta.OwnerColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: alert.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@ -280,10 +216,7 @@ func (mu *MetaUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{meta.OwnerColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: alert.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
@ -299,6 +232,7 @@ func (mu *MetaUpdate) sqlSave(ctx context.Context) (n int, err error) {
}
return 0, err
}
mu.mutation.done = true
return n, nil
}
@ -396,6 +330,12 @@ func (muo *MetaUpdateOne) ClearOwner() *MetaUpdateOne {
return muo
}
// Where appends a list of predicates to the MetaUpdate builder.
func (muo *MetaUpdateOne) Where(ps ...predicate.Meta) *MetaUpdateOne {
muo.mutation.Where(ps...)
return muo
}
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (muo *MetaUpdateOne) Select(field string, fields ...string) *MetaUpdateOne {
@ -405,47 +345,8 @@ func (muo *MetaUpdateOne) Select(field string, fields ...string) *MetaUpdateOne
// Save executes the query and returns the updated Meta entity.
func (muo *MetaUpdateOne) Save(ctx context.Context) (*Meta, error) {
var (
err error
node *Meta
)
muo.defaults()
if len(muo.hooks) == 0 {
if err = muo.check(); err != nil {
return nil, err
}
node, err = muo.sqlSave(ctx)
} else {
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutation, ok := m.(*MetaMutation)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
if err = muo.check(); err != nil {
return nil, err
}
muo.mutation = mutation
node, err = muo.sqlSave(ctx)
mutation.done = true
return node, err
})
for i := len(muo.hooks) - 1; i >= 0; i-- {
if muo.hooks[i] == nil {
return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
}
mut = muo.hooks[i](mut)
}
v, err := mut.Mutate(ctx, muo.mutation)
if err != nil {
return nil, err
}
nv, ok := v.(*Meta)
if !ok {
return nil, fmt.Errorf("unexpected node type %T returned from MetaMutation", v)
}
node = nv
}
return node, err
return withHooks(ctx, muo.sqlSave, muo.mutation, muo.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@ -493,16 +394,10 @@ func (muo *MetaUpdateOne) check() error {
}
func (muo *MetaUpdateOne) sqlSave(ctx context.Context) (_node *Meta, err error) {
_spec := &sqlgraph.UpdateSpec{
Node: &sqlgraph.NodeSpec{
Table: meta.Table,
Columns: meta.Columns,
ID: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: meta.FieldID,
},
},
if err := muo.check(); err != nil {
return _node, err
}
_spec := sqlgraph.NewUpdateSpec(meta.Table, meta.Columns, sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt))
id, ok := muo.mutation.ID()
if !ok {
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Meta.id" for update`)}
@ -528,44 +423,22 @@ func (muo *MetaUpdateOne) sqlSave(ctx context.Context) (_node *Meta, err error)
}
}
if value, ok := muo.mutation.CreatedAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: meta.FieldCreatedAt,
})
_spec.SetField(meta.FieldCreatedAt, field.TypeTime, value)
}
if muo.mutation.CreatedAtCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: meta.FieldCreatedAt,
})
_spec.ClearField(meta.FieldCreatedAt, field.TypeTime)
}
if value, ok := muo.mutation.UpdatedAt(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Value: value,
Column: meta.FieldUpdatedAt,
})
_spec.SetField(meta.FieldUpdatedAt, field.TypeTime, value)
}
if muo.mutation.UpdatedAtCleared() {
_spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{
Type: field.TypeTime,
Column: meta.FieldUpdatedAt,
})
_spec.ClearField(meta.FieldUpdatedAt, field.TypeTime)
}
if value, ok := muo.mutation.Key(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: meta.FieldKey,
})
_spec.SetField(meta.FieldKey, field.TypeString, value)
}
if value, ok := muo.mutation.Value(); ok {
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{
Type: field.TypeString,
Value: value,
Column: meta.FieldValue,
})
_spec.SetField(meta.FieldValue, field.TypeString, value)
}
if muo.mutation.OwnerCleared() {
edge := &sqlgraph.EdgeSpec{
@ -575,10 +448,7 @@ func (muo *MetaUpdateOne) sqlSave(ctx context.Context) (_node *Meta, err error)
Columns: []string{meta.OwnerColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: alert.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@ -591,10 +461,7 @@ func (muo *MetaUpdateOne) sqlSave(ctx context.Context) (_node *Meta, err error)
Columns: []string{meta.OwnerColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: alert.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
@ -613,5 +480,6 @@ func (muo *MetaUpdateOne) sqlSave(ctx context.Context) (_node *Meta, err error)
}
return nil, err
}
muo.mutation.done = true
return _node, nil
}
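
Both update builders delegate to withHooks and build their specs with sqlgraph.NewUpdateSpec, SetField and ClearField, and MetaUpdateOne gains a Where method. A sketch, with the SetValue/SetUpdatedAt setter names assumed alongside client, ctx and a time import:

// Sketch only: update one Meta row, guarded by the new MetaUpdateOne.Where.
func refreshMetaValue(ctx context.Context, client *ent.Client, id int, v string) (*ent.Meta, error) {
	return client.Meta.UpdateOneID(id).
		Where(meta.KeyEQ("source_ip")).
		SetValue(v).
		SetUpdatedAt(time.Now()).
		Save(ctx)
}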


@ -9,6 +9,8 @@ import (
"sync"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/alert"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem"
@ -17,8 +19,6 @@ import (
"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/meta"
"github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate"
"entgo.io/ent"
)
const (
@ -1578,11 +1578,26 @@ func (m *AlertMutation) Where(ps ...predicate.Alert) {
m.predicates = append(m.predicates, ps...)
}
// WhereP appends storage-level predicates to the AlertMutation builder. Using this method,
// users can use type-assertion to append predicates that do not depend on any generated package.
func (m *AlertMutation) WhereP(ps ...func(*sql.Selector)) {
p := make([]predicate.Alert, len(ps))
for i := range ps {
p[i] = ps[i]
}
m.Where(p...)
}
// Op returns the operation name.
func (m *AlertMutation) Op() Op {
return m.op
}
// SetOp allows setting the mutation operation.
func (m *AlertMutation) SetOp(op Op) {
m.op = op
}
// Type returns the node type of this mutation (Alert).
func (m *AlertMutation) Type() string {
return m.typ
@ -2997,11 +3012,26 @@ func (m *BouncerMutation) Where(ps ...predicate.Bouncer) {
m.predicates = append(m.predicates, ps...)
}
// WhereP appends storage-level predicates to the BouncerMutation builder. Using this method,
// users can use type-assertion to append predicates that do not depend on any generated package.
func (m *BouncerMutation) WhereP(ps ...func(*sql.Selector)) {
p := make([]predicate.Bouncer, len(ps))
for i := range ps {
p[i] = ps[i]
}
m.Where(p...)
}
// Op returns the operation name.
func (m *BouncerMutation) Op() Op {
return m.op
}
// SetOp allows setting the mutation operation.
func (m *BouncerMutation) SetOp(op Op) {
m.op = op
}
// Type returns the node type of this mutation (Bouncer).
func (m *BouncerMutation) Type() string {
return m.typ
@ -3654,11 +3684,26 @@ func (m *ConfigItemMutation) Where(ps ...predicate.ConfigItem) {
m.predicates = append(m.predicates, ps...)
}
// WhereP appends storage-level predicates to the ConfigItemMutation builder. Using this method,
// users can use type-assertion to append predicates that do not depend on any generated package.
func (m *ConfigItemMutation) WhereP(ps ...func(*sql.Selector)) {
p := make([]predicate.ConfigItem, len(ps))
for i := range ps {
p[i] = ps[i]
}
m.Where(p...)
}
// Op returns the operation name.
func (m *ConfigItemMutation) Op() Op {
return m.op
}
// SetOp allows setting the mutation operation.
func (m *ConfigItemMutation) SetOp(op Op) {
m.op = op
}
// Type returns the node type of this mutation (ConfigItem).
func (m *ConfigItemMutation) Type() string {
return m.typ
@ -4830,6 +4875,7 @@ func (m *DecisionMutation) SetOwnerID(id int) {
// ClearOwner clears the "owner" edge to the Alert entity.
func (m *DecisionMutation) ClearOwner() {
m.clearedowner = true
m.clearedFields[decision.FieldAlertDecisions] = struct{}{}
}
// OwnerCleared reports if the "owner" edge to the Alert entity was cleared.
@ -4866,11 +4912,26 @@ func (m *DecisionMutation) Where(ps ...predicate.Decision) {
m.predicates = append(m.predicates, ps...)
}
// WhereP appends storage-level predicates to the DecisionMutation builder. Using this method,
// users can use type-assertion to append predicates that do not depend on any generated package.
func (m *DecisionMutation) WhereP(ps ...func(*sql.Selector)) {
p := make([]predicate.Decision, len(ps))
for i := range ps {
p[i] = ps[i]
}
m.Where(p...)
}
// Op returns the operation name.
func (m *DecisionMutation) Op() Op {
return m.op
}
// SetOp allows setting the mutation operation.
func (m *DecisionMutation) SetOp(op Op) {
m.op = op
}
// Type returns the node type of this mutation (Decision).
func (m *DecisionMutation) Type() string {
return m.typ
@ -5775,6 +5836,7 @@ func (m *EventMutation) SetOwnerID(id int) {
// ClearOwner clears the "owner" edge to the Alert entity.
func (m *EventMutation) ClearOwner() {
m.clearedowner = true
m.clearedFields[event.FieldAlertEvents] = struct{}{}
}
// OwnerCleared reports if the "owner" edge to the Alert entity was cleared.
@ -5811,11 +5873,26 @@ func (m *EventMutation) Where(ps ...predicate.Event) {
m.predicates = append(m.predicates, ps...)
}
// WhereP appends storage-level predicates to the EventMutation builder. Using this method,
// users can use type-assertion to append predicates that do not depend on any generated package.
func (m *EventMutation) WhereP(ps ...func(*sql.Selector)) {
p := make([]predicate.Event, len(ps))
for i := range ps {
p[i] = ps[i]
}
m.Where(p...)
}
// Op returns the operation name.
func (m *EventMutation) Op() Op {
return m.op
}
// SetOp allows setting the mutation operation.
func (m *EventMutation) SetOp(op Op) {
m.op = op
}
// Type returns the node type of this mutation (Event).
func (m *EventMutation) Type() string {
return m.typ
@ -6795,11 +6872,26 @@ func (m *MachineMutation) Where(ps ...predicate.Machine) {
m.predicates = append(m.predicates, ps...)
}
// WhereP appends storage-level predicates to the MachineMutation builder. Using this method,
// users can use type-assertion to append predicates that do not depend on any generated package.
func (m *MachineMutation) WhereP(ps ...func(*sql.Selector)) {
p := make([]predicate.Machine, len(ps))
for i := range ps {
p[i] = ps[i]
}
m.Where(p...)
}
// Op returns the operation name.
func (m *MachineMutation) Op() Op {
return m.op
}
// SetOp allows setting the mutation operation.
func (m *MachineMutation) SetOp(op Op) {
m.op = op
}
// Type returns the node type of this mutation (Machine).
func (m *MachineMutation) Type() string {
return m.typ
@ -7565,6 +7657,7 @@ func (m *MetaMutation) SetOwnerID(id int) {
// ClearOwner clears the "owner" edge to the Alert entity.
func (m *MetaMutation) ClearOwner() {
m.clearedowner = true
m.clearedFields[meta.FieldAlertMetas] = struct{}{}
}
// OwnerCleared reports if the "owner" edge to the Alert entity was cleared.
@ -7601,11 +7694,26 @@ func (m *MetaMutation) Where(ps ...predicate.Meta) {
m.predicates = append(m.predicates, ps...)
}
// WhereP appends storage-level predicates to the MetaMutation builder. Using this method,
// users can use type-assertion to append predicates that do not depend on any generated package.
func (m *MetaMutation) WhereP(ps ...func(*sql.Selector)) {
p := make([]predicate.Meta, len(ps))
for i := range ps {
p[i] = ps[i]
}
m.Where(p...)
}
// Op returns the operation name.
func (m *MetaMutation) Op() Op {
return m.op
}
// SetOp allows setting the mutation operation.
func (m *MetaMutation) SetOp(op Op) {
m.op = op
}
// Type returns the node type of this mutation (Meta).
func (m *MetaMutation) Type() string {
return m.typ
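
Every mutation now exposes WhereP, which accepts raw *sql.Selector predicates, and SetOp, which the new runtime helpers rely on; ClearOwner also records the foreign-key column in clearedFields. A sketch of a hook that narrows deletions through WhereP, assuming the standard generated hook package and the usual client wiring:

// Sketch only: restrict every Meta delete to rows whose key has a prefix.
// hook.MetaFunc and client.Meta.Use follow the standard ent codegen and are
// assumed; sql.FieldHasPrefix and WhereP appear earlier in this diff.
func restrictMetaDeletes(client *ent.Client) {
	client.Meta.Use(func(next ent.Mutator) ent.Mutator {
		return hook.MetaFunc(func(ctx context.Context, m *ent.MetaMutation) (ent.Value, error) {
			if m.Op().Is(ent.OpDelete | ent.OpDeleteOne) {
				m.WhereP(sql.FieldHasPrefix(meta.FieldKey, "crowdsec/"))
			}
			return next.Mutate(ctx, m)
		})
	})
}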


@ -5,6 +5,6 @@ package runtime
// The schema-stitching logic is generated in github.com/crowdsecurity/crowdsec/pkg/database/ent/runtime.go
const (
Version = "v0.11.3" // Version of ent codegen.
Sum = "h1:F5FBGAWiDCGder7YT+lqMnyzXl6d0xU3xMBM/SO3CMc=" // Sum of ent codegen.
Version = "v0.12.4" // Version of ent codegen.
Sum = "h1:LddPnAyxls/O7DTXZvUGDj0NZIdGSu317+aoNLJWbD8=" // Sum of ent codegen.
)


@ -30,12 +30,6 @@ type Tx struct {
// lazily loaded.
client *Client
clientOnce sync.Once
// completion callbacks.
mu sync.Mutex
onCommit []CommitHook
onRollback []RollbackHook
// ctx lives for the life of the transaction. It is
// the same context used by the underlying connection.
ctx context.Context
@ -80,9 +74,9 @@ func (tx *Tx) Commit() error {
var fn Committer = CommitFunc(func(context.Context, *Tx) error {
return txDriver.tx.Commit()
})
tx.mu.Lock()
hooks := append([]CommitHook(nil), tx.onCommit...)
tx.mu.Unlock()
txDriver.mu.Lock()
hooks := append([]CommitHook(nil), txDriver.onCommit...)
txDriver.mu.Unlock()
for i := len(hooks) - 1; i >= 0; i-- {
fn = hooks[i](fn)
}
@ -91,9 +85,10 @@ func (tx *Tx) Commit() error {
// OnCommit adds a hook to call on commit.
func (tx *Tx) OnCommit(f CommitHook) {
tx.mu.Lock()
defer tx.mu.Unlock()
tx.onCommit = append(tx.onCommit, f)
txDriver := tx.config.driver.(*txDriver)
txDriver.mu.Lock()
txDriver.onCommit = append(txDriver.onCommit, f)
txDriver.mu.Unlock()
}
type (
@ -135,9 +130,9 @@ func (tx *Tx) Rollback() error {
var fn Rollbacker = RollbackFunc(func(context.Context, *Tx) error {
return txDriver.tx.Rollback()
})
tx.mu.Lock()
hooks := append([]RollbackHook(nil), tx.onRollback...)
tx.mu.Unlock()
txDriver.mu.Lock()
hooks := append([]RollbackHook(nil), txDriver.onRollback...)
txDriver.mu.Unlock()
for i := len(hooks) - 1; i >= 0; i-- {
fn = hooks[i](fn)
}
@ -146,9 +141,10 @@ func (tx *Tx) Rollback() error {
// OnRollback adds a hook to call on rollback.
func (tx *Tx) OnRollback(f RollbackHook) {
tx.mu.Lock()
defer tx.mu.Unlock()
tx.onRollback = append(tx.onRollback, f)
txDriver := tx.config.driver.(*txDriver)
txDriver.mu.Lock()
txDriver.onRollback = append(txDriver.onRollback, f)
txDriver.mu.Unlock()
}
// Client returns a Client that binds to current transaction.
@ -186,6 +182,10 @@ type txDriver struct {
drv dialect.Driver
// tx is the underlying transaction.
tx dialect.Tx
// completion hooks.
mu sync.Mutex
onCommit []CommitHook
onRollback []RollbackHook
}
// newTx creates a new transactional driver.
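
The commit and rollback hooks move from the Tx value onto the shared txDriver, so hooks registered through any Tx handle of the same transaction are honored at commit or rollback time; OnCommit and OnRollback keep their signatures. A sketch, with SetKey/SetValue, client, ctx and a log import assumed:

// Sketch only: register a commit hook on a transaction. Committer and
// CommitFunc are the generated types shown above.
func createMetaInTx(ctx context.Context, client *ent.Client) error {
	tx, err := client.Tx(ctx)
	if err != nil {
		return err
	}
	tx.OnCommit(func(next ent.Committer) ent.Committer {
		return ent.CommitFunc(func(ctx context.Context, tx *ent.Tx) error {
			err := next.Commit(ctx, tx)
			if err == nil {
				log.Println("meta transaction committed")
			}
			return err
		})
	})
	if _, err := tx.Meta.Create().SetKey("k").SetValue("v").Save(ctx); err != nil {
		_ = tx.Rollback()
		return err
	}
	return tx.Commit()
}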