// Code generated by SQLBoiler 3.6.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
|
|
// This file is meant to be re-generated in place and/or deleted at any time.
|
|
|
|
package models
|
|
|
|
import (
|
|
"context"
|
|
"database/sql"
|
|
"fmt"
|
|
"reflect"
|
|
"strings"
|
|
"sync"
|
|
"time"
|
|
|
|
"github.com/friendsofgo/errors"
|
|
"github.com/volatiletech/sqlboiler/boil"
|
|
"github.com/volatiletech/sqlboiler/queries"
|
|
"github.com/volatiletech/sqlboiler/queries/qm"
|
|
"github.com/volatiletech/sqlboiler/queries/qmhelper"
|
|
"github.com/volatiletech/sqlboiler/strmangle"
|
|
)
|
|
|
|
// Schema is an object representing the database table.
type Schema struct {
	// ID is the primary key column ("id").
	ID int64 `boil:"id" json:"id" toml:"id" yaml:"id"`

	// R holds eager-loaded relationships (none are generated for this table).
	R *schemaR `boil:"-" json:"-" toml:"-" yaml:"-"`
	// L holds the relationship Load methods (none for this table).
	L schemaL `boil:"-" json:"-" toml:"-" yaml:"-"`
}
// SchemaColumns maps struct field names to their database column names.
var SchemaColumns = struct {
	ID string
}{
	ID: "id",
}
// Generated where
|
|
|
|
type whereHelperint64 struct{ field string }
|
|
|
|
func (w whereHelperint64) EQ(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.EQ, x) }
|
|
func (w whereHelperint64) NEQ(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.NEQ, x) }
|
|
func (w whereHelperint64) LT(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LT, x) }
|
|
func (w whereHelperint64) LTE(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LTE, x) }
|
|
func (w whereHelperint64) GT(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GT, x) }
|
|
func (w whereHelperint64) GTE(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GTE, x) }
|
|
func (w whereHelperint64) IN(slice []int64) qm.QueryMod {
|
|
values := make([]interface{}, 0, len(slice))
|
|
for _, value := range slice {
|
|
values = append(values, value)
|
|
}
|
|
return qm.WhereIn(fmt.Sprintf("%s IN ?", w.field), values...)
|
|
}
|
|
|
|
// SchemaWhere provides type-safe where helpers for each Schema column.
var SchemaWhere = struct {
	ID whereHelperint64
}{
	ID: whereHelperint64{field: "\"schema\".\"id\""},
}
// SchemaRels is where relationship names are stored.
// This table has no generated relationships, so the struct is empty.
var SchemaRels = struct {
}{}
// schemaR is where relationships are stored.
// Empty because the "schema" table has no relationships.
type schemaR struct {
}

// NewStruct creates a new relationship struct
func (*schemaR) NewStruct() *schemaR {
	return &schemaR{}
}
// schemaL is where Load methods for each relationship are stored.
// Empty because the "schema" table has no relationships to load.
type schemaL struct{}
var (
	// schemaAllColumns lists every column of the "schema" table.
	schemaAllColumns = []string{"id"}
	// schemaColumnsWithoutDefault: columns with no database-side default.
	schemaColumnsWithoutDefault = []string{}
	// schemaColumnsWithDefault: columns the database can populate itself
	// (id is generated by the database).
	schemaColumnsWithDefault = []string{"id"}
	// schemaPrimaryKeyColumns is the primary key column set.
	schemaPrimaryKeyColumns = []string{"id"}
)
type (
	// SchemaSlice is an alias for a slice of pointers to Schema.
	// This should generally be used opposed to []Schema.
	SchemaSlice []*Schema
	// SchemaHook is the signature for custom Schema hook methods
	SchemaHook func(context.Context, boil.ContextExecutor, *Schema) error

	// schemaQuery wraps a query builder so the finishers (One, All, Count,
	// Exists, UpdateAll, DeleteAll) can be defined as methods on it.
	schemaQuery struct {
		*queries.Query
	}
)
// Cache for insert, update and upsert
var (
	// schemaType and schemaMapping drive reflection-based extraction of
	// column values from Schema structs.
	schemaType                 = reflect.TypeOf(&Schema{})
	schemaMapping              = queries.MakeStructMapping(schemaType)
	schemaPrimaryKeyMapping, _ = queries.BindMapping(schemaType, schemaMapping, schemaPrimaryKeyColumns)
	// Generated-SQL caches keyed by column set; each guarded by its mutex.
	schemaInsertCacheMut sync.RWMutex
	schemaInsertCache    = make(map[string]insertCache)
	schemaUpdateCacheMut sync.RWMutex
	schemaUpdateCache    = make(map[string]updateCache)
	schemaUpsertCacheMut sync.RWMutex
	schemaUpsertCache    = make(map[string]insertCache)
)
var (
	// Force time package dependency for automated UpdatedAt/CreatedAt.
	_ = time.Second
	// Force qmhelper dependency for where clause generation (which doesn't
	// always happen)
	_ = qmhelper.Where
)
// Hook registries: populated via AddSchemaHook and executed by the
// corresponding do*Hooks methods below.
var schemaBeforeInsertHooks []SchemaHook
var schemaBeforeUpdateHooks []SchemaHook
var schemaBeforeDeleteHooks []SchemaHook
var schemaBeforeUpsertHooks []SchemaHook

var schemaAfterInsertHooks []SchemaHook
var schemaAfterSelectHooks []SchemaHook
var schemaAfterUpdateHooks []SchemaHook
var schemaAfterDeleteHooks []SchemaHook
var schemaAfterUpsertHooks []SchemaHook
// doBeforeInsertHooks executes all "before insert" hooks.
|
|
func (o *Schema) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
|
if boil.HooksAreSkipped(ctx) {
|
|
return nil
|
|
}
|
|
|
|
for _, hook := range schemaBeforeInsertHooks {
|
|
if err := hook(ctx, exec, o); err != nil {
|
|
return err
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// doBeforeUpdateHooks executes all "before Update" hooks.
|
|
func (o *Schema) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
|
if boil.HooksAreSkipped(ctx) {
|
|
return nil
|
|
}
|
|
|
|
for _, hook := range schemaBeforeUpdateHooks {
|
|
if err := hook(ctx, exec, o); err != nil {
|
|
return err
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// doBeforeDeleteHooks executes all "before Delete" hooks.
|
|
func (o *Schema) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
|
if boil.HooksAreSkipped(ctx) {
|
|
return nil
|
|
}
|
|
|
|
for _, hook := range schemaBeforeDeleteHooks {
|
|
if err := hook(ctx, exec, o); err != nil {
|
|
return err
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// doBeforeUpsertHooks executes all "before Upsert" hooks.
|
|
func (o *Schema) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
|
if boil.HooksAreSkipped(ctx) {
|
|
return nil
|
|
}
|
|
|
|
for _, hook := range schemaBeforeUpsertHooks {
|
|
if err := hook(ctx, exec, o); err != nil {
|
|
return err
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// doAfterInsertHooks executes all "after Insert" hooks.
|
|
func (o *Schema) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
|
if boil.HooksAreSkipped(ctx) {
|
|
return nil
|
|
}
|
|
|
|
for _, hook := range schemaAfterInsertHooks {
|
|
if err := hook(ctx, exec, o); err != nil {
|
|
return err
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// doAfterSelectHooks executes all "after Select" hooks.
|
|
func (o *Schema) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
|
if boil.HooksAreSkipped(ctx) {
|
|
return nil
|
|
}
|
|
|
|
for _, hook := range schemaAfterSelectHooks {
|
|
if err := hook(ctx, exec, o); err != nil {
|
|
return err
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// doAfterUpdateHooks executes all "after Update" hooks.
|
|
func (o *Schema) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
|
if boil.HooksAreSkipped(ctx) {
|
|
return nil
|
|
}
|
|
|
|
for _, hook := range schemaAfterUpdateHooks {
|
|
if err := hook(ctx, exec, o); err != nil {
|
|
return err
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// doAfterDeleteHooks executes all "after Delete" hooks.
|
|
func (o *Schema) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
|
if boil.HooksAreSkipped(ctx) {
|
|
return nil
|
|
}
|
|
|
|
for _, hook := range schemaAfterDeleteHooks {
|
|
if err := hook(ctx, exec, o); err != nil {
|
|
return err
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// doAfterUpsertHooks executes all "after Upsert" hooks.
|
|
func (o *Schema) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
|
if boil.HooksAreSkipped(ctx) {
|
|
return nil
|
|
}
|
|
|
|
for _, hook := range schemaAfterUpsertHooks {
|
|
if err := hook(ctx, exec, o); err != nil {
|
|
return err
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// AddSchemaHook registers your hook function for all future operations.
// Hooks are appended, so they run in registration order. An unrecognized
// hook point is silently ignored.
func AddSchemaHook(hookPoint boil.HookPoint, schemaHook SchemaHook) {
	switch hookPoint {
	case boil.BeforeInsertHook:
		schemaBeforeInsertHooks = append(schemaBeforeInsertHooks, schemaHook)
	case boil.BeforeUpdateHook:
		schemaBeforeUpdateHooks = append(schemaBeforeUpdateHooks, schemaHook)
	case boil.BeforeDeleteHook:
		schemaBeforeDeleteHooks = append(schemaBeforeDeleteHooks, schemaHook)
	case boil.BeforeUpsertHook:
		schemaBeforeUpsertHooks = append(schemaBeforeUpsertHooks, schemaHook)
	case boil.AfterInsertHook:
		schemaAfterInsertHooks = append(schemaAfterInsertHooks, schemaHook)
	case boil.AfterSelectHook:
		schemaAfterSelectHooks = append(schemaAfterSelectHooks, schemaHook)
	case boil.AfterUpdateHook:
		schemaAfterUpdateHooks = append(schemaAfterUpdateHooks, schemaHook)
	case boil.AfterDeleteHook:
		schemaAfterDeleteHooks = append(schemaAfterDeleteHooks, schemaHook)
	case boil.AfterUpsertHook:
		schemaAfterUpsertHooks = append(schemaAfterUpsertHooks, schemaHook)
	}
}
// One returns a single schema record from the query.
|
|
func (q schemaQuery) One(ctx context.Context, exec boil.ContextExecutor) (*Schema, error) {
|
|
o := &Schema{}
|
|
|
|
queries.SetLimit(q.Query, 1)
|
|
|
|
err := q.Bind(ctx, exec, o)
|
|
if err != nil {
|
|
if errors.Cause(err) == sql.ErrNoRows {
|
|
return nil, sql.ErrNoRows
|
|
}
|
|
return nil, errors.Wrap(err, "models: failed to execute a one query for schema")
|
|
}
|
|
|
|
if err := o.doAfterSelectHooks(ctx, exec); err != nil {
|
|
return o, err
|
|
}
|
|
|
|
return o, nil
|
|
}
|
|
|
|
// All returns all Schema records from the query.
|
|
func (q schemaQuery) All(ctx context.Context, exec boil.ContextExecutor) (SchemaSlice, error) {
|
|
var o []*Schema
|
|
|
|
err := q.Bind(ctx, exec, &o)
|
|
if err != nil {
|
|
return nil, errors.Wrap(err, "models: failed to assign all query results to Schema slice")
|
|
}
|
|
|
|
if len(schemaAfterSelectHooks) != 0 {
|
|
for _, obj := range o {
|
|
if err := obj.doAfterSelectHooks(ctx, exec); err != nil {
|
|
return o, err
|
|
}
|
|
}
|
|
}
|
|
|
|
return o, nil
|
|
}
|
|
|
|
// Count returns the count of all Schema records in the query.
|
|
func (q schemaQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
|
|
var count int64
|
|
|
|
queries.SetSelect(q.Query, nil)
|
|
queries.SetCount(q.Query)
|
|
|
|
err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
|
|
if err != nil {
|
|
return 0, errors.Wrap(err, "models: failed to count schema rows")
|
|
}
|
|
|
|
return count, nil
|
|
}
|
|
|
|
// Exists checks if the row exists in the table.
|
|
func (q schemaQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) {
|
|
var count int64
|
|
|
|
queries.SetSelect(q.Query, nil)
|
|
queries.SetCount(q.Query)
|
|
queries.SetLimit(q.Query, 1)
|
|
|
|
err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
|
|
if err != nil {
|
|
return false, errors.Wrap(err, "models: failed to check if schema exists")
|
|
}
|
|
|
|
return count > 0, nil
|
|
}
|
|
|
|
// Schemas retrieves all the records using an executor.
|
|
func Schemas(mods ...qm.QueryMod) schemaQuery {
|
|
mods = append(mods, qm.From("\"schema\""))
|
|
return schemaQuery{NewQuery(mods...)}
|
|
}
|
|
|
|
// FindSchema retrieves a single record by ID with an executor.
|
|
// If selectCols is empty Find will return all columns.
|
|
func FindSchema(ctx context.Context, exec boil.ContextExecutor, iD int64, selectCols ...string) (*Schema, error) {
|
|
schemaObj := &Schema{}
|
|
|
|
sel := "*"
|
|
if len(selectCols) > 0 {
|
|
sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",")
|
|
}
|
|
query := fmt.Sprintf(
|
|
"select %s from \"schema\" where \"id\"=?", sel,
|
|
)
|
|
|
|
q := queries.Raw(query, iD)
|
|
|
|
err := q.Bind(ctx, exec, schemaObj)
|
|
if err != nil {
|
|
if errors.Cause(err) == sql.ErrNoRows {
|
|
return nil, sql.ErrNoRows
|
|
}
|
|
return nil, errors.Wrap(err, "models: unable to select from schema")
|
|
}
|
|
|
|
return schemaObj, nil
|
|
}
|
|
|
|
// Insert a single record using an executor.
// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts.
func (o *Schema) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error {
	if o == nil {
		return errors.New("models: no schema provided for insertion")
	}

	var err error

	if err := o.doBeforeInsertHooks(ctx, exec); err != nil {
		return err
	}

	// Default-capable columns that currently hold non-zero values; these must
	// be written explicitly rather than left to the database default.
	nzDefaults := queries.NonZeroDefaultSet(schemaColumnsWithDefault, o)

	// The generated SQL depends only on (columns, nzDefaults); cache it.
	key := makeCacheKey(columns, nzDefaults)
	schemaInsertCacheMut.RLock()
	cache, cached := schemaInsertCache[key]
	schemaInsertCacheMut.RUnlock()

	if !cached {
		// wl: columns to write; returnColumns: columns to read back afterwards.
		wl, returnColumns := columns.InsertColumnSet(
			schemaAllColumns,
			schemaColumnsWithDefault,
			schemaColumnsWithoutDefault,
			nzDefaults,
		)

		cache.valueMapping, err = queries.BindMapping(schemaType, schemaMapping, wl)
		if err != nil {
			return err
		}
		cache.retMapping, err = queries.BindMapping(schemaType, schemaMapping, returnColumns)
		if err != nil {
			return err
		}
		if len(wl) != 0 {
			cache.query = fmt.Sprintf("INSERT INTO \"schema\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1))
		} else {
			// No writable columns: insert a row consisting only of defaults.
			cache.query = "INSERT INTO \"schema\" () VALUES ()%s%s"
		}

		// Dialect-specific OUTPUT/RETURNING fragments; empty here because this
		// dialect reads generated values back with a follow-up SELECT.
		var queryOutput, queryReturning string

		if len(cache.retMapping) != 0 {
			cache.retQuery = fmt.Sprintf("SELECT \"%s\" FROM \"schema\" WHERE %s", strings.Join(returnColumns, "\",\""), strmangle.WhereClause("\"", "\"", 0, schemaPrimaryKeyColumns))
		}

		cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning)
	}

	value := reflect.Indirect(reflect.ValueOf(o))
	vals := queries.ValuesFromMapping(value, cache.valueMapping)

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, cache.query)
		fmt.Fprintln(writer, vals)
	}
	result, err := exec.ExecContext(ctx, cache.query, vals...)

	if err != nil {
		return errors.Wrap(err, "models: unable to insert into schema")
	}

	var lastID int64
	var identifierCols []interface{}

	// Nothing to read back from the database.
	if len(cache.retMapping) == 0 {
		goto CacheNoHooks
	}

	lastID, err = result.LastInsertId()
	if err != nil {
		return ErrSyncFail
	}

	o.ID = int64(lastID)
	// If the only returned column is the auto-increment id, LastInsertId
	// already supplied it and the follow-up SELECT can be skipped.
	if lastID != 0 && len(cache.retMapping) == 1 && cache.retMapping[0] == schemaMapping["id"] {
		goto CacheNoHooks
	}

	identifierCols = []interface{}{
		o.ID,
	}

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, cache.retQuery)
		fmt.Fprintln(writer, identifierCols...)
	}
	// Read back database-generated values into the struct.
	err = exec.QueryRowContext(ctx, cache.retQuery, identifierCols...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...)
	if err != nil {
		return errors.Wrap(err, "models: unable to populate default values for schema")
	}

CacheNoHooks:
	if !cached {
		schemaInsertCacheMut.Lock()
		schemaInsertCache[key] = cache
		schemaInsertCacheMut.Unlock()
	}

	return o.doAfterInsertHooks(ctx, exec)
}
// Update uses an executor to update the Schema.
// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates.
// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records.
func (o *Schema) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {
	var err error
	if err = o.doBeforeUpdateHooks(ctx, exec); err != nil {
		return 0, err
	}
	// The generated UPDATE depends only on the column set; cache it.
	key := makeCacheKey(columns, nil)
	schemaUpdateCacheMut.RLock()
	cache, cached := schemaUpdateCache[key]
	schemaUpdateCacheMut.RUnlock()

	if !cached {
		wl := columns.UpdateColumnSet(
			schemaAllColumns,
			schemaPrimaryKeyColumns,
		)

		// Generator convention: never overwrite created_at unless the caller
		// explicitly whitelisted columns (this table has no such column, so
		// this is a no-op here).
		if !columns.IsWhitelist() {
			wl = strmangle.SetComplement(wl, []string{"created_at"})
		}
		if len(wl) == 0 {
			return 0, errors.New("models: unable to update schema, could not build whitelist")
		}

		cache.query = fmt.Sprintf("UPDATE \"schema\" SET %s WHERE %s",
			strmangle.SetParamNames("\"", "\"", 0, wl),
			strmangle.WhereClause("\"", "\"", 0, schemaPrimaryKeyColumns),
		)
		cache.valueMapping, err = queries.BindMapping(schemaType, schemaMapping, append(wl, schemaPrimaryKeyColumns...))
		if err != nil {
			return 0, err
		}
	}

	// SET values first, then primary key values for the WHERE clause.
	values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, cache.query)
		fmt.Fprintln(writer, values)
	}
	var result sql.Result
	result, err = exec.ExecContext(ctx, cache.query, values...)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to update schema row")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: failed to get rows affected by update for schema")
	}

	if !cached {
		schemaUpdateCacheMut.Lock()
		schemaUpdateCache[key] = cache
		schemaUpdateCacheMut.Unlock()
	}

	return rowsAff, o.doAfterUpdateHooks(ctx, exec)
}
// UpdateAll updates all rows with the specified column values.
|
|
func (q schemaQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
|
|
queries.SetUpdate(q.Query, cols)
|
|
|
|
result, err := q.Query.ExecContext(ctx, exec)
|
|
if err != nil {
|
|
return 0, errors.Wrap(err, "models: unable to update all for schema")
|
|
}
|
|
|
|
rowsAff, err := result.RowsAffected()
|
|
if err != nil {
|
|
return 0, errors.Wrap(err, "models: unable to retrieve rows affected for schema")
|
|
}
|
|
|
|
return rowsAff, nil
|
|
}
|
|
|
|
// UpdateAll updates all rows with the specified column values, using an executor.
func (o SchemaSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
	ln := int64(len(o))
	if ln == 0 {
		return 0, nil
	}

	if len(cols) == 0 {
		return 0, errors.New("models: update all requires at least one column argument")
	}

	colNames := make([]string, len(cols))
	args := make([]interface{}, len(cols))

	// SET arguments come first; colNames and args stay index-aligned even
	// though map iteration order is random.
	i := 0
	for name, value := range cols {
		colNames[i] = name
		args[i] = value
		i++
	}

	// Append all of the primary key values for each column
	for _, obj := range o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), schemaPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	// One statement updates every row via a repeated (OR'd) pkey WHERE clause.
	sql := fmt.Sprintf("UPDATE \"schema\" SET %s WHERE %s",
		strmangle.SetParamNames("\"", "\"", 0, colNames),
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, schemaPrimaryKeyColumns, len(o)))

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, args...)
	}
	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to update all in schema slice")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to retrieve rows affected all in update all schema")
	}
	return rowsAff, nil
}
// Delete deletes a single Schema record with an executor.
|
|
// Delete will match against the primary key column to find the record to delete.
|
|
func (o *Schema) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
|
|
if o == nil {
|
|
return 0, errors.New("models: no Schema provided for delete")
|
|
}
|
|
|
|
if err := o.doBeforeDeleteHooks(ctx, exec); err != nil {
|
|
return 0, err
|
|
}
|
|
|
|
args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), schemaPrimaryKeyMapping)
|
|
sql := "DELETE FROM \"schema\" WHERE \"id\"=?"
|
|
|
|
if boil.IsDebug(ctx) {
|
|
writer := boil.DebugWriterFrom(ctx)
|
|
fmt.Fprintln(writer, sql)
|
|
fmt.Fprintln(writer, args...)
|
|
}
|
|
result, err := exec.ExecContext(ctx, sql, args...)
|
|
if err != nil {
|
|
return 0, errors.Wrap(err, "models: unable to delete from schema")
|
|
}
|
|
|
|
rowsAff, err := result.RowsAffected()
|
|
if err != nil {
|
|
return 0, errors.Wrap(err, "models: failed to get rows affected by delete for schema")
|
|
}
|
|
|
|
if err := o.doAfterDeleteHooks(ctx, exec); err != nil {
|
|
return 0, err
|
|
}
|
|
|
|
return rowsAff, nil
|
|
}
|
|
|
|
// DeleteAll deletes all matching rows.
|
|
func (q schemaQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
|
|
if q.Query == nil {
|
|
return 0, errors.New("models: no schemaQuery provided for delete all")
|
|
}
|
|
|
|
queries.SetDelete(q.Query)
|
|
|
|
result, err := q.Query.ExecContext(ctx, exec)
|
|
if err != nil {
|
|
return 0, errors.Wrap(err, "models: unable to delete all from schema")
|
|
}
|
|
|
|
rowsAff, err := result.RowsAffected()
|
|
if err != nil {
|
|
return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for schema")
|
|
}
|
|
|
|
return rowsAff, nil
|
|
}
|
|
|
|
// DeleteAll deletes all rows in the slice, using an executor.
func (o SchemaSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if len(o) == 0 {
		return 0, nil
	}

	// Only walk the slice when before-delete hooks are registered.
	if len(schemaBeforeDeleteHooks) != 0 {
		for _, obj := range o {
			if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil {
				return 0, err
			}
		}
	}

	// Collect every row's primary key values for the repeated WHERE clause.
	var args []interface{}
	for _, obj := range o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), schemaPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	// NOTE(review): this local shadows the database/sql import inside this
	// function; kept as generated.
	sql := "DELETE FROM \"schema\" WHERE " +
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, schemaPrimaryKeyColumns, len(o))

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, args)
	}
	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to delete all from schema slice")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for schema")
	}

	// Only walk the slice when after-delete hooks are registered.
	if len(schemaAfterDeleteHooks) != 0 {
		for _, obj := range o {
			if err := obj.doAfterDeleteHooks(ctx, exec); err != nil {
				return 0, err
			}
		}
	}

	return rowsAff, nil
}
// Reload refetches the object from the database
|
|
// using the primary keys with an executor.
|
|
func (o *Schema) Reload(ctx context.Context, exec boil.ContextExecutor) error {
|
|
ret, err := FindSchema(ctx, exec, o.ID)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
*o = *ret
|
|
return nil
|
|
}
|
|
|
|
// ReloadAll refetches every row with matching primary key column values
// and overwrites the original object slice with the newly updated slice.
func (o *SchemaSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error {
	if o == nil || len(*o) == 0 {
		return nil
	}

	slice := SchemaSlice{}
	// Collect all primary key values for the repeated WHERE clause.
	var args []interface{}
	for _, obj := range *o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), schemaPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	// NOTE(review): this local shadows the database/sql import inside this
	// function; kept as generated.
	sql := "SELECT \"schema\".* FROM \"schema\" WHERE " +
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, schemaPrimaryKeyColumns, len(*o))

	q := queries.Raw(sql, args...)

	err := q.Bind(ctx, exec, &slice)
	if err != nil {
		return errors.Wrap(err, "models: unable to reload all in SchemaSlice")
	}

	// Replace the caller's slice; rows no longer present in the DB are dropped.
	*o = slice

	return nil
}
// SchemaExists checks if the Schema row exists.
|
|
func SchemaExists(ctx context.Context, exec boil.ContextExecutor, iD int64) (bool, error) {
|
|
var exists bool
|
|
sql := "select exists(select 1 from \"schema\" where \"id\"=? limit 1)"
|
|
|
|
if boil.IsDebug(ctx) {
|
|
writer := boil.DebugWriterFrom(ctx)
|
|
fmt.Fprintln(writer, sql)
|
|
fmt.Fprintln(writer, iD)
|
|
}
|
|
row := exec.QueryRowContext(ctx, sql, iD)
|
|
|
|
err := row.Scan(&exists)
|
|
if err != nil {
|
|
return false, errors.Wrap(err, "models: unable to check if schema exists")
|
|
}
|
|
|
|
return exists, nil
|
|
}
|