initial commit
This commit is contained in:
commit
8b3db331c6
4
.dockerignore
Normal file
4
.dockerignore
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
node_modules/
|
||||||
|
.git/
|
||||||
|
wwwroot/js/app.js
|
||||||
|
wwwroot/css/app.css
|
9
.editorconfig
Normal file
9
.editorconfig
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
|
||||||
|
[*]
|
||||||
|
end_of_line = lf
|
||||||
|
indent_style = tab
|
||||||
|
indent_size = 4
|
||||||
|
|
||||||
|
[*.json] # package.json, etc
|
||||||
|
indent_style = space
|
||||||
|
indent_size = 2
|
10
.gitignore
vendored
Normal file
10
.gitignore
vendored
Normal file
@ -0,0 +1,10 @@
|
|||||||
|
# node.js dependencies
|
||||||
|
node_modules/
|
||||||
|
|
||||||
|
# Local instance customisation
|
||||||
|
webscaffold.db3
|
||||||
|
|
||||||
|
# Autogenerated files that are rebuilt both in dev + in the production Dockerfile
|
||||||
|
webscaffold
|
||||||
|
wwwroot/js/app.js
|
||||||
|
wwwroot/css/app.css
|
103
Application.go
Normal file
103
Application.go
Normal file
@ -0,0 +1,103 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"path/filepath"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/NYTimes/gziphandler"
|
||||||
|
"golang.org/x/crypto/acme/autocert"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AppName is the program's brand name; used in log lines and the
// Server response header (see ServeHTTP).
const AppName = `webscaffold`

// WebSession is the in-memory record for one logged-in browser session.
type WebSession struct {
	LastRefreshTime time.Time // when this session was last refreshed
}

// Application bundles the whole web daemon: runtime configuration, the
// static-file handler, the database handle and the in-memory session table.
type Application struct {
	c       Config       // parsed runtime configuration
	webfile http.Handler // static file server rooted at c.WebrootDir (set in Start)
	db      *sql.DB      // SQLite handle (opened in connectToDB)

	// sessions maps session key -> session state.
	// NOTE(review): no mutex guards this map in the visible code — confirm
	// all access is single-goroutine before relying on it under concurrency.
	sessions map[string]WebSession
}
|
||||||
|
|
||||||
|
func (this *Application) Start() error {
|
||||||
|
log.Printf("Starting %s application...", AppName)
|
||||||
|
|
||||||
|
// Connect to DB
|
||||||
|
err := this.connectToDB()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resources
|
||||||
|
this.webfile = http.FileServer(http.Dir(this.c.WebrootDir))
|
||||||
|
|
||||||
|
// Set up HTTP server
|
||||||
|
if this.c.Autocert == "" {
|
||||||
|
log.Printf("Starting web server (HTTP on %s)...", this.c.ListenAddress)
|
||||||
|
return http.ListenAndServe(this.c.ListenAddress, this) // Plain HTTP
|
||||||
|
|
||||||
|
} else {
|
||||||
|
log.Printf("Starting web server (HTTPS as '%s' on :80 + :443)...", this.c.Autocert)
|
||||||
|
return http.Serve(autocert.NewListener(this.c.Autocert), this) // HTTPS
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Application) respondWith(object interface{}, nerr netError) http.HandlerFunc {
|
||||||
|
return func(w http.ResponseWriter, r *http.Request) { // curry over extra arity
|
||||||
|
if nerr != nil {
|
||||||
|
nerr.Respond(w)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Plaintext response
|
||||||
|
// Buffer the resulting json data, so that we can determine the final size
|
||||||
|
|
||||||
|
jb, err := json.Marshal(object)
|
||||||
|
if err != nil {
|
||||||
|
systemError{err}.Respond(w)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
w.Header().Set(`Content-Type`, `application/json`)
|
||||||
|
w.Header().Set(`Content-Length`, fmt.Sprintf("%d", len(jb)))
|
||||||
|
w.WriteHeader(200)
|
||||||
|
w.Write(jb)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Application) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||||
|
r.Header.Set(`Server`, AppName+`/1.0`)
|
||||||
|
|
||||||
|
switch r.Method {
|
||||||
|
case "GET":
|
||||||
|
r.Header.Set(`X-Frame-Options`, `Deny`)
|
||||||
|
|
||||||
|
if r.URL.Path == `/` {
|
||||||
|
http.ServeFile(w, r, filepath.Join(this.c.WebrootDir, "index.html")) // n.b. no gzip
|
||||||
|
|
||||||
|
} else {
|
||||||
|
gziphandler.GzipHandler(this.webfile).ServeHTTP(w, r)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
case "POST":
|
||||||
|
if r.URL.Path == `/api/v1/login` {
|
||||||
|
resp, err := this.apiLogin(r)
|
||||||
|
gziphandler.GzipHandler(this.respondWith(resp, err)).ServeHTTP(w, r)
|
||||||
|
|
||||||
|
} else {
|
||||||
|
http.Error(w, "not found", 404)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
26
Dockerfile
Normal file
26
Dockerfile
Normal file
@ -0,0 +1,26 @@
|
|||||||
|
# Production Dockerfile
|
||||||
|
# Not necessary for development
|
||||||
|
|
||||||
|
# Minify JS
|
||||||
|
FROM node:10-alpine AS nodebuilder
|
||||||
|
WORKDIR /app
|
||||||
|
COPY . /app/
|
||||||
|
RUN \
|
||||||
|
npm ci && \
|
||||||
|
npm run build
|
||||||
|
|
||||||
|
# Compile Go binary
|
||||||
|
FROM golang:1.14-alpine AS gobuilder
|
||||||
|
WORKDIR /app
|
||||||
|
COPY . /app/
|
||||||
|
RUN apk --no-cache add gcc libc-dev
|
||||||
|
RUN go build -ldflags "-s -w"
|
||||||
|
|
||||||
|
# Minimal runtime container
|
||||||
|
FROM alpine:latest
|
||||||
|
WORKDIR /app
|
||||||
|
COPY --from=nodebuilder /app/wwwroot /app/wwwroot
|
||||||
|
COPY --from=gobuilder /app/webscaffold /app/webscaffold
|
||||||
|
COPY ./schema /app/schema
|
||||||
|
|
||||||
|
CMD ["./webscaffold"]
|
12
LICENSE
Normal file
12
LICENSE
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
Copyright (C) 2020, The `webscaffold` author(s).
|
||||||
|
|
||||||
|
Permission to use, copy, modify, and/or distribute this software for any purpose
|
||||||
|
with or without fee is hereby granted.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
|
||||||
|
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
|
||||||
|
FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
|
||||||
|
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
|
||||||
|
OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||||
|
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
|
||||||
|
THIS SOFTWARE.
|
9
Makefile
Normal file
9
Makefile
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
|
||||||
|
.PHONY: models
|
||||||
|
models:
|
||||||
|
${GOPATH}/bin/sqlboiler ${GOPATH}/bin/sqlboiler-sqlite3
|
||||||
|
|
||||||
|
.PHONY: deps
|
||||||
|
deps:
|
||||||
|
go get
|
||||||
|
npm ci # use package versions from package.lock only
|
62
README.md
Normal file
62
README.md
Normal file
@ -0,0 +1,62 @@
|
|||||||
|
# webscaffold
|
||||||
|
|
||||||
|
![](https://img.shields.io/badge/license-BSD0-informational)
|
||||||
|
|
||||||
|
`webscaffold` is a sample web application project that can be adopted as a base.
|
||||||
|
|
||||||
|
This project is made available to you under the terms of the [0-clause BSD license](LICENSE) i.e. you may take this code without attribution.
|
||||||
|
|
||||||
|
## Development
|
||||||
|
|
||||||
|
- Install node.js + Go dependencies: `make deps` *(gitignored)*
|
||||||
|
- Rebuild SQL model layer: `make models` *(should commit)*
|
||||||
|
- Live watch reload: `npm run watch` *(gitignored)*
|
||||||
|
- Compile backend: `go build`
|
||||||
|
- Run development instance: `./webscaffold`
|
||||||
|
- This will use the live wwwroot directory, that is managed by the live watch reload
|
||||||
|
- Run production instance: `dokku apps:create webscaffold`
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- Go backend
|
||||||
|
- [X] Integrated HTTP server
|
||||||
|
- [X] HTTP/2 support
|
||||||
|
- [X] Optional Let's Encrypt integration for HTTPS
|
||||||
|
- [X] Gzip of API responses
|
||||||
|
- [X] Exact match router
|
||||||
|
- [X] SQLite database
|
||||||
|
- [X] Migrations support
|
||||||
|
- [X] `schema` table for tracking migrations
|
||||||
|
- [X] Automatically apply new migrations at daemon startup
|
||||||
|
- [X] SQLBoiler for strict types
|
||||||
|
- [X] Basic admin/user login data types ready implemented
|
||||||
|
- [X] Password hashing with `argon2i`
|
||||||
|
- [X] Session support
|
||||||
|
- [X] DB persistence for sessions
|
||||||
|
- [X] Automatic session cleanup
|
||||||
|
- [ ] Authentication helpers for validating session data
|
||||||
|
- SPA frontend
|
||||||
|
- [X] Typescript
|
||||||
|
- [X] Object-oriented class design
|
||||||
|
- [X] jQuery for DOM access and ajax, using async promises
|
||||||
|
- [X] CoreJS polyfill for IE browsers
|
||||||
|
- [X] Hash fragment routing
|
||||||
|
- [X] `var state;`
|
||||||
|
- [X] Toast popups
|
||||||
|
- [X] LESS CSS
|
||||||
|
- [X] Bootstrap 4.x template integrated
|
||||||
|
- [X] Responsive mobile friendly
|
||||||
|
- [X] LESS CSS compilation working within the rollup watch script
|
||||||
|
- [X] Secure headers
|
||||||
|
- [X] Strict Content-Security-Policy (CSP)
|
||||||
|
- [-] Subresource integrity (SRI)
|
||||||
|
- [X] for vendor scripts
|
||||||
|
- Regenerate via `cat FILE | openssl dgst -sha384 -binary | openssl base64 -A`
|
||||||
|
- [ ] for first-party scripts
|
||||||
|
- [X] X-Frame-Options deny
|
||||||
|
- Docker for production
|
||||||
|
- [X] Multi-stage docker build
|
||||||
|
- [X] Dokku deployment steps
|
||||||
|
- Makefile for all scripts
|
||||||
|
- [X] Watch-based minification
|
||||||
|
- [ ] Single command to reset base app name / branding
|
75
api_login.go
Normal file
75
api_login.go
Normal file
@ -0,0 +1,75 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"errors"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"webscaffold/models"
|
||||||
|
|
||||||
|
"github.com/gofrs/uuid"
|
||||||
|
"github.com/volatiletech/sqlboiler/boil"
|
||||||
|
"github.com/volatiletech/sqlboiler/queries/qm"
|
||||||
|
)
|
||||||
|
|
||||||
|
// apiLoginResponse is the JSON body returned by a successful login call.
type apiLoginResponse struct {
	SessionKey string // opaque session token the client presents on later requests
}
|
||||||
|
|
||||||
|
// apiLogin handles POST /api/v1/login.
//
// It reads the `email` and `password` form values, verifies the password
// against the stored hash, transparently upgrades the stored hash format
// when it is outdated, and on success inserts a new session row and
// returns its key.
//
// Returns a userFacingError (403) for bad credentials, or a systemError
// for unexpected failures.
func (this *Application) apiLogin(r *http.Request) (*apiLoginResponse, netError) {

	ctx := r.Context()
	email := r.PostFormValue(`email`)
	passwd := r.PostFormValue(`password`)

	// find user with matching email
	user, err := models.Users(qm.Where(`email = ?`, email), qm.Limit(1)).One(ctx, this.db)
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			// Unknown email gives the same 403 message as a wrong password,
			// so the API does not reveal which credential part was wrong.
			return nil, userFacingError{code: 403, e: errors.New(`Invalid authentication`)}
		}

		return nil, systemError{err}
	}

	// verify password
	ok, err := verifyPassword(int(user.PasswordFormat), user.PasswordHash, passwd)
	if err != nil {
		return nil, systemError{err}
	}
	if !ok {
		return nil, userFacingError{code: 403, e: errors.New(`Invalid authentication`)}
	}

	// Successful login
	// Maybe upgrade password format? Login is the only time we hold the
	// plaintext, so it is the one chance to migrate old hashes forward.
	if int(user.PasswordFormat) != hashFmtPreferred {
		if newHash, err := hashPassword(hashFmtPreferred, passwd); err == nil {
			user.PasswordFormat = int64(hashFmtPreferred)
			user.PasswordHash = newHash
			_, err := user.Update(ctx, this.db, boil.Whitelist(models.UserColumns.PasswordFormat, models.UserColumns.PasswordHash))
			if err != nil {
				// Continue, but log error — login still succeeds with the old hash.
				log.Printf(`couldn't update stored hash format for user '%s': %s`, email, err.Error())
			}

		}
	}

	// Create a session token
	sess := models.Session{
		ID:     uuid.Must(uuid.NewV4()).String(),
		Mtime:  time.Now().Unix(),
		UserID: user.ID,
	}
	err = sess.Insert(ctx, this.db, boil.Infer())
	if err != nil {
		return nil, systemError{err}
	}

	// Successful login
	return &apiLoginResponse{SessionKey: sess.ID}, nil

}
|
27
bg_cleanupSessions.go
Normal file
27
bg_cleanupSessions.go
Normal file
@ -0,0 +1,27 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// sessionCleanupInterval is how often the expiry sweep below runs.
	sessionCleanupInterval = 30 * time.Minute
	// sessionCleanupMaxAge is how long an idle session row survives.
	sessionCleanupMaxAge = 14 * 24 * time.Hour // stay logged in for 14 days of inactivity

)

// cleanupSessionsWorker periodically deletes session rows whose mtime is
// older than sessionCleanupMaxAge. It never returns, so it is intended to
// run in its own goroutine.
//
// NOTE(review): the loop has no shutdown signal — it runs until process
// exit. Confirm that is intended before reusing this pattern elsewhere.
func (this *Application) cleanupSessionsWorker() {
	for {

		// Remove every session not touched within the max-age window.
		cutOff := time.Now().Add(-sessionCleanupMaxAge).Unix()
		_, err := this.db.Exec(`DELETE FROM sessions WHERE mtime < ?`, cutOff)
		if err != nil {
			// Best-effort: log and retry on the next tick.
			log.Printf(`cleaning up sessions: %s`, err.Error())
		}

		// Wait for next ticker
		time.Sleep(sessionCleanupInterval)

	}
}
|
130
db.go
Normal file
130
db.go
Normal file
@ -0,0 +1,130 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
_ "github.com/mattn/go-sqlite3"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (this *Application) connectToDB() error {
|
||||||
|
|
||||||
|
db, err := sql.Open("sqlite3", this.c.DBPath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
this.db = db
|
||||||
|
|
||||||
|
// Check current schema version
|
||||||
|
currentSchemaVer, err := this.getCurrentSchemaVersion()
|
||||||
|
if err != nil && (errors.Is(err, sql.ErrNoRows) || strings.Contains(err.Error(), `no such table: schema`)) {
|
||||||
|
currentSchemaVer = 0
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Perform migrations
|
||||||
|
err = this.applyMigrations(currentSchemaVer)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("applying migrations: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Done
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Application) getCurrentSchemaVersion() (int, error) {
|
||||||
|
var maxId int
|
||||||
|
err := this.db.QueryRow(`SELECT MAX(id) m FROM schema`).Scan(&maxId)
|
||||||
|
return maxId, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (this *Application) applyMigrations(currentSchemaVer int) error {
|
||||||
|
|
||||||
|
rx := regexp.MustCompile(`^([0-9\-]+).*\.sql$`)
|
||||||
|
|
||||||
|
dh, err := os.Open(this.c.SchemaDir)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
filenames, err := dh.Readdirnames(-1)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Strings(filenames)
|
||||||
|
|
||||||
|
log.Printf("DB Schema version %d, searching for migrations...", currentSchemaVer)
|
||||||
|
|
||||||
|
applied := 0
|
||||||
|
|
||||||
|
for _, filename := range filenames {
|
||||||
|
|
||||||
|
parts := rx.FindStringSubmatch(filename)
|
||||||
|
if parts == nil {
|
||||||
|
return fmt.Errorf("found file '%s' in %s directory not matching expected file format, aborting", filename, this.c.SchemaDir)
|
||||||
|
}
|
||||||
|
|
||||||
|
schemaVer, err := strconv.Atoi(strings.Replace(parts[1], `-`, "", -1))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if currentSchemaVer >= schemaVer {
|
||||||
|
continue // already applied
|
||||||
|
}
|
||||||
|
|
||||||
|
// Need to apply this schema
|
||||||
|
|
||||||
|
fpath := filepath.Join(this.c.SchemaDir, filename)
|
||||||
|
sqlFile, err := ioutil.ReadFile(fpath)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("loading '%s' for schema migration: %w", fpath, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The SQLite driver does not support multiple SQL statements in a single Exec() call
|
||||||
|
// Try to break it up into multiple by splitting on `);`
|
||||||
|
sqlStmts := strings.Split(string(sqlFile), `);`)
|
||||||
|
for i := 0; i < len(sqlStmts)-1; i++ {
|
||||||
|
sqlStmts[i] += `);`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Don't need to call Exec() if the trailing part is just blank
|
||||||
|
if strings.TrimSpace(sqlStmts[len(sqlStmts)-1]) == "" {
|
||||||
|
sqlStmts = sqlStmts[0 : len(sqlStmts)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("Applying schema migration '%s' (%d statement(s))", fpath, len(sqlStmts))
|
||||||
|
|
||||||
|
for _, stmt := range sqlStmts {
|
||||||
|
_, err = this.db.Exec(stmt)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("applying schema '%s': %w", fpath, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
applied += 1
|
||||||
|
}
|
||||||
|
|
||||||
|
if applied > 0 {
|
||||||
|
log.Printf("Successfully applied %d schema migration(s)", applied)
|
||||||
|
} else {
|
||||||
|
log.Println("No new schema migrations to apply.")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Done
|
||||||
|
return nil
|
||||||
|
}
|
19
go.mod
Normal file
19
go.mod
Normal file
@ -0,0 +1,19 @@
|
|||||||
|
module webscaffold
|
||||||
|
|
||||||
|
go 1.13
|
||||||
|
|
||||||
|
require (
|
||||||
|
github.com/NYTimes/gziphandler v1.1.1
|
||||||
|
github.com/friendsofgo/errors v0.9.2
|
||||||
|
github.com/gofrs/uuid v3.2.0+incompatible
|
||||||
|
github.com/mattn/go-sqlite3 v2.0.3+incompatible
|
||||||
|
github.com/pkg/errors v0.9.1
|
||||||
|
github.com/spf13/cast v1.3.1 // indirect
|
||||||
|
github.com/spf13/cobra v1.0.0 // indirect
|
||||||
|
github.com/spf13/viper v1.6.3
|
||||||
|
github.com/volatiletech/inflect v0.0.0-20170731032912-e7201282ae8d // indirect
|
||||||
|
github.com/volatiletech/null v8.0.0+incompatible
|
||||||
|
github.com/volatiletech/sqlboiler v3.7.0+incompatible
|
||||||
|
github.com/volatiletech/sqlboiler-sqlite3 v0.0.0-20180915213852-a153537eb0c3
|
||||||
|
golang.org/x/crypto v0.0.0-20200420201142-3c4aac89819a
|
||||||
|
)
|
187
go.sum
Normal file
187
go.sum
Normal file
@ -0,0 +1,187 @@
|
|||||||
|
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||||
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
|
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
|
||||||
|
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||||
|
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||||
|
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||||
|
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||||
|
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||||
|
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||||
|
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||||
|
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||||
|
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||||
|
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||||
|
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||||
|
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||||
|
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||||
|
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||||
|
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||||
|
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||||
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||||
|
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||||
|
github.com/friendsofgo/errors v0.9.2 h1:X6NYxef4efCBdwI7BgS820zFaN7Cphrmb+Pljdzjtgk=
|
||||||
|
github.com/friendsofgo/errors v0.9.2/go.mod h1:yCvFW5AkDIL9qn7suHVLiI/gH228n7PC4Pn44IGoTOI=
|
||||||
|
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||||
|
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||||
|
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||||
|
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||||
|
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||||
|
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||||
|
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||||
|
github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE=
|
||||||
|
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||||
|
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||||
|
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||||
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||||
|
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||||
|
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||||
|
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||||
|
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||||
|
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
|
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||||
|
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||||
|
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||||
|
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||||
|
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||||
|
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||||
|
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||||
|
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||||
|
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||||
|
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||||
|
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||||
|
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||||
|
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||||
|
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||||
|
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||||
|
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
|
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||||
|
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||||
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
|
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||||
|
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
|
||||||
|
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||||
|
github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U=
|
||||||
|
github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||||
|
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||||
|
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||||
|
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
|
||||||
|
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||||
|
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
|
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||||
|
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||||
|
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||||
|
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||||
|
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
|
||||||
|
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||||
|
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||||
|
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||||
|
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
|
||||||
|
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||||
|
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
|
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||||
|
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||||
|
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||||
|
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||||
|
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||||
|
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||||
|
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
|
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||||
|
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||||
|
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||||
|
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||||
|
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||||
|
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||||
|
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
|
||||||
|
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||||
|
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||||
|
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
|
||||||
|
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||||
|
github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
|
||||||
|
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
|
||||||
|
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
|
||||||
|
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||||
|
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
|
||||||
|
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||||
|
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||||
|
github.com/spf13/viper v1.6.3 h1:pDDu1OyEDTKzpJwdq4TiuLyMsUgRa/BT5cn5O62NoHs=
|
||||||
|
github.com/spf13/viper v1.6.3/go.mod h1:jUMtyi0/lB5yZH/FjyGAoH7IMNrIhlBf6pXZmbMDvzw=
|
||||||
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||||
|
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
|
||||||
|
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||||
|
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||||
|
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
|
||||||
|
github.com/volatiletech/inflect v0.0.0-20170731032912-e7201282ae8d h1:gI4/tqP6lCY5k6Sg+4k9qSoBXmPwG+xXgMpK7jivD4M=
|
||||||
|
github.com/volatiletech/inflect v0.0.0-20170731032912-e7201282ae8d/go.mod h1:jspfvgf53t5NLUT4o9L1IX0kIBNKamGq1tWc/MgWK9Q=
|
||||||
|
github.com/volatiletech/null v8.0.0+incompatible h1:7wP8m5d/gZ6kW/9GnrLtMCRre2dlEnaQ9Km5OXlK4zg=
|
||||||
|
github.com/volatiletech/null v8.0.0+incompatible/go.mod h1:0wD98JzdqB+rLyZ70fN05VDbXbafIb0KU0MdVhCzmOQ=
|
||||||
|
github.com/volatiletech/sqlboiler v3.7.0+incompatible h1:tgc0UL8e1YW2g01ic472UMtSqrert5cSyz7tWWDIJbU=
|
||||||
|
github.com/volatiletech/sqlboiler v3.7.0+incompatible/go.mod h1:jLfDkkHWPbS2cWRLkyC20vQWaIQsASEY7gM7zSo11Yw=
|
||||||
|
github.com/volatiletech/sqlboiler-sqlite3 v0.0.0-20180915213852-a153537eb0c3 h1:E9EXfaMmPetEMgM4oRlaRgRKRLTeO13Y3XhqIUYdm+g=
|
||||||
|
github.com/volatiletech/sqlboiler-sqlite3 v0.0.0-20180915213852-a153537eb0c3/go.mod h1:NCs62D64luU94iqaIIGPOeFzhvoxSTIJkPofw0/MY+M=
|
||||||
|
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||||
|
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||||
|
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||||
|
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||||
|
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||||
|
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||||
|
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
|
golang.org/x/crypto v0.0.0-20200420201142-3c4aac89819a h1:y6sBfNd1b9Wy08a6K1Z1DZc4aXABUN5TKjkYhz7UKmo=
|
||||||
|
golang.org/x/crypto v0.0.0-20200420201142-3c4aac89819a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
|
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
|
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
|
||||||
|
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190522155817-f3200d17e092 h1:4QSRKanuywn15aTZvI/mIDEgPQpswuFndXpOj3rKEco=
|
||||||
|
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||||
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI=
|
||||||
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||||
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||||
|
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
|
||||||
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||||
|
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
|
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||||
|
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||||
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
|
||||||
|
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||||
|
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||||
|
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||||
|
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
|
||||||
|
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
90
hash.go
Normal file
90
hash.go
Normal file
@ -0,0 +1,90 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/subtle"
|
||||||
|
"encoding/base64"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"golang.org/x/crypto/argon2"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// Hash formats used by the database.
	// The integer value is stored alongside each password row so old
	// formats can still be verified after the preferred format changes.
	hashFmtPlaintext  int = 0
	hashFmtArgon2i_v1 int = 1

	// hashFmtPreferred is the format new passwords should be hashed with.
	hashFmtPreferred = hashFmtArgon2i_v1

	// Constant parameters for our v1 version of this.
	// NOTE(review): changing any of these invalidates existing v1 hashes,
	// since the parameters are not encoded into the stored value — a new
	// format constant would be needed instead.
	argon2_time              = 1
	argon2_memory            = 16 * 1024 // 16 MiB
	argon2_want_output_bytes = 32        // 256-bit
)
|
||||||
|
|
||||||
|
// verifyPassword checks if a provided plaintext password is valid for a known
|
||||||
|
// hashed password.
|
||||||
|
// If the verification is successful, but the used format is not the current
|
||||||
|
// preferred format, you may want to re-hash the password with the current
|
||||||
|
// preferred format.
|
||||||
|
func verifyPassword(format int, hashValue, testValue string) (bool, error) {
|
||||||
|
switch format {
|
||||||
|
case hashFmtPlaintext:
|
||||||
|
return subtle.ConstantTimeCompare([]byte(hashValue), []byte(testValue)) == 1, nil
|
||||||
|
|
||||||
|
case hashFmtArgon2i_v1:
|
||||||
|
// base64(salt) $ base64(hash)
|
||||||
|
tParts := strings.SplitN(hashValue, `$`, 2)
|
||||||
|
if len(tParts) != 2 {
|
||||||
|
return false, fmt.Errorf("malformed hash value (expected 2 segments, got %d)", len(tParts))
|
||||||
|
}
|
||||||
|
|
||||||
|
salt, err := base64.StdEncoding.DecodeString(tParts[0])
|
||||||
|
if err != nil {
|
||||||
|
return false, errors.New(`malformed hash value (malformed base64 of salt)`)
|
||||||
|
}
|
||||||
|
|
||||||
|
existingHashBytes, err := base64.StdEncoding.DecodeString(tParts[1])
|
||||||
|
if err != nil {
|
||||||
|
return false, errors.New(`malformed hash value (malformed base64 of hash)`)
|
||||||
|
}
|
||||||
|
|
||||||
|
newHash := argon2.Key([]byte(testValue), salt, argon2_time, argon2_memory, 1, argon2_want_output_bytes)
|
||||||
|
|
||||||
|
return subtle.ConstantTimeCompare(existingHashBytes, newHash) == 1, nil
|
||||||
|
|
||||||
|
default:
|
||||||
|
return false, fmt.Errorf("unrecognised password hash format %d", format)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// hashPassword converts the provided plaintext password into a hash format.
|
||||||
|
// It is recommended to pass in `hashFmtPreferred` as the format.
|
||||||
|
func hashPassword(format int, newValue string) (string, error) {
|
||||||
|
switch format {
|
||||||
|
case hashFmtPlaintext:
|
||||||
|
return newValue, nil
|
||||||
|
|
||||||
|
case hashFmtArgon2i_v1:
|
||||||
|
|
||||||
|
salt := make([]byte, 32)
|
||||||
|
n, err := rand.Read(salt)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if n != len(salt) {
|
||||||
|
return "", fmt.Errorf(`short read from urandom (got %d expected %d)`, n, len(salt))
|
||||||
|
}
|
||||||
|
|
||||||
|
newHash := argon2.Key([]byte(newValue), salt, argon2_time, argon2_memory, 1, argon2_want_output_bytes)
|
||||||
|
|
||||||
|
// base64(salt) $ base64(hash)
|
||||||
|
return base64.StdEncoding.EncodeToString(salt) + `$` + base64.StdEncoding.EncodeToString(newHash), nil
|
||||||
|
|
||||||
|
default:
|
||||||
|
return "", fmt.Errorf("unrecognised password hash format %d", format)
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
31
main.go
Normal file
31
main.go
Normal file
@ -0,0 +1,31 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Config holds the command-line configurable settings for the application.
// Each field is populated from a flag of the same name in main().
type Config struct {
	WebrootDir    string // directory served as static web content
	SchemaDir     string // directory containing SQL schema files
	ListenAddress string // network bind address for the HTTP server
	Autocert      string // optional domain for automatic HTTPS; empty means plain HTTP
	DBPath        string // path to the SQLite3 database file
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
|
||||||
|
var cfg Config
|
||||||
|
|
||||||
|
flag.StringVar(&cfg.WebrootDir, `WebrootDir`, "wwwroot/", "Webroot resource directory")
|
||||||
|
flag.StringVar(&cfg.SchemaDir, `SchemaDir`, `schema/`, "SQL Schema resource directory")
|
||||||
|
flag.StringVar(&cfg.ListenAddress, `ListenAddress`, "0.0.0.0:5454", "HTTP server network bind address")
|
||||||
|
flag.StringVar(&cfg.Autocert, `Autocert`, "", "(Optional) Domain name to use for automatic HTTPS (leave blank for HTTP)")
|
||||||
|
flag.StringVar(&cfg.DBPath, `DBPath`, AppName+`.db3`, "Path to SQLite3 database file")
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
app := Application{c: cfg}
|
||||||
|
err := app.Start()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
119
models/boil_main_test.go
Normal file
119
models/boil_main_test.go
Normal file
@ -0,0 +1,119 @@
|
|||||||
|
// Code generated by SQLBoiler 3.6.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
|
||||||
|
// This file is meant to be re-generated in place and/or deleted at any time.
|
||||||
|
|
||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"math/rand"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/spf13/viper"
|
||||||
|
"github.com/volatiletech/sqlboiler/boil"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Test flags: enable SQL statement debugging and override the config file.
var flagDebugMode = flag.Bool("test.sqldebug", false, "Turns on debug mode for SQL statements")
var flagConfigFile = flag.String("test.config", "", "Overrides the default config")

// outputDirDepth is how many directories deep this generated package sits
// relative to the project root (used when locating sqlboiler config).
const outputDirDepth = 1

var (
	// dbMain is the active database test harness; it is expected to be
	// assigned by a driver-specific file before TestMain runs.
	dbMain tester
)

// tester abstracts the per-driver test database lifecycle.
type tester interface {
	setup() error
	conn() (*sql.DB, error)
	teardown() error
}
|
||||||
|
|
||||||
|
// TestMain prepares the test database (via the registered dbMain tester),
// runs the test suite, and tears the database down afterwards.
func TestMain(m *testing.M) {
	if dbMain == nil {
		fmt.Println("no dbMain tester interface was ready")
		os.Exit(-1)
	}

	rand.Seed(time.Now().UnixNano())

	flag.Parse()

	var err error

	// Load configuration
	err = initViper()
	if err != nil {
		fmt.Println("unable to load config file")
		os.Exit(-2)
	}

	// Set DebugMode so we can see generated sql statements
	boil.DebugMode = *flagDebugMode

	if err = dbMain.setup(); err != nil {
		fmt.Println("Unable to execute setup:", err)
		os.Exit(-4)
	}

	// NOTE(review): a conn() error is only printed here, not exited on —
	// boil.SetDB would then receive a nil connection. Generated code
	// (SQLBoiler); left as-is.
	conn, err := dbMain.conn()
	if err != nil {
		fmt.Println("failed to get connection:", err)
	}

	var code int
	boil.SetDB(conn)
	code = m.Run()

	if err = dbMain.teardown(); err != nil {
		fmt.Println("Unable to execute teardown:", err)
		os.Exit(-5)
	}

	os.Exit(code)
}
|
||||||
|
|
||||||
|
// initViper loads sqlboiler configuration. An explicit -test.config file is
// authoritative (and an error reading it is fatal); otherwise a "sqlboiler"
// config is searched for in the project root and the XDG/home config dirs,
// with environment variables layered on top.
func initViper() error {
	if flagConfigFile != nil && *flagConfigFile != "" {
		viper.SetConfigFile(*flagConfigFile)
		if err := viper.ReadInConfig(); err != nil {
			return err
		}
		return nil
	}

	var err error

	viper.SetConfigName("sqlboiler")

	configHome := os.Getenv("XDG_CONFIG_HOME")
	homePath := os.Getenv("HOME")
	// Walk up outputDirDepth levels from the working directory to reach the
	// project root where sqlboiler.toml is expected to live.
	wd, err := os.Getwd()
	if err != nil {
		wd = strings.Repeat("../", outputDirDepth)
	} else {
		wd = wd + strings.Repeat("/..", outputDirDepth)
	}

	configPaths := []string{wd}
	if len(configHome) > 0 {
		configPaths = append(configPaths, filepath.Join(configHome, "sqlboiler"))
	} else {
		configPaths = append(configPaths, filepath.Join(homePath, ".config/sqlboiler"))
	}

	for _, p := range configPaths {
		viper.AddConfigPath(p)
	}

	// Ignore errors here, fall back to defaults and validation to provide errs
	_ = viper.ReadInConfig()
	viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
	viper.AutomaticEnv()

	return nil
}
|
33
models/boil_queries.go
Normal file
33
models/boil_queries.go
Normal file
@ -0,0 +1,33 @@
|
|||||||
|
// Code generated by SQLBoiler 3.6.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
|
||||||
|
// This file is meant to be re-generated in place and/or deleted at any time.
|
||||||
|
|
||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/volatiletech/sqlboiler/drivers"
|
||||||
|
"github.com/volatiletech/sqlboiler/queries"
|
||||||
|
"github.com/volatiletech/sqlboiler/queries/qm"
|
||||||
|
)
|
||||||
|
|
||||||
|
// dialect describes the SQLite flavour this generated package targets:
// double-quote identifier quoting, `?` placeholders, LastInsertId support.
var dialect = drivers.Dialect{
	LQ: 0x22, // '"' left identifier quote
	RQ: 0x22, // '"' right identifier quote

	UseIndexPlaceholders:    false,
	UseLastInsertID:         true,
	UseSchema:               false,
	UseDefaultKeyword:       false,
	UseAutoColumns:          false,
	UseTopClause:            false,
	UseOutputClause:         false,
	UseCaseWhenExistsClause: false,
}

// NewQuery initializes a new Query using the passed in QueryMods
func NewQuery(mods ...qm.QueryMod) *queries.Query {
	q := &queries.Query{}
	queries.SetDialect(q, &dialect)
	qm.Apply(q, mods...)

	return q
}
|
52
models/boil_queries_test.go
Normal file
52
models/boil_queries_test.go
Normal file
@ -0,0 +1,52 @@
|
|||||||
|
// Code generated by SQLBoiler 3.6.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
|
||||||
|
// This file is meant to be re-generated in place and/or deleted at any time.
|
||||||
|
|
||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"math/rand"
|
||||||
|
"regexp"
|
||||||
|
|
||||||
|
"github.com/volatiletech/sqlboiler/boil"
|
||||||
|
)
|
||||||
|
|
||||||
|
// dbNameRand is a RNG used for generating temporary test database names.
var dbNameRand *rand.Rand

// MustTx panics if the supplied transactor could not be created, otherwise
// returns it unchanged — a convenience for test setup.
func MustTx(transactor boil.ContextTransactor, err error) boil.ContextTransactor {
	if err != nil {
		panic(fmt.Sprintf("Cannot create a transactor: %s", err))
	}
	return transactor
}

// newFKeyDestroyer wraps reader so that text matching regex (foreign key
// clauses in schema SQL) is stripped out on read.
func newFKeyDestroyer(regex *regexp.Regexp, reader io.Reader) io.Reader {
	return &fKeyDestroyer{
		reader: reader,
		rgx:    regex,
	}
}

// fKeyDestroyer lazily reads its source in full, normalises line endings,
// removes regex matches, and then serves the result from a buffer.
type fKeyDestroyer struct {
	reader io.Reader
	buf    *bytes.Buffer
	rgx    *regexp.Regexp
}

// Read implements io.Reader; the underlying source is consumed and filtered
// on the first call.
func (f *fKeyDestroyer) Read(b []byte) (int, error) {
	if f.buf == nil {
		all, err := ioutil.ReadAll(f.reader)
		if err != nil {
			return 0, err
		}

		// Normalise CRLF to LF before applying the removal regex.
		all = bytes.Replace(all, []byte{'\r', '\n'}, []byte{'\n'}, -1)
		all = f.rgx.ReplaceAll(all, []byte{})
		f.buf = bytes.NewBuffer(all)
	}

	return f.buf.Read(b)
}
|
157
models/boil_suites_test.go
Normal file
157
models/boil_suites_test.go
Normal file
@ -0,0 +1,157 @@
|
|||||||
|
// Code generated by SQLBoiler 3.6.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
|
||||||
|
// This file is meant to be re-generated in place and/or deleted at any time.
|
||||||
|
|
||||||
|
package models
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
// This test suite runs each operation test in parallel.
// Example, if your database has 3 tables, the suite will run:
// table1, table2 and table3 Delete in parallel
// table1, table2 and table3 Insert in parallel, and so forth.
// It does NOT run each operation group in parallel.
// Separating the tests thusly grants avoidance of Postgres deadlocks.
func TestParent(t *testing.T) {
	t.Run("Schemas", testSchemas)
	t.Run("Sessions", testSessions)
	t.Run("Users", testUsers)
}

// Deletion operations, one sub-test per table.
func TestDelete(t *testing.T) {
	t.Run("Schemas", testSchemasDelete)
	t.Run("Sessions", testSessionsDelete)
	t.Run("Users", testUsersDelete)
}

func TestQueryDeleteAll(t *testing.T) {
	t.Run("Schemas", testSchemasQueryDeleteAll)
	t.Run("Sessions", testSessionsQueryDeleteAll)
	t.Run("Users", testUsersQueryDeleteAll)
}

func TestSliceDeleteAll(t *testing.T) {
	t.Run("Schemas", testSchemasSliceDeleteAll)
	t.Run("Sessions", testSessionsSliceDeleteAll)
	t.Run("Users", testUsersSliceDeleteAll)
}

// Read operations, one sub-test per table.
func TestExists(t *testing.T) {
	t.Run("Schemas", testSchemasExists)
	t.Run("Sessions", testSessionsExists)
	t.Run("Users", testUsersExists)
}

func TestFind(t *testing.T) {
	t.Run("Schemas", testSchemasFind)
	t.Run("Sessions", testSessionsFind)
	t.Run("Users", testUsersFind)
}

func TestBind(t *testing.T) {
	t.Run("Schemas", testSchemasBind)
	t.Run("Sessions", testSessionsBind)
	t.Run("Users", testUsersBind)
}

func TestOne(t *testing.T) {
	t.Run("Schemas", testSchemasOne)
	t.Run("Sessions", testSessionsOne)
	t.Run("Users", testUsersOne)
}

func TestAll(t *testing.T) {
	t.Run("Schemas", testSchemasAll)
	t.Run("Sessions", testSessionsAll)
	t.Run("Users", testUsersAll)
}

func TestCount(t *testing.T) {
	t.Run("Schemas", testSchemasCount)
	t.Run("Sessions", testSessionsCount)
	t.Run("Users", testUsersCount)
}

func TestHooks(t *testing.T) {
	t.Run("Schemas", testSchemasHooks)
	t.Run("Sessions", testSessionsHooks)
	t.Run("Users", testUsersHooks)
}

func TestInsert(t *testing.T) {
	t.Run("Schemas", testSchemasInsert)
	t.Run("Schemas", testSchemasInsertWhitelist)
	t.Run("Sessions", testSessionsInsert)
	t.Run("Sessions", testSessionsInsertWhitelist)
	t.Run("Users", testUsersInsert)
	t.Run("Users", testUsersInsertWhitelist)
}

// Relationship tests below are empty because this schema has no
// relationships; SQLBoiler still generates the stubs.

// TestToOne tests cannot be run in parallel
// or deadlocks can occur.
func TestToOne(t *testing.T) {}

// TestOneToOne tests cannot be run in parallel
// or deadlocks can occur.
func TestOneToOne(t *testing.T) {}

// TestToMany tests cannot be run in parallel
// or deadlocks can occur.
func TestToMany(t *testing.T) {}

// TestToOneSet tests cannot be run in parallel
// or deadlocks can occur.
func TestToOneSet(t *testing.T) {}

// TestToOneRemove tests cannot be run in parallel
// or deadlocks can occur.
func TestToOneRemove(t *testing.T) {}

// TestOneToOneSet tests cannot be run in parallel
// or deadlocks can occur.
func TestOneToOneSet(t *testing.T) {}

// TestOneToOneRemove tests cannot be run in parallel
// or deadlocks can occur.
func TestOneToOneRemove(t *testing.T) {}

// TestToManyAdd tests cannot be run in parallel
// or deadlocks can occur.
func TestToManyAdd(t *testing.T) {}

// TestToManySet tests cannot be run in parallel
// or deadlocks can occur.
func TestToManySet(t *testing.T) {}

// TestToManyRemove tests cannot be run in parallel
// or deadlocks can occur.
func TestToManyRemove(t *testing.T) {}

// Reload/select/update operations, one sub-test per table.
func TestReload(t *testing.T) {
	t.Run("Schemas", testSchemasReload)
	t.Run("Sessions", testSessionsReload)
	t.Run("Users", testUsersReload)
}

func TestReloadAll(t *testing.T) {
	t.Run("Schemas", testSchemasReloadAll)
	t.Run("Sessions", testSessionsReloadAll)
	t.Run("Users", testUsersReloadAll)
}

func TestSelect(t *testing.T) {
	t.Run("Schemas", testSchemasSelect)
	t.Run("Sessions", testSessionsSelect)
	t.Run("Users", testUsersSelect)
}

func TestUpdate(t *testing.T) {
	t.Run("Schemas", testSchemasUpdate)
	t.Run("Sessions", testSessionsUpdate)
	t.Run("Users", testUsersUpdate)
}

func TestSliceUpdateAll(t *testing.T) {
	t.Run("Schemas", testSchemasSliceUpdateAll)
	t.Run("Sessions", testSessionsSliceUpdateAll)
	t.Run("Users", testUsersSliceUpdateAll)
}
14
models/boil_table_names.go
Normal file
14
models/boil_table_names.go
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
// Code generated by SQLBoiler 3.6.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
|
||||||
|
// This file is meant to be re-generated in place and/or deleted at any time.
|
||||||
|
|
||||||
|
package models
|
||||||
|
|
||||||
|
// TableNames maps each model to its database table name, so callers can
// reference tables without string literals.
var TableNames = struct {
	Schema  string
	Session string
	User    string
}{
	Schema:  "schema",
	Session: "session",
	User:    "user",
}
|
52
models/boil_types.go
Normal file
52
models/boil_types.go
Normal file
@ -0,0 +1,52 @@
|
|||||||
|
// Code generated by SQLBoiler 3.6.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
|
||||||
|
// This file is meant to be re-generated in place and/or deleted at any time.
|
||||||
|
|
||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/friendsofgo/errors"
|
||||||
|
"github.com/volatiletech/sqlboiler/boil"
|
||||||
|
"github.com/volatiletech/sqlboiler/strmangle"
|
||||||
|
)
|
||||||
|
|
||||||
|
// M type is for providing columns and column values to UpdateAll.
type M map[string]interface{}

// ErrSyncFail occurs during insert when the record could not be retrieved in
// order to populate default value information. This usually happens when LastInsertId
// fails or there was a primary key configuration that was not resolvable.
var ErrSyncFail = errors.New("models: failed to synchronize data after insert")

// insertCache holds prebuilt insert SQL and struct-field mappings, keyed by
// column set, so repeated inserts skip query generation.
type insertCache struct {
	query        string
	retQuery     string
	valueMapping []uint64
	retMapping   []uint64
}

// updateCache is the update-statement analogue of insertCache.
type updateCache struct {
	query        string
	valueMapping []uint64
}

// makeCacheKey builds the cache key for the insert/update caches from the
// column selection kind, the column names, and any non-zero default columns.
func makeCacheKey(cols boil.Columns, nzDefaults []string) string {
	buf := strmangle.GetBuffer()

	buf.WriteString(strconv.Itoa(cols.Kind))
	for _, w := range cols.Cols {
		buf.WriteString(w)
	}

	// '.' separates the column list from the non-zero-default list so the
	// two segments cannot collide.
	if len(nzDefaults) != 0 {
		buf.WriteByte('.')
	}
	for _, nz := range nzDefaults {
		buf.WriteString(nz)
	}

	str := buf.String()
	strmangle.PutBuffer(buf)
	return str
}
|
790
models/schema.go
Normal file
790
models/schema.go
Normal file
@ -0,0 +1,790 @@
|
|||||||
|
// Code generated by SQLBoiler 3.6.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
|
||||||
|
// This file is meant to be re-generated in place and/or deleted at any time.
|
||||||
|
|
||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/friendsofgo/errors"
|
||||||
|
"github.com/volatiletech/sqlboiler/boil"
|
||||||
|
"github.com/volatiletech/sqlboiler/queries"
|
||||||
|
"github.com/volatiletech/sqlboiler/queries/qm"
|
||||||
|
"github.com/volatiletech/sqlboiler/queries/qmhelper"
|
||||||
|
"github.com/volatiletech/sqlboiler/strmangle"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Schema is an object representing the database table.
type Schema struct {
	ID int64 `boil:"id" json:"id" toml:"id" yaml:"id"`

	R *schemaR `boil:"-" json:"-" toml:"-" yaml:"-"`
	L schemaL  `boil:"-" json:"-" toml:"-" yaml:"-"`
}

// SchemaColumns exposes column names for building queries without literals.
var SchemaColumns = struct {
	ID string
}{
	ID: "id",
}

// Generated where

// whereHelperint64 builds typed WHERE-clause query mods for an int64 column.
type whereHelperint64 struct{ field string }

func (w whereHelperint64) EQ(x int64) qm.QueryMod  { return qmhelper.Where(w.field, qmhelper.EQ, x) }
func (w whereHelperint64) NEQ(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.NEQ, x) }
func (w whereHelperint64) LT(x int64) qm.QueryMod  { return qmhelper.Where(w.field, qmhelper.LT, x) }
func (w whereHelperint64) LTE(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LTE, x) }
func (w whereHelperint64) GT(x int64) qm.QueryMod  { return qmhelper.Where(w.field, qmhelper.GT, x) }
func (w whereHelperint64) GTE(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GTE, x) }
func (w whereHelperint64) IN(slice []int64) qm.QueryMod {
	values := make([]interface{}, 0, len(slice))
	for _, value := range slice {
		values = append(values, value)
	}
	return qm.WhereIn(fmt.Sprintf("%s IN ?", w.field), values...)
}

// SchemaWhere provides the typed where helpers for each Schema column.
var SchemaWhere = struct {
	ID whereHelperint64
}{
	ID: whereHelperint64{field: "\"schema\".\"id\""},
}

// SchemaRels is where relationship names are stored.
var SchemaRels = struct {
}{}

// schemaR is where relationships are stored.
type schemaR struct {
}

// NewStruct creates a new relationship struct
func (*schemaR) NewStruct() *schemaR {
	return &schemaR{}
}

// schemaL is where Load methods for each relationship are stored.
type schemaL struct{}
|
||||||
|
|
||||||
|
// Column-set metadata used by generated insert/update logic.
var (
	schemaAllColumns            = []string{"id"}
	schemaColumnsWithoutDefault = []string{}
	schemaColumnsWithDefault    = []string{"id"}
	schemaPrimaryKeyColumns     = []string{"id"}
)

type (
	// SchemaSlice is an alias for a slice of pointers to Schema.
	// This should generally be used opposed to []Schema.
	SchemaSlice []*Schema
	// SchemaHook is the signature for custom Schema hook methods
	SchemaHook func(context.Context, boil.ContextExecutor, *Schema) error

	// schemaQuery wraps queries.Query so finders can return a typed query.
	schemaQuery struct {
		*queries.Query
	}
)

// Cache for insert, update and upsert
var (
	schemaType                 = reflect.TypeOf(&Schema{})
	schemaMapping              = queries.MakeStructMapping(schemaType)
	schemaPrimaryKeyMapping, _ = queries.BindMapping(schemaType, schemaMapping, schemaPrimaryKeyColumns)
	schemaInsertCacheMut       sync.RWMutex
	schemaInsertCache          = make(map[string]insertCache)
	schemaUpdateCacheMut       sync.RWMutex
	schemaUpdateCache          = make(map[string]updateCache)
	schemaUpsertCacheMut       sync.RWMutex
	schemaUpsertCache          = make(map[string]insertCache)
)

var (
	// Force time package dependency for automated UpdatedAt/CreatedAt.
	_ = time.Second
	// Force qmhelper dependency for where clause generation (which doesn't
	// always happen)
	_ = qmhelper.Where
)

// Registered hook functions, executed in order around each lifecycle event.
var schemaBeforeInsertHooks []SchemaHook
var schemaBeforeUpdateHooks []SchemaHook
var schemaBeforeDeleteHooks []SchemaHook
var schemaBeforeUpsertHooks []SchemaHook

var schemaAfterInsertHooks []SchemaHook
var schemaAfterSelectHooks []SchemaHook
var schemaAfterUpdateHooks []SchemaHook
var schemaAfterDeleteHooks []SchemaHook
var schemaAfterUpsertHooks []SchemaHook
|
||||||
|
|
||||||
|
// doBeforeInsertHooks executes all "before insert" hooks.
|
||||||
|
func (o *Schema) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
||||||
|
if boil.HooksAreSkipped(ctx) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, hook := range schemaBeforeInsertHooks {
|
||||||
|
if err := hook(ctx, exec, o); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// doBeforeUpdateHooks executes all "before Update" hooks.
// Hooks are skipped entirely when hook skipping was applied to ctx.
func (o *Schema) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	// Run hooks in registration order; stop at the first error.
	for _, hook := range schemaBeforeUpdateHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
// doBeforeDeleteHooks executes all "before Delete" hooks.
// Hooks are skipped entirely when hook skipping was applied to ctx.
func (o *Schema) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	// Run hooks in registration order; stop at the first error.
	for _, hook := range schemaBeforeDeleteHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
// doBeforeUpsertHooks executes all "before Upsert" hooks.
// Hooks are skipped entirely when hook skipping was applied to ctx.
func (o *Schema) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	// Run hooks in registration order; stop at the first error.
	for _, hook := range schemaBeforeUpsertHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
// doAfterInsertHooks executes all "after Insert" hooks.
// Hooks are skipped entirely when hook skipping was applied to ctx.
func (o *Schema) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	// Run hooks in registration order; stop at the first error.
	for _, hook := range schemaAfterInsertHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
// doAfterSelectHooks executes all "after Select" hooks.
// Hooks are skipped entirely when hook skipping was applied to ctx.
func (o *Schema) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	// Run hooks in registration order; stop at the first error.
	for _, hook := range schemaAfterSelectHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
// doAfterUpdateHooks executes all "after Update" hooks.
// Hooks are skipped entirely when hook skipping was applied to ctx.
func (o *Schema) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	// Run hooks in registration order; stop at the first error.
	for _, hook := range schemaAfterUpdateHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
// doAfterDeleteHooks executes all "after Delete" hooks.
// Hooks are skipped entirely when hook skipping was applied to ctx.
func (o *Schema) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	// Run hooks in registration order; stop at the first error.
	for _, hook := range schemaAfterDeleteHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
// doAfterUpsertHooks executes all "after Upsert" hooks.
// Hooks are skipped entirely when hook skipping was applied to ctx.
func (o *Schema) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	// Run hooks in registration order; stop at the first error.
	for _, hook := range schemaAfterUpsertHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
// AddSchemaHook registers your hook function for all future operations.
// NOTE(review): the appends below are not mutex-guarded, so hooks should be
// registered at startup, before concurrent use of this package.
func AddSchemaHook(hookPoint boil.HookPoint, schemaHook SchemaHook) {
	switch hookPoint {
	case boil.BeforeInsertHook:
		schemaBeforeInsertHooks = append(schemaBeforeInsertHooks, schemaHook)
	case boil.BeforeUpdateHook:
		schemaBeforeUpdateHooks = append(schemaBeforeUpdateHooks, schemaHook)
	case boil.BeforeDeleteHook:
		schemaBeforeDeleteHooks = append(schemaBeforeDeleteHooks, schemaHook)
	case boil.BeforeUpsertHook:
		schemaBeforeUpsertHooks = append(schemaBeforeUpsertHooks, schemaHook)
	case boil.AfterInsertHook:
		schemaAfterInsertHooks = append(schemaAfterInsertHooks, schemaHook)
	case boil.AfterSelectHook:
		schemaAfterSelectHooks = append(schemaAfterSelectHooks, schemaHook)
	case boil.AfterUpdateHook:
		schemaAfterUpdateHooks = append(schemaAfterUpdateHooks, schemaHook)
	case boil.AfterDeleteHook:
		schemaAfterDeleteHooks = append(schemaAfterDeleteHooks, schemaHook)
	case boil.AfterUpsertHook:
		schemaAfterUpsertHooks = append(schemaAfterUpsertHooks, schemaHook)
	}
}
|
||||||
|
|
||||||
|
// One returns a single schema record from the query.
// Returns sql.ErrNoRows unchanged so callers can test for "not found".
func (q schemaQuery) One(ctx context.Context, exec boil.ContextExecutor) (*Schema, error) {
	o := &Schema{}

	// Restrict the query to a single row before binding.
	queries.SetLimit(q.Query, 1)

	err := q.Bind(ctx, exec, o)
	if err != nil {
		if errors.Cause(err) == sql.ErrNoRows {
			// Surface the sentinel directly rather than a wrapped error.
			return nil, sql.ErrNoRows
		}
		return nil, errors.Wrap(err, "models: failed to execute a one query for schema")
	}

	if err := o.doAfterSelectHooks(ctx, exec); err != nil {
		return o, err
	}

	return o, nil
}
|
||||||
|
|
||||||
|
// All returns all Schema records from the query.
// After-select hooks run for every bound row.
func (q schemaQuery) All(ctx context.Context, exec boil.ContextExecutor) (SchemaSlice, error) {
	var o []*Schema

	err := q.Bind(ctx, exec, &o)
	if err != nil {
		return nil, errors.Wrap(err, "models: failed to assign all query results to Schema slice")
	}

	// Only iterate when hooks are actually registered.
	if len(schemaAfterSelectHooks) != 0 {
		for _, obj := range o {
			if err := obj.doAfterSelectHooks(ctx, exec); err != nil {
				return o, err
			}
		}
	}

	return o, nil
}
|
||||||
|
|
||||||
|
// Count returns the count of all Schema records in the query.
func (q schemaQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	var count int64

	// Replace the select list with COUNT(*).
	queries.SetSelect(q.Query, nil)
	queries.SetCount(q.Query)

	err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
	if err != nil {
		return 0, errors.Wrap(err, "models: failed to count schema rows")
	}

	return count, nil
}
|
||||||
|
|
||||||
|
// Exists checks if the row exists in the table.
func (q schemaQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) {
	var count int64

	// Turn the query into SELECT COUNT(*) ... LIMIT 1 and test count > 0.
	queries.SetSelect(q.Query, nil)
	queries.SetCount(q.Query)
	queries.SetLimit(q.Query, 1)

	err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
	if err != nil {
		return false, errors.Wrap(err, "models: failed to check if schema exists")
	}

	return count > 0, nil
}
|
||||||
|
|
||||||
|
// Schemas retrieves all the records using an executor.
// The supplied query mods are applied on top of FROM "schema".
func Schemas(mods ...qm.QueryMod) schemaQuery {
	mods = append(mods, qm.From("\"schema\""))
	return schemaQuery{NewQuery(mods...)}
}
|
||||||
|
|
||||||
|
// FindSchema retrieves a single record by ID with an executor.
// If selectCols is empty Find will return all columns.
// Returns sql.ErrNoRows unchanged when no row matches.
func FindSchema(ctx context.Context, exec boil.ContextExecutor, iD int64, selectCols ...string) (*Schema, error) {
	schemaObj := &Schema{}

	sel := "*"
	if len(selectCols) > 0 {
		// Quote each requested column with the dialect's identifier quotes.
		sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",")
	}
	query := fmt.Sprintf(
		"select %s from \"schema\" where \"id\"=?", sel,
	)

	q := queries.Raw(query, iD)

	err := q.Bind(ctx, exec, schemaObj)
	if err != nil {
		if errors.Cause(err) == sql.ErrNoRows {
			return nil, sql.ErrNoRows
		}
		return nil, errors.Wrap(err, "models: unable to select from schema")
	}

	return schemaObj, nil
}
|
||||||
|
|
||||||
|
// Insert a single record using an executor.
// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts.
func (o *Schema) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error {
	if o == nil {
		return errors.New("models: no schema provided for insertion")
	}

	var err error

	if err := o.doBeforeInsertHooks(ctx, exec); err != nil {
		return err
	}

	// Columns with defaults that hold non-zero values become part of the
	// statement-cache key, since they change the generated column list.
	nzDefaults := queries.NonZeroDefaultSet(schemaColumnsWithDefault, o)

	key := makeCacheKey(columns, nzDefaults)
	schemaInsertCacheMut.RLock()
	cache, cached := schemaInsertCache[key]
	schemaInsertCacheMut.RUnlock()

	if !cached {
		// Build the INSERT statement and value/return mappings once per key.
		wl, returnColumns := columns.InsertColumnSet(
			schemaAllColumns,
			schemaColumnsWithDefault,
			schemaColumnsWithoutDefault,
			nzDefaults,
		)

		cache.valueMapping, err = queries.BindMapping(schemaType, schemaMapping, wl)
		if err != nil {
			return err
		}
		cache.retMapping, err = queries.BindMapping(schemaType, schemaMapping, returnColumns)
		if err != nil {
			return err
		}
		if len(wl) != 0 {
			cache.query = fmt.Sprintf("INSERT INTO \"schema\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1))
		} else {
			cache.query = "INSERT INTO \"schema\" () VALUES ()%s%s"
		}

		var queryOutput, queryReturning string

		if len(cache.retMapping) != 0 {
			// Follow-up SELECT used to read back DB-generated column values.
			cache.retQuery = fmt.Sprintf("SELECT \"%s\" FROM \"schema\" WHERE %s", strings.Join(returnColumns, "\",\""), strmangle.WhereClause("\"", "\"", 0, schemaPrimaryKeyColumns))
		}

		cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning)
	}

	value := reflect.Indirect(reflect.ValueOf(o))
	vals := queries.ValuesFromMapping(value, cache.valueMapping)

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, cache.query)
		fmt.Fprintln(writer, vals)
	}
	result, err := exec.ExecContext(ctx, cache.query, vals...)

	if err != nil {
		return errors.Wrap(err, "models: unable to insert into schema")
	}

	var lastID int64
	var identifierCols []interface{}

	// Nothing to read back; skip straight to caching the statement.
	if len(cache.retMapping) == 0 {
		goto CacheNoHooks
	}

	lastID, err = result.LastInsertId()
	if err != nil {
		return ErrSyncFail
	}

	o.ID = int64(lastID)
	// If the only returned column is the id, LastInsertId already gave it to
	// us and the follow-up SELECT is unnecessary.
	if lastID != 0 && len(cache.retMapping) == 1 && cache.retMapping[0] == schemaMapping["id"] {
		goto CacheNoHooks
	}

	identifierCols = []interface{}{
		o.ID,
	}

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, cache.retQuery)
		fmt.Fprintln(writer, identifierCols...)
	}
	// Populate DB-generated default values back into the struct.
	err = exec.QueryRowContext(ctx, cache.retQuery, identifierCols...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...)
	if err != nil {
		return errors.Wrap(err, "models: unable to populate default values for schema")
	}

CacheNoHooks:
	if !cached {
		schemaInsertCacheMut.Lock()
		schemaInsertCache[key] = cache
		schemaInsertCacheMut.Unlock()
	}

	return o.doAfterInsertHooks(ctx, exec)
}
|
||||||
|
|
||||||
|
// Update uses an executor to update the Schema.
// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates.
// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records.
func (o *Schema) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {
	var err error
	if err = o.doBeforeUpdateHooks(ctx, exec); err != nil {
		return 0, err
	}
	key := makeCacheKey(columns, nil)
	schemaUpdateCacheMut.RLock()
	cache, cached := schemaUpdateCache[key]
	schemaUpdateCacheMut.RUnlock()

	if !cached {
		// Build the UPDATE statement and value mapping once per column set.
		wl := columns.UpdateColumnSet(
			schemaAllColumns,
			schemaPrimaryKeyColumns,
		)

		if !columns.IsWhitelist() {
			// created_at is never updated unless explicitly whitelisted.
			wl = strmangle.SetComplement(wl, []string{"created_at"})
		}
		if len(wl) == 0 {
			return 0, errors.New("models: unable to update schema, could not build whitelist")
		}

		cache.query = fmt.Sprintf("UPDATE \"schema\" SET %s WHERE %s",
			strmangle.SetParamNames("\"", "\"", 0, wl),
			strmangle.WhereClause("\"", "\"", 0, schemaPrimaryKeyColumns),
		)
		// Primary-key values are appended after the SET values for the WHERE clause.
		cache.valueMapping, err = queries.BindMapping(schemaType, schemaMapping, append(wl, schemaPrimaryKeyColumns...))
		if err != nil {
			return 0, err
		}
	}

	values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, cache.query)
		fmt.Fprintln(writer, values)
	}
	var result sql.Result
	result, err = exec.ExecContext(ctx, cache.query, values...)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to update schema row")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: failed to get rows affected by update for schema")
	}

	if !cached {
		schemaUpdateCacheMut.Lock()
		schemaUpdateCache[key] = cache
		schemaUpdateCacheMut.Unlock()
	}

	return rowsAff, o.doAfterUpdateHooks(ctx, exec)
}
|
||||||
|
|
||||||
|
// UpdateAll updates all rows with the specified column values.
// Note: per-row hooks are not invoked by this bulk operation.
func (q schemaQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
	queries.SetUpdate(q.Query, cols)

	result, err := q.Query.ExecContext(ctx, exec)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to update all for schema")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to retrieve rows affected for schema")
	}

	return rowsAff, nil
}
|
||||||
|
|
||||||
|
// UpdateAll updates all rows with the specified column values, using an executor.
// The rows are matched by the primary keys of the slice elements.
func (o SchemaSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
	ln := int64(len(o))
	if ln == 0 {
		return 0, nil
	}

	if len(cols) == 0 {
		return 0, errors.New("models: update all requires at least one column argument")
	}

	// SET arguments come first in the placeholder list.
	colNames := make([]string, len(cols))
	args := make([]interface{}, len(cols))

	i := 0
	for name, value := range cols {
		colNames[i] = name
		args[i] = value
		i++
	}

	// Append all of the primary key values for each column
	for _, obj := range o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), schemaPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	sql := fmt.Sprintf("UPDATE \"schema\" SET %s WHERE %s",
		strmangle.SetParamNames("\"", "\"", 0, colNames),
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, schemaPrimaryKeyColumns, len(o)))

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, args...)
	}
	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to update all in schema slice")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to retrieve rows affected all in update all schema")
	}
	return rowsAff, nil
}
|
||||||
|
|
||||||
|
// Delete deletes a single Schema record with an executor.
// Delete will match against the primary key column to find the record to delete.
func (o *Schema) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if o == nil {
		return 0, errors.New("models: no Schema provided for delete")
	}

	if err := o.doBeforeDeleteHooks(ctx, exec); err != nil {
		return 0, err
	}

	args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), schemaPrimaryKeyMapping)
	sql := "DELETE FROM \"schema\" WHERE \"id\"=?"

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, args...)
	}
	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to delete from schema")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: failed to get rows affected by delete for schema")
	}

	if err := o.doAfterDeleteHooks(ctx, exec); err != nil {
		return 0, err
	}

	return rowsAff, nil
}
|
||||||
|
|
||||||
|
// DeleteAll deletes all matching rows.
// Note: per-row hooks are not invoked by this bulk operation.
func (q schemaQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if q.Query == nil {
		return 0, errors.New("models: no schemaQuery provided for delete all")
	}

	// Convert the query into a DELETE statement.
	queries.SetDelete(q.Query)

	result, err := q.Query.ExecContext(ctx, exec)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to delete all from schema")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for schema")
	}

	return rowsAff, nil
}
|
||||||
|
|
||||||
|
// DeleteAll deletes all rows in the slice, using an executor.
// Before/after-delete hooks run per element when any are registered.
func (o SchemaSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if len(o) == 0 {
		return 0, nil
	}

	if len(schemaBeforeDeleteHooks) != 0 {
		for _, obj := range o {
			if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil {
				return 0, err
			}
		}
	}

	// Collect every element's primary-key values for the repeated WHERE clause.
	var args []interface{}
	for _, obj := range o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), schemaPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	sql := "DELETE FROM \"schema\" WHERE " +
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, schemaPrimaryKeyColumns, len(o))

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, args)
	}
	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to delete all from schema slice")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for schema")
	}

	if len(schemaAfterDeleteHooks) != 0 {
		for _, obj := range o {
			if err := obj.doAfterDeleteHooks(ctx, exec); err != nil {
				return 0, err
			}
		}
	}

	return rowsAff, nil
}
|
||||||
|
|
||||||
|
// Reload refetches the object from the database
// using the primary keys with an executor.
func (o *Schema) Reload(ctx context.Context, exec boil.ContextExecutor) error {
	ret, err := FindSchema(ctx, exec, o.ID)
	if err != nil {
		return err
	}

	// Overwrite the receiver in place with the freshly loaded row.
	*o = *ret
	return nil
}
|
||||||
|
|
||||||
|
// ReloadAll refetches every row with matching primary key column values
// and overwrites the original object slice with the newly updated slice.
func (o *SchemaSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error {
	if o == nil || len(*o) == 0 {
		return nil
	}

	slice := SchemaSlice{}
	// Collect every element's primary-key values for the repeated WHERE clause.
	var args []interface{}
	for _, obj := range *o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), schemaPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	sql := "SELECT \"schema\".* FROM \"schema\" WHERE " +
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, schemaPrimaryKeyColumns, len(*o))

	q := queries.Raw(sql, args...)

	err := q.Bind(ctx, exec, &slice)
	if err != nil {
		return errors.Wrap(err, "models: unable to reload all in SchemaSlice")
	}

	*o = slice

	return nil
}
|
||||||
|
|
||||||
|
// SchemaExists checks if the Schema row exists.
func SchemaExists(ctx context.Context, exec boil.ContextExecutor, iD int64) (bool, error) {
	var exists bool
	sql := "select exists(select 1 from \"schema\" where \"id\"=? limit 1)"

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, iD)
	}
	row := exec.QueryRowContext(ctx, sql, iD)

	err := row.Scan(&exists)
	if err != nil {
		return false, errors.Wrap(err, "models: unable to check if schema exists")
	}

	return exists, nil
}
|
684
models/schema_test.go
Normal file
684
models/schema_test.go
Normal file
@ -0,0 +1,684 @@
|
|||||||
|
// Code generated by SQLBoiler 3.6.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
|
||||||
|
// This file is meant to be re-generated in place and/or deleted at any time.
|
||||||
|
|
||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/volatiletech/sqlboiler/boil"
|
||||||
|
"github.com/volatiletech/sqlboiler/queries"
|
||||||
|
"github.com/volatiletech/sqlboiler/randomize"
|
||||||
|
"github.com/volatiletech/sqlboiler/strmangle"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// Relationships sometimes use the reflection helper queries.Equal/queries.Assign
	// so force a package dependency in case they don't.
	_ = queries.Equal
)
|
||||||
|
|
||||||
|
// testSchemas verifies that the Schemas() finisher produces a usable query.
func testSchemas(t *testing.T) {
	t.Parallel()

	query := Schemas()

	if query.Query == nil {
		t.Error("expected a query, got nothing")
	}
}
|
||||||
|
|
||||||
|
// testSchemasDelete inserts a randomized row, deletes it, and verifies the
// table is empty afterwards. All work happens in a rolled-back transaction.
func testSchemasDelete(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Schema{}
	if err = randomize.Struct(seed, o, schemaDBTypes, true, schemaColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Schema struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if rowsAff, err := o.Delete(ctx, tx); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only have deleted one row, but affected:", rowsAff)
	}

	count, err := Schemas().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 0 {
		t.Error("want zero records, got:", count)
	}
}
|
||||||
|
|
||||||
|
// testSchemasQueryDeleteAll inserts a randomized row, bulk-deletes via the
// query finisher, and verifies the table is empty afterwards.
func testSchemasQueryDeleteAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Schema{}
	if err = randomize.Struct(seed, o, schemaDBTypes, true, schemaColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Schema struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if rowsAff, err := Schemas().DeleteAll(ctx, tx); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only have deleted one row, but affected:", rowsAff)
	}

	count, err := Schemas().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 0 {
		t.Error("want zero records, got:", count)
	}
}
|
||||||
|
|
||||||
|
// testSchemasSliceDeleteAll inserts a randomized row, deletes it via the
// slice DeleteAll method, and verifies the table is empty afterwards.
func testSchemasSliceDeleteAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Schema{}
	if err = randomize.Struct(seed, o, schemaDBTypes, true, schemaColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Schema struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	slice := SchemaSlice{o}

	if rowsAff, err := slice.DeleteAll(ctx, tx); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only have deleted one row, but affected:", rowsAff)
	}

	count, err := Schemas().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 0 {
		t.Error("want zero records, got:", count)
	}
}
|
||||||
|
|
||||||
|
// testSchemasExists inserts a randomized row and verifies SchemaExists
// reports true for its primary key.
func testSchemasExists(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Schema{}
	if err = randomize.Struct(seed, o, schemaDBTypes, true, schemaColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Schema struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	e, err := SchemaExists(ctx, tx, o.ID)
	if err != nil {
		t.Errorf("Unable to check if Schema exists: %s", err)
	}
	if !e {
		t.Errorf("Expected SchemaExists to return true, but got false.")
	}
}
|
||||||
|
|
||||||
|
// testSchemasFind inserts a randomized row and verifies FindSchema
// returns a record for its primary key.
func testSchemasFind(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Schema{}
	if err = randomize.Struct(seed, o, schemaDBTypes, true, schemaColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Schema struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	schemaFound, err := FindSchema(ctx, tx, o.ID)
	if err != nil {
		t.Error(err)
	}

	if schemaFound == nil {
		t.Error("want a record, got nil")
	}
}
|
||||||
|
|
||||||
|
// testSchemasBind inserts a randomized row and verifies the query result
// can be bound back into a Schema struct.
func testSchemasBind(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Schema{}
	if err = randomize.Struct(seed, o, schemaDBTypes, true, schemaColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Schema struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if err = Schemas().Bind(ctx, tx, o); err != nil {
		t.Error(err)
	}
}
|
||||||
|
|
||||||
|
// testSchemasOne inserts a randomized row and verifies the One finisher
// returns a non-nil record.
func testSchemasOne(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Schema{}
	if err = randomize.Struct(seed, o, schemaDBTypes, true, schemaColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Schema struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if x, err := Schemas().One(ctx, tx); err != nil {
		t.Error(err)
	} else if x == nil {
		t.Error("expected to get a non nil record")
	}
}
|
||||||
|
|
||||||
|
func testSchemasAll(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
seed := randomize.NewSeed()
|
||||||
|
var err error
|
||||||
|
schemaOne := &Schema{}
|
||||||
|
schemaTwo := &Schema{}
|
||||||
|
if err = randomize.Struct(seed, schemaOne, schemaDBTypes, false, schemaColumnsWithDefault...); err != nil {
|
||||||
|
t.Errorf("Unable to randomize Schema struct: %s", err)
|
||||||
|
}
|
||||||
|
if err = randomize.Struct(seed, schemaTwo, schemaDBTypes, false, schemaColumnsWithDefault...); err != nil {
|
||||||
|
t.Errorf("Unable to randomize Schema struct: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||||
|
defer func() { _ = tx.Rollback() }()
|
||||||
|
if err = schemaOne.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
if err = schemaTwo.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
slice, err := Schemas().All(ctx, tx)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(slice) != 2 {
|
||||||
|
t.Error("want 2 records, got:", len(slice))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testSchemasCount(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
var err error
|
||||||
|
seed := randomize.NewSeed()
|
||||||
|
schemaOne := &Schema{}
|
||||||
|
schemaTwo := &Schema{}
|
||||||
|
if err = randomize.Struct(seed, schemaOne, schemaDBTypes, false, schemaColumnsWithDefault...); err != nil {
|
||||||
|
t.Errorf("Unable to randomize Schema struct: %s", err)
|
||||||
|
}
|
||||||
|
if err = randomize.Struct(seed, schemaTwo, schemaDBTypes, false, schemaColumnsWithDefault...); err != nil {
|
||||||
|
t.Errorf("Unable to randomize Schema struct: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||||
|
defer func() { _ = tx.Rollback() }()
|
||||||
|
if err = schemaOne.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
if err = schemaTwo.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
count, err := Schemas().Count(ctx, tx)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if count != 2 {
|
||||||
|
t.Error("want 2 records, got:", count)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func schemaBeforeInsertHook(ctx context.Context, e boil.ContextExecutor, o *Schema) error {
|
||||||
|
*o = Schema{}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func schemaAfterInsertHook(ctx context.Context, e boil.ContextExecutor, o *Schema) error {
|
||||||
|
*o = Schema{}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func schemaAfterSelectHook(ctx context.Context, e boil.ContextExecutor, o *Schema) error {
|
||||||
|
*o = Schema{}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func schemaBeforeUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Schema) error {
|
||||||
|
*o = Schema{}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func schemaAfterUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Schema) error {
|
||||||
|
*o = Schema{}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func schemaBeforeDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Schema) error {
|
||||||
|
*o = Schema{}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func schemaAfterDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Schema) error {
|
||||||
|
*o = Schema{}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func schemaBeforeUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Schema) error {
|
||||||
|
*o = Schema{}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func schemaAfterUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Schema) error {
|
||||||
|
*o = Schema{}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func testSchemasHooks(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
var err error
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
empty := &Schema{}
|
||||||
|
o := &Schema{}
|
||||||
|
|
||||||
|
seed := randomize.NewSeed()
|
||||||
|
if err = randomize.Struct(seed, o, schemaDBTypes, false); err != nil {
|
||||||
|
t.Errorf("Unable to randomize Schema object: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
AddSchemaHook(boil.BeforeInsertHook, schemaBeforeInsertHook)
|
||||||
|
if err = o.doBeforeInsertHooks(ctx, nil); err != nil {
|
||||||
|
t.Errorf("Unable to execute doBeforeInsertHooks: %s", err)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(o, empty) {
|
||||||
|
t.Errorf("Expected BeforeInsertHook function to empty object, but got: %#v", o)
|
||||||
|
}
|
||||||
|
schemaBeforeInsertHooks = []SchemaHook{}
|
||||||
|
|
||||||
|
AddSchemaHook(boil.AfterInsertHook, schemaAfterInsertHook)
|
||||||
|
if err = o.doAfterInsertHooks(ctx, nil); err != nil {
|
||||||
|
t.Errorf("Unable to execute doAfterInsertHooks: %s", err)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(o, empty) {
|
||||||
|
t.Errorf("Expected AfterInsertHook function to empty object, but got: %#v", o)
|
||||||
|
}
|
||||||
|
schemaAfterInsertHooks = []SchemaHook{}
|
||||||
|
|
||||||
|
AddSchemaHook(boil.AfterSelectHook, schemaAfterSelectHook)
|
||||||
|
if err = o.doAfterSelectHooks(ctx, nil); err != nil {
|
||||||
|
t.Errorf("Unable to execute doAfterSelectHooks: %s", err)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(o, empty) {
|
||||||
|
t.Errorf("Expected AfterSelectHook function to empty object, but got: %#v", o)
|
||||||
|
}
|
||||||
|
schemaAfterSelectHooks = []SchemaHook{}
|
||||||
|
|
||||||
|
AddSchemaHook(boil.BeforeUpdateHook, schemaBeforeUpdateHook)
|
||||||
|
if err = o.doBeforeUpdateHooks(ctx, nil); err != nil {
|
||||||
|
t.Errorf("Unable to execute doBeforeUpdateHooks: %s", err)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(o, empty) {
|
||||||
|
t.Errorf("Expected BeforeUpdateHook function to empty object, but got: %#v", o)
|
||||||
|
}
|
||||||
|
schemaBeforeUpdateHooks = []SchemaHook{}
|
||||||
|
|
||||||
|
AddSchemaHook(boil.AfterUpdateHook, schemaAfterUpdateHook)
|
||||||
|
if err = o.doAfterUpdateHooks(ctx, nil); err != nil {
|
||||||
|
t.Errorf("Unable to execute doAfterUpdateHooks: %s", err)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(o, empty) {
|
||||||
|
t.Errorf("Expected AfterUpdateHook function to empty object, but got: %#v", o)
|
||||||
|
}
|
||||||
|
schemaAfterUpdateHooks = []SchemaHook{}
|
||||||
|
|
||||||
|
AddSchemaHook(boil.BeforeDeleteHook, schemaBeforeDeleteHook)
|
||||||
|
if err = o.doBeforeDeleteHooks(ctx, nil); err != nil {
|
||||||
|
t.Errorf("Unable to execute doBeforeDeleteHooks: %s", err)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(o, empty) {
|
||||||
|
t.Errorf("Expected BeforeDeleteHook function to empty object, but got: %#v", o)
|
||||||
|
}
|
||||||
|
schemaBeforeDeleteHooks = []SchemaHook{}
|
||||||
|
|
||||||
|
AddSchemaHook(boil.AfterDeleteHook, schemaAfterDeleteHook)
|
||||||
|
if err = o.doAfterDeleteHooks(ctx, nil); err != nil {
|
||||||
|
t.Errorf("Unable to execute doAfterDeleteHooks: %s", err)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(o, empty) {
|
||||||
|
t.Errorf("Expected AfterDeleteHook function to empty object, but got: %#v", o)
|
||||||
|
}
|
||||||
|
schemaAfterDeleteHooks = []SchemaHook{}
|
||||||
|
|
||||||
|
AddSchemaHook(boil.BeforeUpsertHook, schemaBeforeUpsertHook)
|
||||||
|
if err = o.doBeforeUpsertHooks(ctx, nil); err != nil {
|
||||||
|
t.Errorf("Unable to execute doBeforeUpsertHooks: %s", err)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(o, empty) {
|
||||||
|
t.Errorf("Expected BeforeUpsertHook function to empty object, but got: %#v", o)
|
||||||
|
}
|
||||||
|
schemaBeforeUpsertHooks = []SchemaHook{}
|
||||||
|
|
||||||
|
AddSchemaHook(boil.AfterUpsertHook, schemaAfterUpsertHook)
|
||||||
|
if err = o.doAfterUpsertHooks(ctx, nil); err != nil {
|
||||||
|
t.Errorf("Unable to execute doAfterUpsertHooks: %s", err)
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(o, empty) {
|
||||||
|
t.Errorf("Expected AfterUpsertHook function to empty object, but got: %#v", o)
|
||||||
|
}
|
||||||
|
schemaAfterUpsertHooks = []SchemaHook{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testSchemasInsert(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
seed := randomize.NewSeed()
|
||||||
|
var err error
|
||||||
|
o := &Schema{}
|
||||||
|
if err = randomize.Struct(seed, o, schemaDBTypes, true, schemaColumnsWithDefault...); err != nil {
|
||||||
|
t.Errorf("Unable to randomize Schema struct: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||||
|
defer func() { _ = tx.Rollback() }()
|
||||||
|
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
count, err := Schemas().Count(ctx, tx)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if count != 1 {
|
||||||
|
t.Error("want one record, got:", count)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testSchemasInsertWhitelist(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
seed := randomize.NewSeed()
|
||||||
|
var err error
|
||||||
|
o := &Schema{}
|
||||||
|
if err = randomize.Struct(seed, o, schemaDBTypes, true); err != nil {
|
||||||
|
t.Errorf("Unable to randomize Schema struct: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||||
|
defer func() { _ = tx.Rollback() }()
|
||||||
|
if err = o.Insert(ctx, tx, boil.Whitelist(schemaColumnsWithoutDefault...)); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
count, err := Schemas().Count(ctx, tx)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if count != 1 {
|
||||||
|
t.Error("want one record, got:", count)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testSchemasReload(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
seed := randomize.NewSeed()
|
||||||
|
var err error
|
||||||
|
o := &Schema{}
|
||||||
|
if err = randomize.Struct(seed, o, schemaDBTypes, true, schemaColumnsWithDefault...); err != nil {
|
||||||
|
t.Errorf("Unable to randomize Schema struct: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||||
|
defer func() { _ = tx.Rollback() }()
|
||||||
|
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = o.Reload(ctx, tx); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testSchemasReloadAll(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
seed := randomize.NewSeed()
|
||||||
|
var err error
|
||||||
|
o := &Schema{}
|
||||||
|
if err = randomize.Struct(seed, o, schemaDBTypes, true, schemaColumnsWithDefault...); err != nil {
|
||||||
|
t.Errorf("Unable to randomize Schema struct: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||||
|
defer func() { _ = tx.Rollback() }()
|
||||||
|
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
slice := SchemaSlice{o}
|
||||||
|
|
||||||
|
if err = slice.ReloadAll(ctx, tx); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testSchemasSelect(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
seed := randomize.NewSeed()
|
||||||
|
var err error
|
||||||
|
o := &Schema{}
|
||||||
|
if err = randomize.Struct(seed, o, schemaDBTypes, true, schemaColumnsWithDefault...); err != nil {
|
||||||
|
t.Errorf("Unable to randomize Schema struct: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||||
|
defer func() { _ = tx.Rollback() }()
|
||||||
|
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
slice, err := Schemas().All(ctx, tx)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(slice) != 1 {
|
||||||
|
t.Error("want one record, got:", len(slice))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
schemaDBTypes = map[string]string{`ID`: `INTEGER`}
|
||||||
|
_ = bytes.MinRead
|
||||||
|
)
|
||||||
|
|
||||||
|
func testSchemasUpdate(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
if 0 == len(schemaPrimaryKeyColumns) {
|
||||||
|
t.Skip("Skipping table with no primary key columns")
|
||||||
|
}
|
||||||
|
if len(schemaAllColumns) == len(schemaPrimaryKeyColumns) {
|
||||||
|
t.Skip("Skipping table with only primary key columns")
|
||||||
|
}
|
||||||
|
|
||||||
|
seed := randomize.NewSeed()
|
||||||
|
var err error
|
||||||
|
o := &Schema{}
|
||||||
|
if err = randomize.Struct(seed, o, schemaDBTypes, true, schemaColumnsWithDefault...); err != nil {
|
||||||
|
t.Errorf("Unable to randomize Schema struct: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||||
|
defer func() { _ = tx.Rollback() }()
|
||||||
|
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
count, err := Schemas().Count(ctx, tx)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if count != 1 {
|
||||||
|
t.Error("want one record, got:", count)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = randomize.Struct(seed, o, schemaDBTypes, true, schemaPrimaryKeyColumns...); err != nil {
|
||||||
|
t.Errorf("Unable to randomize Schema struct: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rowsAff, err := o.Update(ctx, tx, boil.Infer()); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
} else if rowsAff != 1 {
|
||||||
|
t.Error("should only affect one row but affected", rowsAff)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testSchemasSliceUpdateAll(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
if len(schemaAllColumns) == len(schemaPrimaryKeyColumns) {
|
||||||
|
t.Skip("Skipping table with only primary key columns")
|
||||||
|
}
|
||||||
|
|
||||||
|
seed := randomize.NewSeed()
|
||||||
|
var err error
|
||||||
|
o := &Schema{}
|
||||||
|
if err = randomize.Struct(seed, o, schemaDBTypes, true, schemaColumnsWithDefault...); err != nil {
|
||||||
|
t.Errorf("Unable to randomize Schema struct: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||||
|
defer func() { _ = tx.Rollback() }()
|
||||||
|
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
count, err := Schemas().Count(ctx, tx)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if count != 1 {
|
||||||
|
t.Error("want one record, got:", count)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = randomize.Struct(seed, o, schemaDBTypes, true, schemaPrimaryKeyColumns...); err != nil {
|
||||||
|
t.Errorf("Unable to randomize Schema struct: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove Primary keys and unique columns from what we plan to update
|
||||||
|
var fields []string
|
||||||
|
if strmangle.StringSliceMatch(schemaAllColumns, schemaPrimaryKeyColumns) {
|
||||||
|
fields = schemaAllColumns
|
||||||
|
} else {
|
||||||
|
fields = strmangle.SetComplement(
|
||||||
|
schemaAllColumns,
|
||||||
|
schemaPrimaryKeyColumns,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
value := reflect.Indirect(reflect.ValueOf(o))
|
||||||
|
typ := reflect.TypeOf(o).Elem()
|
||||||
|
n := typ.NumField()
|
||||||
|
|
||||||
|
updateMap := M{}
|
||||||
|
for _, col := range fields {
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
f := typ.Field(i)
|
||||||
|
if f.Tag.Get("boil") == col {
|
||||||
|
updateMap[col] = value.Field(i).Interface()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
slice := SchemaSlice{o}
|
||||||
|
if rowsAff, err := slice.UpdateAll(ctx, tx, updateMap); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
} else if rowsAff != 1 {
|
||||||
|
t.Error("wanted one record updated but got", rowsAff)
|
||||||
|
}
|
||||||
|
}
|
789
models/session.go
Normal file
789
models/session.go
Normal file
@ -0,0 +1,789 @@
|
|||||||
|
// Code generated by SQLBoiler 3.6.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
|
||||||
|
// This file is meant to be re-generated in place and/or deleted at any time.
|
||||||
|
|
||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/friendsofgo/errors"
|
||||||
|
"github.com/volatiletech/sqlboiler/boil"
|
||||||
|
"github.com/volatiletech/sqlboiler/queries"
|
||||||
|
"github.com/volatiletech/sqlboiler/queries/qm"
|
||||||
|
"github.com/volatiletech/sqlboiler/queries/qmhelper"
|
||||||
|
"github.com/volatiletech/sqlboiler/strmangle"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Session is an object representing the database table.
|
||||||
|
type Session struct {
|
||||||
|
ID string `boil:"id" json:"id" toml:"id" yaml:"id"`
|
||||||
|
Mtime int64 `boil:"mtime" json:"mtime" toml:"mtime" yaml:"mtime"`
|
||||||
|
UserID string `boil:"user_id" json:"user_id" toml:"user_id" yaml:"user_id"`
|
||||||
|
|
||||||
|
R *sessionR `boil:"-" json:"-" toml:"-" yaml:"-"`
|
||||||
|
L sessionL `boil:"-" json:"-" toml:"-" yaml:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
var SessionColumns = struct {
|
||||||
|
ID string
|
||||||
|
Mtime string
|
||||||
|
UserID string
|
||||||
|
}{
|
||||||
|
ID: "id",
|
||||||
|
Mtime: "mtime",
|
||||||
|
UserID: "user_id",
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generated where
|
||||||
|
|
||||||
|
type whereHelperstring struct{ field string }
|
||||||
|
|
||||||
|
func (w whereHelperstring) EQ(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.EQ, x) }
|
||||||
|
func (w whereHelperstring) NEQ(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.NEQ, x) }
|
||||||
|
func (w whereHelperstring) LT(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LT, x) }
|
||||||
|
func (w whereHelperstring) LTE(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LTE, x) }
|
||||||
|
func (w whereHelperstring) GT(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GT, x) }
|
||||||
|
func (w whereHelperstring) GTE(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GTE, x) }
|
||||||
|
func (w whereHelperstring) IN(slice []string) qm.QueryMod {
|
||||||
|
values := make([]interface{}, 0, len(slice))
|
||||||
|
for _, value := range slice {
|
||||||
|
values = append(values, value)
|
||||||
|
}
|
||||||
|
return qm.WhereIn(fmt.Sprintf("%s IN ?", w.field), values...)
|
||||||
|
}
|
||||||
|
|
||||||
|
var SessionWhere = struct {
|
||||||
|
ID whereHelperstring
|
||||||
|
Mtime whereHelperint64
|
||||||
|
UserID whereHelperstring
|
||||||
|
}{
|
||||||
|
ID: whereHelperstring{field: "\"session\".\"id\""},
|
||||||
|
Mtime: whereHelperint64{field: "\"session\".\"mtime\""},
|
||||||
|
UserID: whereHelperstring{field: "\"session\".\"user_id\""},
|
||||||
|
}
|
||||||
|
|
||||||
|
// SessionRels is where relationship names are stored.
|
||||||
|
var SessionRels = struct {
|
||||||
|
}{}
|
||||||
|
|
||||||
|
// sessionR is where relationships are stored.
|
||||||
|
type sessionR struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewStruct creates a new relationship struct
|
||||||
|
func (*sessionR) NewStruct() *sessionR {
|
||||||
|
return &sessionR{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// sessionL is where Load methods for each relationship are stored.
|
||||||
|
type sessionL struct{}
|
||||||
|
|
||||||
|
var (
|
||||||
|
sessionAllColumns = []string{"id", "mtime", "user_id"}
|
||||||
|
sessionColumnsWithoutDefault = []string{"id", "mtime", "user_id"}
|
||||||
|
sessionColumnsWithDefault = []string{}
|
||||||
|
sessionPrimaryKeyColumns = []string{"id"}
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
// SessionSlice is an alias for a slice of pointers to Session.
|
||||||
|
// This should generally be used opposed to []Session.
|
||||||
|
SessionSlice []*Session
|
||||||
|
// SessionHook is the signature for custom Session hook methods
|
||||||
|
SessionHook func(context.Context, boil.ContextExecutor, *Session) error
|
||||||
|
|
||||||
|
sessionQuery struct {
|
||||||
|
*queries.Query
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// Cache for insert, update and upsert
|
||||||
|
var (
|
||||||
|
sessionType = reflect.TypeOf(&Session{})
|
||||||
|
sessionMapping = queries.MakeStructMapping(sessionType)
|
||||||
|
sessionPrimaryKeyMapping, _ = queries.BindMapping(sessionType, sessionMapping, sessionPrimaryKeyColumns)
|
||||||
|
sessionInsertCacheMut sync.RWMutex
|
||||||
|
sessionInsertCache = make(map[string]insertCache)
|
||||||
|
sessionUpdateCacheMut sync.RWMutex
|
||||||
|
sessionUpdateCache = make(map[string]updateCache)
|
||||||
|
sessionUpsertCacheMut sync.RWMutex
|
||||||
|
sessionUpsertCache = make(map[string]insertCache)
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// Force time package dependency for automated UpdatedAt/CreatedAt.
|
||||||
|
_ = time.Second
|
||||||
|
// Force qmhelper dependency for where clause generation (which doesn't
|
||||||
|
// always happen)
|
||||||
|
_ = qmhelper.Where
|
||||||
|
)
|
||||||
|
|
||||||
|
var sessionBeforeInsertHooks []SessionHook
|
||||||
|
var sessionBeforeUpdateHooks []SessionHook
|
||||||
|
var sessionBeforeDeleteHooks []SessionHook
|
||||||
|
var sessionBeforeUpsertHooks []SessionHook
|
||||||
|
|
||||||
|
var sessionAfterInsertHooks []SessionHook
|
||||||
|
var sessionAfterSelectHooks []SessionHook
|
||||||
|
var sessionAfterUpdateHooks []SessionHook
|
||||||
|
var sessionAfterDeleteHooks []SessionHook
|
||||||
|
var sessionAfterUpsertHooks []SessionHook
|
||||||
|
|
||||||
|
// doBeforeInsertHooks executes all "before insert" hooks.
|
||||||
|
func (o *Session) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
||||||
|
if boil.HooksAreSkipped(ctx) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, hook := range sessionBeforeInsertHooks {
|
||||||
|
if err := hook(ctx, exec, o); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// doBeforeUpdateHooks executes all "before Update" hooks.
|
||||||
|
func (o *Session) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
||||||
|
if boil.HooksAreSkipped(ctx) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, hook := range sessionBeforeUpdateHooks {
|
||||||
|
if err := hook(ctx, exec, o); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// doBeforeDeleteHooks executes all "before Delete" hooks.
|
||||||
|
func (o *Session) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
||||||
|
if boil.HooksAreSkipped(ctx) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, hook := range sessionBeforeDeleteHooks {
|
||||||
|
if err := hook(ctx, exec, o); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// doBeforeUpsertHooks executes all "before Upsert" hooks.
|
||||||
|
func (o *Session) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
||||||
|
if boil.HooksAreSkipped(ctx) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, hook := range sessionBeforeUpsertHooks {
|
||||||
|
if err := hook(ctx, exec, o); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// doAfterInsertHooks executes all "after Insert" hooks.
|
||||||
|
func (o *Session) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
||||||
|
if boil.HooksAreSkipped(ctx) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, hook := range sessionAfterInsertHooks {
|
||||||
|
if err := hook(ctx, exec, o); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// doAfterSelectHooks executes all "after Select" hooks.
|
||||||
|
func (o *Session) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
||||||
|
if boil.HooksAreSkipped(ctx) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, hook := range sessionAfterSelectHooks {
|
||||||
|
if err := hook(ctx, exec, o); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// doAfterUpdateHooks executes all "after Update" hooks.
|
||||||
|
func (o *Session) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
||||||
|
if boil.HooksAreSkipped(ctx) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, hook := range sessionAfterUpdateHooks {
|
||||||
|
if err := hook(ctx, exec, o); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// doAfterDeleteHooks executes all "after Delete" hooks.
|
||||||
|
func (o *Session) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
||||||
|
if boil.HooksAreSkipped(ctx) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, hook := range sessionAfterDeleteHooks {
|
||||||
|
if err := hook(ctx, exec, o); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// doAfterUpsertHooks executes all "after Upsert" hooks.
|
||||||
|
func (o *Session) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
|
||||||
|
if boil.HooksAreSkipped(ctx) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, hook := range sessionAfterUpsertHooks {
|
||||||
|
if err := hook(ctx, exec, o); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSessionHook registers your hook function for all future operations.
|
||||||
|
func AddSessionHook(hookPoint boil.HookPoint, sessionHook SessionHook) {
|
||||||
|
switch hookPoint {
|
||||||
|
case boil.BeforeInsertHook:
|
||||||
|
sessionBeforeInsertHooks = append(sessionBeforeInsertHooks, sessionHook)
|
||||||
|
case boil.BeforeUpdateHook:
|
||||||
|
sessionBeforeUpdateHooks = append(sessionBeforeUpdateHooks, sessionHook)
|
||||||
|
case boil.BeforeDeleteHook:
|
||||||
|
sessionBeforeDeleteHooks = append(sessionBeforeDeleteHooks, sessionHook)
|
||||||
|
case boil.BeforeUpsertHook:
|
||||||
|
sessionBeforeUpsertHooks = append(sessionBeforeUpsertHooks, sessionHook)
|
||||||
|
case boil.AfterInsertHook:
|
||||||
|
sessionAfterInsertHooks = append(sessionAfterInsertHooks, sessionHook)
|
||||||
|
case boil.AfterSelectHook:
|
||||||
|
sessionAfterSelectHooks = append(sessionAfterSelectHooks, sessionHook)
|
||||||
|
case boil.AfterUpdateHook:
|
||||||
|
sessionAfterUpdateHooks = append(sessionAfterUpdateHooks, sessionHook)
|
||||||
|
case boil.AfterDeleteHook:
|
||||||
|
sessionAfterDeleteHooks = append(sessionAfterDeleteHooks, sessionHook)
|
||||||
|
case boil.AfterUpsertHook:
|
||||||
|
sessionAfterUpsertHooks = append(sessionAfterUpsertHooks, sessionHook)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// One returns a single session record from the query.
|
||||||
|
func (q sessionQuery) One(ctx context.Context, exec boil.ContextExecutor) (*Session, error) {
|
||||||
|
o := &Session{}
|
||||||
|
|
||||||
|
queries.SetLimit(q.Query, 1)
|
||||||
|
|
||||||
|
err := q.Bind(ctx, exec, o)
|
||||||
|
if err != nil {
|
||||||
|
if errors.Cause(err) == sql.ErrNoRows {
|
||||||
|
return nil, sql.ErrNoRows
|
||||||
|
}
|
||||||
|
return nil, errors.Wrap(err, "models: failed to execute a one query for session")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := o.doAfterSelectHooks(ctx, exec); err != nil {
|
||||||
|
return o, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return o, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// All returns all Session records from the query.
|
||||||
|
func (q sessionQuery) All(ctx context.Context, exec boil.ContextExecutor) (SessionSlice, error) {
|
||||||
|
var o []*Session
|
||||||
|
|
||||||
|
err := q.Bind(ctx, exec, &o)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "models: failed to assign all query results to Session slice")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(sessionAfterSelectHooks) != 0 {
|
||||||
|
for _, obj := range o {
|
||||||
|
if err := obj.doAfterSelectHooks(ctx, exec); err != nil {
|
||||||
|
return o, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return o, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the count of all Session records in the query.
|
||||||
|
func (q sessionQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
|
||||||
|
var count int64
|
||||||
|
|
||||||
|
queries.SetSelect(q.Query, nil)
|
||||||
|
queries.SetCount(q.Query)
|
||||||
|
|
||||||
|
err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
|
||||||
|
if err != nil {
|
||||||
|
return 0, errors.Wrap(err, "models: failed to count session rows")
|
||||||
|
}
|
||||||
|
|
||||||
|
return count, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exists checks if the row exists in the table.
|
||||||
|
func (q sessionQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) {
|
||||||
|
var count int64
|
||||||
|
|
||||||
|
queries.SetSelect(q.Query, nil)
|
||||||
|
queries.SetCount(q.Query)
|
||||||
|
queries.SetLimit(q.Query, 1)
|
||||||
|
|
||||||
|
err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
|
||||||
|
if err != nil {
|
||||||
|
return false, errors.Wrap(err, "models: failed to check if session exists")
|
||||||
|
}
|
||||||
|
|
||||||
|
return count > 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sessions retrieves all the records using an executor.
|
||||||
|
func Sessions(mods ...qm.QueryMod) sessionQuery {
|
||||||
|
mods = append(mods, qm.From("\"session\""))
|
||||||
|
return sessionQuery{NewQuery(mods...)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindSession retrieves a single record by ID with an executor.
|
||||||
|
// If selectCols is empty Find will return all columns.
|
||||||
|
func FindSession(ctx context.Context, exec boil.ContextExecutor, iD string, selectCols ...string) (*Session, error) {
|
||||||
|
sessionObj := &Session{}
|
||||||
|
|
||||||
|
sel := "*"
|
||||||
|
if len(selectCols) > 0 {
|
||||||
|
sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",")
|
||||||
|
}
|
||||||
|
query := fmt.Sprintf(
|
||||||
|
"select %s from \"session\" where \"id\"=?", sel,
|
||||||
|
)
|
||||||
|
|
||||||
|
q := queries.Raw(query, iD)
|
||||||
|
|
||||||
|
err := q.Bind(ctx, exec, sessionObj)
|
||||||
|
if err != nil {
|
||||||
|
if errors.Cause(err) == sql.ErrNoRows {
|
||||||
|
return nil, sql.ErrNoRows
|
||||||
|
}
|
||||||
|
return nil, errors.Wrap(err, "models: unable to select from session")
|
||||||
|
}
|
||||||
|
|
||||||
|
return sessionObj, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Insert a single record using an executor.
// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts.
func (o *Session) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error {
	if o == nil {
		return errors.New("models: no session provided for insertion")
	}

	var err error

	if err := o.doBeforeInsertHooks(ctx, exec); err != nil {
		return err
	}

	// Columns whose default would otherwise apply but hold a non-zero value here.
	nzDefaults := queries.NonZeroDefaultSet(sessionColumnsWithDefault, o)

	// Built statements are cached per (columns, non-zero-defaults) combination.
	key := makeCacheKey(columns, nzDefaults)
	sessionInsertCacheMut.RLock()
	cache, cached := sessionInsertCache[key]
	sessionInsertCacheMut.RUnlock()

	if !cached {
		wl, returnColumns := columns.InsertColumnSet(
			sessionAllColumns,
			sessionColumnsWithDefault,
			sessionColumnsWithoutDefault,
			nzDefaults,
		)

		cache.valueMapping, err = queries.BindMapping(sessionType, sessionMapping, wl)
		if err != nil {
			return err
		}
		cache.retMapping, err = queries.BindMapping(sessionType, sessionMapping, returnColumns)
		if err != nil {
			return err
		}
		if len(wl) != 0 {
			cache.query = fmt.Sprintf("INSERT INTO \"session\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1))
		} else {
			cache.query = "INSERT INTO \"session\" () VALUES ()%s%s"
		}

		var queryOutput, queryReturning string

		// When defaulted columns must be read back, prepare a follow-up SELECT by primary key.
		if len(cache.retMapping) != 0 {
			cache.retQuery = fmt.Sprintf("SELECT \"%s\" FROM \"session\" WHERE %s", strings.Join(returnColumns, "\",\""), strmangle.WhereClause("\"", "\"", 0, sessionPrimaryKeyColumns))
		}

		cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning)
	}

	value := reflect.Indirect(reflect.ValueOf(o))
	vals := queries.ValuesFromMapping(value, cache.valueMapping)

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, cache.query)
		fmt.Fprintln(writer, vals)
	}
	_, err = exec.ExecContext(ctx, cache.query, vals...)

	if err != nil {
		return errors.Wrap(err, "models: unable to insert into session")
	}

	var identifierCols []interface{}

	// No defaulted columns to refresh — skip straight to cache population.
	if len(cache.retMapping) == 0 {
		goto CacheNoHooks
	}

	identifierCols = []interface{}{
		o.ID,
	}

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, cache.retQuery)
		fmt.Fprintln(writer, identifierCols...)
	}
	// Read back database-generated defaults into the struct.
	err = exec.QueryRowContext(ctx, cache.retQuery, identifierCols...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...)
	if err != nil {
		return errors.Wrap(err, "models: unable to populate default values for session")
	}

CacheNoHooks:
	if !cached {
		sessionInsertCacheMut.Lock()
		sessionInsertCache[key] = cache
		sessionInsertCacheMut.Unlock()
	}

	return o.doAfterInsertHooks(ctx, exec)
}
|
||||||
|
|
||||||
|
// Update uses an executor to update the Session.
// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates.
// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records.
func (o *Session) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {
	var err error
	if err = o.doBeforeUpdateHooks(ctx, exec); err != nil {
		return 0, err
	}
	// Built statements are cached per column set.
	key := makeCacheKey(columns, nil)
	sessionUpdateCacheMut.RLock()
	cache, cached := sessionUpdateCache[key]
	sessionUpdateCacheMut.RUnlock()

	if !cached {
		wl := columns.UpdateColumnSet(
			sessionAllColumns,
			sessionPrimaryKeyColumns,
		)

		// Unless explicitly whitelisted, never overwrite created_at on update.
		if !columns.IsWhitelist() {
			wl = strmangle.SetComplement(wl, []string{"created_at"})
		}
		if len(wl) == 0 {
			return 0, errors.New("models: unable to update session, could not build whitelist")
		}

		cache.query = fmt.Sprintf("UPDATE \"session\" SET %s WHERE %s",
			strmangle.SetParamNames("\"", "\"", 0, wl),
			strmangle.WhereClause("\"", "\"", 0, sessionPrimaryKeyColumns),
		)
		// Bind values for the SET columns followed by the primary key for the WHERE clause.
		cache.valueMapping, err = queries.BindMapping(sessionType, sessionMapping, append(wl, sessionPrimaryKeyColumns...))
		if err != nil {
			return 0, err
		}
	}

	values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, cache.query)
		fmt.Fprintln(writer, values)
	}
	var result sql.Result
	result, err = exec.ExecContext(ctx, cache.query, values...)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to update session row")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: failed to get rows affected by update for session")
	}

	if !cached {
		sessionUpdateCacheMut.Lock()
		sessionUpdateCache[key] = cache
		sessionUpdateCacheMut.Unlock()
	}

	return rowsAff, o.doAfterUpdateHooks(ctx, exec)
}
|
||||||
|
|
||||||
|
// UpdateAll updates all rows with the specified column values.
// cols maps column names to the new values; returns the number of rows affected.
func (q sessionQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
	queries.SetUpdate(q.Query, cols)

	result, err := q.Query.ExecContext(ctx, exec)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to update all for session")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to retrieve rows affected for session")
	}

	return rowsAff, nil
}
|
||||||
|
|
||||||
|
// UpdateAll updates all rows with the specified column values, using an executor.
// Only the rows whose primary keys appear in the slice are updated.
func (o SessionSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
	ln := int64(len(o))
	if ln == 0 {
		return 0, nil
	}

	if len(cols) == 0 {
		return 0, errors.New("models: update all requires at least one column argument")
	}

	colNames := make([]string, len(cols))
	args := make([]interface{}, len(cols))

	// SET-clause values come first in the argument list.
	i := 0
	for name, value := range cols {
		colNames[i] = name
		args[i] = value
		i++
	}

	// Append all of the primary key values for each column
	for _, obj := range o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), sessionPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	sql := fmt.Sprintf("UPDATE \"session\" SET %s WHERE %s",
		strmangle.SetParamNames("\"", "\"", 0, colNames),
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, sessionPrimaryKeyColumns, len(o)))

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, args...)
	}
	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to update all in session slice")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to retrieve rows affected all in update all session")
	}
	return rowsAff, nil
}
|
||||||
|
|
||||||
|
// Delete deletes a single Session record with an executor.
// Delete will match against the primary key column to find the record to delete.
func (o *Session) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if o == nil {
		return 0, errors.New("models: no Session provided for delete")
	}

	if err := o.doBeforeDeleteHooks(ctx, exec); err != nil {
		return 0, err
	}

	args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), sessionPrimaryKeyMapping)
	sql := "DELETE FROM \"session\" WHERE \"id\"=?"

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, args...)
	}
	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to delete from session")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: failed to get rows affected by delete for session")
	}

	if err := o.doAfterDeleteHooks(ctx, exec); err != nil {
		return 0, err
	}

	return rowsAff, nil
}
|
||||||
|
|
||||||
|
// DeleteAll deletes all matching rows.
// The receiver's query mods determine which rows are removed.
func (q sessionQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if q.Query == nil {
		return 0, errors.New("models: no sessionQuery provided for delete all")
	}

	// Convert the built SELECT into a DELETE before executing.
	queries.SetDelete(q.Query)

	result, err := q.Query.ExecContext(ctx, exec)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to delete all from session")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for session")
	}

	return rowsAff, nil
}
|
||||||
|
|
||||||
|
// DeleteAll deletes all rows in the slice, using an executor.
// Before/after delete hooks run per object only when hooks are registered.
func (o SessionSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if len(o) == 0 {
		return 0, nil
	}

	if len(sessionBeforeDeleteHooks) != 0 {
		for _, obj := range o {
			if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil {
				return 0, err
			}
		}
	}

	// Collect every object's primary key for a single repeated-WHERE delete.
	var args []interface{}
	for _, obj := range o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), sessionPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	sql := "DELETE FROM \"session\" WHERE " +
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, sessionPrimaryKeyColumns, len(o))

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, args)
	}
	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to delete all from session slice")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for session")
	}

	if len(sessionAfterDeleteHooks) != 0 {
		for _, obj := range o {
			if err := obj.doAfterDeleteHooks(ctx, exec); err != nil {
				return 0, err
			}
		}
	}

	return rowsAff, nil
}
|
||||||
|
|
||||||
|
// Reload refetches the object from the database
// using the primary keys with an executor.
func (o *Session) Reload(ctx context.Context, exec boil.ContextExecutor) error {
	ret, err := FindSession(ctx, exec, o.ID)
	if err != nil {
		return err
	}

	// Overwrite the receiver in place with the freshly fetched row.
	*o = *ret
	return nil
}
|
||||||
|
|
||||||
|
// ReloadAll refetches every row with matching primary key column values
// and overwrites the original object slice with the newly updated slice.
func (o *SessionSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error {
	if o == nil || len(*o) == 0 {
		return nil
	}

	slice := SessionSlice{}
	// Gather every object's primary key for one batched SELECT.
	var args []interface{}
	for _, obj := range *o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), sessionPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	sql := "SELECT \"session\".* FROM \"session\" WHERE " +
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, sessionPrimaryKeyColumns, len(*o))

	q := queries.Raw(sql, args...)

	err := q.Bind(ctx, exec, &slice)
	if err != nil {
		return errors.Wrap(err, "models: unable to reload all in SessionSlice")
	}

	*o = slice

	return nil
}
|
||||||
|
|
||||||
|
// SessionExists checks if the Session row exists.
// It issues an EXISTS subquery on the primary key rather than fetching the row.
func SessionExists(ctx context.Context, exec boil.ContextExecutor, iD string) (bool, error) {
	var exists bool
	sql := "select exists(select 1 from \"session\" where \"id\"=? limit 1)"

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, iD)
	}
	row := exec.QueryRowContext(ctx, sql, iD)

	err := row.Scan(&exists)
	if err != nil {
		return false, errors.Wrap(err, "models: unable to check if session exists")
	}

	return exists, nil
}
|
684
models/session_test.go
Normal file
684
models/session_test.go
Normal file
@ -0,0 +1,684 @@
|
|||||||
|
// Code generated by SQLBoiler 3.6.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
|
||||||
|
// This file is meant to be re-generated in place and/or deleted at any time.
|
||||||
|
|
||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/volatiletech/sqlboiler/boil"
|
||||||
|
"github.com/volatiletech/sqlboiler/queries"
|
||||||
|
"github.com/volatiletech/sqlboiler/randomize"
|
||||||
|
"github.com/volatiletech/sqlboiler/strmangle"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// Relationships sometimes use the reflection helper queries.Equal/queries.Assign
|
||||||
|
// so force a package dependency in case they don't.
|
||||||
|
_ = queries.Equal
|
||||||
|
)
|
||||||
|
|
||||||
|
// testSessions verifies that the Sessions constructor returns a usable query.
func testSessions(t *testing.T) {
	t.Parallel()

	query := Sessions()

	if query.Query == nil {
		t.Error("expected a query, got nothing")
	}
}
|
||||||
|
|
||||||
|
// testSessionsDelete inserts a randomized row, deletes it by primary key,
// and confirms the table is empty afterwards. Runs inside a rolled-back tx.
func testSessionsDelete(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Session{}
	if err = randomize.Struct(seed, o, sessionDBTypes, true, sessionColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Session struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	// Roll back so the test leaves no rows behind.
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if rowsAff, err := o.Delete(ctx, tx); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only have deleted one row, but affected:", rowsAff)
	}

	count, err := Sessions().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 0 {
		t.Error("want zero records, got:", count)
	}
}
|
||||||
|
|
||||||
|
// testSessionsQueryDeleteAll inserts one row, deletes via the query-level
// DeleteAll, and checks exactly one row was removed.
func testSessionsQueryDeleteAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Session{}
	if err = randomize.Struct(seed, o, sessionDBTypes, true, sessionColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Session struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if rowsAff, err := Sessions().DeleteAll(ctx, tx); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only have deleted one row, but affected:", rowsAff)
	}

	count, err := Sessions().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 0 {
		t.Error("want zero records, got:", count)
	}
}
|
||||||
|
|
||||||
|
// testSessionsSliceDeleteAll inserts one row, deletes via the slice-level
// DeleteAll, and checks exactly one row was removed.
func testSessionsSliceDeleteAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Session{}
	if err = randomize.Struct(seed, o, sessionDBTypes, true, sessionColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Session struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	slice := SessionSlice{o}

	if rowsAff, err := slice.DeleteAll(ctx, tx); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only have deleted one row, but affected:", rowsAff)
	}

	count, err := Sessions().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 0 {
		t.Error("want zero records, got:", count)
	}
}
|
||||||
|
|
||||||
|
// testSessionsExists inserts a row and asserts SessionExists finds it by ID.
func testSessionsExists(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Session{}
	if err = randomize.Struct(seed, o, sessionDBTypes, true, sessionColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Session struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	e, err := SessionExists(ctx, tx, o.ID)
	if err != nil {
		t.Errorf("Unable to check if Session exists: %s", err)
	}
	if !e {
		t.Errorf("Expected SessionExists to return true, but got false.")
	}
}
|
||||||
|
|
||||||
|
// testSessionsFind inserts a row and asserts FindSession retrieves it by ID.
func testSessionsFind(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Session{}
	if err = randomize.Struct(seed, o, sessionDBTypes, true, sessionColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Session struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	sessionFound, err := FindSession(ctx, tx, o.ID)
	if err != nil {
		t.Error(err)
	}

	if sessionFound == nil {
		t.Error("want a record, got nil")
	}
}
|
||||||
|
|
||||||
|
// testSessionsBind inserts a row and asserts the query result binds back
// into a Session struct without error.
func testSessionsBind(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Session{}
	if err = randomize.Struct(seed, o, sessionDBTypes, true, sessionColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Session struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if err = Sessions().Bind(ctx, tx, o); err != nil {
		t.Error(err)
	}
}
|
||||||
|
|
||||||
|
// testSessionsOne inserts a row and asserts the query One() helper returns it.
func testSessionsOne(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Session{}
	if err = randomize.Struct(seed, o, sessionDBTypes, true, sessionColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Session struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if x, err := Sessions().One(ctx, tx); err != nil {
		t.Error(err)
	} else if x == nil {
		t.Error("expected to get a non nil record")
	}
}
|
||||||
|
|
||||||
|
// testSessionsAll inserts two rows and asserts All() returns both.
func testSessionsAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	sessionOne := &Session{}
	sessionTwo := &Session{}
	if err = randomize.Struct(seed, sessionOne, sessionDBTypes, false, sessionColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Session struct: %s", err)
	}
	if err = randomize.Struct(seed, sessionTwo, sessionDBTypes, false, sessionColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Session struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = sessionOne.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	if err = sessionTwo.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	slice, err := Sessions().All(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if len(slice) != 2 {
		t.Error("want 2 records, got:", len(slice))
	}
}
|
||||||
|
|
||||||
|
// testSessionsCount inserts two rows and asserts Count() reports two.
func testSessionsCount(t *testing.T) {
	t.Parallel()

	var err error
	seed := randomize.NewSeed()
	sessionOne := &Session{}
	sessionTwo := &Session{}
	if err = randomize.Struct(seed, sessionOne, sessionDBTypes, false, sessionColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Session struct: %s", err)
	}
	if err = randomize.Struct(seed, sessionTwo, sessionDBTypes, false, sessionColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Session struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = sessionOne.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	if err = sessionTwo.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	count, err := Sessions().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 2 {
		t.Error("want 2 records, got:", count)
	}
}
|
||||||
|
|
||||||
|
// Hook fixtures for testSessionsHooks: each zeroes the object so the test can
// observe (via DeepEqual against an empty Session) that the hook actually ran.

func sessionBeforeInsertHook(ctx context.Context, e boil.ContextExecutor, o *Session) error {
	*o = Session{}
	return nil
}

func sessionAfterInsertHook(ctx context.Context, e boil.ContextExecutor, o *Session) error {
	*o = Session{}
	return nil
}

func sessionAfterSelectHook(ctx context.Context, e boil.ContextExecutor, o *Session) error {
	*o = Session{}
	return nil
}

func sessionBeforeUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Session) error {
	*o = Session{}
	return nil
}

func sessionAfterUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Session) error {
	*o = Session{}
	return nil
}

func sessionBeforeDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Session) error {
	*o = Session{}
	return nil
}

func sessionAfterDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Session) error {
	*o = Session{}
	return nil
}

func sessionBeforeUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Session) error {
	*o = Session{}
	return nil
}

func sessionAfterUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Session) error {
	*o = Session{}
	return nil
}
|
||||||
|
|
||||||
|
// testSessionsHooks registers each hook kind in turn, invokes the matching
// do*Hooks method directly (no DB needed), and asserts the hook ran by
// checking the object was zeroed. Each registry is reset before the next case.
func testSessionsHooks(t *testing.T) {
	t.Parallel()

	var err error

	ctx := context.Background()
	empty := &Session{}
	o := &Session{}

	seed := randomize.NewSeed()
	if err = randomize.Struct(seed, o, sessionDBTypes, false); err != nil {
		t.Errorf("Unable to randomize Session object: %s", err)
	}

	AddSessionHook(boil.BeforeInsertHook, sessionBeforeInsertHook)
	if err = o.doBeforeInsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeInsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeInsertHook function to empty object, but got: %#v", o)
	}
	sessionBeforeInsertHooks = []SessionHook{}

	AddSessionHook(boil.AfterInsertHook, sessionAfterInsertHook)
	if err = o.doAfterInsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterInsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterInsertHook function to empty object, but got: %#v", o)
	}
	sessionAfterInsertHooks = []SessionHook{}

	AddSessionHook(boil.AfterSelectHook, sessionAfterSelectHook)
	if err = o.doAfterSelectHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterSelectHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterSelectHook function to empty object, but got: %#v", o)
	}
	sessionAfterSelectHooks = []SessionHook{}

	AddSessionHook(boil.BeforeUpdateHook, sessionBeforeUpdateHook)
	if err = o.doBeforeUpdateHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeUpdateHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeUpdateHook function to empty object, but got: %#v", o)
	}
	sessionBeforeUpdateHooks = []SessionHook{}

	AddSessionHook(boil.AfterUpdateHook, sessionAfterUpdateHook)
	if err = o.doAfterUpdateHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterUpdateHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterUpdateHook function to empty object, but got: %#v", o)
	}
	sessionAfterUpdateHooks = []SessionHook{}

	AddSessionHook(boil.BeforeDeleteHook, sessionBeforeDeleteHook)
	if err = o.doBeforeDeleteHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeDeleteHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeDeleteHook function to empty object, but got: %#v", o)
	}
	sessionBeforeDeleteHooks = []SessionHook{}

	AddSessionHook(boil.AfterDeleteHook, sessionAfterDeleteHook)
	if err = o.doAfterDeleteHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterDeleteHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterDeleteHook function to empty object, but got: %#v", o)
	}
	sessionAfterDeleteHooks = []SessionHook{}

	AddSessionHook(boil.BeforeUpsertHook, sessionBeforeUpsertHook)
	if err = o.doBeforeUpsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeUpsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeUpsertHook function to empty object, but got: %#v", o)
	}
	sessionBeforeUpsertHooks = []SessionHook{}

	AddSessionHook(boil.AfterUpsertHook, sessionAfterUpsertHook)
	if err = o.doAfterUpsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterUpsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterUpsertHook function to empty object, but got: %#v", o)
	}
	sessionAfterUpsertHooks = []SessionHook{}
}
|
||||||
|
|
||||||
|
// testSessionsInsert inserts one row with inferred columns and asserts the
// table then contains exactly one record.
func testSessionsInsert(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &Session{}
	if err = randomize.Struct(seed, o, sessionDBTypes, true, sessionColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Session struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	count, err := Sessions().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 1 {
		t.Error("want one record, got:", count)
	}
}
|
||||||
|
|
||||||
|
func testSessionsInsertWhitelist(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
seed := randomize.NewSeed()
|
||||||
|
var err error
|
||||||
|
o := &Session{}
|
||||||
|
if err = randomize.Struct(seed, o, sessionDBTypes, true); err != nil {
|
||||||
|
t.Errorf("Unable to randomize Session struct: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||||
|
defer func() { _ = tx.Rollback() }()
|
||||||
|
if err = o.Insert(ctx, tx, boil.Whitelist(sessionColumnsWithoutDefault...)); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
count, err := Sessions().Count(ctx, tx)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if count != 1 {
|
||||||
|
t.Error("want one record, got:", count)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testSessionsReload(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
seed := randomize.NewSeed()
|
||||||
|
var err error
|
||||||
|
o := &Session{}
|
||||||
|
if err = randomize.Struct(seed, o, sessionDBTypes, true, sessionColumnsWithDefault...); err != nil {
|
||||||
|
t.Errorf("Unable to randomize Session struct: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||||
|
defer func() { _ = tx.Rollback() }()
|
||||||
|
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = o.Reload(ctx, tx); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testSessionsReloadAll(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
seed := randomize.NewSeed()
|
||||||
|
var err error
|
||||||
|
o := &Session{}
|
||||||
|
if err = randomize.Struct(seed, o, sessionDBTypes, true, sessionColumnsWithDefault...); err != nil {
|
||||||
|
t.Errorf("Unable to randomize Session struct: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||||
|
defer func() { _ = tx.Rollback() }()
|
||||||
|
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
slice := SessionSlice{o}
|
||||||
|
|
||||||
|
if err = slice.ReloadAll(ctx, tx); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testSessionsSelect(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
seed := randomize.NewSeed()
|
||||||
|
var err error
|
||||||
|
o := &Session{}
|
||||||
|
if err = randomize.Struct(seed, o, sessionDBTypes, true, sessionColumnsWithDefault...); err != nil {
|
||||||
|
t.Errorf("Unable to randomize Session struct: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
tx := MustTx(boil.BeginTx(ctx, nil))
|
||||||
|
defer func() { _ = tx.Rollback() }()
|
||||||
|
if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
slice, err := Sessions().All(ctx, tx)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(slice) != 1 {
|
||||||
|
t.Error("want one record, got:", len(slice))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
	// sessionDBTypes maps Session struct field names to their SQLite column
	// types; used by randomize.Struct to generate valid values.
	sessionDBTypes = map[string]string{`ID`: `TEXT`, `Mtime`: `INTEGER`, `UserID`: `TEXT`}
	// Force the bytes package dependency in this generated file.
	_ = bytes.MinRead
)

// testSessionsUpdate inserts a Session, re-randomizes its non-key columns,
// and asserts Update affects exactly one row.
func testSessionsUpdate(t *testing.T) {
	t.Parallel()

	// Tables without a primary key cannot be updated by pkey.
	if 0 == len(sessionPrimaryKeyColumns) {
		t.Skip("Skipping table with no primary key columns")
	}
	// Tables with only primary key columns have nothing to update.
	if len(sessionAllColumns) == len(sessionPrimaryKeyColumns) {
		t.Skip("Skipping table with only primary key columns")
	}

	seed := randomize.NewSeed()
	var err error
	o := &Session{}
	if err = randomize.Struct(seed, o, sessionDBTypes, true, sessionColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Session struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	count, err := Sessions().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 1 {
		t.Error("want one record, got:", count)
	}

	// Randomize everything except the primary key so the row can still be
	// located by Update.
	if err = randomize.Struct(seed, o, sessionDBTypes, true, sessionPrimaryKeyColumns...); err != nil {
		t.Errorf("Unable to randomize Session struct: %s", err)
	}

	if rowsAff, err := o.Update(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only affect one row but affected", rowsAff)
	}
}
|
||||||
|
|
||||||
|
// testSessionsSliceUpdateAll inserts a Session, re-randomizes its non-key
// columns, builds an update map from the struct's `boil` tags, and asserts
// SessionSlice.UpdateAll affects exactly one row.
func testSessionsSliceUpdateAll(t *testing.T) {
	t.Parallel()

	if len(sessionAllColumns) == len(sessionPrimaryKeyColumns) {
		t.Skip("Skipping table with only primary key columns")
	}

	seed := randomize.NewSeed()
	var err error
	o := &Session{}
	if err = randomize.Struct(seed, o, sessionDBTypes, true, sessionColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize Session struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	count, err := Sessions().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 1 {
		t.Error("want one record, got:", count)
	}

	if err = randomize.Struct(seed, o, sessionDBTypes, true, sessionPrimaryKeyColumns...); err != nil {
		t.Errorf("Unable to randomize Session struct: %s", err)
	}

	// Remove Primary keys and unique columns from what we plan to update
	var fields []string
	if strmangle.StringSliceMatch(sessionAllColumns, sessionPrimaryKeyColumns) {
		fields = sessionAllColumns
	} else {
		fields = strmangle.SetComplement(
			sessionAllColumns,
			sessionPrimaryKeyColumns,
		)
	}

	// Reflect over the struct to collect the new values for the chosen
	// columns, matching struct fields to columns via the `boil` tag.
	value := reflect.Indirect(reflect.ValueOf(o))
	typ := reflect.TypeOf(o).Elem()
	n := typ.NumField()

	updateMap := M{}
	for _, col := range fields {
		for i := 0; i < n; i++ {
			f := typ.Field(i)
			if f.Tag.Get("boil") == col {
				updateMap[col] = value.Field(i).Interface()
			}
		}
	}

	slice := SessionSlice{o}
	if rowsAff, err := slice.UpdateAll(ctx, tx, updateMap); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("wanted one record updated but got", rowsAff)
	}
}
|
93
models/sqlite3_main_test.go
Normal file
93
models/sqlite3_main_test.go
Normal file
@ -0,0 +1,93 @@
|
|||||||
|
// Code generated by SQLBoiler 3.6.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
|
||||||
|
// This file is meant to be re-generated in place and/or deleted at any time.
|
||||||
|
|
||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"math/rand"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
|
|
||||||
|
_ "github.com/mattn/go-sqlite3"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/spf13/viper"
|
||||||
|
)
|
||||||
|
|
||||||
|
// rgxSQLitekey matches FOREIGN KEY clauses in a sqlite3 .dump so they can be
// stripped when rebuilding the test database (tests run without FK constraints).
var rgxSQLitekey = regexp.MustCompile(`(?mi)((,\n)?\s+foreign key.*?\n)+`)

// sqliteTester manages a throwaway copy of the development SQLite database
// for the generated test suite.
type sqliteTester struct {
	dbConn *sql.DB // lazily opened in conn(); closed in teardown()

	dbName     string // source database path (from viper config sqlite3.dbname)
	testDBName string // temp-dir copy the tests actually run against
}

func init() {
	// Register this tester as the suite-wide database driver harness.
	dbMain = &sqliteTester{}
}

// setup copies the configured SQLite database into a uniquely named temp file
// by piping `sqlite3 .dump` into a fresh `sqlite3` process, stripping foreign
// key clauses from the dump on the way through.
func (s *sqliteTester) setup() error {
	var err error

	s.dbName = viper.GetString("sqlite3.dbname")
	if len(s.dbName) == 0 {
		return errors.New("no dbname specified")
	}

	s.testDBName = filepath.Join(os.TempDir(), fmt.Sprintf("boil-sqlite3-%d.sql", rand.Int()))

	dumpCmd := exec.Command("sqlite3", "-cmd", ".dump", s.dbName)
	createCmd := exec.Command("sqlite3", s.testDBName)

	// dump -> (FK-stripping filter) -> create, streamed through an in-memory pipe.
	r, w := io.Pipe()
	dumpCmd.Stdout = w
	createCmd.Stdin = newFKeyDestroyer(rgxSQLitekey, r)

	if err = dumpCmd.Start(); err != nil {
		return errors.Wrap(err, "failed to start sqlite3 dump command")
	}
	if err = createCmd.Start(); err != nil {
		return errors.Wrap(err, "failed to start sqlite3 create command")
	}

	if err = dumpCmd.Wait(); err != nil {
		fmt.Println(err)
		return errors.Wrap(err, "failed to wait for sqlite3 dump command")
	}

	w.Close() // After dumpCmd is done, close the write end of the pipe

	if err = createCmd.Wait(); err != nil {
		fmt.Println(err)
		return errors.Wrap(err, "failed to wait for sqlite3 create command")
	}

	return nil
}

// teardown closes the open connection (if any) and deletes the temp database.
func (s *sqliteTester) teardown() error {
	if s.dbConn != nil {
		s.dbConn.Close()
	}

	return os.Remove(s.testDBName)
}

// conn returns a cached *sql.DB for the temp database, opening it on first use.
func (s *sqliteTester) conn() (*sql.DB, error) {
	if s.dbConn != nil {
		return s.dbConn, nil
	}

	var err error
	s.dbConn, err = sql.Open("sqlite3", fmt.Sprintf("file:%s?_loc=UTC", s.testDBName))
	if err != nil {
		return nil, err
	}

	return s.dbConn, nil
}
|
788
models/user.go
Normal file
788
models/user.go
Normal file
@ -0,0 +1,788 @@
|
|||||||
|
// Code generated by SQLBoiler 3.6.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
|
||||||
|
// This file is meant to be re-generated in place and/or deleted at any time.
|
||||||
|
|
||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"database/sql"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/friendsofgo/errors"
|
||||||
|
"github.com/volatiletech/sqlboiler/boil"
|
||||||
|
"github.com/volatiletech/sqlboiler/queries"
|
||||||
|
"github.com/volatiletech/sqlboiler/queries/qm"
|
||||||
|
"github.com/volatiletech/sqlboiler/queries/qmhelper"
|
||||||
|
"github.com/volatiletech/sqlboiler/strmangle"
|
||||||
|
)
|
||||||
|
|
||||||
|
// User is an object representing the database table.
type User struct {
	ID             string `boil:"id" json:"id" toml:"id" yaml:"id"`
	DisplayName    string `boil:"display_name" json:"display_name" toml:"display_name" yaml:"display_name"`
	Email          string `boil:"email" json:"email" toml:"email" yaml:"email"`
	PasswordFormat int64  `boil:"password_format" json:"password_format" toml:"password_format" yaml:"password_format"`
	PasswordHash   string `boil:"password_hash" json:"password_hash" toml:"password_hash" yaml:"password_hash"`
	IsAdmin        int64  `boil:"is_admin" json:"is_admin" toml:"is_admin" yaml:"is_admin"`

	// R holds eagerly-loaded relationships; L holds the Load methods.
	R *userR `boil:"-" json:"-" toml:"-" yaml:"-"`
	L userL  `boil:"-" json:"-" toml:"-" yaml:"-"`
}

// UserColumns maps struct field names to their database column names.
var UserColumns = struct {
	ID             string
	DisplayName    string
	Email          string
	PasswordFormat string
	PasswordHash   string
	IsAdmin        string
}{
	ID:             "id",
	DisplayName:    "display_name",
	Email:          "email",
	PasswordFormat: "password_format",
	PasswordHash:   "password_hash",
	IsAdmin:        "is_admin",
}

// Generated where

// UserWhere provides typed query-mod helpers for building WHERE clauses
// against each column of the "user" table.
var UserWhere = struct {
	ID             whereHelperstring
	DisplayName    whereHelperstring
	Email          whereHelperstring
	PasswordFormat whereHelperint64
	PasswordHash   whereHelperstring
	IsAdmin        whereHelperint64
}{
	ID:             whereHelperstring{field: "\"user\".\"id\""},
	DisplayName:    whereHelperstring{field: "\"user\".\"display_name\""},
	Email:          whereHelperstring{field: "\"user\".\"email\""},
	PasswordFormat: whereHelperint64{field: "\"user\".\"password_format\""},
	PasswordHash:   whereHelperstring{field: "\"user\".\"password_hash\""},
	IsAdmin:        whereHelperint64{field: "\"user\".\"is_admin\""},
}

// UserRels is where relationship names are stored.
var UserRels = struct {
}{}

// userR is where relationships are stored.
type userR struct {
}

// NewStruct creates a new relationship struct
func (*userR) NewStruct() *userR {
	return &userR{}
}

// userL is where Load methods for each relationship are stored.
type userL struct{}

var (
	// Column lists used by the generated CRUD code for column inference.
	userAllColumns            = []string{"id", "display_name", "email", "password_format", "password_hash", "is_admin"}
	userColumnsWithoutDefault = []string{"id", "display_name", "email", "password_format", "password_hash", "is_admin"}
	userColumnsWithDefault    = []string{}
	userPrimaryKeyColumns     = []string{"id"}
)

type (
	// UserSlice is an alias for a slice of pointers to User.
	// This should generally be used opposed to []User.
	UserSlice []*User
	// UserHook is the signature for custom User hook methods
	UserHook func(context.Context, boil.ContextExecutor, *User) error

	// userQuery wraps a queries.Query to attach User-typed finishers.
	userQuery struct {
		*queries.Query
	}
)

// Cache for insert, update and upsert
var (
	userType                 = reflect.TypeOf(&User{})
	userMapping              = queries.MakeStructMapping(userType)
	userPrimaryKeyMapping, _ = queries.BindMapping(userType, userMapping, userPrimaryKeyColumns)
	userInsertCacheMut       sync.RWMutex
	userInsertCache          = make(map[string]insertCache)
	userUpdateCacheMut       sync.RWMutex
	userUpdateCache          = make(map[string]updateCache)
	userUpsertCacheMut       sync.RWMutex
	userUpsertCache          = make(map[string]insertCache)
)

var (
	// Force time package dependency for automated UpdatedAt/CreatedAt.
	_ = time.Second
	// Force qmhelper dependency for where clause generation (which doesn't
	// always happen)
	_ = qmhelper.Where
)

// Registered hook slices, one per hook point; executed by the doXxxHooks
// methods below and appended to via AddUserHook.
var userBeforeInsertHooks []UserHook
var userBeforeUpdateHooks []UserHook
var userBeforeDeleteHooks []UserHook
var userBeforeUpsertHooks []UserHook

var userAfterInsertHooks []UserHook
var userAfterSelectHooks []UserHook
var userAfterUpdateHooks []UserHook
var userAfterDeleteHooks []UserHook
var userAfterUpsertHooks []UserHook
|
||||||
|
|
||||||
|
// doBeforeInsertHooks executes all "before insert" hooks.
// All nine doXxxHooks methods share the same shape: skip if hooks are
// disabled on the context, otherwise run each registered hook in order and
// stop at the first error.
func (o *User) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range userBeforeInsertHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doBeforeUpdateHooks executes all "before Update" hooks.
func (o *User) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range userBeforeUpdateHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doBeforeDeleteHooks executes all "before Delete" hooks.
func (o *User) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range userBeforeDeleteHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doBeforeUpsertHooks executes all "before Upsert" hooks.
func (o *User) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range userBeforeUpsertHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doAfterInsertHooks executes all "after Insert" hooks.
func (o *User) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range userAfterInsertHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doAfterSelectHooks executes all "after Select" hooks.
func (o *User) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range userAfterSelectHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doAfterUpdateHooks executes all "after Update" hooks.
func (o *User) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range userAfterUpdateHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doAfterDeleteHooks executes all "after Delete" hooks.
func (o *User) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range userAfterDeleteHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}

// doAfterUpsertHooks executes all "after Upsert" hooks.
func (o *User) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) {
	if boil.HooksAreSkipped(ctx) {
		return nil
	}

	for _, hook := range userAfterUpsertHooks {
		if err := hook(ctx, exec, o); err != nil {
			return err
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
// AddUserHook registers your hook function for all future operations.
// Registration is append-only; there is no way to remove a hook, and this
// function is not synchronized — register hooks during startup only.
func AddUserHook(hookPoint boil.HookPoint, userHook UserHook) {
	switch hookPoint {
	case boil.BeforeInsertHook:
		userBeforeInsertHooks = append(userBeforeInsertHooks, userHook)
	case boil.BeforeUpdateHook:
		userBeforeUpdateHooks = append(userBeforeUpdateHooks, userHook)
	case boil.BeforeDeleteHook:
		userBeforeDeleteHooks = append(userBeforeDeleteHooks, userHook)
	case boil.BeforeUpsertHook:
		userBeforeUpsertHooks = append(userBeforeUpsertHooks, userHook)
	case boil.AfterInsertHook:
		userAfterInsertHooks = append(userAfterInsertHooks, userHook)
	case boil.AfterSelectHook:
		userAfterSelectHooks = append(userAfterSelectHooks, userHook)
	case boil.AfterUpdateHook:
		userAfterUpdateHooks = append(userAfterUpdateHooks, userHook)
	case boil.AfterDeleteHook:
		userAfterDeleteHooks = append(userAfterDeleteHooks, userHook)
	case boil.AfterUpsertHook:
		userAfterUpsertHooks = append(userAfterUpsertHooks, userHook)
	}
}
|
||||||
|
|
||||||
|
// One returns a single user record from the query.
// Returns sql.ErrNoRows unwrapped when no row matches, so callers can compare
// against it directly.
func (q userQuery) One(ctx context.Context, exec boil.ContextExecutor) (*User, error) {
	o := &User{}

	queries.SetLimit(q.Query, 1)

	err := q.Bind(ctx, exec, o)
	if err != nil {
		if errors.Cause(err) == sql.ErrNoRows {
			return nil, sql.ErrNoRows
		}
		return nil, errors.Wrap(err, "models: failed to execute a one query for user")
	}

	if err := o.doAfterSelectHooks(ctx, exec); err != nil {
		return o, err
	}

	return o, nil
}

// All returns all User records from the query.
func (q userQuery) All(ctx context.Context, exec boil.ContextExecutor) (UserSlice, error) {
	var o []*User

	err := q.Bind(ctx, exec, &o)
	if err != nil {
		return nil, errors.Wrap(err, "models: failed to assign all query results to User slice")
	}

	// Run after-select hooks on every returned row (skipped entirely when
	// none are registered).
	if len(userAfterSelectHooks) != 0 {
		for _, obj := range o {
			if err := obj.doAfterSelectHooks(ctx, exec); err != nil {
				return o, err
			}
		}
	}

	return o, nil
}

// Count returns the count of all User records in the query.
func (q userQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	var count int64

	// Replace the select list with COUNT(*) before executing.
	queries.SetSelect(q.Query, nil)
	queries.SetCount(q.Query)

	err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
	if err != nil {
		return 0, errors.Wrap(err, "models: failed to count user rows")
	}

	return count, nil
}

// Exists checks if the row exists in the table.
func (q userQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) {
	var count int64

	// COUNT with LIMIT 1: we only need to know whether at least one row matches.
	queries.SetSelect(q.Query, nil)
	queries.SetCount(q.Query)
	queries.SetLimit(q.Query, 1)

	err := q.Query.QueryRowContext(ctx, exec).Scan(&count)
	if err != nil {
		return false, errors.Wrap(err, "models: failed to check if user exists")
	}

	return count > 0, nil
}

// Users retrieves all the records using an executor.
func Users(mods ...qm.QueryMod) userQuery {
	mods = append(mods, qm.From("\"user\""))
	return userQuery{NewQuery(mods...)}
}

// FindUser retrieves a single record by ID with an executor.
// If selectCols is empty Find will return all columns.
func FindUser(ctx context.Context, exec boil.ContextExecutor, iD string, selectCols ...string) (*User, error) {
	userObj := &User{}

	sel := "*"
	if len(selectCols) > 0 {
		sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",")
	}
	query := fmt.Sprintf(
		"select %s from \"user\" where \"id\"=?", sel,
	)

	q := queries.Raw(query, iD)

	err := q.Bind(ctx, exec, userObj)
	if err != nil {
		if errors.Cause(err) == sql.ErrNoRows {
			return nil, sql.ErrNoRows
		}
		return nil, errors.Wrap(err, "models: unable to select from user")
	}

	return userObj, nil
}
|
||||||
|
|
||||||
|
// Insert a single record using an executor.
// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts.
// The built INSERT (and optional default-value fetch-back query) is cached
// per column-set key to avoid rebuilding SQL on every call.
func (o *User) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error {
	if o == nil {
		return errors.New("models: no user provided for insertion")
	}

	var err error

	if err := o.doBeforeInsertHooks(ctx, exec); err != nil {
		return err
	}

	// Columns with defaults that the caller set to non-zero values must be
	// included in the insert column set.
	nzDefaults := queries.NonZeroDefaultSet(userColumnsWithDefault, o)

	key := makeCacheKey(columns, nzDefaults)
	userInsertCacheMut.RLock()
	cache, cached := userInsertCache[key]
	userInsertCacheMut.RUnlock()

	if !cached {
		// First use of this column set: build the SQL and struct mappings.
		wl, returnColumns := columns.InsertColumnSet(
			userAllColumns,
			userColumnsWithDefault,
			userColumnsWithoutDefault,
			nzDefaults,
		)

		cache.valueMapping, err = queries.BindMapping(userType, userMapping, wl)
		if err != nil {
			return err
		}
		cache.retMapping, err = queries.BindMapping(userType, userMapping, returnColumns)
		if err != nil {
			return err
		}
		if len(wl) != 0 {
			cache.query = fmt.Sprintf("INSERT INTO \"user\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1))
		} else {
			cache.query = "INSERT INTO \"user\" () VALUES ()%s%s"
		}

		var queryOutput, queryReturning string

		// When default-valued columns must be read back, prepare a follow-up
		// SELECT by primary key (SQLite has no RETURNING here).
		if len(cache.retMapping) != 0 {
			cache.retQuery = fmt.Sprintf("SELECT \"%s\" FROM \"user\" WHERE %s", strings.Join(returnColumns, "\",\""), strmangle.WhereClause("\"", "\"", 0, userPrimaryKeyColumns))
		}

		cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning)
	}

	value := reflect.Indirect(reflect.ValueOf(o))
	vals := queries.ValuesFromMapping(value, cache.valueMapping)

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, cache.query)
		fmt.Fprintln(writer, vals)
	}
	_, err = exec.ExecContext(ctx, cache.query, vals...)

	if err != nil {
		return errors.Wrap(err, "models: unable to insert into user")
	}

	var identifierCols []interface{}

	if len(cache.retMapping) == 0 {
		goto CacheNoHooks
	}

	identifierCols = []interface{}{
		o.ID,
	}

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, cache.retQuery)
		fmt.Fprintln(writer, identifierCols...)
	}
	// Populate the struct's default-valued fields from the inserted row.
	err = exec.QueryRowContext(ctx, cache.retQuery, identifierCols...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...)
	if err != nil {
		return errors.Wrap(err, "models: unable to populate default values for user")
	}

CacheNoHooks:
	if !cached {
		userInsertCacheMut.Lock()
		userInsertCache[key] = cache
		userInsertCacheMut.Unlock()
	}

	return o.doAfterInsertHooks(ctx, exec)
}
|
||||||
|
|
||||||
|
// Update uses an executor to update the User.
// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates.
// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records.
// The built UPDATE statement is cached per column-set key, keyed without
// non-zero defaults (unlike Insert).
func (o *User) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) {
	var err error
	if err = o.doBeforeUpdateHooks(ctx, exec); err != nil {
		return 0, err
	}
	key := makeCacheKey(columns, nil)
	userUpdateCacheMut.RLock()
	cache, cached := userUpdateCache[key]
	userUpdateCacheMut.RUnlock()

	if !cached {
		wl := columns.UpdateColumnSet(
			userAllColumns,
			userPrimaryKeyColumns,
		)

		// Never overwrite created_at unless the caller explicitly whitelisted it.
		if !columns.IsWhitelist() {
			wl = strmangle.SetComplement(wl, []string{"created_at"})
		}
		if len(wl) == 0 {
			return 0, errors.New("models: unable to update user, could not build whitelist")
		}

		cache.query = fmt.Sprintf("UPDATE \"user\" SET %s WHERE %s",
			strmangle.SetParamNames("\"", "\"", 0, wl),
			strmangle.WhereClause("\"", "\"", 0, userPrimaryKeyColumns),
		)
		// Bind values for the SET columns followed by the primary key for the WHERE.
		cache.valueMapping, err = queries.BindMapping(userType, userMapping, append(wl, userPrimaryKeyColumns...))
		if err != nil {
			return 0, err
		}
	}

	values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping)

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, cache.query)
		fmt.Fprintln(writer, values)
	}
	var result sql.Result
	result, err = exec.ExecContext(ctx, cache.query, values...)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to update user row")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: failed to get rows affected by update for user")
	}

	if !cached {
		userUpdateCacheMut.Lock()
		userUpdateCache[key] = cache
		userUpdateCacheMut.Unlock()
	}

	return rowsAff, o.doAfterUpdateHooks(ctx, exec)
}
|
||||||
|
|
||||||
|
// UpdateAll updates all rows with the specified column values.
|
||||||
|
func (q userQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
|
||||||
|
queries.SetUpdate(q.Query, cols)
|
||||||
|
|
||||||
|
result, err := q.Query.ExecContext(ctx, exec)
|
||||||
|
if err != nil {
|
||||||
|
return 0, errors.Wrap(err, "models: unable to update all for user")
|
||||||
|
}
|
||||||
|
|
||||||
|
rowsAff, err := result.RowsAffected()
|
||||||
|
if err != nil {
|
||||||
|
return 0, errors.Wrap(err, "models: unable to retrieve rows affected for user")
|
||||||
|
}
|
||||||
|
|
||||||
|
return rowsAff, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateAll updates all rows with the specified column values, using an executor.
// Every row in the slice receives the same column values; rows are matched by
// their primary keys in a single repeated WHERE clause.
func (o UserSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) {
	ln := int64(len(o))
	if ln == 0 {
		return 0, nil
	}

	if len(cols) == 0 {
		return 0, errors.New("models: update all requires at least one column argument")
	}

	// SET clause arguments come first, in map-iteration order (order does not
	// matter because SetParamNames emits the names in the same order).
	colNames := make([]string, len(cols))
	args := make([]interface{}, len(cols))

	i := 0
	for name, value := range cols {
		colNames[i] = name
		args[i] = value
		i++
	}

	// Append all of the primary key values for each column
	for _, obj := range o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), userPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	sql := fmt.Sprintf("UPDATE \"user\" SET %s WHERE %s",
		strmangle.SetParamNames("\"", "\"", 0, colNames),
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, userPrimaryKeyColumns, len(o)))

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, args...)
	}
	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to update all in user slice")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to retrieve rows affected all in update all user")
	}
	return rowsAff, nil
}
|
||||||
|
|
||||||
|
// Delete deletes a single User record with an executor.
// Delete will match against the primary key column to find the record to delete.
// Before/after delete hooks run around the statement; the returned count is the
// number of rows the DELETE affected.
func (o *User) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if o == nil {
		return 0, errors.New("models: no User provided for delete")
	}

	if err := o.doBeforeDeleteHooks(ctx, exec); err != nil {
		return 0, err
	}

	// Primary key values are extracted by reflection using the prebuilt mapping.
	args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), userPrimaryKeyMapping)
	sql := "DELETE FROM \"user\" WHERE \"id\"=?"

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, args...)
	}
	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to delete from user")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: failed to get rows affected by delete for user")
	}

	if err := o.doAfterDeleteHooks(ctx, exec); err != nil {
		return 0, err
	}

	return rowsAff, nil
}
|
||||||
|
|
||||||
|
// DeleteAll deletes all matching rows.
// The query's existing mods form the WHERE clause; no per-row hooks run here.
func (q userQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if q.Query == nil {
		return 0, errors.New("models: no userQuery provided for delete all")
	}

	queries.SetDelete(q.Query)

	result, err := q.Query.ExecContext(ctx, exec)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to delete all from user")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for user")
	}

	return rowsAff, nil
}
|
||||||
|
|
||||||
|
// DeleteAll deletes all rows in the slice, using an executor.
// Rows are matched by primary key in one repeated WHERE clause; before/after
// delete hooks run per object, but only when any hooks are registered.
func (o UserSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {
	if len(o) == 0 {
		return 0, nil
	}

	if len(userBeforeDeleteHooks) != 0 {
		for _, obj := range o {
			if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil {
				return 0, err
			}
		}
	}

	var args []interface{}
	for _, obj := range o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), userPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	sql := "DELETE FROM \"user\" WHERE " +
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, userPrimaryKeyColumns, len(o))

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, args)
	}
	result, err := exec.ExecContext(ctx, sql, args...)
	if err != nil {
		return 0, errors.Wrap(err, "models: unable to delete all from user slice")
	}

	rowsAff, err := result.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "models: failed to get rows affected by deleteall for user")
	}

	if len(userAfterDeleteHooks) != 0 {
		for _, obj := range o {
			if err := obj.doAfterDeleteHooks(ctx, exec); err != nil {
				return 0, err
			}
		}
	}

	return rowsAff, nil
}
|
||||||
|
|
||||||
|
// Reload refetches the object from the database
// using the primary keys with an executor.
// The receiver is overwritten in place with the freshly loaded row.
func (o *User) Reload(ctx context.Context, exec boil.ContextExecutor) error {
	ret, err := FindUser(ctx, exec, o.ID)
	if err != nil {
		return err
	}

	*o = *ret
	return nil
}
|
||||||
|
|
||||||
|
// ReloadAll refetches every row with matching primary key column values
// and overwrites the original object slice with the newly updated slice.
// NOTE(review): the reloaded slice is whatever the SELECT returns — rows
// deleted since the original load simply disappear from *o.
func (o *UserSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error {
	if o == nil || len(*o) == 0 {
		return nil
	}

	slice := UserSlice{}
	var args []interface{}
	for _, obj := range *o {
		pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), userPrimaryKeyMapping)
		args = append(args, pkeyArgs...)
	}

	sql := "SELECT \"user\".* FROM \"user\" WHERE " +
		strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, userPrimaryKeyColumns, len(*o))

	q := queries.Raw(sql, args...)

	err := q.Bind(ctx, exec, &slice)
	if err != nil {
		return errors.Wrap(err, "models: unable to reload all in UserSlice")
	}

	*o = slice

	return nil
}
|
||||||
|
|
||||||
|
// UserExists checks if the User row exists.
// It runs a single SELECT EXISTS query against the primary key, so it never
// transfers row data.
func UserExists(ctx context.Context, exec boil.ContextExecutor, iD string) (bool, error) {
	var exists bool
	sql := "select exists(select 1 from \"user\" where \"id\"=? limit 1)"

	if boil.IsDebug(ctx) {
		writer := boil.DebugWriterFrom(ctx)
		fmt.Fprintln(writer, sql)
		fmt.Fprintln(writer, iD)
	}
	row := exec.QueryRowContext(ctx, sql, iD)

	err := row.Scan(&exists)
	if err != nil {
		return false, errors.Wrap(err, "models: unable to check if user exists")
	}

	return exists, nil
}
|
684
models/user_test.go
Normal file
684
models/user_test.go
Normal file
@ -0,0 +1,684 @@
|
|||||||
|
// Code generated by SQLBoiler 3.6.1 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT.
|
||||||
|
// This file is meant to be re-generated in place and/or deleted at any time.
|
||||||
|
|
||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/volatiletech/sqlboiler/boil"
|
||||||
|
"github.com/volatiletech/sqlboiler/queries"
|
||||||
|
"github.com/volatiletech/sqlboiler/randomize"
|
||||||
|
"github.com/volatiletech/sqlboiler/strmangle"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// Relationships sometimes use the reflection helper queries.Equal/queries.Assign
|
||||||
|
// so force a package dependency in case they don't.
|
||||||
|
_ = queries.Equal
|
||||||
|
)
|
||||||
|
|
||||||
|
// testUsers verifies that the Users() starter query is constructed non-nil.
func testUsers(t *testing.T) {
	t.Parallel()

	query := Users()

	if query.Query == nil {
		t.Error("expected a query, got nothing")
	}
}
|
||||||
|
|
||||||
|
// testUsersDelete inserts a random User inside a rolled-back transaction,
// deletes it via User.Delete, and checks exactly one row was removed.
func testUsersDelete(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &User{}
	if err = randomize.Struct(seed, o, userDBTypes, true, userColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize User struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	// Rollback keeps the test hermetic; the error is intentionally ignored.
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if rowsAff, err := o.Delete(ctx, tx); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only have deleted one row, but affected:", rowsAff)
	}

	count, err := Users().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 0 {
		t.Error("want zero records, got:", count)
	}
}
|
||||||
|
|
||||||
|
// testUsersQueryDeleteAll inserts one row, deletes via the query-level
// DeleteAll, and verifies the table is empty afterwards.
func testUsersQueryDeleteAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &User{}
	if err = randomize.Struct(seed, o, userDBTypes, true, userColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize User struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if rowsAff, err := Users().DeleteAll(ctx, tx); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only have deleted one row, but affected:", rowsAff)
	}

	count, err := Users().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 0 {
		t.Error("want zero records, got:", count)
	}
}
|
||||||
|
|
||||||
|
// testUsersSliceDeleteAll inserts one row, deletes it via UserSlice.DeleteAll,
// and verifies the table is empty afterwards.
func testUsersSliceDeleteAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &User{}
	if err = randomize.Struct(seed, o, userDBTypes, true, userColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize User struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	slice := UserSlice{o}

	if rowsAff, err := slice.DeleteAll(ctx, tx); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only have deleted one row, but affected:", rowsAff)
	}

	count, err := Users().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 0 {
		t.Error("want zero records, got:", count)
	}
}
|
||||||
|
|
||||||
|
// testUsersExists inserts a row and asserts UserExists reports it present.
func testUsersExists(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &User{}
	if err = randomize.Struct(seed, o, userDBTypes, true, userColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize User struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	e, err := UserExists(ctx, tx, o.ID)
	if err != nil {
		t.Errorf("Unable to check if User exists: %s", err)
	}
	if !e {
		t.Errorf("Expected UserExists to return true, but got false.")
	}
}
|
||||||
|
|
||||||
|
// testUsersFind inserts a row and asserts FindUser locates it by primary key.
func testUsersFind(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &User{}
	if err = randomize.Struct(seed, o, userDBTypes, true, userColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize User struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	userFound, err := FindUser(ctx, tx, o.ID)
	if err != nil {
		t.Error(err)
	}

	if userFound == nil {
		t.Error("want a record, got nil")
	}
}
|
||||||
|
|
||||||
|
// testUsersBind inserts a row and verifies Bind can scan a result into a *User.
func testUsersBind(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &User{}
	if err = randomize.Struct(seed, o, userDBTypes, true, userColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize User struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if err = Users().Bind(ctx, tx, o); err != nil {
		t.Error(err)
	}
}
|
||||||
|
|
||||||
|
// testUsersOne inserts a row and verifies One returns a non-nil record.
func testUsersOne(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &User{}
	if err = randomize.Struct(seed, o, userDBTypes, true, userColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize User struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if x, err := Users().One(ctx, tx); err != nil {
		t.Error(err)
	} else if x == nil {
		t.Error("expected to get a non nil record")
	}
}
|
||||||
|
|
||||||
|
// testUsersAll inserts two rows and verifies All returns both.
func testUsersAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	userOne := &User{}
	userTwo := &User{}
	if err = randomize.Struct(seed, userOne, userDBTypes, false, userColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize User struct: %s", err)
	}
	if err = randomize.Struct(seed, userTwo, userDBTypes, false, userColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize User struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = userOne.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	if err = userTwo.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	slice, err := Users().All(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if len(slice) != 2 {
		t.Error("want 2 records, got:", len(slice))
	}
}
|
||||||
|
|
||||||
|
// testUsersCount inserts two rows and verifies Count reports exactly two.
func testUsersCount(t *testing.T) {
	t.Parallel()

	var err error
	seed := randomize.NewSeed()
	userOne := &User{}
	userTwo := &User{}
	if err = randomize.Struct(seed, userOne, userDBTypes, false, userColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize User struct: %s", err)
	}
	if err = randomize.Struct(seed, userTwo, userDBTypes, false, userColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize User struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = userOne.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}
	if err = userTwo.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	count, err := Users().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 2 {
		t.Error("want 2 records, got:", count)
	}
}
|
||||||
|
|
||||||
|
// The hook stubs below are registered by testUsersHooks. Each one zeroes the
// passed-in User so the test can observe (via reflect.DeepEqual against an
// empty User) that the hook actually ran.

func userBeforeInsertHook(ctx context.Context, e boil.ContextExecutor, o *User) error {
	*o = User{}
	return nil
}

func userAfterInsertHook(ctx context.Context, e boil.ContextExecutor, o *User) error {
	*o = User{}
	return nil
}

func userAfterSelectHook(ctx context.Context, e boil.ContextExecutor, o *User) error {
	*o = User{}
	return nil
}

func userBeforeUpdateHook(ctx context.Context, e boil.ContextExecutor, o *User) error {
	*o = User{}
	return nil
}

func userAfterUpdateHook(ctx context.Context, e boil.ContextExecutor, o *User) error {
	*o = User{}
	return nil
}

func userBeforeDeleteHook(ctx context.Context, e boil.ContextExecutor, o *User) error {
	*o = User{}
	return nil
}

func userAfterDeleteHook(ctx context.Context, e boil.ContextExecutor, o *User) error {
	*o = User{}
	return nil
}

func userBeforeUpsertHook(ctx context.Context, e boil.ContextExecutor, o *User) error {
	*o = User{}
	return nil
}

func userAfterUpsertHook(ctx context.Context, e boil.ContextExecutor, o *User) error {
	*o = User{}
	return nil
}
|
||||||
|
|
||||||
|
// testUsersHooks registers each hook type in turn, invokes the corresponding
// do*Hooks dispatcher, and checks the hook ran by verifying it zeroed the
// object. Each hook slice is reset afterwards so hooks don't leak between
// sections (or into other tests).
func testUsersHooks(t *testing.T) {
	t.Parallel()

	var err error

	ctx := context.Background()
	empty := &User{}
	o := &User{}

	seed := randomize.NewSeed()
	if err = randomize.Struct(seed, o, userDBTypes, false); err != nil {
		t.Errorf("Unable to randomize User object: %s", err)
	}

	AddUserHook(boil.BeforeInsertHook, userBeforeInsertHook)
	if err = o.doBeforeInsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeInsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeInsertHook function to empty object, but got: %#v", o)
	}
	userBeforeInsertHooks = []UserHook{}

	AddUserHook(boil.AfterInsertHook, userAfterInsertHook)
	if err = o.doAfterInsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterInsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterInsertHook function to empty object, but got: %#v", o)
	}
	userAfterInsertHooks = []UserHook{}

	AddUserHook(boil.AfterSelectHook, userAfterSelectHook)
	if err = o.doAfterSelectHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterSelectHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterSelectHook function to empty object, but got: %#v", o)
	}
	userAfterSelectHooks = []UserHook{}

	AddUserHook(boil.BeforeUpdateHook, userBeforeUpdateHook)
	if err = o.doBeforeUpdateHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeUpdateHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeUpdateHook function to empty object, but got: %#v", o)
	}
	userBeforeUpdateHooks = []UserHook{}

	AddUserHook(boil.AfterUpdateHook, userAfterUpdateHook)
	if err = o.doAfterUpdateHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterUpdateHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterUpdateHook function to empty object, but got: %#v", o)
	}
	userAfterUpdateHooks = []UserHook{}

	AddUserHook(boil.BeforeDeleteHook, userBeforeDeleteHook)
	if err = o.doBeforeDeleteHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeDeleteHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeDeleteHook function to empty object, but got: %#v", o)
	}
	userBeforeDeleteHooks = []UserHook{}

	AddUserHook(boil.AfterDeleteHook, userAfterDeleteHook)
	if err = o.doAfterDeleteHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterDeleteHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterDeleteHook function to empty object, but got: %#v", o)
	}
	userAfterDeleteHooks = []UserHook{}

	AddUserHook(boil.BeforeUpsertHook, userBeforeUpsertHook)
	if err = o.doBeforeUpsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doBeforeUpsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected BeforeUpsertHook function to empty object, but got: %#v", o)
	}
	userBeforeUpsertHooks = []UserHook{}

	AddUserHook(boil.AfterUpsertHook, userAfterUpsertHook)
	if err = o.doAfterUpsertHooks(ctx, nil); err != nil {
		t.Errorf("Unable to execute doAfterUpsertHooks: %s", err)
	}
	if !reflect.DeepEqual(o, empty) {
		t.Errorf("Expected AfterUpsertHook function to empty object, but got: %#v", o)
	}
	userAfterUpsertHooks = []UserHook{}
}
|
||||||
|
|
||||||
|
// testUsersInsert inserts a random row with inferred columns and verifies the
// table then contains exactly one record.
func testUsersInsert(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &User{}
	if err = randomize.Struct(seed, o, userDBTypes, true, userColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize User struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	count, err := Users().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 1 {
		t.Error("want one record, got:", count)
	}
}
|
||||||
|
|
||||||
|
// testUsersInsertWhitelist inserts with an explicit column whitelist (the
// non-default columns) and verifies exactly one record results.
func testUsersInsertWhitelist(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &User{}
	if err = randomize.Struct(seed, o, userDBTypes, true); err != nil {
		t.Errorf("Unable to randomize User struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Whitelist(userColumnsWithoutDefault...)); err != nil {
		t.Error(err)
	}

	count, err := Users().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 1 {
		t.Error("want one record, got:", count)
	}
}
|
||||||
|
|
||||||
|
// testUsersReload inserts a row and verifies Reload succeeds on it.
func testUsersReload(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &User{}
	if err = randomize.Struct(seed, o, userDBTypes, true, userColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize User struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	if err = o.Reload(ctx, tx); err != nil {
		t.Error(err)
	}
}
|
||||||
|
|
||||||
|
// testUsersReloadAll inserts a row and verifies UserSlice.ReloadAll succeeds.
func testUsersReloadAll(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &User{}
	if err = randomize.Struct(seed, o, userDBTypes, true, userColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize User struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	slice := UserSlice{o}

	if err = slice.ReloadAll(ctx, tx); err != nil {
		t.Error(err)
	}
}
|
||||||
|
|
||||||
|
// testUsersSelect inserts a row and verifies All returns exactly one record.
func testUsersSelect(t *testing.T) {
	t.Parallel()

	seed := randomize.NewSeed()
	var err error
	o := &User{}
	if err = randomize.Struct(seed, o, userDBTypes, true, userColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize User struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	slice, err := Users().All(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if len(slice) != 1 {
		t.Error("want one record, got:", len(slice))
	}
}
|
||||||
|
|
||||||
|
var (
	// userDBTypes maps User struct field names to their SQLite column types,
	// consumed by randomize.Struct when generating test fixtures.
	userDBTypes = map[string]string{`ID`: `TEXT`, `DisplayName`: `TEXT`, `Email`: `TEXT`, `PasswordFormat`: `INTEGER`, `PasswordHash`: `TEXT`, `IsAdmin`: `INTEGER`}
	// Force the bytes import to stay referenced even if unused elsewhere.
	_ = bytes.MinRead
)
|
||||||
|
|
||||||
|
// testUsersUpdate inserts a row, re-randomizes its non-primary-key columns,
// and verifies Update affects exactly one row. Tables with no primary key or
// with only primary-key columns are skipped since Update cannot apply.
func testUsersUpdate(t *testing.T) {
	t.Parallel()

	if 0 == len(userPrimaryKeyColumns) {
		t.Skip("Skipping table with no primary key columns")
	}
	if len(userAllColumns) == len(userPrimaryKeyColumns) {
		t.Skip("Skipping table with only primary key columns")
	}

	seed := randomize.NewSeed()
	var err error
	o := &User{}
	if err = randomize.Struct(seed, o, userDBTypes, true, userColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize User struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	count, err := Users().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 1 {
		t.Error("want one record, got:", count)
	}

	// Re-randomize everything except the primary key so the row stays findable.
	if err = randomize.Struct(seed, o, userDBTypes, true, userPrimaryKeyColumns...); err != nil {
		t.Errorf("Unable to randomize User struct: %s", err)
	}

	if rowsAff, err := o.Update(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("should only affect one row but affected", rowsAff)
	}
}
|
||||||
|
|
||||||
|
// testUsersSliceUpdateAll inserts a row, builds an update map of all
// non-primary-key columns via reflection on the struct's `boil` tags, and
// verifies UserSlice.UpdateAll affects exactly one row.
func testUsersSliceUpdateAll(t *testing.T) {
	t.Parallel()

	if len(userAllColumns) == len(userPrimaryKeyColumns) {
		t.Skip("Skipping table with only primary key columns")
	}

	seed := randomize.NewSeed()
	var err error
	o := &User{}
	if err = randomize.Struct(seed, o, userDBTypes, true, userColumnsWithDefault...); err != nil {
		t.Errorf("Unable to randomize User struct: %s", err)
	}

	ctx := context.Background()
	tx := MustTx(boil.BeginTx(ctx, nil))
	defer func() { _ = tx.Rollback() }()
	if err = o.Insert(ctx, tx, boil.Infer()); err != nil {
		t.Error(err)
	}

	count, err := Users().Count(ctx, tx)
	if err != nil {
		t.Error(err)
	}

	if count != 1 {
		t.Error("want one record, got:", count)
	}

	if err = randomize.Struct(seed, o, userDBTypes, true, userPrimaryKeyColumns...); err != nil {
		t.Errorf("Unable to randomize User struct: %s", err)
	}

	// Remove Primary keys and unique columns from what we plan to update
	var fields []string
	if strmangle.StringSliceMatch(userAllColumns, userPrimaryKeyColumns) {
		fields = userAllColumns
	} else {
		fields = strmangle.SetComplement(
			userAllColumns,
			userPrimaryKeyColumns,
		)
	}

	value := reflect.Indirect(reflect.ValueOf(o))
	typ := reflect.TypeOf(o).Elem()
	n := typ.NumField()

	// Map each updatable column name to the struct field value carrying the
	// matching `boil` tag.
	updateMap := M{}
	for _, col := range fields {
		for i := 0; i < n; i++ {
			f := typ.Field(i)
			if f.Tag.Get("boil") == col {
				updateMap[col] = value.Field(i).Interface()
			}
		}
	}

	slice := UserSlice{o}
	if rowsAff, err := slice.UpdateAll(ctx, tx, updateMap); err != nil {
		t.Error(err)
	} else if rowsAff != 1 {
		t.Error("wanted one record updated but got", rowsAff)
	}
}
|
60
neterror.go
Normal file
60
neterror.go
Normal file
@ -0,0 +1,60 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
// netError is the contract for errors that know how to serve themselves
// as an HTTP response. userFacingError and systemError both implement it
// (see the compile-time assertions below in this file).
type netError interface {
	Error() string                 // standard error interface
	Unwrap() error                 // expose the wrapped cause for errors.Is / errors.As
	Respond(w http.ResponseWriter) // write an appropriate HTTP response for this error
}
|
||||||
|
|
||||||
|
type userFacingError struct {
|
||||||
|
code int
|
||||||
|
e error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ufe userFacingError) Error() string {
|
||||||
|
return ufe.e.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ufe userFacingError) Unwrap() error {
|
||||||
|
return ufe.e
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ufe userFacingError) Respond(w http.ResponseWriter) {
|
||||||
|
w.Header().Set(`Content-Type`, `text/plain;charset=UTF-8`)
|
||||||
|
if ufe.code != 0 {
|
||||||
|
w.WriteHeader(ufe.code)
|
||||||
|
} else {
|
||||||
|
w.WriteHeader(400) // default
|
||||||
|
}
|
||||||
|
|
||||||
|
w.Write([]byte(ufe.e.Error()))
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ netError = userFacingError{} // compile-time check: userFacingError satisfies netError
|
||||||
|
|
||||||
|
type systemError struct {
|
||||||
|
e error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (se systemError) Error() string {
|
||||||
|
return se.e.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (se systemError) Unwrap() error {
|
||||||
|
return se.e
|
||||||
|
}
|
||||||
|
|
||||||
|
func (se systemError) Respond(w http.ResponseWriter) {
|
||||||
|
log.Printf(`[internal error] %s`, se.e.Error())
|
||||||
|
|
||||||
|
w.Header().Set(`Content-Type`, `text/plain;charset=UTF-8`)
|
||||||
|
w.WriteHeader(500)
|
||||||
|
w.Write([]byte(`An internal error occurred.`))
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ netError = systemError{} // compile-time check: systemError satisfies netError
|
1041
package-lock.json
generated
Normal file
1041
package-lock.json
generated
Normal file
File diff suppressed because it is too large
Load Diff
25
package.json
Normal file
25
package.json
Normal file
@ -0,0 +1,25 @@
|
|||||||
|
{
|
||||||
|
"name": "webscaffold",
|
||||||
|
"version": "0.0.0",
|
||||||
|
"description": "",
|
||||||
|
"main": "",
|
||||||
|
"scripts": {
|
||||||
|
"build": "npx rollup -c",
|
||||||
|
"watch": "npx rollup -cw"
|
||||||
|
},
|
||||||
|
"author": "",
|
||||||
|
"license": "0BSD",
|
||||||
|
"devDependencies": {
|
||||||
|
"@rollup/plugin-typescript": "^4.1.1",
|
||||||
|
"@types/bootstrap": "^4.3.2",
|
||||||
|
"@types/jquery": "^3.3.35",
|
||||||
|
"less-plugin-clean-css": "^1.5.1",
|
||||||
|
"rollup": "^2.7.2",
|
||||||
|
"rollup-plugin-less": "^1.1.2",
|
||||||
|
"rollup-plugin-terser": "^5.3.0",
|
||||||
|
"tslib": "^1.11.1",
|
||||||
|
"typescript": "^3.8.3"
|
||||||
|
},
|
||||||
|
"dependencies": {
|
||||||
|
}
|
||||||
|
}
|
24
rollup.config.js
Normal file
24
rollup.config.js
Normal file
@ -0,0 +1,24 @@
|
|||||||
|
// rollup.config.js
//
// Bundles webapp/js/app.ts -> wwwroot/js/ (minified CommonJS) and, via the
// less plugin, webapp/css/app.less -> wwwroot/css/app.css. Both outputs are
// gitignored and rebuilt in dev ("npm run build"/"watch") and in Docker.
import typescript from '@rollup/plugin-typescript';
import {terser} from 'rollup-plugin-terser';
import less from 'rollup-plugin-less';
import LessPluginCleanCSS from 'less-plugin-clean-css';

export default {
	input: './webapp/js/app.ts',
	output: {
		dir: 'wwwroot/js/',
		format: 'cjs'
	},
	plugins: [
		typescript(), // compile per tsconfig.json
		terser(),     // minify the JS output
		less({
			input: "./webapp/css/app.less",
			output: "wwwroot/css/app.css",
			// NOTE(review): verify this key name against the installed
			// rollup-plugin-less ^1.1.2 — if it expects `options` rather than
			// `option`, the clean-css minifier silently never runs.
			option: {
				plugins: [new LessPluginCleanCSS({advanced: true})]
			}
		})
	]
};
|
9
schema/2020-0001-initial.sql
Normal file
9
schema/2020-0001-initial.sql
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
-- 2020-0001. The initial schema
-- The Go SQLite driver does not support splitting by multiple statements
-- However, we simulate it by breaking on ENDBRACKET-SEMICOLON, so don't use that otherwise

-- Migration bookkeeping: one row per applied migration id.
CREATE TABLE `schema` (
	id INTEGER PRIMARY KEY NOT NULL
);

-- Record this migration as applied.
INSERT INTO `schema` (id) VALUES (20200001);
|
18
schema/2020-0002-user-table.sql
Normal file
18
schema/2020-0002-user-table.sql
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
-- 2020-0002. Initial user table
-- SQLite doesn't support UUID/BOOLEAN so just use TEXT/INTEGER

CREATE TABLE user (
	id TEXT PRIMARY KEY NOT NULL,     -- UUID stored as text (see seed row below)
	display_name TEXT NOT NULL,
	email TEXT NOT NULL,              -- unique, enforced by the user_email index below
	password_format INTEGER NOT NULL, -- presumably selects the hashing scheme for password_hash — confirm against the Go auth code
	password_hash TEXT NOT NULL,
	is_admin INTEGER NOT NULL         -- boolean stored as INTEGER per the note above
);

CREATE UNIQUE INDEX user_email ON user(email);

-- Seed admin account.
-- NOTE(review): the double-quoted values rely on SQLite's lenient treatment
-- of "..." as string literals; standard SQL reads them as identifiers —
-- consider single quotes.
INSERT INTO user (id, display_name, email, password_format, password_hash, is_admin) VALUES
( "c3e6a5f2-3707-4799-a845-2dc9f51ebc31", "admin", "admin@example.com", 0, "admin", 1 );

-- Record this migration as applied.
INSERT INTO `schema` (id) VALUES (20200002);
|
13
schema/2020-0003-sessions.sql
Normal file
13
schema/2020-0003-sessions.sql
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
-- 2020-0003. Sessions
-- An index is added for optimised time-based cleanup.
-- Session data could either be added directly as columns to this table, for type-safety; or as an untyped json data column

CREATE TABLE session (
	id TEXT PRIMARY KEY NOT NULL, -- presumably the session key handed to the client — confirm against the Go session code
	mtime INTEGER NOT NULL,       -- last-modified time; indexed below for cleanup
	user_id TEXT NOT NULL         -- references user.id (no FK constraint declared)
);

-- Supports the time-based cleanup mentioned above.
CREATE INDEX session_mtime ON session(mtime);

-- Record this migration as applied.
INSERT INTO `schema` (id) VALUES (20200003);
|
7
sqlboiler.json
Normal file
7
sqlboiler.json
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
{
|
||||||
|
"sqlite3": {
|
||||||
|
"pkgname": "models",
|
||||||
|
"output": "models",
|
||||||
|
"dbname": "./webscaffold.db3"
|
||||||
|
}
|
||||||
|
}
|
8
tools.go
Normal file
8
tools.go
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
// +build ignore
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/volatiletech/sqlboiler"
|
||||||
|
"github.com/volatiletech/sqlboiler-sqlite3"
|
||||||
|
)
|
73
tsconfig.json
Normal file
73
tsconfig.json
Normal file
@ -0,0 +1,73 @@
|
|||||||
|
{
|
||||||
|
"compilerOptions": {
|
||||||
|
/* Basic Options */
|
||||||
|
// "incremental": true, /* Enable incremental compilation */
|
||||||
|
"target": "es3", /* Specify ECMAScript target version: 'ES3' (default), 'ES5', 'ES2015', 'ES2016', 'ES2017', 'ES2018', 'ES2019', 'ES2020', or 'ESNEXT'. */
|
||||||
|
"module": "es2015", /* Specify module code generation: 'none', 'commonjs', 'amd', 'system', 'umd', 'es2015', 'es2020', or 'ESNext'. */
|
||||||
|
|
||||||
|
/* Specify library files to be included in the compilation. */
|
||||||
|
"lib": [
|
||||||
|
"DOM", // assume `window` exists
|
||||||
|
"es2015" // assume presence of Promise() in environment (FIXME needs polyfill for IE11)
|
||||||
|
|
||||||
|
],
|
||||||
|
|
||||||
|
// "allowJs": true, /* Allow javascript files to be compiled. */
|
||||||
|
// "checkJs": true, /* Report errors in .js files. */
|
||||||
|
// "jsx": "preserve", /* Specify JSX code generation: 'preserve', 'react-native', or 'react'. */
|
||||||
|
// "declaration": true, /* Generates corresponding '.d.ts' file. */
|
||||||
|
// "declarationMap": true, /* Generates a sourcemap for each corresponding '.d.ts' file. */
|
||||||
|
// "sourceMap": true, /* Generates corresponding '.map' file. */
|
||||||
|
// "outFile": "wwwroot/js/app.js", /* Concatenate and emit output to single file. */
|
||||||
|
// "outDir": "./", /* Redirect output structure to the directory. */
|
||||||
|
// "rootDir": "./", /* Specify the root directory of input files. Use to control the output directory structure with --outDir. */
|
||||||
|
// "composite": true, /* Enable project compilation */
|
||||||
|
// "tsBuildInfoFile": "./", /* Specify file to store incremental compilation information */
|
||||||
|
// "removeComments": true, /* Do not emit comments to output. */
|
||||||
|
// "noEmit": true, /* Do not emit outputs. */
|
||||||
|
// "importHelpers": true, /* Import emit helpers from 'tslib'. */
|
||||||
|
// "downlevelIteration": true, /* Provide full support for iterables in 'for-of', spread, and destructuring when targeting 'ES5' or 'ES3'. */
|
||||||
|
// "isolatedModules": true, /* Transpile each file as a separate module (similar to 'ts.transpileModule'). */
|
||||||
|
|
||||||
|
/* Strict Type-Checking Options */
|
||||||
|
"strict": true, /* Enable all strict type-checking options. */
|
||||||
|
// "noImplicitAny": true, /* Raise error on expressions and declarations with an implied 'any' type. */
|
||||||
|
// "strictNullChecks": true, /* Enable strict null checks. */
|
||||||
|
// "strictFunctionTypes": true, /* Enable strict checking of function types. */
|
||||||
|
// "strictBindCallApply": true, /* Enable strict 'bind', 'call', and 'apply' methods on functions. */
|
||||||
|
// "strictPropertyInitialization": true, /* Enable strict checking of property initialization in classes. */
|
||||||
|
// "noImplicitThis": true, /* Raise error on 'this' expressions with an implied 'any' type. */
|
||||||
|
// "alwaysStrict": true, /* Parse in strict mode and emit "use strict" for each source file. */
|
||||||
|
|
||||||
|
/* Additional Checks */
|
||||||
|
// "noUnusedLocals": true, /* Report errors on unused locals. */
|
||||||
|
// "noUnusedParameters": true, /* Report errors on unused parameters. */
|
||||||
|
// "noImplicitReturns": true, /* Report error when not all code paths in function return a value. */
|
||||||
|
// "noFallthroughCasesInSwitch": true, /* Report errors for fallthrough cases in switch statement. */
|
||||||
|
|
||||||
|
/* Module Resolution Options */
|
||||||
|
// "moduleResolution": "node", /* Specify module resolution strategy: 'node' (Node.js) or 'classic' (TypeScript pre-1.6). */
|
||||||
|
// "baseUrl": "./", /* Base directory to resolve non-absolute module names. */
|
||||||
|
// "paths": {}, /* A series of entries which re-map imports to lookup locations relative to the 'baseUrl'. */
|
||||||
|
// "rootDirs": [], /* List of root folders whose combined content represents the structure of the project at runtime. */
|
||||||
|
// "typeRoots": [], /* List of folders to include type definitions from. */
|
||||||
|
// "types": [], /* Type declaration files to be included in compilation. */
|
||||||
|
// "allowSyntheticDefaultImports": true, /* Allow default imports from modules with no default export. This does not affect code emit, just typechecking. */
|
||||||
|
"esModuleInterop": true, /* Enables emit interoperability between CommonJS and ES Modules via creation of namespace objects for all imports. Implies 'allowSyntheticDefaultImports'. */
|
||||||
|
// "preserveSymlinks": true, /* Do not resolve the real path of symlinks. */
|
||||||
|
// "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. */
|
||||||
|
|
||||||
|
/* Source Map Options */
|
||||||
|
// "sourceRoot": "", /* Specify the location where debugger should locate TypeScript files instead of source locations. */
|
||||||
|
// "mapRoot": "", /* Specify the location where debugger should locate map files instead of generated locations. */
|
||||||
|
// "inlineSourceMap": true, /* Emit a single file with source maps instead of having a separate file. */
|
||||||
|
// "inlineSources": true, /* Emit the source alongside the sourcemaps within a single file; requires '--inlineSourceMap' or '--sourceMap' to be set. */
|
||||||
|
|
||||||
|
/* Experimental Options */
|
||||||
|
// "experimentalDecorators": true, /* Enables experimental support for ES7 decorators. */
|
||||||
|
// "emitDecoratorMetadata": true, /* Enables experimental support for emitting type metadata for decorators. */
|
||||||
|
|
||||||
|
/* Advanced Options */
|
||||||
|
"forceConsistentCasingInFileNames": true /* Disallow inconsistently-cased references to the same file. */
|
||||||
|
}
|
||||||
|
}
|
37
webapp/css/app.less
Normal file
37
webapp/css/app.less
Normal file
@ -0,0 +1,37 @@
|
|||||||
|
/* Sticky footer styles */

html {
	position: relative;
	min-height: 100%; /* so the absolutely-positioned .footer tracks the full page height */
}

/* Shared by body's reserved margin and .footer's box so content never hides under the footer */
@sticky-footer-height: 60px;

body {
	margin-bottom: @sticky-footer-height;
}

.footer {
	position: absolute;
	bottom: 0;
	width: 100%;
	height: @sticky-footer-height;
	line-height: @sticky-footer-height; /* vertically centre the single line of footer text */
	background-color: #f5f5f5;
}

/* Toasts */

/* Zero-height anchor so toasts overlay the top-right corner without
   affecting layout; the inner markup is created by MainApplication. */
.toast-container-outer {
	position: relative;
	height: 0 !important;

	.toast-container {
		position: absolute;
		right: 1em;
		top: 1em;

		min-width: 200px;
	}

}
|
21
webapp/js/App/API.ts
Normal file
21
webapp/js/App/API.ts
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
|
||||||
|
// Shape of the JSON body returned by POST /api/v1/login (see APIClient.Login).
export class ApiLoginResponse {
	SessionKey: string = "" // opaque session token; "" means not logged in
}
|
||||||
|
|
||||||
|
export class APIClient {
|
||||||
|
|
||||||
|
sessionKey: string = ""
|
||||||
|
|
||||||
|
constructor() {}
|
||||||
|
|
||||||
|
async Login(email: string, password: string): Promise<ApiLoginResponse> {
|
||||||
|
const postURL = window.location.protocol + "//" + window.location.host + "/api/v1/login";
|
||||||
|
const postData = {"email": email, "password": password};
|
||||||
|
|
||||||
|
let postResult = await $.post(postURL, $.param(postData)).promise();
|
||||||
|
|
||||||
|
return postResult as ApiLoginResponse; // hope the server/client types remain in sync
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
15
webapp/js/App/Component.ts
Normal file
15
webapp/js/App/Component.ts
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
|
||||||
|
export default abstract class Component {
|
||||||
|
|
||||||
|
$area: JQuery<HTMLElement>
|
||||||
|
|
||||||
|
constructor() {
|
||||||
|
this.$area = $("<div>");
|
||||||
|
}
|
||||||
|
|
||||||
|
mountInto($parent: JQuery<HTMLElement>): void {
|
||||||
|
$parent.html('');
|
||||||
|
$parent.append(this.$area);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
89
webapp/js/App/MainApplication.ts
Normal file
89
webapp/js/App/MainApplication.ts
Normal file
@ -0,0 +1,89 @@
|
|||||||
|
import state from "./state.js";
|
||||||
|
import Component from "./Component.js";
|
||||||
|
import LoginPage from "./Page/LoginPage.js";
|
||||||
|
import HomePage from "./Page/HomePage.js";
|
||||||
|
import { html, toast, hesc } from "./util.js";
|
||||||
|
|
||||||
|
/**
 * Top-level single-page-app shell: renders the static chrome (header,
 * footer, toast anchor) and routes location.hash changes to page components.
 */
export default class MainApplication extends Component {

	constructor() {
		super();

		$("title").text('Application');

		// Static chrome; pages are mounted into <main class="component-page">
		// by renderPage(). The toast-container divs anchor util.ts's toast().
		this.$area.html(html`
			<div class="toast-container-outer"><div class="toast-container"></div></div>

			<div class="d-flex flex-column flex-md-row align-items-center p-3 px-md-4 mb-3 bg-white border-bottom box-shadow">
				<h5 class="my-0 mr-md-auto font-weight-normal">Application</h5>
				<nav class="my-2 my-md-0 mr-md-3">
					<a class="p-2 text-dark" href="#">Features</a>
					<a class="p-2 text-dark" href="#">Enterprise</a>
					<a class="p-2 text-dark" href="#">Support</a>
					<a class="p-2 text-dark" href="#">Pricing</a>
				</nav>
				<a class="btn btn-outline-primary" href="#">Sign up</a>
			</div>

			<main class="component-page container"></main>

			<footer class="footer">
				<div class="container">
					<span class="text-muted">Copyright © 2020</span>
				</div>
			</footer>
		`);
	}

	/** Mount the shell, route once for the initial hash, then re-route on every change. */
	mountInto($parent: JQuery<HTMLElement>): void {
		super.mountInto($parent);

		this.routeToHash();
		window.addEventListener('hashchange', (ev) => this.routeToHash(), false);
	}

	// routeToHash maps the current location.hash to a page component.
	// Redirecting (assigning window.location.hash) re-triggers this handler
	// via the hashchange listener installed in mountInto.
	routeToHash() {

		switch (window.location.hash.substr(1)) {
			case "/": {
				// Redirect to "home page" based on our current state
				if (state.isLoggedIn()) {
					window.location.hash = `/home`
				} else {
					window.location.hash = `/login`
				}
			} break;

			case "/home": {
				// Guard: /home is only for authenticated users.
				if (!state.isLoggedIn()) {
					toast(hesc("Unauthorised"), hesc("Please log in."), "danger");
					window.location.hash = `/`;
					return;
				}

				let p = new HomePage();
				this.renderPage(p);
			} break;

			case "/login": {
				// Already logged in: bounce back through "/" to reach /home.
				if (state.isLoggedIn()) {
					window.location.hash = `/`;
					return;
				}

				let p = new LoginPage();
				this.renderPage(p);
			} break;

			default: {
				// Redirect to /
				window.location.hash = `/`
			} break;
		}
	}

	/** Swap the given page into the shell's <main class="component-page"> slot. */
	renderPage(p: Component): void {
		p.mountInto(this.$area.find(".component-page"));
	}

}
|
12
webapp/js/App/Page/HomePage.ts
Normal file
12
webapp/js/App/Page/HomePage.ts
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
import Component from "../Component";
|
||||||
|
import state from "../state";
|
||||||
|
|
||||||
|
export default class HomePage extends Component {
|
||||||
|
|
||||||
|
constructor() {
|
||||||
|
super();
|
||||||
|
|
||||||
|
this.$area.html("home page (logged in with session key " + state.api.sessionKey + ")");
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
58
webapp/js/App/Page/LoginPage.ts
Normal file
58
webapp/js/App/Page/LoginPage.ts
Normal file
@ -0,0 +1,58 @@
|
|||||||
|
import Component from "../Component";
|
||||||
|
import state from "../state";
|
||||||
|
import { html, toast, hesc } from "../util";
|
||||||
|
|
||||||
|
/**
 * Login form page: collects email/password, calls APIClient.Login, stores
 * the resulting session key in global state, then navigates to the homepage.
 */
export default class LoginPage extends Component {

	constructor() {
		super();

		this.$area.html(html`
			<div class="row justify-content-center">
				<div class="col-4 card">
					<form class="form-group app-login card-body">
						<h2>Log in</h2>
						<input class="form-control login-email" type="email" placeholder="Email"><br>
						<input class="form-control login-passwd" type="password" placeholder="Password"><br>
						<input type="submit" class="btn btn-primary" value="Log in">
					</form>
				</div>
			</div>
		`);

		this.$area.find(".app-login").on('submit', (ev) => this.doLogin(ev));
	}

	// doLogin handles the form submit asynchronously; the browser's own
	// form POST is suppressed both by preventDefault and the false return.
	async doLogin(ev: JQuery.SubmitEvent) {
		ev.preventDefault();

		let email = this.$area.find(".login-email").val() as string;
		let passwd = this.$area.find(".login-passwd").val() as string;

		try {
			let resp = await state.api.Login(email, passwd);
			this.$area.html(JSON.stringify(resp));

			// Stash our successful login state
			state.api.sessionKey = resp.SessionKey

			// Navigate to homepage
			window.location.hash = "/" // will take us to the homepage now

			toast("Logged in", "", "success")

		} catch (ex) {
			// network error or some other kind of problem
			// (jQuery rejects with its jqXHR object; responseText carries the
			// server's plain-text error message when one was sent)
			if (("responseText" in ex) && ex.responseText.length > 0) {
				toast(hesc("Login failed"), hesc(ex.responseText), "danger");

			} else {
				toast(hesc("Login failed"), hesc("An unknown error occurred.\n\n" + JSON.stringify(ex)), "danger");
			}

		}

		return false; // like preventDefault()
	}

}
|
20
webapp/js/App/state.ts
Normal file
20
webapp/js/App/state.ts
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
// MUTABLE STATE
|
||||||
|
|
||||||
|
import { APIClient } from "./API";
|
||||||
|
|
||||||
|
class State {
|
||||||
|
|
||||||
|
api: APIClient;
|
||||||
|
|
||||||
|
constructor() {
|
||||||
|
this.api = new APIClient();
|
||||||
|
}
|
||||||
|
|
||||||
|
isLoggedIn(): boolean {
|
||||||
|
return this.api.sessionKey != ""
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// Module-level singleton: every importer shares this one State instance.
let s = new State();
export default s;
|
60
webapp/js/App/util.ts
Normal file
60
webapp/js/App/util.ts
Normal file
@ -0,0 +1,60 @@
|
|||||||
|
|
||||||
|
/**
|
||||||
|
* HTML escape
|
||||||
|
*
|
||||||
|
* @param {string} sz
|
||||||
|
* @return {string}
|
||||||
|
*/
|
||||||
|
export function hesc(sz: string): string {
|
||||||
|
return sz
|
||||||
|
.replace(/&/g,'&')
|
||||||
|
.replace(/</g,'<')
|
||||||
|
.replace(/>/g,'>')
|
||||||
|
.replace(/"/g,'"')
|
||||||
|
.replace(/'/g,''');
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* html is a no-op function for ES6 literals.
|
||||||
|
* Install the `lit-html` extension in VSCode to use this as a syntax-highlighted string literal.
|
||||||
|
*
|
||||||
|
* @param strings {TemplateStringsArray}
|
||||||
|
* @param keys {string[]}
|
||||||
|
*/
|
||||||
|
export function html(strings: TemplateStringsArray, ...keys: string[]): string {
|
||||||
|
let ret = [];
|
||||||
|
for (let i = 0; i < strings.length - 1; ++i) {
|
||||||
|
ret.push(strings[i], keys[i]);
|
||||||
|
}
|
||||||
|
ret.push(strings[strings.length - 1]);
|
||||||
|
return ret.join("");
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
 * toast shows a mini temporary informational popup, using bootstrap's toast library.
 * The element is appended to .toast-container (created by MainApplication),
 * auto-hides after 5 seconds, and removes itself from the DOM once hidden.
 *
 * NOTE(review): despite the *Html parameter names, both title and body are
 * passed through hesc() here, while some callers (MainApplication, LoginPage)
 * already call toast(hesc(...), ...) — that double-escapes. Confirm which
 * layer should own the escaping.
 *
 * @param titleHtml {string}
 * @param bodyHtml {string}
 * @param colorClass {string} One of 'info', 'success', 'danger'
 */
export function toast(titleHtml: string, bodyHtml: string, colorClass: string = "info") {

	// colorClass is spliced into a bg-* class; title/body are escaped.
	const template = html`
		<div class="toast" role="alert" aria-live="assertive" aria-atomic="true" data-autohide="true" data-delay="5000">
			<div class="toast-header bg-` + colorClass + ` text-white">
				<strong class="mr-auto">` + hesc(titleHtml) + html`</strong>
				<button type="button" class="ml-2 mb-1 close" data-dismiss="toast" aria-label="Close">
					<span aria-hidden="true">×</span>
				</button>
			</div>
			<div class="toast-body">` + hesc(bodyHtml) + html`</div>
		</div>`;

	let $t = $(template);
	$(".toast-container").append($t);
	$t.toast('show');
	// Drop the node once bootstrap has finished hiding it, so dismissed
	// toasts don't accumulate in the DOM.
	$t.on('hidden.bs.toast', function () {
		$t.remove();
	})

}
|
8
webapp/js/app.ts
Normal file
8
webapp/js/app.ts
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
// Application entry point; rollup bundles this into wwwroot/js/app.js.
import MainApplication from './App/MainApplication.js';

import '../css/app.less'; // hit the rollup.js loader

// On DOM ready, build the single-page app and take over <body>.
$(function() {
	let app = new MainApplication();
	app.mountInto( $("body") );
});
|
7
wwwroot/css/bootstrap.min.css
vendored
Normal file
7
wwwroot/css/bootstrap.min.css
vendored
Normal file
File diff suppressed because one or more lines are too long
21
wwwroot/index.html
Normal file
21
wwwroot/index.html
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
<!DOCTYPE html>
|
||||||
|
<html lang="en">
|
||||||
|
<head>
|
||||||
|
<meta http-equiv="X-UA-Compatible" content="IE=edge">
|
||||||
|
<meta charset="UTF-8">
|
||||||
|
<meta http-equiv="Content-Security-Policy" content="default-src 'self'">
|
||||||
|
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||||
|
|
||||||
|
<title>Loading...</title>
|
||||||
|
|
||||||
|
<link rel="stylesheet" type="text/css" href="css/bootstrap.min.css" integrity="sha384-Vkoo8x4CGsO3+Hhxv8T/Q5PaXtkKtu6ug5TOeNV6gBiFeWPGFN9MuhOf23Q9Ifjh">
|
||||||
|
<link rel="stylesheet" type="text/css" href="css/app.css">
|
||||||
|
</head>
|
||||||
|
<body>
<!--[if IE ]>
<script src="js/vendor/corejs-3.6.5.min.js" integrity="sha384-TTVcaKdKKsLSvxkJU8mv/ZrkBlSMUiXn57PWhpGFzfeuzqQW/krdvVzW8yqtTqz5"></script>
<![endif]-->
<script src="js/vendor/jquery-3.5.0.min.js" integrity="sha384-LVoNJ6yst/aLxKvxwp6s2GAabqPczfWh6xzm38S/YtjUyZ+3aTKOnD/OJVGYLZDl"></script>
<script src="js/vendor/bootstrap.bundle.min.js" integrity="sha384-6khuMg9gaYr5AxOqhkVIODVIvm9ynTT5J4V1cfthmT+emCG6yVmEZsRHdxlotUnm"></script>
<script type="module" src="js/app.js"></script>
</body>
</html>
|
7
wwwroot/js/vendor/bootstrap.bundle.min.js
vendored
Normal file
7
wwwroot/js/vendor/bootstrap.bundle.min.js
vendored
Normal file
File diff suppressed because one or more lines are too long
12
wwwroot/js/vendor/corejs-3.6.5.min.js
vendored
Normal file
12
wwwroot/js/vendor/corejs-3.6.5.min.js
vendored
Normal file
File diff suppressed because one or more lines are too long
2
wwwroot/js/vendor/jquery-3.5.0.min.js
vendored
Normal file
2
wwwroot/js/vendor/jquery-3.5.0.min.js
vendored
Normal file
File diff suppressed because one or more lines are too long
Loading…
Reference in New Issue
Block a user