diff --git a/plugins/jsvm/internal/types/generated/types.d.ts b/plugins/jsvm/internal/types/generated/types.d.ts index 273e0cab..3032b9b3 100644 --- a/plugins/jsvm/internal/types/generated/types.d.ts +++ b/plugins/jsvm/internal/types/generated/types.d.ts @@ -1,4 +1,4 @@ -// 1750184565 +// 1750492181 // GENERATED CODE - DO NOT MODIFY BY HAND // ------------------------------------------------------------------- @@ -1802,8 +1802,8 @@ namespace os { * than ReadFrom. This is used to permit ReadFrom to call io.Copy * without leading to a recursive call to ReadFrom. */ - type _shrmNzY = noReadFrom&File - interface fileWithoutReadFrom extends _shrmNzY { + type _slvUKNx = noReadFrom&File + interface fileWithoutReadFrom extends _slvUKNx { } interface File { /** @@ -1847,8 +1847,8 @@ namespace os { * than WriteTo. This is used to permit WriteTo to call io.Copy * without leading to a recursive call to WriteTo. */ - type _sxBBgtH = noWriteTo&File - interface fileWithoutWriteTo extends _sxBBgtH { + type _sGqrSOp = noWriteTo&File + interface fileWithoutWriteTo extends _sGqrSOp { } interface File { /** @@ -2492,8 +2492,8 @@ namespace os { * * The methods of File are safe for concurrent use. */ - type _sSLeaYx = file - interface File extends _sSLeaYx { + type _sgluwGO = file + interface File extends _sgluwGO { } /** * A FileInfo describes a file and is returned by [Stat] and [Lstat]. @@ -3239,14 +3239,14 @@ namespace dbx { /** * MssqlBuilder is the builder for SQL Server databases. */ - type _swcgtcI = BaseBuilder - interface MssqlBuilder extends _swcgtcI { + type _sMQOVvy = BaseBuilder + interface MssqlBuilder extends _sMQOVvy { } /** * MssqlQueryBuilder is the query builder for SQL Server databases. */ - type _sPWbwSQ = BaseQueryBuilder - interface MssqlQueryBuilder extends _sPWbwSQ { + type _sdzZflg = BaseQueryBuilder + interface MssqlQueryBuilder extends _sdzZflg { } interface newMssqlBuilder { /** @@ -3317,8 +3317,8 @@ namespace dbx { /** * MysqlBuilder is the builder for MySQL databases. */ - type _snuxlXk = BaseBuilder - interface MysqlBuilder extends _snuxlXk { + type _siQJCaD = BaseBuilder + interface MysqlBuilder extends _siQJCaD { } interface newMysqlBuilder { /** @@ -3393,14 +3393,14 @@ namespace dbx { /** * OciBuilder is the builder for Oracle databases. */ - type _stDiqZW = BaseBuilder - interface OciBuilder extends _stDiqZW { + type _sQGrzVB = BaseBuilder + interface OciBuilder extends _sQGrzVB { } /** * OciQueryBuilder is the query builder for Oracle databases. */ - type _sVFXgum = BaseQueryBuilder - interface OciQueryBuilder extends _sVFXgum { + type _sINWAUj = BaseQueryBuilder + interface OciQueryBuilder extends _sINWAUj { } interface newOciBuilder { /** @@ -3463,8 +3463,8 @@ namespace dbx { /** * PgsqlBuilder is the builder for PostgreSQL databases. */ - type _svdgbmE = BaseBuilder - interface PgsqlBuilder extends _svdgbmE { + type _sCvSmEV = BaseBuilder + interface PgsqlBuilder extends _sCvSmEV { } interface newPgsqlBuilder { /** @@ -3531,8 +3531,8 @@ namespace dbx { /** * SqliteBuilder is the builder for SQLite databases. */ - type _swTltNS = BaseBuilder - interface SqliteBuilder extends _swTltNS { + type _sJSOFqA = BaseBuilder + interface SqliteBuilder extends _sJSOFqA { } interface newSqliteBuilder { /** @@ -3631,8 +3631,8 @@ namespace dbx { /** * StandardBuilder is the builder that is used by DB for an unknown driver. 
*/ - type _sNGKXPJ = BaseBuilder - interface StandardBuilder extends _sNGKXPJ { + type _sOzLvzF = BaseBuilder + interface StandardBuilder extends _sOzLvzF { } interface newStandardBuilder { /** @@ -3698,8 +3698,8 @@ namespace dbx { * DB enhances sql.DB by providing a set of DB-agnostic query building methods. * DB allows easier query building and population of data into Go variables. */ - type _sfVPElP = Builder - interface DB extends _sfVPElP { + type _sElkCGm = Builder + interface DB extends _sElkCGm { /** * FieldMapper maps struct fields to DB columns. Defaults to DefaultFieldMapFunc. */ @@ -4503,8 +4503,8 @@ namespace dbx { * Rows enhances sql.Rows by providing additional data query methods. * Rows can be obtained by calling Query.Rows(). It is mainly used to populate data row by row. */ - type _socUsAx = sql.Rows - interface Rows extends _socUsAx { + type _suxYfvT = sql.Rows + interface Rows extends _suxYfvT { } interface Rows { /** @@ -4876,8 +4876,8 @@ namespace dbx { }): string } interface structInfo { } - type _sVhGYlo = structInfo - interface structValue extends _sVhGYlo { + type _sWCgpKn = structInfo + interface structValue extends _sWCgpKn { } interface fieldInfo { } @@ -4916,8 +4916,8 @@ namespace dbx { /** * Tx enhances sql.Tx with additional querying methods. */ - type _sVsqpdo = Builder - interface Tx extends _sVsqpdo { + type _sEknhqZ = Builder + interface Tx extends _sEknhqZ { } interface Tx { /** @@ -5172,8 +5172,8 @@ namespace filesystem { */ open(): io.ReadSeekCloser } - type _sjaKQzS = bytes.Reader - interface bytesReadSeekCloser extends _sjaKQzS { + type _sGXtNgV = bytes.Reader + interface bytesReadSeekCloser extends _sGXtNgV { } interface bytesReadSeekCloser { /** @@ -7138,8 +7138,8 @@ namespace core { /** * AuthOrigin defines a Record proxy for working with the authOrigins collection. */ - type _szSNrqL = Record - interface AuthOrigin extends _szSNrqL { + type _szBYLhp = Record + interface AuthOrigin extends _szBYLhp { } interface newAuthOrigin { /** @@ -7884,8 +7884,8 @@ namespace core { /** * @todo experiment eventually replacing the rules *string with a struct? */ - type _sqBqwBt = BaseModel - interface baseCollection extends _sqBqwBt { + type _sWkvAgL = BaseModel + interface baseCollection extends _sWkvAgL { listRule?: string viewRule?: string createRule?: string @@ -7912,8 +7912,8 @@ namespace core { /** * Collection defines the table, fields and various options related to a set of records. */ - type _sUftAKD = baseCollection&collectionAuthOptions&collectionViewOptions - interface Collection extends _sUftAKD { + type _sOHZZKe = baseCollection&collectionAuthOptions&collectionViewOptions + interface Collection extends _sOHZZKe { } interface newCollection { /** @@ -8923,8 +8923,8 @@ namespace core { /** * RequestEvent defines the PocketBase router handler event. 
*/ - type _sFdFukM = router.Event - interface RequestEvent extends _sFdFukM { + type _stvQwFb = router.Event + interface RequestEvent extends _stvQwFb { app: App auth?: Record } @@ -8984,8 +8984,8 @@ namespace core { */ clone(): (RequestInfo) } - type _sAeWehd = hook.Event&RequestEvent - interface BatchRequestEvent extends _sAeWehd { + type _sIpKrPW = hook.Event&RequestEvent + interface BatchRequestEvent extends _sIpKrPW { batch: Array<(InternalRequest | undefined)> } interface InternalRequest { @@ -9022,24 +9022,24 @@ namespace core { interface baseCollectionEventData { tags(): Array } - type _swrrYvS = hook.Event - interface BootstrapEvent extends _swrrYvS { + type _sKMBsNz = hook.Event + interface BootstrapEvent extends _sKMBsNz { app: App } - type _solNoDP = hook.Event - interface TerminateEvent extends _solNoDP { + type _syPQtoZ = hook.Event + interface TerminateEvent extends _syPQtoZ { app: App isRestart: boolean } - type _sxpKNWB = hook.Event - interface BackupEvent extends _sxpKNWB { + type _sfiueBt = hook.Event + interface BackupEvent extends _sfiueBt { app: App context: context.Context name: string // the name of the backup to create/restore. exclude: Array // list of dir entries to exclude from the backup create/restore. } - type _sITwiYX = hook.Event - interface ServeEvent extends _sITwiYX { + type _sEQOjgi = hook.Event + interface ServeEvent extends _sEQOjgi { app: App router?: router.Router server?: http.Server @@ -9062,31 +9062,31 @@ namespace core { */ installerFunc: (app: App, systemSuperuser: Record, baseURL: string) => void } - type _sajiSXK = hook.Event&RequestEvent - interface SettingsListRequestEvent extends _sajiSXK { + type _seHJyPG = hook.Event&RequestEvent + interface SettingsListRequestEvent extends _seHJyPG { settings?: Settings } - type _sooGarS = hook.Event&RequestEvent - interface SettingsUpdateRequestEvent extends _sooGarS { + type _siGVRhO = hook.Event&RequestEvent + interface SettingsUpdateRequestEvent extends _siGVRhO { oldSettings?: Settings newSettings?: Settings } - type _sHUnoPj = hook.Event - interface SettingsReloadEvent extends _sHUnoPj { + type _srYLoxE = hook.Event + interface SettingsReloadEvent extends _srYLoxE { app: App } - type _sXbmDYY = hook.Event - interface MailerEvent extends _sXbmDYY { + type _sILwIuS = hook.Event + interface MailerEvent extends _sILwIuS { app: App mailer: mailer.Mailer message?: mailer.Message } - type _samlPkW = MailerEvent&baseRecordEventData - interface MailerRecordEvent extends _samlPkW { + type _sXnxBfv = MailerEvent&baseRecordEventData + interface MailerRecordEvent extends _sXnxBfv { meta: _TygojaDict } - type _sEdVMuy = hook.Event&baseModelEventData - interface ModelEvent extends _sEdVMuy { + type _sBozUTh = hook.Event&baseModelEventData + interface ModelEvent extends _sBozUTh { app: App context: context.Context /** @@ -9098,12 +9098,12 @@ namespace core { */ type: string } - type _sNJYffc = ModelEvent - interface ModelErrorEvent extends _sNJYffc { + type _slFqYSy = ModelEvent + interface ModelErrorEvent extends _slFqYSy { error: Error } - type _sDKPXmo = hook.Event&baseRecordEventData - interface RecordEvent extends _sDKPXmo { + type _sEpesWL = hook.Event&baseRecordEventData + interface RecordEvent extends _sEpesWL { app: App context: context.Context /** @@ -9115,12 +9115,12 @@ namespace core { */ type: string } - type _sFoisED = RecordEvent - interface RecordErrorEvent extends _sFoisED { + type _syOyPxo = RecordEvent + interface RecordErrorEvent extends _syOyPxo { error: Error } - type _solsIoz = 
hook.Event&baseCollectionEventData - interface CollectionEvent extends _solsIoz { + type _sPJLzXM = hook.Event&baseCollectionEventData + interface CollectionEvent extends _sPJLzXM { app: App context: context.Context /** @@ -9132,95 +9132,95 @@ namespace core { */ type: string } - type _skPZKol = CollectionEvent - interface CollectionErrorEvent extends _skPZKol { + type _sjinXTe = CollectionEvent + interface CollectionErrorEvent extends _sjinXTe { error: Error } - type _sUExQHa = hook.Event&RequestEvent&baseRecordEventData - interface FileTokenRequestEvent extends _sUExQHa { + type _sljETVy = hook.Event&RequestEvent&baseRecordEventData + interface FileTokenRequestEvent extends _sljETVy { token: string } - type _sovMmpG = hook.Event&RequestEvent&baseCollectionEventData - interface FileDownloadRequestEvent extends _sovMmpG { + type _sIgbmQi = hook.Event&RequestEvent&baseCollectionEventData + interface FileDownloadRequestEvent extends _sIgbmQi { record?: Record fileField?: FileField servedPath: string servedName: string } - type _swAMTDc = hook.Event&RequestEvent - interface CollectionsListRequestEvent extends _swAMTDc { + type _sMeXLpW = hook.Event&RequestEvent + interface CollectionsListRequestEvent extends _sMeXLpW { collections: Array<(Collection | undefined)> result?: search.Result } - type _shjwnIQ = hook.Event&RequestEvent - interface CollectionsImportRequestEvent extends _shjwnIQ { + type _sAAmsFE = hook.Event&RequestEvent + interface CollectionsImportRequestEvent extends _sAAmsFE { collectionsData: Array<_TygojaDict> deleteMissing: boolean } - type _sVNbFSR = hook.Event&RequestEvent&baseCollectionEventData - interface CollectionRequestEvent extends _sVNbFSR { + type _sRisMVM = hook.Event&RequestEvent&baseCollectionEventData + interface CollectionRequestEvent extends _sRisMVM { } - type _sPeRNHM = hook.Event&RequestEvent - interface RealtimeConnectRequestEvent extends _sPeRNHM { + type _sFHfdtB = hook.Event&RequestEvent + interface RealtimeConnectRequestEvent extends _sFHfdtB { client: subscriptions.Client /** * note: modifying it after the connect has no effect */ idleTimeout: time.Duration } - type _sqGagET = hook.Event&RequestEvent - interface RealtimeMessageEvent extends _sqGagET { + type _stuWeTu = hook.Event&RequestEvent + interface RealtimeMessageEvent extends _stuWeTu { client: subscriptions.Client message?: subscriptions.Message } - type _sVggdkE = hook.Event&RequestEvent - interface RealtimeSubscribeRequestEvent extends _sVggdkE { + type _srlvooI = hook.Event&RequestEvent + interface RealtimeSubscribeRequestEvent extends _srlvooI { client: subscriptions.Client subscriptions: Array } - type _styLRkf = hook.Event&RequestEvent&baseCollectionEventData - interface RecordsListRequestEvent extends _styLRkf { + type _sZdLzoS = hook.Event&RequestEvent&baseCollectionEventData + interface RecordsListRequestEvent extends _sZdLzoS { /** * @todo consider removing and maybe add as generic to the search.Result? 
*/ records: Array<(Record | undefined)> result?: search.Result } - type _sqJvhHJ = hook.Event&RequestEvent&baseCollectionEventData - interface RecordRequestEvent extends _sqJvhHJ { + type _sEHnNNa = hook.Event&RequestEvent&baseCollectionEventData + interface RecordRequestEvent extends _sEHnNNa { record?: Record } - type _sXXsATW = hook.Event&baseRecordEventData - interface RecordEnrichEvent extends _sXXsATW { + type _sDEhebY = hook.Event&baseRecordEventData + interface RecordEnrichEvent extends _sDEhebY { app: App requestInfo?: RequestInfo } - type _syTrmrZ = hook.Event&RequestEvent&baseCollectionEventData - interface RecordCreateOTPRequestEvent extends _syTrmrZ { + type _siqByrU = hook.Event&RequestEvent&baseCollectionEventData + interface RecordCreateOTPRequestEvent extends _siqByrU { record?: Record password: string } - type _sXhLZgn = hook.Event&RequestEvent&baseCollectionEventData - interface RecordAuthWithOTPRequestEvent extends _sXhLZgn { + type _sxVhMUx = hook.Event&RequestEvent&baseCollectionEventData + interface RecordAuthWithOTPRequestEvent extends _sxVhMUx { record?: Record otp?: OTP } - type _slCWgfp = hook.Event&RequestEvent&baseCollectionEventData - interface RecordAuthRequestEvent extends _slCWgfp { + type _sUHttoj = hook.Event&RequestEvent&baseCollectionEventData + interface RecordAuthRequestEvent extends _sUHttoj { record?: Record token: string meta: any authMethod: string } - type _srzTpQG = hook.Event&RequestEvent&baseCollectionEventData - interface RecordAuthWithPasswordRequestEvent extends _srzTpQG { + type _suFSrRh = hook.Event&RequestEvent&baseCollectionEventData + interface RecordAuthWithPasswordRequestEvent extends _suFSrRh { record?: Record identity: string identityField: string password: string } - type _sEDRtGd = hook.Event&RequestEvent&baseCollectionEventData - interface RecordAuthWithOAuth2RequestEvent extends _sEDRtGd { + type _szJuivH = hook.Event&RequestEvent&baseCollectionEventData + interface RecordAuthWithOAuth2RequestEvent extends _szJuivH { providerName: string providerClient: auth.Provider record?: Record @@ -9228,41 +9228,41 @@ namespace core { createData: _TygojaDict isNewRecord: boolean } - type _spgcktO = hook.Event&RequestEvent&baseCollectionEventData - interface RecordAuthRefreshRequestEvent extends _spgcktO { + type _sWJzjaz = hook.Event&RequestEvent&baseCollectionEventData + interface RecordAuthRefreshRequestEvent extends _sWJzjaz { record?: Record } - type _shwWBLN = hook.Event&RequestEvent&baseCollectionEventData - interface RecordRequestPasswordResetRequestEvent extends _shwWBLN { + type _sAVRUyz = hook.Event&RequestEvent&baseCollectionEventData + interface RecordRequestPasswordResetRequestEvent extends _sAVRUyz { record?: Record } - type _speOvry = hook.Event&RequestEvent&baseCollectionEventData - interface RecordConfirmPasswordResetRequestEvent extends _speOvry { + type _siJpYAW = hook.Event&RequestEvent&baseCollectionEventData + interface RecordConfirmPasswordResetRequestEvent extends _siJpYAW { record?: Record } - type _scDWcbr = hook.Event&RequestEvent&baseCollectionEventData - interface RecordRequestVerificationRequestEvent extends _scDWcbr { + type _slAGCiC = hook.Event&RequestEvent&baseCollectionEventData + interface RecordRequestVerificationRequestEvent extends _slAGCiC { record?: Record } - type _sELjAfQ = hook.Event&RequestEvent&baseCollectionEventData - interface RecordConfirmVerificationRequestEvent extends _sELjAfQ { + type _sHMgtVP = hook.Event&RequestEvent&baseCollectionEventData + interface 
RecordConfirmVerificationRequestEvent extends _sHMgtVP { record?: Record } - type _szBJTaG = hook.Event&RequestEvent&baseCollectionEventData - interface RecordRequestEmailChangeRequestEvent extends _szBJTaG { + type _sCqCegw = hook.Event&RequestEvent&baseCollectionEventData + interface RecordRequestEmailChangeRequestEvent extends _sCqCegw { record?: Record newEmail: string } - type _sNXBtqs = hook.Event&RequestEvent&baseCollectionEventData - interface RecordConfirmEmailChangeRequestEvent extends _sNXBtqs { + type _shuDwcr = hook.Event&RequestEvent&baseCollectionEventData + interface RecordConfirmEmailChangeRequestEvent extends _shuDwcr { record?: Record newEmail: string } /** * ExternalAuth defines a Record proxy for working with the externalAuths collection. */ - type _sXEHGsG = Record - interface ExternalAuth extends _sXEHGsG { + type _sexJOWK = Record + interface ExternalAuth extends _sexJOWK { } interface newExternalAuth { /** @@ -11724,8 +11724,8 @@ namespace core { interface onlyFieldType { type: string } - type _sLjLozJ = Field - interface fieldWithType extends _sLjLozJ { + type _sSoSBlS = Field + interface fieldWithType extends _sSoSBlS { type: string } interface fieldWithType { @@ -11757,8 +11757,8 @@ namespace core { */ scan(value: any): void } - type _squsSuV = BaseModel - interface Log extends _squsSuV { + type _sgYUQgr = BaseModel + interface Log extends _sgYUQgr { created: types.DateTime data: types.JSONMap message: string @@ -11804,8 +11804,8 @@ namespace core { /** * MFA defines a Record proxy for working with the mfas collection. */ - type _splLXuy = Record - interface MFA extends _splLXuy { + type _smLjswC = Record + interface MFA extends _smLjswC { } interface newMFA { /** @@ -12027,8 +12027,8 @@ namespace core { /** * OTP defines a Record proxy for working with the otps collection. */ - type _sJTTRwU = Record - interface OTP extends _sJTTRwU { + type _srXvShH = Record + interface OTP extends _srXvShH { } interface newOTP { /** @@ -12264,8 +12264,8 @@ namespace core { } interface runner { } - type _sSWZACP = BaseModel - interface Record extends _sSWZACP { + type _smSJmDP = BaseModel + interface Record extends _smSJmDP { } interface newRecord { /** @@ -12740,8 +12740,8 @@ namespace core { * BaseRecordProxy implements the [RecordProxy] interface and it is intended * to be used as embed to custom user provided Record proxy structs. */ - type _sBAWRFO = Record - interface BaseRecordProxy extends _sBAWRFO { + type _sAFoKTi = Record + interface BaseRecordProxy extends _sAFoKTi { } interface BaseRecordProxy { /** @@ -12990,8 +12990,8 @@ namespace core { /** * Settings defines the PocketBase app settings. 
*/ - type _spQoyCa = settings - interface Settings extends _spQoyCa { + type _sIZwcSk = settings + interface Settings extends _sIZwcSk { } interface Settings { /** @@ -13292,8 +13292,8 @@ namespace core { */ durationTime(): time.Duration } - type _sduVFFk = BaseModel - interface Param extends _sduVFFk { + type _sGILwVN = BaseModel + interface Param extends _sGILwVN { created: types.DateTime updated: types.DateTime value: types.JSONRaw @@ -13807,8 +13807,8 @@ namespace apis { */ (limitBytes: number): (hook.Handler) } - type _sKekRgw = io.ReadCloser - interface limitedReader extends _sKekRgw { + type _sIjOXdA = io.ReadCloser + interface limitedReader extends _sIjOXdA { } interface limitedReader { read(b: string|Array): number @@ -13959,8 +13959,8 @@ namespace apis { */ (config: GzipConfig): (hook.Handler) } - type _sIvbujB = http.ResponseWriter&io.Writer - interface gzipResponseWriter extends _sIvbujB { + type _sprtZbI = http.ResponseWriter&io.Writer + interface gzipResponseWriter extends _sprtZbI { } interface gzipResponseWriter { writeHeader(code: number): void @@ -13980,11 +13980,11 @@ namespace apis { interface gzipResponseWriter { unwrap(): http.ResponseWriter } - type _saRHsyh = sync.RWMutex - interface rateLimiter extends _saRHsyh { + type _siaohKb = sync.RWMutex + interface rateLimiter extends _siaohKb { } - type _sDYMFYN = sync.Mutex - interface fixedWindow extends _sDYMFYN { + type _srtOPhz = sync.Mutex + interface fixedWindow extends _srtOPhz { } interface realtimeSubscribeForm { clientId: string @@ -14218,6 +14218,99 @@ namespace apis { } } +namespace pocketbase { + /** + * PocketBase defines a PocketBase app launcher. + * + * It implements [CoreApp] via embedding and all of the app interface methods + * could be accessed directly through the instance (eg. PocketBase.DataDir()). + */ + type _sGDsUzu = CoreApp + interface PocketBase extends _sGDsUzu { + /** + * RootCmd is the main console command + */ + rootCmd?: cobra.Command + } + /** + * Config is the PocketBase initialization config struct. + */ + interface Config { + /** + * hide the default console server info on app startup + */ + hideStartBanner: boolean + /** + * optional default values for the console flags + */ + defaultDev: boolean + defaultDataDir: string // if not set, it will fallback to "./pb_data" + defaultEncryptionEnv: string + defaultQueryTimeout: time.Duration // default to core.DefaultQueryTimeout (in seconds) + /** + * optional DB configurations + */ + dataMaxOpenConns: number // default to core.DefaultDataMaxOpenConns + dataMaxIdleConns: number // default to core.DefaultDataMaxIdleConns + auxMaxOpenConns: number // default to core.DefaultAuxMaxOpenConns + auxMaxIdleConns: number // default to core.DefaultAuxMaxIdleConns + dbConnect: core.DBConnectFunc // default to core.dbConnect + } + interface _new { + /** + * New creates a new PocketBase instance with the default configuration. + * Use [NewWithConfig] if you want to provide a custom configuration. + * + * Note that the application will not be initialized/bootstrapped yet, + * aka. DB connections, migrations, app settings, etc. will not be accessible. + * Everything will be initialized when [PocketBase.Start] is executed. + * If you want to initialize the application before calling [PocketBase.Start], + * then you'll have to manually call [PocketBase.Bootstrap]. + */ + (): (PocketBase) + } + interface newWithConfig { + /** + * NewWithConfig creates a new PocketBase instance with the provided config. 
+ * + * Note that the application will not be initialized/bootstrapped yet, + * aka. DB connections, migrations, app settings, etc. will not be accessible. + * Everything will be initialized when [PocketBase.Start] is executed. + * If you want to initialize the application before calling [PocketBase.Start], + * then you'll have to manually call [PocketBase.Bootstrap]. + */ + (config: Config): (PocketBase) + } + interface PocketBase { + /** + * Start starts the application, aka. registers the default system + * commands (serve, superuser, version) and executes pb.RootCmd. + */ + start(): void + } + interface PocketBase { + /** + * Execute initializes the application (if not already) and executes + * the pb.RootCmd with graceful shutdown support. + * + * This method differs from pb.Start() by not registering the default + * system commands! + */ + execute(): void + } + /** + * coloredWriter is a small wrapper struct to construct a [color.Color] writter. + */ + interface coloredWriter { + } + interface coloredWriter { + /** + * Write writes the p bytes using the colored writer. + */ + write(p: string|Array): number + } +} + /** * Package template is a thin wrapper around the standard html/template * and text/template packages that implements a convenient registry to @@ -14323,99 +14416,6 @@ namespace template { } } -namespace pocketbase { - /** - * PocketBase defines a PocketBase app launcher. - * - * It implements [CoreApp] via embedding and all of the app interface methods - * could be accessed directly through the instance (eg. PocketBase.DataDir()). - */ - type _sEJHVpX = CoreApp - interface PocketBase extends _sEJHVpX { - /** - * RootCmd is the main console command - */ - rootCmd?: cobra.Command - } - /** - * Config is the PocketBase initialization config struct. - */ - interface Config { - /** - * hide the default console server info on app startup - */ - hideStartBanner: boolean - /** - * optional default values for the console flags - */ - defaultDev: boolean - defaultDataDir: string // if not set, it will fallback to "./pb_data" - defaultEncryptionEnv: string - defaultQueryTimeout: time.Duration // default to core.DefaultQueryTimeout (in seconds) - /** - * optional DB configurations - */ - dataMaxOpenConns: number // default to core.DefaultDataMaxOpenConns - dataMaxIdleConns: number // default to core.DefaultDataMaxIdleConns - auxMaxOpenConns: number // default to core.DefaultAuxMaxOpenConns - auxMaxIdleConns: number // default to core.DefaultAuxMaxIdleConns - dbConnect: core.DBConnectFunc // default to core.dbConnect - } - interface _new { - /** - * New creates a new PocketBase instance with the default configuration. - * Use [NewWithConfig] if you want to provide a custom configuration. - * - * Note that the application will not be initialized/bootstrapped yet, - * aka. DB connections, migrations, app settings, etc. will not be accessible. - * Everything will be initialized when [PocketBase.Start] is executed. - * If you want to initialize the application before calling [PocketBase.Start], - * then you'll have to manually call [PocketBase.Bootstrap]. - */ - (): (PocketBase) - } - interface newWithConfig { - /** - * NewWithConfig creates a new PocketBase instance with the provided config. - * - * Note that the application will not be initialized/bootstrapped yet, - * aka. DB connections, migrations, app settings, etc. will not be accessible. - * Everything will be initialized when [PocketBase.Start] is executed. 
- * If you want to initialize the application before calling [PocketBase.Start], - * then you'll have to manually call [PocketBase.Bootstrap]. - */ - (config: Config): (PocketBase) - } - interface PocketBase { - /** - * Start starts the application, aka. registers the default system - * commands (serve, superuser, version) and executes pb.RootCmd. - */ - start(): void - } - interface PocketBase { - /** - * Execute initializes the application (if not already) and executes - * the pb.RootCmd with graceful shutdown support. - * - * This method differs from pb.Start() by not registering the default - * system commands! - */ - execute(): void - } - /** - * coloredWriter is a small wrapper struct to construct a [color.Color] writter. - */ - interface coloredWriter { - } - interface coloredWriter { - /** - * Write writes the p bytes using the colored writer. - */ - write(p: string|Array): number - } -} - /** * Package sync provides basic synchronization primitives such as mutual * exclusion locks. Other than the [Once] and [WaitGroup] types, most are intended @@ -14726,6 +14726,21 @@ namespace bytes { } } +/** + * Package bufio implements buffered I/O. It wraps an io.Reader or io.Writer + * object, creating another object (Reader or Writer) that also implements + * the interface but provides buffering and some help for textual I/O. + */ +namespace bufio { + /** + * ReadWriter stores pointers to a [Reader] and a [Writer]. + * It implements [io.ReadWriter]. + */ + type _szuRqeY = Reader&Writer + interface ReadWriter extends _szuRqeY { + } +} + /** * Package syscall contains an interface to the low-level operating system * primitives. The details vary depending on the underlying system, and @@ -15822,662 +15837,6 @@ namespace fs { interface WalkDirFunc {(path: string, d: DirEntry, err: Error): void } } -namespace exec { - /** - * Cmd represents an external command being prepared or run. - * - * A Cmd cannot be reused after calling its [Cmd.Run], [Cmd.Output] or [Cmd.CombinedOutput] - * methods. - */ - interface Cmd { - /** - * Path is the path of the command to run. - * - * This is the only field that must be set to a non-zero - * value. If Path is relative, it is evaluated relative - * to Dir. - */ - path: string - /** - * Args holds command line arguments, including the command as Args[0]. - * If the Args field is empty or nil, Run uses {Path}. - * - * In typical use, both Path and Args are set by calling Command. - */ - args: Array - /** - * Env specifies the environment of the process. - * Each entry is of the form "key=value". - * If Env is nil, the new process uses the current process's - * environment. - * If Env contains duplicate environment keys, only the last - * value in the slice for each duplicate key is used. - * As a special case on Windows, SYSTEMROOT is always added if - * missing and not explicitly set to the empty string. - */ - env: Array - /** - * Dir specifies the working directory of the command. - * If Dir is the empty string, Run runs the command in the - * calling process's current directory. - */ - dir: string - /** - * Stdin specifies the process's standard input. - * - * If Stdin is nil, the process reads from the null device (os.DevNull). - * - * If Stdin is an *os.File, the process's standard input is connected - * directly to that file. - * - * Otherwise, during the execution of the command a separate - * goroutine reads from Stdin and delivers that data to the command - * over a pipe. 
In this case, Wait does not complete until the goroutine - * stops copying, either because it has reached the end of Stdin - * (EOF or a read error), or because writing to the pipe returned an error, - * or because a nonzero WaitDelay was set and expired. - */ - stdin: io.Reader - /** - * Stdout and Stderr specify the process's standard output and error. - * - * If either is nil, Run connects the corresponding file descriptor - * to the null device (os.DevNull). - * - * If either is an *os.File, the corresponding output from the process - * is connected directly to that file. - * - * Otherwise, during the execution of the command a separate goroutine - * reads from the process over a pipe and delivers that data to the - * corresponding Writer. In this case, Wait does not complete until the - * goroutine reaches EOF or encounters an error or a nonzero WaitDelay - * expires. - * - * If Stdout and Stderr are the same writer, and have a type that can - * be compared with ==, at most one goroutine at a time will call Write. - */ - stdout: io.Writer - stderr: io.Writer - /** - * ExtraFiles specifies additional open files to be inherited by the - * new process. It does not include standard input, standard output, or - * standard error. If non-nil, entry i becomes file descriptor 3+i. - * - * ExtraFiles is not supported on Windows. - */ - extraFiles: Array<(os.File | undefined)> - /** - * SysProcAttr holds optional, operating system-specific attributes. - * Run passes it to os.StartProcess as the os.ProcAttr's Sys field. - */ - sysProcAttr?: syscall.SysProcAttr - /** - * Process is the underlying process, once started. - */ - process?: os.Process - /** - * ProcessState contains information about an exited process. - * If the process was started successfully, Wait or Run will - * populate its ProcessState when the command completes. - */ - processState?: os.ProcessState - err: Error // LookPath error, if any. - /** - * If Cancel is non-nil, the command must have been created with - * CommandContext and Cancel will be called when the command's - * Context is done. By default, CommandContext sets Cancel to - * call the Kill method on the command's Process. - * - * Typically a custom Cancel will send a signal to the command's - * Process, but it may instead take other actions to initiate cancellation, - * such as closing a stdin or stdout pipe or sending a shutdown request on a - * network socket. - * - * If the command exits with a success status after Cancel is - * called, and Cancel does not return an error equivalent to - * os.ErrProcessDone, then Wait and similar methods will return a non-nil - * error: either an error wrapping the one returned by Cancel, - * or the error from the Context. - * (If the command exits with a non-success status, or Cancel - * returns an error that wraps os.ErrProcessDone, Wait and similar methods - * continue to return the command's usual exit status.) - * - * If Cancel is set to nil, nothing will happen immediately when the command's - * Context is done, but a nonzero WaitDelay will still take effect. That may - * be useful, for example, to work around deadlocks in commands that do not - * support shutdown signals but are expected to always finish quickly. - * - * Cancel will not be called if Start returns a non-nil error. 
- */ - cancel: () => void - /** - * If WaitDelay is non-zero, it bounds the time spent waiting on two sources - * of unexpected delay in Wait: a child process that fails to exit after the - * associated Context is canceled, and a child process that exits but leaves - * its I/O pipes unclosed. - * - * The WaitDelay timer starts when either the associated Context is done or a - * call to Wait observes that the child process has exited, whichever occurs - * first. When the delay has elapsed, the command shuts down the child process - * and/or its I/O pipes. - * - * If the child process has failed to exit — perhaps because it ignored or - * failed to receive a shutdown signal from a Cancel function, or because no - * Cancel function was set — then it will be terminated using os.Process.Kill. - * - * Then, if the I/O pipes communicating with the child process are still open, - * those pipes are closed in order to unblock any goroutines currently blocked - * on Read or Write calls. - * - * If pipes are closed due to WaitDelay, no Cancel call has occurred, - * and the command has otherwise exited with a successful status, Wait and - * similar methods will return ErrWaitDelay instead of nil. - * - * If WaitDelay is zero (the default), I/O pipes will be read until EOF, - * which might not occur until orphaned subprocesses of the command have - * also closed their descriptors for the pipes. - */ - waitDelay: time.Duration - } - interface Cmd { - /** - * String returns a human-readable description of c. - * It is intended only for debugging. - * In particular, it is not suitable for use as input to a shell. - * The output of String may vary across Go releases. - */ - string(): string - } - interface Cmd { - /** - * Run starts the specified command and waits for it to complete. - * - * The returned error is nil if the command runs, has no problems - * copying stdin, stdout, and stderr, and exits with a zero exit - * status. - * - * If the command starts but does not complete successfully, the error is of - * type [*ExitError]. Other error types may be returned for other situations. - * - * If the calling goroutine has locked the operating system thread - * with [runtime.LockOSThread] and modified any inheritable OS-level - * thread state (for example, Linux or Plan 9 name spaces), the new - * process will inherit the caller's thread state. - */ - run(): void - } - interface Cmd { - /** - * Start starts the specified command but does not wait for it to complete. - * - * If Start returns successfully, the c.Process field will be set. - * - * After a successful call to Start the [Cmd.Wait] method must be called in - * order to release associated system resources. - */ - start(): void - } - interface Cmd { - /** - * Wait waits for the command to exit and waits for any copying to - * stdin or copying from stdout or stderr to complete. - * - * The command must have been started by [Cmd.Start]. - * - * The returned error is nil if the command runs, has no problems - * copying stdin, stdout, and stderr, and exits with a zero exit - * status. - * - * If the command fails to run or doesn't complete successfully, the - * error is of type [*ExitError]. Other error types may be - * returned for I/O problems. - * - * If any of c.Stdin, c.Stdout or c.Stderr are not an [*os.File], Wait also waits - * for the respective I/O loop copying to or from the process to complete. - * - * Wait releases any resources associated with the [Cmd]. 
- */ - wait(): void - } - interface Cmd { - /** - * Output runs the command and returns its standard output. - * Any returned error will usually be of type [*ExitError]. - * If c.Stderr was nil, Output populates [ExitError.Stderr]. - */ - output(): string|Array - } - interface Cmd { - /** - * CombinedOutput runs the command and returns its combined standard - * output and standard error. - */ - combinedOutput(): string|Array - } - interface Cmd { - /** - * StdinPipe returns a pipe that will be connected to the command's - * standard input when the command starts. - * The pipe will be closed automatically after [Cmd.Wait] sees the command exit. - * A caller need only call Close to force the pipe to close sooner. - * For example, if the command being run will not exit until standard input - * is closed, the caller must close the pipe. - */ - stdinPipe(): io.WriteCloser - } - interface Cmd { - /** - * StdoutPipe returns a pipe that will be connected to the command's - * standard output when the command starts. - * - * [Cmd.Wait] will close the pipe after seeing the command exit, so most callers - * need not close the pipe themselves. It is thus incorrect to call Wait - * before all reads from the pipe have completed. - * For the same reason, it is incorrect to call [Cmd.Run] when using StdoutPipe. - * See the example for idiomatic usage. - */ - stdoutPipe(): io.ReadCloser - } - interface Cmd { - /** - * StderrPipe returns a pipe that will be connected to the command's - * standard error when the command starts. - * - * [Cmd.Wait] will close the pipe after seeing the command exit, so most callers - * need not close the pipe themselves. It is thus incorrect to call Wait - * before all reads from the pipe have completed. - * For the same reason, it is incorrect to use [Cmd.Run] when using StderrPipe. - * See the StdoutPipe example for idiomatic usage. - */ - stderrPipe(): io.ReadCloser - } - interface Cmd { - /** - * Environ returns a copy of the environment in which the command would be run - * as it is currently configured. - */ - environ(): Array - } -} - -/** - * Package syntax parses regular expressions into parse trees and compiles - * parse trees into programs. Most clients of regular expressions will use the - * facilities of package [regexp] (such as [regexp.Compile] and [regexp.Match]) instead of this package. - * - * # Syntax - * - * The regular expression syntax understood by this package when parsing with the [Perl] flag is as follows. - * Parts of the syntax can be disabled by passing alternate flags to [Parse]. - * - * Single characters: - * - * ``` - * . any character, possibly including newline (flag s=true) - * [xyz] character class - * [^xyz] negated character class - * \d Perl character class - * \D negated Perl character class - * [[:alpha:]] ASCII character class - * [[:^alpha:]] negated ASCII character class - * \pN Unicode character class (one-letter name) - * \p{Greek} Unicode character class - * \PN negated Unicode character class (one-letter name) - * \P{Greek} negated Unicode character class - * ``` - * - * Composites: - * - * ``` - * xy x followed by y - * x|y x or y (prefer x) - * ``` - * - * Repetitions: - * - * ``` - * x* zero or more x, prefer more - * x+ one or more x, prefer more - * x? zero or one x, prefer one - * x{n,m} n or n+1 or ... or m x, prefer more - * x{n,} n or more x, prefer more - * x{n} exactly n x - * x*? zero or more x, prefer fewer - * x+? one or more x, prefer fewer - * x?? zero or one x, prefer zero - * x{n,m}? n or n+1 or ... 
or m x, prefer fewer - * x{n,}? n or more x, prefer fewer - * x{n}? exactly n x - * ``` - * - * Implementation restriction: The counting forms x{n,m}, x{n,}, and x{n} - * reject forms that create a minimum or maximum repetition count above 1000. - * Unlimited repetitions are not subject to this restriction. - * - * Grouping: - * - * ``` - * (re) numbered capturing group (submatch) - * (?Pre) named & numbered capturing group (submatch) - * (?re) named & numbered capturing group (submatch) - * (?:re) non-capturing group - * (?flags) set flags within current group; non-capturing - * (?flags:re) set flags during re; non-capturing - * - * Flag syntax is xyz (set) or -xyz (clear) or xy-z (set xy, clear z). The flags are: - * - * i case-insensitive (default false) - * m multi-line mode: ^ and $ match begin/end line in addition to begin/end text (default false) - * s let . match \n (default false) - * U ungreedy: swap meaning of x* and x*?, x+ and x+?, etc (default false) - * ``` - * - * Empty strings: - * - * ``` - * ^ at beginning of text or line (flag m=true) - * $ at end of text (like \z not \Z) or line (flag m=true) - * \A at beginning of text - * \b at ASCII word boundary (\w on one side and \W, \A, or \z on the other) - * \B not at ASCII word boundary - * \z at end of text - * ``` - * - * Escape sequences: - * - * ``` - * \a bell (== \007) - * \f form feed (== \014) - * \t horizontal tab (== \011) - * \n newline (== \012) - * \r carriage return (== \015) - * \v vertical tab character (== \013) - * \* literal *, for any punctuation character * - * \123 octal character code (up to three digits) - * \x7F hex character code (exactly two digits) - * \x{10FFFF} hex character code - * \Q...\E literal text ... even if ... has punctuation - * ``` - * - * Character class elements: - * - * ``` - * x single character - * A-Z character range (inclusive) - * \d Perl character class - * [:foo:] ASCII character class foo - * \p{Foo} Unicode character class Foo - * \pF Unicode character class F (one-letter name) - * ``` - * - * Named character classes as character class elements: - * - * ``` - * [\d] digits (== \d) - * [^\d] not digits (== \D) - * [\D] not digits (== \D) - * [^\D] not not digits (== \d) - * [[:name:]] named ASCII class inside character class (== [:name:]) - * [^[:name:]] named ASCII class inside negated character class (== [:^name:]) - * [\p{Name}] named Unicode property inside character class (== \p{Name}) - * [^\p{Name}] named Unicode property inside negated character class (== \P{Name}) - * ``` - * - * Perl character classes (all ASCII-only): - * - * ``` - * \d digits (== [0-9]) - * \D not digits (== [^0-9]) - * \s whitespace (== [\t\n\f\r ]) - * \S not whitespace (== [^\t\n\f\r ]) - * \w word characters (== [0-9A-Za-z_]) - * \W not word characters (== [^0-9A-Za-z_]) - * ``` - * - * ASCII character classes: - * - * ``` - * [[:alnum:]] alphanumeric (== [0-9A-Za-z]) - * [[:alpha:]] alphabetic (== [A-Za-z]) - * [[:ascii:]] ASCII (== [\x00-\x7F]) - * [[:blank:]] blank (== [\t ]) - * [[:cntrl:]] control (== [\x00-\x1F\x7F]) - * [[:digit:]] digits (== [0-9]) - * [[:graph:]] graphical (== [!-~] == [A-Za-z0-9!"#$%&'()*+,\-./:;<=>?@[\\\]^_`{|}~]) - * [[:lower:]] lower case (== [a-z]) - * [[:print:]] printable (== [ -~] == [ [:graph:]]) - * [[:punct:]] punctuation (== [!-/:-@[-`{-~]) - * [[:space:]] whitespace (== [\t\n\v\f\r ]) - * [[:upper:]] upper case (== [A-Z]) - * [[:word:]] word characters (== [0-9A-Za-z_]) - * [[:xdigit:]] hex digit (== [0-9A-Fa-f]) - * ``` - * - * Unicode character 
classes are those in [unicode.Categories] and [unicode.Scripts]. - */ -namespace syntax { - /** - * Flags control the behavior of the parser and record information about regexp context. - */ - interface Flags extends Number{} -} - -/** - * Package types implements some commonly used db serializable types - * like datetime, json, etc. - */ -namespace types { - /** - * DateTime represents a [time.Time] instance in UTC that is wrapped - * and serialized using the app default date layout. - */ - interface DateTime { - } - interface DateTime { - /** - * Time returns the internal [time.Time] instance. - */ - time(): time.Time - } - interface DateTime { - /** - * Add returns a new DateTime based on the current DateTime + the specified duration. - */ - add(duration: time.Duration): DateTime - } - interface DateTime { - /** - * Sub returns a [time.Duration] by subtracting the specified DateTime from the current one. - * - * If the result exceeds the maximum (or minimum) value that can be stored in a [time.Duration], - * the maximum (or minimum) duration will be returned. - */ - sub(u: DateTime): time.Duration - } - interface DateTime { - /** - * AddDate returns a new DateTime based on the current one + duration. - * - * It follows the same rules as [time.AddDate]. - */ - addDate(years: number, months: number, days: number): DateTime - } - interface DateTime { - /** - * After reports whether the current DateTime instance is after u. - */ - after(u: DateTime): boolean - } - interface DateTime { - /** - * Before reports whether the current DateTime instance is before u. - */ - before(u: DateTime): boolean - } - interface DateTime { - /** - * Compare compares the current DateTime instance with u. - * If the current instance is before u, it returns -1. - * If the current instance is after u, it returns +1. - * If they're the same, it returns 0. - */ - compare(u: DateTime): number - } - interface DateTime { - /** - * Equal reports whether the current DateTime and u represent the same time instant. - * Two DateTime can be equal even if they are in different locations. - * For example, 6:00 +0200 and 4:00 UTC are Equal. - */ - equal(u: DateTime): boolean - } - interface DateTime { - /** - * Unix returns the current DateTime as a Unix time, aka. - * the number of seconds elapsed since January 1, 1970 UTC. - */ - unix(): number - } - interface DateTime { - /** - * IsZero checks whether the current DateTime instance has zero time value. - */ - isZero(): boolean - } - interface DateTime { - /** - * String serializes the current DateTime instance into a formatted - * UTC date string. - * - * The zero value is serialized to an empty string. - */ - string(): string - } - interface DateTime { - /** - * MarshalJSON implements the [json.Marshaler] interface. - */ - marshalJSON(): string|Array - } - interface DateTime { - /** - * UnmarshalJSON implements the [json.Unmarshaler] interface. - */ - unmarshalJSON(b: string|Array): void - } - interface DateTime { - /** - * Value implements the [driver.Valuer] interface. - */ - value(): any - } - interface DateTime { - /** - * Scan implements [sql.Scanner] interface to scan the provided value - * into the current DateTime instance. - */ - scan(value: any): void - } - /** - * GeoPoint defines a struct for storing geo coordinates as serialized json object - * (e.g. {lon:0,lat:0}). - * - * Note: using object notation and not a plain array to avoid the confusion - * as there doesn't seem to be a fixed standard for the coordinates order. 
- */ - interface GeoPoint { - lon: number - lat: number - } - interface GeoPoint { - /** - * String returns the string representation of the current GeoPoint instance. - */ - string(): string - } - interface GeoPoint { - /** - * AsMap implements [core.mapExtractor] and returns a value suitable - * to be used in an API rule expression. - */ - asMap(): _TygojaDict - } - interface GeoPoint { - /** - * Value implements the [driver.Valuer] interface. - */ - value(): any - } - interface GeoPoint { - /** - * Scan implements [sql.Scanner] interface to scan the provided value - * into the current GeoPoint instance. - * - * The value argument could be nil (no-op), another GeoPoint instance, - * map or serialized json object with lat-lon props. - */ - scan(value: any): void - } - /** - * JSONArray defines a slice that is safe for json and db read/write. - */ - interface JSONArray extends Array{} - /** - * JSONMap defines a map that is safe for json and db read/write. - */ - interface JSONMap extends _TygojaDict{} - /** - * JSONRaw defines a json value type that is safe for db read/write. - */ - interface JSONRaw extends Array{} - interface JSONRaw { - /** - * String returns the current JSONRaw instance as a json encoded string. - */ - string(): string - } - interface JSONRaw { - /** - * MarshalJSON implements the [json.Marshaler] interface. - */ - marshalJSON(): string|Array - } - interface JSONRaw { - /** - * UnmarshalJSON implements the [json.Unmarshaler] interface. - */ - unmarshalJSON(b: string|Array): void - } - interface JSONRaw { - /** - * Value implements the [driver.Valuer] interface. - */ - value(): any - } - interface JSONRaw { - /** - * Scan implements [sql.Scanner] interface to scan the provided value - * into the current JSONRaw instance. - */ - scan(value: any): void - } -} - -/** - * Package bufio implements buffered I/O. It wraps an io.Reader or io.Writer - * object, creating another object (Reader or Writer) that also implements - * the interface but provides buffering and some help for textual I/O. - */ -namespace bufio { - /** - * ReadWriter stores pointers to a [Reader] and a [Writer]. - * It implements [io.ReadWriter]. - */ - type _sjQyudU = Reader&Writer - interface ReadWriter extends _sjQyudU { - } -} - /** * Package sql provides a generic interface around SQL (or SQL-like) * databases. @@ -17153,6 +16512,168 @@ namespace sql { } } +/** + * Package syntax parses regular expressions into parse trees and compiles + * parse trees into programs. Most clients of regular expressions will use the + * facilities of package [regexp] (such as [regexp.Compile] and [regexp.Match]) instead of this package. + * + * # Syntax + * + * The regular expression syntax understood by this package when parsing with the [Perl] flag is as follows. + * Parts of the syntax can be disabled by passing alternate flags to [Parse]. + * + * Single characters: + * + * ``` + * . 
any character, possibly including newline (flag s=true) + * [xyz] character class + * [^xyz] negated character class + * \d Perl character class + * \D negated Perl character class + * [[:alpha:]] ASCII character class + * [[:^alpha:]] negated ASCII character class + * \pN Unicode character class (one-letter name) + * \p{Greek} Unicode character class + * \PN negated Unicode character class (one-letter name) + * \P{Greek} negated Unicode character class + * ``` + * + * Composites: + * + * ``` + * xy x followed by y + * x|y x or y (prefer x) + * ``` + * + * Repetitions: + * + * ``` + * x* zero or more x, prefer more + * x+ one or more x, prefer more + * x? zero or one x, prefer one + * x{n,m} n or n+1 or ... or m x, prefer more + * x{n,} n or more x, prefer more + * x{n} exactly n x + * x*? zero or more x, prefer fewer + * x+? one or more x, prefer fewer + * x?? zero or one x, prefer zero + * x{n,m}? n or n+1 or ... or m x, prefer fewer + * x{n,}? n or more x, prefer fewer + * x{n}? exactly n x + * ``` + * + * Implementation restriction: The counting forms x{n,m}, x{n,}, and x{n} + * reject forms that create a minimum or maximum repetition count above 1000. + * Unlimited repetitions are not subject to this restriction. + * + * Grouping: + * + * ``` + * (re) numbered capturing group (submatch) + * (?Pre) named & numbered capturing group (submatch) + * (?re) named & numbered capturing group (submatch) + * (?:re) non-capturing group + * (?flags) set flags within current group; non-capturing + * (?flags:re) set flags during re; non-capturing + * + * Flag syntax is xyz (set) or -xyz (clear) or xy-z (set xy, clear z). The flags are: + * + * i case-insensitive (default false) + * m multi-line mode: ^ and $ match begin/end line in addition to begin/end text (default false) + * s let . match \n (default false) + * U ungreedy: swap meaning of x* and x*?, x+ and x+?, etc (default false) + * ``` + * + * Empty strings: + * + * ``` + * ^ at beginning of text or line (flag m=true) + * $ at end of text (like \z not \Z) or line (flag m=true) + * \A at beginning of text + * \b at ASCII word boundary (\w on one side and \W, \A, or \z on the other) + * \B not at ASCII word boundary + * \z at end of text + * ``` + * + * Escape sequences: + * + * ``` + * \a bell (== \007) + * \f form feed (== \014) + * \t horizontal tab (== \011) + * \n newline (== \012) + * \r carriage return (== \015) + * \v vertical tab character (== \013) + * \* literal *, for any punctuation character * + * \123 octal character code (up to three digits) + * \x7F hex character code (exactly two digits) + * \x{10FFFF} hex character code + * \Q...\E literal text ... even if ... 
has punctuation + * ``` + * + * Character class elements: + * + * ``` + * x single character + * A-Z character range (inclusive) + * \d Perl character class + * [:foo:] ASCII character class foo + * \p{Foo} Unicode character class Foo + * \pF Unicode character class F (one-letter name) + * ``` + * + * Named character classes as character class elements: + * + * ``` + * [\d] digits (== \d) + * [^\d] not digits (== \D) + * [\D] not digits (== \D) + * [^\D] not not digits (== \d) + * [[:name:]] named ASCII class inside character class (== [:name:]) + * [^[:name:]] named ASCII class inside negated character class (== [:^name:]) + * [\p{Name}] named Unicode property inside character class (== \p{Name}) + * [^\p{Name}] named Unicode property inside negated character class (== \P{Name}) + * ``` + * + * Perl character classes (all ASCII-only): + * + * ``` + * \d digits (== [0-9]) + * \D not digits (== [^0-9]) + * \s whitespace (== [\t\n\f\r ]) + * \S not whitespace (== [^\t\n\f\r ]) + * \w word characters (== [0-9A-Za-z_]) + * \W not word characters (== [^0-9A-Za-z_]) + * ``` + * + * ASCII character classes: + * + * ``` + * [[:alnum:]] alphanumeric (== [0-9A-Za-z]) + * [[:alpha:]] alphabetic (== [A-Za-z]) + * [[:ascii:]] ASCII (== [\x00-\x7F]) + * [[:blank:]] blank (== [\t ]) + * [[:cntrl:]] control (== [\x00-\x1F\x7F]) + * [[:digit:]] digits (== [0-9]) + * [[:graph:]] graphical (== [!-~] == [A-Za-z0-9!"#$%&'()*+,\-./:;<=>?@[\\\]^_`{|}~]) + * [[:lower:]] lower case (== [a-z]) + * [[:print:]] printable (== [ -~] == [ [:graph:]]) + * [[:punct:]] punctuation (== [!-/:-@[-`{-~]) + * [[:space:]] whitespace (== [\t\n\v\f\r ]) + * [[:upper:]] upper case (== [A-Z]) + * [[:word:]] word characters (== [0-9A-Za-z_]) + * [[:xdigit:]] hex digit (== [0-9A-Fa-f]) + * ``` + * + * Unicode character classes are those in [unicode.Categories] and [unicode.Scripts]. + */ +namespace syntax { + /** + * Flags control the behavior of the parser and record information about regexp context. + */ + interface Flags extends Number{} +} + /** * Package net provides a portable interface for network I/O, including * TCP/IP, UDP, domain name resolution, and Unix domain sockets. @@ -18763,8 +18284,209 @@ namespace hook { * TaggedHook defines a proxy hook which register handlers that are triggered only * if the TaggedHook.tags are empty or includes at least one of the event data tag(s). */ - type _sidSpTC = mainHook - interface TaggedHook extends _sidSpTC { + type _sreGGOi = mainHook + interface TaggedHook extends _sreGGOi { + } +} + +/** + * Package types implements some commonly used db serializable types + * like datetime, json, etc. + */ +namespace types { + /** + * DateTime represents a [time.Time] instance in UTC that is wrapped + * and serialized using the app default date layout. + */ + interface DateTime { + } + interface DateTime { + /** + * Time returns the internal [time.Time] instance. + */ + time(): time.Time + } + interface DateTime { + /** + * Add returns a new DateTime based on the current DateTime + the specified duration. + */ + add(duration: time.Duration): DateTime + } + interface DateTime { + /** + * Sub returns a [time.Duration] by subtracting the specified DateTime from the current one. + * + * If the result exceeds the maximum (or minimum) value that can be stored in a [time.Duration], + * the maximum (or minimum) duration will be returned. + */ + sub(u: DateTime): time.Duration + } + interface DateTime { + /** + * AddDate returns a new DateTime based on the current one + duration. 
+ * + * It follows the same rules as [time.AddDate]. + */ + addDate(years: number, months: number, days: number): DateTime + } + interface DateTime { + /** + * After reports whether the current DateTime instance is after u. + */ + after(u: DateTime): boolean + } + interface DateTime { + /** + * Before reports whether the current DateTime instance is before u. + */ + before(u: DateTime): boolean + } + interface DateTime { + /** + * Compare compares the current DateTime instance with u. + * If the current instance is before u, it returns -1. + * If the current instance is after u, it returns +1. + * If they're the same, it returns 0. + */ + compare(u: DateTime): number + } + interface DateTime { + /** + * Equal reports whether the current DateTime and u represent the same time instant. + * Two DateTime can be equal even if they are in different locations. + * For example, 6:00 +0200 and 4:00 UTC are Equal. + */ + equal(u: DateTime): boolean + } + interface DateTime { + /** + * Unix returns the current DateTime as a Unix time, aka. + * the number of seconds elapsed since January 1, 1970 UTC. + */ + unix(): number + } + interface DateTime { + /** + * IsZero checks whether the current DateTime instance has zero time value. + */ + isZero(): boolean + } + interface DateTime { + /** + * String serializes the current DateTime instance into a formatted + * UTC date string. + * + * The zero value is serialized to an empty string. + */ + string(): string + } + interface DateTime { + /** + * MarshalJSON implements the [json.Marshaler] interface. + */ + marshalJSON(): string|Array + } + interface DateTime { + /** + * UnmarshalJSON implements the [json.Unmarshaler] interface. + */ + unmarshalJSON(b: string|Array): void + } + interface DateTime { + /** + * Value implements the [driver.Valuer] interface. + */ + value(): any + } + interface DateTime { + /** + * Scan implements [sql.Scanner] interface to scan the provided value + * into the current DateTime instance. + */ + scan(value: any): void + } + /** + * GeoPoint defines a struct for storing geo coordinates as serialized json object + * (e.g. {lon:0,lat:0}). + * + * Note: using object notation and not a plain array to avoid the confusion + * as there doesn't seem to be a fixed standard for the coordinates order. + */ + interface GeoPoint { + lon: number + lat: number + } + interface GeoPoint { + /** + * String returns the string representation of the current GeoPoint instance. + */ + string(): string + } + interface GeoPoint { + /** + * AsMap implements [core.mapExtractor] and returns a value suitable + * to be used in an API rule expression. + */ + asMap(): _TygojaDict + } + interface GeoPoint { + /** + * Value implements the [driver.Valuer] interface. + */ + value(): any + } + interface GeoPoint { + /** + * Scan implements [sql.Scanner] interface to scan the provided value + * into the current GeoPoint instance. + * + * The value argument could be nil (no-op), another GeoPoint instance, + * map or serialized json object with lat-lon props. + */ + scan(value: any): void + } + /** + * JSONArray defines a slice that is safe for json and db read/write. + */ + interface JSONArray extends Array{} + /** + * JSONMap defines a map that is safe for json and db read/write. + */ + interface JSONMap extends _TygojaDict{} + /** + * JSONRaw defines a json value type that is safe for db read/write. + */ + interface JSONRaw extends Array{} + interface JSONRaw { + /** + * String returns the current JSONRaw instance as a json encoded string. 
+ */ + string(): string + } + interface JSONRaw { + /** + * MarshalJSON implements the [json.Marshaler] interface. + */ + marshalJSON(): string|Array + } + interface JSONRaw { + /** + * UnmarshalJSON implements the [json.Unmarshaler] interface. + */ + unmarshalJSON(b: string|Array): void + } + interface JSONRaw { + /** + * Value implements the [driver.Valuer] interface. + */ + value(): any + } + interface JSONRaw { + /** + * Scan implements [sql.Scanner] interface to scan the provided value + * into the current JSONRaw instance. + */ + scan(value: any): void } } @@ -18846,8 +18568,8 @@ namespace router { * * NB! It is expected that the Response and Request fields are always set. */ - type _sJDPZLK = hook.Event - interface Event extends _sJDPZLK { + type _sjmIjPM = hook.Event + interface Event extends _sjmIjPM { response: http.ResponseWriter request?: http.Request } @@ -19083,8 +18805,314 @@ namespace router { * http.ListenAndServe("localhost:8090", mux) * ``` */ - type _svMCXUV = RouterGroup - interface Router extends _svMCXUV { + type _sOiUHJu = RouterGroup + interface Router extends _sOiUHJu { + } +} + +namespace exec { + /** + * Cmd represents an external command being prepared or run. + * + * A Cmd cannot be reused after calling its [Cmd.Run], [Cmd.Output] or [Cmd.CombinedOutput] + * methods. + */ + interface Cmd { + /** + * Path is the path of the command to run. + * + * This is the only field that must be set to a non-zero + * value. If Path is relative, it is evaluated relative + * to Dir. + */ + path: string + /** + * Args holds command line arguments, including the command as Args[0]. + * If the Args field is empty or nil, Run uses {Path}. + * + * In typical use, both Path and Args are set by calling Command. + */ + args: Array + /** + * Env specifies the environment of the process. + * Each entry is of the form "key=value". + * If Env is nil, the new process uses the current process's + * environment. + * If Env contains duplicate environment keys, only the last + * value in the slice for each duplicate key is used. + * As a special case on Windows, SYSTEMROOT is always added if + * missing and not explicitly set to the empty string. + */ + env: Array + /** + * Dir specifies the working directory of the command. + * If Dir is the empty string, Run runs the command in the + * calling process's current directory. + */ + dir: string + /** + * Stdin specifies the process's standard input. + * + * If Stdin is nil, the process reads from the null device (os.DevNull). + * + * If Stdin is an *os.File, the process's standard input is connected + * directly to that file. + * + * Otherwise, during the execution of the command a separate + * goroutine reads from Stdin and delivers that data to the command + * over a pipe. In this case, Wait does not complete until the goroutine + * stops copying, either because it has reached the end of Stdin + * (EOF or a read error), or because writing to the pipe returned an error, + * or because a nonzero WaitDelay was set and expired. + */ + stdin: io.Reader + /** + * Stdout and Stderr specify the process's standard output and error. + * + * If either is nil, Run connects the corresponding file descriptor + * to the null device (os.DevNull). + * + * If either is an *os.File, the corresponding output from the process + * is connected directly to that file. + * + * Otherwise, during the execution of the command a separate goroutine + * reads from the process over a pipe and delivers that data to the + * corresponding Writer. 
In this case, Wait does not complete until the + * goroutine reaches EOF or encounters an error or a nonzero WaitDelay + * expires. + * + * If Stdout and Stderr are the same writer, and have a type that can + * be compared with ==, at most one goroutine at a time will call Write. + */ + stdout: io.Writer + stderr: io.Writer + /** + * ExtraFiles specifies additional open files to be inherited by the + * new process. It does not include standard input, standard output, or + * standard error. If non-nil, entry i becomes file descriptor 3+i. + * + * ExtraFiles is not supported on Windows. + */ + extraFiles: Array<(os.File | undefined)> + /** + * SysProcAttr holds optional, operating system-specific attributes. + * Run passes it to os.StartProcess as the os.ProcAttr's Sys field. + */ + sysProcAttr?: syscall.SysProcAttr + /** + * Process is the underlying process, once started. + */ + process?: os.Process + /** + * ProcessState contains information about an exited process. + * If the process was started successfully, Wait or Run will + * populate its ProcessState when the command completes. + */ + processState?: os.ProcessState + err: Error // LookPath error, if any. + /** + * If Cancel is non-nil, the command must have been created with + * CommandContext and Cancel will be called when the command's + * Context is done. By default, CommandContext sets Cancel to + * call the Kill method on the command's Process. + * + * Typically a custom Cancel will send a signal to the command's + * Process, but it may instead take other actions to initiate cancellation, + * such as closing a stdin or stdout pipe or sending a shutdown request on a + * network socket. + * + * If the command exits with a success status after Cancel is + * called, and Cancel does not return an error equivalent to + * os.ErrProcessDone, then Wait and similar methods will return a non-nil + * error: either an error wrapping the one returned by Cancel, + * or the error from the Context. + * (If the command exits with a non-success status, or Cancel + * returns an error that wraps os.ErrProcessDone, Wait and similar methods + * continue to return the command's usual exit status.) + * + * If Cancel is set to nil, nothing will happen immediately when the command's + * Context is done, but a nonzero WaitDelay will still take effect. That may + * be useful, for example, to work around deadlocks in commands that do not + * support shutdown signals but are expected to always finish quickly. + * + * Cancel will not be called if Start returns a non-nil error. + */ + cancel: () => void + /** + * If WaitDelay is non-zero, it bounds the time spent waiting on two sources + * of unexpected delay in Wait: a child process that fails to exit after the + * associated Context is canceled, and a child process that exits but leaves + * its I/O pipes unclosed. + * + * The WaitDelay timer starts when either the associated Context is done or a + * call to Wait observes that the child process has exited, whichever occurs + * first. When the delay has elapsed, the command shuts down the child process + * and/or its I/O pipes. + * + * If the child process has failed to exit — perhaps because it ignored or + * failed to receive a shutdown signal from a Cancel function, or because no + * Cancel function was set — then it will be terminated using os.Process.Kill. + * + * Then, if the I/O pipes communicating with the child process are still open, + * those pipes are closed in order to unblock any goroutines currently blocked + * on Read or Write calls. 
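+ *
+ * A minimal sketch of configuring this (command name and durations are
+ * only examples):
+ *
+ * ```
+ * ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ * defer cancel()
+ * cmd := exec.CommandContext(ctx, "some-slow-tool")
+ * cmd.WaitDelay = 3 * time.Second // clean up pipes/process 3s after ctx is done
+ * if err := cmd.Run(); errors.Is(err, exec.ErrWaitDelay) {
+ * 	// the pipes were force-closed after WaitDelay (see below)
+ * }
+ * ```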
+ * + * If pipes are closed due to WaitDelay, no Cancel call has occurred, + * and the command has otherwise exited with a successful status, Wait and + * similar methods will return ErrWaitDelay instead of nil. + * + * If WaitDelay is zero (the default), I/O pipes will be read until EOF, + * which might not occur until orphaned subprocesses of the command have + * also closed their descriptors for the pipes. + */ + waitDelay: time.Duration + } + interface Cmd { + /** + * String returns a human-readable description of c. + * It is intended only for debugging. + * In particular, it is not suitable for use as input to a shell. + * The output of String may vary across Go releases. + */ + string(): string + } + interface Cmd { + /** + * Run starts the specified command and waits for it to complete. + * + * The returned error is nil if the command runs, has no problems + * copying stdin, stdout, and stderr, and exits with a zero exit + * status. + * + * If the command starts but does not complete successfully, the error is of + * type [*ExitError]. Other error types may be returned for other situations. + * + * If the calling goroutine has locked the operating system thread + * with [runtime.LockOSThread] and modified any inheritable OS-level + * thread state (for example, Linux or Plan 9 name spaces), the new + * process will inherit the caller's thread state. + */ + run(): void + } + interface Cmd { + /** + * Start starts the specified command but does not wait for it to complete. + * + * If Start returns successfully, the c.Process field will be set. + * + * After a successful call to Start the [Cmd.Wait] method must be called in + * order to release associated system resources. + */ + start(): void + } + interface Cmd { + /** + * Wait waits for the command to exit and waits for any copying to + * stdin or copying from stdout or stderr to complete. + * + * The command must have been started by [Cmd.Start]. + * + * The returned error is nil if the command runs, has no problems + * copying stdin, stdout, and stderr, and exits with a zero exit + * status. + * + * If the command fails to run or doesn't complete successfully, the + * error is of type [*ExitError]. Other error types may be + * returned for I/O problems. + * + * If any of c.Stdin, c.Stdout or c.Stderr are not an [*os.File], Wait also waits + * for the respective I/O loop copying to or from the process to complete. + * + * Wait releases any resources associated with the [Cmd]. + */ + wait(): void + } + interface Cmd { + /** + * Output runs the command and returns its standard output. + * Any returned error will usually be of type [*ExitError]. + * If c.Stderr was nil, Output populates [ExitError.Stderr]. + */ + output(): string|Array + } + interface Cmd { + /** + * CombinedOutput runs the command and returns its combined standard + * output and standard error. + */ + combinedOutput(): string|Array + } + interface Cmd { + /** + * StdinPipe returns a pipe that will be connected to the command's + * standard input when the command starts. + * The pipe will be closed automatically after [Cmd.Wait] sees the command exit. + * A caller need only call Close to force the pipe to close sooner. + * For example, if the command being run will not exit until standard input + * is closed, the caller must close the pipe. + */ + stdinPipe(): io.WriteCloser + } + interface Cmd { + /** + * StdoutPipe returns a pipe that will be connected to the command's + * standard output when the command starts. 
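+ *
+ * A typical use looks roughly like this (error handling omitted for brevity):
+ *
+ * ```
+ * cmd := exec.Command("echo", "hello")
+ * stdout, _ := cmd.StdoutPipe()
+ * _ = cmd.Start()
+ * data, _ := io.ReadAll(stdout) // drain the pipe before calling Wait
+ * _ = cmd.Wait()
+ * println(string(data))
+ * ```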
+ * + * [Cmd.Wait] will close the pipe after seeing the command exit, so most callers + * need not close the pipe themselves. It is thus incorrect to call Wait + * before all reads from the pipe have completed. + * For the same reason, it is incorrect to call [Cmd.Run] when using StdoutPipe. + * See the example for idiomatic usage. + */ + stdoutPipe(): io.ReadCloser + } + interface Cmd { + /** + * StderrPipe returns a pipe that will be connected to the command's + * standard error when the command starts. + * + * [Cmd.Wait] will close the pipe after seeing the command exit, so most callers + * need not close the pipe themselves. It is thus incorrect to call Wait + * before all reads from the pipe have completed. + * For the same reason, it is incorrect to use [Cmd.Run] when using StderrPipe. + * See the StdoutPipe example for idiomatic usage. + */ + stderrPipe(): io.ReadCloser + } + interface Cmd { + /** + * Environ returns a copy of the environment in which the command would be run + * as it is currently configured. + */ + environ(): Array + } +} + +namespace mailer { + /** + * Message defines a generic email message struct. + */ + interface Message { + from: { address: string; name?: string; } + to: Array<{ address: string; name?: string; }> + bcc: Array<{ address: string; name?: string; }> + cc: Array<{ address: string; name?: string; }> + subject: string + html: string + text: string + headers: _TygojaDict + attachments: _TygojaDict + inlineAttachments: _TygojaDict + } + /** + * Mailer defines a base mail client interface. + */ + interface Mailer { + [key:string]: any; + /** + * Send sends an email with the provided Message. + */ + send(message: Message): void } } @@ -19231,492 +19259,99 @@ namespace subscriptions { } /** - * Package slog provides structured logging, - * in which log records include a message, - * a severity level, and various other attributes - * expressed as key-value pairs. + * Package cron implements a crontab-like service to execute and schedule + * repeative tasks/jobs. * - * It defines a type, [Logger], - * which provides several methods (such as [Logger.Info] and [Logger.Error]) - * for reporting events of interest. - * - * Each Logger is associated with a [Handler]. - * A Logger output method creates a [Record] from the method arguments - * and passes it to the Handler, which decides how to handle it. - * There is a default Logger accessible through top-level functions - * (such as [Info] and [Error]) that call the corresponding Logger methods. - * - * A log record consists of a time, a level, a message, and a set of key-value - * pairs, where the keys are strings and the values may be of any type. - * As an example, + * Example: * * ``` - * slog.Info("hello", "count", 3) + * c := cron.New() + * c.MustAdd("dailyReport", "0 0 * * *", func() { ... }) + * c.Start() * ``` - * - * creates a record containing the time of the call, - * a level of Info, the message "hello", and a single - * pair with key "count" and value 3. - * - * The [Info] top-level function calls the [Logger.Info] method on the default Logger. - * In addition to [Logger.Info], there are methods for Debug, Warn and Error levels. - * Besides these convenience methods for common levels, - * there is also a [Logger.Log] method which takes the level as an argument. - * Each of these methods has a corresponding top-level function that uses the - * default logger. - * - * The default handler formats the log record's message, time, level, and attributes - * as a string and passes it to the [log] package. 
- * - * ``` - * 2022/11/08 15:28:26 INFO hello count=3 - * ``` - * - * For more control over the output format, create a logger with a different handler. - * This statement uses [New] to create a new logger with a [TextHandler] - * that writes structured records in text form to standard error: - * - * ``` - * logger := slog.New(slog.NewTextHandler(os.Stderr, nil)) - * ``` - * - * [TextHandler] output is a sequence of key=value pairs, easily and unambiguously - * parsed by machine. This statement: - * - * ``` - * logger.Info("hello", "count", 3) - * ``` - * - * produces this output: - * - * ``` - * time=2022-11-08T15:28:26.000-05:00 level=INFO msg=hello count=3 - * ``` - * - * The package also provides [JSONHandler], whose output is line-delimited JSON: - * - * ``` - * logger := slog.New(slog.NewJSONHandler(os.Stdout, nil)) - * logger.Info("hello", "count", 3) - * ``` - * - * produces this output: - * - * ``` - * {"time":"2022-11-08T15:28:26.000000000-05:00","level":"INFO","msg":"hello","count":3} - * ``` - * - * Both [TextHandler] and [JSONHandler] can be configured with [HandlerOptions]. - * There are options for setting the minimum level (see Levels, below), - * displaying the source file and line of the log call, and - * modifying attributes before they are logged. - * - * Setting a logger as the default with - * - * ``` - * slog.SetDefault(logger) - * ``` - * - * will cause the top-level functions like [Info] to use it. - * [SetDefault] also updates the default logger used by the [log] package, - * so that existing applications that use [log.Printf] and related functions - * will send log records to the logger's handler without needing to be rewritten. - * - * Some attributes are common to many log calls. - * For example, you may wish to include the URL or trace identifier of a server request - * with all log events arising from the request. - * Rather than repeat the attribute with every log call, you can use [Logger.With] - * to construct a new Logger containing the attributes: - * - * ``` - * logger2 := logger.With("url", r.URL) - * ``` - * - * The arguments to With are the same key-value pairs used in [Logger.Info]. - * The result is a new Logger with the same handler as the original, but additional - * attributes that will appear in the output of every call. - * - * # Levels - * - * A [Level] is an integer representing the importance or severity of a log event. - * The higher the level, the more severe the event. - * This package defines constants for the most common levels, - * but any int can be used as a level. - * - * In an application, you may wish to log messages only at a certain level or greater. - * One common configuration is to log messages at Info or higher levels, - * suppressing debug logging until it is needed. - * The built-in handlers can be configured with the minimum level to output by - * setting [HandlerOptions.Level]. - * The program's `main` function typically does this. - * The default value is LevelInfo. - * - * Setting the [HandlerOptions.Level] field to a [Level] value - * fixes the handler's minimum level throughout its lifetime. - * Setting it to a [LevelVar] allows the level to be varied dynamically. - * A LevelVar holds a Level and is safe to read or write from multiple - * goroutines. 
- * To vary the level dynamically for an entire program, first initialize - * a global LevelVar: - * - * ``` - * var programLevel = new(slog.LevelVar) // Info by default - * ``` - * - * Then use the LevelVar to construct a handler, and make it the default: - * - * ``` - * h := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: programLevel}) - * slog.SetDefault(slog.New(h)) - * ``` - * - * Now the program can change its logging level with a single statement: - * - * ``` - * programLevel.Set(slog.LevelDebug) - * ``` - * - * # Groups - * - * Attributes can be collected into groups. - * A group has a name that is used to qualify the names of its attributes. - * How this qualification is displayed depends on the handler. - * [TextHandler] separates the group and attribute names with a dot. - * [JSONHandler] treats each group as a separate JSON object, with the group name as the key. - * - * Use [Group] to create a Group attribute from a name and a list of key-value pairs: - * - * ``` - * slog.Group("request", - * "method", r.Method, - * "url", r.URL) - * ``` - * - * TextHandler would display this group as - * - * ``` - * request.method=GET request.url=http://example.com - * ``` - * - * JSONHandler would display it as - * - * ``` - * "request":{"method":"GET","url":"http://example.com"} - * ``` - * - * Use [Logger.WithGroup] to qualify all of a Logger's output - * with a group name. Calling WithGroup on a Logger results in a - * new Logger with the same Handler as the original, but with all - * its attributes qualified by the group name. - * - * This can help prevent duplicate attribute keys in large systems, - * where subsystems might use the same keys. - * Pass each subsystem a different Logger with its own group name so that - * potential duplicates are qualified: - * - * ``` - * logger := slog.Default().With("id", systemID) - * parserLogger := logger.WithGroup("parser") - * parseInput(input, parserLogger) - * ``` - * - * When parseInput logs with parserLogger, its keys will be qualified with "parser", - * so even if it uses the common key "id", the log line will have distinct keys. - * - * # Contexts - * - * Some handlers may wish to include information from the [context.Context] that is - * available at the call site. One example of such information - * is the identifier for the current span when tracing is enabled. - * - * The [Logger.Log] and [Logger.LogAttrs] methods take a context as a first - * argument, as do their corresponding top-level functions. - * - * Although the convenience methods on Logger (Info and so on) and the - * corresponding top-level functions do not take a context, the alternatives ending - * in "Context" do. For example, - * - * ``` - * slog.InfoContext(ctx, "message") - * ``` - * - * It is recommended to pass a context to an output method if one is available. - * - * # Attrs and Values - * - * An [Attr] is a key-value pair. The Logger output methods accept Attrs as well as - * alternating keys and values. The statement - * - * ``` - * slog.Info("hello", slog.Int("count", 3)) - * ``` - * - * behaves the same as - * - * ``` - * slog.Info("hello", "count", 3) - * ``` - * - * There are convenience constructors for [Attr] such as [Int], [String], and [Bool] - * for common types, as well as the function [Any] for constructing Attrs of any - * type. - * - * The value part of an Attr is a type called [Value]. - * Like an [any], a Value can hold any Go value, - * but it can represent typical values, including all numbers and strings, - * without an allocation. 
- * - * For the most efficient log output, use [Logger.LogAttrs]. - * It is similar to [Logger.Log] but accepts only Attrs, not alternating - * keys and values; this allows it, too, to avoid allocation. - * - * The call - * - * ``` - * logger.LogAttrs(ctx, slog.LevelInfo, "hello", slog.Int("count", 3)) - * ``` - * - * is the most efficient way to achieve the same output as - * - * ``` - * slog.InfoContext(ctx, "hello", "count", 3) - * ``` - * - * # Customizing a type's logging behavior - * - * If a type implements the [LogValuer] interface, the [Value] returned from its LogValue - * method is used for logging. You can use this to control how values of the type - * appear in logs. For example, you can redact secret information like passwords, - * or gather a struct's fields in a Group. See the examples under [LogValuer] for - * details. - * - * A LogValue method may return a Value that itself implements [LogValuer]. The [Value.Resolve] - * method handles these cases carefully, avoiding infinite loops and unbounded recursion. - * Handler authors and others may wish to use [Value.Resolve] instead of calling LogValue directly. - * - * # Wrapping output methods - * - * The logger functions use reflection over the call stack to find the file name - * and line number of the logging call within the application. This can produce - * incorrect source information for functions that wrap slog. For instance, if you - * define this function in file mylog.go: - * - * ``` - * func Infof(logger *slog.Logger, format string, args ...any) { - * logger.Info(fmt.Sprintf(format, args...)) - * } - * ``` - * - * and you call it like this in main.go: - * - * ``` - * Infof(slog.Default(), "hello, %s", "world") - * ``` - * - * then slog will report the source file as mylog.go, not main.go. - * - * A correct implementation of Infof will obtain the source location - * (pc) and pass it to NewRecord. - * The Infof function in the package-level example called "wrapping" - * demonstrates how to do this. - * - * # Working with Records - * - * Sometimes a Handler will need to modify a Record - * before passing it on to another Handler or backend. - * A Record contains a mixture of simple public fields (e.g. Time, Level, Message) - * and hidden fields that refer to state (such as attributes) indirectly. This - * means that modifying a simple copy of a Record (e.g. by calling - * [Record.Add] or [Record.AddAttrs] to add attributes) - * may have unexpected effects on the original. - * Before modifying a Record, use [Record.Clone] to - * create a copy that shares no state with the original, - * or create a new Record with [NewRecord] - * and build up its Attrs by traversing the old ones with [Record.Attrs]. - * - * # Performance considerations - * - * If profiling your application demonstrates that logging is taking significant time, - * the following suggestions may help. - * - * If many log lines have a common attribute, use [Logger.With] to create a Logger with - * that attribute. The built-in handlers will format that attribute only once, at the - * call to [Logger.With]. The [Handler] interface is designed to allow that optimization, - * and a well-written Handler should take advantage of it. - * - * The arguments to a log call are always evaluated, even if the log event is discarded. - * If possible, defer computation so that it happens only if the value is actually logged. 
- * For example, consider the call - * - * ``` - * slog.Info("starting request", "url", r.URL.String()) // may compute String unnecessarily - * ``` - * - * The URL.String method will be called even if the logger discards Info-level events. - * Instead, pass the URL directly: - * - * ``` - * slog.Info("starting request", "url", &r.URL) // calls URL.String only if needed - * ``` - * - * The built-in [TextHandler] will call its String method, but only - * if the log event is enabled. - * Avoiding the call to String also preserves the structure of the underlying value. - * For example [JSONHandler] emits the components of the parsed URL as a JSON object. - * If you want to avoid eagerly paying the cost of the String call - * without causing the handler to potentially inspect the structure of the value, - * wrap the value in a fmt.Stringer implementation that hides its Marshal methods. - * - * You can also use the [LogValuer] interface to avoid unnecessary work in disabled log - * calls. Say you need to log some expensive value: - * - * ``` - * slog.Debug("frobbing", "value", computeExpensiveValue(arg)) - * ``` - * - * Even if this line is disabled, computeExpensiveValue will be called. - * To avoid that, define a type implementing LogValuer: - * - * ``` - * type expensive struct { arg int } - * - * func (e expensive) LogValue() slog.Value { - * return slog.AnyValue(computeExpensiveValue(e.arg)) - * } - * ``` - * - * Then use a value of that type in log calls: - * - * ``` - * slog.Debug("frobbing", "value", expensive{arg}) - * ``` - * - * Now computeExpensiveValue will only be called when the line is enabled. - * - * The built-in handlers acquire a lock before calling [io.Writer.Write] - * to ensure that exactly one [Record] is written at a time in its entirety. - * Although each log record has a timestamp, - * the built-in handlers do not use that time to sort the written records. - * User-defined handlers are responsible for their own locking and sorting. - * - * # Writing a handler - * - * For a guide to writing a custom handler, see https://golang.org/s/slog-handler-guide. */ -namespace slog { - // @ts-ignore - import loginternal = internal +namespace cron { /** - * A Logger records structured information about each call to its - * Log, Debug, Info, Warn, and Error methods. - * For each call, it creates a [Record] and passes it to a [Handler]. - * - * To create a new Logger, call [New] or a Logger method - * that begins "With". + * Cron is a crontab-like struct for tasks/jobs scheduling. */ - interface Logger { + interface Cron { } - interface Logger { + interface Cron { /** - * Handler returns l's Handler. + * SetInterval changes the current cron tick interval + * (it usually should be >= 1 minute). */ - handler(): Handler + setInterval(d: time.Duration): void } - interface Logger { + interface Cron { /** - * With returns a Logger that includes the given attributes - * in each output operation. Arguments are converted to - * attributes as if by [Logger.Log]. + * SetTimezone changes the current cron tick timezone. */ - with(...args: any[]): (Logger) + setTimezone(l: time.Location): void } - interface Logger { + interface Cron { /** - * WithGroup returns a Logger that starts a group, if name is non-empty. - * The keys of all attributes added to the Logger will be qualified by the given - * name. (How that qualification happens depends on the [Handler.WithGroup] - * method of the Logger's Handler.) + * MustAdd is similar to Add() but panic on failure. 
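+ *
+ * For example (a minimal sketch; the job id and expression are arbitrary):
+ *
+ * ```
+ * c := cron.New()
+ * c.MustAdd("cleanup", "0 3 * * *", func() {
+ * 	// runs every day at 03:00
+ * })
+ * c.Start()
+ * ```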
+ */ + mustAdd(jobId: string, cronExpr: string, run: () => void): void + } + interface Cron { + /** + * Add registers a single cron job. * - * If name is empty, WithGroup returns the receiver. - */ - withGroup(name: string): (Logger) - } - interface Logger { - /** - * Enabled reports whether l emits log records at the given context and level. - */ - enabled(ctx: context.Context, level: Level): boolean - } - interface Logger { - /** - * Log emits a log record with the current time and the given level and message. - * The Record's Attrs consist of the Logger's attributes followed by - * the Attrs specified by args. + * If there is already a job with the provided id, then the old job + * will be replaced with the new one. * - * The attribute arguments are processed as follows: - * ``` - * - If an argument is an Attr, it is used as is. - * - If an argument is a string and this is not the last argument, - * the following argument is treated as the value and the two are combined - * into an Attr. - * - Otherwise, the argument is treated as a value with key "!BADKEY". - * ``` + * cronExpr is a regular cron expression, eg. "0 *\/3 * * *" (aka. at minute 0 past every 3rd hour). + * Check cron.NewSchedule() for the supported tokens. */ - log(ctx: context.Context, level: Level, msg: string, ...args: any[]): void + add(jobId: string, cronExpr: string, fn: () => void): void } - interface Logger { + interface Cron { /** - * LogAttrs is a more efficient version of [Logger.Log] that accepts only Attrs. + * Remove removes a single cron job by its id. */ - logAttrs(ctx: context.Context, level: Level, msg: string, ...attrs: Attr[]): void + remove(jobId: string): void } - interface Logger { + interface Cron { /** - * Debug logs at [LevelDebug]. + * RemoveAll removes all registered cron jobs. */ - debug(msg: string, ...args: any[]): void + removeAll(): void } - interface Logger { + interface Cron { /** - * DebugContext logs at [LevelDebug] with the given context. + * Total returns the current total number of registered cron jobs. */ - debugContext(ctx: context.Context, msg: string, ...args: any[]): void + total(): number } - interface Logger { + interface Cron { /** - * Info logs at [LevelInfo]. + * Jobs returns a shallow copy of the currently registered cron jobs. */ - info(msg: string, ...args: any[]): void + jobs(): Array<(Job | undefined)> } - interface Logger { + interface Cron { /** - * InfoContext logs at [LevelInfo] with the given context. + * Stop stops the current cron ticker (if not already). + * + * You can resume the ticker by calling Start(). */ - infoContext(ctx: context.Context, msg: string, ...args: any[]): void + stop(): void } - interface Logger { + interface Cron { /** - * Warn logs at [LevelWarn]. + * Start starts the cron ticker. + * + * Calling Start() on already started cron will restart the ticker. */ - warn(msg: string, ...args: any[]): void + start(): void } - interface Logger { + interface Cron { /** - * WarnContext logs at [LevelWarn] with the given context. + * HasStarted checks whether the current Cron ticker has been started. */ - warnContext(ctx: context.Context, msg: string, ...args: any[]): void - } - interface Logger { - /** - * Error logs at [LevelError]. - */ - error(msg: string, ...args: any[]): void - } - interface Logger { - /** - * ErrorContext logs at [LevelError] with the given context. 
- */ - errorContext(ctx: context.Context, msg: string, ...args: any[]): void + hasStarted(): boolean } } @@ -20954,127 +20589,492 @@ namespace auth { } /** - * Package cron implements a crontab-like service to execute and schedule - * repeative tasks/jobs. + * Package slog provides structured logging, + * in which log records include a message, + * a severity level, and various other attributes + * expressed as key-value pairs. * - * Example: + * It defines a type, [Logger], + * which provides several methods (such as [Logger.Info] and [Logger.Error]) + * for reporting events of interest. + * + * Each Logger is associated with a [Handler]. + * A Logger output method creates a [Record] from the method arguments + * and passes it to the Handler, which decides how to handle it. + * There is a default Logger accessible through top-level functions + * (such as [Info] and [Error]) that call the corresponding Logger methods. + * + * A log record consists of a time, a level, a message, and a set of key-value + * pairs, where the keys are strings and the values may be of any type. + * As an example, * * ``` - * c := cron.New() - * c.MustAdd("dailyReport", "0 0 * * *", func() { ... }) - * c.Start() + * slog.Info("hello", "count", 3) * ``` + * + * creates a record containing the time of the call, + * a level of Info, the message "hello", and a single + * pair with key "count" and value 3. + * + * The [Info] top-level function calls the [Logger.Info] method on the default Logger. + * In addition to [Logger.Info], there are methods for Debug, Warn and Error levels. + * Besides these convenience methods for common levels, + * there is also a [Logger.Log] method which takes the level as an argument. + * Each of these methods has a corresponding top-level function that uses the + * default logger. + * + * The default handler formats the log record's message, time, level, and attributes + * as a string and passes it to the [log] package. + * + * ``` + * 2022/11/08 15:28:26 INFO hello count=3 + * ``` + * + * For more control over the output format, create a logger with a different handler. + * This statement uses [New] to create a new logger with a [TextHandler] + * that writes structured records in text form to standard error: + * + * ``` + * logger := slog.New(slog.NewTextHandler(os.Stderr, nil)) + * ``` + * + * [TextHandler] output is a sequence of key=value pairs, easily and unambiguously + * parsed by machine. This statement: + * + * ``` + * logger.Info("hello", "count", 3) + * ``` + * + * produces this output: + * + * ``` + * time=2022-11-08T15:28:26.000-05:00 level=INFO msg=hello count=3 + * ``` + * + * The package also provides [JSONHandler], whose output is line-delimited JSON: + * + * ``` + * logger := slog.New(slog.NewJSONHandler(os.Stdout, nil)) + * logger.Info("hello", "count", 3) + * ``` + * + * produces this output: + * + * ``` + * {"time":"2022-11-08T15:28:26.000000000-05:00","level":"INFO","msg":"hello","count":3} + * ``` + * + * Both [TextHandler] and [JSONHandler] can be configured with [HandlerOptions]. + * There are options for setting the minimum level (see Levels, below), + * displaying the source file and line of the log call, and + * modifying attributes before they are logged. + * + * Setting a logger as the default with + * + * ``` + * slog.SetDefault(logger) + * ``` + * + * will cause the top-level functions like [Info] to use it. 
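+ *
+ * For example, continuing with the logger constructed above:
+ *
+ * ```
+ * slog.SetDefault(logger)
+ * slog.Info("hello") // now handled by logger's handler
+ * ```
+ *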
+ * [SetDefault] also updates the default logger used by the [log] package, + * so that existing applications that use [log.Printf] and related functions + * will send log records to the logger's handler without needing to be rewritten. + * + * Some attributes are common to many log calls. + * For example, you may wish to include the URL or trace identifier of a server request + * with all log events arising from the request. + * Rather than repeat the attribute with every log call, you can use [Logger.With] + * to construct a new Logger containing the attributes: + * + * ``` + * logger2 := logger.With("url", r.URL) + * ``` + * + * The arguments to With are the same key-value pairs used in [Logger.Info]. + * The result is a new Logger with the same handler as the original, but additional + * attributes that will appear in the output of every call. + * + * # Levels + * + * A [Level] is an integer representing the importance or severity of a log event. + * The higher the level, the more severe the event. + * This package defines constants for the most common levels, + * but any int can be used as a level. + * + * In an application, you may wish to log messages only at a certain level or greater. + * One common configuration is to log messages at Info or higher levels, + * suppressing debug logging until it is needed. + * The built-in handlers can be configured with the minimum level to output by + * setting [HandlerOptions.Level]. + * The program's `main` function typically does this. + * The default value is LevelInfo. + * + * Setting the [HandlerOptions.Level] field to a [Level] value + * fixes the handler's minimum level throughout its lifetime. + * Setting it to a [LevelVar] allows the level to be varied dynamically. + * A LevelVar holds a Level and is safe to read or write from multiple + * goroutines. + * To vary the level dynamically for an entire program, first initialize + * a global LevelVar: + * + * ``` + * var programLevel = new(slog.LevelVar) // Info by default + * ``` + * + * Then use the LevelVar to construct a handler, and make it the default: + * + * ``` + * h := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: programLevel}) + * slog.SetDefault(slog.New(h)) + * ``` + * + * Now the program can change its logging level with a single statement: + * + * ``` + * programLevel.Set(slog.LevelDebug) + * ``` + * + * # Groups + * + * Attributes can be collected into groups. + * A group has a name that is used to qualify the names of its attributes. + * How this qualification is displayed depends on the handler. + * [TextHandler] separates the group and attribute names with a dot. + * [JSONHandler] treats each group as a separate JSON object, with the group name as the key. + * + * Use [Group] to create a Group attribute from a name and a list of key-value pairs: + * + * ``` + * slog.Group("request", + * "method", r.Method, + * "url", r.URL) + * ``` + * + * TextHandler would display this group as + * + * ``` + * request.method=GET request.url=http://example.com + * ``` + * + * JSONHandler would display it as + * + * ``` + * "request":{"method":"GET","url":"http://example.com"} + * ``` + * + * Use [Logger.WithGroup] to qualify all of a Logger's output + * with a group name. Calling WithGroup on a Logger results in a + * new Logger with the same Handler as the original, but with all + * its attributes qualified by the group name. + * + * This can help prevent duplicate attribute keys in large systems, + * where subsystems might use the same keys. 
+ * Pass each subsystem a different Logger with its own group name so that + * potential duplicates are qualified: + * + * ``` + * logger := slog.Default().With("id", systemID) + * parserLogger := logger.WithGroup("parser") + * parseInput(input, parserLogger) + * ``` + * + * When parseInput logs with parserLogger, its keys will be qualified with "parser", + * so even if it uses the common key "id", the log line will have distinct keys. + * + * # Contexts + * + * Some handlers may wish to include information from the [context.Context] that is + * available at the call site. One example of such information + * is the identifier for the current span when tracing is enabled. + * + * The [Logger.Log] and [Logger.LogAttrs] methods take a context as a first + * argument, as do their corresponding top-level functions. + * + * Although the convenience methods on Logger (Info and so on) and the + * corresponding top-level functions do not take a context, the alternatives ending + * in "Context" do. For example, + * + * ``` + * slog.InfoContext(ctx, "message") + * ``` + * + * It is recommended to pass a context to an output method if one is available. + * + * # Attrs and Values + * + * An [Attr] is a key-value pair. The Logger output methods accept Attrs as well as + * alternating keys and values. The statement + * + * ``` + * slog.Info("hello", slog.Int("count", 3)) + * ``` + * + * behaves the same as + * + * ``` + * slog.Info("hello", "count", 3) + * ``` + * + * There are convenience constructors for [Attr] such as [Int], [String], and [Bool] + * for common types, as well as the function [Any] for constructing Attrs of any + * type. + * + * The value part of an Attr is a type called [Value]. + * Like an [any], a Value can hold any Go value, + * but it can represent typical values, including all numbers and strings, + * without an allocation. + * + * For the most efficient log output, use [Logger.LogAttrs]. + * It is similar to [Logger.Log] but accepts only Attrs, not alternating + * keys and values; this allows it, too, to avoid allocation. + * + * The call + * + * ``` + * logger.LogAttrs(ctx, slog.LevelInfo, "hello", slog.Int("count", 3)) + * ``` + * + * is the most efficient way to achieve the same output as + * + * ``` + * slog.InfoContext(ctx, "hello", "count", 3) + * ``` + * + * # Customizing a type's logging behavior + * + * If a type implements the [LogValuer] interface, the [Value] returned from its LogValue + * method is used for logging. You can use this to control how values of the type + * appear in logs. For example, you can redact secret information like passwords, + * or gather a struct's fields in a Group. See the examples under [LogValuer] for + * details. + * + * A LogValue method may return a Value that itself implements [LogValuer]. The [Value.Resolve] + * method handles these cases carefully, avoiding infinite loops and unbounded recursion. + * Handler authors and others may wish to use [Value.Resolve] instead of calling LogValue directly. + * + * # Wrapping output methods + * + * The logger functions use reflection over the call stack to find the file name + * and line number of the logging call within the application. This can produce + * incorrect source information for functions that wrap slog. 
For instance, if you + * define this function in file mylog.go: + * + * ``` + * func Infof(logger *slog.Logger, format string, args ...any) { + * logger.Info(fmt.Sprintf(format, args...)) + * } + * ``` + * + * and you call it like this in main.go: + * + * ``` + * Infof(slog.Default(), "hello, %s", "world") + * ``` + * + * then slog will report the source file as mylog.go, not main.go. + * + * A correct implementation of Infof will obtain the source location + * (pc) and pass it to NewRecord. + * The Infof function in the package-level example called "wrapping" + * demonstrates how to do this. + * + * # Working with Records + * + * Sometimes a Handler will need to modify a Record + * before passing it on to another Handler or backend. + * A Record contains a mixture of simple public fields (e.g. Time, Level, Message) + * and hidden fields that refer to state (such as attributes) indirectly. This + * means that modifying a simple copy of a Record (e.g. by calling + * [Record.Add] or [Record.AddAttrs] to add attributes) + * may have unexpected effects on the original. + * Before modifying a Record, use [Record.Clone] to + * create a copy that shares no state with the original, + * or create a new Record with [NewRecord] + * and build up its Attrs by traversing the old ones with [Record.Attrs]. + * + * # Performance considerations + * + * If profiling your application demonstrates that logging is taking significant time, + * the following suggestions may help. + * + * If many log lines have a common attribute, use [Logger.With] to create a Logger with + * that attribute. The built-in handlers will format that attribute only once, at the + * call to [Logger.With]. The [Handler] interface is designed to allow that optimization, + * and a well-written Handler should take advantage of it. + * + * The arguments to a log call are always evaluated, even if the log event is discarded. + * If possible, defer computation so that it happens only if the value is actually logged. + * For example, consider the call + * + * ``` + * slog.Info("starting request", "url", r.URL.String()) // may compute String unnecessarily + * ``` + * + * The URL.String method will be called even if the logger discards Info-level events. + * Instead, pass the URL directly: + * + * ``` + * slog.Info("starting request", "url", &r.URL) // calls URL.String only if needed + * ``` + * + * The built-in [TextHandler] will call its String method, but only + * if the log event is enabled. + * Avoiding the call to String also preserves the structure of the underlying value. + * For example [JSONHandler] emits the components of the parsed URL as a JSON object. + * If you want to avoid eagerly paying the cost of the String call + * without causing the handler to potentially inspect the structure of the value, + * wrap the value in a fmt.Stringer implementation that hides its Marshal methods. + * + * You can also use the [LogValuer] interface to avoid unnecessary work in disabled log + * calls. Say you need to log some expensive value: + * + * ``` + * slog.Debug("frobbing", "value", computeExpensiveValue(arg)) + * ``` + * + * Even if this line is disabled, computeExpensiveValue will be called. 
+ * To avoid that, define a type implementing LogValuer: + * + * ``` + * type expensive struct { arg int } + * + * func (e expensive) LogValue() slog.Value { + * return slog.AnyValue(computeExpensiveValue(e.arg)) + * } + * ``` + * + * Then use a value of that type in log calls: + * + * ``` + * slog.Debug("frobbing", "value", expensive{arg}) + * ``` + * + * Now computeExpensiveValue will only be called when the line is enabled. + * + * The built-in handlers acquire a lock before calling [io.Writer.Write] + * to ensure that exactly one [Record] is written at a time in its entirety. + * Although each log record has a timestamp, + * the built-in handlers do not use that time to sort the written records. + * User-defined handlers are responsible for their own locking and sorting. + * + * # Writing a handler + * + * For a guide to writing a custom handler, see https://golang.org/s/slog-handler-guide. */ -namespace cron { +namespace slog { + // @ts-ignore + import loginternal = internal /** - * Cron is a crontab-like struct for tasks/jobs scheduling. + * A Logger records structured information about each call to its + * Log, Debug, Info, Warn, and Error methods. + * For each call, it creates a [Record] and passes it to a [Handler]. + * + * To create a new Logger, call [New] or a Logger method + * that begins "With". */ - interface Cron { + interface Logger { } - interface Cron { + interface Logger { /** - * SetInterval changes the current cron tick interval - * (it usually should be >= 1 minute). + * Handler returns l's Handler. */ - setInterval(d: time.Duration): void + handler(): Handler } - interface Cron { + interface Logger { /** - * SetTimezone changes the current cron tick timezone. + * With returns a Logger that includes the given attributes + * in each output operation. Arguments are converted to + * attributes as if by [Logger.Log]. */ - setTimezone(l: time.Location): void + with(...args: any[]): (Logger) } - interface Cron { + interface Logger { /** - * MustAdd is similar to Add() but panic on failure. - */ - mustAdd(jobId: string, cronExpr: string, run: () => void): void - } - interface Cron { - /** - * Add registers a single cron job. + * WithGroup returns a Logger that starts a group, if name is non-empty. + * The keys of all attributes added to the Logger will be qualified by the given + * name. (How that qualification happens depends on the [Handler.WithGroup] + * method of the Logger's Handler.) * - * If there is already a job with the provided id, then the old job - * will be replaced with the new one. + * If name is empty, WithGroup returns the receiver. + */ + withGroup(name: string): (Logger) + } + interface Logger { + /** + * Enabled reports whether l emits log records at the given context and level. + */ + enabled(ctx: context.Context, level: Level): boolean + } + interface Logger { + /** + * Log emits a log record with the current time and the given level and message. + * The Record's Attrs consist of the Logger's attributes followed by + * the Attrs specified by args. * - * cronExpr is a regular cron expression, eg. "0 *\/3 * * *" (aka. at minute 0 past every 3rd hour). - * Check cron.NewSchedule() for the supported tokens. + * The attribute arguments are processed as follows: + * ``` + * - If an argument is an Attr, it is used as is. + * - If an argument is a string and this is not the last argument, + * the following argument is treated as the value and the two are combined + * into an Attr. + * - Otherwise, the argument is treated as a value with key "!BADKEY". 
+ * ``` */ - add(jobId: string, cronExpr: string, fn: () => void): void + log(ctx: context.Context, level: Level, msg: string, ...args: any[]): void } - interface Cron { + interface Logger { /** - * Remove removes a single cron job by its id. + * LogAttrs is a more efficient version of [Logger.Log] that accepts only Attrs. */ - remove(jobId: string): void + logAttrs(ctx: context.Context, level: Level, msg: string, ...attrs: Attr[]): void } - interface Cron { + interface Logger { /** - * RemoveAll removes all registered cron jobs. + * Debug logs at [LevelDebug]. */ - removeAll(): void + debug(msg: string, ...args: any[]): void } - interface Cron { + interface Logger { /** - * Total returns the current total number of registered cron jobs. + * DebugContext logs at [LevelDebug] with the given context. */ - total(): number + debugContext(ctx: context.Context, msg: string, ...args: any[]): void } - interface Cron { + interface Logger { /** - * Jobs returns a shallow copy of the currently registered cron jobs. + * Info logs at [LevelInfo]. */ - jobs(): Array<(Job | undefined)> + info(msg: string, ...args: any[]): void } - interface Cron { + interface Logger { /** - * Stop stops the current cron ticker (if not already). - * - * You can resume the ticker by calling Start(). + * InfoContext logs at [LevelInfo] with the given context. */ - stop(): void + infoContext(ctx: context.Context, msg: string, ...args: any[]): void } - interface Cron { + interface Logger { /** - * Start starts the cron ticker. - * - * Calling Start() on already started cron will restart the ticker. + * Warn logs at [LevelWarn]. */ - start(): void + warn(msg: string, ...args: any[]): void } - interface Cron { + interface Logger { /** - * HasStarted checks whether the current Cron ticker has been started. + * WarnContext logs at [LevelWarn] with the given context. */ - hasStarted(): boolean + warnContext(ctx: context.Context, msg: string, ...args: any[]): void } -} - -namespace mailer { - /** - * Message defines a generic email message struct. - */ - interface Message { - from: { address: string; name?: string; } - to: Array<{ address: string; name?: string; }> - bcc: Array<{ address: string; name?: string; }> - cc: Array<{ address: string; name?: string; }> - subject: string - html: string - text: string - headers: _TygojaDict - attachments: _TygojaDict - inlineAttachments: _TygojaDict - } - /** - * Mailer defines a base mail client interface. - */ - interface Mailer { - [key:string]: any; + interface Logger { /** - * Send sends an email with the provided Message. + * Error logs at [LevelError]. */ - send(message: Message): void + error(msg: string, ...args: any[]): void + } + interface Logger { + /** + * ErrorContext logs at [LevelError] with the given context. + */ + errorContext(ctx: context.Context, msg: string, ...args: any[]): void } } @@ -21181,10 +21181,46 @@ namespace time { } } +namespace context { +} + namespace fs { } -namespace store { +namespace net { + /** + * Addr represents a network end point address. + * + * The two methods [Addr.Network] and [Addr.String] conventionally return strings + * that can be passed as the arguments to [Dial], but the exact form + * and meaning of the strings is up to the implementation. + */ + interface Addr { + [key:string]: any; + network(): string // name of the network (for example, "tcp", "udp") + string(): string // string form of address (for example, "192.0.2.1:25", "[2001:db8::1]:80") + } + /** + * A Listener is a generic network listener for stream-oriented protocols. 
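+ *
+ * A bare-bones accept loop looks roughly like this (error handling trimmed):
+ *
+ * ```
+ * ln, _ := net.Listen("tcp", "127.0.0.1:8080")
+ * for {
+ * 	conn, err := ln.Accept()
+ * 	if err != nil {
+ * 		break // e.g. the listener was closed
+ * 	}
+ * 	go func(c net.Conn) {
+ * 		defer c.Close()
+ * 		// handle the connection
+ * 	}(conn)
+ * }
+ * ```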
+ * + * Multiple goroutines may invoke methods on a Listener simultaneously. + */ + interface Listener { + [key:string]: any; + /** + * Accept waits for and returns the next connection to the listener. + */ + accept(): Conn + /** + * Close closes the listener. + * Any blocked Accept operations will be unblocked and return errors. + */ + close(): void + /** + * Addr returns the listener's network address. + */ + addr(): Addr + } } /** @@ -21425,470 +21461,6 @@ namespace url { } } -namespace context { -} - -namespace net { - /** - * Addr represents a network end point address. - * - * The two methods [Addr.Network] and [Addr.String] conventionally return strings - * that can be passed as the arguments to [Dial], but the exact form - * and meaning of the strings is up to the implementation. - */ - interface Addr { - [key:string]: any; - network(): string // name of the network (for example, "tcp", "udp") - string(): string // string form of address (for example, "192.0.2.1:25", "[2001:db8::1]:80") - } - /** - * A Listener is a generic network listener for stream-oriented protocols. - * - * Multiple goroutines may invoke methods on a Listener simultaneously. - */ - interface Listener { - [key:string]: any; - /** - * Accept waits for and returns the next connection to the listener. - */ - accept(): Conn - /** - * Close closes the listener. - * Any blocked Accept operations will be unblocked and return errors. - */ - close(): void - /** - * Addr returns the listener's network address. - */ - addr(): Addr - } -} - -namespace jwt { - /** - * NumericDate represents a JSON numeric date value, as referenced at - * https://datatracker.ietf.org/doc/html/rfc7519#section-2. - */ - type _sygZdFG = time.Time - interface NumericDate extends _sygZdFG { - } - interface NumericDate { - /** - * MarshalJSON is an implementation of the json.RawMessage interface and serializes the UNIX epoch - * represented in NumericDate to a byte array, using the precision specified in TimePrecision. - */ - marshalJSON(): string|Array - } - interface NumericDate { - /** - * UnmarshalJSON is an implementation of the json.RawMessage interface and - * deserializes a [NumericDate] from a JSON representation, i.e. a - * [json.Number]. This number represents an UNIX epoch with either integer or - * non-integer seconds. - */ - unmarshalJSON(b: string|Array): void - } - /** - * ClaimStrings is basically just a slice of strings, but it can be either - * serialized from a string array or just a string. This type is necessary, - * since the "aud" claim can either be a single string or an array. - */ - interface ClaimStrings extends Array{} - interface ClaimStrings { - unmarshalJSON(data: string|Array): void - } - interface ClaimStrings { - marshalJSON(): string|Array - } -} - -namespace hook { - /** - * wrapped local Hook embedded struct to limit the public API surface. - */ - type _sessGoe = Hook - interface mainHook extends _sessGoe { - } -} - -namespace sql { - /** - * IsolationLevel is the transaction isolation level used in [TxOptions]. - */ - interface IsolationLevel extends Number{} - interface IsolationLevel { - /** - * String returns the name of the transaction isolation level. - */ - string(): string - } - /** - * DBStats contains database statistics. - */ - interface DBStats { - maxOpenConnections: number // Maximum number of open connections to the database. - /** - * Pool Status - */ - openConnections: number // The number of established connections both in use and idle. 
- inUse: number // The number of connections currently in use. - idle: number // The number of idle connections. - /** - * Counters - */ - waitCount: number // The total number of connections waited for. - waitDuration: time.Duration // The total time blocked waiting for a new connection. - maxIdleClosed: number // The total number of connections closed due to SetMaxIdleConns. - maxIdleTimeClosed: number // The total number of connections closed due to SetConnMaxIdleTime. - maxLifetimeClosed: number // The total number of connections closed due to SetConnMaxLifetime. - } - /** - * Conn represents a single database connection rather than a pool of database - * connections. Prefer running queries from [DB] unless there is a specific - * need for a continuous single database connection. - * - * A Conn must call [Conn.Close] to return the connection to the database pool - * and may do so concurrently with a running query. - * - * After a call to [Conn.Close], all operations on the - * connection fail with [ErrConnDone]. - */ - interface Conn { - } - interface Conn { - /** - * PingContext verifies the connection to the database is still alive. - */ - pingContext(ctx: context.Context): void - } - interface Conn { - /** - * ExecContext executes a query without returning any rows. - * The args are for any placeholder parameters in the query. - */ - execContext(ctx: context.Context, query: string, ...args: any[]): Result - } - interface Conn { - /** - * QueryContext executes a query that returns rows, typically a SELECT. - * The args are for any placeholder parameters in the query. - */ - queryContext(ctx: context.Context, query: string, ...args: any[]): (Rows) - } - interface Conn { - /** - * QueryRowContext executes a query that is expected to return at most one row. - * QueryRowContext always returns a non-nil value. Errors are deferred until - * the [*Row.Scan] method is called. - * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. - * Otherwise, the [*Row.Scan] scans the first selected row and discards - * the rest. - */ - queryRowContext(ctx: context.Context, query: string, ...args: any[]): (Row) - } - interface Conn { - /** - * PrepareContext creates a prepared statement for later queries or executions. - * Multiple queries or executions may be run concurrently from the - * returned statement. - * The caller must call the statement's [*Stmt.Close] method - * when the statement is no longer needed. - * - * The provided context is used for the preparation of the statement, not for the - * execution of the statement. - */ - prepareContext(ctx: context.Context, query: string): (Stmt) - } - interface Conn { - /** - * Raw executes f exposing the underlying driver connection for the - * duration of f. The driverConn must not be used outside of f. - * - * Once f returns and err is not [driver.ErrBadConn], the [Conn] will continue to be usable - * until [Conn.Close] is called. - */ - raw(f: (driverConn: any) => void): void - } - interface Conn { - /** - * BeginTx starts a transaction. - * - * The provided context is used until the transaction is committed or rolled back. - * If the context is canceled, the sql package will roll back - * the transaction. [Tx.Commit] will return an error if the context provided to - * BeginTx is canceled. - * - * The provided [TxOptions] is optional and may be nil if defaults should be used. - * If a non-default isolation level is used that the driver doesn't support, - * an error will be returned. 
- */ - beginTx(ctx: context.Context, opts: TxOptions): (Tx) - } - interface Conn { - /** - * Close returns the connection to the connection pool. - * All operations after a Close will return with [ErrConnDone]. - * Close is safe to call concurrently with other operations and will - * block until all other operations finish. It may be useful to first - * cancel any used context and then call close directly after. - */ - close(): void - } - /** - * ColumnType contains the name and type of a column. - */ - interface ColumnType { - } - interface ColumnType { - /** - * Name returns the name or alias of the column. - */ - name(): string - } - interface ColumnType { - /** - * Length returns the column type length for variable length column types such - * as text and binary field types. If the type length is unbounded the value will - * be [math.MaxInt64] (any database limits will still apply). - * If the column type is not variable length, such as an int, or if not supported - * by the driver ok is false. - */ - length(): [number, boolean] - } - interface ColumnType { - /** - * DecimalSize returns the scale and precision of a decimal type. - * If not applicable or if not supported ok is false. - */ - decimalSize(): [number, number, boolean] - } - interface ColumnType { - /** - * ScanType returns a Go type suitable for scanning into using [Rows.Scan]. - * If a driver does not support this property ScanType will return - * the type of an empty interface. - */ - scanType(): any - } - interface ColumnType { - /** - * Nullable reports whether the column may be null. - * If a driver does not support this property ok will be false. - */ - nullable(): [boolean, boolean] - } - interface ColumnType { - /** - * DatabaseTypeName returns the database system name of the column type. If an empty - * string is returned, then the driver type name is not supported. - * Consult your driver documentation for a list of driver data types. [ColumnType.Length] specifiers - * are not included. - * Common type names include "VARCHAR", "TEXT", "NVARCHAR", "DECIMAL", "BOOL", - * "INT", and "BIGINT". - */ - databaseTypeName(): string - } - /** - * Row is the result of calling [DB.QueryRow] to select a single row. - */ - interface Row { - } - interface Row { - /** - * Scan copies the columns from the matched row into the values - * pointed at by dest. See the documentation on [Rows.Scan] for details. - * If more than one row matches the query, - * Scan uses the first row and discards the rest. If no row matches - * the query, Scan returns [ErrNoRows]. - */ - scan(...dest: any[]): void - } - interface Row { - /** - * Err provides a way for wrapping packages to check for - * query errors without calling [Row.Scan]. - * Err returns the error, if any, that was encountered while running the query. - * If this error is not nil, this error will also be returned from [Row.Scan]. - */ - err(): void - } -} - -namespace subscriptions { -} - -namespace types { -} - -namespace search { -} - -namespace slog { - /** - * An Attr is a key-value pair. - */ - interface Attr { - key: string - value: Value - } - interface Attr { - /** - * Equal reports whether a and b have equal keys and values. - */ - equal(b: Attr): boolean - } - interface Attr { - string(): string - } - /** - * A Handler handles log records produced by a Logger. - * - * A typical handler may print log records to standard error, - * or write them to a file or database, or perhaps augment them - * with additional attributes and pass them on to another handler. 
- * - * Any of the Handler's methods may be called concurrently with itself - * or with other methods. It is the responsibility of the Handler to - * manage this concurrency. - * - * Users of the slog package should not invoke Handler methods directly. - * They should use the methods of [Logger] instead. - */ - interface Handler { - [key:string]: any; - /** - * Enabled reports whether the handler handles records at the given level. - * The handler ignores records whose level is lower. - * It is called early, before any arguments are processed, - * to save effort if the log event should be discarded. - * If called from a Logger method, the first argument is the context - * passed to that method, or context.Background() if nil was passed - * or the method does not take a context. - * The context is passed so Enabled can use its values - * to make a decision. - */ - enabled(_arg0: context.Context, _arg1: Level): boolean - /** - * Handle handles the Record. - * It will only be called when Enabled returns true. - * The Context argument is as for Enabled. - * It is present solely to provide Handlers access to the context's values. - * Canceling the context should not affect record processing. - * (Among other things, log messages may be necessary to debug a - * cancellation-related problem.) - * - * Handle methods that produce output should observe the following rules: - * ``` - * - If r.Time is the zero time, ignore the time. - * - If r.PC is zero, ignore it. - * - Attr's values should be resolved. - * - If an Attr's key and value are both the zero value, ignore the Attr. - * This can be tested with attr.Equal(Attr{}). - * - If a group's key is empty, inline the group's Attrs. - * - If a group has no Attrs (even if it has a non-empty key), - * ignore it. - * ``` - */ - handle(_arg0: context.Context, _arg1: Record): void - /** - * WithAttrs returns a new Handler whose attributes consist of - * both the receiver's attributes and the arguments. - * The Handler owns the slice: it may retain, modify or discard it. - */ - withAttrs(attrs: Array): Handler - /** - * WithGroup returns a new Handler with the given group appended to - * the receiver's existing groups. - * The keys of all subsequent attributes, whether added by With or in a - * Record, should be qualified by the sequence of group names. - * - * How this qualification happens is up to the Handler, so long as - * this Handler's attribute keys differ from those of another Handler - * with a different sequence of group names. - * - * A Handler should treat WithGroup as starting a Group of Attrs that ends - * at the end of the log event. That is, - * - * ``` - * logger.WithGroup("s").LogAttrs(ctx, level, msg, slog.Int("a", 1), slog.Int("b", 2)) - * ``` - * - * should behave like - * - * ``` - * logger.LogAttrs(ctx, level, msg, slog.Group("s", slog.Int("a", 1), slog.Int("b", 2))) - * ``` - * - * If the name is empty, WithGroup returns the receiver. - */ - withGroup(name: string): Handler - } - /** - * A Level is the importance or severity of a log event. - * The higher the level, the more important or severe the event. - */ - interface Level extends Number{} - interface Level { - /** - * String returns a name for the level. - * If the level has a name, then that name - * in uppercase is returned. - * If the level is between named values, then - * an integer is appended to the uppercased name. 
- * Examples: - * - * ``` - * LevelWarn.String() => "WARN" - * (LevelInfo+2).String() => "INFO+2" - * ``` - */ - string(): string - } - interface Level { - /** - * MarshalJSON implements [encoding/json.Marshaler] - * by quoting the output of [Level.String]. - */ - marshalJSON(): string|Array - } - interface Level { - /** - * UnmarshalJSON implements [encoding/json.Unmarshaler] - * It accepts any string produced by [Level.MarshalJSON], - * ignoring case. - * It also accepts numeric offsets that would result in a different string on - * output. For example, "Error-8" would marshal as "INFO". - */ - unmarshalJSON(data: string|Array): void - } - interface Level { - /** - * MarshalText implements [encoding.TextMarshaler] - * by calling [Level.String]. - */ - marshalText(): string|Array - } - interface Level { - /** - * UnmarshalText implements [encoding.TextUnmarshaler]. - * It accepts any string produced by [Level.MarshalText], - * ignoring case. - * It also accepts numeric offsets that would result in a different string on - * output. For example, "Error-8" would marshal as "INFO". - */ - unmarshalText(data: string|Array): void - } - interface Level { - /** - * Level returns the receiver. - * It implements [Leveler]. - */ - level(): Level - } - // @ts-ignore - import loginternal = internal -} - namespace bufio { /** * Reader implements buffering for an io.Reader object. @@ -22569,110 +22141,6 @@ namespace http { } } -namespace router { - // @ts-ignore - import validation = ozzo_validation - /** - * RouterGroup represents a collection of routes and other sub groups - * that share common pattern prefix and middlewares. - */ - interface RouterGroup { - prefix: string - middlewares: Array<(hook.Handler | undefined)> - } -} - -namespace cron { - /** - * Job defines a single registered cron job. - */ - interface Job { - } - interface Job { - /** - * Id returns the cron job id. - */ - id(): string - } - interface Job { - /** - * Expression returns the plain cron job schedule expression. - */ - expression(): string - } - interface Job { - /** - * Run runs the cron job function. - */ - run(): void - } - interface Job { - /** - * MarshalJSON implements [json.Marshaler] and export the current - * jobs data into valid JSON. - */ - marshalJSON(): string|Array - } -} - -namespace cobra { - interface PositionalArgs {(cmd: Command, args: Array): void } - // @ts-ignore - import flag = pflag - /** - * FParseErrWhitelist configures Flag parse errors to be ignored - */ - interface FParseErrWhitelist extends _TygojaAny{} - /** - * Group Structure to manage groups for commands - */ - interface Group { - id: string - title: string - } - /** - * CompletionOptions are the options to control shell completion - */ - interface CompletionOptions { - /** - * DisableDefaultCmd prevents Cobra from creating a default 'completion' command - */ - disableDefaultCmd: boolean - /** - * DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' flag - * for shells that support completion descriptions - */ - disableNoDescFlag: boolean - /** - * DisableDescriptions turns off all completion descriptions for shells - * that support them - */ - disableDescriptions: boolean - /** - * HiddenDefaultCmd makes the default 'completion' command hidden - */ - hiddenDefaultCmd: boolean - } - /** - * Completion is a string that can be used for completions - * - * two formats are supported: - * ``` - * - the completion choice - * - the completion choice with a textual description (separated by a TAB). 
- * ``` - * - * [CompletionWithDesc] can be used to create a completion string with a textual description. - * - * Note: Go type alias is used to provide a more descriptive name in the documentation, but any string can be used. - */ - interface Completion extends String{} - /** - * CompletionFunc is a function that provides completion results. - */ - interface CompletionFunc {(cmd: Command, args: Array, toComplete: string): [Array, ShellCompDirective] } -} - /** * Package oauth2 provides support for making * OAuth2 authorized and authenticated HTTP requests, @@ -22769,6 +22237,568 @@ namespace oauth2 { } } +namespace store { +} + +namespace jwt { + /** + * NumericDate represents a JSON numeric date value, as referenced at + * https://datatracker.ietf.org/doc/html/rfc7519#section-2. + */ + type _soCkAUH = time.Time + interface NumericDate extends _soCkAUH { + } + interface NumericDate { + /** + * MarshalJSON is an implementation of the json.RawMessage interface and serializes the UNIX epoch + * represented in NumericDate to a byte array, using the precision specified in TimePrecision. + */ + marshalJSON(): string|Array + } + interface NumericDate { + /** + * UnmarshalJSON is an implementation of the json.RawMessage interface and + * deserializes a [NumericDate] from a JSON representation, i.e. a + * [json.Number]. This number represents an UNIX epoch with either integer or + * non-integer seconds. + */ + unmarshalJSON(b: string|Array): void + } + /** + * ClaimStrings is basically just a slice of strings, but it can be either + * serialized from a string array or just a string. This type is necessary, + * since the "aud" claim can either be a single string or an array. + */ + interface ClaimStrings extends Array{} + interface ClaimStrings { + unmarshalJSON(data: string|Array): void + } + interface ClaimStrings { + marshalJSON(): string|Array + } +} + +namespace subscriptions { +} + +namespace sql { + /** + * IsolationLevel is the transaction isolation level used in [TxOptions]. + */ + interface IsolationLevel extends Number{} + interface IsolationLevel { + /** + * String returns the name of the transaction isolation level. + */ + string(): string + } + /** + * DBStats contains database statistics. + */ + interface DBStats { + maxOpenConnections: number // Maximum number of open connections to the database. + /** + * Pool Status + */ + openConnections: number // The number of established connections both in use and idle. + inUse: number // The number of connections currently in use. + idle: number // The number of idle connections. + /** + * Counters + */ + waitCount: number // The total number of connections waited for. + waitDuration: time.Duration // The total time blocked waiting for a new connection. + maxIdleClosed: number // The total number of connections closed due to SetMaxIdleConns. + maxIdleTimeClosed: number // The total number of connections closed due to SetConnMaxIdleTime. + maxLifetimeClosed: number // The total number of connections closed due to SetConnMaxLifetime. + } + /** + * Conn represents a single database connection rather than a pool of database + * connections. Prefer running queries from [DB] unless there is a specific + * need for a continuous single database connection. + * + * A Conn must call [Conn.Close] to return the connection to the database pool + * and may do so concurrently with a running query. + * + * After a call to [Conn.Close], all operations on the + * connection fail with [ErrConnDone]. 
+ */ + interface Conn { + } + interface Conn { + /** + * PingContext verifies the connection to the database is still alive. + */ + pingContext(ctx: context.Context): void + } + interface Conn { + /** + * ExecContext executes a query without returning any rows. + * The args are for any placeholder parameters in the query. + */ + execContext(ctx: context.Context, query: string, ...args: any[]): Result + } + interface Conn { + /** + * QueryContext executes a query that returns rows, typically a SELECT. + * The args are for any placeholder parameters in the query. + */ + queryContext(ctx: context.Context, query: string, ...args: any[]): (Rows) + } + interface Conn { + /** + * QueryRowContext executes a query that is expected to return at most one row. + * QueryRowContext always returns a non-nil value. Errors are deferred until + * the [*Row.Scan] method is called. + * If the query selects no rows, the [*Row.Scan] will return [ErrNoRows]. + * Otherwise, the [*Row.Scan] scans the first selected row and discards + * the rest. + */ + queryRowContext(ctx: context.Context, query: string, ...args: any[]): (Row) + } + interface Conn { + /** + * PrepareContext creates a prepared statement for later queries or executions. + * Multiple queries or executions may be run concurrently from the + * returned statement. + * The caller must call the statement's [*Stmt.Close] method + * when the statement is no longer needed. + * + * The provided context is used for the preparation of the statement, not for the + * execution of the statement. + */ + prepareContext(ctx: context.Context, query: string): (Stmt) + } + interface Conn { + /** + * Raw executes f exposing the underlying driver connection for the + * duration of f. The driverConn must not be used outside of f. + * + * Once f returns and err is not [driver.ErrBadConn], the [Conn] will continue to be usable + * until [Conn.Close] is called. + */ + raw(f: (driverConn: any) => void): void + } + interface Conn { + /** + * BeginTx starts a transaction. + * + * The provided context is used until the transaction is committed or rolled back. + * If the context is canceled, the sql package will roll back + * the transaction. [Tx.Commit] will return an error if the context provided to + * BeginTx is canceled. + * + * The provided [TxOptions] is optional and may be nil if defaults should be used. + * If a non-default isolation level is used that the driver doesn't support, + * an error will be returned. + */ + beginTx(ctx: context.Context, opts: TxOptions): (Tx) + } + interface Conn { + /** + * Close returns the connection to the connection pool. + * All operations after a Close will return with [ErrConnDone]. + * Close is safe to call concurrently with other operations and will + * block until all other operations finish. It may be useful to first + * cancel any used context and then call close directly after. + */ + close(): void + } + /** + * ColumnType contains the name and type of a column. + */ + interface ColumnType { + } + interface ColumnType { + /** + * Name returns the name or alias of the column. + */ + name(): string + } + interface ColumnType { + /** + * Length returns the column type length for variable length column types such + * as text and binary field types. If the type length is unbounded the value will + * be [math.MaxInt64] (any database limits will still apply). + * If the column type is not variable length, such as an int, or if not supported + * by the driver ok is false. 
+ */ + length(): [number, boolean] + } + interface ColumnType { + /** + * DecimalSize returns the scale and precision of a decimal type. + * If not applicable or if not supported ok is false. + */ + decimalSize(): [number, number, boolean] + } + interface ColumnType { + /** + * ScanType returns a Go type suitable for scanning into using [Rows.Scan]. + * If a driver does not support this property ScanType will return + * the type of an empty interface. + */ + scanType(): any + } + interface ColumnType { + /** + * Nullable reports whether the column may be null. + * If a driver does not support this property ok will be false. + */ + nullable(): [boolean, boolean] + } + interface ColumnType { + /** + * DatabaseTypeName returns the database system name of the column type. If an empty + * string is returned, then the driver type name is not supported. + * Consult your driver documentation for a list of driver data types. [ColumnType.Length] specifiers + * are not included. + * Common type names include "VARCHAR", "TEXT", "NVARCHAR", "DECIMAL", "BOOL", + * "INT", and "BIGINT". + */ + databaseTypeName(): string + } + /** + * Row is the result of calling [DB.QueryRow] to select a single row. + */ + interface Row { + } + interface Row { + /** + * Scan copies the columns from the matched row into the values + * pointed at by dest. See the documentation on [Rows.Scan] for details. + * If more than one row matches the query, + * Scan uses the first row and discards the rest. If no row matches + * the query, Scan returns [ErrNoRows]. + */ + scan(...dest: any[]): void + } + interface Row { + /** + * Err provides a way for wrapping packages to check for + * query errors without calling [Row.Scan]. + * Err returns the error, if any, that was encountered while running the query. + * If this error is not nil, this error will also be returned from [Row.Scan]. + */ + err(): void + } +} + +namespace types { +} + +namespace search { +} + +namespace cobra { + interface PositionalArgs {(cmd: Command, args: Array): void } + // @ts-ignore + import flag = pflag + /** + * FParseErrWhitelist configures Flag parse errors to be ignored + */ + interface FParseErrWhitelist extends _TygojaAny{} + /** + * Group Structure to manage groups for commands + */ + interface Group { + id: string + title: string + } + /** + * CompletionOptions are the options to control shell completion + */ + interface CompletionOptions { + /** + * DisableDefaultCmd prevents Cobra from creating a default 'completion' command + */ + disableDefaultCmd: boolean + /** + * DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' flag + * for shells that support completion descriptions + */ + disableNoDescFlag: boolean + /** + * DisableDescriptions turns off all completion descriptions for shells + * that support them + */ + disableDescriptions: boolean + /** + * HiddenDefaultCmd makes the default 'completion' command hidden + */ + hiddenDefaultCmd: boolean + } + /** + * Completion is a string that can be used for completions + * + * two formats are supported: + * ``` + * - the completion choice + * - the completion choice with a textual description (separated by a TAB). + * ``` + * + * [CompletionWithDesc] can be used to create a completion string with a textual description. + * + * Note: Go type alias is used to provide a more descriptive name in the documentation, but any string can be used. + */ + interface Completion extends String{} + /** + * CompletionFunc is a function that provides completion results. 
+ */ + interface CompletionFunc {(cmd: Command, args: Array, toComplete: string): [Array, ShellCompDirective] } +} + +namespace cron { + /** + * Job defines a single registered cron job. + */ + interface Job { + } + interface Job { + /** + * Id returns the cron job id. + */ + id(): string + } + interface Job { + /** + * Expression returns the plain cron job schedule expression. + */ + expression(): string + } + interface Job { + /** + * Run runs the cron job function. + */ + run(): void + } + interface Job { + /** + * MarshalJSON implements [json.Marshaler] and export the current + * jobs data into valid JSON. + */ + marshalJSON(): string|Array + } +} + +namespace hook { + /** + * wrapped local Hook embedded struct to limit the public API surface. + */ + type _sAnQlNo = Hook + interface mainHook extends _sAnQlNo { + } +} + +namespace slog { + /** + * An Attr is a key-value pair. + */ + interface Attr { + key: string + value: Value + } + interface Attr { + /** + * Equal reports whether a and b have equal keys and values. + */ + equal(b: Attr): boolean + } + interface Attr { + string(): string + } + /** + * A Handler handles log records produced by a Logger. + * + * A typical handler may print log records to standard error, + * or write them to a file or database, or perhaps augment them + * with additional attributes and pass them on to another handler. + * + * Any of the Handler's methods may be called concurrently with itself + * or with other methods. It is the responsibility of the Handler to + * manage this concurrency. + * + * Users of the slog package should not invoke Handler methods directly. + * They should use the methods of [Logger] instead. + */ + interface Handler { + [key:string]: any; + /** + * Enabled reports whether the handler handles records at the given level. + * The handler ignores records whose level is lower. + * It is called early, before any arguments are processed, + * to save effort if the log event should be discarded. + * If called from a Logger method, the first argument is the context + * passed to that method, or context.Background() if nil was passed + * or the method does not take a context. + * The context is passed so Enabled can use its values + * to make a decision. + */ + enabled(_arg0: context.Context, _arg1: Level): boolean + /** + * Handle handles the Record. + * It will only be called when Enabled returns true. + * The Context argument is as for Enabled. + * It is present solely to provide Handlers access to the context's values. + * Canceling the context should not affect record processing. + * (Among other things, log messages may be necessary to debug a + * cancellation-related problem.) + * + * Handle methods that produce output should observe the following rules: + * ``` + * - If r.Time is the zero time, ignore the time. + * - If r.PC is zero, ignore it. + * - Attr's values should be resolved. + * - If an Attr's key and value are both the zero value, ignore the Attr. + * This can be tested with attr.Equal(Attr{}). + * - If a group's key is empty, inline the group's Attrs. + * - If a group has no Attrs (even if it has a non-empty key), + * ignore it. + * ``` + */ + handle(_arg0: context.Context, _arg1: Record): void + /** + * WithAttrs returns a new Handler whose attributes consist of + * both the receiver's attributes and the arguments. + * The Handler owns the slice: it may retain, modify or discard it. 
+ */ + withAttrs(attrs: Array): Handler + /** + * WithGroup returns a new Handler with the given group appended to + * the receiver's existing groups. + * The keys of all subsequent attributes, whether added by With or in a + * Record, should be qualified by the sequence of group names. + * + * How this qualification happens is up to the Handler, so long as + * this Handler's attribute keys differ from those of another Handler + * with a different sequence of group names. + * + * A Handler should treat WithGroup as starting a Group of Attrs that ends + * at the end of the log event. That is, + * + * ``` + * logger.WithGroup("s").LogAttrs(ctx, level, msg, slog.Int("a", 1), slog.Int("b", 2)) + * ``` + * + * should behave like + * + * ``` + * logger.LogAttrs(ctx, level, msg, slog.Group("s", slog.Int("a", 1), slog.Int("b", 2))) + * ``` + * + * If the name is empty, WithGroup returns the receiver. + */ + withGroup(name: string): Handler + } + /** + * A Level is the importance or severity of a log event. + * The higher the level, the more important or severe the event. + */ + interface Level extends Number{} + interface Level { + /** + * String returns a name for the level. + * If the level has a name, then that name + * in uppercase is returned. + * If the level is between named values, then + * an integer is appended to the uppercased name. + * Examples: + * + * ``` + * LevelWarn.String() => "WARN" + * (LevelInfo+2).String() => "INFO+2" + * ``` + */ + string(): string + } + interface Level { + /** + * MarshalJSON implements [encoding/json.Marshaler] + * by quoting the output of [Level.String]. + */ + marshalJSON(): string|Array + } + interface Level { + /** + * UnmarshalJSON implements [encoding/json.Unmarshaler] + * It accepts any string produced by [Level.MarshalJSON], + * ignoring case. + * It also accepts numeric offsets that would result in a different string on + * output. For example, "Error-8" would marshal as "INFO". + */ + unmarshalJSON(data: string|Array): void + } + interface Level { + /** + * MarshalText implements [encoding.TextMarshaler] + * by calling [Level.String]. + */ + marshalText(): string|Array + } + interface Level { + /** + * UnmarshalText implements [encoding.TextUnmarshaler]. + * It accepts any string produced by [Level.MarshalText], + * ignoring case. + * It also accepts numeric offsets that would result in a different string on + * output. For example, "Error-8" would marshal as "INFO". + */ + unmarshalText(data: string|Array): void + } + interface Level { + /** + * Level returns the receiver. + * It implements [Leveler]. + */ + level(): Level + } + // @ts-ignore + import loginternal = internal +} + +namespace router { + // @ts-ignore + import validation = ozzo_validation + /** + * RouterGroup represents a collection of routes and other sub groups + * that share common pattern prefix and middlewares. + */ + interface RouterGroup { + prefix: string + middlewares: Array<(hook.Handler | undefined)> + } +} + +namespace url { + /** + * The Userinfo type is an immutable encapsulation of username and + * password details for a [URL]. An existing Userinfo value is guaranteed + * to have a username set (potentially empty, as allowed by RFC 2396), + * and optionally a password. + */ + interface Userinfo { + } + interface Userinfo { + /** + * Username returns the username. + */ + username(): string + } + interface Userinfo { + /** + * Password returns the password in case it is set, and whether it is set. 
+ */ + password(): [string, boolean] + } + interface Userinfo { + /** + * String returns the encoded userinfo information in the standard form + * of "username[:password]". + */ + string(): string + } +} + namespace multipart { /** * A Part represents a single part in a multipart body. @@ -22808,36 +22838,6 @@ namespace multipart { } } -namespace url { - /** - * The Userinfo type is an immutable encapsulation of username and - * password details for a [URL]. An existing Userinfo value is guaranteed - * to have a username set (potentially empty, as allowed by RFC 2396), - * and optionally a password. - */ - interface Userinfo { - } - interface Userinfo { - /** - * Username returns the username. - */ - username(): string - } - interface Userinfo { - /** - * Password returns the password in case it is set, and whether it is set. - */ - password(): [string, boolean] - } - interface Userinfo { - /** - * String returns the encoded userinfo information in the standard form - * of "username[:password]". - */ - string(): string - } -} - namespace http { /** * SameSite allows a server to define a cookie attribute making it impossible for @@ -22854,6 +22854,9 @@ namespace http { import urlpkg = url } +namespace oauth2 { +} + namespace cobra { // @ts-ignore import flag = pflag @@ -22864,9 +22867,6 @@ namespace cobra { interface ShellCompDirective extends Number{} } -namespace oauth2 { -} - namespace slog { // @ts-ignore import loginternal = internal