Mirror of https://github.com/NginxProxyManager/nginx-proxy-manager.git, synced 2025-06-18 02:06:25 +00:00
Compare commits
32 Commits
SHA1 (author and date columns were not preserved in the mirror):

e1bcef6e5c, 81f51f9e2d, 661953db25, 065c2dac42, 2926844cbe, c1960f3793, 11a29a8b67, c40e48e678,
124cb18e17, 5ac9dc0758, 9a799d51ce, 77eb618758, 79fedfcea4, 8fdb8ac853, 4fdc80be01, f8e6c8d018,
c3469de61b, ea61b15a40, 60175e6d8c, 2a07445005, dad3e1da7c, 82d9452001, 095bc8f676, 8c15340b83,
a13f7c3792, 6748985669, e2957f070b, fccbde1371, fec36834f7, 00aeef75b6, 5b7682f13c, b30fcb50c8
Jenkinsfile (vendored, 12 lines changed)
```diff
@@ -14,9 +14,9 @@ pipeline {
     ansiColor('xterm')
   }
   environment {
-    IMAGE = "nginx-proxy-manager"
+    IMAGE = 'nginx-proxy-manager'
     BUILD_VERSION = getVersion()
-    MAJOR_VERSION = "2"
+    MAJOR_VERSION = '2'
     BRANCH_LOWER = "${BRANCH_NAME.toLowerCase().replaceAll('/', '-')}"
    COMPOSE_PROJECT_NAME = "npm_${BRANCH_LOWER}_${BUILD_NUMBER}"
     COMPOSE_FILE = 'docker/docker-compose.ci.yml'
@@ -102,8 +102,8 @@ pipeline {
       always {
         // Dumps to analyze later
         sh 'mkdir -p debug'
-        sh 'docker-compose logs fullstack-sqlite | gzip > debug/docker_fullstack_sqlite.log.gz'
-        sh 'docker-compose logs db | gzip > debug/docker_db.log.gz'
+        sh 'docker-compose logs fullstack-sqlite > debug/docker_fullstack_sqlite.log'
+        sh 'docker-compose logs db > debug/docker_db.log'
         // Cypress videos and screenshot artifacts
         dir(path: 'test/results') {
           archiveArtifacts allowEmptyArchive: true, artifacts: '**/*', excludes: '**/*.xml'
@@ -128,8 +128,8 @@ pipeline {
       always {
         // Dumps to analyze later
         sh 'mkdir -p debug'
-        sh 'docker-compose logs fullstack-mysql | gzip > debug/docker_fullstack_mysql.log.gz'
-        sh 'docker-compose logs db | gzip > debug/docker_db.log.gz'
+        sh 'docker-compose logs fullstack-mysql > debug/docker_fullstack_mysql.log'
+        sh 'docker-compose logs db > debug/docker_db.log'
         // Cypress videos and screenshot artifacts
         dir(path: 'test/results') {
           archiveArtifacts allowEmptyArchive: true, artifacts: '**/*', excludes: '**/*.xml'
```
README.md

````diff
@@ -1,7 +1,7 @@
 <p align="center">
   <img src="https://nginxproxymanager.com/github.png">
   <br><br>
-  <img src="https://img.shields.io/badge/version-2.9.19-green.svg?style=for-the-badge">
+  <img src="https://img.shields.io/badge/version-2.10.0-green.svg?style=for-the-badge">
   <a href="https://hub.docker.com/repository/docker/jc21/nginx-proxy-manager">
     <img src="https://img.shields.io/docker/stars/jc21/nginx-proxy-manager.svg?style=for-the-badge">
   </a>
@@ -56,7 +56,7 @@ I won't go in to too much detail here but here are the basics for someone new to
 2. Create a docker-compose.yml file similar to this:
 
 ```yml
-version: '3'
+version: '3.8'
 services:
   app:
     image: 'jc21/nginx-proxy-manager:latest'
@@ -70,6 +70,8 @@ services:
       - ./letsencrypt:/etc/letsencrypt
 ```
 
+This is the bare minimum configuration required. See the [documentation](https://nginxproxymanager.com/setup/) for more.
+
 3. Bring up your stack by running
 
 ```bash
````
backend/app.js

```diff
@@ -2,6 +2,7 @@ const express = require('express');
 const bodyParser = require('body-parser');
 const fileUpload = require('express-fileupload');
 const compression = require('compression');
+const config = require('./lib/config');
 const log = require('./logger').express;
 
 /**
@@ -24,7 +25,7 @@ app.enable('trust proxy', ['loopback', 'linklocal', 'uniquelocal']);
 app.enable('strict routing');
 
 // pretty print JSON when not live
-if (process.env.NODE_ENV !== 'production') {
+if (config.debug()) {
     app.set('json spaces', 2);
 }
 
@@ -65,7 +66,7 @@ app.use(function (err, req, res, next) {
         }
     };
 
-    if (process.env.NODE_ENV === 'development' || (req.baseUrl + req.path).includes('nginx/certificates')) {
+    if (config.debug() || (req.baseUrl + req.path).includes('nginx/certificates')) {
         payload.debug = {
             stack:    typeof err.stack !== 'undefined' && err.stack ? err.stack.split('\n') : null,
             previous: err.previous
@@ -74,7 +75,7 @@ app.use(function (err, req, res, next) {
 
     // Not every error is worth logging - but this is good for now until it gets annoying.
     if (typeof err.stack !== 'undefined' && err.stack) {
-        if (process.env.NODE_ENV === 'development' || process.env.DEBUG) {
+        if (config.debug()) {
             log.debug(err.stack);
         } else if (typeof err.public == 'undefined' || !err.public) {
             log.warn(err.message);
```
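This pattern repeats throughout the commit: every `process.env.NODE_ENV` test is replaced by `config.debug()`. A minimal sketch of the resulting behavior, assuming only that `DEBUG` is the controlling variable (which is how the new `backend/lib/config.js` later in this diff defines it):

```js
const express = require('express');

const app = express();

// Equivalent of the new config.debug(): a plain DEBUG env-var check,
// independent of NODE_ENV.
const debug = () => !!process.env.DEBUG;

if (debug()) {
	// pretty-print JSON responses while debugging, as app.js now does
	app.set('json spaces', 2);
}
```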
backend/db.js

```diff
@@ -1,33 +1,27 @@
-const config = require('config');
+const config = require('./lib/config');
 
 if (!config.has('database')) {
-	throw new Error('Database config does not exist! Please read the instructions: https://github.com/jc21/nginx-proxy-manager/blob/master/doc/INSTALL.md');
+	throw new Error('Database config does not exist! Please read the instructions: https://nginxproxymanager.com/setup/');
 }
 
 function generateDbConfig() {
-	if (config.database.engine === 'knex-native') {
-		return config.database.knex;
-	} else
-		return {
-			client:     config.database.engine,
-			connection: {
-				host:     config.database.host,
-				user:     config.database.user,
-				password: config.database.password,
-				database: config.database.name,
-				port:     config.database.port
-			},
-			migrations: {
-				tableName: 'migrations'
-			}
-		};
+	const cfg = config.get('database');
+	if (cfg.engine === 'knex-native') {
+		return cfg.knex;
+	}
+	return {
+		client:     cfg.engine,
+		connection: {
+			host:     cfg.host,
+			user:     cfg.user,
+			password: cfg.password,
+			database: cfg.name,
+			port:     cfg.port
+		},
+		migrations: {
+			tableName: 'migrations'
+		}
+	};
 }
 
-
-let data = generateDbConfig();
-
-if (typeof config.database.version !== 'undefined') {
-	data.version = config.database.version;
-}
-
-module.exports = require('knex')(data);
+module.exports = require('knex')(generateDbConfig());
```
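To make the two branches of `generateDbConfig()` concrete, here is a sketch of the objects it hands to knex for each engine; the literal values are illustrative, not taken from the diff:

```js
// engine === 'knex-native': config.get('database').knex is passed through as-is,
// e.g. for sqlite:
const sqliteConfig = {
	client:           'sqlite3',
	connection:       { filename: '/data/database.sqlite' },
	useNullAsDefault: true
};

// any other engine: the flat settings are mapped onto a standard knex config:
const mysqlConfig = {
	client:     'mysql',
	connection: {
		host:     'db',   // illustrative values only
		user:     'npm',
		password: 'npm',
		database: 'npm',
		port:     3306
	},
	migrations: { tableName: 'migrations' }
};
```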
backend/index.js

```diff
@@ -3,9 +3,6 @@
 const logger = require('./logger').global;
 
 async function appStart () {
-	// Create config file db settings if environment variables have been set
-	await createDbConfigFromEnvironment();
-
 	const migrate = require('./migrate');
 	const setup   = require('./setup');
 	const app     = require('./app');
@@ -42,90 +39,6 @@ async function appStart () {
 		});
 }
 
-async function createDbConfigFromEnvironment() {
-	return new Promise((resolve, reject) => {
-		const envMysqlHost = process.env.DB_MYSQL_HOST || null;
-		const envMysqlPort = process.env.DB_MYSQL_PORT || null;
-		const envMysqlUser = process.env.DB_MYSQL_USER || null;
-		const envMysqlName = process.env.DB_MYSQL_NAME || null;
-		let envSqliteFile  = process.env.DB_SQLITE_FILE || null;
-
-		const fs       = require('fs');
-		const filename = (process.env.NODE_CONFIG_DIR || './config') + '/' + (process.env.NODE_ENV || 'default') + '.json';
-		let configData = {};
-
-		try {
-			configData = require(filename);
-		} catch (err) {
-			// do nothing
-		}
-
-		if (configData.database && configData.database.engine && !configData.database.fromEnv) {
-			logger.info('Manual db configuration already exists, skipping config creation from environment variables');
-			resolve();
-			return;
-		}
-
-		if ((!envMysqlHost || !envMysqlPort || !envMysqlUser || !envMysqlName) && !envSqliteFile){
-			envSqliteFile = '/data/database.sqlite';
-			logger.info(`No valid environment variables for database provided, using default SQLite file '${envSqliteFile}'`);
-		}
-
-		if (envMysqlHost && envMysqlPort && envMysqlUser && envMysqlName) {
-			const newConfig = {
-				fromEnv:  true,
-				engine:   'mysql',
-				host:     envMysqlHost,
-				port:     envMysqlPort,
-				user:     envMysqlUser,
-				password: process.env.DB_MYSQL_PASSWORD,
-				name:     envMysqlName,
-			};
-
-			if (JSON.stringify(configData.database) === JSON.stringify(newConfig)) {
-				// Config is unchanged, skip overwrite
-				resolve();
-				return;
-			}
-
-			logger.info('Generating MySQL knex configuration from environment variables');
-			configData.database = newConfig;
-
-		} else {
-			const newConfig = {
-				fromEnv: true,
-				engine:  'knex-native',
-				knex:    {
-					client:     'sqlite3',
-					connection: {
-						filename: envSqliteFile
-					},
-					useNullAsDefault: true
-				}
-			};
-			if (JSON.stringify(configData.database) === JSON.stringify(newConfig)) {
-				// Config is unchanged, skip overwrite
-				resolve();
-				return;
-			}
-
-			logger.info('Generating SQLite knex configuration');
-			configData.database = newConfig;
-		}
-
-		// Write config
-		fs.writeFile(filename, JSON.stringify(configData, null, 2), (err) => {
-			if (err) {
-				logger.error('Could not write db config to config file: ' + filename);
-				reject(err);
-			} else {
-				logger.debug('Wrote db configuration to config file: ' + filename);
-				resolve();
-			}
-		});
-	});
-}
-
 try {
 	appStart();
 } catch (err) {
```
backend/internal/access-list.js

```diff
@@ -3,13 +3,13 @@ const fs = require('fs');
 const batchflow             = require('batchflow');
 const logger                = require('../logger').access;
 const error                 = require('../lib/error');
+const utils                 = require('../lib/utils');
 const accessListModel       = require('../models/access_list');
 const accessListAuthModel   = require('../models/access_list_auth');
 const accessListClientModel = require('../models/access_list_client');
 const proxyHostModel        = require('../models/proxy_host');
 const internalAuditLog      = require('./audit-log');
 const internalNginx         = require('./nginx');
-const utils                 = require('../lib/utils');
 
 function omissions () {
 	return ['is_deleted'];
@@ -27,13 +27,13 @@ const internalAccessList = {
 			.then((/*access_data*/) => {
 				return accessListModel
 					.query()
-					.omit(omissions())
 					.insertAndFetch({
 						name:          data.name,
 						satisfy_any:   data.satisfy_any,
 						pass_auth:     data.pass_auth,
 						owner_user_id: access.token.getUserId(1)
-					});
+					})
+					.then(utils.omitRow(omissions()));
 			})
 			.then((row) => {
 				data.id = row.id;
@@ -256,35 +256,31 @@ const internalAccessList = {
 				.joinRaw('LEFT JOIN `proxy_host` ON `proxy_host`.`access_list_id` = `access_list`.`id` AND `proxy_host`.`is_deleted` = 0')
 				.where('access_list.is_deleted', 0)
 				.andWhere('access_list.id', data.id)
-				.allowEager('[owner,items,clients,proxy_hosts.[certificate,access_list.[clients,items]]]')
-				.omit(['access_list.is_deleted'])
+				.allowGraph('[owner,items,clients,proxy_hosts.[certificate,access_list.[clients,items]]]')
 				.first();
 
 			if (access_data.permission_visibility !== 'all') {
 				query.andWhere('access_list.owner_user_id', access.token.getUserId(1));
 			}
 
-			// Custom omissions
-			if (typeof data.omit !== 'undefined' && data.omit !== null) {
-				query.omit(data.omit);
-			}
-
 			if (typeof data.expand !== 'undefined' && data.expand !== null) {
-				query.eager('[' + data.expand.join(', ') + ']');
+				query.withGraphFetched('[' + data.expand.join(', ') + ']');
 			}
 
-			return query;
+			return query.then(utils.omitRow(omissions()));
 		})
 		.then((row) => {
-			if (row) {
-				if (!skip_masking && typeof row.items !== 'undefined' && row.items) {
-					row = internalAccessList.maskItems(row);
-				}
-
-				return _.omit(row, omissions());
-			} else {
+			if (!row) {
 				throw new error.ItemNotFoundError(data.id);
 			}
+			if (!skip_masking && typeof row.items !== 'undefined' && row.items) {
+				row = internalAccessList.maskItems(row);
+			}
+			// Custom omissions
+			if (typeof data.omit !== 'undefined' && data.omit !== null) {
+				row = _.omit(row, data.omit);
+			}
+			return row;
 		});
 	},
@@ -381,8 +377,7 @@ const internalAccessList = {
 			.joinRaw('LEFT JOIN `proxy_host` ON `proxy_host`.`access_list_id` = `access_list`.`id` AND `proxy_host`.`is_deleted` = 0')
 			.where('access_list.is_deleted', 0)
 			.groupBy('access_list.id')
-			.omit(['access_list.is_deleted'])
-			.allowEager('[owner,items,clients]')
+			.allowGraph('[owner,items,clients]')
 			.orderBy('access_list.name', 'ASC');
 
 		if (access_data.permission_visibility !== 'all') {
@@ -397,10 +392,10 @@ const internalAccessList = {
 		}
 
 		if (typeof expand !== 'undefined' && expand !== null) {
-			query.eager('[' + expand.join(', ') + ']');
+			query.withGraphFetched('[' + expand.join(', ') + ']');
 		}
 
-		return query;
+		return query.then(utils.omitRows(omissions()));
 	})
 	.then((rows) => {
 		if (rows) {
```
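The query changes here are the commit's core pattern, repeated in every internal module below: Objection's removed v1 APIs are swapped for their v2 equivalents (`allowEager` to `allowGraph`, `eager` to `withGraphFetched`), and since `QueryBuilder#omit()` no longer exists, column omission moves out of the builder and into a `.then()` using the new `utils.omitRow()`. A condensed before/after sketch, with a hypothetical `model` standing in:

```js
const _ = require('lodash');

// Before (Objection v1 style):
//   model.query().allowEager('[owner]').omit(['is_deleted']).first();
//   query.eager('[owner]');

// After (Objection v2 style):
function getOne(model, id, expand) {
	const query = model
		.query()
		.where('is_deleted', 0)
		.andWhere('id', id)
		.allowGraph('[owner]')
		.first();

	if (expand) {
		query.withGraphFetched('[' + expand.join(', ') + ']');
	}

	// omission now happens on the fetched row, not in the builder
	return query.then((row) => _.omit(row, ['is_deleted']));
}
```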
backend/internal/audit-log.js

```diff
@@ -19,7 +19,7 @@ const internalAuditLog = {
 			.orderBy('created_on', 'DESC')
 			.orderBy('id', 'DESC')
 			.limit(100)
-			.allowEager('[user]');
+			.allowGraph('[user]');
 
 		// Query is used for searching
 		if (typeof search_query === 'string') {
@@ -29,7 +29,7 @@ const internalAuditLog = {
 		}
 
 		if (typeof expand !== 'undefined' && expand !== null) {
-			query.eager('[' + expand.join(', ') + ']');
+			query.withGraphFetched('[' + expand.join(', ') + ']');
 		}
 
 		return query;
```
backend/internal/certificate.js

```diff
@@ -1,22 +1,24 @@
-const _ = require('lodash');
-const fs = require('fs');
-const https = require('https');
-const tempWrite = require('temp-write');
-const moment = require('moment');
-const logger = require('../logger').ssl;
-const error = require('../lib/error');
-const utils = require('../lib/utils');
-const certificateModel = require('../models/certificate');
-const dnsPlugins = require('../global/certbot-dns-plugins');
-const internalAuditLog = require('./audit-log');
-const internalNginx = require('./nginx');
-const internalHost = require('./host');
-const letsencryptStaging = process.env.NODE_ENV !== 'production';
+const _ = require('lodash');
+const fs = require('fs');
+const https = require('https');
+const tempWrite = require('temp-write');
+const moment = require('moment');
+const logger = require('../logger').ssl;
+const config = require('../lib/config');
+const error = require('../lib/error');
+const utils = require('../lib/utils');
+const certificateModel = require('../models/certificate');
+const dnsPlugins = require('../global/certbot-dns-plugins');
+const internalAuditLog = require('./audit-log');
+const internalNginx = require('./nginx');
+const internalHost = require('./host');
+const archiver = require('archiver');
+const path = require('path');
+const { isArray } = require('lodash');
+
+const letsencryptStaging = config.useLetsencryptStaging();
 const letsencryptConfig = '/etc/letsencrypt.ini';
 const certbotCommand = 'certbot';
-const archiver = require('archiver');
-const path = require('path');
-const { isArray } = require('lodash');
 
 function omissions() {
 	return ['is_deleted'];
@@ -46,6 +48,8 @@ const internalCertificate = {
 
 		const cmd = certbotCommand + ' renew --non-interactive --quiet ' +
 			'--config "' + letsencryptConfig + '" ' +
+			'--work-dir "/tmp/letsencrypt-lib" ' +
+			'--logs-dir "/tmp/letsencrypt-log" ' +
 			'--preferred-challenges "dns,http" ' +
 			'--disable-hook-validation ' +
 			(letsencryptStaging ? '--staging' : '');
@@ -121,8 +125,8 @@ const internalCertificate = {
 
 				return certificateModel
 					.query()
-					.omit(omissions())
-					.insertAndFetch(data);
+					.insertAndFetch(data)
+					.then(utils.omitRow(omissions()));
 			})
 			.then((certificate) => {
 				if (certificate.provider === 'letsencrypt') {
@@ -269,8 +273,8 @@ const internalCertificate = {
 
 				return certificateModel
 					.query()
-					.omit(omissions())
 					.patchAndFetchById(row.id, data)
+					.then(utils.omitRow(omissions()))
 					.then((saved_row) => {
 						saved_row.meta = internalCertificate.cleanMeta(saved_row.meta);
 						data.meta      = internalCertificate.cleanMeta(data.meta);
@@ -288,7 +292,7 @@ const internalCertificate = {
 							meta: _.omit(data, ['expires_on']) // this prevents json circular reference because expires_on might be raw
 						})
 							.then(() => {
-								return _.omit(saved_row, omissions());
+								return saved_row;
 							});
 					});
 			});
@@ -313,30 +317,28 @@ const internalCertificate = {
 			.query()
 			.where('is_deleted', 0)
 			.andWhere('id', data.id)
-			.allowEager('[owner]')
+			.allowGraph('[owner]')
 			.first();
 
 		if (access_data.permission_visibility !== 'all') {
 			query.andWhere('owner_user_id', access.token.getUserId(1));
 		}
 
-		// Custom omissions
-		if (typeof data.omit !== 'undefined' && data.omit !== null) {
-			query.omit(data.omit);
-		}
-
 		if (typeof data.expand !== 'undefined' && data.expand !== null) {
-			query.eager('[' + data.expand.join(', ') + ']');
+			query.withGraphFetched('[' + data.expand.join(', ') + ']');
 		}
 
-		return query;
+		return query.then(utils.omitRow(omissions()));
 	})
 	.then((row) => {
-		if (row) {
-			return _.omit(row, omissions());
-		} else {
+		if (!row) {
 			throw new error.ItemNotFoundError(data.id);
 		}
+		// Custom omissions
+		if (typeof data.omit !== 'undefined' && data.omit !== null) {
+			row = _.omit(row, data.omit);
+		}
+		return row;
 	});
 },
@@ -466,8 +468,7 @@ const internalCertificate = {
 			.query()
 			.where('is_deleted', 0)
 			.groupBy('id')
-			.omit(['is_deleted'])
-			.allowEager('[owner]')
+			.allowGraph('[owner]')
 			.orderBy('nice_name', 'ASC');
 
 		if (access_data.permission_visibility !== 'all') {
@@ -482,10 +483,10 @@ const internalCertificate = {
 		}
 
 		if (typeof expand !== 'undefined' && expand !== null) {
-			query.eager('[' + expand.join(', ') + ']');
+			query.withGraphFetched('[' + expand.join(', ') + ']');
 		}
 
-		return query;
+		return query.then(utils.omitRows(omissions()));
 	});
 },
@@ -662,7 +663,6 @@ const internalCertificate = {
 				meta: _.clone(row.meta) // Prevent the update method from changing this value that we'll use later
 			})
 			.then((certificate) => {
-				console.log('ROWMETA:', row.meta);
 				certificate.meta = row.meta;
 				return internalCertificate.writeCustomCert(certificate);
 			});
@@ -837,6 +837,8 @@ const internalCertificate = {
 
 		const cmd = certbotCommand + ' certonly ' +
 			'--config "' + letsencryptConfig + '" ' +
+			'--work-dir "/tmp/letsencrypt-lib" ' +
+			'--logs-dir "/tmp/letsencrypt-log" ' +
 			'--cert-name "npm-' + certificate.id + '" ' +
 			'--agree-tos ' +
 			'--authenticator webroot ' +
@@ -875,13 +877,15 @@ const internalCertificate = {
 		const escapedCredentials = certificate.meta.dns_provider_credentials.replaceAll('\'', '\\\'').replaceAll('\\', '\\\\');
 		const credentialsCmd     = 'mkdir -p /etc/letsencrypt/credentials 2> /dev/null; echo \'' + escapedCredentials + '\' > \'' + credentialsLocation + '\' && chmod 600 \'' + credentialsLocation + '\'';
 		// we call `. /opt/certbot/bin/activate` (`.` is alternative to `source` in dash) to access certbot venv
-		let prepareCmd = '. /opt/certbot/bin/activate && pip install ' + dns_plugin.package_name + (dns_plugin.version_requirement || '') + ' ' + dns_plugin.dependencies + ' && deactivate';
+		const prepareCmd = '. /opt/certbot/bin/activate && pip install --no-cache-dir --user ' + dns_plugin.package_name + (dns_plugin.version_requirement || '') + ' ' + dns_plugin.dependencies + ' && deactivate';
 
 		// Whether the plugin has a --<name>-credentials argument
 		const hasConfigArg = certificate.meta.dns_provider !== 'route53';
 
 		let mainCmd = certbotCommand + ' certonly ' +
 			'--config "' + letsencryptConfig + '" ' +
+			'--work-dir "/tmp/letsencrypt-lib" ' +
+			'--logs-dir "/tmp/letsencrypt-log" ' +
 			'--cert-name "npm-' + certificate.id + '" ' +
 			'--agree-tos ' +
 			'--email "' + certificate.meta.letsencrypt_email + '" ' +
@@ -978,6 +982,8 @@ const internalCertificate = {
 
 		const cmd = certbotCommand + ' renew --force-renewal ' +
 			'--config "' + letsencryptConfig + '" ' +
+			'--work-dir "/tmp/letsencrypt-lib" ' +
+			'--logs-dir "/tmp/letsencrypt-log" ' +
 			'--cert-name "npm-' + certificate.id + '" ' +
 			'--preferred-challenges "dns,http" ' +
 			'--no-random-sleep-on-renew ' +
@@ -1008,6 +1014,8 @@ const internalCertificate = {
 
 		let mainCmd = certbotCommand + ' renew ' +
 			'--config "' + letsencryptConfig + '" ' +
+			'--work-dir "/tmp/letsencrypt-lib" ' +
+			'--logs-dir "/tmp/letsencrypt-log" ' +
 			'--cert-name "npm-' + certificate.id + '" ' +
 			'--disable-hook-validation ' +
 			'--no-random-sleep-on-renew ' +
```
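Taken together, the certbot hunks pin every invocation to an explicit `--config`, `--work-dir` and `--logs-dir`, and staging mode now follows `LE_STAGING` via `config.useLetsencryptStaging()` rather than `NODE_ENV`. A sketch of how one of these command strings is assembled, using only flags visible in the diff (the certificate id is illustrative):

```js
const letsencryptConfig  = '/etc/letsencrypt.ini';
const certbotCommand     = 'certbot';
const letsencryptStaging = !!process.env.LE_STAGING; // what config.useLetsencryptStaging() checks

const certificateId = 7; // illustrative

const cmd = certbotCommand + ' renew --force-renewal ' +
	'--config "' + letsencryptConfig + '" ' +
	'--work-dir "/tmp/letsencrypt-lib" ' +
	'--logs-dir "/tmp/letsencrypt-log" ' +
	'--cert-name "npm-' + certificateId + '" ' +
	'--preferred-challenges "dns,http" ' +
	'--no-random-sleep-on-renew ' +
	(letsencryptStaging ? '--staging' : '');

console.log(cmd);
```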
backend/internal/dead-host.js

```diff
@@ -1,5 +1,6 @@
 const _ = require('lodash');
 const error = require('../lib/error');
+const utils = require('../lib/utils');
 const deadHostModel = require('../models/dead_host');
 const internalHost = require('./host');
 const internalNginx = require('./nginx');
@@ -49,8 +50,8 @@ const internalDeadHost = {
 
 				return deadHostModel
 					.query()
-					.omit(omissions())
-					.insertAndFetch(data);
+					.insertAndFetch(data)
+					.then(utils.omitRow(omissions()));
 			})
 			.then((row) => {
 				if (create_certificate) {
@@ -218,31 +219,28 @@ const internalDeadHost = {
 			.query()
 			.where('is_deleted', 0)
 			.andWhere('id', data.id)
-			.allowEager('[owner,certificate]')
+			.allowGraph('[owner,certificate]')
 			.first();
 
 		if (access_data.permission_visibility !== 'all') {
 			query.andWhere('owner_user_id', access.token.getUserId(1));
 		}
 
-		// Custom omissions
-		if (typeof data.omit !== 'undefined' && data.omit !== null) {
-			query.omit(data.omit);
-		}
-
 		if (typeof data.expand !== 'undefined' && data.expand !== null) {
-			query.eager('[' + data.expand.join(', ') + ']');
+			query.withGraphFetched('[' + data.expand.join(', ') + ']');
 		}
 
-		return query;
+		return query.then(utils.omitRow(omissions()));
 	})
 	.then((row) => {
-		if (row) {
-			row = internalHost.cleanRowCertificateMeta(row);
-			return _.omit(row, omissions());
-		} else {
+		if (!row) {
 			throw new error.ItemNotFoundError(data.id);
 		}
+		row = internalHost.cleanRowCertificateMeta(row);
+		// Custom omissions
+		if (typeof data.omit !== 'undefined' && data.omit !== null) {
+			row = _.omit(row, data.omit);
+		}
+		return row;
 	});
 },
@@ -404,8 +402,7 @@ const internalDeadHost = {
 			.query()
 			.where('is_deleted', 0)
 			.groupBy('id')
-			.omit(['is_deleted'])
-			.allowEager('[owner,certificate]')
+			.allowGraph('[owner,certificate]')
 			.orderBy('domain_names', 'ASC');
 
 		if (access_data.permission_visibility !== 'all') {
@@ -420,10 +417,10 @@ const internalDeadHost = {
 		}
 
 		if (typeof expand !== 'undefined' && expand !== null) {
-			query.eager('[' + expand.join(', ') + ']');
+			query.withGraphFetched('[' + expand.join(', ') + ']');
 		}
 
-		return query;
+		return query.then(utils.omitRows(omissions()));
 	})
 	.then((rows) => {
 		if (typeof expand !== 'undefined' && expand !== null && expand.indexOf('certificate') !== -1) {
```
backend/internal/ip_ranges.js

```diff
@@ -2,8 +2,8 @@ const https = require('https');
 const fs = require('fs');
 const logger = require('../logger').ip_ranges;
 const error = require('../lib/error');
+const utils = require('../lib/utils');
 const internalNginx = require('./nginx');
-const { Liquid } = require('liquidjs');
 
 const CLOUDFRONT_URL = 'https://ip-ranges.amazonaws.com/ip-ranges.json';
 const CLOUDFARE_V4_URL = 'https://www.cloudflare.com/ips-v4';
@@ -119,10 +119,7 @@ const internalIpRanges = {
 	 * @returns {Promise}
 	 */
 	generateConfig: (ip_ranges) => {
-		let renderEngine = new Liquid({
-			root: __dirname + '/../templates/'
-		});
-
+		const renderEngine = utils.getRenderEngine();
 		return new Promise((resolve, reject) => {
 			let template = null;
 			let filename = '/etc/nginx/conf.d/include/ip_ranges.conf';
```
backend/internal/nginx.js

```diff
@@ -1,10 +1,9 @@
-const _ = require('lodash');
-const fs = require('fs');
-const logger = require('../logger').nginx;
-const utils = require('../lib/utils');
-const error = require('../lib/error');
-const { Liquid } = require('liquidjs');
-const debug_mode = process.env.NODE_ENV !== 'production' || !!process.env.DEBUG;
+const _ = require('lodash');
+const fs = require('fs');
+const logger = require('../logger').nginx;
+const config = require('../lib/config');
+const utils = require('../lib/utils');
+const error = require('../lib/error');
 
 const internalNginx = {
 
@@ -29,7 +28,9 @@ const internalNginx = {
 			.then(() => {
 				// Nginx is OK
 				// We're deleting this config regardless.
-				return internalNginx.deleteConfig(host_type, host); // Don't throw errors, as the file may not exist at all
+				// Don't throw errors, as the file may not exist at all
+				// Delete the .err file too
+				return internalNginx.deleteConfig(host_type, host, false, true);
 			})
 			.then(() => {
 				return internalNginx.generateConfig(host_type, host);
@@ -64,7 +65,7 @@ const internalNginx = {
 						}
 					});
 
-					if (debug_mode) {
+					if (config.debug()) {
 						logger.error('Nginx test failed:', valid_lines.join('\n'));
 					}
 
@@ -80,6 +81,9 @@ const internalNginx = {
 						.patch({
 							meta: combined_meta
 						})
+						.then(() => {
+							internalNginx.renameConfigAsError(host_type, host);
+						})
 						.then(() => {
 							return internalNginx.deleteConfig(host_type, host, true);
 						});
@@ -97,7 +101,7 @@ const internalNginx = {
 	 * @returns {Promise}
 	 */
 	test: () => {
-		if (debug_mode) {
+		if (config.debug()) {
 			logger.info('Testing Nginx configuration');
 		}
 
@@ -121,13 +125,10 @@ const internalNginx = {
 	 * @returns {String}
 	 */
 	getConfigName: (host_type, host_id) => {
-		host_type = host_type.replace(new RegExp('-', 'g'), '_');
-
 		if (host_type === 'default') {
 			return '/data/nginx/default_host/site.conf';
 		}
 
-		return '/data/nginx/' + host_type + '/' + host_id + '.conf';
+		return '/data/nginx/' + internalNginx.getFileFriendlyHostType(host_type) + '/' + host_id + '.conf';
 	},
 
 	/**
@@ -136,8 +137,6 @@ const internalNginx = {
 	 * @returns {Promise}
 	 */
 	renderLocations: (host) => {
-
-		//logger.info('host = ' + JSON.stringify(host, null, 2));
 		return new Promise((resolve, reject) => {
 			let template;
 
@@ -148,19 +147,17 @@ const internalNginx = {
 				return;
 			}
 
-			let renderer = new Liquid({
-				root: __dirname + '/../templates/'
-			});
+			const renderEngine = utils.getRenderEngine();
 			let renderedLocations = '';
 
 			const locationRendering = async () => {
 				for (let i = 0; i < host.locations.length; i++) {
 					let locationCopy = Object.assign({}, {access_list_id: host.access_list_id}, {certificate_id: host.certificate_id},
 						{ssl_forced: host.ssl_forced}, {caching_enabled: host.caching_enabled}, {block_exploits: host.block_exploits},
 						{allow_websocket_upgrade: host.allow_websocket_upgrade}, {http2_support: host.http2_support},
 						{hsts_enabled: host.hsts_enabled}, {hsts_subdomains: host.hsts_subdomains}, {access_list: host.access_list},
 						{certificate: host.certificate}, host.locations[i]);
 
 					if (locationCopy.forward_host.indexOf('/') > -1) {
 						const splitted = locationCopy.forward_host.split('/');
@@ -168,16 +165,14 @@ const internalNginx = {
 						locationCopy.forward_path = `/${splitted.join('/')}`;
 					}
 
-					//logger.info('locationCopy = ' + JSON.stringify(locationCopy, null, 2));
-
 					// eslint-disable-next-line
-					renderedLocations += await renderer.parseAndRender(template, locationCopy);
+					renderedLocations += await renderEngine.parseAndRender(template, locationCopy);
 				}
 
 			};
 
 			locationRendering().then(() => resolve(renderedLocations));
 
 		});
 	},
@@ -187,24 +182,20 @@ const internalNginx = {
 	 * @returns {Promise}
 	 */
 	generateConfig: (host_type, host) => {
-		host_type = host_type.replace(new RegExp('-', 'g'), '_');
+		const nice_host_type = internalNginx.getFileFriendlyHostType(host_type);
 
-		if (debug_mode) {
-			logger.info('Generating ' + host_type + ' Config:', host);
+		if (config.debug()) {
+			logger.info('Generating ' + nice_host_type + ' Config:', JSON.stringify(host, null, 2));
 		}
 
-		// logger.info('host = ' + JSON.stringify(host, null, 2));
-
-		let renderEngine = new Liquid({
-			root: __dirname + '/../templates/'
-		});
+		const renderEngine = utils.getRenderEngine();
 
 		return new Promise((resolve, reject) => {
 			let template = null;
-			let filename = internalNginx.getConfigName(host_type, host.id);
+			let filename = internalNginx.getConfigName(nice_host_type, host.id);
 
 			try {
-				template = fs.readFileSync(__dirname + '/../templates/' + host_type + '.conf', {encoding: 'utf8'});
+				template = fs.readFileSync(__dirname + '/../templates/' + nice_host_type + '.conf', {encoding: 'utf8'});
 			} catch (err) {
 				reject(new error.ConfigurationError(err.message));
 				return;
@@ -214,7 +205,7 @@ const internalNginx = {
 			let origLocations;
 
 			// Manipulate the data a bit before sending it to the template
-			if (host_type !== 'default') {
+			if (nice_host_type !== 'default') {
 				host.use_default_location = true;
 				if (typeof host.advanced_config !== 'undefined' && host.advanced_config) {
 					host.use_default_location = !internalNginx.advancedConfigHasDefaultLocation(host.advanced_config);
@@ -248,7 +239,7 @@ const internalNginx = {
 				.then((config_text) => {
 					fs.writeFileSync(filename, config_text, {encoding: 'utf8'});
 
-					if (debug_mode) {
+					if (config.debug()) {
 						logger.success('Wrote config:', filename, config_text);
 					}
 
@@ -258,7 +249,7 @@ const internalNginx = {
 					resolve(true);
 				})
 				.catch((err) => {
-					if (debug_mode) {
+					if (config.debug()) {
 						logger.warn('Could not write ' + filename + ':', err.message);
 					}
 
@@ -277,13 +268,11 @@ const internalNginx = {
 	 * @returns {Promise}
 	 */
 	generateLetsEncryptRequestConfig: (certificate) => {
-		if (debug_mode) {
+		if (config.debug()) {
 			logger.info('Generating LetsEncrypt Request Config:', certificate);
 		}
 
-		let renderEngine = new Liquid({
-			root: __dirname + '/../templates/'
-		});
+		const renderEngine = utils.getRenderEngine();
 
 		return new Promise((resolve, reject) => {
 			let template = null;
@@ -303,14 +292,14 @@ const internalNginx = {
 				.then((config_text) => {
 					fs.writeFileSync(filename, config_text, {encoding: 'utf8'});
 
-					if (debug_mode) {
+					if (config.debug()) {
 						logger.success('Wrote config:', filename, config_text);
 					}
 
 					resolve(true);
 				})
 				.catch((err) => {
-					if (debug_mode) {
+					if (config.debug()) {
 						logger.warn('Could not write ' + filename + ':', err.message);
 					}
 
@@ -319,33 +308,39 @@ const internalNginx = {
 		});
 	},
 
+	/**
+	 * A simple wrapper around unlinkSync that writes to the logger
+	 *
+	 * @param {String} filename
+	 */
+	deleteFile: (filename) => {
+		logger.debug('Deleting file: ' + filename);
+		try {
+			fs.unlinkSync(filename);
+		} catch (err) {
+			logger.debug('Could not delete file:', JSON.stringify(err, null, 2));
+		}
+	},
+
+	/**
+	 *
+	 * @param {String} host_type
+	 * @returns String
+	 */
+	getFileFriendlyHostType: (host_type) => {
+		return host_type.replace(new RegExp('-', 'g'), '_');
+	},
+
 	/**
 	 * This removes the temporary nginx config file generated by `generateLetsEncryptRequestConfig`
 	 *
 	 * @param {Object} certificate
-	 * @param {Boolean} [throw_errors]
 	 * @returns {Promise}
 	 */
-	deleteLetsEncryptRequestConfig: (certificate, throw_errors) => {
-		return new Promise((resolve, reject) => {
-			try {
-				let config_file = '/data/nginx/temp/letsencrypt_' + certificate.id + '.conf';
-
-				if (debug_mode) {
-					logger.warn('Deleting nginx config: ' + config_file);
-				}
-
-				fs.unlinkSync(config_file);
-			} catch (err) {
-				if (debug_mode) {
-					logger.warn('Could not delete config:', err.message);
-				}
-
-				if (throw_errors) {
-					reject(err);
-				}
-			}
-
+	deleteLetsEncryptRequestConfig: (certificate) => {
+		const config_file = '/data/nginx/temp/letsencrypt_' + certificate.id + '.conf';
+		return new Promise((resolve/*, reject*/) => {
+			internalNginx.deleteFile(config_file);
 			resolve();
 		});
 	},
@@ -353,35 +348,42 @@ const internalNginx = {
 	/**
 	 * @param {String} host_type
	 * @param {Object} [host]
-	 * @param {Boolean} [throw_errors]
+	 * @param {Boolean} [delete_err_file]
 	 * @returns {Promise}
 	 */
-	deleteConfig: (host_type, host, throw_errors) => {
-		host_type = host_type.replace(new RegExp('-', 'g'), '_');
+	deleteConfig: (host_type, host, delete_err_file) => {
+		const config_file     = internalNginx.getConfigName(internalNginx.getFileFriendlyHostType(host_type), typeof host === 'undefined' ? 0 : host.id);
+		const config_file_err = config_file + '.err';
 
-		return new Promise((resolve, reject) => {
-			try {
-				let config_file = internalNginx.getConfigName(host_type, typeof host === 'undefined' ? 0 : host.id);
-
-				if (debug_mode) {
-					logger.warn('Deleting nginx config: ' + config_file);
-				}
-
-				fs.unlinkSync(config_file);
-			} catch (err) {
-				if (debug_mode) {
-					logger.warn('Could not delete config:', err.message);
-				}
-
-				if (throw_errors) {
-					reject(err);
-				}
+		return new Promise((resolve/*, reject*/) => {
+			internalNginx.deleteFile(config_file);
+			if (delete_err_file) {
+				internalNginx.deleteFile(config_file_err);
 			}
 
 			resolve();
 		});
 	},
 
+	/**
+	 * @param {String} host_type
+	 * @param {Object} [host]
+	 * @returns {Promise}
+	 */
+	renameConfigAsError: (host_type, host) => {
+		const config_file     = internalNginx.getConfigName(internalNginx.getFileFriendlyHostType(host_type), typeof host === 'undefined' ? 0 : host.id);
+		const config_file_err = config_file + '.err';
+
+		return new Promise((resolve/*, reject*/) => {
+			fs.unlink(config_file, () => {
+				// ignore result, continue
+				fs.rename(config_file, config_file_err, () => {
+					// also ignore result, as this is a debugging informative file anyway
+					resolve();
+				});
+			});
+		});
+	},
+
 	/**
 	 * @param {String} host_type
 	 * @param {Array} hosts
@@ -399,13 +401,12 @@ const internalNginx = {
 	/**
 	 * @param {String} host_type
 	 * @param {Array} hosts
-	 * @param {Boolean} [throw_errors]
 	 * @returns {Promise}
 	 */
-	bulkDeleteConfigs: (host_type, hosts, throw_errors) => {
+	bulkDeleteConfigs: (host_type, hosts) => {
 		let promises = [];
 		hosts.map(function (host) {
-			promises.push(internalNginx.deleteConfig(host_type, host, throw_errors));
+			promises.push(internalNginx.deleteConfig(host_type, host, true));
 		});
 
 		return Promise.all(promises);
@@ -415,8 +416,8 @@ const internalNginx = {
 	 * @param {string} config
 	 * @returns {boolean}
 	 */
-	advancedConfigHasDefaultLocation: function (config) {
-		return !!config.match(/^(?:.*;)?\s*?location\s*?\/\s*?{/im);
+	advancedConfigHasDefaultLocation: function (cfg) {
+		return !!cfg.match(/^(?:.*;)?\s*?location\s*?\/\s*?{/im);
 	},
 
 	/**
```
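The net effect of the nginx.js refactor: config deletion goes through one logging `deleteFile()` wrapper, failed configs are kept as `<file>.err` via `renameConfigAsError()`, and the `throw_errors` plumbing disappears. A sketch of the resulting flow, assuming only Node's `fs` and `console` in place of the project logger:

```js
const fs = require('fs');

// single logging wrapper, as deleteFile() in the diff
const deleteFile = (filename) => {
	console.debug('Deleting file: ' + filename);
	try {
		fs.unlinkSync(filename);
	} catch (err) {
		console.debug('Could not delete file:', err.message);
	}
};

// on a failed nginx test, the broken config is kept alongside as an .err file
const renameConfigAsError = (configFile) =>
	new Promise((resolve) => {
		// result ignored: the .err file is only a debugging aid
		fs.rename(configFile, configFile + '.err', () => resolve());
	});

// deleting a host's config can now also sweep up its .err leftover
const deleteConfig = (configFile, deleteErrFile) =>
	new Promise((resolve) => {
		deleteFile(configFile);
		if (deleteErrFile) {
			deleteFile(configFile + '.err');
		}
		resolve();
	});
```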
backend/internal/proxy-host.js

```diff
@@ -1,5 +1,6 @@
 const _ = require('lodash');
 const error = require('../lib/error');
+const utils = require('../lib/utils');
 const proxyHostModel = require('../models/proxy_host');
 const internalHost = require('./host');
 const internalNginx = require('./nginx');
@@ -49,8 +50,8 @@ const internalProxyHost = {
 
 				return proxyHostModel
 					.query()
-					.omit(omissions())
-					.insertAndFetch(data);
+					.insertAndFetch(data)
+					.then(utils.omitRow(omissions()));
 			})
 			.then((row) => {
 				if (create_certificate) {
@@ -170,6 +171,7 @@ const internalProxyHost = {
 					.query()
 					.where({id: data.id})
 					.patch(data)
+					.then(utils.omitRow(omissions()))
 					.then((saved_row) => {
 						// Add to audit log
 						return internalAuditLog.add(access, {
@@ -179,7 +181,7 @@ const internalProxyHost = {
 							meta: data
 						})
 							.then(() => {
-								return _.omit(saved_row, omissions());
+								return saved_row;
 							});
 					});
 			})
@@ -223,31 +225,29 @@ const internalProxyHost = {
 			.query()
 			.where('is_deleted', 0)
 			.andWhere('id', data.id)
-			.allowEager('[owner,access_list,access_list.[clients,items],certificate]')
+			.allowGraph('[owner,access_list,access_list.[clients,items],certificate]')
 			.first();
 
 		if (access_data.permission_visibility !== 'all') {
 			query.andWhere('owner_user_id', access.token.getUserId(1));
 		}
 
-		// Custom omissions
-		if (typeof data.omit !== 'undefined' && data.omit !== null) {
-			query.omit(data.omit);
-		}
-
 		if (typeof data.expand !== 'undefined' && data.expand !== null) {
-			query.eager('[' + data.expand.join(', ') + ']');
+			query.withGraphFetched('[' + data.expand.join(', ') + ']');
 		}
 
-		return query;
+		return query.then(utils.omitRow(omissions()));
 	})
 	.then((row) => {
-		if (row) {
-			row = internalHost.cleanRowCertificateMeta(row);
-			return _.omit(row, omissions());
-		} else {
+		if (!row) {
 			throw new error.ItemNotFoundError(data.id);
 		}
+		row = internalHost.cleanRowCertificateMeta(row);
+		// Custom omissions
+		if (typeof data.omit !== 'undefined' && data.omit !== null) {
+			row = _.omit(row, data.omit);
+		}
+		return row;
 	});
 },
@@ -409,8 +409,7 @@ const internalProxyHost = {
 			.query()
 			.where('is_deleted', 0)
 			.groupBy('id')
-			.omit(['is_deleted'])
-			.allowEager('[owner,access_list,certificate]')
+			.allowGraph('[owner,access_list,certificate]')
 			.orderBy('domain_names', 'ASC');
 
 		if (access_data.permission_visibility !== 'all') {
@@ -425,10 +424,10 @@ const internalProxyHost = {
 		}
 
 		if (typeof expand !== 'undefined' && expand !== null) {
-			query.eager('[' + expand.join(', ') + ']');
+			query.withGraphFetched('[' + expand.join(', ') + ']');
 		}
 
-		return query;
+		return query.then(utils.omitRows(omissions()));
 	})
 	.then((rows) => {
 		if (typeof expand !== 'undefined' && expand !== null && expand.indexOf('certificate') !== -1) {
```
backend/internal/redirection-host.js

```diff
@@ -1,5 +1,6 @@
 const _ = require('lodash');
 const error = require('../lib/error');
+const utils = require('../lib/utils');
 const redirectionHostModel = require('../models/redirection_host');
 const internalHost = require('./host');
 const internalNginx = require('./nginx');
@@ -49,8 +50,8 @@ const internalRedirectionHost = {
 
 				return redirectionHostModel
 					.query()
-					.omit(omissions())
-					.insertAndFetch(data);
+					.insertAndFetch(data)
+					.then(utils.omitRow(omissions()));
 			})
 			.then((row) => {
 				if (create_certificate) {
@@ -65,9 +66,8 @@ const internalRedirectionHost = {
 						.then(() => {
 							return row;
 						});
-				} else {
-					return row;
 				}
+				return row;
 			})
 			.then((row) => {
 				// re-fetch with cert
@@ -218,31 +218,29 @@ const internalRedirectionHost = {
 			.query()
 			.where('is_deleted', 0)
 			.andWhere('id', data.id)
-			.allowEager('[owner,certificate]')
+			.allowGraph('[owner,certificate]')
 			.first();
 
 		if (access_data.permission_visibility !== 'all') {
 			query.andWhere('owner_user_id', access.token.getUserId(1));
 		}
 
-		// Custom omissions
-		if (typeof data.omit !== 'undefined' && data.omit !== null) {
-			query.omit(data.omit);
-		}
-
 		if (typeof data.expand !== 'undefined' && data.expand !== null) {
-			query.eager('[' + data.expand.join(', ') + ']');
+			query.withGraphFetched('[' + data.expand.join(', ') + ']');
 		}
 
-		return query;
+		return query.then(utils.omitRow(omissions()));
 	})
 	.then((row) => {
-		if (row) {
-			row = internalHost.cleanRowCertificateMeta(row);
-			return _.omit(row, omissions());
-		} else {
+		if (!row) {
 			throw new error.ItemNotFoundError(data.id);
 		}
+		row = internalHost.cleanRowCertificateMeta(row);
+		// Custom omissions
+		if (typeof data.omit !== 'undefined' && data.omit !== null) {
+			row = _.omit(row, data.omit);
+		}
+		return row;
 	});
 },
@@ -404,8 +402,7 @@ const internalRedirectionHost = {
 			.query()
 			.where('is_deleted', 0)
 			.groupBy('id')
-			.omit(['is_deleted'])
-			.allowEager('[owner,certificate]')
+			.allowGraph('[owner,certificate]')
 			.orderBy('domain_names', 'ASC');
 
 		if (access_data.permission_visibility !== 'all') {
@@ -420,10 +417,10 @@ const internalRedirectionHost = {
 		}
 
 		if (typeof expand !== 'undefined' && expand !== null) {
-			query.eager('[' + expand.join(', ') + ']');
+			query.withGraphFetched('[' + expand.join(', ') + ']');
 		}
 
-		return query;
+		return query.then(utils.omitRows(omissions()));
 	})
 	.then((rows) => {
 		if (typeof expand !== 'undefined' && expand !== null && expand.indexOf('certificate') !== -1) {
```
backend/internal/stream.js

```diff
@@ -1,5 +1,6 @@
 const _ = require('lodash');
 const error = require('../lib/error');
+const utils = require('../lib/utils');
 const streamModel = require('../models/stream');
 const internalNginx = require('./nginx');
 const internalAuditLog = require('./audit-log');
@@ -27,8 +28,8 @@ const internalStream = {
 
 				return streamModel
 					.query()
-					.omit(omissions())
-					.insertAndFetch(data);
+					.insertAndFetch(data)
+					.then(utils.omitRow(omissions()));
 			})
 			.then((row) => {
 				// Configure nginx
@@ -71,8 +72,8 @@ const internalStream = {
 
 				return streamModel
 					.query()
-					.omit(omissions())
 					.patchAndFetchById(row.id, data)
+					.then(utils.omitRow(omissions()))
 					.then((saved_row) => {
 						return internalNginx.configure(streamModel, 'stream', saved_row)
 							.then(() => {
@@ -88,7 +89,7 @@ const internalStream = {
 							meta: data
 						})
 							.then(() => {
-								return _.omit(saved_row, omissions());
+								return saved_row;
 							});
 					});
 			});
@@ -113,30 +114,28 @@ const internalStream = {
 			.query()
 			.where('is_deleted', 0)
 			.andWhere('id', data.id)
-			.allowEager('[owner]')
+			.allowGraph('[owner]')
 			.first();
 
 		if (access_data.permission_visibility !== 'all') {
 			query.andWhere('owner_user_id', access.token.getUserId(1));
 		}
 
-		// Custom omissions
-		if (typeof data.omit !== 'undefined' && data.omit !== null) {
-			query.omit(data.omit);
-		}
-
 		if (typeof data.expand !== 'undefined' && data.expand !== null) {
-			query.eager('[' + data.expand.join(', ') + ']');
+			query.withGraphFetched('[' + data.expand.join(', ') + ']');
 		}
 
-		return query;
+		return query.then(utils.omitRow(omissions()));
 	})
 	.then((row) => {
-		if (row) {
-			return _.omit(row, omissions());
-		} else {
+		if (!row) {
 			throw new error.ItemNotFoundError(data.id);
 		}
+		// Custom omissions
+		if (typeof data.omit !== 'undefined' && data.omit !== null) {
+			row = _.omit(row, data.omit);
+		}
+		return row;
 	});
 },
@@ -298,8 +297,7 @@ const internalStream = {
 			.query()
 			.where('is_deleted', 0)
 			.groupBy('id')
-			.omit(['is_deleted'])
-			.allowEager('[owner]')
+			.allowGraph('[owner]')
 			.orderBy('incoming_port', 'ASC');
 
 		if (access_data.permission_visibility !== 'all') {
@@ -314,10 +312,10 @@ const internalStream = {
 		}
 
 		if (typeof expand !== 'undefined' && expand !== null) {
-			query.eager('[' + expand.join(', ') + ']');
+			query.withGraphFetched('[' + expand.join(', ') + ']');
 		}
 
-		return query;
+		return query.then(utils.omitRows(omissions()));
 	});
 },
```
backend/internal/user.js

```diff
@@ -1,5 +1,6 @@
 const _ = require('lodash');
 const error = require('../lib/error');
+const utils = require('../lib/utils');
 const userModel = require('../models/user');
 const userPermissionModel = require('../models/user_permission');
 const authModel = require('../models/auth');
@@ -35,8 +36,8 @@ const internalUser = {
 
 				return userModel
 					.query()
-					.omit(omissions())
-					.insertAndFetch(data);
+					.insertAndFetch(data)
+					.then(utils.omitRow(omissions()));
 			})
 			.then((user) => {
 				if (auth) {
@@ -140,11 +141,8 @@ const internalUser = {
 
 				return userModel
 					.query()
-					.omit(omissions())
 					.patchAndFetchById(user.id, data)
-					.then((saved_user) => {
-						return _.omit(saved_user, omissions());
-					});
+					.then(utils.omitRow(omissions()));
 			})
 			.then(() => {
 				return internalUser.get(access, {id: data.id});
@@ -186,26 +184,24 @@ const internalUser = {
 			.query()
 			.where('is_deleted', 0)
 			.andWhere('id', data.id)
-			.allowEager('[permissions]')
+			.allowGraph('[permissions]')
 			.first();
 
-		// Custom omissions
-		if (typeof data.omit !== 'undefined' && data.omit !== null) {
-			query.omit(data.omit);
-		}
-
 		if (typeof data.expand !== 'undefined' && data.expand !== null) {
-			query.eager('[' + data.expand.join(', ') + ']');
+			query.withGraphFetched('[' + data.expand.join(', ') + ']');
 		}
 
-		return query;
+		return query.then(utils.omitRow(omissions()));
 	})
 	.then((row) => {
-		if (row) {
-			return _.omit(row, omissions());
-		} else {
+		if (!row) {
 			throw new error.ItemNotFoundError(data.id);
 		}
+		// Custom omissions
+		if (typeof data.omit !== 'undefined' && data.omit !== null) {
+			row = _.omit(row, data.omit);
+		}
+		return row;
 	});
 },
@@ -322,8 +318,7 @@ const internalUser = {
 			.query()
 			.where('is_deleted', 0)
 			.groupBy('id')
-			.omit(['is_deleted'])
-			.allowEager('[permissions]')
+			.allowGraph('[permissions]')
 			.orderBy('name', 'ASC');
 
 		// Query is used for searching
@@ -335,10 +330,10 @@ const internalUser = {
 		}
 
 		if (typeof expand !== 'undefined' && expand !== null) {
-			query.eager('[' + expand.join(', ') + ']');
+			query.withGraphFetched('[' + expand.join(', ') + ']');
 		}
 
-		return query;
+		return query.then(utils.omitRows(omissions()));
 	});
 },
```
```diff
@@ -55,8 +55,8 @@ module.exports = function (token_string) {
 				.where('id', token_data.attrs.id)
 				.andWhere('is_deleted', 0)
 				.andWhere('is_disabled', 0)
-				.allowEager('[permissions]')
-				.eager('[permissions]')
+				.allowGraph('[permissions]')
+				.withGraphFetched('[permissions]')
 				.first()
 				.then((user) => {
 					if (user) {
```
backend/lib/config.js (new file, 184 lines)
|
||||
const fs = require('fs');
|
||||
const NodeRSA = require('node-rsa');
|
||||
const logger = require('../logger').global;
|
||||
|
||||
const keysFile = '/data/keys.json';
|
||||
|
||||
let instance = null;
|
||||
|
||||
// 1. Load from config file first (not recommended anymore)
|
||||
// 2. Use config env variables next
|
||||
const configure = () => {
|
||||
const filename = (process.env.NODE_CONFIG_DIR || './config') + '/' + (process.env.NODE_ENV || 'default') + '.json';
|
||||
if (fs.existsSync(filename)) {
|
||||
let configData;
|
||||
try {
|
||||
configData = require(filename);
|
||||
} catch (err) {
|
||||
// do nothing
|
||||
}
|
||||
|
||||
if (configData && configData.database) {
|
||||
logger.info(`Using configuration from file: ${filename}`);
|
||||
instance = configData;
|
||||
instance.keys = getKeys();
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
const envMysqlHost = process.env.DB_MYSQL_HOST || null;
|
||||
const envMysqlUser = process.env.DB_MYSQL_USER || null;
|
||||
const envMysqlName = process.env.DB_MYSQL_NAME || null;
|
||||
if (envMysqlHost && envMysqlUser && envMysqlName) {
|
||||
// we have enough mysql creds to go with mysql
|
||||
logger.info('Using MySQL configuration');
|
||||
instance = {
|
||||
database: {
|
||||
engine: 'mysql',
|
||||
host: envMysqlHost,
|
||||
port: process.env.DB_MYSQL_PORT || 3306,
|
||||
user: envMysqlUser,
|
||||
password: process.env.DB_MYSQL_PASSWORD,
|
||||
name: envMysqlName,
|
||||
},
|
||||
keys: getKeys(),
|
||||
};
|
||||
return;
|
||||
}
|
||||
|
||||
const envSqliteFile = process.env.DB_SQLITE_FILE || '/data/database.sqlite';
|
||||
logger.info(`Using Sqlite: ${envSqliteFile}`);
|
||||
instance = {
|
||||
database: {
|
||||
engine: 'knex-native',
|
||||
knex: {
|
||||
client: 'sqlite3',
|
||||
connection: {
|
||||
filename: envSqliteFile
|
||||
},
|
||||
useNullAsDefault: true
|
||||
}
|
||||
},
|
||||
keys: getKeys(),
|
||||
};
|
||||
};
|
||||
|
||||
const getKeys = () => {
|
||||
// Get keys from file
|
||||
if (!fs.existsSync(keysFile)) {
|
||||
generateKeys();
|
||||
} else if (process.env.DEBUG) {
|
||||
logger.info('Keys file exists OK');
|
||||
}
|
||||
try {
|
||||
return require(keysFile);
|
||||
} catch (err) {
|
||||
logger.error('Could not read JWT key pair from config file: ' + keysFile, err);
|
||||
process.exit(1);
|
||||
}
|
||||
};
|
||||
|
||||
const generateKeys = () => {
|
||||
logger.info('Creating a new JWT key pair...');
|
||||
// Now create the keys and save them in the config.
|
||||
const key = new NodeRSA({ b: 2048 });
|
||||
key.generateKeyPair();
|
||||
|
||||
const keys = {
|
||||
key: key.exportKey('private').toString(),
|
||||
pub: key.exportKey('public').toString(),
|
||||
};
|
||||
|
||||
// Write keys config
|
||||
try {
|
||||
fs.writeFileSync(keysFile, JSON.stringify(keys, null, 2));
|
||||
} catch (err) {
|
||||
logger.error('Could not write JWT key pair to config file: ' + keysFile + ': ' . err.message);
|
||||
process.exit(1);
|
||||
}
|
||||
logger.info('Wrote JWT key pair to config file: ' + keysFile);
|
||||
};
|
||||
|
||||
module.exports = {
|
||||
|
||||
/**
|
||||
*
|
||||
* @param {string} key ie: 'database' or 'database.engine'
|
||||
* @returns {boolean}
|
||||
*/
|
||||
has: function(key) {
|
||||
instance === null && configure();
|
||||
const keys = key.split('.');
|
||||
let level = instance;
|
||||
let has = true;
|
||||
keys.forEach((keyItem) =>{
|
||||
if (typeof level[keyItem] === 'undefined') {
|
||||
has = false;
|
||||
} else {
|
||||
level = level[keyItem];
|
||||
}
|
||||
});
|
||||
|
||||
return has;
|
||||
},
|
||||
|
||||
/**
|
||||
* Gets a specific key from the top level
|
||||
*
|
||||
* @param {string} key
|
||||
* @returns {*}
|
||||
*/
|
||||
get: function (key) {
|
||||
instance === null && configure();
|
||||
if (key && typeof instance[key] !== 'undefined') {
|
||||
return instance[key];
|
||||
}
|
||||
return instance;
|
||||
},
|
||||
|
||||
/**
|
||||
* Is this a sqlite configuration?
|
||||
*
|
||||
* @returns {boolean}
|
||||
*/
|
||||
isSqlite: function () {
|
||||
instance === null && configure();
|
||||
return instance.database.knex && instance.database.knex.client === 'sqlite3';
|
||||
},
|
||||
|
||||
/**
|
||||
* Are we running in debug mdoe?
|
||||
*
|
||||
* @returns {boolean}
|
||||
*/
|
||||
debug: function () {
|
||||
return !!process.env.DEBUG;
|
||||
},
|
||||
|
||||
/**
|
||||
* Returns a public key
|
||||
*
|
||||
* @returns {string}
|
||||
*/
|
||||
getPublicKey: function () {
|
||||
instance === null && configure();
|
||||
return instance.keys.pub;
|
||||
},
|
||||
|
||||
/**
|
||||
* Returns a private key
|
||||
*
|
||||
* @returns {string}
|
||||
*/
|
||||
getPrivateKey: function () {
|
||||
instance === null && configure();
|
||||
return instance.keys.key;
|
||||
},
|
||||
|
||||
/**
|
||||
* @returns {boolean}
|
||||
*/
|
||||
useLetsencryptStaging: function () {
|
||||
return !!process.env.LE_STAGING;
|
||||
}
|
||||
};
|
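Usage is lazy: nothing resolves until the first call touches `instance`, so merely requiring the module has no side effects. A short sketch of how the rest of the backend consumes it (paths relative to the backend root):

```js
const config = require('./lib/config');

// guard used by db.js
if (!config.has('database')) {
	throw new Error('Database config does not exist!');
}

const db       = config.get('database');            // whole database block
const isSqlite = config.isSqlite();                  // knex-native with sqlite3 client?
const verbose  = config.debug();                     // true when DEBUG env var is set
const staging  = config.useLetsencryptStaging();     // true when LE_STAGING is set
```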
@@ -1,5 +1,8 @@
-const exec = require('child_process').exec;
-const execFile = require('child_process').execFile;
+const _ = require('lodash');
+const exec = require('child_process').exec;
+const execFile = require('child_process').execFile;
+const { Liquid } = require('liquidjs');
+const logger = require('../logger').global;

 module.exports = {

@@ -20,12 +23,14 @@ module.exports = {
 	},

 	/**
-	 * @param   {Array}  cmd
+	 * @param   {String} cmd
+	 * @param   {Array}  args
 	 * @returns {Promise}
 	 */
-	execFile: function (cmd) {
+	execFile: function (cmd, args) {
+		logger.debug('CMD: ' + cmd + ' ' + (args ? args.join(' ') : ''));
 		return new Promise((resolve, reject) => {
-			execFile(cmd, function (err, stdout, /*stderr*/) {
+			execFile(cmd, args, function (err, stdout, /*stderr*/) {
 				if (err && typeof err === 'object') {
 					reject(err);
 				} else {
@@ -33,5 +38,64 @@ module.exports = {
 				}
 			});
 		});
 	},
+
+	/**
+	 * Used in objection query builder
+	 *
+	 * @param   {Array}    omissions
+	 * @returns {Function}
+	 */
+	omitRow: function (omissions) {
+		/**
+		 * @param   {Object} row
+		 * @returns {Object}
+		 */
+		return (row) => {
+			return _.omit(row, omissions);
+		};
+	},
+
+	/**
+	 * Used in objection query builder
+	 *
+	 * @param   {Array}    omissions
+	 * @returns {Function}
+	 */
+	omitRows: function (omissions) {
+		/**
+		 * @param   {Array}  rows
+		 * @returns {Object}
+		 */
+		return (rows) => {
+			rows.forEach((row, idx) => {
+				rows[idx] = _.omit(row, omissions);
+			});
+			return rows;
+		};
+	},
+
+	/**
+	 * @returns {Object} Liquid render engine
+	 */
+	getRenderEngine: function () {
+		const renderEngine = new Liquid({
+			root: __dirname + '/../templates/'
+		});
+
+		/**
+		 * nginxAccessRule expects the object given to have 2 properties:
+		 *
+		 * directive  string
+		 * address    string
+		 */
+		renderEngine.registerFilter('nginxAccessRule', (v) => {
+			if (typeof v.directive !== 'undefined' && typeof v.address !== 'undefined' && v.directive && v.address) {
+				return `${v.directive} ${v.address};`;
+			}
+			return '';
+		});
+
+		return renderEngine;
+	}
+};
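A hedged usage sketch of the reworked `execFile` helper; the command and arguments below are illustrative only:

```js
// Hedged sketch: 'certbot --version' is illustrative, not from the diff.
const utils = require('./lib/utils');

// The command and its arguments are now passed separately instead of as one
// string, so no shell-style splitting happens along the way.
utils.execFile('certbot', ['--version'])
	.then((stdout) => console.log(stdout))
	.catch((err) => console.error(err.message));
```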
@@ -5,7 +5,7 @@ const definitions = require('../../schema/definitions.json');
 RegExp.prototype.toJSON = RegExp.prototype.toString;

 const ajv = require('ajv')({
-	verbose: true, //process.env.NODE_ENV === 'development',
+	verbose: true,
 	allErrors: true,
 	format: 'full', // strict regexes for format checks
 	coerceTypes: true,
@@ -50,7 +50,6 @@ class AccessList extends Model {
 			},
 			modify: function (qb) {
 				qb.where('user.is_deleted', 0);
-				qb.omit(['id', 'created_on', 'modified_on', 'is_deleted', 'email', 'roles']);
 			}
 		},
 		items: {
@@ -59,9 +58,6 @@ class AccessList extends Model {
 			join: {
 				from: 'access_list.id',
 				to:   'access_list_auth.access_list_id'
 			},
-			modify: function (qb) {
-				qb.omit(['id', 'created_on', 'modified_on', 'access_list_id', 'meta']);
-			}
 		},
 		clients: {
@@ -70,9 +66,6 @@ class AccessList extends Model {
 			join: {
 				from: 'access_list.id',
 				to:   'access_list_client.access_list_id'
 			},
-			modify: function (qb) {
-				qb.omit(['id', 'created_on', 'modified_on', 'access_list_id', 'meta']);
-			}
 		},
 		proxy_hosts: {
@@ -84,19 +77,10 @@ class AccessList extends Model {
 			},
 			modify: function (qb) {
 				qb.where('proxy_host.is_deleted', 0);
-				qb.omit(['is_deleted', 'meta']);
 			}
 		}
 	};
 }

-	get satisfy() {
-		return this.satisfy_any ? 'satisfy any' : 'satisfy all';
-	}
-
-	get passauth() {
-		return this.pass_auth ? '' : 'proxy_set_header Authorization "";';
-	}
 }

 module.exports = AccessList;

@@ -45,7 +45,6 @@ class AccessListAuth extends Model {
 			},
 			modify: function (qb) {
 				qb.where('access_list.is_deleted', 0);
-				qb.omit(['created_on', 'modified_on', 'is_deleted', 'access_list_id']);
 			}
 		}
 	};

@@ -45,15 +45,10 @@ class AccessListClient extends Model {
 			},
 			modify: function (qb) {
 				qb.where('access_list.is_deleted', 0);
-				qb.omit(['created_on', 'modified_on', 'is_deleted', 'access_list_id']);
 			}
 		}
 	};
 }

-	get rule() {
-		return `${this.directive} ${this.address}`;
-	}
 }

 module.exports = AccessListClient;

@@ -43,9 +43,6 @@ class AuditLog extends Model {
 			join: {
 				from: 'audit_log.user_id',
 				to:   'user.id'
 			},
-			modify: function (qb) {
-				qb.omit(['id', 'created_on', 'modified_on', 'roles']);
-			}
 		}
 	};

@@ -74,9 +74,6 @@ class Auth extends Model {
 			},
 			filter: {
 				is_deleted: 0
 			},
-			modify: function (qb) {
-				qb.omit(['is_deleted']);
-			}
 		}
 	};

@@ -63,7 +63,6 @@ class Certificate extends Model {
 			},
 			modify: function (qb) {
 				qb.where('user.is_deleted', 0);
-				qb.omit(['id', 'created_on', 'modified_on', 'is_deleted', 'email', 'roles']);
 			}
 		}
 	};

@@ -59,7 +59,6 @@ class DeadHost extends Model {
 			},
 			modify: function (qb) {
 				qb.where('user.is_deleted', 0);
-				qb.omit(['id', 'created_on', 'modified_on', 'is_deleted', 'email', 'roles']);
 			}
 		},
 		certificate: {
@@ -71,7 +70,6 @@ class DeadHost extends Model {
 			},
 			modify: function (qb) {
 				qb.where('certificate.is_deleted', 0);
-				qb.omit(['id', 'created_on', 'modified_on', 'is_deleted']);
 			}
 		}
 	};
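The `qb.omit()` calls removed above have no query-builder equivalent in objection 3; a hedged sketch of the replacement pattern using the new `omitRows` helper from `lib/utils` (the model name is illustrative):

```js
// Hedged sketch: accessListModel is a placeholder; the point is that field
// stripping moves from the query builder onto the resolved rows.
const utils = require('./lib/utils');

accessListModel.query()
	.where('is_deleted', 0)
	.then(utils.omitRows(['is_deleted', 'meta']))
	.then((rows) => {
		// rows no longer carry the omitted fields
	});
```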
@@ -1,13 +1,13 @@
 const db     = require('../db');
-const config = require('config');
+const config = require('../lib/config');
 const Model  = require('objection').Model;

 Model.knex(db);

 module.exports = function () {
-	if (config.database.knex && config.database.knex.client === 'sqlite3') {
-		return Model.raw('datetime(\'now\',\'localtime\')');
-	} else {
-		return Model.raw('NOW()');
+	if (config.isSqlite()) {
+		// eslint-disable-next-line
+		return Model.raw("datetime('now','localtime')");
 	}
+	return Model.raw('NOW()');
 };
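A hedged sketch of how this helper is typically consumed; the model and column are illustrative, not from the diff:

```js
// Hedged sketch: userModel and modified_on are placeholders.
const now = require('./models/now_helper');

userModel.query()
	.patch({ modified_on: now() }) // NOW() on MySQL, datetime('now','localtime') on sqlite
	.where('id', 1);
```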
@@ -60,7 +60,6 @@ class ProxyHost extends Model {
 			},
 			modify: function (qb) {
 				qb.where('user.is_deleted', 0);
-				qb.omit(['id', 'created_on', 'modified_on', 'is_deleted', 'email', 'roles']);
 			}
 		},
 		access_list: {
@@ -72,7 +71,6 @@ class ProxyHost extends Model {
 			},
 			modify: function (qb) {
 				qb.where('access_list.is_deleted', 0);
-				qb.omit(['id', 'created_on', 'modified_on', 'is_deleted']);
 			}
 		},
 		certificate: {
@@ -84,7 +82,6 @@ class ProxyHost extends Model {
 			},
 			modify: function (qb) {
 				qb.where('certificate.is_deleted', 0);
-				qb.omit(['id', 'created_on', 'modified_on', 'is_deleted']);
 			}
 		}
 	};

@@ -1,3 +1,4 @@
+
 // Objection Docs:
 // http://vincit.github.io/objection.js/

@@ -59,7 +60,6 @@ class RedirectionHost extends Model {
 			},
 			modify: function (qb) {
 				qb.where('user.is_deleted', 0);
-				qb.omit(['id', 'created_on', 'modified_on', 'is_deleted', 'email', 'roles']);
 			}
 		},
 		certificate: {
@@ -71,7 +71,6 @@ class RedirectionHost extends Model {
 			},
 			modify: function (qb) {
 				qb.where('certificate.is_deleted', 0);
-				qb.omit(['id', 'created_on', 'modified_on', 'is_deleted']);
 			}
 		}
 	};

@@ -46,7 +46,6 @@ class Stream extends Model {
 			},
 			modify: function (qb) {
 				qb.where('user.is_deleted', 0);
-				qb.omit(['id', 'created_on', 'modified_on', 'is_deleted', 'email', 'roles']);
 			}
 		}
 	};
@@ -6,44 +6,36 @@
 const _      = require('lodash');
 const jwt    = require('jsonwebtoken');
 const crypto = require('crypto');
+const config = require('../lib/config');
 const error  = require('../lib/error');
+const logger = require('../logger').global;
 const ALGO   = 'RS256';

-let public_key  = null;
-let private_key = null;
-
-function checkJWTKeyPair() {
-	if (!public_key || !private_key) {
-		let config = require('config');
-		public_key  = config.get('jwt.pub');
-		private_key = config.get('jwt.key');
-	}
-}
-
 module.exports = function () {

 	let token_data = {};

-	let self = {
+	const self = {
 		/**
 		 * @param   {Object}  payload
 		 * @returns {Promise}
 		 */
 		create: (payload) => {
+			if (!config.getPrivateKey()) {
+				logger.error('Private key is empty!');
+			}
 			// sign with RSA SHA256
-			let options = {
+			const options = {
 				algorithm: ALGO,
 				expiresIn: payload.expiresIn || '1d'
 			};

 			payload.jti = crypto.randomBytes(12)
 				.toString('base64')
-				.substr(-8);
-
-			checkJWTKeyPair();
+				.substring(-8);

 			return new Promise((resolve, reject) => {
-				jwt.sign(payload, private_key, options, (err, token) => {
+				jwt.sign(payload, config.getPrivateKey(), options, (err, token) => {
 					if (err) {
 						reject(err);
 					} else {
@@ -62,13 +54,15 @@ module.exports = function () {
 		 * @returns {Promise}
 		 */
 		load: function (token) {
+			if (!config.getPublicKey()) {
+				logger.error('Public key is empty!');
+			}
 			return new Promise((resolve, reject) => {
-				checkJWTKeyPair();
 				try {
 					if (!token || token === null || token === 'null') {
 						reject(new error.AuthError('Empty token'));
 					} else {
-						jwt.verify(token, public_key, {ignoreExpiration: false, algorithms: [ALGO]}, (err, result) => {
+						jwt.verify(token, config.getPublicKey(), {ignoreExpiration: false, algorithms: [ALGO]}, (err, result) => {
 							if (err) {

 								if (err.name === 'TokenExpiredError') {
@@ -83,8 +77,6 @@ module.exports = function () {
 							// Hack: some tokens out in the wild have a scope of 'all' instead of 'user'.
 							// For 30 days at least, we need to replace 'all' with user.
 							if ((typeof token_data.scope !== 'undefined' && _.indexOf(token_data.scope, 'all') !== -1)) {
-								//console.log('Warning! Replacing "all" scope with "user"');
-
 								token_data.scope = ['user'];
 							}

@@ -134,7 +126,7 @@ module.exports = function () {
 		 * @returns {Integer}
 		 */
 		getUserId: (default_value) => {
-			let attrs = self.get('attrs');
+			const attrs = self.get('attrs');
 			if (attrs && typeof attrs.id !== 'undefined' && attrs.id) {
 				return attrs.id;
 			}
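A hedged usage sketch of the token factory above; the require path and the shape of the value `create()` resolves with are assumptions:

```js
// Hedged sketch: the path and the `signed.token` shape are assumptions.
const TokenFactory = require('./lib/token');

const token = TokenFactory();
token.create({ scope: ['user'], attrs: { id: 1 }, expiresIn: '1d' })
	.then((signed) => token.load(signed.token))
	.then(() => {
		console.log('user id:', token.getUserId(0)); // falls back to 0 when no id is set
	})
	.catch((err) => console.error(err.message));
```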
@@ -43,9 +43,6 @@ class User extends Model {
 			join: {
 				from: 'user.id',
 				to:   'user_permission.user_id'
 			},
-			modify: function (qb) {
-				qb.omit(['id', 'created_on', 'modified_on', 'user_id']);
-			}
 		}
 	};
@@ -10,23 +10,21 @@
 		"bcrypt": "^5.0.0",
 		"body-parser": "^1.19.0",
 		"compression": "^1.7.4",
-		"config": "^3.3.1",
 		"express": "^4.17.3",
 		"express-fileupload": "^1.1.9",
 		"gravatar": "^1.8.0",
 		"json-schema-ref-parser": "^8.0.0",
 		"jsonwebtoken": "^9.0.0",
-		"knex": "^2.4.0",
-		"liquidjs": "^10.0.0",
+		"knex": "2.4.2",
+		"liquidjs": "10.6.1",
 		"lodash": "^4.17.21",
 		"moment": "^2.29.4",
 		"mysql": "^2.18.1",
 		"node-rsa": "^1.0.8",
-		"nodemon": "^2.0.2",
-		"objection": "^2.2.16",
+		"objection": "3.0.1",
 		"path": "^0.12.7",
-		"signale": "^1.4.0",
-		"sqlite3": "^4.1.1",
+		"signale": "1.4.0",
+		"sqlite3": "5.1.6",
 		"temp-write": "^4.0.0"
 	},
 	"signale": {
@@ -36,8 +34,9 @@
 	"author": "Jamie Curnow <jc@jc21.com>",
 	"license": "MIT",
 	"devDependencies": {
-		"eslint": "^6.8.0",
+		"eslint": "^8.36.0",
 		"eslint-plugin-align-assignments": "^1.1.2",
+		"nodemon": "^2.0.2",
 		"prettier": "^2.0.4"
 	}
 }
@@ -1,6 +1,4 @@
-const fs               = require('fs');
-const NodeRSA          = require('node-rsa');
-const config           = require('config');
+const config           = require('./lib/config');
 const logger           = require('./logger').setup;
 const certificateModel = require('./models/certificate');
 const userModel        = require('./models/user');
@@ -9,62 +7,6 @@ const utils = require('./lib/utils');
 const authModel    = require('./models/auth');
 const settingModel = require('./models/setting');
 const dns_plugins  = require('./global/certbot-dns-plugins');
-const debug_mode   = process.env.NODE_ENV !== 'production' || !!process.env.DEBUG;
-
-/**
- * Creates a new JWT RSA Keypair if not already set on the config
- *
- * @returns {Promise}
- */
-const setupJwt = () => {
-	return new Promise((resolve, reject) => {
-		// Now go and check if the jwt gpg keys have been created and if not, create them
-		if (!config.has('jwt') || !config.has('jwt.key') || !config.has('jwt.pub')) {
-			logger.info('Creating a new JWT key pair...');
-
-			// jwt keys are not configured properly
-			const filename  = config.util.getEnv('NODE_CONFIG_DIR') + '/' + (config.util.getEnv('NODE_ENV') || 'default') + '.json';
-			let config_data = {};
-
-			try {
-				config_data = require(filename);
-			} catch (err) {
-				// do nothing
-				if (debug_mode) {
-					logger.debug(filename + ' config file could not be required');
-				}
-			}
-
-			// Now create the keys and save them in the config.
-			let key = new NodeRSA({ b: 2048 });
-			key.generateKeyPair();
-
-			config_data.jwt = {
-				key: key.exportKey('private').toString(),
-				pub: key.exportKey('public').toString(),
-			};
-
-			// Write config
-			fs.writeFile(filename, JSON.stringify(config_data, null, 2), (err) => {
-				if (err) {
-					logger.error('Could not write JWT key pair to config file: ' + filename);
-					reject(err);
-				} else {
-					logger.info('Wrote JWT key pair to config file: ' + filename);
-					delete require.cache[require.resolve('config')];
-					resolve();
-				}
-			});
-		} else {
-			// JWT key pair exists
-			if (debug_mode) {
-				logger.debug('JWT Keypair already exists');
-			}
-
-			resolve();
-		}
-	});
-};
-
 /**
  * Creates a default admin user if one doesn't already exist in the database
@@ -119,8 +61,8 @@ const setupDefaultUser = () => {
 			.then(() => {
 				logger.info('Initial admin setup completed');
 			});
-	} else if (debug_mode) {
-		logger.debug('Admin user setup not required');
+	} else if (config.debug()) {
+		logger.info('Admin user setup not required');
 	}
 	});
 };
@@ -151,8 +93,8 @@ const setupDefaultSettings = () => {
 				logger.info('Default settings added');
 			});
 		}
-		if (debug_mode) {
-			logger.debug('Default setting setup not required');
+		if (config.debug()) {
+			logger.info('Default setting setup not required');
 		}
 	});
 };
@@ -189,7 +131,7 @@ const setupCertbotPlugins = () => {
 		});

 		if (plugins.length) {
-			const install_cmd = '. /opt/certbot/bin/activate && pip install ' + plugins.join(' ') + ' && deactivate';
+			const install_cmd = '. /opt/certbot/bin/activate && pip install --no-cache-dir --user ' + plugins.join(' ') + ' && deactivate';
 			promises.push(utils.exec(install_cmd));
 		}

@@ -225,8 +167,7 @@ const setupLogrotation = () => {
 };

 module.exports = function () {
-	return setupJwt()
-		.then(setupDefaultUser)
+	return setupDefaultUser()
 		.then(setupDefaultSettings)
 		.then(setupCertbotPlugins)
 		.then(setupLogrotation);
backend/templates/_access.conf (new file)
@@ -0,0 +1,25 @@
{% if access_list_id > 0 %}
{% if access_list.items.length > 0 %}
# Authorization
auth_basic            "Authorization required";
auth_basic_user_file  /data/access/{{ access_list_id }};

{% if access_list.pass_auth == 0 %}
proxy_set_header Authorization "";
{% endif %}

{% endif %}

# Access Rules: {{ access_list.clients | size }} total
{% for client in access_list.clients %}
{{client | nginxAccessRule}}
{% endfor %}
deny all;

# Access checks must...
{% if access_list.satisfy_any == 1 %}
satisfy any;
{% else %}
satisfy all;
{% endif %}
{% endif %}
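A hedged sketch of what the `nginxAccessRule` filter used by this template produces; the client data is illustrative:

```js
// Hedged sketch: the client object is a placeholder.
const utils = require('./lib/utils');

utils.getRenderEngine()
	.parseAndRender('{{ client | nginxAccessRule }}', {
		client: { directive: 'allow', address: '10.0.0.0/8' }
	})
	.then((out) => console.log(out)); // -> "allow 10.0.0.0/8;"
```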
@@ -6,30 +6,9 @@
 proxy_set_header X-Real-IP $remote_addr;
 proxy_pass {{ forward_scheme }}://{{ forward_host }}:{{ forward_port }}{{ forward_path }};

-{% if access_list_id > 0 %}
-{% if access_list.items.length > 0 %}
-# Authorization
-auth_basic            "Authorization required";
-auth_basic_user_file  /data/access/{{ access_list_id }};
-
-{{ access_list.passauth }}
-{% endif %}
-
-# Access Rules
-{% for client in access_list.clients %}
-{{- client.rule -}};
-{% endfor %}deny all;
-
-# Access checks must...
-{% if access_list.satisfy %}
-{{ access_list.satisfy }};
-{% endif %}
-
-{% endif %}
-
+{% include "_access.conf" %}
 {% include "_assets.conf" %}
 {% include "_exploits.conf" %}

 {% include "_forced_ssl.conf" %}
 {% include "_hsts.conf" %}

@@ -30,27 +30,7 @@ proxy_http_version 1.1;

 location / {

-{% if access_list_id > 0 %}
-{% if access_list.items.length > 0 %}
-# Authorization
-auth_basic            "Authorization required";
-auth_basic_user_file  /data/access/{{ access_list_id }};
-
-{{ access_list.passauth }}
-{% endif %}
-
-# Access Rules
-{% for client in access_list.clients %}
-{{- client.rule -}};
-{% endfor %}deny all;
-
-# Access checks must...
-{% if access_list.satisfy %}
-{{ access_list.satisfy }};
-{% endif %}
-
-{% endif %}
-
+{% include "_access.conf" %}
 {% include "_hsts.conf" %}

 {% if allow_websocket_upgrade == 1 or allow_websocket_upgrade == true %}
backend/yarn.lock (1377 lines changed; diff suppressed because it is too large)
@@ -25,7 +25,7 @@ RUN echo "fs.file-max = 65535" > /etc/sysctl.conf \
 	&& rm -rf /var/lib/apt/lists/*

 # s6 overlay
-COPY scripts/install-s6 /tmp/install-s6
+COPY docker/scripts/install-s6 /tmp/install-s6
 RUN /tmp/install-s6 "${TARGETPLATFORM}" && rm -f /tmp/install-s6

 EXPOSE 80 81 443
@@ -35,21 +35,17 @@ COPY frontend/dist /app/frontend
 COPY global /app/global

 WORKDIR /app
-RUN yarn install
+RUN yarn install \
+	&& yarn cache clean

 # add late to limit cache-busting by modifications
 COPY docker/rootfs /

 # Remove frontend service not required for prod, dev nginx config as well
-RUN rm -rf /etc/services.d/frontend /etc/nginx/conf.d/dev.conf
-
 # Change permission of logrotate config file
-RUN chmod 644 /etc/logrotate.d/nginx-proxy-manager
-
 # fix for pip installs
 # https://github.com/NginxProxyManager/nginx-proxy-manager/issues/1769
-RUN pip uninstall --yes setuptools \
-	&& pip install "setuptools==58.0.0"
+RUN rm -rf /etc/services.d/frontend /etc/nginx/conf.d/dev.conf \
+	&& chmod 644 /etc/logrotate.d/nginx-proxy-manager \
+	&& pip uninstall --yes setuptools \
+	&& pip install --no-cache-dir "setuptools==58.0.0"

 VOLUME [ "/data", "/etc/letsencrypt" ]
 ENTRYPOINT [ "/init" ]
@@ -7,7 +7,7 @@ ENV S6_LOGGING=0 \

 RUN echo "fs.file-max = 65535" > /etc/sysctl.conf \
 	&& apt-get update \
-	&& apt-get install -y certbot jq python3-pip logrotate \
+	&& apt-get install -y jq python3-pip logrotate \
 	&& apt-get clean \
 	&& rm -rf /var/lib/apt/lists/*

@@ -21,9 +21,8 @@ RUN rm -f /etc/nginx/conf.d/production.conf
 RUN chmod 644 /etc/logrotate.d/nginx-proxy-manager

 # s6 overlay
-RUN curl -L -o /tmp/s6-overlay-amd64.tar.gz "https://github.com/just-containers/s6-overlay/releases/download/v1.22.1.0/s6-overlay-amd64.tar.gz" \
-	&& tar -xzf /tmp/s6-overlay-amd64.tar.gz -C /
+COPY scripts/install-s6 /tmp/install-s6
+RUN /tmp/install-s6 "${TARGETPLATFORM}" && rm -f /tmp/install-s6

 EXPOSE 80 81 443
 ENTRYPOINT [ "/init" ]
@@ -1,17 +1,18 @@
 # WARNING: This is a CI docker-compose file used for building and testing of the entire app, it should not be used for production.
-version: "3"
+version: '3.8'
 services:

   fullstack-mysql:
-    image: ${IMAGE}:ci-${BUILD_NUMBER}
+    image: "${IMAGE}:ci-${BUILD_NUMBER}"
     environment:
       NODE_ENV: "development"
+      DEBUG: 'true'
+      LE_STAGING: 'true'
       FORCE_COLOR: 1
-      DB_MYSQL_HOST: "db"
-      DB_MYSQL_PORT: 3306
-      DB_MYSQL_USER: "npm"
-      DB_MYSQL_PASSWORD: "npm"
-      DB_MYSQL_NAME: "npm"
+      DB_MYSQL_HOST: 'db'
+      DB_MYSQL_PORT: '3306'
+      DB_MYSQL_USER: 'npm'
+      DB_MYSQL_PASSWORD: 'npm'
+      DB_MYSQL_NAME: 'npm'
     volumes:
       - npm_data:/data
     expose:
@@ -26,11 +27,12 @@ services:
       timeout: 3s

   fullstack-sqlite:
-    image: ${IMAGE}:ci-${BUILD_NUMBER}
+    image: "${IMAGE}:ci-${BUILD_NUMBER}"
     environment:
       NODE_ENV: "development"
+      DEBUG: 'true'
+      LE_STAGING: 'true'
       FORCE_COLOR: 1
-      DB_SQLITE_FILE: "/data/database.sqlite"
+      DB_SQLITE_FILE: '/data/mydb.sqlite'
     volumes:
       - npm_data:/data
     expose:
@@ -45,26 +47,26 @@ services:
   db:
     image: jc21/mariadb-aria
     environment:
-      MYSQL_ROOT_PASSWORD: "npm"
-      MYSQL_DATABASE: "npm"
-      MYSQL_USER: "npm"
-      MYSQL_PASSWORD: "npm"
+      MYSQL_ROOT_PASSWORD: 'npm'
+      MYSQL_DATABASE: 'npm'
+      MYSQL_USER: 'npm'
+      MYSQL_PASSWORD: 'npm'
     volumes:
      - db_data:/var/lib/mysql

   cypress-mysql:
-    image: ${IMAGE}-cypress:ci-${BUILD_NUMBER}
+    image: "${IMAGE}-cypress:ci-${BUILD_NUMBER}"
     build:
       context: ../test/
       dockerfile: cypress/Dockerfile
     environment:
-      CYPRESS_baseUrl: "http://fullstack-mysql:81"
+      CYPRESS_baseUrl: 'http://fullstack-mysql:81'
     volumes:
       - cypress-logs:/results
     command: cypress run --browser chrome --config-file=${CYPRESS_CONFIG:-cypress/config/ci.json}

   cypress-sqlite:
-    image: ${IMAGE}-cypress:ci-${BUILD_NUMBER}
+    image: "${IMAGE}-cypress:ci-${BUILD_NUMBER}"
     build:
       context: ../test/
       dockerfile: cypress/Dockerfile
@@ -1,6 +1,7 @@
 # WARNING: This is a DEVELOPMENT docker-compose file, it should not be used for production.
-version: "3.5"
+version: '3.8'
 services:

   npm:
     image: nginxproxymanager:dev
     container_name: npm_core
@@ -14,14 +15,19 @@ services:
     networks:
       - nginx_proxy_manager
     environment:
       NODE_ENV: "development"
+      PUID: 1000
+      PGID: 1000
       FORCE_COLOR: 1
-      DEVELOPMENT: "true"
-      DB_MYSQL_HOST: "db"
-      DB_MYSQL_PORT: 3306
-      DB_MYSQL_USER: "npm"
-      DB_MYSQL_PASSWORD: "npm"
-      DB_MYSQL_NAME: "npm"
+      # specifically for dev:
+      DEBUG: 'true'
+      DEVELOPMENT: 'true'
+      LE_STAGING: 'true'
+      # db:
+      DB_MYSQL_HOST: 'db'
+      DB_MYSQL_PORT: '3306'
+      DB_MYSQL_USER: 'npm'
+      DB_MYSQL_PASSWORD: 'npm'
+      DB_MYSQL_NAME: 'npm'
       # DB_SQLITE_FILE: "/data/database.sqlite"
       # DISABLE_IPV6: "true"
     volumes:
@@ -42,10 +48,10 @@ services:
     networks:
       - nginx_proxy_manager
     environment:
-      MYSQL_ROOT_PASSWORD: "npm"
-      MYSQL_DATABASE: "npm"
-      MYSQL_USER: "npm"
-      MYSQL_PASSWORD: "npm"
+      MYSQL_ROOT_PASSWORD: 'npm'
+      MYSQL_DATABASE: 'npm'
+      MYSQL_USER: 'npm'
+      MYSQL_PASSWORD: 'npm'
     volumes:
       - db_data:/var/lib/mysql

docker/rootfs/bin/common.sh (new file)
@@ -0,0 +1,29 @@
#!/bin/bash

set -e

CYAN='\E[1;36m'
BLUE='\E[1;34m'
YELLOW='\E[1;33m'
RED='\E[1;31m'
RESET='\E[0m'
export CYAN BLUE YELLOW RED RESET

log_info () {
	echo -e "${BLUE}❯ ${CYAN}$1${RESET}"
}

log_error () {
	echo -e "${RED}❯ $1${RESET}"
}

# The `run` file will only execute 1 line so this helps keep things
# logically separated

log_fatal () {
	echo -e "${RED}--------------------------------------${RESET}"
	echo -e "${RED}ERROR: $1${RESET}"
	echo -e "${RED}--------------------------------------${RESET}"
	/run/s6/basedir/bin/halt
	exit 1
}
(removed file)
@@ -1,46 +0,0 @@
#!/bin/bash

# This command reads the `DISABLE_IPV6` env var and will either enable
# or disable ipv6 in all nginx configs based on this setting.

# Lowercase
DISABLE_IPV6=$(echo "${DISABLE_IPV6:-}" | tr '[:upper:]' '[:lower:]')

CYAN='\E[1;36m'
BLUE='\E[1;34m'
YELLOW='\E[1;33m'
RED='\E[1;31m'
RESET='\E[0m'

FOLDER=$1
if [ "$FOLDER" == "" ]; then
	echo -e "${RED}❯ $0 requires an absolute folder path as the first argument!${RESET}"
	echo -e "${YELLOW}   ie: $0 /data/nginx${RESET}"
	exit 1
fi

FILES=$(find "$FOLDER" -type f -name "*.conf")
if [ "$DISABLE_IPV6" == "true" ] || [ "$DISABLE_IPV6" == "on" ] || [ "$DISABLE_IPV6" == "1" ] || [ "$DISABLE_IPV6" == "yes" ]; then
	# IPV6 is disabled
	echo "Disabling IPV6 in hosts"
	echo -e "${BLUE}❯ ${CYAN}Disabling IPV6 in hosts: ${YELLOW}${FOLDER}${RESET}"

	# Iterate over configs and run the regex
	for FILE in $FILES
	do
		echo -e "  ${BLUE}❯ ${YELLOW}${FILE}${RESET}"
		sed -E -i 's/^([^#]*)listen \[::\]/\1#listen [::]/g' "$FILE"
	done

else
	# IPV6 is enabled
	echo -e "${BLUE}❯ ${CYAN}Enabling IPV6 in hosts: ${YELLOW}${FOLDER}${RESET}"

	# Iterate over configs and run the regex
	for FILE in $FILES
	do
		echo -e "  ${BLUE}❯ ${YELLOW}${FILE}${RESET}"
		sed -E -i 's/^(\s*)#listen \[::\]/\1listen [::]/g' "$FILE"
	done

fi
docker/rootfs/etc/cont-finish.d/.gitignore (removed)
@@ -1,2 +0,0 @@
*
!.gitignore

docker/rootfs/etc/cont-init.d/.gitignore (removed)
@@ -1,3 +0,0 @@
*
!.gitignore
!*.sh
(removed file)
@@ -1,7 +0,0 @@
#!/usr/bin/with-contenv bash
set -e

mkdir -p /data/logs
echo "Changing ownership of /data/logs to $(id -u):$(id -g)"
chown -R "$(id -u):$(id -g)" /data/logs

(removed file)
@@ -1,29 +0,0 @@
#!/usr/bin/with-contenv bash
# ref: https://github.com/linuxserver/docker-baseimage-alpine/blob/master/root/etc/cont-init.d/01-envfile

# in s6, environmental variables are written as text files for s6 to monitor
# search through full-path filenames for files ending in "__FILE"
for FILENAME in $(find /var/run/s6/container_environment/ | grep "__FILE$"); do
	echo "[secret-init] Evaluating ${FILENAME##*/} ..."

	# set SECRETFILE to the contents of the full-path textfile
	SECRETFILE=$(cat ${FILENAME})
	# SECRETFILE=${FILENAME}
	# echo "[secret-init] Set SECRETFILE to ${SECRETFILE}"  # DEBUG - rm for prod!

	# if SECRETFILE exists / is not null
	if [[ -f ${SECRETFILE} ]]; then
		# strip the appended "__FILE" from environmental variable name ...
		STRIPFILE=$(echo ${FILENAME} | sed "s/__FILE//g")
		# echo "[secret-init] Set STRIPFILE to ${STRIPFILE}"  # DEBUG - rm for prod!

		# ... and set value to contents of secretfile
		# since s6 uses text files, this is effectively "export ..."
		printf $(cat ${SECRETFILE}) > ${STRIPFILE}
		# echo "[secret-init] Set ${STRIPFILE##*/} to $(cat ${STRIPFILE})"  # DEBUG - rm for prod!"
		echo "[secret-init] Success! ${STRIPFILE##*/} set from ${FILENAME##*/}"

	else
		echo "[secret-init] cannot find secret in ${FILENAME}"
	fi
done

docker/rootfs/etc/fix-attrs.d/.gitignore (removed)
@@ -1,2 +0,0 @@
*
!.gitignore
@@ -1,7 +1,6 @@
 # run nginx in foreground
 daemon off;

-user root;
 pid /run/nginx/nginx.pid;

 # Set number of worker processes automatically based on number of CPU cores.
 worker_processes auto;
@@ -57,7 +56,7 @@ http {
 	}

 	# Real IP Determination

 	# Local subnets:
 	set_real_ip_from 10.0.0.0/8;
 	set_real_ip_from 172.16.0.0/12; # Includes Docker subnet
docker/rootfs/etc/s6-overlay/s6-rc.d/backend/run (new executable file)
@@ -0,0 +1,22 @@
#!/command/with-contenv bash
# shellcheck shell=bash

set -e

. /bin/common.sh

log_info 'Starting backend ...'

if [ "$DEVELOPMENT" == "true" ]; then
	cd /app || exit 1
	# If yarn install fails: add --verbose --network-concurrency 1
	s6-setuidgid npmuser yarn install
	exec s6-setuidgid npmuser bash -c 'export HOME=/tmp/npmuserhome;node --max_old_space_size=250 --abort_on_uncaught_exception node_modules/nodemon/bin/nodemon.js'
else
	cd /app || exit 1
	while :
	do
		s6-setuidgid npmuser bash -c 'export HOME=/tmp/npmuserhome;node --abort_on_uncaught_exception --max_old_space_size=250 index.js'
		sleep 1
	done
fi

docker/rootfs/etc/s6-overlay/s6-rc.d/backend/type (new file)
@@ -0,0 +1 @@
longrun
docker/rootfs/etc/s6-overlay/s6-rc.d/frontend/run (new executable file)
@@ -0,0 +1,21 @@
#!/command/with-contenv bash
# shellcheck shell=bash

set -e

# This service is DEVELOPMENT only.

if [ "$DEVELOPMENT" == "true" ]; then
	. /bin/common.sh
	cd /app/frontend || exit 1
	log_info 'Starting frontend ...'
	HOME=/tmp/npmuserhome
	export HOME
	mkdir -p /app/frontend/dist
	chown -R npmuser:npmuser /app/frontend/dist
	# If yarn install fails: add --verbose --network-concurrency 1
	s6-setuidgid npmuser yarn install
	exec s6-setuidgid npmuser yarn watch
else
	exit 0
fi

docker/rootfs/etc/s6-overlay/s6-rc.d/frontend/type (new file)
@@ -0,0 +1 @@
longrun
docker/rootfs/etc/s6-overlay/s6-rc.d/nginx/run (new executable file)
@@ -0,0 +1,10 @@
#!/command/with-contenv bash
# shellcheck shell=bash

set -e

. /bin/common.sh

log_info 'Starting nginx ...'

exec s6-setuidgid npmuser nginx

docker/rootfs/etc/s6-overlay/s6-rc.d/nginx/type (new file)
@@ -0,0 +1 @@
longrun
docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/00-all.sh (new executable file)
@@ -0,0 +1,18 @@
#!/command/with-contenv bash
# shellcheck shell=bash

set -e

. /bin/common.sh

if [ "$(id -u)" != "0" ]; then
	log_fatal "This docker container must be run as root, do not specify a user.\nYou can specify PUID and PGID env vars to run processes as that user and group after initialization."
fi

. /etc/s6-overlay/s6-rc.d/prepare/10-npmuser.sh
. /etc/s6-overlay/s6-rc.d/prepare/20-paths.sh
. /etc/s6-overlay/s6-rc.d/prepare/30-ownership.sh
. /etc/s6-overlay/s6-rc.d/prepare/40-dynamic.sh
. /etc/s6-overlay/s6-rc.d/prepare/50-ipv6.sh
. /etc/s6-overlay/s6-rc.d/prepare/60-secrets.sh
. /etc/s6-overlay/s6-rc.d/prepare/90-banner.sh
docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/10-npmuser.sh (new executable file)
@@ -0,0 +1,25 @@
#!/command/with-contenv bash
# shellcheck shell=bash

set -e

PUID=${PUID:-911}
PGID=${PGID:-911}

log_info 'Configuring npmuser ...'

groupmod -g 1000 users || exit 1

if id -u npmuser; then
	# user already exists
	usermod -u "${PUID}" npmuser || exit 1
else
	# Add npmuser user
	useradd -u "${PUID}" -U -d /tmp/npmuserhome -s /bin/false npmuser || exit 1
fi

usermod -G users npmuser || exit 1
groupmod -o -g "${PGID}" npmuser || exit 1
# Home for npmuser
mkdir -p /tmp/npmuserhome
chown -R npmuser:npmuser /tmp/npmuserhome
docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/20-paths.sh (new executable file)
@@ -0,0 +1,41 @@
#!/command/with-contenv bash
# shellcheck shell=bash

set -e

log_info 'Checking paths ...'

# Ensure /data is mounted
if [ ! -d '/data' ]; then
	log_fatal '/data is not mounted! Check your docker configuration.'
fi
# Ensure /etc/letsencrypt is mounted
if [ ! -d '/etc/letsencrypt' ]; then
	log_fatal '/etc/letsencrypt is not mounted! Check your docker configuration.'
fi

# Create required folders
mkdir -p \
	/data/nginx \
	/data/custom_ssl \
	/data/logs \
	/data/access \
	/data/nginx/default_host \
	/data/nginx/default_www \
	/data/nginx/proxy_host \
	/data/nginx/redirection_host \
	/data/nginx/stream \
	/data/nginx/dead_host \
	/data/nginx/temp \
	/data/letsencrypt-acme-challenge \
	/run/nginx \
	/tmp/nginx/body \
	/var/log/nginx \
	/var/lib/nginx/cache/public \
	/var/lib/nginx/cache/private \
	/var/cache/nginx/proxy_temp

touch /var/log/nginx/error.log || true
chmod 777 /var/log/nginx/error.log || true
chmod -R 777 /var/cache/nginx || true
chmod 644 /etc/logrotate.d/nginx-proxy-manager
docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/30-ownership.sh (new executable file)
@@ -0,0 +1,21 @@
#!/command/with-contenv bash
# shellcheck shell=bash

set -e

log_info 'Setting ownership ...'

# root
chown root /tmp/nginx

# npmuser
chown -R npmuser:npmuser \
	/data \
	/etc/letsencrypt \
	/etc/nginx \
	/run/nginx \
	/tmp/nginx \
	/var/cache/nginx \
	/var/lib/logrotate \
	/var/lib/nginx \
	/var/log/nginx
docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/40-dynamic.sh (new executable file)
@@ -0,0 +1,17 @@
#!/command/with-contenv bash
# shellcheck shell=bash

set -e

log_info 'Dynamic resolvers ...'

DISABLE_IPV6=$(echo "${DISABLE_IPV6:-}" | tr '[:upper:]' '[:lower:]')

# Dynamically generate resolvers file, if resolver is IPv6, enclose in `[]`
# thanks @tfmm
if [ "$DISABLE_IPV6" == "true" ] || [ "$DISABLE_IPV6" == "on" ] || [ "$DISABLE_IPV6" == "1" ] || [ "$DISABLE_IPV6" == "yes" ];
then
	echo resolver "$(awk 'BEGIN{ORS=" "} $1=="nameserver" { sub(/%.*$/,"",$2); print ($2 ~ ":")? "["$2"]": $2}' /etc/resolv.conf) ipv6=off valid=10s;" > /etc/nginx/conf.d/include/resolvers.conf
else
	echo resolver "$(awk 'BEGIN{ORS=" "} $1=="nameserver" { sub(/%.*$/,"",$2); print ($2 ~ ":")? "["$2"]": $2}' /etc/resolv.conf) valid=10s;" > /etc/nginx/conf.d/include/resolvers.conf
fi
docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/50-ipv6.sh (new file)
@@ -0,0 +1,36 @@
#!/bin/bash

# This command reads the `DISABLE_IPV6` env var and will either enable
# or disable ipv6 in all nginx configs based on this setting.

log_info 'IPv6 ...'

# Lowercase
DISABLE_IPV6=$(echo "${DISABLE_IPV6:-}" | tr '[:upper:]' '[:lower:]')

process_folder () {
	FILES=$(find "$1" -type f -name "*.conf")
	SED_REGEX=

	if [ "$DISABLE_IPV6" == "true" ] || [ "$DISABLE_IPV6" == "on" ] || [ "$DISABLE_IPV6" == "1" ] || [ "$DISABLE_IPV6" == "yes" ]; then
		# IPV6 is disabled
		echo "Disabling IPV6 in hosts in: $1"
		SED_REGEX='s/^([^#]*)listen \[::\]/\1#listen [::]/g'
	else
		# IPV6 is enabled
		echo "Enabling IPV6 in hosts in: $1"
		SED_REGEX='s/^(\s*)#listen \[::\]/\1listen [::]/g'
	fi

	for FILE in $FILES
	do
		echo "- ${FILE}"
		sed -E -i "$SED_REGEX" "$FILE"
	done

	# ensure the files are still owned by the npmuser
	chown -R npmuser:npmuser "$1"
}

process_folder /etc/nginx/conf.d
process_folder /data/nginx
docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/60-secrets.sh (new executable file)
@@ -0,0 +1,30 @@
#!/command/with-contenv bash
# shellcheck shell=bash

set -e

# in s6, environmental variables are written as text files for s6 to monitor
# search through full-path filenames for files ending in "__FILE"
log_info 'Docker secrets ...'

for FILENAME in $(find /var/run/s6/container_environment/ | grep "__FILE$"); do
	echo "[secret-init] Evaluating ${FILENAME##*/} ..."

	# set SECRETFILE to the contents of the full-path textfile
	SECRETFILE=$(cat "${FILENAME}")
	# if SECRETFILE exists / is not null
	if [[ -f "${SECRETFILE}" ]]; then
		# strip the appended "__FILE" from environmental variable name ...
		STRIPFILE=$(echo "${FILENAME}" | sed "s/__FILE//g")
		# echo "[secret-init] Set STRIPFILE to ${STRIPFILE}"  # DEBUG - rm for prod!

		# ... and set value to contents of secretfile
		# since s6 uses text files, this is effectively "export ..."
		printf $(cat "${SECRETFILE}") > "${STRIPFILE}"
		# echo "[secret-init] Set ${STRIPFILE##*/} to $(cat ${STRIPFILE})"  # DEBUG - rm for prod!"
		echo "Success: ${STRIPFILE##*/} set from ${FILENAME##*/}"

	else
		echo "Cannot find secret in ${FILENAME}"
	fi
done
docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/90-banner.sh (new executable file)
@@ -0,0 +1,17 @@
#!/command/with-contenv bash
# shellcheck shell=bash

set -e

echo
echo "-------------------------------------
 _   _ ____  __  __
| \ | |  _ \|  \/  |
|  \| | |_) | |\/| |
| |\  |  __/| |  | |
|_| \_|_|   |_|  |_|
-------------------------------------
User UID: $(id -u npmuser)
User GID: $(id -g npmuser)
-------------------------------------
"
docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/type (new file)
@@ -0,0 +1 @@
oneshot

docker/rootfs/etc/s6-overlay/s6-rc.d/prepare/up (new file)
@@ -0,0 +1,2 @@
# shellcheck shell=bash
/etc/s6-overlay/s6-rc.d/prepare/00-all.sh
(removed file)
@@ -1,6 +0,0 @@
#!/usr/bin/execlineb -S1
if { s6-test ${1} -ne 0 }
if { s6-test ${1} -ne 256 }

s6-svscanctl -t /var/run/s6/services

(removed file)
@@ -1,12 +0,0 @@
#!/usr/bin/with-contenv bash

# This service is DEVELOPMENT only.

if [ "$DEVELOPMENT" == "true" ]; then
	cd /app/frontend || exit 1
	# If yarn install fails: add --verbose --network-concurrency 1
	yarn install
	yarn watch
else
	exit 0
fi

(removed file)
@@ -1,3 +0,0 @@
#!/usr/bin/with-contenv bash

s6-svscanctl -t /var/run/s6/services

(removed file)
@@ -1,19 +0,0 @@
#!/usr/bin/with-contenv bash

mkdir -p /data/letsencrypt-acme-challenge

cd /app || echo

if [ "$DEVELOPMENT" == "true" ]; then
	cd /app || exit 1
	# If yarn install fails: add --verbose --network-concurrency 1
	yarn install
	node --max_old_space_size=250 --abort_on_uncaught_exception node_modules/nodemon/bin/nodemon.js
else
	cd /app || exit 1
	while :
	do
		node --abort_on_uncaught_exception --max_old_space_size=250 index.js
		sleep 1
	done
fi

(removed file)
@@ -1 +0,0 @@
/bin/true
(removed file)
@@ -1,38 +0,0 @@
#!/usr/bin/with-contenv bash

# Create required folders
mkdir -p /tmp/nginx/body \
	/run/nginx \
	/var/log/nginx \
	/data/nginx \
	/data/custom_ssl \
	/data/logs \
	/data/access \
	/data/nginx/default_host \
	/data/nginx/default_www \
	/data/nginx/proxy_host \
	/data/nginx/redirection_host \
	/data/nginx/stream \
	/data/nginx/dead_host \
	/data/nginx/temp \
	/var/lib/nginx/cache/public \
	/var/lib/nginx/cache/private \
	/var/cache/nginx/proxy_temp

touch /var/log/nginx/error.log && chmod 777 /var/log/nginx/error.log && chmod -R 777 /var/cache/nginx
chown root /tmp/nginx

# Dynamically generate resolvers file, if resolver is IPv6, enclose in `[]`
# thanks @tfmm
if [ "$DISABLE_IPV6" == "true" ] || [ "$DISABLE_IPV6" == "on" ] || [ "$DISABLE_IPV6" == "1" ] || [ "$DISABLE_IPV6" == "yes" ];
then
	echo resolver "$(awk 'BEGIN{ORS=" "} $1=="nameserver" { sub(/%.*$/,"",$2); print ($2 ~ ":")? "["$2"]": $2}' /etc/resolv.conf) ipv6=off valid=10s;" > /etc/nginx/conf.d/include/resolvers.conf
else
	echo resolver "$(awk 'BEGIN{ORS=" "} $1=="nameserver" { sub(/%.*$/,"",$2); print ($2 ~ ":")? "["$2"]": $2}' /etc/resolv.conf) valid=10s;" > /etc/nginx/conf.d/include/resolvers.conf
fi

# Handle IPV6 settings
/bin/handle-ipv6-setting /etc/nginx/conf.d
/bin/handle-ipv6-setting /data/nginx

exec nginx
@@ -8,8 +8,8 @@ BLUE='\E[1;34m'
 GREEN='\E[1;32m'
 RESET='\E[0m'

-S6_OVERLAY_VERSION=1.22.1.0
-TARGETPLATFORM=$1
+S6_OVERLAY_VERSION=3.1.4.1
+TARGETPLATFORM=${1:unspecified}

 # Determine the correct binary file for the architecture given
 case $TARGETPLATFORM in
@@ -22,13 +22,17 @@ case $TARGETPLATFORM in
 		;;

 	*)
-		S6_ARCH=amd64
+		S6_ARCH=x86_64
 		;;
 esac

 echo -e "${BLUE}❯ ${CYAN}Installing S6-overlay v${S6_OVERLAY_VERSION} for ${YELLOW}${TARGETPLATFORM} (${S6_ARCH})${RESET}"

-curl -L -o "/tmp/s6-overlay-${S6_ARCH}.tar.gz" "https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-${S6_ARCH}.tar.gz" \
-	&& tar -xzf "/tmp/s6-overlay-${S6_ARCH}.tar.gz" -C /
+curl -L -o '/tmp/s6-overlay-noarch.tar.xz' "https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-noarch.tar.xz"
+curl -L -o "/tmp/s6-overlay-${S6_ARCH}.tar.xz" "https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-${S6_ARCH}.tar.xz"
+tar -C / -Jxpf '/tmp/s6-overlay-noarch.tar.xz'
+tar -C / -Jxpf "/tmp/s6-overlay-${S6_ARCH}.tar.xz"
+
+rm -rf "/tmp/s6-overlay-${S6_ARCH}.tar.xz"

 echo -e "${BLUE}❯ ${GREEN}S6-overlay install Complete${RESET}"
@@ -25,7 +25,7 @@ networks:
 Let's look at a Portainer example:

 ```yml
-version: '3'
+version: '3.8'
 services:

   portainer:
@@ -60,14 +60,14 @@ healthcheck:
   timeout: 3s
 ```

-## Docker Secrets
+## Docker File Secrets

-This image supports the use of Docker secrets to import from file and keep sensitive usernames or passwords from being passed or preserved in plaintext.
+This image supports the use of Docker secrets to import from files and keep sensitive usernames or passwords from being passed or preserved in plaintext.

 You can set any environment variable from a file by appending `__FILE` (double-underscore FILE) to the environment variable name.

 ```yml
-version: "3.7"
+version: '3.8'

 secrets:
   # Secrets are single-line text files where the sole content is the secret
@@ -96,9 +96,7 @@ services:
       # DB_MYSQL_PASSWORD: "npm"  # use secret instead
       DB_MYSQL_PASSWORD__FILE: /run/secrets/MYSQL_PWD
       DB_MYSQL_NAME: "npm"
-      # If you would rather use Sqlite uncomment this
-      # and remove all DB_MYSQL_* lines above
-      # DB_SQLITE_FILE: "/data/database.sqlite"
+      # If you would rather use Sqlite, remove all DB_MYSQL_* lines above
       # Uncomment this if IPv6 is not enabled on your host
       # DISABLE_IPV6: 'true'
     volumes:
@@ -108,6 +106,7 @@ services:
       - MYSQL_PWD
     depends_on:
       - db
+
   db:
     image: jc21/mariadb-aria
     restart: unless-stopped
|
||||
Create a `docker-compose.yml` file:
|
||||
|
||||
```yml
|
||||
version: "3"
|
||||
version: '3.8'
|
||||
services:
|
||||
app:
|
||||
image: 'jc21/nginx-proxy-manager:latest'
|
||||
@ -20,7 +20,7 @@ services:
|
||||
|
||||
# Uncomment the next line if you uncomment anything in the section
|
||||
# environment:
|
||||
# Uncomment this if you want to change the location of
|
||||
# Uncomment this if you want to change the location of
|
||||
# the SQLite DB file within the container
|
||||
# DB_SQLITE_FILE: "/data/database.sqlite"
|
||||
|
||||
@ -51,7 +51,7 @@ are going to use.
|
||||
Here is an example of what your `docker-compose.yml` will look like when using a MariaDB container:
|
||||
|
||||
```yml
|
||||
version: "3"
|
||||
version: '3.8'
|
||||
services:
|
||||
app:
|
||||
image: 'jc21/nginx-proxy-manager:latest'
|
||||
@ -64,6 +64,10 @@ services:
|
||||
# Add any other Stream port you want to expose
|
||||
# - '21:21' # FTP
|
||||
environment:
|
||||
# Unix user and group IDs, optional
|
||||
PUID: 1000
|
||||
PGID: 1000
|
||||
# Mysql/Maria connection parameters:
|
||||
DB_MYSQL_HOST: "db"
|
||||
DB_MYSQL_PORT: 3306
|
||||
DB_MYSQL_USER: "npm"
|
||||
@ -118,13 +122,12 @@ Please note that the `jc21/mariadb-aria:latest` image might have some problems o
|
||||
|
||||
After the app is running for the first time, the following will happen:
|
||||
|
||||
1. The database will initialize with table structures
|
||||
2. GPG keys will be generated and saved in the configuration file
|
||||
1. GPG keys will be generated and saved in the data folder
|
||||
2. The database will initialize with table structures
|
||||
3. A default admin user will be created
|
||||
|
||||
This process can take a couple of minutes depending on your machine.
|
||||
|
||||
|
||||
## Default Administrator User
|
||||
|
||||
```
|
||||
@ -134,49 +137,3 @@ Password: changeme
|
||||
|
||||
Immediately after logging in with this default user you will be asked to modify your details and change your password.
|
||||
|
||||
## Configuration File
|
||||
|
||||
::: warning
|
||||
|
||||
This section is meant for advanced users
|
||||
|
||||
:::
|
||||
|
||||
If you would like more control over the database settings you can define a custom config JSON file.
|
||||
|
||||
|
||||
Here's an example for `sqlite` configuration as it is generated from the environment variables:
|
||||
|
||||
```json
|
||||
{
|
||||
"database": {
|
||||
"engine": "knex-native",
|
||||
"knex": {
|
||||
"client": "sqlite3",
|
||||
"connection": {
|
||||
"filename": "/data/database.sqlite"
|
||||
},
|
||||
"useNullAsDefault": true
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
You can modify the `knex` object with your custom configuration, but note that not all knex clients might be installed in the image.
|
||||
|
||||
Once you've created your configuration file you can mount it to `/app/config/production.json` inside you container using:
|
||||
|
||||
```
|
||||
[...]
|
||||
services:
|
||||
app:
|
||||
image: 'jc21/nginx-proxy-manager:latest'
|
||||
[...]
|
||||
volumes:
|
||||
- ./config.json:/app/config/production.json
|
||||
[...]
|
||||
[...]
|
||||
```
|
||||
|
||||
**Note:** After the first run of the application, the config file will be altered to include generated encryption keys unique to your installation.
|
||||
These keys affect the login and session management of the application. If these keys change for any reason, all users will be logged out.
|
||||
|
@@ -9,3 +9,4 @@ This project will automatically update any databases or other requirements so you don't have to follow
 any crazy instructions. These steps above will pull the latest updates and recreate the docker
 containers.

+See the [list of releases](https://github.com/NginxProxyManager/nginx-proxy-manager/releases) for any upgrade steps specific to each release.
@@ -487,9 +487,9 @@ dns_powerdns_api_key = AbCbASsd!@34`,
 		package_name: 'certbot-regru',
 		version_requirement: '~=1.0.2',
 		dependencies: '',
-		credentials: `certbot_regru:dns_username=username
-certbot_regru:dns_password=password`,
-		full_plugin_name: 'certbot-regru:dns',
+		credentials: `dns_username=username
+dns_password=password`,
+		full_plugin_name: 'dns',
 	},
 	//####################################################//
 	rfc2136: {