Mirror of https://github.com/NginxProxyManager/nginx-proxy-manager.git
Synced 2025-07-03 16:46:49 +00:00

Compare commits: v2.12.2 ... c6d884dab6 (19 commits)

Commits (SHA1):
c6d884dab6
5dc78df0bb
04636b71a9
1353937c36
f68c1b7c29
32e0784865
f386f6b640
5ba7363c9e
2e45444328
eb5c51a657
cb795565ea
04b3608b4e
111fc287eb
95a94a4f8c
5e7b69c396
2723de24fd
891877afb6
8e9e033a72
e6ec74c2f7
Jenkinsfile (vendored): 38 changed lines
@@ -167,44 +167,6 @@ pipeline {
}
}
}
stage('Test Postgres') {
environment {
COMPOSE_PROJECT_NAME = "npm_${BRANCH_LOWER}_${BUILD_NUMBER}_postgres"
COMPOSE_FILE = 'docker/docker-compose.ci.yml:docker/docker-compose.ci.postgres.yml'
}
when {
not {
equals expected: 'UNSTABLE', actual: currentBuild.result
}
}
steps {
sh 'rm -rf ./test/results/junit/*'
sh './scripts/ci/fulltest-cypress'
}
post {
always {
// Dumps to analyze later
sh 'mkdir -p debug/postgres'
sh 'docker logs $(docker-compose ps --all -q fullstack) > debug/postgres/docker_fullstack.log 2>&1'
sh 'docker logs $(docker-compose ps --all -q stepca) > debug/postgres/docker_stepca.log 2>&1'
sh 'docker logs $(docker-compose ps --all -q pdns) > debug/postgres/docker_pdns.log 2>&1'
sh 'docker logs $(docker-compose ps --all -q pdns-db) > debug/postgres/docker_pdns-db.log 2>&1'
sh 'docker logs $(docker-compose ps --all -q dnsrouter) > debug/postgres/docker_dnsrouter.log 2>&1'
sh 'docker logs $(docker-compose ps --all -q db-postgres) > debug/postgres/docker_db-postgres.log 2>&1'
sh 'docker logs $(docker-compose ps --all -q authentik) > debug/postgres/docker_authentik.log 2>&1'
sh 'docker logs $(docker-compose ps --all -q authentik-redis) > debug/postgres/docker_authentik-redis.log 2>&1'
sh 'docker logs $(docker-compose ps --all -q authentik-ldap) > debug/postgres/docker_authentik-ldap.log 2>&1'

junit 'test/results/junit/*'
sh 'docker-compose down --remove-orphans --volumes -t 30 || true'
}
unstable {
dir(path: 'testing/results') {
archiveArtifacts(allowEmptyArchive: true, artifacts: '**/*', excludes: '**/*.xml')
}
}
}
}
stage('MultiArch Build') {
when {
not {
@@ -1,7 +1,7 @@
<p align="center">
<img src="https://nginxproxymanager.com/github.png">
<br><br>
<img src="https://img.shields.io/badge/version-2.12.2-green.svg?style=for-the-badge">
<img src="https://img.shields.io/badge/version-2.12.1-green.svg?style=for-the-badge">
<a href="https://hub.docker.com/repository/docker/jc21/nginx-proxy-manager">
<img src="https://img.shields.io/docker/stars/jc21/nginx-proxy-manager.svg?style=for-the-badge">
</a>
@@ -81,7 +81,7 @@ const internalAccessList = {

return internalAccessList.build(row)
.then(() => {
if (parseInt(row.proxy_host_count, 10)) {
if (row.proxy_host_count) {
return internalNginx.bulkGenerateConfigs('proxy_host', row.proxy_hosts);
}
})

@@ -223,7 +223,7 @@ const internalAccessList = {
.then((row) => {
return internalAccessList.build(row)
.then(() => {
if (parseInt(row.proxy_host_count, 10)) {
if (row.proxy_host_count) {
return internalNginx.bulkGenerateConfigs('proxy_host', row.proxy_hosts);
}
}).then(internalNginx.reload)

@@ -252,10 +252,7 @@ const internalAccessList = {
let query = accessListModel
.query()
.select('access_list.*', accessListModel.raw('COUNT(proxy_host.id) as proxy_host_count'))
.leftJoin('proxy_host', function() {
this.on('proxy_host.access_list_id', '=', 'access_list.id')
.andOn('proxy_host.is_deleted', '=', 0);
})
.joinRaw('LEFT JOIN `proxy_host` ON `proxy_host`.`access_list_id` = `access_list`.`id` AND `proxy_host`.`is_deleted` = 0')
.where('access_list.is_deleted', 0)
.andWhere('access_list.id', data.id)
.allowGraph('[owner,items,clients,proxy_hosts.[certificate,access_list.[clients,items]]]')

@@ -376,10 +373,7 @@ const internalAccessList = {
let query = accessListModel
.query()
.select('access_list.*', accessListModel.raw('COUNT(proxy_host.id) as proxy_host_count'))
.leftJoin('proxy_host', function() {
this.on('proxy_host.access_list_id', '=', 'access_list.id')
.andOn('proxy_host.is_deleted', '=', 0);
})
.joinRaw('LEFT JOIN `proxy_host` ON `proxy_host`.`access_list_id` = `access_list`.`id` AND `proxy_host`.`is_deleted` = 0')
.where('access_list.is_deleted', 0)
.groupBy('access_list.id')
.allowGraph('[owner,items,clients]')
@@ -1,6 +1,5 @@
const error = require('../lib/error');
const auditLogModel = require('../models/audit-log');
const {castJsonIfNeed} = require('../lib/helpers');
const error = require('../lib/error');
const auditLogModel = require('../models/audit-log');

const internalAuditLog = {

@@ -23,9 +22,9 @@ const internalAuditLog = {
.allowGraph('[user]');

// Query is used for searching
if (typeof search_query === 'string' && search_query.length > 0) {
if (typeof search_query === 'string') {
query.where(function () {
this.where(castJsonIfNeed('meta'), 'like', '%' + search_query + '%');
this.where('meta', 'like', '%' + search_query + '%');
});
}
@@ -570,6 +570,7 @@ const internalCertificate = {
return internalCertificate.create(access, {
provider: 'letsencrypt',
domain_names: data.domain_names,
ssl_key_type: data.ssl_key_type,
meta: data.meta
});
},

@@ -832,6 +833,7 @@ const internalCertificate = {

const cmd = `${certbotCommand} certonly ` +
`--config '${letsencryptConfig}' ` +
`--key-type '${certificate.ssl_key_type}' ` +
'--work-dir "/tmp/letsencrypt-lib" ' +
'--logs-dir "/tmp/letsencrypt-log" ' +
`--cert-name "npm-${certificate.id}" ` +

@@ -873,6 +875,7 @@ const internalCertificate = {

let mainCmd = certbotCommand + ' certonly ' +
`--config '${letsencryptConfig}' ` +
`--key-type '${certificate.ssl_key_type}' ` +
'--work-dir "/tmp/letsencrypt-lib" ' +
'--logs-dir "/tmp/letsencrypt-log" ' +
`--cert-name 'npm-${certificate.id}' ` +

@@ -969,6 +972,7 @@ const internalCertificate = {

const cmd = certbotCommand + ' renew --force-renewal ' +
`--config '${letsencryptConfig}' ` +
`--key-type '${certificate.ssl_key_type}' ` +
'--work-dir "/tmp/letsencrypt-lib" ' +
'--logs-dir "/tmp/letsencrypt-log" ' +
`--cert-name 'npm-${certificate.id}' ` +

@@ -1002,6 +1006,7 @@ const internalCertificate = {

let mainCmd = certbotCommand + ' renew --force-renewal ' +
`--config "${letsencryptConfig}" ` +
`--key-type '${certificate.ssl_key_type}' ` +
'--work-dir "/tmp/letsencrypt-lib" ' +
'--logs-dir "/tmp/letsencrypt-log" ' +
`--cert-name 'npm-${certificate.id}' ` +

@@ -1035,6 +1040,7 @@ const internalCertificate = {

const mainCmd = certbotCommand + ' revoke ' +
`--config '${letsencryptConfig}' ` +
`--key-type '${certificate.ssl_key_type}' ` +
'--work-dir "/tmp/letsencrypt-lib" ' +
'--logs-dir "/tmp/letsencrypt-log" ' +
`--cert-path '/etc/letsencrypt/live/npm-${certificate.id}/fullchain.pem' ` +
@@ -6,7 +6,6 @@ const internalHost = require('./host');
const internalNginx = require('./nginx');
const internalAuditLog = require('./audit-log');
const internalCertificate = require('./certificate');
const {castJsonIfNeed} = require('../lib/helpers');

function omissions () {
return ['is_deleted'];

@@ -410,16 +409,16 @@ const internalDeadHost = {
.where('is_deleted', 0)
.groupBy('id')
.allowGraph('[owner,certificate]')
.orderBy(castJsonIfNeed('domain_names'), 'ASC');
.orderBy('domain_names', 'ASC');

if (access_data.permission_visibility !== 'all') {
query.andWhere('owner_user_id', access.token.getUserId(1));
}

// Query is used for searching
if (typeof search_query === 'string' && search_query.length > 0) {
if (typeof search_query === 'string') {
query.where(function () {
this.where(castJsonIfNeed('domain_names'), 'like', '%' + search_query + '%');
this.where('domain_names', 'like', '%' + search_query + '%');
});
}
@@ -2,7 +2,6 @@ const _ = require('lodash');
const proxyHostModel = require('../models/proxy_host');
const redirectionHostModel = require('../models/redirection_host');
const deadHostModel = require('../models/dead_host');
const {castJsonIfNeed} = require('../lib/helpers');

const internalHost = {

@@ -18,7 +17,7 @@ const internalHost = {
cleanSslHstsData: function (data, existing_data) {
existing_data = existing_data === undefined ? {} : existing_data;

const combined_data = _.assign({}, existing_data, data);
let combined_data = _.assign({}, existing_data, data);

if (!combined_data.certificate_id) {
combined_data.ssl_forced = false;

@@ -74,7 +73,7 @@ const internalHost = {
* @returns {Promise}
*/
getHostsWithDomains: function (domain_names) {
const promises = [
let promises = [
proxyHostModel
.query()
.where('is_deleted', 0),

@@ -126,19 +125,19 @@ const internalHost = {
* @returns {Promise}
*/
isHostnameTaken: function (hostname, ignore_type, ignore_id) {
const promises = [
let promises = [
proxyHostModel
.query()
.where('is_deleted', 0)
.andWhere(castJsonIfNeed('domain_names'), 'like', '%' + hostname + '%'),
.andWhere('domain_names', 'like', '%' + hostname + '%'),
redirectionHostModel
.query()
.where('is_deleted', 0)
.andWhere(castJsonIfNeed('domain_names'), 'like', '%' + hostname + '%'),
.andWhere('domain_names', 'like', '%' + hostname + '%'),
deadHostModel
.query()
.where('is_deleted', 0)
.andWhere(castJsonIfNeed('domain_names'), 'like', '%' + hostname + '%')
.andWhere('domain_names', 'like', '%' + hostname + '%')
];

return Promise.all(promises)

@@ -229,8 +228,32 @@ const internalHost = {
}

return response;
}
},

/**
* Internal use only, checks to see if the there is another default server record
*
* @param {String} hostname
* @param {String} [ignore_type] 'proxy', 'redirection', 'dead'
* @param {Integer} [ignore_id] Must be supplied if type was also supplied
* @returns {Promise}
*/
checkDefaultServerNotExist: function (hostname) {
let promises = proxyHostModel
.query()
.where('default_server', true)
.andWhere('domain_names', 'not like', '%' + hostname + '%');

return Promise.resolve(promises)
.then((promises_results) => {
if (promises_results.length > 0){
return false;
}
return true;
});

}
};

module.exports = internalHost;
@@ -6,7 +6,6 @@ const internalHost = require('./host');
const internalNginx = require('./nginx');
const internalAuditLog = require('./audit-log');
const internalCertificate = require('./certificate');
const {castJsonIfNeed} = require('../lib/helpers');

function omissions () {
return ['is_deleted', 'owner.is_deleted'];

@@ -44,6 +43,22 @@ const internalProxyHost = {
});
});
})
.then(() => {
// Get a list of the domain names and check each of them against default records
if (data.default_server){
if (data.domain_names.length > 1) {
throw new error.ValidationError('Default server cant be set for multiple domain!');
}

return internalHost
.checkDefaultServerNotExist(data.domain_names[0])
.then((result) => {
if (!result){
throw new error.ValidationError('One default server already exists');
}
});
}
})
.then(() => {
// At this point the domains should have been checked
data.owner_user_id = access.token.getUserId(1);

@@ -141,6 +156,22 @@ const internalProxyHost = {
});
}
})
.then(() => {
// Get a list of the domain names and check each of them against default records
if (data.default_server){
if (data.domain_names.length > 1) {
throw new error.ValidationError('Default server cant be set for multiple domain!');
}

return internalHost
.checkDefaultServerNotExist(data.domain_names[0])
.then((result) => {
if (!result){
throw new error.ValidationError('One default server already exists');
}
});
}
})
.then(() => {
return internalProxyHost.get(access, {id: data.id});
})

@@ -153,6 +184,7 @@ const internalProxyHost = {
if (create_certificate) {
return internalCertificate.createQuickCertificate(access, {
domain_names: data.domain_names || row.domain_names,
ssl_key_type: data.ssl_key_type || row.ssl_key_type,
meta: _.assign({}, row.meta, data.meta)
})
.then((cert) => {

@@ -417,16 +449,16 @@ const internalProxyHost = {
.where('is_deleted', 0)
.groupBy('id')
.allowGraph('[owner,access_list,certificate]')
.orderBy(castJsonIfNeed('domain_names'), 'ASC');
.orderBy('domain_names', 'ASC');

if (access_data.permission_visibility !== 'all') {
query.andWhere('owner_user_id', access.token.getUserId(1));
}

// Query is used for searching
if (typeof search_query === 'string' && search_query.length > 0) {
if (typeof search_query === 'string') {
query.where(function () {
this.where(castJsonIfNeed('domain_names'), 'like', `%${search_query}%`);
this.where('domain_names', 'like', '%' + search_query + '%');
});
}
@@ -6,7 +6,6 @@ const internalHost = require('./host');
const internalNginx = require('./nginx');
const internalAuditLog = require('./audit-log');
const internalCertificate = require('./certificate');
const {castJsonIfNeed} = require('../lib/helpers');

function omissions () {
return ['is_deleted'];

@@ -410,16 +409,16 @@ const internalRedirectionHost = {
.where('is_deleted', 0)
.groupBy('id')
.allowGraph('[owner,certificate]')
.orderBy(castJsonIfNeed('domain_names'), 'ASC');
.orderBy('domain_names', 'ASC');

if (access_data.permission_visibility !== 'all') {
query.andWhere('owner_user_id', access.token.getUserId(1));
}

// Query is used for searching
if (typeof search_query === 'string' && search_query.length > 0) {
if (typeof search_query === 'string') {
query.where(function () {
this.where(castJsonIfNeed('domain_names'), 'like', `%${search_query}%`);
this.where('domain_names', 'like', '%' + search_query + '%');
});
}
@@ -4,7 +4,6 @@ const utils = require('../lib/utils');
const streamModel = require('../models/stream');
const internalNginx = require('./nginx');
const internalAuditLog = require('./audit-log');
const {castJsonIfNeed} = require('../lib/helpers');

function omissions () {
return ['is_deleted'];

@@ -294,21 +293,21 @@ const internalStream = {
getAll: (access, expand, search_query) => {
return access.can('streams:list')
.then((access_data) => {
const query = streamModel
let query = streamModel
.query()
.where('is_deleted', 0)
.groupBy('id')
.allowGraph('[owner]')
.orderByRaw('CAST(incoming_port AS INTEGER) ASC');
.orderBy('incoming_port', 'ASC');

if (access_data.permission_visibility !== 'all') {
query.andWhere('owner_user_id', access.token.getUserId(1));
}

// Query is used for searching
if (typeof search_query === 'string' && search_query.length > 0) {
if (typeof search_query === 'string') {
query.where(function () {
this.where(castJsonIfNeed('incoming_port'), 'like', `%${search_query}%`);
this.where('incoming_port', 'like', '%' + search_query + '%');
});
}

@@ -328,9 +327,9 @@ const internalStream = {
* @returns {Promise}
*/
getCount: (user_id, visibility) => {
const query = streamModel
let query = streamModel
.query()
.count('id AS count')
.count('id as count')
.where('is_deleted', 0);

if (visibility !== 'all') {
@@ -2,10 +2,7 @@ const fs = require('fs');
const NodeRSA = require('node-rsa');
const logger = require('../logger').global;

const keysFile = '/data/keys.json';
const mysqlEngine = 'mysql2';
const postgresEngine = 'pg';
const sqliteClientName = 'sqlite3';
const keysFile = '/data/keys.json';

let instance = null;

@@ -17,7 +14,7 @@ const configure = () => {
let configData;
try {
configData = require(filename);
} catch (_) {
} catch (err) {
// do nothing
}

@@ -37,7 +34,7 @@ const configure = () => {
logger.info('Using MySQL configuration');
instance = {
database: {
engine: mysqlEngine,
engine: 'mysql2',
host: envMysqlHost,
port: process.env.DB_MYSQL_PORT || 3306,
user: envMysqlUser,

@@ -49,33 +46,13 @@ const configure = () => {
return;
}

const envPostgresHost = process.env.DB_POSTGRES_HOST || null;
const envPostgresUser = process.env.DB_POSTGRES_USER || null;
const envPostgresName = process.env.DB_POSTGRES_NAME || null;
if (envPostgresHost && envPostgresUser && envPostgresName) {
// we have enough postgres creds to go with postgres
logger.info('Using Postgres configuration');
instance = {
database: {
engine: postgresEngine,
host: envPostgresHost,
port: process.env.DB_POSTGRES_PORT || 5432,
user: envPostgresUser,
password: process.env.DB_POSTGRES_PASSWORD,
name: envPostgresName,
},
keys: getKeys(),
};
return;
}

const envSqliteFile = process.env.DB_SQLITE_FILE || '/data/database.sqlite';
logger.info(`Using Sqlite: ${envSqliteFile}`);
instance = {
database: {
engine: 'knex-native',
knex: {
client: sqliteClientName,
client: 'sqlite3',
connection: {
filename: envSqliteFile
},

@@ -166,27 +143,7 @@ module.exports = {
*/
isSqlite: function () {
instance === null && configure();
return instance.database.knex && instance.database.knex.client === sqliteClientName;
},

/**
* Is this a mysql configuration?
*
* @returns {boolean}
*/
isMysql: function () {
instance === null && configure();
return instance.database.engine === mysqlEngine;
},

/**
* Is this a postgres configuration?
*
* @returns {boolean}
*/
isPostgres: function () {
instance === null && configure();
return instance.database.engine === postgresEngine;
return instance.database.knex && instance.database.knex.client === 'sqlite3';
},

/**
@@ -1,6 +1,4 @@
const moment = require('moment');
const {isPostgres} = require('./config');
const {ref} = require('objection');
const moment = require('moment');

module.exports = {

@@ -47,16 +45,6 @@ module.exports = {
}
});
return obj;
},

/**
* Casts a column to json if using postgres
*
* @param {string} colName
* @returns {string|Objection.ReferenceBuilder}
*/
castJsonIfNeed: function (colName) {
return isPostgres() ? ref(colName).castText() : colName;
}

};
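For context, here is a minimal usage sketch (not a file from this diff) of how a helper like castJsonIfNeed is typically consumed from the internal query builders shown above; it assumes the isPostgres() flag exported by the config module and an Objection model such as proxyHostModel:

    // Minimal sketch, assuming the helpers shown above; the query is hypothetical.
    const {ref}        = require('objection');
    const {isPostgres} = require('./config');

    // On Postgres a JSON column must be cast to text before a LIKE filter works;
    // on MySQL/SQLite the plain column name is used unchanged.
    const castJsonIfNeed = (colName) => (isPostgres() ? ref(colName).castText() : colName);

    // Hypothetical search filter matching the blocks above:
    // proxyHostModel.query()
    //     .where('is_deleted', 0)
    //     .andWhere(castJsonIfNeed('domain_names'), 'like', '%example.com%');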
backend/migrations/20241209062244_ssl_key_type.js (new file, 39 lines)
@@ -0,0 +1,39 @@
const migrate_name = 'identifier_for_migrate';
const logger = require('../logger').migrate;

/**
* Migrate
*
* @see http://knexjs.org/#Schema
*
* @param {Object} knex
* @param {Promise} Promise
* @returns {Promise}
*/
exports.up = function (knex) {

logger.info(`[${migrate_name}] Migrating Up...`);

return knex.schema.alterTable('proxy_host', (table) => {
table.enum('ssl_key_type', ['ecdsa', 'rsa']).defaultTo('ecdsa').notNullable();
}).then(() => {
logger.info(`[${migrate_name}] Column 'ssl_key_type' added to table 'proxy_host'`);
});
};

/**
* Undo Migrate
*
* @param {Object} knex
* @param {Promise} Promise
* @returns {Promise}
*/
exports.down = function (knex) {
logger.info(`[${migrate_name}] Migrating Down...`);

return knex.schema.alterTable('proxy_host', (table) => {
table.dropColumn('ssl_key_type');
}).then(() => {
logger.info(`[${migrate_name}] Column 'ssl_key_type' removed from table 'proxy_host'`);
});
};
backend/migrations/20241211081223_ssl_key_type_in_proxy.js (new file, 39 lines)
@@ -0,0 +1,39 @@
const migrate_name = 'identifier_for_migrate';
const logger = require('../logger').migrate;

/**
* Migrate
*
* @see http://knexjs.org/#Schema
*
* @param {Object} knex
* @param {Promise} Promise
* @returns {Promise}
*/
exports.up = function (knex) {

logger.info(`[${migrate_name}] Migrating Up...`);

return knex.schema.alterTable('certificate', (table) => {
table.enum('ssl_key_type', ['ecdsa', 'rsa']).defaultTo('ecdsa').notNullable();
}).then(() => {
logger.info(`[${migrate_name}] Column 'ssl_key_type' added to table 'proxy_host'`);
});
};

/**
* Undo Migrate
*
* @param {Object} knex
* @param {Promise} Promise
* @returns {Promise}
*/
exports.down = function (knex) {
logger.info(`[${migrate_name}] Migrating Down...`);

return knex.schema.alterTable('certificate', (table) => {
table.dropColumn('ssl_key_type');
}).then(() => {
logger.info(`[${migrate_name}] Column 'ssl_key_type' removed from table 'proxy_host'`);
});
};
backend/migrations/20241221201400_default_server.js (new file, 40 lines)
@@ -0,0 +1,40 @@
const migrate_name = 'identifier_for_migrate';
const logger = require('../logger').migrate;

/**
* Migrate Up
*
* @param {Object} knex
* @param {Promise} Promise
* @returns {Promise}
*/
exports.up = function (knex) {
logger.info(`[${migrate_name}] Migrating Up...`);

// Add default_server column to proxy_host table
return knex.schema.table('proxy_host', (table) => {
table.boolean('default_server').notNullable().defaultTo(false);
})
.then(() => {
logger.info(`[${migrate_name}] Column 'default_server' added to 'proxy_host' table`);
});
};

/**
* Migrate Down
*
* @param {Object} knex
* @param {Promise} Promise
* @returns {Promise}
*/
exports.down = function (knex) {
logger.info(`[${migrate_name}] Migrating Down...`);

// Remove default_server column from proxy_host table
return knex.schema.table('proxy_host', (table) => {
table.dropColumn('default_server');
})
.then(() => {
logger.info(`[${migrate_name}] Column 'default_server' removed from 'proxy_host' table`);
});
};
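The migration files above follow the knex schema-migration pattern already used by the backend. As a rough, hedged sketch (the connection settings here are placeholders, not taken from this diff), they would be applied with knex's migration API roughly like this:

    // Minimal sketch: applying the migrations in backend/migrations with knex.
    const knex = require('knex')({
        client: 'sqlite3',                         // or 'mysql2' / 'pg', mirroring the config module
        connection: {filename: '/data/database.sqlite'},
        migrations: {directory: './migrations'},   // where the files above live
        useNullAsDefault: true
    });

    knex.migrate.latest()
        .then(([batchNo, log]) => console.log(`Batch ${batchNo}: ran ${log.length} migrations`))
        .catch((err) => console.error(err));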
@@ -21,6 +21,7 @@ const boolFields = [
'enabled',
'hsts_enabled',
'hsts_subdomains',
'default_server',
];

class ProxyHost extends Model {
@@ -17,9 +17,6 @@ const boolFields = [
'preserve_path',
'ssl_forced',
'block_exploits',
'hsts_enabled',
'hsts_subdomains',
'http2_support',
];

class RedirectionHost extends Model {
@@ -23,7 +23,6 @@
"node-rsa": "^1.0.8",
"objection": "3.0.1",
"path": "^0.12.7",
"pg": "^8.13.1",
"signale": "1.4.0",
"sqlite3": "5.1.6",
"temp-write": "^4.0.0"
@@ -41,6 +41,15 @@
"owner": {
"$ref": "./user-object.json"
},
"ssl_key_type": {
"type": "string",
"enum": ["ecdsa", "rsa"],
"description": "Type of SSL key (either ecdsa or rsa)"
},
"default_server": {
"type": "boolean",
"description": "Defines if the server is the default for unmatched requests"
},
"meta": {
"type": "object",
"additionalProperties": false,
@@ -23,6 +23,8 @@
"locations",
"hsts_enabled",
"hsts_subdomains",
"ssl_key_type",
"default_server",
"certificate"
],
"additionalProperties": false,

@@ -149,6 +151,15 @@
"$ref": "./access-list-object.json"
}
]
},
"ssl_key_type": {
"type": "string",
"enum": ["ecdsa", "rsa"],
"description": "Type of SSL key (either ecdsa or rsa)"
},
"default_server": {
"type": "boolean",
"description": "Defines if the server is the default for unmatched requests"
}
}
}
@@ -79,6 +79,12 @@
},
"locations": {
"$ref": "../../../../components/proxy-host-object.json#/properties/locations"
},
"ssl_key_type": {
"$ref": "../../../../components/proxy-host-object.json#/properties/ssl_key_type"
},
"default_server": {
"$ref": "../../../../components/proxy-host-object.json#/properties/default_server"
}
}
}
@@ -67,6 +67,12 @@
},
"locations": {
"$ref": "../../../components/proxy-host-object.json#/properties/locations"
},
"ssl_key_type": {
"$ref": "../../../components/proxy-host-object.json#/properties/ssl_key_type"
},
"default_server": {
"$ref": "../../../components/proxy-host-object.json#/properties/default_server"
}
}
}
@@ -15,18 +15,18 @@ const certbot = require('./lib/certbot');
const setupDefaultUser = () => {
return userModel
.query()
.select('id', )
.select(userModel.raw('COUNT(`id`) as `count`'))
.where('is_deleted', 0)
.first()
.then((row) => {
if (!row || !row.id) {
if (!row.count) {
// Create a new user and set password
const email = process.env.INITIAL_ADMIN_EMAIL || 'admin@example.com';
const password = process.env.INITIAL_ADMIN_PASSWORD || 'changeme';
let email = process.env.INITIAL_ADMIN_EMAIL || 'admin@example.com';
let password = process.env.INITIAL_ADMIN_PASSWORD || 'changeme';

logger.info('Creating a new user: ' + email + ' with password: ' + password);

const data = {
let data = {
is_deleted: 0,
email: email,
name: 'Administrator',

@@ -77,11 +77,11 @@ const setupDefaultUser = () => {
const setupDefaultSettings = () => {
return settingModel
.query()
.select('id')
.select(settingModel.raw('COUNT(`id`) as `count`'))
.where({id: 'default-site'})
.first()
.then((row) => {
if (!row || !row.id) {
if (!row.count) {
settingModel
.query()
.insert({
@@ -1,13 +1,13 @@
listen 80;
listen 80{% if default_server == true %} default_server{% endif %};
{% if ipv6 -%}
listen [::]:80;
listen [::]:80{% if default_server == true %} default_server{% endif %};
{% else -%}
#listen [::]:80;
{% endif %}
{% if certificate -%}
listen 443 ssl;
listen 443 ssl{% if default_server == true %} default_server{% endif %};
{% if ipv6 -%}
listen [::]:443 ssl;
listen [::]:443 ssl{% if default_server == true %} default_server{% endif %};
{% else -%}
#listen [::]:443;
{% endif %}
@@ -22,7 +22,5 @@ server {
}
{% endif %}

# Custom
include /data/nginx/custom/server_dead[.]conf;
}
{% endif %}
@@ -2735,67 +2735,11 @@ path@^0.12.7:
process "^0.11.1"
util "^0.10.3"

pg-cloudflare@^1.1.1:
version "1.1.1"
resolved "https://registry.yarnpkg.com/pg-cloudflare/-/pg-cloudflare-1.1.1.tgz#e6d5833015b170e23ae819e8c5d7eaedb472ca98"
integrity sha512-xWPagP/4B6BgFO+EKz3JONXv3YDgvkbVrGw2mTo3D6tVDQRh1e7cqVGvyR3BE+eQgAvx1XhW/iEASj4/jCWl3Q==

pg-connection-string@2.5.0:
version "2.5.0"
resolved "https://registry.yarnpkg.com/pg-connection-string/-/pg-connection-string-2.5.0.tgz#538cadd0f7e603fc09a12590f3b8a452c2c0cf34"
integrity sha512-r5o/V/ORTA6TmUnyWZR9nCj1klXCO2CEKNRlVuJptZe85QuhFayC7WeMic7ndayT5IRIR0S0xFxFi2ousartlQ==

pg-connection-string@^2.7.0:
version "2.7.0"
resolved "https://registry.yarnpkg.com/pg-connection-string/-/pg-connection-string-2.7.0.tgz#f1d3489e427c62ece022dba98d5262efcb168b37"
integrity sha512-PI2W9mv53rXJQEOb8xNR8lH7Hr+EKa6oJa38zsK0S/ky2er16ios1wLKhZyxzD7jUReiWokc9WK5nxSnC7W1TA==

pg-int8@1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/pg-int8/-/pg-int8-1.0.1.tgz#943bd463bf5b71b4170115f80f8efc9a0c0eb78c"
integrity sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==

pg-pool@^3.7.0:
version "3.7.0"
resolved "https://registry.yarnpkg.com/pg-pool/-/pg-pool-3.7.0.tgz#d4d3c7ad640f8c6a2245adc369bafde4ebb8cbec"
integrity sha512-ZOBQForurqh4zZWjrgSwwAtzJ7QiRX0ovFkZr2klsen3Nm0aoh33Ls0fzfv3imeH/nw/O27cjdz5kzYJfeGp/g==

pg-protocol@^1.7.0:
version "1.7.0"
resolved "https://registry.yarnpkg.com/pg-protocol/-/pg-protocol-1.7.0.tgz#ec037c87c20515372692edac8b63cf4405448a93"
integrity sha512-hTK/mE36i8fDDhgDFjy6xNOG+LCorxLG3WO17tku+ij6sVHXh1jQUJ8hYAnRhNla4QVD2H8er/FOjc/+EgC6yQ==

pg-types@^2.1.0:
version "2.2.0"
resolved "https://registry.yarnpkg.com/pg-types/-/pg-types-2.2.0.tgz#2d0250d636454f7cfa3b6ae0382fdfa8063254a3"
integrity sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA==
dependencies:
pg-int8 "1.0.1"
postgres-array "~2.0.0"
postgres-bytea "~1.0.0"
postgres-date "~1.0.4"
postgres-interval "^1.1.0"

pg@^8.13.1:
version "8.13.1"
resolved "https://registry.yarnpkg.com/pg/-/pg-8.13.1.tgz#6498d8b0a87ff76c2df7a32160309d3168c0c080"
integrity sha512-OUir1A0rPNZlX//c7ksiu7crsGZTKSOXJPgtNiHGIlC9H0lO+NC6ZDYksSgBYY/thSWhnSRBv8w1lieNNGATNQ==
dependencies:
pg-connection-string "^2.7.0"
pg-pool "^3.7.0"
pg-protocol "^1.7.0"
pg-types "^2.1.0"
pgpass "1.x"
optionalDependencies:
pg-cloudflare "^1.1.1"

pgpass@1.x:
version "1.0.5"
resolved "https://registry.yarnpkg.com/pgpass/-/pgpass-1.0.5.tgz#9b873e4a564bb10fa7a7dbd55312728d422a223d"
integrity sha512-FdW9r/jQZhSeohs1Z3sI1yxFQNFvMcnmfuj4WBMUTxOrAyLMaTcE1aAMBiTlbMNaXvBCQuVi0R7hd8udDSP7ug==
dependencies:
split2 "^4.1.0"

picomatch@^2.0.4, picomatch@^2.2.1:
version "2.2.2"
resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.2.2.tgz#21f333e9b6b8eaff02468f5146ea406d345f4dad"

@@ -2814,28 +2758,6 @@ pkg-conf@^2.1.0:
find-up "^2.0.0"
load-json-file "^4.0.0"

postgres-array@~2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/postgres-array/-/postgres-array-2.0.0.tgz#48f8fce054fbc69671999329b8834b772652d82e"
integrity sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA==

postgres-bytea@~1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/postgres-bytea/-/postgres-bytea-1.0.0.tgz#027b533c0aa890e26d172d47cf9ccecc521acd35"
integrity sha512-xy3pmLuQqRBZBXDULy7KbaitYqLcmxigw14Q5sj8QBVLqEwXfeybIKVWiqAXTlcvdvb0+xkOtDbfQMOf4lST1w==

postgres-date@~1.0.4:
version "1.0.7"
resolved "https://registry.yarnpkg.com/postgres-date/-/postgres-date-1.0.7.tgz#51bc086006005e5061c591cee727f2531bf641a8"
integrity sha512-suDmjLVQg78nMK2UZ454hAG+OAW+HQPZ6n++TNDUX+L0+uUlLywnoxJKDou51Zm+zTCjrCl0Nq6J9C5hP9vK/Q==

postgres-interval@^1.1.0:
version "1.2.0"
resolved "https://registry.yarnpkg.com/postgres-interval/-/postgres-interval-1.2.0.tgz#b460c82cb1587507788819a06aa0fffdb3544695"
integrity sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==
dependencies:
xtend "^4.0.0"

prelude-ls@^1.2.1:
version "1.2.1"
resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.2.1.tgz#debc6489d7a6e6b0e7611888cec880337d316396"

@@ -3272,11 +3194,6 @@ socks@^2.6.2:
ip "^2.0.0"
smart-buffer "^4.2.0"

split2@^4.1.0:
version "4.2.0"
resolved "https://registry.yarnpkg.com/split2/-/split2-4.2.0.tgz#c9c5920904d148bab0b9f67145f245a86aadbfa4"
integrity sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==

sprintf-js@~1.0.2:
version "1.0.3"
resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c"

@@ -3748,11 +3665,6 @@ xdg-basedir@^4.0.0:
resolved "https://registry.yarnpkg.com/xdg-basedir/-/xdg-basedir-4.0.0.tgz#4bc8d9984403696225ef83a1573cbbcb4e79db13"
integrity sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q==

xtend@^4.0.0:
version "4.0.2"
resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.2.tgz#bb72779f5fa465186b1f438f674fa347fdb5db54"
integrity sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==

y18n@^4.0.0:
version "4.0.1"
resolved "https://registry.yarnpkg.com/y18n/-/y18n-4.0.1.tgz#8db2b83c31c5d75099bb890b23f3094891e247d4"
@@ -53,9 +53,11 @@ COPY --from=testca /home/step/certs/root_ca.crt /etc/ssl/certs/NginxProxyManager
# Remove frontend service not required for prod, dev nginx config as well
RUN rm -rf /etc/s6-overlay/s6-rc.d/user/contents.d/frontend /etc/nginx/conf.d/dev.conf \
&& chmod 644 /etc/logrotate.d/nginx-proxy-manager
COPY docker/start-container /usr/local/bin/start-container
RUN chmod +x /usr/local/bin/start-container

VOLUME [ "/data" ]
ENTRYPOINT [ "/init" ]
ENTRYPOINT [ "start-container" ]

LABEL org.label-schema.schema-version="1.0" \
org.label-schema.license="MIT" \
@@ -1,8 +0,0 @@
AUTHENTIK_SECRET_KEY=gl8woZe8L6IIX8SC0c5Ocsj0xPkX5uJo5DVZCFl+L/QGbzuplfutYuua2ODNLEiDD3aFd9H2ylJmrke0
AUTHENTIK_REDIS__HOST=authentik-redis
AUTHENTIK_POSTGRESQL__HOST=db-postgres
AUTHENTIK_POSTGRESQL__USER=authentik
AUTHENTIK_POSTGRESQL__NAME=authentik
AUTHENTIK_POSTGRESQL__PASSWORD=07EKS5NLI6Tpv68tbdvrxfvj
AUTHENTIK_BOOTSTRAP_PASSWORD=admin
AUTHENTIK_BOOTSTRAP_EMAIL=admin@example.com

Binary file not shown.
@@ -29,12 +29,14 @@ COPY scripts/install-s6 /tmp/install-s6
RUN rm -f /etc/nginx/conf.d/production.conf \
&& chmod 644 /etc/logrotate.d/nginx-proxy-manager \
&& /tmp/install-s6 "${TARGETPLATFORM}" \
&& rm -f /tmp/install-s6 \
&& chmod 644 -R /root/.cache
&& rm -f /tmp/install-s6

# Certs for testing purposes
COPY --from=pebbleca /test/certs/pebble.minica.pem /etc/ssl/certs/pebble.minica.pem
COPY --from=testca /home/step/certs/root_ca.crt /etc/ssl/certs/NginxProxyManager.crt

COPY start-container /usr/local/bin/start-container
RUN chmod +x /usr/local/bin/start-container

EXPOSE 80 81 443
ENTRYPOINT [ "/init" ]
ENTRYPOINT [ "start-container" ]
@@ -1,7 +1,5 @@
text = True
non-interactive = True
webroot-path = /data/letsencrypt-acme-challenge
key-type = ecdsa
elliptic-curve = secp384r1
preferred-chain = ISRG Root X1
server =
@@ -1,78 +0,0 @@
# WARNING: This is a CI docker-compose file used for building and testing of the entire app, it should not be used for production.
services:

cypress:
environment:
CYPRESS_stack: 'postgres'

fullstack:
environment:
DB_POSTGRES_HOST: 'db-postgres'
DB_POSTGRES_PORT: '5432'
DB_POSTGRES_USER: 'npm'
DB_POSTGRES_PASSWORD: 'npmpass'
DB_POSTGRES_NAME: 'npm'
depends_on:
- db-postgres
- authentik
- authentik-worker
- authentik-ldap

db-postgres:
image: postgres:latest
environment:
POSTGRES_USER: 'npm'
POSTGRES_PASSWORD: 'npmpass'
POSTGRES_DB: 'npm'
volumes:
- psql_vol:/var/lib/postgresql/data
- ./ci/postgres:/docker-entrypoint-initdb.d
networks:
- fulltest

authentik-redis:
image: 'redis:alpine'
command: --save 60 1 --loglevel warning
restart: unless-stopped
healthcheck:
test: ['CMD-SHELL', 'redis-cli ping | grep PONG']
start_period: 20s
interval: 30s
retries: 5
timeout: 3s
volumes:
- redis_vol:/data

authentik:
image: ghcr.io/goauthentik/server:2024.10.1
restart: unless-stopped
command: server
env_file:
- ci.env
depends_on:
- authentik-redis
- db-postgres

authentik-worker:
image: ghcr.io/goauthentik/server:2024.10.1
restart: unless-stopped
command: worker
env_file:
- ci.env
depends_on:
- authentik-redis
- db-postgres

authentik-ldap:
image: ghcr.io/goauthentik/ldap:2024.10.1
environment:
AUTHENTIK_HOST: 'http://authentik:9000'
AUTHENTIK_INSECURE: 'true'
AUTHENTIK_TOKEN: 'wKYZuRcI0ETtb8vWzMCr04oNbhrQUUICy89hSpDln1OEKLjiNEuQ51044Vkp'
restart: unless-stopped
depends_on:
- authentik

volumes:
psql_vol:
redis_vol:
@@ -2,8 +2,8 @@
services:

fullstack:
image: npm2dev:core
container_name: npm2dev.core
image: nginxproxymanager:dev
container_name: npm_core
build:
context: ./
dockerfile: ./dev/Dockerfile

@@ -26,17 +26,11 @@ services:
DEVELOPMENT: 'true'
LE_STAGING: 'true'
# db:
# DB_MYSQL_HOST: 'db'
# DB_MYSQL_PORT: '3306'
# DB_MYSQL_USER: 'npm'
# DB_MYSQL_PASSWORD: 'npm'
# DB_MYSQL_NAME: 'npm'
# db-postgres:
DB_POSTGRES_HOST: 'db-postgres'
DB_POSTGRES_PORT: '5432'
DB_POSTGRES_USER: 'npm'
DB_POSTGRES_PASSWORD: 'npmpass'
DB_POSTGRES_NAME: 'npm'
DB_MYSQL_HOST: 'db'
DB_MYSQL_PORT: '3306'
DB_MYSQL_USER: 'npm'
DB_MYSQL_PASSWORD: 'npm'
DB_MYSQL_NAME: 'npm'
# DB_SQLITE_FILE: "/data/database.sqlite"
# DISABLE_IPV6: "true"
# Required for DNS Certificate provisioning testing:

@@ -55,15 +49,11 @@ services:
timeout: 3s
depends_on:
- db
- db-postgres
- authentik
- authentik-worker
- authentik-ldap
working_dir: /app

db:
image: jc21/mariadb-aria
container_name: npm2dev.db
container_name: npm_db
ports:
- 33306:3306
networks:

@@ -76,22 +66,8 @@ services:
volumes:
- db_data:/var/lib/mysql

db-postgres:
image: postgres:latest
container_name: npm2dev.db-postgres
networks:
- nginx_proxy_manager
environment:
POSTGRES_USER: 'npm'
POSTGRES_PASSWORD: 'npmpass'
POSTGRES_DB: 'npm'
volumes:
- psql_data:/var/lib/postgresql/data
- ./ci/postgres:/docker-entrypoint-initdb.d

stepca:
image: jc21/testca
container_name: npm2dev.stepca
volumes:
- './dev/resolv.conf:/etc/resolv.conf:ro'
- '/etc/localtime:/etc/localtime:ro'

@@ -102,7 +78,6 @@ services:

dnsrouter:
image: jc21/dnsrouter
container_name: npm2dev.dnsrouter
volumes:
- ./dev/dnsrouter-config.json.tmp:/dnsrouter-config.json:ro
networks:

@@ -110,7 +85,7 @@ services:

swagger:
image: swaggerapi/swagger-ui:latest
container_name: npm2dev.swagger
container_name: npm_swagger
ports:
- 3082:80
environment:

@@ -121,7 +96,7 @@ services:

squid:
image: ubuntu/squid
container_name: npm2dev.squid
container_name: npm_squid
volumes:
- './dev/squid.conf:/etc/squid/squid.conf:ro'
- './dev/resolv.conf:/etc/resolv.conf:ro'

@@ -133,7 +108,6 @@ services:

pdns:
image: pschiffe/pdns-mysql
container_name: npm2dev.pdns
volumes:
- '/etc/localtime:/etc/localtime:ro'
environment:

@@ -162,7 +136,6 @@ services:

pdns-db:
image: mariadb
container_name: npm2dev.pdns-db
environment:
MYSQL_ROOT_PASSWORD: 'pdns'
MYSQL_DATABASE: 'pdns'

@@ -176,8 +149,7 @@ services:
- nginx_proxy_manager

cypress:
image: npm2dev:cypress
container_name: npm2dev.cypress
image: "npm_dev_cypress"
build:
context: ../
dockerfile: test/cypress/Dockerfile

@@ -192,77 +164,16 @@ services:
networks:
- nginx_proxy_manager

authentik-redis:
image: 'redis:alpine'
container_name: npm2dev.authentik-redis
command: --save 60 1 --loglevel warning
networks:
- nginx_proxy_manager
restart: unless-stopped
healthcheck:
test: ['CMD-SHELL', 'redis-cli ping | grep PONG']
start_period: 20s
interval: 30s
retries: 5
timeout: 3s
volumes:
- redis_data:/data

authentik:
image: ghcr.io/goauthentik/server:2024.10.1
container_name: npm2dev.authentik
restart: unless-stopped
command: server
networks:
- nginx_proxy_manager
env_file:
- ci.env
ports:
- 9000:9000
depends_on:
- authentik-redis
- db-postgres

authentik-worker:
image: ghcr.io/goauthentik/server:2024.10.1
container_name: npm2dev.authentik-worker
restart: unless-stopped
command: worker
networks:
- nginx_proxy_manager
env_file:
- ci.env
depends_on:
- authentik-redis
- db-postgres

authentik-ldap:
image: ghcr.io/goauthentik/ldap:2024.10.1
container_name: npm2dev.authentik-ldap
networks:
- nginx_proxy_manager
environment:
AUTHENTIK_HOST: 'http://authentik:9000'
AUTHENTIK_INSECURE: 'true'
AUTHENTIK_TOKEN: 'wKYZuRcI0ETtb8vWzMCr04oNbhrQUUICy89hSpDln1OEKLjiNEuQ51044Vkp'
restart: unless-stopped
depends_on:
- authentik

volumes:
npm_data:
name: npm2dev_core_data
name: npm_core_data
le_data:
name: npm2dev_le_data
name: npm_le_data
db_data:
name: npm2dev_db_data
name: npm_db_data
pdns_mysql:
name: npnpm2dev_pdns_mysql
psql_data:
name: npm2dev_psql_data
redis_data:
name: npm2dev_redis_data
name: npm_pdns_mysql

networks:
nginx_proxy_manager:
name: npm2dev_network
name: npm_network
@@ -1,6 +1,4 @@
text = True
non-interactive = True
webroot-path = /data/letsencrypt-acme-challenge
key-type = ecdsa
elliptic-curve = secp384r1
preferred-chain = ISRG Root X1
@@ -3,5 +3,7 @@ ssl_session_cache shared:SSL:50m;

# intermediate configuration. tweak to your needs.
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384';
ssl_ciphers "ALL:RC4-SHA:AES128-SHA:AES256-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384:AES128-GCM-SHA256:RSA-AES256-CBC-SHA:RC4-MD5:DES-CBC3-SHA:AES256-SHA:RC4-SHA:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384";
ssl_prefer_server_ciphers off;
ssl_ecdh_curve X25519:prime256v1:secp384r1;
ssl_dhparam /etc/ssl/certs/dhparam.pem;
docker/start-container (new file, 13 lines)
@@ -0,0 +1,13 @@
#!/usr/bin/env bash

FILE="/etc/ssl/certs/dhparam.pem"

if [ ! -f "$FILE" ]; then
echo "the $FILE does not exist, creating..."
openssl dhparam -out "$FILE" 2048
else
echo "the $FILE already exists, skipping..."
fi

echo "run default script"
exec /init
@@ -181,7 +181,6 @@ You can add your custom configuration snippet files at `/data/nginx/custom` as f
- `/data/nginx/custom/server_stream.conf`: Included at the end of every stream server block
- `/data/nginx/custom/server_stream_tcp.conf`: Included at the end of every TCP stream server block
- `/data/nginx/custom/server_stream_udp.conf`: Included at the end of every UDP stream server block
- `/data/nginx/custom/server_dead.conf`: Included at the end of every 404 server block

Every file is optional.
@@ -21,7 +21,8 @@ services:
# Add any other Stream port you want to expose
# - '21:21' # FTP

environment:
# Uncomment the next line if you uncomment anything in the section
# environment:
# Uncomment this if you want to change the location of
# the SQLite DB file within the container
# DB_SQLITE_FILE: "/data/database.sqlite"

@@ -98,53 +99,6 @@ Please note, that `DB_MYSQL_*` environment variables will take precedent over `D

:::

## Using Postgres database

Similar to the MySQL server setup:

```yml
services:
app:
image: 'jc21/nginx-proxy-manager:latest'
restart: unless-stopped
ports:
# These ports are in format <host-port>:<container-port>
- '80:80' # Public HTTP Port
- '443:443' # Public HTTPS Port
- '81:81' # Admin Web Port
# Add any other Stream port you want to expose
# - '21:21' # FTP
environment:
# Postgres parameters:
DB_POSTGRES_HOST: 'db'
DB_POSTGRES_PORT: '5432'
DB_POSTGRES_USER: 'npm'
DB_POSTGRES_PASSWORD: 'npmpass'
DB_POSTGRES_NAME: 'npm'
# Uncomment this if IPv6 is not enabled on your host
# DISABLE_IPV6: 'true'
volumes:
- ./data:/data
- ./letsencrypt:/etc/letsencrypt
depends_on:
- db

db:
image: postgres:latest
environment:
POSTGRES_USER: 'npm'
POSTGRES_PASSWORD: 'npmpass'
POSTGRES_DB: 'npm'
volumes:
- ./postgres:/var/lib/postgresql/data
```

::: warning

Custom Postgres schema is not supported, as such `public` will be used.

:::

## Running on Raspberry PI / ARM devices

The docker images support the following architectures:
docs/src/third-party/index.md (vendored): 1 changed line
@@ -12,7 +12,6 @@ Known integrations:
- [HomeAssistant Hass.io plugin](https://github.com/hassio-addons/addon-nginx-proxy-manager)
- [UnRaid / Synology](https://github.com/jlesage/docker-nginx-proxy-manager)
- [Proxmox Scripts](https://github.com/ej52/proxmox-scripts/tree/main/apps/nginx-proxy-manager)
- [Proxmox VE Helper-Scripts](https://community-scripts.github.io/ProxmoxVE/scripts?id=nginxproxymanager)
- [nginxproxymanagerGraf](https://github.com/ma-karai/nginxproxymanagerGraf)
@@ -873,9 +873,9 @@ mitt@^3.0.1:
integrity sha512-vKivATfr97l2/QBCYAkXYDbrIWPM2IIKEl7YPhjCvKlG3kE2gm+uBo6nEXK3M5/Ffh/FLpKExzOQ3JJoJGFKBw==

nanoid@^3.3.7:
version "3.3.8"
resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.8.tgz#b1be3030bee36aaff18bacb375e5cce521684baf"
integrity sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w==
version "3.3.7"
resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.7.tgz#d0c301a691bc8d54efa0a2226ccf3fe2fd656bd8"
integrity sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==

oniguruma-to-js@0.4.3:
version "0.4.3"
@@ -50,8 +50,7 @@ module.exports = Mn.View.extend({
onRender: function () {
let view = this;

if (typeof view.stats.hosts === 'undefined') {
Api.Reports.getHostStats()
Api.Reports.getHostStats()
.then(response => {
if (!view.isDestroyed()) {
view.stats.hosts = response;

@@ -61,7 +60,6 @@ module.exports = Mn.View.extend({
.catch(err => {
console.log(err);
});
}
},

/**
@@ -72,7 +72,7 @@
</label>
</div>
</div>
<div class="col-sm-12 col-md-12">
<div class="col-sm-6 col-md-6">
<div class="form-group">
<label class="custom-switch">
<input type="checkbox" class="custom-switch-input" name="allow_websocket_upgrade" value="1"<%- allow_websocket_upgrade ? ' checked' : '' %>>

@@ -81,6 +81,15 @@
</label>
</div>
</div>
<div class="col-sm-6 col-md-6">
<div class="form-group">
<label class="custom-switch">
<input type="checkbox" class="custom-switch-input" name="default_server" value="1"<%- default_server ? ' checked' : '' %>>
<span class="custom-switch-indicator"></span>
<span class="custom-switch-description"><%- i18n('proxy-hosts', 'default-server') %></span>
</label>
</div>
</div>

<div class="col-sm-12 col-md-12">
<div class="form-group">

@@ -105,6 +114,15 @@
</select>
</div>
</div>
<div class="col-sm-12 col-md-12">
<div class="form-group">
<label class="form-label"><%- i18n('all-hosts', 'ssl-key-type') %></label>
<select name="ssl_key_type" class="form-control custom-select">
<option value="ecdsa" data-data="{"id":"ecdsa"}" <%- ssl_key_type == 'ecdsa' ? 'selected' : '' %>>ECDSA</option>
<option value="rsa" data-data="{"id":"rsa"}" <%- ssl_key_type == 'rsa' ? 'selected' : '' %>>RSA</option>
</select>
</div>
</div>
<div class="col-sm-6 col-md-6">
<div class="form-group">
<label class="custom-switch">
@@ -167,6 +167,7 @@ module.exports = Mn.View.extend({
data.hsts_enabled = !!data.hsts_enabled;
data.hsts_subdomains = !!data.hsts_subdomains;
data.ssl_forced = !!data.ssl_forced;
data.default_server = !!data.default_server;

if (typeof data.meta === 'undefined') data.meta = {};
data.meta.letsencrypt_agree = data.meta.letsencrypt_agree == 1;
@@ -77,6 +77,7 @@
"block-exploits": "Block Common Exploits",
"caching-enabled": "Cache Assets",
"ssl-certificate": "SSL Certificate",
"ssl-key-type": "SSL Key Type",
"none": "None",
"new-cert": "Request a new SSL Certificate",
"with-le": "with Let's Encrypt",

@@ -131,6 +132,7 @@
"help-content": "A Proxy Host is the incoming endpoint for a web service that you want to forward.\nIt provides optional SSL termination for your service that might not have SSL support built in.\nProxy Hosts are the most common use for the Nginx Proxy Manager.",
"access-list": "Access List",
"allow-websocket-upgrade": "Websockets Support",
"default-server": "Default Server",
"ignore-invalid-upstream-ssl": "Ignore Invalid SSL",
"custom-forward-host-help": "Add a path for sub-folder forwarding.\nExample: 203.0.113.25/path/",
"search": "Search Host…"
@@ -10,6 +10,8 @@ const model = Backbone.Model.extend({
modified_on: null,
domain_names: [],
certificate_id: 0,
ssl_key_type: 'ecdsa',
default_server: false,
ssl_forced: false,
http2_support: false,
hsts_enabled: false,
@@ -14,6 +14,8 @@ const model = Backbone.Model.extend({
forward_port: null,
access_list_id: 0,
certificate_id: 0,
ssl_key_type: 'ecdsa',
default_server: false,
ssl_forced: false,
hsts_enabled: false,
hsts_subdomains: false,
@@ -14,6 +14,8 @@ const model = Backbone.Model.extend({
forward_domain_name: '',
preserve_path: true,
certificate_id: 0,
ssl_key_type: 'ecdsa',
default_server: false,
ssl_forced: false,
hsts_enabled: false,
hsts_subdomains: false,
@ -7,7 +7,7 @@
"credentials": "dns_acmedns_api_url = http://acmedns-server/\ndns_acmedns_registration_file = /data/acme-registration.json",
"full_plugin_name": "dns-acmedns"
},
"active24": {
"active24":{
"name": "Active24",
"package_name": "certbot-dns-active24",
"version": "~=1.5.1",
@ -18,7 +18,7 @@
"aliyun": {
"name": "Aliyun",
"package_name": "certbot-dns-aliyun",
"version": "~=2.0.0",
"version": "~=0.38.1",
"dependencies": "",
"credentials": "dns_aliyun_access_key = 12345678\ndns_aliyun_access_key_secret = 1234567890abcdef1234567890abcdef",
"full_plugin_name": "dns-aliyun"
@ -31,14 +31,6 @@
"credentials": "# This plugin supported API authentication using either Service Principals or utilizing a Managed Identity assigned to the virtual machine.\n# Regardless which authentication method used, the identity will need the “DNS Zone Contributor” role assigned to it.\n# As multiple Azure DNS Zones in multiple resource groups can exist, the config file needs a mapping of zone to resource group ID. Multiple zones -> ID mappings can be listed by using the key dns_azure_zoneX where X is a unique number. At least 1 zone mapping is required.\n\n# Using a service principal (option 1)\ndns_azure_sp_client_id = 912ce44a-0156-4669-ae22-c16a17d34ca5\ndns_azure_sp_client_secret = E-xqXU83Y-jzTI6xe9fs2YC~mck3ZzUih9\ndns_azure_tenant_id = ed1090f3-ab18-4b12-816c-599af8a88cf7\n\n# Using used assigned MSI (option 2)\n# dns_azure_msi_client_id = 912ce44a-0156-4669-ae22-c16a17d34ca5\n\n# Using system assigned MSI (option 3)\n# dns_azure_msi_system_assigned = true\n\n# Zones (at least one always required)\ndns_azure_zone1 = example.com:/subscriptions/c135abce-d87d-48df-936c-15596c6968a5/resourceGroups/dns1\ndns_azure_zone2 = example.org:/subscriptions/99800903-fb14-4992-9aff-12eaf2744622/resourceGroups/dns2",
"full_plugin_name": "dns-azure"
},
"beget": {
"name":"Beget",
"package_name": "certbot-beget-plugin",
"version": "~=1.0.0.dev9",
"dependencies": "",
"credentials": "# Beget API credentials used by Certbot\nbeget_plugin_username = username\nbeget_plugin_password = password",
"full_plugin_name": "beget-plugin"
},
"bunny": {
"name": "bunny.net",
"package_name": "certbot-dns-bunny",
@ -255,14 +247,6 @@
"credentials": "dns_hetzner_api_token = 0123456789abcdef0123456789abcdef",
"full_plugin_name": "dns-hetzner"
},
"hostingnl": {
"name": "Hosting.nl",
"package_name": "certbot-dns-hostingnl",
"version": "~=0.1.5",
"dependencies": "",
"credentials": "dns_hostingnl_api_key = 0123456789abcdef0123456789abcdef",
"full_plugin_name": "dns-hostingnl"
},
"hover": {
"name": "Hover",
"package_name": "certbot-dns-hover",
@ -418,7 +402,7 @@
"porkbun": {
"name": "Porkbun",
"package_name": "certbot-dns-porkbun",
"version": "~=0.9",
"version": "~=0.2",
"dependencies": "",
"credentials": "dns_porkbun_key=your-porkbun-api-key\ndns_porkbun_secret=your-porkbun-api-secret",
"full_plugin_name": "dns-porkbun"
@ -511,7 +495,7 @@
"credentials": "dns_websupport_identifier = <api_key>\ndns_websupport_secret_key = <secret>",
"full_plugin_name": "dns-websupport"
},
"wedos": {
"wedos":{
"name": "Wedos",
"package_name": "certbot-dns-wedos",
"version": "~=2.2",
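
Aside: each entry in this plugin list pairs a pip package_name/version with the full_plugin_name that Certbot is driven with. A hedged sketch of exercising one entry (Porkbun) by hand; the credentials flag name follows the common certbot DNS-plugin pattern and is an assumption, not taken from this diff:

    # Install the pinned plugin version from the entry above.
    pip install "certbot-dns-porkbun~=0.9"

    # Credentials file mirrors the "credentials" string from the JSON entry.
    printf 'dns_porkbun_key=your-porkbun-api-key\ndns_porkbun_secret=your-porkbun-api-secret\n' \
      > /etc/letsencrypt/porkbun.ini
    chmod 600 /etc/letsencrypt/porkbun.ini

    # --authenticator takes the full_plugin_name; the credentials flag is assumed
    # to follow the plugin's documented --dns-porkbun-credentials form.
    certbot certonly \
      --authenticator dns-porkbun \
      --dns-porkbun-credentials /etc/letsencrypt/porkbun.ini \
      -d example.com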
@ -11,7 +11,7 @@ YELLOW='\E[1;33m'
export BLUE CYAN GREEN RED RESET YELLOW

# Docker Compose
COMPOSE_PROJECT_NAME="npm2dev"
COMPOSE_PROJECT_NAME="npmdev"
COMPOSE_FILE="docker/docker-compose.dev.yml"

export COMPOSE_FILE COMPOSE_PROJECT_NAME

@ -67,8 +67,6 @@ printf "nameserver %s\noptions ndots:0" "${DNSROUTER_IP}" > "${LOCAL_RESOLVE}"
# bring up all remaining containers, except cypress!
docker-compose up -d --remove-orphans stepca squid
docker-compose pull db-mysql || true # ok to fail
docker-compose pull db-postgres || true # ok to fail
docker-compose pull authentik authentik-redis authentik-ldap || true # ok to fail
docker-compose up -d --remove-orphans --pull=never fullstack

# wait for main container to be healthy

@ -36,11 +36,12 @@ if hash docker-compose 2>/dev/null; then

# bring up all remaining containers, except cypress!
docker-compose up -d --remove-orphans stepca squid
docker-compose pull db db-postgres authentik-redis authentik authentik-worker authentik-ldap
docker-compose build --pull --parallel fullstack
docker-compose up -d --remove-orphans fullstack
docker-compose pull db
docker-compose up -d --remove-orphans --pull=never fullstack
docker-compose up -d --remove-orphans swagger

# docker-compose up -d --remove-orphans --force-recreate --build

# wait for main container to be healthy
bash "$DIR/wait-healthy" "$(docker-compose ps --all -q fullstack)" 120

@ -52,10 +53,10 @@ if hash docker-compose 2>/dev/null; then

if [ "$1" == "-f" ]; then
echo -e "${BLUE}❯ ${YELLOW}Following Backend Container:${RESET}"
docker logs -f npm2dev.core
docker logs -f npm_core
else
echo -e "${YELLOW}Hint:${RESET} You can follow the output of some of the containers with:"
echo "  docker logs -f npm2dev.core"
echo "  docker logs -f npm_core"
fi
else
echo -e "${RED}❯ docker-compose command is not available${RESET}"
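
Aside: the dev tooling above pins COMPOSE_PROJECT_NAME and COMPOSE_FILE, and the docker logs hints use container names derived from that project name. A hedged sketch of inspecting the same stack by hand; the project name differs between the two sides of the diff (npm2dev vs npmdev), so use whichever your checkout exports:

    # Values copied from the script hunks above, not invented.
    export COMPOSE_PROJECT_NAME="npm2dev"
    export COMPOSE_FILE="docker/docker-compose.dev.yml"

    docker-compose ps                                         # list the stack's containers
    docker logs -f "$(docker-compose ps --all -q fullstack)"  # follow the main container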
@ -1,64 +0,0 @@
/// <reference types="cypress" />

describe('LDAP with Authentik', () => {
let token;
if (Cypress.env('skipStackCheck') === 'true' || Cypress.env('stack') === 'postgres') {

before(() => {
cy.getToken().then((tok) => {
token = tok;

// cy.task('backendApiPut', {
// token: token,
// path: '/api/settings/ldap-auth',
// data: {
// value: {
// host: 'authentik-ldap:3389',
// base_dn: 'ou=users,DC=ldap,DC=goauthentik,DC=io',
// user_dn: 'cn={{USERNAME}},ou=users,DC=ldap,DC=goauthentik,DC=io',
// email_property: 'mail',
// name_property: 'sn',
// self_filter: '(&(cn={{USERNAME}})(ak-active=TRUE))',
// auto_create_user: true
// }
// }
// }).then((data) => {
// cy.validateSwaggerSchema('put', 200, '/settings/{name}', data);
// expect(data.result).to.have.property('id');
// expect(data.result.id).to.be.greaterThan(0);
// });

// cy.task('backendApiPut', {
// token: token,
// path: '/api/settings/auth-methods',
// data: {
// value: [
// 'local',
// 'ldap'
// ]
// }
// }).then((data) => {
// cy.validateSwaggerSchema('put', 200, '/settings/{name}', data);
// expect(data.result).to.have.property('id');
// expect(data.result.id).to.be.greaterThan(0);
// });
});
});

it.skip('Should log in with LDAP', function() {
// cy.task('backendApiPost', {
// token: token,
// path: '/api/auth',
// data: {
// // Authentik LDAP creds:
// type: 'ldap',
// identity: 'cypress',
// secret: 'fqXBfUYqHvYqiwBHWW7f'
// }
// }).then((data) => {
// cy.validateSwaggerSchema('post', 200, '/auth', data);
// expect(data.result).to.have.property('token');
// });
});
}
});
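
Aside: the deleted spec above gates its body on Cypress.env('stack') and Cypress.env('skipStackCheck'). Such values are normally supplied on the Cypress command line with --env; a hedged example, since the exact CI invocation is not shown in this diff:

    # Run the suite with the environment values the spec checks for.
    npx cypress run --env stack=postgres,skipStackCheck=true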
@ -1,97 +0,0 @@
/// <reference types="cypress" />

describe('OAuth with Authentik', () => {
let token;
if (Cypress.env('skipStackCheck') === 'true' || Cypress.env('stack') === 'postgres') {

before(() => {
cy.getToken().then((tok) => {
token = tok;

// cy.task('backendApiPut', {
// token: token,
// path: '/api/settings/oauth-auth',
// data: {
// value: {
// client_id: '7iO2AvuUp9JxiSVkCcjiIbQn4mHmUMBj7yU8EjqU',
// client_secret: 'VUMZzaGTrmXJ8PLksyqzyZ6lrtz04VvejFhPMBP9hGZNCMrn2LLBanySs4ta7XGrDr05xexPyZT1XThaf4ubg00WqvHRVvlu4Naa1aMootNmSRx3VAk6RSslUJmGyHzq',
// authorization_url: 'http://authentik:9000/application/o/authorize/',
// resource_url: 'http://authentik:9000/application/o/userinfo/',
// token_url: 'http://authentik:9000/application/o/token/',
// logout_url: 'http://authentik:9000/application/o/npm/end-session/',
// identifier: 'preferred_username',
// scopes: [],
// auto_create_user: true
// }
// }
// }).then((data) => {
// cy.validateSwaggerSchema('put', 200, '/settings/{name}', data);
// expect(data.result).to.have.property('id');
// expect(data.result.id).to.be.greaterThan(0);
// });

// cy.task('backendApiPut', {
// token: token,
// path: '/api/settings/auth-methods',
// data: {
// value: [
// 'local',
// 'oauth'
// ]
// }
// }).then((data) => {
// cy.validateSwaggerSchema('put', 200, '/settings/{name}', data);
// expect(data.result).to.have.property('id');
// expect(data.result.id).to.be.greaterThan(0);
// });
});
});

it.skip('Should log in with OAuth', function() {
// cy.task('backendApiGet', {
// path: '/oauth/login?redirect_base=' + encodeURI(Cypress.config('baseUrl')),
// }).then((data) => {
// expect(data).to.have.property('result');

// cy.origin('http://authentik:9000', {args: data.result}, (url) => {
// cy.visit(url);
// cy.get('ak-flow-executor')
// .shadow()
// .find('ak-stage-identification')
// .shadow()
// .find('input[name="uidField"]', { visible: true })
// .type('cypress');

// cy.get('ak-flow-executor')
// .shadow()
// .find('ak-stage-identification')
// .shadow()
// .find('button[type="submit"]', { visible: true })
// .click();

// cy.get('ak-flow-executor')
// .shadow()
// .find('ak-stage-password')
// .shadow()
// .find('input[name="password"]', { visible: true })
// .type('fqXBfUYqHvYqiwBHWW7f');

// cy.get('ak-flow-executor')
// .shadow()
// .find('ak-stage-password')
// .shadow()
// .find('button[type="submit"]', { visible: true })
// .click();
// })

// // we should be logged in
// cy.get('#root p.chakra-text')
// .first()
// .should('have.text', 'Nginx Proxy Manager');

// // logout:
// cy.clearLocalStorage();
// });
});
}
});