Compare commits

..

7 Commits

64 changed files with 381 additions and 798 deletions

View File

@ -1 +1 @@
2.12.2 2.12.1

48
Jenkinsfile vendored
View File

@ -43,7 +43,7 @@ pipeline {
steps { steps {
script { script {
// Defaults to the Branch name, which is applies to all branches AND pr's // Defaults to the Branch name, which is applies to all branches AND pr's
buildxPushTags = "-t docker.io/nginxproxymanager/${IMAGE}-dev:${BRANCH_LOWER}" buildxPushTags = "-t docker.io/jc21/${IMAGE}:github-${BRANCH_LOWER}"
} }
} }
} }
@ -167,44 +167,6 @@ pipeline {
} }
} }
} }
stage('Test Postgres') {
environment {
COMPOSE_PROJECT_NAME = "npm_${BRANCH_LOWER}_${BUILD_NUMBER}_postgres"
COMPOSE_FILE = 'docker/docker-compose.ci.yml:docker/docker-compose.ci.postgres.yml'
}
when {
not {
equals expected: 'UNSTABLE', actual: currentBuild.result
}
}
steps {
sh 'rm -rf ./test/results/junit/*'
sh './scripts/ci/fulltest-cypress'
}
post {
always {
// Dumps to analyze later
sh 'mkdir -p debug/postgres'
sh 'docker logs $(docker-compose ps --all -q fullstack) > debug/postgres/docker_fullstack.log 2>&1'
sh 'docker logs $(docker-compose ps --all -q stepca) > debug/postgres/docker_stepca.log 2>&1'
sh 'docker logs $(docker-compose ps --all -q pdns) > debug/postgres/docker_pdns.log 2>&1'
sh 'docker logs $(docker-compose ps --all -q pdns-db) > debug/postgres/docker_pdns-db.log 2>&1'
sh 'docker logs $(docker-compose ps --all -q dnsrouter) > debug/postgres/docker_dnsrouter.log 2>&1'
sh 'docker logs $(docker-compose ps --all -q db-postgres) > debug/postgres/docker_db-postgres.log 2>&1'
sh 'docker logs $(docker-compose ps --all -q authentik) > debug/postgres/docker_authentik.log 2>&1'
sh 'docker logs $(docker-compose ps --all -q authentik-redis) > debug/postgres/docker_authentik-redis.log 2>&1'
sh 'docker logs $(docker-compose ps --all -q authentik-ldap) > debug/postgres/docker_authentik-ldap.log 2>&1'
junit 'test/results/junit/*'
sh 'docker-compose down --remove-orphans --volumes -t 30 || true'
}
unstable {
dir(path: 'testing/results') {
archiveArtifacts(allowEmptyArchive: true, artifacts: '**/*', excludes: '**/*.xml')
}
}
}
}
stage('MultiArch Build') { stage('MultiArch Build') {
when { when {
not { not {
@ -241,13 +203,7 @@ pipeline {
} }
steps { steps {
script { script {
npmGithubPrComment("""Docker Image for build ${BUILD_NUMBER} is available on npmGithubPrComment("Docker Image for build ${BUILD_NUMBER} is available on [DockerHub](https://cloud.docker.com/repository/docker/jc21/${IMAGE}) as `jc21/${IMAGE}:github-${BRANCH_LOWER}`\n\n**Note:** ensure you backup your NPM instance before testing this PR image! Especially if this PR contains database changes.", true)
[DockerHub](https://cloud.docker.com/repository/docker/nginxproxymanager/${IMAGE}-dev)
as `nginxproxymanager/${IMAGE}-dev:${BRANCH_LOWER}`
**Note:** ensure you backup your NPM instance before testing this image! Especially if there are database changes
**Note:** this is a different docker image namespace than the official image
""", true)
} }
} }
} }

View File

@ -1,7 +1,7 @@
<p align="center"> <p align="center">
<img src="https://nginxproxymanager.com/github.png"> <img src="https://nginxproxymanager.com/github.png">
<br><br> <br><br>
<img src="https://img.shields.io/badge/version-2.12.2-green.svg?style=for-the-badge"> <img src="https://img.shields.io/badge/version-2.12.1-green.svg?style=for-the-badge">
<a href="https://hub.docker.com/repository/docker/jc21/nginx-proxy-manager"> <a href="https://hub.docker.com/repository/docker/jc21/nginx-proxy-manager">
<img src="https://img.shields.io/docker/stars/jc21/nginx-proxy-manager.svg?style=for-the-badge"> <img src="https://img.shields.io/docker/stars/jc21/nginx-proxy-manager.svg?style=for-the-badge">
</a> </a>
@ -101,6 +101,11 @@ Immediately after logging in with this default user you will be asked to modify
All are welcome to create pull requests for this project, against the `develop` branch. Official releases are created from the `master` branch. All are welcome to create pull requests for this project, against the `develop` branch. Official releases are created from the `master` branch.
Run the following commands to test locally:
- `cd backend && npm i && node_modules/eslint/bin/eslint.js .`
- `cd test && npm i && npm run cypress`
CI is used in this project. All PR's must pass before being considered. After passing, CI is used in this project. All PR's must pass before being considered. After passing,
docker builds for PR's are available on dockerhub for manual verifications. docker builds for PR's are available on dockerhub for manual verifications.

View File

@ -81,7 +81,7 @@ const internalAccessList = {
return internalAccessList.build(row) return internalAccessList.build(row)
.then(() => { .then(() => {
if (parseInt(row.proxy_host_count, 10)) { if (row.proxy_host_count) {
return internalNginx.bulkGenerateConfigs('proxy_host', row.proxy_hosts); return internalNginx.bulkGenerateConfigs('proxy_host', row.proxy_hosts);
} }
}) })
@ -223,7 +223,7 @@ const internalAccessList = {
.then((row) => { .then((row) => {
return internalAccessList.build(row) return internalAccessList.build(row)
.then(() => { .then(() => {
if (parseInt(row.proxy_host_count, 10)) { if (row.proxy_host_count) {
return internalNginx.bulkGenerateConfigs('proxy_host', row.proxy_hosts); return internalNginx.bulkGenerateConfigs('proxy_host', row.proxy_hosts);
} }
}).then(internalNginx.reload) }).then(internalNginx.reload)
@ -252,10 +252,7 @@ const internalAccessList = {
let query = accessListModel let query = accessListModel
.query() .query()
.select('access_list.*', accessListModel.raw('COUNT(proxy_host.id) as proxy_host_count')) .select('access_list.*', accessListModel.raw('COUNT(proxy_host.id) as proxy_host_count'))
.leftJoin('proxy_host', function() { .joinRaw('LEFT JOIN `proxy_host` ON `proxy_host`.`access_list_id` = `access_list`.`id` AND `proxy_host`.`is_deleted` = 0')
this.on('proxy_host.access_list_id', '=', 'access_list.id')
.andOn('proxy_host.is_deleted', '=', 0);
})
.where('access_list.is_deleted', 0) .where('access_list.is_deleted', 0)
.andWhere('access_list.id', data.id) .andWhere('access_list.id', data.id)
.allowGraph('[owner,items,clients,proxy_hosts.[certificate,access_list.[clients,items]]]') .allowGraph('[owner,items,clients,proxy_hosts.[certificate,access_list.[clients,items]]]')
@ -376,10 +373,7 @@ const internalAccessList = {
let query = accessListModel let query = accessListModel
.query() .query()
.select('access_list.*', accessListModel.raw('COUNT(proxy_host.id) as proxy_host_count')) .select('access_list.*', accessListModel.raw('COUNT(proxy_host.id) as proxy_host_count'))
.leftJoin('proxy_host', function() { .joinRaw('LEFT JOIN `proxy_host` ON `proxy_host`.`access_list_id` = `access_list`.`id` AND `proxy_host`.`is_deleted` = 0')
this.on('proxy_host.access_list_id', '=', 'access_list.id')
.andOn('proxy_host.is_deleted', '=', 0);
})
.where('access_list.is_deleted', 0) .where('access_list.is_deleted', 0)
.groupBy('access_list.id') .groupBy('access_list.id')
.allowGraph('[owner,items,clients]') .allowGraph('[owner,items,clients]')

View File

@ -1,6 +1,5 @@
const error = require('../lib/error'); const error = require('../lib/error');
const auditLogModel = require('../models/audit-log'); const auditLogModel = require('../models/audit-log');
const {castJsonIfNeed} = require('../lib/helpers');
const internalAuditLog = { const internalAuditLog = {
@ -23,9 +22,9 @@ const internalAuditLog = {
.allowGraph('[user]'); .allowGraph('[user]');
// Query is used for searching // Query is used for searching
if (typeof search_query === 'string' && search_query.length > 0) { if (typeof search_query === 'string') {
query.where(function () { query.where(function () {
this.where(castJsonIfNeed('meta'), 'like', '%' + search_query + '%'); this.where('meta', 'like', '%' + search_query + '%');
}); });
} }

View File

@ -6,7 +6,6 @@ const internalHost = require('./host');
const internalNginx = require('./nginx'); const internalNginx = require('./nginx');
const internalAuditLog = require('./audit-log'); const internalAuditLog = require('./audit-log');
const internalCertificate = require('./certificate'); const internalCertificate = require('./certificate');
const {castJsonIfNeed} = require('../lib/helpers');
function omissions () { function omissions () {
return ['is_deleted']; return ['is_deleted'];
@ -410,16 +409,16 @@ const internalDeadHost = {
.where('is_deleted', 0) .where('is_deleted', 0)
.groupBy('id') .groupBy('id')
.allowGraph('[owner,certificate]') .allowGraph('[owner,certificate]')
.orderBy(castJsonIfNeed('domain_names'), 'ASC'); .orderBy('domain_names', 'ASC');
if (access_data.permission_visibility !== 'all') { if (access_data.permission_visibility !== 'all') {
query.andWhere('owner_user_id', access.token.getUserId(1)); query.andWhere('owner_user_id', access.token.getUserId(1));
} }
// Query is used for searching // Query is used for searching
if (typeof search_query === 'string' && search_query.length > 0) { if (typeof search_query === 'string') {
query.where(function () { query.where(function () {
this.where(castJsonIfNeed('domain_names'), 'like', '%' + search_query + '%'); this.where('domain_names', 'like', '%' + search_query + '%');
}); });
} }

View File

@ -2,7 +2,6 @@ const _ = require('lodash');
const proxyHostModel = require('../models/proxy_host'); const proxyHostModel = require('../models/proxy_host');
const redirectionHostModel = require('../models/redirection_host'); const redirectionHostModel = require('../models/redirection_host');
const deadHostModel = require('../models/dead_host'); const deadHostModel = require('../models/dead_host');
const {castJsonIfNeed} = require('../lib/helpers');
const internalHost = { const internalHost = {
@ -18,7 +17,7 @@ const internalHost = {
cleanSslHstsData: function (data, existing_data) { cleanSslHstsData: function (data, existing_data) {
existing_data = existing_data === undefined ? {} : existing_data; existing_data = existing_data === undefined ? {} : existing_data;
const combined_data = _.assign({}, existing_data, data); let combined_data = _.assign({}, existing_data, data);
if (!combined_data.certificate_id) { if (!combined_data.certificate_id) {
combined_data.ssl_forced = false; combined_data.ssl_forced = false;
@ -74,7 +73,7 @@ const internalHost = {
* @returns {Promise} * @returns {Promise}
*/ */
getHostsWithDomains: function (domain_names) { getHostsWithDomains: function (domain_names) {
const promises = [ let promises = [
proxyHostModel proxyHostModel
.query() .query()
.where('is_deleted', 0), .where('is_deleted', 0),
@ -126,19 +125,19 @@ const internalHost = {
* @returns {Promise} * @returns {Promise}
*/ */
isHostnameTaken: function (hostname, ignore_type, ignore_id) { isHostnameTaken: function (hostname, ignore_type, ignore_id) {
const promises = [ let promises = [
proxyHostModel proxyHostModel
.query() .query()
.where('is_deleted', 0) .where('is_deleted', 0)
.andWhere(castJsonIfNeed('domain_names'), 'like', '%' + hostname + '%'), .andWhere('domain_names', 'like', '%' + hostname + '%'),
redirectionHostModel redirectionHostModel
.query() .query()
.where('is_deleted', 0) .where('is_deleted', 0)
.andWhere(castJsonIfNeed('domain_names'), 'like', '%' + hostname + '%'), .andWhere('domain_names', 'like', '%' + hostname + '%'),
deadHostModel deadHostModel
.query() .query()
.where('is_deleted', 0) .where('is_deleted', 0)
.andWhere(castJsonIfNeed('domain_names'), 'like', '%' + hostname + '%') .andWhere('domain_names', 'like', '%' + hostname + '%')
]; ];
return Promise.all(promises) return Promise.all(promises)

View File

@ -155,6 +155,7 @@ const internalNginx = {
let locationCopy = Object.assign({}, {access_list_id: host.access_list_id}, {certificate_id: host.certificate_id}, let locationCopy = Object.assign({}, {access_list_id: host.access_list_id}, {certificate_id: host.certificate_id},
{ssl_forced: host.ssl_forced}, {caching_enabled: host.caching_enabled}, {block_exploits: host.block_exploits}, {ssl_forced: host.ssl_forced}, {caching_enabled: host.caching_enabled}, {block_exploits: host.block_exploits},
{allow_websocket_upgrade: host.allow_websocket_upgrade}, {http2_support: host.http2_support}, {allow_websocket_upgrade: host.allow_websocket_upgrade}, {http2_support: host.http2_support},
{enable_proxy_protocol: host.enable_proxy_protocol}, {load_balancer_ip: host.load_balancer_ip},
{hsts_enabled: host.hsts_enabled}, {hsts_subdomains: host.hsts_subdomains}, {access_list: host.access_list}, {hsts_enabled: host.hsts_enabled}, {hsts_subdomains: host.hsts_subdomains}, {access_list: host.access_list},
{certificate: host.certificate}, host.locations[i]); {certificate: host.certificate}, host.locations[i]);

View File

@ -6,7 +6,6 @@ const internalHost = require('./host');
const internalNginx = require('./nginx'); const internalNginx = require('./nginx');
const internalAuditLog = require('./audit-log'); const internalAuditLog = require('./audit-log');
const internalCertificate = require('./certificate'); const internalCertificate = require('./certificate');
const {castJsonIfNeed} = require('../lib/helpers');
function omissions () { function omissions () {
return ['is_deleted', 'owner.is_deleted']; return ['is_deleted', 'owner.is_deleted'];
@ -417,16 +416,16 @@ const internalProxyHost = {
.where('is_deleted', 0) .where('is_deleted', 0)
.groupBy('id') .groupBy('id')
.allowGraph('[owner,access_list,certificate]') .allowGraph('[owner,access_list,certificate]')
.orderBy(castJsonIfNeed('domain_names'), 'ASC'); .orderBy('domain_names', 'ASC');
if (access_data.permission_visibility !== 'all') { if (access_data.permission_visibility !== 'all') {
query.andWhere('owner_user_id', access.token.getUserId(1)); query.andWhere('owner_user_id', access.token.getUserId(1));
} }
// Query is used for searching // Query is used for searching
if (typeof search_query === 'string' && search_query.length > 0) { if (typeof search_query === 'string') {
query.where(function () { query.where(function () {
this.where(castJsonIfNeed('domain_names'), 'like', `%${search_query}%`); this.where('domain_names', 'like', '%' + search_query + '%');
}); });
} }

View File

@ -6,7 +6,6 @@ const internalHost = require('./host');
const internalNginx = require('./nginx'); const internalNginx = require('./nginx');
const internalAuditLog = require('./audit-log'); const internalAuditLog = require('./audit-log');
const internalCertificate = require('./certificate'); const internalCertificate = require('./certificate');
const {castJsonIfNeed} = require('../lib/helpers');
function omissions () { function omissions () {
return ['is_deleted']; return ['is_deleted'];
@ -410,16 +409,16 @@ const internalRedirectionHost = {
.where('is_deleted', 0) .where('is_deleted', 0)
.groupBy('id') .groupBy('id')
.allowGraph('[owner,certificate]') .allowGraph('[owner,certificate]')
.orderBy(castJsonIfNeed('domain_names'), 'ASC'); .orderBy('domain_names', 'ASC');
if (access_data.permission_visibility !== 'all') { if (access_data.permission_visibility !== 'all') {
query.andWhere('owner_user_id', access.token.getUserId(1)); query.andWhere('owner_user_id', access.token.getUserId(1));
} }
// Query is used for searching // Query is used for searching
if (typeof search_query === 'string' && search_query.length > 0) { if (typeof search_query === 'string') {
query.where(function () { query.where(function () {
this.where(castJsonIfNeed('domain_names'), 'like', `%${search_query}%`); this.where('domain_names', 'like', '%' + search_query + '%');
}); });
} }

View File

@ -4,7 +4,6 @@ const utils = require('../lib/utils');
const streamModel = require('../models/stream'); const streamModel = require('../models/stream');
const internalNginx = require('./nginx'); const internalNginx = require('./nginx');
const internalAuditLog = require('./audit-log'); const internalAuditLog = require('./audit-log');
const {castJsonIfNeed} = require('../lib/helpers');
function omissions () { function omissions () {
return ['is_deleted']; return ['is_deleted'];
@ -294,21 +293,21 @@ const internalStream = {
getAll: (access, expand, search_query) => { getAll: (access, expand, search_query) => {
return access.can('streams:list') return access.can('streams:list')
.then((access_data) => { .then((access_data) => {
const query = streamModel let query = streamModel
.query() .query()
.where('is_deleted', 0) .where('is_deleted', 0)
.groupBy('id') .groupBy('id')
.allowGraph('[owner]') .allowGraph('[owner]')
.orderByRaw('CAST(incoming_port AS INTEGER) ASC'); .orderBy('incoming_port', 'ASC');
if (access_data.permission_visibility !== 'all') { if (access_data.permission_visibility !== 'all') {
query.andWhere('owner_user_id', access.token.getUserId(1)); query.andWhere('owner_user_id', access.token.getUserId(1));
} }
// Query is used for searching // Query is used for searching
if (typeof search_query === 'string' && search_query.length > 0) { if (typeof search_query === 'string') {
query.where(function () { query.where(function () {
this.where(castJsonIfNeed('incoming_port'), 'like', `%${search_query}%`); this.where('incoming_port', 'like', '%' + search_query + '%');
}); });
} }
@ -328,9 +327,9 @@ const internalStream = {
* @returns {Promise} * @returns {Promise}
*/ */
getCount: (user_id, visibility) => { getCount: (user_id, visibility) => {
const query = streamModel let query = streamModel
.query() .query()
.count('id AS count') .count('id as count')
.where('is_deleted', 0); .where('is_deleted', 0);
if (visibility !== 'all') { if (visibility !== 'all') {

View File

@ -5,8 +5,6 @@ const authModel = require('../models/auth');
const helpers = require('../lib/helpers'); const helpers = require('../lib/helpers');
const TokenModel = require('../models/token'); const TokenModel = require('../models/token');
const ERROR_MESSAGE_INVALID_AUTH = 'Invalid email or password';
module.exports = { module.exports = {
/** /**
@ -71,15 +69,15 @@ module.exports = {
}; };
}); });
} else { } else {
throw new error.AuthError(ERROR_MESSAGE_INVALID_AUTH); throw new error.AuthError('Invalid password');
} }
}); });
} else { } else {
throw new error.AuthError(ERROR_MESSAGE_INVALID_AUTH); throw new error.AuthError('No password auth for user');
} }
}); });
} else { } else {
throw new error.AuthError(ERROR_MESSAGE_INVALID_AUTH); throw new error.AuthError('No relevant user found');
} }
}); });
}, },

View File

@ -2,10 +2,7 @@ const fs = require('fs');
const NodeRSA = require('node-rsa'); const NodeRSA = require('node-rsa');
const logger = require('../logger').global; const logger = require('../logger').global;
const keysFile = '/data/keys.json'; const keysFile = '/data/keys.json';
const mysqlEngine = 'mysql2';
const postgresEngine = 'pg';
const sqliteClientName = 'sqlite3';
let instance = null; let instance = null;
@ -17,7 +14,7 @@ const configure = () => {
let configData; let configData;
try { try {
configData = require(filename); configData = require(filename);
} catch (_) { } catch (err) {
// do nothing // do nothing
} }
@ -37,7 +34,7 @@ const configure = () => {
logger.info('Using MySQL configuration'); logger.info('Using MySQL configuration');
instance = { instance = {
database: { database: {
engine: mysqlEngine, engine: 'mysql2',
host: envMysqlHost, host: envMysqlHost,
port: process.env.DB_MYSQL_PORT || 3306, port: process.env.DB_MYSQL_PORT || 3306,
user: envMysqlUser, user: envMysqlUser,
@ -49,33 +46,13 @@ const configure = () => {
return; return;
} }
const envPostgresHost = process.env.DB_POSTGRES_HOST || null;
const envPostgresUser = process.env.DB_POSTGRES_USER || null;
const envPostgresName = process.env.DB_POSTGRES_NAME || null;
if (envPostgresHost && envPostgresUser && envPostgresName) {
// we have enough postgres creds to go with postgres
logger.info('Using Postgres configuration');
instance = {
database: {
engine: postgresEngine,
host: envPostgresHost,
port: process.env.DB_POSTGRES_PORT || 5432,
user: envPostgresUser,
password: process.env.DB_POSTGRES_PASSWORD,
name: envPostgresName,
},
keys: getKeys(),
};
return;
}
const envSqliteFile = process.env.DB_SQLITE_FILE || '/data/database.sqlite'; const envSqliteFile = process.env.DB_SQLITE_FILE || '/data/database.sqlite';
logger.info(`Using Sqlite: ${envSqliteFile}`); logger.info(`Using Sqlite: ${envSqliteFile}`);
instance = { instance = {
database: { database: {
engine: 'knex-native', engine: 'knex-native',
knex: { knex: {
client: sqliteClientName, client: 'sqlite3',
connection: { connection: {
filename: envSqliteFile filename: envSqliteFile
}, },
@ -166,27 +143,7 @@ module.exports = {
*/ */
isSqlite: function () { isSqlite: function () {
instance === null && configure(); instance === null && configure();
return instance.database.knex && instance.database.knex.client === sqliteClientName; return instance.database.knex && instance.database.knex.client === 'sqlite3';
},
/**
* Is this a mysql configuration?
*
* @returns {boolean}
*/
isMysql: function () {
instance === null && configure();
return instance.database.engine === mysqlEngine;
},
/**
* Is this a postgres configuration?
*
* @returns {boolean}
*/
isPostgres: function () {
instance === null && configure();
return instance.database.engine === postgresEngine;
}, },
/** /**

View File

@ -1,6 +1,4 @@
const moment = require('moment'); const moment = require('moment');
const {isPostgres} = require('./config');
const {ref} = require('objection');
module.exports = { module.exports = {
@ -47,16 +45,6 @@ module.exports = {
} }
}); });
return obj; return obj;
},
/**
* Casts a column to json if using postgres
*
* @param {string} colName
* @returns {string|Objection.ReferenceBuilder}
*/
castJsonIfNeed: function (colName) {
return isPostgres() ? ref(colName).castText() : colName;
} }
}; };

View File

@ -0,0 +1,41 @@
const migrate_name = 'proxy_protocol';
const logger = require('../logger').migrate;
/**
* Migrate
*
* @see http://knexjs.org/#Schema
*
* @param {Object} knex
* @param {Promise} Promise
* @returns {Promise}
*/
exports.up = function (knex/*, Promise*/) {
logger.info('[' + migrate_name + '] Migrating Up...');
return knex.schema.table('proxy_host', function (proxy_host) {
proxy_host.integer('enable_proxy_protocol').notNull().unsigned().defaultTo(0);
proxy_host.string('load_balancer_ip').notNull().defaultTo('');
})
.then(() => {
logger.info('[' + migrate_name + '] proxy_host Table altered');
});
};
/**
* Undo Migrate
*
* @param {Object} knex
* @param {Promise} Promise
* @returns {Promise}
*/
exports.down = function (knex/*, Promise*/) {
return knex.schema.table('proxy_host', function (proxy_host) {
proxy_host.dropColumn('enable_proxy_protocol');
proxy_host.dropColumn('load_balancer_ip');
})
.then(function () {
logger.info('[' + migrate_name + '] proxy_host Table altered');
});
};

View File

@ -0,0 +1,41 @@
const migrate_name = 'proxy_protocol_streams';
const logger = require('../logger').migrate;
/**
* Migrate
*
* @see http://knexjs.org/#Schema
*
* @param {Object} knex
* @param {Promise} Promise
* @returns {Promise}
*/
exports.up = function (knex/*, Promise*/) {
logger.info('[' + migrate_name + '] Migrating Up...');
return knex.schema.table('stream', function (stream) {
stream.integer('enable_proxy_protocol').notNull().unsigned().defaultTo(0);
stream.string('load_balancer_ip').notNull().defaultTo('');
})
.then(() => {
logger.info('[' + migrate_name + '] stream Table altered');
});
};
/**
* Undo Migrate
*
* @param {Object} knex
* @param {Promise} Promise
* @returns {Promise}
*/
exports.down = function (knex/*, Promise*/) {
return knex.schema.table('stream', function (stream) {
stream.dropColumn('enable_proxy_protocol');
stream.dropColumn('load_balancer_ip');
})
.then(function () {
logger.info('[' + migrate_name + '] stream Table altered');
});
};

View File

@ -17,9 +17,6 @@ const boolFields = [
'preserve_path', 'preserve_path',
'ssl_forced', 'ssl_forced',
'block_exploits', 'block_exploits',
'hsts_enabled',
'hsts_subdomains',
'http2_support',
]; ];
class RedirectionHost extends Model { class RedirectionHost extends Model {

View File

@ -23,7 +23,6 @@
"node-rsa": "^1.0.8", "node-rsa": "^1.0.8",
"objection": "3.0.1", "objection": "3.0.1",
"path": "^0.12.7", "path": "^0.12.7",
"pg": "^8.13.1",
"signale": "1.4.0", "signale": "1.4.0",
"sqlite3": "5.1.6", "sqlite3": "5.1.6",
"temp-write": "^4.0.0" "temp-write": "^4.0.0"

View File

@ -19,9 +19,7 @@
"incoming_port": { "incoming_port": {
"type": "integer", "type": "integer",
"minimum": 1, "minimum": 1,
"maximum": 65535, "maximum": 65535
"if": {"properties": {"tcp_forwarding": {"const": true}}},
"then": {"not": {"oneOf": [{"const": 80}, {"const": 443}]}}
}, },
"forwarding_host": { "forwarding_host": {
"anyOf": [ "anyOf": [

View File

@ -49,7 +49,8 @@
"minLength": 1 "minLength": 1
}, },
"password": { "password": {
"type": "string" "type": "string",
"minLength": 1
} }
} }
} }

View File

@ -15,18 +15,18 @@ const certbot = require('./lib/certbot');
const setupDefaultUser = () => { const setupDefaultUser = () => {
return userModel return userModel
.query() .query()
.select('id', ) .select(userModel.raw('COUNT(`id`) as `count`'))
.where('is_deleted', 0) .where('is_deleted', 0)
.first() .first()
.then((row) => { .then((row) => {
if (!row || !row.id) { if (!row.count) {
// Create a new user and set password // Create a new user and set password
const email = process.env.INITIAL_ADMIN_EMAIL || 'admin@example.com'; let email = process.env.INITIAL_ADMIN_EMAIL || 'admin@example.com';
const password = process.env.INITIAL_ADMIN_PASSWORD || 'changeme'; let password = process.env.INITIAL_ADMIN_PASSWORD || 'changeme';
logger.info('Creating a new user: ' + email + ' with password: ' + password); logger.info('Creating a new user: ' + email + ' with password: ' + password);
const data = { let data = {
is_deleted: 0, is_deleted: 0,
email: email, email: email,
name: 'Administrator', name: 'Administrator',
@ -77,11 +77,11 @@ const setupDefaultUser = () => {
const setupDefaultSettings = () => { const setupDefaultSettings = () => {
return settingModel return settingModel
.query() .query()
.select('id') .select(settingModel.raw('COUNT(`id`) as `count`'))
.where({id: 'default-site'}) .where({id: 'default-site'})
.first() .first()
.then((row) => { .then((row) => {
if (!row || !row.id) { if (!row.count) {
settingModel settingModel
.query() .query()
.insert({ .insert({

View File

@ -4,7 +4,7 @@
auth_basic "Authorization required"; auth_basic "Authorization required";
auth_basic_user_file /data/access/{{ access_list_id }}; auth_basic_user_file /data/access/{{ access_list_id }};
{% if access_list.pass_auth == 0 or access_list.pass_auth == true %} {% if access_list.pass_auth == 0 %}
proxy_set_header Authorization ""; proxy_set_header Authorization "";
{% endif %} {% endif %}
@ -17,7 +17,7 @@
deny all; deny all;
# Access checks must... # Access checks must...
{% if access_list.satisfy_any == 1 or access_list.satisfy_any == true %} {% if access_list.satisfy_any == 1 %}
satisfy any; satisfy any;
{% else %} {% else %}
satisfy all; satisfy all;

View File

@ -1,20 +1,24 @@
-listen 80;
-{% if ipv6 -%}
-listen [::]:80;
-{% else -%}
-#listen [::]:80;
-{% endif %}
+{% if enable_proxy_protocol == 1 or enable_proxy_protocol == true -%}
+{% assign port_number_http = "88" -%}
+{% assign port_number_https = "444" -%}
+{% assign listen_extra_args = "proxy_protocol" -%}
+{% else -%}
+{% assign port_number_http = "80" -%}
+{% assign port_number_https = "443" -%}
+{% assign listen_extra_args = "" -%}
+{% endif -%}
+listen {{ port_number_http }} {{ listen_extra_args }};
+{% if ipv6 -%}
+listen [::]:{{ port_number_http }} {{ listen_extra_args }};
+{% endif -%}
 {% if certificate -%}
-listen 443 ssl;
-{% if ipv6 -%}
-listen [::]:443 ssl;
-{% else -%}
-#listen [::]:443;
-{% endif %}
-{% endif %}
+{% capture listen_extra_args_https %}ssl{% if http2_support %} http2{% endif %} {{ listen_extra_args }}{% endcapture -%}
+listen {{ port_number_https }} {{ listen_extra_args_https }};
+{% if ipv6 -%}
+listen [::]:{{ port_number_https }} {{ listen_extra_args_https }};
+{% endif -%}
+{% endif -%}
 server_name {{ domain_names | join: " " }};
-{% if http2_support == 1 or http2_support == true %}
-http2 on;
-{% else -%}
-http2 off;
-{% endif %}

View File

@ -7,7 +7,11 @@
proxy_set_header X-Forwarded-For $remote_addr; proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Real-IP $remote_addr;
proxy_pass {{ forward_scheme }}://{{ forward_host }}:{{ forward_port }}{{ forward_path }}; set $proxy_forward_scheme {{ forward_scheme }};
set $proxy_server "{{ forward_host }}";
set $proxy_port {{ forward_port }};
proxy_pass $proxy_forward_scheme://$proxy_server:$proxy_port{{ forward_path }};
{% include "_access.conf" %} {% include "_access.conf" %}
{% include "_assets.conf" %} {% include "_assets.conf" %}

View File

@ -0,0 +1,6 @@
{% if enable_proxy_protocol == 1 or enable_proxy_protocol == true %}
{% if load_balancer_ip != '' %}
set_real_ip_from {{ load_balancer_ip }};
real_ip_header proxy_protocol;
{% endif %}
{% endif %}

View File

@ -22,7 +22,5 @@ server {
} }
{% endif %} {% endif %}
# Custom
include /data/nginx/custom/server_dead[.]conf;
} }
{% endif %} {% endif %}

View File

@ -15,6 +15,7 @@ server {
{% include "_exploits.conf" %} {% include "_exploits.conf" %}
{% include "_hsts.conf" %} {% include "_hsts.conf" %}
{% include "_forced_ssl.conf" %} {% include "_forced_ssl.conf" %}
{% include "_proxy_protocol.conf" %}
{% if allow_websocket_upgrade == 1 or allow_websocket_upgrade == true %} {% if allow_websocket_upgrade == 1 or allow_websocket_upgrade == true %}
proxy_set_header Upgrade $http_upgrade; proxy_set_header Upgrade $http_upgrade;

View File

@ -1,31 +1,38 @@
# ------------------------------------------------------------ # ------------------------------------------------------------
# {{ incoming_port }} TCP: {{ tcp_forwarding }} UDP: {{ udp_forwarding }} # {{ incoming_port }} TCP: {{ tcp_forwarding }} UDP: {{ udp_forwarding }}
# ------------------------------------------------------------ # ------------------------------------------------------------
{% if enable_proxy_protocol == 1 or enable_proxy_protocol == true -%}
{% capture listen_extra_args %}proxy_protocol{% endcapture -%}
{% endif -%}
{% if enabled %} {% if enabled %}
{% if tcp_forwarding == 1 or tcp_forwarding == true -%} {% if tcp_forwarding == 1 or tcp_forwarding == true -%}
server { server {
listen {{ incoming_port }}; listen {{ incoming_port }} {{ listen_extra_args }};
{% if ipv6 -%} {% if ipv6 -%}
listen [::]:{{ incoming_port }}; listen [::]:{{ incoming_port }} {{ listen_extra_args }};
{% else -%} {% else -%}
#listen [::]:{{ incoming_port }}; #listen [::]:{{ incoming_port }} {{ listen_extra_args }};
{% endif %} {% endif %}
proxy_pass {{ forwarding_host }}:{{ forwarding_port }}; proxy_pass {{ forwarding_host }}:{{ forwarding_port }};
{% include '_proxy_protocol.conf' %}
# Custom # Custom
include /data/nginx/custom/server_stream[.]conf; include /data/nginx/custom/server_stream[.]conf;
include /data/nginx/custom/server_stream_tcp[.]conf; include /data/nginx/custom/server_stream_tcp[.]conf;
} }
{% endif %} {% endif %}
{% if udp_forwarding == 1 or udp_forwarding == true %} {% if udp_forwarding == 1 or udp_forwarding == true %}
{% # Proxy Protocol is not supported for UDP %}
{% assign listen_extra_args = "" %}
server { server {
listen {{ incoming_port }} udp; listen {{ incoming_port }} udp {{ listen_extra_args }};
{% if ipv6 -%} {% if ipv6 -%}
listen [::]:{{ incoming_port }} udp; listen [::]:{{ incoming_port }} udp {{ listen_extra_args }};
{% else -%} {% else -%}
#listen [::]:{{ incoming_port }} udp; #listen [::]:{{ incoming_port }} udp {{ listen_extra_args }};
{% endif %} {% endif %}
proxy_pass {{ forwarding_host }}:{{ forwarding_port }}; proxy_pass {{ forwarding_host }}:{{ forwarding_port }};

View File

@ -830,9 +830,9 @@ crc32-stream@^4.0.2:
readable-stream "^3.4.0" readable-stream "^3.4.0"
cross-spawn@^7.0.2: cross-spawn@^7.0.2:
version "7.0.6" version "7.0.3"
resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.6.tgz#8a58fe78f00dcd70c370451759dfbfaf03e8ee9f" resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6"
integrity sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA== integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==
dependencies: dependencies:
path-key "^3.1.0" path-key "^3.1.0"
shebang-command "^2.0.0" shebang-command "^2.0.0"
@ -2735,67 +2735,11 @@ path@^0.12.7:
process "^0.11.1" process "^0.11.1"
util "^0.10.3" util "^0.10.3"
pg-cloudflare@^1.1.1:
version "1.1.1"
resolved "https://registry.yarnpkg.com/pg-cloudflare/-/pg-cloudflare-1.1.1.tgz#e6d5833015b170e23ae819e8c5d7eaedb472ca98"
integrity sha512-xWPagP/4B6BgFO+EKz3JONXv3YDgvkbVrGw2mTo3D6tVDQRh1e7cqVGvyR3BE+eQgAvx1XhW/iEASj4/jCWl3Q==
pg-connection-string@2.5.0: pg-connection-string@2.5.0:
version "2.5.0" version "2.5.0"
resolved "https://registry.yarnpkg.com/pg-connection-string/-/pg-connection-string-2.5.0.tgz#538cadd0f7e603fc09a12590f3b8a452c2c0cf34" resolved "https://registry.yarnpkg.com/pg-connection-string/-/pg-connection-string-2.5.0.tgz#538cadd0f7e603fc09a12590f3b8a452c2c0cf34"
integrity sha512-r5o/V/ORTA6TmUnyWZR9nCj1klXCO2CEKNRlVuJptZe85QuhFayC7WeMic7ndayT5IRIR0S0xFxFi2ousartlQ== integrity sha512-r5o/V/ORTA6TmUnyWZR9nCj1klXCO2CEKNRlVuJptZe85QuhFayC7WeMic7ndayT5IRIR0S0xFxFi2ousartlQ==
pg-connection-string@^2.7.0:
version "2.7.0"
resolved "https://registry.yarnpkg.com/pg-connection-string/-/pg-connection-string-2.7.0.tgz#f1d3489e427c62ece022dba98d5262efcb168b37"
integrity sha512-PI2W9mv53rXJQEOb8xNR8lH7Hr+EKa6oJa38zsK0S/ky2er16ios1wLKhZyxzD7jUReiWokc9WK5nxSnC7W1TA==
pg-int8@1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/pg-int8/-/pg-int8-1.0.1.tgz#943bd463bf5b71b4170115f80f8efc9a0c0eb78c"
integrity sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==
pg-pool@^3.7.0:
version "3.7.0"
resolved "https://registry.yarnpkg.com/pg-pool/-/pg-pool-3.7.0.tgz#d4d3c7ad640f8c6a2245adc369bafde4ebb8cbec"
integrity sha512-ZOBQForurqh4zZWjrgSwwAtzJ7QiRX0ovFkZr2klsen3Nm0aoh33Ls0fzfv3imeH/nw/O27cjdz5kzYJfeGp/g==
pg-protocol@^1.7.0:
version "1.7.0"
resolved "https://registry.yarnpkg.com/pg-protocol/-/pg-protocol-1.7.0.tgz#ec037c87c20515372692edac8b63cf4405448a93"
integrity sha512-hTK/mE36i8fDDhgDFjy6xNOG+LCorxLG3WO17tku+ij6sVHXh1jQUJ8hYAnRhNla4QVD2H8er/FOjc/+EgC6yQ==
pg-types@^2.1.0:
version "2.2.0"
resolved "https://registry.yarnpkg.com/pg-types/-/pg-types-2.2.0.tgz#2d0250d636454f7cfa3b6ae0382fdfa8063254a3"
integrity sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA==
dependencies:
pg-int8 "1.0.1"
postgres-array "~2.0.0"
postgres-bytea "~1.0.0"
postgres-date "~1.0.4"
postgres-interval "^1.1.0"
pg@^8.13.1:
version "8.13.1"
resolved "https://registry.yarnpkg.com/pg/-/pg-8.13.1.tgz#6498d8b0a87ff76c2df7a32160309d3168c0c080"
integrity sha512-OUir1A0rPNZlX//c7ksiu7crsGZTKSOXJPgtNiHGIlC9H0lO+NC6ZDYksSgBYY/thSWhnSRBv8w1lieNNGATNQ==
dependencies:
pg-connection-string "^2.7.0"
pg-pool "^3.7.0"
pg-protocol "^1.7.0"
pg-types "^2.1.0"
pgpass "1.x"
optionalDependencies:
pg-cloudflare "^1.1.1"
pgpass@1.x:
version "1.0.5"
resolved "https://registry.yarnpkg.com/pgpass/-/pgpass-1.0.5.tgz#9b873e4a564bb10fa7a7dbd55312728d422a223d"
integrity sha512-FdW9r/jQZhSeohs1Z3sI1yxFQNFvMcnmfuj4WBMUTxOrAyLMaTcE1aAMBiTlbMNaXvBCQuVi0R7hd8udDSP7ug==
dependencies:
split2 "^4.1.0"
picomatch@^2.0.4, picomatch@^2.2.1: picomatch@^2.0.4, picomatch@^2.2.1:
version "2.2.2" version "2.2.2"
resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.2.2.tgz#21f333e9b6b8eaff02468f5146ea406d345f4dad" resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.2.2.tgz#21f333e9b6b8eaff02468f5146ea406d345f4dad"
@ -2814,28 +2758,6 @@ pkg-conf@^2.1.0:
find-up "^2.0.0" find-up "^2.0.0"
load-json-file "^4.0.0" load-json-file "^4.0.0"
postgres-array@~2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/postgres-array/-/postgres-array-2.0.0.tgz#48f8fce054fbc69671999329b8834b772652d82e"
integrity sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA==
postgres-bytea@~1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/postgres-bytea/-/postgres-bytea-1.0.0.tgz#027b533c0aa890e26d172d47cf9ccecc521acd35"
integrity sha512-xy3pmLuQqRBZBXDULy7KbaitYqLcmxigw14Q5sj8QBVLqEwXfeybIKVWiqAXTlcvdvb0+xkOtDbfQMOf4lST1w==
postgres-date@~1.0.4:
version "1.0.7"
resolved "https://registry.yarnpkg.com/postgres-date/-/postgres-date-1.0.7.tgz#51bc086006005e5061c591cee727f2531bf641a8"
integrity sha512-suDmjLVQg78nMK2UZ454hAG+OAW+HQPZ6n++TNDUX+L0+uUlLywnoxJKDou51Zm+zTCjrCl0Nq6J9C5hP9vK/Q==
postgres-interval@^1.1.0:
version "1.2.0"
resolved "https://registry.yarnpkg.com/postgres-interval/-/postgres-interval-1.2.0.tgz#b460c82cb1587507788819a06aa0fffdb3544695"
integrity sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==
dependencies:
xtend "^4.0.0"
prelude-ls@^1.2.1: prelude-ls@^1.2.1:
version "1.2.1" version "1.2.1"
resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.2.1.tgz#debc6489d7a6e6b0e7611888cec880337d316396" resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.2.1.tgz#debc6489d7a6e6b0e7611888cec880337d316396"
@ -3272,11 +3194,6 @@ socks@^2.6.2:
ip "^2.0.0" ip "^2.0.0"
smart-buffer "^4.2.0" smart-buffer "^4.2.0"
split2@^4.1.0:
version "4.2.0"
resolved "https://registry.yarnpkg.com/split2/-/split2-4.2.0.tgz#c9c5920904d148bab0b9f67145f245a86aadbfa4"
integrity sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==
sprintf-js@~1.0.2: sprintf-js@~1.0.2:
version "1.0.3" version "1.0.3"
resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c"
@ -3748,11 +3665,6 @@ xdg-basedir@^4.0.0:
resolved "https://registry.yarnpkg.com/xdg-basedir/-/xdg-basedir-4.0.0.tgz#4bc8d9984403696225ef83a1573cbbcb4e79db13" resolved "https://registry.yarnpkg.com/xdg-basedir/-/xdg-basedir-4.0.0.tgz#4bc8d9984403696225ef83a1573cbbcb4e79db13"
integrity sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q== integrity sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q==
xtend@^4.0.0:
version "4.0.2"
resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.2.tgz#bb72779f5fa465186b1f438f674fa347fdb5db54"
integrity sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==
y18n@^4.0.0: y18n@^4.0.0:
version "4.0.1" version "4.0.1"
resolved "https://registry.yarnpkg.com/y18n/-/y18n-4.0.1.tgz#8db2b83c31c5d75099bb890b23f3094891e247d4" resolved "https://registry.yarnpkg.com/y18n/-/y18n-4.0.1.tgz#8db2b83c31c5d75099bb890b23f3094891e247d4"

View File

@ -35,7 +35,7 @@ RUN echo "fs.file-max = 65535" > /etc/sysctl.conf \
COPY docker/scripts/install-s6 /tmp/install-s6 COPY docker/scripts/install-s6 /tmp/install-s6
RUN /tmp/install-s6 "${TARGETPLATFORM}" && rm -f /tmp/install-s6 RUN /tmp/install-s6 "${TARGETPLATFORM}" && rm -f /tmp/install-s6
EXPOSE 80 81 443 EXPOSE 80 81 88 443 444
COPY backend /app COPY backend /app
COPY frontend/dist /app/frontend COPY frontend/dist /app/frontend

View File

@ -1,8 +0,0 @@
AUTHENTIK_SECRET_KEY=gl8woZe8L6IIX8SC0c5Ocsj0xPkX5uJo5DVZCFl+L/QGbzuplfutYuua2ODNLEiDD3aFd9H2ylJmrke0
AUTHENTIK_REDIS__HOST=authentik-redis
AUTHENTIK_POSTGRESQL__HOST=db-postgres
AUTHENTIK_POSTGRESQL__USER=authentik
AUTHENTIK_POSTGRESQL__NAME=authentik
AUTHENTIK_POSTGRESQL__PASSWORD=07EKS5NLI6Tpv68tbdvrxfvj
AUTHENTIK_BOOTSTRAP_PASSWORD=admin
AUTHENTIK_BOOTSTRAP_EMAIL=admin@example.com

Binary file not shown.

View File

@ -29,12 +29,11 @@ COPY scripts/install-s6 /tmp/install-s6
RUN rm -f /etc/nginx/conf.d/production.conf \ RUN rm -f /etc/nginx/conf.d/production.conf \
&& chmod 644 /etc/logrotate.d/nginx-proxy-manager \ && chmod 644 /etc/logrotate.d/nginx-proxy-manager \
&& /tmp/install-s6 "${TARGETPLATFORM}" \ && /tmp/install-s6 "${TARGETPLATFORM}" \
&& rm -f /tmp/install-s6 \ && rm -f /tmp/install-s6
&& chmod 644 -R /root/.cache
# Certs for testing purposes # Certs for testing purposes
COPY --from=pebbleca /test/certs/pebble.minica.pem /etc/ssl/certs/pebble.minica.pem COPY --from=pebbleca /test/certs/pebble.minica.pem /etc/ssl/certs/pebble.minica.pem
COPY --from=testca /home/step/certs/root_ca.crt /etc/ssl/certs/NginxProxyManager.crt COPY --from=testca /home/step/certs/root_ca.crt /etc/ssl/certs/NginxProxyManager.crt
EXPOSE 80 81 443 EXPOSE 80 81 88 443 444
ENTRYPOINT [ "/init" ] ENTRYPOINT [ "/init" ]

View File

@ -1,78 +0,0 @@
# WARNING: This is a CI docker-compose file used for building and testing of the entire app, it should not be used for production.
services:
cypress:
environment:
CYPRESS_stack: 'postgres'
fullstack:
environment:
DB_POSTGRES_HOST: 'db-postgres'
DB_POSTGRES_PORT: '5432'
DB_POSTGRES_USER: 'npm'
DB_POSTGRES_PASSWORD: 'npmpass'
DB_POSTGRES_NAME: 'npm'
depends_on:
- db-postgres
- authentik
- authentik-worker
- authentik-ldap
db-postgres:
image: postgres:latest
environment:
POSTGRES_USER: 'npm'
POSTGRES_PASSWORD: 'npmpass'
POSTGRES_DB: 'npm'
volumes:
- psql_vol:/var/lib/postgresql/data
- ./ci/postgres:/docker-entrypoint-initdb.d
networks:
- fulltest
authentik-redis:
image: 'redis:alpine'
command: --save 60 1 --loglevel warning
restart: unless-stopped
healthcheck:
test: ['CMD-SHELL', 'redis-cli ping | grep PONG']
start_period: 20s
interval: 30s
retries: 5
timeout: 3s
volumes:
- redis_vol:/data
authentik:
image: ghcr.io/goauthentik/server:2024.10.1
restart: unless-stopped
command: server
env_file:
- ci.env
depends_on:
- authentik-redis
- db-postgres
authentik-worker:
image: ghcr.io/goauthentik/server:2024.10.1
restart: unless-stopped
command: worker
env_file:
- ci.env
depends_on:
- authentik-redis
- db-postgres
authentik-ldap:
image: ghcr.io/goauthentik/ldap:2024.10.1
environment:
AUTHENTIK_HOST: 'http://authentik:9000'
AUTHENTIK_INSECURE: 'true'
AUTHENTIK_TOKEN: 'wKYZuRcI0ETtb8vWzMCr04oNbhrQUUICy89hSpDln1OEKLjiNEuQ51044Vkp'
restart: unless-stopped
depends_on:
- authentik
volumes:
psql_vol:
redis_vol:

View File

@ -2,15 +2,17 @@
services: services:
fullstack: fullstack:
image: npm2dev:core image: nginxproxymanager:dev
container_name: npm2dev.core container_name: npm_core
build: build:
context: ./ context: ./
dockerfile: ./dev/Dockerfile dockerfile: ./dev/Dockerfile
ports: ports:
- 3080:80 - 3080:80
- 3081:81 - 3081:81
- 3088:88
- 3443:443 - 3443:443
- 3444:444
networks: networks:
nginx_proxy_manager: nginx_proxy_manager:
aliases: aliases:
@ -26,17 +28,11 @@ services:
DEVELOPMENT: 'true' DEVELOPMENT: 'true'
LE_STAGING: 'true' LE_STAGING: 'true'
# db: # db:
# DB_MYSQL_HOST: 'db' DB_MYSQL_HOST: 'db'
# DB_MYSQL_PORT: '3306' DB_MYSQL_PORT: '3306'
# DB_MYSQL_USER: 'npm' DB_MYSQL_USER: 'npm'
# DB_MYSQL_PASSWORD: 'npm' DB_MYSQL_PASSWORD: 'npm'
# DB_MYSQL_NAME: 'npm' DB_MYSQL_NAME: 'npm'
# db-postgres:
DB_POSTGRES_HOST: 'db-postgres'
DB_POSTGRES_PORT: '5432'
DB_POSTGRES_USER: 'npm'
DB_POSTGRES_PASSWORD: 'npmpass'
DB_POSTGRES_NAME: 'npm'
# DB_SQLITE_FILE: "/data/database.sqlite" # DB_SQLITE_FILE: "/data/database.sqlite"
# DISABLE_IPV6: "true" # DISABLE_IPV6: "true"
# Required for DNS Certificate provisioning testing: # Required for DNS Certificate provisioning testing:
@ -55,15 +51,11 @@ services:
timeout: 3s timeout: 3s
depends_on: depends_on:
- db - db
- db-postgres
- authentik
- authentik-worker
- authentik-ldap
working_dir: /app working_dir: /app
db: db:
image: jc21/mariadb-aria image: jc21/mariadb-aria
container_name: npm2dev.db container_name: npm_db
ports: ports:
- 33306:3306 - 33306:3306
networks: networks:
@ -76,22 +68,8 @@ services:
volumes: volumes:
- db_data:/var/lib/mysql - db_data:/var/lib/mysql
db-postgres:
image: postgres:latest
container_name: npm2dev.db-postgres
networks:
- nginx_proxy_manager
environment:
POSTGRES_USER: 'npm'
POSTGRES_PASSWORD: 'npmpass'
POSTGRES_DB: 'npm'
volumes:
- psql_data:/var/lib/postgresql/data
- ./ci/postgres:/docker-entrypoint-initdb.d
stepca: stepca:
image: jc21/testca image: jc21/testca
container_name: npm2dev.stepca
volumes: volumes:
- './dev/resolv.conf:/etc/resolv.conf:ro' - './dev/resolv.conf:/etc/resolv.conf:ro'
- '/etc/localtime:/etc/localtime:ro' - '/etc/localtime:/etc/localtime:ro'
@ -102,7 +80,6 @@ services:
dnsrouter: dnsrouter:
image: jc21/dnsrouter image: jc21/dnsrouter
container_name: npm2dev.dnsrouter
volumes: volumes:
- ./dev/dnsrouter-config.json.tmp:/dnsrouter-config.json:ro - ./dev/dnsrouter-config.json.tmp:/dnsrouter-config.json:ro
networks: networks:
@ -110,7 +87,7 @@ services:
swagger: swagger:
image: swaggerapi/swagger-ui:latest image: swaggerapi/swagger-ui:latest
container_name: npm2dev.swagger container_name: npm_swagger
ports: ports:
- 3082:80 - 3082:80
environment: environment:
@ -121,7 +98,7 @@ services:
squid: squid:
image: ubuntu/squid image: ubuntu/squid
container_name: npm2dev.squid container_name: npm_squid
volumes: volumes:
- './dev/squid.conf:/etc/squid/squid.conf:ro' - './dev/squid.conf:/etc/squid/squid.conf:ro'
- './dev/resolv.conf:/etc/resolv.conf:ro' - './dev/resolv.conf:/etc/resolv.conf:ro'
@ -133,7 +110,6 @@ services:
pdns: pdns:
image: pschiffe/pdns-mysql image: pschiffe/pdns-mysql
container_name: npm2dev.pdns
volumes: volumes:
- '/etc/localtime:/etc/localtime:ro' - '/etc/localtime:/etc/localtime:ro'
environment: environment:
@ -162,7 +138,6 @@ services:
pdns-db: pdns-db:
image: mariadb image: mariadb
container_name: npm2dev.pdns-db
environment: environment:
MYSQL_ROOT_PASSWORD: 'pdns' MYSQL_ROOT_PASSWORD: 'pdns'
MYSQL_DATABASE: 'pdns' MYSQL_DATABASE: 'pdns'
@ -176,8 +151,7 @@ services:
- nginx_proxy_manager - nginx_proxy_manager
cypress: cypress:
image: npm2dev:cypress image: "npm_dev_cypress"
container_name: npm2dev.cypress
build: build:
context: ../ context: ../
dockerfile: test/cypress/Dockerfile dockerfile: test/cypress/Dockerfile
@ -192,77 +166,16 @@ services:
networks: networks:
- nginx_proxy_manager - nginx_proxy_manager
authentik-redis:
image: 'redis:alpine'
container_name: npm2dev.authentik-redis
command: --save 60 1 --loglevel warning
networks:
- nginx_proxy_manager
restart: unless-stopped
healthcheck:
test: ['CMD-SHELL', 'redis-cli ping | grep PONG']
start_period: 20s
interval: 30s
retries: 5
timeout: 3s
volumes:
- redis_data:/data
authentik:
image: ghcr.io/goauthentik/server:2024.10.1
container_name: npm2dev.authentik
restart: unless-stopped
command: server
networks:
- nginx_proxy_manager
env_file:
- ci.env
ports:
- 9000:9000
depends_on:
- authentik-redis
- db-postgres
authentik-worker:
image: ghcr.io/goauthentik/server:2024.10.1
container_name: npm2dev.authentik-worker
restart: unless-stopped
command: worker
networks:
- nginx_proxy_manager
env_file:
- ci.env
depends_on:
- authentik-redis
- db-postgres
authentik-ldap:
image: ghcr.io/goauthentik/ldap:2024.10.1
container_name: npm2dev.authentik-ldap
networks:
- nginx_proxy_manager
environment:
AUTHENTIK_HOST: 'http://authentik:9000'
AUTHENTIK_INSECURE: 'true'
AUTHENTIK_TOKEN: 'wKYZuRcI0ETtb8vWzMCr04oNbhrQUUICy89hSpDln1OEKLjiNEuQ51044Vkp'
restart: unless-stopped
depends_on:
- authentik
volumes: volumes:
npm_data: npm_data:
name: npm2dev_core_data name: npm_core_data
le_data: le_data:
name: npm2dev_le_data name: npm_le_data
db_data: db_data:
name: npm2dev_db_data name: npm_db_data
pdns_mysql: pdns_mysql:
name: npnpm2dev_pdns_mysql name: npm_pdns_mysql
psql_data:
name: npm2dev_psql_data
redis_data:
name: npm2dev_redis_data
networks: networks:
nginx_proxy_manager: nginx_proxy_manager:
name: npm2dev_network name: npm_network

View File

@ -1,4 +1,4 @@
location ~* ^.*\.(css|js|jpe?g|gif|png|webp|woff|woff2|eot|ttf|svg|ico|css\.map|js\.map)$ { location ~* ^.*\.(css|js|jpe?g|gif|png|webp|woff|eot|ttf|svg|ico|css\.map|js\.map)$ {
if_modified_since off; if_modified_since off;
# use the public cache # use the public cache

View File

@ -50,6 +50,7 @@ networks:
Let's look at a Portainer example: Let's look at a Portainer example:
```yml ```yml
version: '3.8'
services: services:
portainer: portainer:
@ -91,6 +92,8 @@ This image supports the use of Docker secrets to import from files and keep sens
You can set any environment variable from a file by appending `__FILE` (double-underscore FILE) to the environmental variable name. You can set any environment variable from a file by appending `__FILE` (double-underscore FILE) to the environmental variable name.
```yml ```yml
version: '3.8'
secrets: secrets:
# Secrets are single-line text files where the sole content is the secret # Secrets are single-line text files where the sole content is the secret
# Paths in this example assume that secrets are kept in local folder called ".secrets" # Paths in this example assume that secrets are kept in local folder called ".secrets"
@ -181,7 +184,6 @@ You can add your custom configuration snippet files at `/data/nginx/custom` as f
- `/data/nginx/custom/server_stream.conf`: Included at the end of every stream server block - `/data/nginx/custom/server_stream.conf`: Included at the end of every stream server block
- `/data/nginx/custom/server_stream_tcp.conf`: Included at the end of every TCP stream server block - `/data/nginx/custom/server_stream_tcp.conf`: Included at the end of every TCP stream server block
- `/data/nginx/custom/server_stream_udp.conf`: Included at the end of every UDP stream server block - `/data/nginx/custom/server_stream_udp.conf`: Included at the end of every UDP stream server block
- `/data/nginx/custom/server_dead.conf`: Included at the end of every 404 server block
Every file is optional. Every file is optional.
@ -212,6 +214,29 @@ You can customise the logrotate configuration through a mount (if your custom co
For reference, the default configuration can be found [here](https://github.com/NginxProxyManager/nginx-proxy-manager/blob/develop/docker/rootfs/etc/logrotate.d/nginx-proxy-manager). For reference, the default configuration can be found [here](https://github.com/NginxProxyManager/nginx-proxy-manager/blob/develop/docker/rootfs/etc/logrotate.d/nginx-proxy-manager).
## Enabling PROXY protocol for Proxy Hosts
When running NPM behind a load balancer, you might want to use the [PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) to receive client information such as the source IP address (useful for banning IPs).
When configuring the PROXY protocol for proxy hosts, NPM uses port 88 for HTTP and port 444 for HTTPS traffic, so you can decide on a per-host basis whether to use the PROXY protocol.
To enable the PROXY protocol for your hosts you need to perform the following steps:
1. Expose the ports `88` (and `444` if applicable) by adjusting your `docker-compose.yml` (see the example after this list)
2. Edit your proxy hosts to enable the PROXY protocol
3. Edit your upstream load balancer to redirect traffic to the port `88`/`444` and enable the PROXY protocol
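A minimal sketch of step 1, assuming the compose file from the setup guide; only the `ports` section changes, and the host-side port numbers are up to you:
```yml
services:
  app:
    image: 'jc21/nginx-proxy-manager:latest'
    restart: unless-stopped
    ports:
      - '80:80'    # Public HTTP Port
      - '443:443'  # Public HTTPS Port
      - '81:81'    # Admin Web Port
      - '88:88'    # Public HTTP Port with proxy_protocol enabled
      - '444:444'  # Public HTTPS Port with proxy_protocol enabled
    volumes:
      - ./data:/data
      - ./letsencrypt:/etc/letsencrypt
```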
## Enabling PROXY protocol for Streams
When running NPM behind a load balancer, you might want to use the [PROXY protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) to receive client information such as the source IP address (useful for banning IPs).
Keep in mind that the PROXY protocol cannot be enabled for UDP endpoints.
To enable the PROXY protocol for streams:
1. Expose the desired port by adjusting your `docker-compose.yml`
2. Edit the Stream to enable the PROXY protocol
3. Edit your upstream load balancer to enable the PROXY protocol (a sketch for an nginx-based load balancer follows this list)
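For step 3, assuming the upstream load balancer is itself nginx, a minimal sketch of the load-balancer side could look like this (`npm-host` and the port numbers are placeholders, not part of NPM itself):
```
# nginx load balancer in front of NPM (stream module)
stream {
    server {
        listen 9000;                # port clients connect to on the load balancer
        proxy_pass npm-host:9000;   # the stream port published by your NPM container
        proxy_protocol on;          # send the PROXY protocol header to NPM
    }
}
```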
## Enabling the geoip2 module ## Enabling the geoip2 module
To enable the geoip2 module, you can create the custom configuration file `/data/nginx/custom/root_top.conf` and include the following snippet: To enable the geoip2 module, you can create the custom configuration file `/data/nginx/custom/root_top.conf` and include the following snippet:

View File

@ -9,6 +9,7 @@ outline: deep
Create a `docker-compose.yml` file: Create a `docker-compose.yml` file:
```yml ```yml
version: '3.8'
services: services:
app: app:
image: 'jc21/nginx-proxy-manager:latest' image: 'jc21/nginx-proxy-manager:latest'
@ -21,7 +22,8 @@ services:
# Add any other Stream port you want to expose # Add any other Stream port you want to expose
# - '21:21' # FTP # - '21:21' # FTP
environment: # Uncomment the next line if you uncomment anything in the section
# environment:
# Uncomment this if you want to change the location of # Uncomment this if you want to change the location of
# the SQLite DB file within the container # the SQLite DB file within the container
# DB_SQLITE_FILE: "/data/database.sqlite" # DB_SQLITE_FILE: "/data/database.sqlite"
@ -53,6 +55,7 @@ are going to use.
Here is an example of what your `docker-compose.yml` will look like when using a MariaDB container: Here is an example of what your `docker-compose.yml` will look like when using a MariaDB container:
```yml ```yml
version: '3.8'
services: services:
app: app:
image: 'jc21/nginx-proxy-manager:latest' image: 'jc21/nginx-proxy-manager:latest'
@ -62,6 +65,8 @@ services:
- '80:80' # Public HTTP Port - '80:80' # Public HTTP Port
- '443:443' # Public HTTPS Port - '443:443' # Public HTTPS Port
- '81:81' # Admin Web Port - '81:81' # Admin Web Port
# - '88:88' # Public HTTP Port with proxy_protocol enabled
# - '444:444' # Public HTTPS Port with proxy_protocol enabled
# Add any other Stream port you want to expose # Add any other Stream port you want to expose
# - '21:21' # FTP # - '21:21' # FTP
environment: environment:
@ -98,53 +103,6 @@ Please note, that `DB_MYSQL_*` environment variables will take precedent over `D
::: :::
## Using Postgres database
Similar to the MySQL server setup:
```yml
services:
app:
image: 'jc21/nginx-proxy-manager:latest'
restart: unless-stopped
ports:
# These ports are in format <host-port>:<container-port>
- '80:80' # Public HTTP Port
- '443:443' # Public HTTPS Port
- '81:81' # Admin Web Port
# Add any other Stream port you want to expose
# - '21:21' # FTP
environment:
# Postgres parameters:
DB_POSTGRES_HOST: 'db'
DB_POSTGRES_PORT: '5432'
DB_POSTGRES_USER: 'npm'
DB_POSTGRES_PASSWORD: 'npmpass'
DB_POSTGRES_NAME: 'npm'
# Uncomment this if IPv6 is not enabled on your host
# DISABLE_IPV6: 'true'
volumes:
- ./data:/data
- ./letsencrypt:/etc/letsencrypt
depends_on:
- db
db:
image: postgres:latest
environment:
POSTGRES_USER: 'npm'
POSTGRES_PASSWORD: 'npmpass'
POSTGRES_DB: 'npm'
volumes:
- ./postgres:/var/lib/postgresql/data
```
::: warning
Custom Postgres schema is not supported, as such `public` will be used.
:::
## Running on Raspberry PI / ARM devices ## Running on Raspberry PI / ARM devices
The docker images support the following architectures: The docker images support the following architectures:
@ -181,13 +139,5 @@ Email: admin@example.com
Password: changeme Password: changeme
``` ```
-Immediately after logging in with this default user you will be asked to modify your details and change your password. You can change defaults with:
+Immediately after logging in with this default user you will be asked to modify your details and change your password.
```
environment:
INITIAL_ADMIN_EMAIL: my@example.com
INITIAL_ADMIN_PASSWORD: mypassword1
```

View File

@ -12,7 +12,6 @@ Known integrations:
- [HomeAssistant Hass.io plugin](https://github.com/hassio-addons/addon-nginx-proxy-manager) - [HomeAssistant Hass.io plugin](https://github.com/hassio-addons/addon-nginx-proxy-manager)
- [UnRaid / Synology](https://github.com/jlesage/docker-nginx-proxy-manager) - [UnRaid / Synology](https://github.com/jlesage/docker-nginx-proxy-manager)
- [Proxmox Scripts](https://github.com/ej52/proxmox-scripts/tree/main/apps/nginx-proxy-manager) - [Proxmox Scripts](https://github.com/ej52/proxmox-scripts/tree/main/apps/nginx-proxy-manager)
- [Proxmox VE Helper-Scripts](https://community-scripts.github.io/ProxmoxVE/scripts?id=nginxproxymanager)
- [nginxproxymanagerGraf](https://github.com/ma-karai/nginxproxymanagerGraf) - [nginxproxymanagerGraf](https://github.com/ma-karai/nginxproxymanagerGraf)

View File

@ -873,9 +873,9 @@ mitt@^3.0.1:
integrity sha512-vKivATfr97l2/QBCYAkXYDbrIWPM2IIKEl7YPhjCvKlG3kE2gm+uBo6nEXK3M5/Ffh/FLpKExzOQ3JJoJGFKBw== integrity sha512-vKivATfr97l2/QBCYAkXYDbrIWPM2IIKEl7YPhjCvKlG3kE2gm+uBo6nEXK3M5/Ffh/FLpKExzOQ3JJoJGFKBw==
nanoid@^3.3.7: nanoid@^3.3.7:
version "3.3.8" version "3.3.7"
resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.8.tgz#b1be3030bee36aaff18bacb375e5cce521684baf" resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.7.tgz#d0c301a691bc8d54efa0a2226ccf3fe2fd656bd8"
integrity sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w== integrity sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==
oniguruma-to-js@0.4.3: oniguruma-to-js@0.4.3:
version "0.4.3" version "0.4.3"

View File

@ -1,6 +1,6 @@
<td class="text-center"> <td class="text-center">
<div class="avatar d-block" style="background-image: url(<%- (owner && owner.avatar) || '/images/default-avatar.jpg' %>)" title="Owned by <%- (owner && owner.name) || 'a deleted user' %>"> <div class="avatar d-block" style="background-image: url(<%- owner.avatar || '/images/default-avatar.jpg' %>)" title="Owned by <%- owner.name %>">
<span class="avatar-status <%- owner && !owner.is_disabled ? 'bg-green' : 'bg-red' %>"></span> <span class="avatar-status <%- owner.is_disabled ? 'bg-red' : 'bg-green' %>"></span>
</div> </div>
</td> </td>
<td> <td>

View File

@ -1,6 +1,6 @@
<td class="text-center"> <td class="text-center">
<div class="avatar d-block" style="background-image: url(<%- (owner && owner.avatar) || '/images/default-avatar.jpg' %>)" title="Owned by <%- (owner && owner.name) || 'a deleted user' %>"> <div class="avatar d-block" style="background-image: url(<%- owner.avatar || '/images/default-avatar.jpg' %>)" title="Owned by <%- owner.name %>">
<span class="avatar-status <%- owner && !owner.is_disabled ? 'bg-green' : 'bg-red' %>"></span> <span class="avatar-status <%- owner.is_disabled ? 'bg-red' : 'bg-green' %>"></span>
</div> </div>
</td> </td>
<td> <td>

View File

@ -1,6 +1,6 @@
<td class="text-center"> <td class="text-center">
<div class="avatar d-block" style="background-image: url(<%- (owner && owner.avatar) || '/images/default-avatar.jpg' %>)" title="Owned by <%- (owner && owner.name) || 'a deleted user' %>"> <div class="avatar d-block" style="background-image: url(<%- owner.avatar || '/images/default-avatar.jpg' %>)" title="Owned by <%- owner.name %>">
<span class="avatar-status <%- owner && !owner.is_disabled ? 'bg-green' : 'bg-red' %>"></span> <span class="avatar-status <%- owner.is_disabled ? 'bg-red' : 'bg-green' %>"></span>
</div> </div>
</td> </td>
<td> <td>

View File

@ -72,7 +72,7 @@
</label> </label>
</div> </div>
</div> </div>
<div class="col-sm-12 col-md-12"> <div class="col-sm-6 col-md-6">
<div class="form-group"> <div class="form-group">
<label class="custom-switch"> <label class="custom-switch">
<input type="checkbox" class="custom-switch-input" name="allow_websocket_upgrade" value="1"<%- allow_websocket_upgrade ? ' checked' : '' %>> <input type="checkbox" class="custom-switch-input" name="allow_websocket_upgrade" value="1"<%- allow_websocket_upgrade ? ' checked' : '' %>>
@ -81,6 +81,22 @@
</label> </label>
</div> </div>
</div> </div>
<div class="col-sm-6 col-md-6">
<div class="form-group">
<label class="custom-switch">
<input type="checkbox" class="custom-switch-input" name="enable_proxy_protocol" value="1"<%- enable_proxy_protocol ? ' checked' : '' %>>
<span class="custom-switch-indicator"></span>
<span class="custom-switch-description"><%- i18n('proxy-hosts', 'enable-proxy-protocol') %><a href="https://docs.nginx.com/nginx/admin-guide/load-balancer/using-proxy-protocol/#introduction" target="_blank"><i class="fe fe-help-circle"></i></a></span>
</label>
</div>
</div>
<div class="col-sm-12 col-md-12">
<div class="form-group">
<label class="form-label"><%- i18n('proxy-hosts', 'load-balancer-ip') %> <a href="https://docs.nginx.com/nginx/admin-guide/load-balancer/using-proxy-protocol/#changing-the-load-balancers-ip-address-to-the-client-ip-address" target="_blank"><i class="fe fe-help-circle"></i></a></label>
<input type="text" name="load_balancer_ip" class="form-control text-monospace" placeholder="" value="<%- load_balancer_ip %>" autocomplete="off" maxlength="255" <%- enable_proxy_protocol ? '' : ' disabled' %>>
</div>
</div>
<div class="col-sm-12 col-md-12"> <div class="col-sm-12 col-md-12">
<div class="form-group"> <div class="form-group">

View File

@ -43,6 +43,8 @@ module.exports = Mn.View.extend({
dns_provider_credentials: 'textarea[name="meta[dns_provider_credentials]"]', dns_provider_credentials: 'textarea[name="meta[dns_provider_credentials]"]',
propagation_seconds: 'input[name="meta[propagation_seconds]"]', propagation_seconds: 'input[name="meta[propagation_seconds]"]',
forward_scheme: 'select[name="forward_scheme"]', forward_scheme: 'select[name="forward_scheme"]',
enable_proxy_protocol: 'input[name="enable_proxy_protocol"]',
load_balancer_ip: 'input[name="load_balancer_ip"]',
letsencrypt: '.letsencrypt' letsencrypt: '.letsencrypt'
}, },
@ -51,6 +53,13 @@ module.exports = Mn.View.extend({
}, },
events: { events: {
'change @ui.enable_proxy_protocol': function () {
let checked = this.ui.enable_proxy_protocol.prop('checked');
this.ui.load_balancer_ip
.prop('disabled', !checked)
.parents('.form-group')
.css('opacity', checked ? 1 : 0.5);
},
'change @ui.certificate_select': function () { 'change @ui.certificate_select': function () {
let id = this.ui.certificate_select.val(); let id = this.ui.certificate_select.val();
if (id === 'new') { if (id === 'new') {
@ -163,6 +172,7 @@ module.exports = Mn.View.extend({
data.block_exploits = !!data.block_exploits; data.block_exploits = !!data.block_exploits;
data.caching_enabled = !!data.caching_enabled; data.caching_enabled = !!data.caching_enabled;
data.allow_websocket_upgrade = !!data.allow_websocket_upgrade; data.allow_websocket_upgrade = !!data.allow_websocket_upgrade;
data.enable_proxy_protocol = !!data.enable_proxy_protocol;
data.http2_support = !!data.http2_support; data.http2_support = !!data.http2_support;
data.hsts_enabled = !!data.hsts_enabled; data.hsts_enabled = !!data.hsts_enabled;
data.hsts_subdomains = !!data.hsts_subdomains; data.hsts_subdomains = !!data.hsts_subdomains;
@ -264,6 +274,7 @@ module.exports = Mn.View.extend({
onRender: function () { onRender: function () {
let view = this; let view = this;
this.ui.enable_proxy_protocol.trigger('change');
this.ui.ssl_forced.trigger('change'); this.ui.ssl_forced.trigger('change');
this.ui.hsts_enabled.trigger('change'); this.ui.hsts_enabled.trigger('change');

View File

@ -1,6 +1,6 @@
<td class="text-center"> <td class="text-center">
<div class="avatar d-block" style="background-image: url(<%- (owner && owner.avatar) || '/images/default-avatar.jpg' %>)" title="Owned by <%- (owner && owner.name) || 'a deleted user' %>"> <div class="avatar d-block" style="background-image: url(<%- owner.avatar || '/images/default-avatar.jpg' %>)" title="Owned by <%- owner.name %>">
<span class="avatar-status <%- owner && !owner.is_disabled ? 'bg-green' : 'bg-red' %>"></span> <span class="avatar-status <%- owner.is_disabled ? 'bg-red' : 'bg-green' %>"></span>
</div> </div>
</td> </td>
<td> <td>

View File

@ -1,6 +1,6 @@
<td class="text-center"> <td class="text-center">
<div class="avatar d-block" style="background-image: url(<%- (owner && owner.avatar) || '/images/default-avatar.jpg' %>)" title="Owned by <%- (owner && owner.name) || 'a deleted user' %>"> <div class="avatar d-block" style="background-image: url(<%- owner.avatar || '/images/default-avatar.jpg' %>)" title="Owned by <%- owner.name %>">
<span class="avatar-status <%- owner && !owner.is_disabled ? 'bg-green' : 'bg-red' %>"></span> <span class="avatar-status <%- owner.is_disabled ? 'bg-red' : 'bg-green' %>"></span>
</div> </div>
</td> </td>
<td> <td>

View File

@ -42,6 +42,22 @@
</label> </label>
</div> </div>
</div> </div>
<div class="col-sm-6 col-md-6">
<div class="form-group">
<label class="custom-switch">
<input type="checkbox" class="custom-switch-input" name="enable_proxy_protocol" value="1"<%- enable_proxy_protocol ? ' checked' : '' %>>
<span class="custom-switch-indicator"></span>
<span class="custom-switch-description"><%- i18n('streams', 'enable-proxy-protocol') %><a href="https://docs.nginx.com/nginx/admin-guide/load-balancer/using-proxy-protocol/#introduction" target="_blank"><i class="fe fe-help-circle"></i></a></span>
</label>
</div>
</div>
<div class="col-sm-12 col-md-12">
<div class="form-group">
<label class="form-label"><%- i18n('streams', 'load-balancer-ip') %><a href="https://docs.nginx.com/nginx/admin-guide/load-balancer/using-proxy-protocol/#changing-the-load-balancers-ip-address-to-the-client-ip-address" target="_blank"><i class="fe fe-help-circle"></i></a></label>
<input type="text" name="load_balancer_ip" class="form-control text-monospace" placeholder="" value="<%- load_balancer_ip %>" autocomplete="off" maxlength="255" <%- enable_proxy_protocol ? '' : ' disabled' %>>
</div>
</div>
<div class="col-sm-12 col-md-12"> <div class="col-sm-12 col-md-12">
<div class="forward-type-error invalid-feedback"><%- i18n('streams', 'forward-type-error') %></div> <div class="forward-type-error invalid-feedback"><%- i18n('streams', 'forward-type-error') %></div>
</div> </div>

View File

@ -14,6 +14,8 @@ module.exports = Mn.View.extend({
ui: { ui: {
form: 'form', form: 'form',
forwarding_host: 'input[name="forwarding_host"]', forwarding_host: 'input[name="forwarding_host"]',
enable_proxy_protocol: 'input[name="enable_proxy_protocol"]',
load_balancer_ip: 'input[name="load_balancer_ip"]',
type_error: '.forward-type-error', type_error: '.forward-type-error',
buttons: '.modal-footer button', buttons: '.modal-footer button',
switches: '.custom-switch-input', switches: '.custom-switch-input',
@ -22,6 +24,13 @@ module.exports = Mn.View.extend({
}, },
events: { events: {
'change @ui.enable_proxy_protocol': function () {
let checked = this.ui.enable_proxy_protocol.prop('checked');
this.ui.load_balancer_ip
.prop('disabled', !checked)
.parents('.form-group')
.css('opacity', checked ? 1 : 0.5);
},
'change @ui.switches': function () { 'change @ui.switches': function () {
this.ui.type_error.hide(); this.ui.type_error.hide();
}, },
@ -47,6 +56,7 @@ module.exports = Mn.View.extend({
data.forwarding_port = parseInt(data.forwarding_port, 10); data.forwarding_port = parseInt(data.forwarding_port, 10);
data.tcp_forwarding = !!data.tcp_forwarding; data.tcp_forwarding = !!data.tcp_forwarding;
data.udp_forwarding = !!data.udp_forwarding; data.udp_forwarding = !!data.udp_forwarding;
data.enable_proxy_protocol = !!data.enable_proxy_protocol;
let method = App.Api.Nginx.Streams.create; let method = App.Api.Nginx.Streams.create;
let is_new = true; let is_new = true;
@ -76,6 +86,10 @@ module.exports = Mn.View.extend({
} }
}, },
onRender: function () {
this.ui.enable_proxy_protocol.trigger('change');
},
initialize: function (options) { initialize: function (options) {
if (typeof options.model === 'undefined' || !options.model) { if (typeof options.model === 'undefined' || !options.model) {
this.model = new StreamModel.Model(); this.model = new StreamModel.Model();

View File

@ -1,6 +1,6 @@
<td class="text-center"> <td class="text-center">
<div class="avatar d-block" style="background-image: url(<%- (owner && owner.avatar) || '/images/default-avatar.jpg' %>)" title="Owned by <%- (owner && owner.name) || 'a deleted user' %>"> <div class="avatar d-block" style="background-image: url(<%- owner.avatar || '/images/default-avatar.jpg' %>)" title="Owned by <%- owner.name %>">
<span class="avatar-status <%- owner && !owner.is_disabled ? 'bg-green' : 'bg-red' %>"></span> <span class="avatar-status <%- owner.is_disabled ? 'bg-red' : 'bg-green' %>"></span>
</div> </div>
</td> </td>
<td> <td>

View File

@ -1,10 +1,10 @@
<div class="modal-content"> <div class="modal-content">
<form> <div class="modal-header">
<div class="modal-header"> <h5 class="modal-title"><%- i18n('users', 'form-title', {id: id}) %></h5>
<h5 class="modal-title"><%- i18n('users', 'form-title', {id: id}) %></h5> <button type="button" class="close cancel" aria-label="Close" data-dismiss="modal">&nbsp;</button>
<button type="button" class="close cancel" aria-label="Close" data-dismiss="modal">&nbsp;</button> </div>
</div> <div class="modal-body">
<div class="modal-body"> <form>
<div class="row"> <div class="row">
<div class="col-sm-6 col-md-6"> <div class="col-sm-6 col-md-6">
<div class="form-group"> <div class="form-group">
@ -49,10 +49,10 @@
</div> </div>
<% } %> <% } %>
</div> </div>
</div> </form>
<div class="modal-footer"> </div>
<button type="button" class="btn btn-secondary cancel" data-dismiss="modal"><%- i18n('str', 'cancel') %></button> <div class="modal-footer">
<button type="submit" class="btn btn-teal save"><%- i18n('str', 'save') %></button> <button type="button" class="btn btn-secondary cancel" data-dismiss="modal"><%- i18n('str', 'cancel') %></button>
</div> <button type="button" class="btn btn-teal save"><%- i18n('str', 'save') %></button>
</form> </div>
</div> </div>

View File

@ -19,7 +19,7 @@ module.exports = Mn.View.extend({
events: { events: {
'submit @ui.form': function (e) { 'click @ui.save': function (e) {
e.preventDefault(); e.preventDefault();
this.ui.error.hide(); this.ui.error.hide();
let view = this; let view = this;

View File

@ -131,6 +131,8 @@
"help-content": "A Proxy Host is the incoming endpoint for a web service that you want to forward.\nIt provides optional SSL termination for your service that might not have SSL support built in.\nProxy Hosts are the most common use for the Nginx Proxy Manager.", "help-content": "A Proxy Host is the incoming endpoint for a web service that you want to forward.\nIt provides optional SSL termination for your service that might not have SSL support built in.\nProxy Hosts are the most common use for the Nginx Proxy Manager.",
"access-list": "Access List", "access-list": "Access List",
"allow-websocket-upgrade": "Websockets Support", "allow-websocket-upgrade": "Websockets Support",
"enable-proxy-protocol": "Enable Proxy Protocol",
"load-balancer-ip": "Load balancer or TCP proxy IP / CIDR range",
"ignore-invalid-upstream-ssl": "Ignore Invalid SSL", "ignore-invalid-upstream-ssl": "Ignore Invalid SSL",
"custom-forward-host-help": "Add a path for sub-folder forwarding.\nExample: 203.0.113.25/path/", "custom-forward-host-help": "Add a path for sub-folder forwarding.\nExample: 203.0.113.25/path/",
"search": "Search Host…" "search": "Search Host…"
@ -175,6 +177,8 @@
"protocol": "Protocol", "protocol": "Protocol",
"tcp": "TCP", "tcp": "TCP",
"udp": "UDP", "udp": "UDP",
"enable-proxy-protocol": "Enable Proxy Protocol",
"load-balancer-ip": "Load balancer or TCP proxy IP / CIDR range",
"delete": "Delete Stream", "delete": "Delete Stream",
"delete-confirm": "Are you sure you want to delete this Stream?", "delete-confirm": "Are you sure you want to delete this Stream?",
"help-title": "What is a Stream?", "help-title": "What is a Stream?",

View File

@ -19,6 +19,8 @@ const model = Backbone.Model.extend({
hsts_subdomains: false, hsts_subdomains: false,
caching_enabled: false, caching_enabled: false,
allow_websocket_upgrade: false, allow_websocket_upgrade: false,
enable_proxy_protocol: false,
load_balancer_ip: '',
block_exploits: false, block_exploits: false,
http2_support: false, http2_support: false,
advanced_config: '', advanced_config: '',

View File

@ -5,18 +5,20 @@ const model = Backbone.Model.extend({
defaults: function () { defaults: function () {
return { return {
id: undefined, id: undefined,
created_on: null, created_on: null,
modified_on: null, modified_on: null,
incoming_port: null, incoming_port: null,
forwarding_host: null, forwarding_host: null,
forwarding_port: null, forwarding_port: null,
tcp_forwarding: true, tcp_forwarding: true,
udp_forwarding: false, udp_forwarding: false,
enabled: true, enable_proxy_protocol: false,
meta: {}, load_balancer_ip: "",
enabled: true,
meta: {},
// The following are expansions: // The following are expansions:
owner: null owner: null
}; };
} }
}); });

View File

@ -2648,9 +2648,9 @@ electron-to-chromium@^1.3.47:
integrity sha512-67V62Z4CFOiAtox+o+tosGfVk0QX4DJgH609tjT8QymbJZVAI/jWnAthnr8c5hnRNziIRwkc9EMQYejiVz3/9Q== integrity sha512-67V62Z4CFOiAtox+o+tosGfVk0QX4DJgH609tjT8QymbJZVAI/jWnAthnr8c5hnRNziIRwkc9EMQYejiVz3/9Q==
elliptic@^6.5.3, elliptic@^6.5.4: elliptic@^6.5.3, elliptic@^6.5.4:
version "6.6.0" version "6.5.7"
resolved "https://registry.yarnpkg.com/elliptic/-/elliptic-6.6.0.tgz#5919ec723286c1edf28685aa89261d4761afa210" resolved "https://registry.yarnpkg.com/elliptic/-/elliptic-6.5.7.tgz#8ec4da2cb2939926a1b9a73619d768207e647c8b"
integrity sha512-dpwoQcLc/2WLQvJvLRHKZ+f9FgOdjnq11rurqwekGQygGPsYSK29OMMD2WalatiqQ+XGFDglTNixpPfI+lpaAA== integrity sha512-ESVCtTwiA+XhY3wyh24QqRGBoP3rEdDUl3EDUUo9tft074fi19IrdpH7hLCMMP3CIj7jb3W96rn8lt/BqIlt5Q==
dependencies: dependencies:
bn.js "^4.11.9" bn.js "^4.11.9"
brorand "^1.1.0" brorand "^1.1.0"

View File

@ -7,7 +7,7 @@
"credentials": "dns_acmedns_api_url = http://acmedns-server/\ndns_acmedns_registration_file = /data/acme-registration.json", "credentials": "dns_acmedns_api_url = http://acmedns-server/\ndns_acmedns_registration_file = /data/acme-registration.json",
"full_plugin_name": "dns-acmedns" "full_plugin_name": "dns-acmedns"
}, },
"active24": { "active24":{
"name": "Active24", "name": "Active24",
"package_name": "certbot-dns-active24", "package_name": "certbot-dns-active24",
"version": "~=1.5.1", "version": "~=1.5.1",
@ -18,7 +18,7 @@
"aliyun": { "aliyun": {
"name": "Aliyun", "name": "Aliyun",
"package_name": "certbot-dns-aliyun", "package_name": "certbot-dns-aliyun",
"version": "~=2.0.0", "version": "~=0.38.1",
"dependencies": "", "dependencies": "",
"credentials": "dns_aliyun_access_key = 12345678\ndns_aliyun_access_key_secret = 1234567890abcdef1234567890abcdef", "credentials": "dns_aliyun_access_key = 12345678\ndns_aliyun_access_key_secret = 1234567890abcdef1234567890abcdef",
"full_plugin_name": "dns-aliyun" "full_plugin_name": "dns-aliyun"
@ -31,14 +31,6 @@
"credentials": "# This plugin supported API authentication using either Service Principals or utilizing a Managed Identity assigned to the virtual machine.\n# Regardless which authentication method used, the identity will need the “DNS Zone Contributor” role assigned to it.\n# As multiple Azure DNS Zones in multiple resource groups can exist, the config file needs a mapping of zone to resource group ID. Multiple zones -> ID mappings can be listed by using the key dns_azure_zoneX where X is a unique number. At least 1 zone mapping is required.\n\n# Using a service principal (option 1)\ndns_azure_sp_client_id = 912ce44a-0156-4669-ae22-c16a17d34ca5\ndns_azure_sp_client_secret = E-xqXU83Y-jzTI6xe9fs2YC~mck3ZzUih9\ndns_azure_tenant_id = ed1090f3-ab18-4b12-816c-599af8a88cf7\n\n# Using used assigned MSI (option 2)\n# dns_azure_msi_client_id = 912ce44a-0156-4669-ae22-c16a17d34ca5\n\n# Using system assigned MSI (option 3)\n# dns_azure_msi_system_assigned = true\n\n# Zones (at least one always required)\ndns_azure_zone1 = example.com:/subscriptions/c135abce-d87d-48df-936c-15596c6968a5/resourceGroups/dns1\ndns_azure_zone2 = example.org:/subscriptions/99800903-fb14-4992-9aff-12eaf2744622/resourceGroups/dns2", "credentials": "# This plugin supported API authentication using either Service Principals or utilizing a Managed Identity assigned to the virtual machine.\n# Regardless which authentication method used, the identity will need the “DNS Zone Contributor” role assigned to it.\n# As multiple Azure DNS Zones in multiple resource groups can exist, the config file needs a mapping of zone to resource group ID. Multiple zones -> ID mappings can be listed by using the key dns_azure_zoneX where X is a unique number. At least 1 zone mapping is required.\n\n# Using a service principal (option 1)\ndns_azure_sp_client_id = 912ce44a-0156-4669-ae22-c16a17d34ca5\ndns_azure_sp_client_secret = E-xqXU83Y-jzTI6xe9fs2YC~mck3ZzUih9\ndns_azure_tenant_id = ed1090f3-ab18-4b12-816c-599af8a88cf7\n\n# Using used assigned MSI (option 2)\n# dns_azure_msi_client_id = 912ce44a-0156-4669-ae22-c16a17d34ca5\n\n# Using system assigned MSI (option 3)\n# dns_azure_msi_system_assigned = true\n\n# Zones (at least one always required)\ndns_azure_zone1 = example.com:/subscriptions/c135abce-d87d-48df-936c-15596c6968a5/resourceGroups/dns1\ndns_azure_zone2 = example.org:/subscriptions/99800903-fb14-4992-9aff-12eaf2744622/resourceGroups/dns2",
"full_plugin_name": "dns-azure" "full_plugin_name": "dns-azure"
}, },
"beget": {
"name":"Beget",
"package_name": "certbot-beget-plugin",
"version": "~=1.0.0.dev9",
"dependencies": "",
"credentials": "# Beget API credentials used by Certbot\nbeget_plugin_username = username\nbeget_plugin_password = password",
"full_plugin_name": "beget-plugin"
},
"bunny": { "bunny": {
"name": "bunny.net", "name": "bunny.net",
"package_name": "certbot-dns-bunny", "package_name": "certbot-dns-bunny",
@ -255,14 +247,6 @@
"credentials": "dns_hetzner_api_token = 0123456789abcdef0123456789abcdef", "credentials": "dns_hetzner_api_token = 0123456789abcdef0123456789abcdef",
"full_plugin_name": "dns-hetzner" "full_plugin_name": "dns-hetzner"
}, },
"hostingnl": {
"name": "Hosting.nl",
"package_name": "certbot-dns-hostingnl",
"version": "~=0.1.5",
"dependencies": "",
"credentials": "dns_hostingnl_api_key = 0123456789abcdef0123456789abcdef",
"full_plugin_name": "dns-hostingnl"
},
"hover": { "hover": {
"name": "Hover", "name": "Hover",
"package_name": "certbot-dns-hover", "package_name": "certbot-dns-hover",
@ -319,14 +303,6 @@
"credentials": "dns_joker_username = <Dynamic DNS Authentication Username>\ndns_joker_password = <Dynamic DNS Authentication Password>\ndns_joker_domain = <Dynamic DNS Domain>", "credentials": "dns_joker_username = <Dynamic DNS Authentication Username>\ndns_joker_password = <Dynamic DNS Authentication Password>\ndns_joker_domain = <Dynamic DNS Domain>",
"full_plugin_name": "dns-joker" "full_plugin_name": "dns-joker"
}, },
"leaseweb": {
"name": "LeaseWeb",
"package_name": "certbot-dns-leaseweb",
"version": "~=1.0.1",
"dependencies": "",
"credentials": "dns_leaseweb_api_token = 01234556789",
"full_plugin_name": "dns-leaseweb"
},
"linode": { "linode": {
"name": "Linode", "name": "Linode",
"package_name": "certbot-dns-linode", "package_name": "certbot-dns-linode",
@ -418,7 +394,7 @@
"porkbun": { "porkbun": {
"name": "Porkbun", "name": "Porkbun",
"package_name": "certbot-dns-porkbun", "package_name": "certbot-dns-porkbun",
"version": "~=0.9", "version": "~=0.2",
"dependencies": "", "dependencies": "",
"credentials": "dns_porkbun_key=your-porkbun-api-key\ndns_porkbun_secret=your-porkbun-api-secret", "credentials": "dns_porkbun_key=your-porkbun-api-key\ndns_porkbun_secret=your-porkbun-api-secret",
"full_plugin_name": "dns-porkbun" "full_plugin_name": "dns-porkbun"
@ -448,13 +424,13 @@
"full_plugin_name": "dns-rfc2136" "full_plugin_name": "dns-rfc2136"
}, },
"rockenstein": { "rockenstein": {
"name": "rockenstein AG", "name": "rockenstein AG",
"package_name": "certbot-dns-rockenstein", "package_name": "certbot-dns-rockenstein",
"version": "~=1.0.0", "version": "~=1.0.0",
"dependencies": "", "dependencies": "",
"credentials": "dns_rockenstein_token=<token>", "credentials": "dns_rockenstein_token=<token>",
"full_plugin_name": "dns-rockenstein" "full_plugin_name": "dns-rockenstein"
}, },
"route53": { "route53": {
"name": "Route 53 (Amazon)", "name": "Route 53 (Amazon)",
"package_name": "certbot-dns-route53", "package_name": "certbot-dns-route53",
@ -511,7 +487,7 @@
"credentials": "dns_websupport_identifier = <api_key>\ndns_websupport_secret_key = <secret>", "credentials": "dns_websupport_identifier = <api_key>\ndns_websupport_secret_key = <secret>",
"full_plugin_name": "dns-websupport" "full_plugin_name": "dns-websupport"
}, },
"wedos": { "wedos":{
"name": "Wedos", "name": "Wedos",
"package_name": "certbot-dns-wedos", "package_name": "certbot-dns-wedos",
"version": "~=2.2", "version": "~=2.2",

View File

@ -11,7 +11,7 @@ YELLOW='\E[1;33m'
export BLUE CYAN GREEN RED RESET YELLOW export BLUE CYAN GREEN RED RESET YELLOW
# Docker Compose # Docker Compose
COMPOSE_PROJECT_NAME="npm2dev" COMPOSE_PROJECT_NAME="npmdev"
COMPOSE_FILE="docker/docker-compose.dev.yml" COMPOSE_FILE="docker/docker-compose.dev.yml"
export COMPOSE_FILE COMPOSE_PROJECT_NAME export COMPOSE_FILE COMPOSE_PROJECT_NAME

View File

@ -67,8 +67,6 @@ printf "nameserver %s\noptions ndots:0" "${DNSROUTER_IP}" > "${LOCAL_RESOLVE}"
# bring up all remaining containers, except cypress! # bring up all remaining containers, except cypress!
docker-compose up -d --remove-orphans stepca squid docker-compose up -d --remove-orphans stepca squid
docker-compose pull db-mysql || true # ok to fail docker-compose pull db-mysql || true # ok to fail
docker-compose pull db-postgres || true # ok to fail
docker-compose pull authentik authentik-redis authentik-ldap || true # ok to fail
docker-compose up -d --remove-orphans --pull=never fullstack docker-compose up -d --remove-orphans --pull=never fullstack
# wait for main container to be healthy # wait for main container to be healthy

View File

@ -36,11 +36,12 @@ if hash docker-compose 2>/dev/null; then
# bring up all remaining containers, except cypress! # bring up all remaining containers, except cypress!
docker-compose up -d --remove-orphans stepca squid docker-compose up -d --remove-orphans stepca squid
docker-compose pull db db-postgres authentik-redis authentik authentik-worker authentik-ldap docker-compose pull db
docker-compose build --pull --parallel fullstack docker-compose up -d --remove-orphans --pull=never fullstack
docker-compose up -d --remove-orphans fullstack
docker-compose up -d --remove-orphans swagger docker-compose up -d --remove-orphans swagger
# docker-compose up -d --remove-orphans --force-recreate --build
# wait for main container to be healthy # wait for main container to be healthy
bash "$DIR/wait-healthy" "$(docker-compose ps --all -q fullstack)" 120 bash "$DIR/wait-healthy" "$(docker-compose ps --all -q fullstack)" 120
@ -52,10 +53,10 @@ if hash docker-compose 2>/dev/null; then
if [ "$1" == "-f" ]; then if [ "$1" == "-f" ]; then
echo -e "${BLUE} ${YELLOW}Following Backend Container:${RESET}" echo -e "${BLUE} ${YELLOW}Following Backend Container:${RESET}"
docker logs -f npm2dev.core docker logs -f npm_core
else else
echo -e "${YELLOW}Hint:${RESET} You can follow the output of some of the containers with:" echo -e "${YELLOW}Hint:${RESET} You can follow the output of some of the containers with:"
echo " docker logs -f npm2dev.core" echo " docker logs -f npm_core"
fi fi
else else
echo -e "${RED} docker-compose command is not available${RESET}" echo -e "${RED} docker-compose command is not available${RESET}"

View File

@ -1,64 +0,0 @@
/// <reference types="cypress" />
describe('LDAP with Authentik', () => {
let token;
if (Cypress.env('skipStackCheck') === 'true' || Cypress.env('stack') === 'postgres') {
before(() => {
cy.getToken().then((tok) => {
token = tok;
// cy.task('backendApiPut', {
// token: token,
// path: '/api/settings/ldap-auth',
// data: {
// value: {
// host: 'authentik-ldap:3389',
// base_dn: 'ou=users,DC=ldap,DC=goauthentik,DC=io',
// user_dn: 'cn={{USERNAME}},ou=users,DC=ldap,DC=goauthentik,DC=io',
// email_property: 'mail',
// name_property: 'sn',
// self_filter: '(&(cn={{USERNAME}})(ak-active=TRUE))',
// auto_create_user: true
// }
// }
// }).then((data) => {
// cy.validateSwaggerSchema('put', 200, '/settings/{name}', data);
// expect(data.result).to.have.property('id');
// expect(data.result.id).to.be.greaterThan(0);
// });
// cy.task('backendApiPut', {
// token: token,
// path: '/api/settings/auth-methods',
// data: {
// value: [
// 'local',
// 'ldap'
// ]
// }
// }).then((data) => {
// cy.validateSwaggerSchema('put', 200, '/settings/{name}', data);
// expect(data.result).to.have.property('id');
// expect(data.result.id).to.be.greaterThan(0);
// });
});
});
it.skip('Should log in with LDAP', function() {
// cy.task('backendApiPost', {
// token: token,
// path: '/api/auth',
// data: {
// // Authentik LDAP creds:
// type: 'ldap',
// identity: 'cypress',
// secret: 'fqXBfUYqHvYqiwBHWW7f'
// }
// }).then((data) => {
// cy.validateSwaggerSchema('post', 200, '/auth', data);
// expect(data.result).to.have.property('token');
// });
});
}
});

View File

@ -1,97 +0,0 @@
/// <reference types="cypress" />
describe('OAuth with Authentik', () => {
let token;
if (Cypress.env('skipStackCheck') === 'true' || Cypress.env('stack') === 'postgres') {
before(() => {
cy.getToken().then((tok) => {
token = tok;
// cy.task('backendApiPut', {
// token: token,
// path: '/api/settings/oauth-auth',
// data: {
// value: {
// client_id: '7iO2AvuUp9JxiSVkCcjiIbQn4mHmUMBj7yU8EjqU',
// client_secret: 'VUMZzaGTrmXJ8PLksyqzyZ6lrtz04VvejFhPMBP9hGZNCMrn2LLBanySs4ta7XGrDr05xexPyZT1XThaf4ubg00WqvHRVvlu4Naa1aMootNmSRx3VAk6RSslUJmGyHzq',
// authorization_url: 'http://authentik:9000/application/o/authorize/',
// resource_url: 'http://authentik:9000/application/o/userinfo/',
// token_url: 'http://authentik:9000/application/o/token/',
// logout_url: 'http://authentik:9000/application/o/npm/end-session/',
// identifier: 'preferred_username',
// scopes: [],
// auto_create_user: true
// }
// }
// }).then((data) => {
// cy.validateSwaggerSchema('put', 200, '/settings/{name}', data);
// expect(data.result).to.have.property('id');
// expect(data.result.id).to.be.greaterThan(0);
// });
// cy.task('backendApiPut', {
// token: token,
// path: '/api/settings/auth-methods',
// data: {
// value: [
// 'local',
// 'oauth'
// ]
// }
// }).then((data) => {
// cy.validateSwaggerSchema('put', 200, '/settings/{name}', data);
// expect(data.result).to.have.property('id');
// expect(data.result.id).to.be.greaterThan(0);
// });
});
});
it.skip('Should log in with OAuth', function() {
// cy.task('backendApiGet', {
// path: '/oauth/login?redirect_base=' + encodeURI(Cypress.config('baseUrl')),
// }).then((data) => {
// expect(data).to.have.property('result');
// cy.origin('http://authentik:9000', {args: data.result}, (url) => {
// cy.visit(url);
// cy.get('ak-flow-executor')
// .shadow()
// .find('ak-stage-identification')
// .shadow()
// .find('input[name="uidField"]', { visible: true })
// .type('cypress');
// cy.get('ak-flow-executor')
// .shadow()
// .find('ak-stage-identification')
// .shadow()
// .find('button[type="submit"]', { visible: true })
// .click();
// cy.get('ak-flow-executor')
// .shadow()
// .find('ak-stage-password')
// .shadow()
// .find('input[name="password"]', { visible: true })
// .type('fqXBfUYqHvYqiwBHWW7f');
// cy.get('ak-flow-executor')
// .shadow()
// .find('ak-stage-password')
// .shadow()
// .find('button[type="submit"]', { visible: true })
// .click();
// })
// // we should be logged in
// cy.get('#root p.chakra-text')
// .first()
// .should('have.text', 'Nginx Proxy Manager');
// // logout:
// cy.clearLocalStorage();
// });
});
}
});

View File

@ -29,6 +29,8 @@ describe('Proxy Hosts endpoints', () => {
block_exploits: false, block_exploits: false,
caching_enabled: false, caching_enabled: false,
allow_websocket_upgrade: false, allow_websocket_upgrade: false,
enable_proxy_protocol: false,
load_balancer_ip: '',
http2_support: false, http2_support: false,
hsts_enabled: false, hsts_enabled: false,
hsts_subdomains: false, hsts_subdomains: false,

View File

@ -132,9 +132,9 @@
integrity sha512-BsWiH1yFGjXXS2yvrf5LyuoSIIbPrGUWob917o+BTKuZ7qJdxX8aJLRxs1fS9n6r7vESrq1OUqb68dANcFXuQQ== integrity sha512-BsWiH1yFGjXXS2yvrf5LyuoSIIbPrGUWob917o+BTKuZ7qJdxX8aJLRxs1fS9n6r7vESrq1OUqb68dANcFXuQQ==
"@eslint/plugin-kit@^0.2.0": "@eslint/plugin-kit@^0.2.0":
version "0.2.3" version "0.2.0"
resolved "https://registry.yarnpkg.com/@eslint/plugin-kit/-/plugin-kit-0.2.3.tgz#812980a6a41ecf3a8341719f92a6d1e784a2e0e8" resolved "https://registry.yarnpkg.com/@eslint/plugin-kit/-/plugin-kit-0.2.0.tgz#8712dccae365d24e9eeecb7b346f85e750ba343d"
integrity sha512-2b/g5hRmpbb1o4GnTZax9N9m0FXzz9OV42ZzI4rDDMDuHUqigAiQCEWChBWCY4ztAGVRjoWT19v0yMmc5/L5kA== integrity sha512-vH9PiIMMwvhCx31Af3HiGzsVNULDbyVkHXwlemn/B0TFj/00ho3y55efXrUZTfQipxoHC5u4xq6zblww1zm1Ig==
dependencies: dependencies:
levn "^0.4.1" levn "^0.4.1"
@ -628,9 +628,9 @@ core-util-is@1.0.2:
integrity sha1-tf1UIgqivFq1eqtxQMlAdUUDwac= integrity sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=
cross-spawn@^7.0.0, cross-spawn@^7.0.2: cross-spawn@^7.0.0, cross-spawn@^7.0.2:
version "7.0.6" version "7.0.3"
resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.6.tgz#8a58fe78f00dcd70c370451759dfbfaf03e8ee9f" resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6"
integrity sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA== integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==
dependencies: dependencies:
path-key "^3.1.0" path-key "^3.1.0"
shebang-command "^2.0.0" shebang-command "^2.0.0"