Compare commits

11 Commits

| SHA1 | Message | Date |
|------|---------|------|
| a91dcb144d | Use model for db defaults as sqlite doesn't support them | 2021-09-09 08:12:50 +10:00 |
| e7f7be2a2b | OpenIDC: Trigger the change event of the "restrict users" toggle when enabling/disabling oidc. If this is not triggered and the OIDC toggle is enabled, the "disabled" property will be removed from the restricted user list input, causing an error when trying to submit the form without it. | 2021-09-09 08:12:50 +10:00 |
| 076d89b5b5 | Use localized strings for the OpenID Connect texts. | 2021-09-09 08:12:50 +10:00 |
| 8539930f89 | Updated the docs to add a section about OpenID Connect | 2021-09-09 08:12:50 +10:00 |
| 87d9babbd3 | Fix conditionals in the liquid template for OpenID Connect conf. | 2021-09-09 08:12:50 +10:00 |
| 9f2d3a1737 | Manually set the default values for the OpenID Connect columns. There is a Knex issue (https://github.com/knex/knex/issues/2649) that prevents .defaultTo from working for text columns (see the sketch below). | 2021-09-09 08:12:50 +10:00 |
| daf399163c | Allow limiting OpenID Connect auth to a list of users. | 2021-09-09 08:12:50 +10:00 |
| cdf702e545 | Add a field to specify a list of allowed emails when using OpenID Connect auth. | 2021-09-09 08:12:50 +10:00 |
| 5811345050 | Use OpenResty instead of plain nginx to support OpenID Connect authorization. | 2021-09-09 08:12:48 +10:00 |
| 53792a5cf7 | Add database columns to store OpenID Connect information for Proxy Hosts. | 2021-09-09 08:12:19 +10:00 |
| 8e10b7da37 | Add UI tab for specifying OpenID Connect options for proxy hosts. | 2021-09-09 08:12:19 +10:00 |
150 changed files with 5344 additions and 4748 deletions
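Two of the commits above work around the same Knex limitation: `.defaultTo()` has no effect for TEXT columns (https://github.com/knex/knex/issues/2649), and sqlite cannot express these defaults at the schema level either, so the defaults end up being applied in application code. A minimal sketch of the model-level approach, assuming Objection.js (which this backend already uses) and hypothetical `openidc_*` column names rather than the PR's exact schema:

```js
const { Model } = require('objection');

class ProxyHost extends Model {
    static get tableName() {
        return 'proxy_host';
    }

    // Apply defaults here because .defaultTo() is ignored for TEXT
    // columns (knex#2649) and sqlite can't set them as column defaults.
    // The column names below are illustrative only.
    $beforeInsert() {
        if (typeof this.openidc_enabled === 'undefined') {
            this.openidc_enabled = false;
        }
        if (typeof this.openidc_allowed_users === 'undefined') {
            this.openidc_allowed_users = [];
        }
    }
}

module.exports = ProxyHost;
```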

View File

@@ -1 +1 @@
2.10.0
2.9.8

Jenkinsfile (vendored, 101 lines changed)
View File

@@ -1,9 +1,3 @@
import groovy.transform.Field
@Field
def shOutput = ""
def buildxPushTags = ""
pipeline {
agent {
label 'docker-multiarch'
@@ -14,16 +8,14 @@ pipeline {
ansiColor('xterm')
}
environment {
IMAGE = 'nginx-proxy-manager'
IMAGE = "nginx-proxy-manager"
BUILD_VERSION = getVersion()
MAJOR_VERSION = '2'
MAJOR_VERSION = "2"
BRANCH_LOWER = "${BRANCH_NAME.toLowerCase().replaceAll('/', '-')}"
COMPOSE_PROJECT_NAME = "npm_${BRANCH_LOWER}_${BUILD_NUMBER}"
COMPOSE_FILE = 'docker/docker-compose.ci.yml'
COMPOSE_INTERACTIVE_NO_CLI = 1
BUILDX_NAME = "${COMPOSE_PROJECT_NAME}"
DOCS_BUCKET = 'jc21-npm-site'
DOCS_CDN = 'EN1G6DEWZUTDT'
}
stages {
stage('Environment') {
@@ -34,7 +26,7 @@ pipeline {
}
steps {
script {
buildxPushTags = "-t docker.io/jc21/${IMAGE}:${BUILD_VERSION} -t docker.io/jc21/${IMAGE}:${MAJOR_VERSION} -t docker.io/jc21/${IMAGE}:latest"
env.BUILDX_PUSH_TAGS = "-t docker.io/jc21/${IMAGE}:${BUILD_VERSION} -t docker.io/jc21/${IMAGE}:${MAJOR_VERSION} -t docker.io/jc21/${IMAGE}:latest"
}
}
}
@@ -47,7 +39,7 @@ pipeline {
steps {
script {
// Defaults to the Branch name, which applies to all branches AND PRs
buildxPushTags = "-t docker.io/jc21/${IMAGE}:github-${BRANCH_LOWER}"
env.BUILDX_PUSH_TAGS = "-t docker.io/jc21/${IMAGE}:github-${BRANCH_LOWER}"
}
}
}
@@ -62,28 +54,34 @@ pipeline {
}
}
}
stage('Build and Test') {
stage('Frontend') {
steps {
script {
// Frontend and Backend
def shStatusCode = sh(label: 'Checking and Building', returnStatus: true, script: '''
set -e
./scripts/ci/frontend-build > ${WORKSPACE}/tmp-sh-build 2>&1
./scripts/ci/test-and-build > ${WORKSPACE}/tmp-sh-build 2>&1
''')
shOutput = readFile "${env.WORKSPACE}/tmp-sh-build"
if (shStatusCode != 0) {
error "${shOutput}"
}
}
sh './scripts/frontend-build'
}
post {
always {
sh 'rm -f ${WORKSPACE}/tmp-sh-build'
}
failure {
npmGithubPrComment("CI Error:\n\n```\n${shOutput}\n```", true)
}
}
stage('Backend') {
steps {
echo 'Checking Syntax ...'
// See: https://github.com/yarnpkg/yarn/issues/3254
sh '''docker run --rm \\
-v "$(pwd)/backend:/app" \\
-v "$(pwd)/global:/app/global" \\
-w /app \\
node:latest \\
sh -c "yarn install && yarn eslint . && rm -rf node_modules"
'''
echo 'Docker Build ...'
sh '''docker build --pull --no-cache --squash --compress \\
-t "${IMAGE}:ci-${BUILD_NUMBER}" \\
-f docker/Dockerfile \\
--build-arg TARGETPLATFORM=linux/amd64 \\
--build-arg BUILDPLATFORM=linux/amd64 \\
--build-arg BUILD_VERSION="${BUILD_VERSION}" \\
--build-arg BUILD_COMMIT="${BUILD_COMMIT}" \\
--build-arg BUILD_DATE="$(date '+%Y-%m-%d %T %Z')" \\
.
'''
}
}
stage('Integration Tests Sqlite') {
@@ -102,8 +100,8 @@ pipeline {
always {
// Dumps to analyze later
sh 'mkdir -p debug'
sh 'docker-compose logs fullstack-sqlite > debug/docker_fullstack_sqlite.log'
sh 'docker-compose logs db > debug/docker_db.log'
sh 'docker-compose logs fullstack-sqlite | gzip > debug/docker_fullstack_sqlite.log.gz'
sh 'docker-compose logs db | gzip > debug/docker_db.log.gz'
// Cypress videos and screenshot artifacts
dir(path: 'test/results') {
archiveArtifacts allowEmptyArchive: true, artifacts: '**/*', excludes: '**/*.xml'
@@ -128,8 +126,8 @@ pipeline {
always {
// Dumps to analyze later
sh 'mkdir -p debug'
sh 'docker-compose logs fullstack-mysql > debug/docker_fullstack_mysql.log'
sh 'docker-compose logs db > debug/docker_db.log'
sh 'docker-compose logs fullstack-mysql | gzip > debug/docker_fullstack_mysql.log.gz'
sh 'docker-compose logs db | gzip > debug/docker_db.log.gz'
// Cypress videos and screenshot artifacts
dir(path: 'test/results') {
archiveArtifacts allowEmptyArchive: true, artifacts: '**/*', excludes: '**/*.xml'
@@ -165,8 +163,10 @@ pipeline {
}
steps {
withCredentials([usernamePassword(credentialsId: 'jc21-dockerhub', passwordVariable: 'dpass', usernameVariable: 'duser')]) {
sh 'docker login -u "${duser}" -p "${dpass}"'
sh "./scripts/buildx --push ${buildxPushTags}"
// Docker Login
sh "docker login -u '${duser}' -p '${dpass}'"
// Buildx with push from cache
sh "./scripts/buildx --push ${BUILDX_PUSH_TAGS}"
}
}
}
@@ -180,7 +180,26 @@ pipeline {
}
}
steps {
npmDocsRelease("$DOCS_BUCKET", "$DOCS_CDN")
withCredentials([[$class: 'AmazonWebServicesCredentialsBinding', accessKeyVariable: 'AWS_ACCESS_KEY_ID', credentialsId: 'npm-s3-docs', secretKeyVariable: 'AWS_SECRET_ACCESS_KEY']]) {
sh """docker run --rm \\
--name \${COMPOSE_PROJECT_NAME}-docs-upload \\
-e S3_BUCKET=jc21-npm-site \\
-e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \\
-e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \\
-v \$(pwd):/app \\
-w /app \\
jc21/ci-tools \\
scripts/docs-upload /app/docs/.vuepress/dist/
"""
sh """docker run --rm \\
--name \${COMPOSE_PROJECT_NAME}-docs-invalidate \\
-e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \\
-e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \\
jc21/ci-tools \\
aws cloudfront create-invalidation --distribution-id EN1G6DEWZUTDT --paths '/*'
"""
}
}
}
stage('PR Comment') {
@@ -194,14 +213,14 @@ pipeline {
}
steps {
script {
npmGithubPrComment("Docker Image for build ${BUILD_NUMBER} is available on [DockerHub](https://cloud.docker.com/repository/docker/jc21/${IMAGE}) as `jc21/${IMAGE}:github-${BRANCH_LOWER}`\n\n**Note:** ensure you backup your NPM instance before testing this PR image! Especially if this PR contains database changes.", true)
def comment = pullRequest.comment("This is an automated message from CI:\n\nDocker Image for build ${BUILD_NUMBER} is available on [DockerHub](https://cloud.docker.com/repository/docker/jc21/${IMAGE}) as `jc21/${IMAGE}:github-${BRANCH_LOWER}`\n\n**Note:** ensure you backup your NPM instance before testing this PR image! Especially if this PR contains database changes.")
}
}
}
}
post {
always {
sh 'docker-compose down --remove-orphans --volumes -t 30'
sh 'docker-compose down --rmi all --remove-orphans --volumes -t 30'
sh 'echo Reverting ownership'
sh 'docker run --rm -v $(pwd):/data jc21/ci-tools chown -R $(id -u):$(id -g) /data'
}

README.md (412 lines changed)
View File

@@ -1,13 +1,22 @@
<p align="center">
<img src="https://nginxproxymanager.com/github.png">
<br><br>
<img src="https://img.shields.io/badge/version-2.10.0-green.svg?style=for-the-badge">
<img src="https://img.shields.io/badge/version-2.9.8-green.svg?style=for-the-badge">
<a href="https://hub.docker.com/repository/docker/jc21/nginx-proxy-manager">
<img src="https://img.shields.io/docker/stars/jc21/nginx-proxy-manager.svg?style=for-the-badge">
</a>
<a href="https://hub.docker.com/repository/docker/jc21/nginx-proxy-manager">
<img src="https://img.shields.io/docker/pulls/jc21/nginx-proxy-manager.svg?style=for-the-badge">
</a>
<a href="https://ci.nginxproxymanager.com/blue/organizations/jenkins/nginx-proxy-manager/branches/">
<img src="https://img.shields.io/jenkins/build?jobUrl=https%3A%2F%2Fci.nginxproxymanager.com%2Fjob%2Fnginx-proxy-manager%2Fjob%2Fmaster&style=for-the-badge">
</a>
<a href="https://gitter.im/nginx-proxy-manager/community">
<img alt="Gitter" src="https://img.shields.io/gitter/room/nginx-proxy-manager/community?style=for-the-badge">
</a>
<a href="https://reddit.com/r/nginxproxymanager">
<img alt="Reddit" src="https://img.shields.io/reddit/subreddit-subscribers/nginxproxymanager?label=Reddit%20Community&style=for-the-badge">
</a>
</p>
This project comes as a pre-built docker image that enables you to easily forward to your websites
@@ -56,7 +65,7 @@ I won't go in to too much detail here but here are the basics for someone new to
2. Create a docker-compose.yml file similar to this:
```yml
version: '3.8'
version: '3'
services:
app:
image: 'jc21/nginx-proxy-manager:latest'
@@ -65,21 +74,31 @@ services:
- '80:80'
- '81:81'
- '443:443'
environment:
DB_MYSQL_HOST: "db"
DB_MYSQL_PORT: 3306
DB_MYSQL_USER: "npm"
DB_MYSQL_PASSWORD: "npm"
DB_MYSQL_NAME: "npm"
volumes:
- ./data:/data
- ./letsencrypt:/etc/letsencrypt
db:
image: 'jc21/mariadb-aria:latest'
restart: unless-stopped
environment:
MYSQL_ROOT_PASSWORD: 'npm'
MYSQL_DATABASE: 'npm'
MYSQL_USER: 'npm'
MYSQL_PASSWORD: 'npm'
volumes:
- ./data/mysql:/var/lib/mysql
```
This is the bare minimum configuration required. See the [documentation](https://nginxproxymanager.com/setup/) for more.
3. Bring up your stack by running
3. Bring up your stack
```bash
docker-compose up -d
# If using docker-compose-plugin
docker compose up -d
```
4. Log in to the Admin UI
@@ -100,12 +119,371 @@ Immediately after logging in with this default user you will be asked to modify
## Contributors
Special thanks to [all of our contributors](https://github.com/NginxProxyManager/nginx-proxy-manager/graphs/contributors).
Special thanks to the following contributors:
## Getting Support
1. [Found a bug?](https://github.com/NginxProxyManager/nginx-proxy-manager/issues)
2. [Discussions](https://github.com/NginxProxyManager/nginx-proxy-manager/discussions)
3. [Development Gitter](https://gitter.im/nginx-proxy-manager/community)
4. [Reddit](https://reddit.com/r/nginxproxymanager)
<!-- prettier-ignore-start -->
<!-- markdownlint-disable -->
<table>
<tr>
<td align="center">
<a href="https://github.com/Subv">
<img src="https://avatars1.githubusercontent.com/u/357072?s=460&u=d8adcdc91d749ae53e177973ed9b6bb6c4c894a3&v=4" width="80" alt=""/>
<br /><sub><b>Sebastian Valle</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/Indemnity83">
<img src="https://avatars3.githubusercontent.com/u/35218?s=460&u=7082004ff35138157c868d7d9c683ccebfce5968&v=4" width="80" alt=""/>
<br /><sub><b>Kyle Klaus</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/theraw">
<img src="https://avatars1.githubusercontent.com/u/32969774?s=460&u=6b359971e15685fb0359e6a8c065a399b40dc228&v=4" width="80" alt=""/>
<br /><sub><b>ƬHE ЯAW</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/spalger">
<img src="https://avatars2.githubusercontent.com/u/1329312?s=400&u=565223e38f1c052afb4c5dcca3fcf1c63ba17ae7&v=4" width="80" alt=""/>
<br /><sub><b>Spencer</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/Xantios">
<img src="https://avatars3.githubusercontent.com/u/1507836?s=460&v=4" width="80" alt=""/>
<br /><sub><b>Xantios Krugor</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/dpanesso">
<img src="https://avatars2.githubusercontent.com/u/2687121?s=460&v=4" width="80" alt=""/>
<br /><sub><b>David Panesso</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/IronTooch">
<img src="https://avatars3.githubusercontent.com/u/27360514?s=460&u=69bf854a6647c55725f62ecb8d39249c6c0b2602&v=4" width="80" alt=""/>
<br /><sub><b>IronTooch</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center">
<a href="https://github.com/damianog">
<img src="https://avatars1.githubusercontent.com/u/2786682?s=460&u=76c6136fae797abb76b951cd8a246dcaecaf21af&v=4" width="80" alt=""/>
<br /><sub><b>Damiano</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/tfmm">
<img src="https://avatars3.githubusercontent.com/u/6880538?s=460&u=ce0160821cc4aa802df8395200f2d4956a5bc541&v=4" width="80" alt=""/>
<br /><sub><b>Russ</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/margaale">
<img src="https://avatars3.githubusercontent.com/u/20794934?s=460&v=4" width="80" alt=""/>
<br /><sub><b>Marcelo Castagna</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/Steven-Harris">
<img src="https://avatars2.githubusercontent.com/u/7720242?s=460&v=4" width="80" alt=""/>
<br /><sub><b>Steven Harris</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/jlesage">
<img src="https://avatars0.githubusercontent.com/u/1791123?s=460&v=4" width="80" alt=""/>
<br /><sub><b>Jocelyn Le Sage</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/cmer">
<img src="https://avatars0.githubusercontent.com/u/412?s=460&u=67dd8b2e3661bfd6f68ec1eaa5b9821bd8a321cd&v=4" width="80" alt=""/>
<br /><sub><b>Carl Mercier</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/the1ts">
<img src="https://avatars1.githubusercontent.com/u/84956?s=460&v=4" width="80" alt=""/>
<br /><sub><b>Paul Mansfield</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center">
<a href="https://github.com/OhHeyAlan">
<img src="https://avatars0.githubusercontent.com/u/11955126?s=460&u=fbaa5a1a4f73ef8960132c703349bfd037fe2630&v=4" width="80" alt=""/>
<br /><sub><b>OhHeyAlan</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/dogmatic69">
<img src="https://avatars2.githubusercontent.com/u/94674?s=460&u=ca7647de53145c6283b6373ade5dc94ba99347db&v=4" width="80" alt=""/>
<br /><sub><b>Carl Sutton</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/tg44">
<img src="https://avatars0.githubusercontent.com/u/31839?s=460&u=ad32f4cadfef5e5fb09cdfa4b7b7b36a99ba6811&v=4" width="80" alt=""/>
<br /><sub><b>Gergő Törcsvári</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/vrenjith">
<img src="https://avatars3.githubusercontent.com/u/2093241?s=460&u=96ce93a9bebabdd0a60a2dc96cd093a41d5edaba&v=4" width="80" alt=""/>
<br /><sub><b>vrenjith</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/duhruh">
<img src="https://avatars2.githubusercontent.com/u/1133969?s=460&u=c0691e6131ec6d516416c1c6fcedb5034f877bbe&v=4" width="80" alt=""/>
<br /><sub><b>David Rivera</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/jipjan">
<img src="https://avatars2.githubusercontent.com/u/1384618?s=460&v=4" width="80" alt=""/>
<br /><sub><b>Jaap-Jan de Wit</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/jmwebslave">
<img src="https://avatars2.githubusercontent.com/u/6118262?s=460&u=7db409c47135b1e141c366bbb03ed9fae6ac2638&v=4" width="80" alt=""/>
<br /><sub><b>James Morgan</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center">
<a href="https://github.com/chaptergy">
<img src="https://avatars2.githubusercontent.com/u/26956711?s=460&u=7d9adebabb6b4e7af7cb05d98d751087a372304b&v=4" width="80" alt=""/>
<br /><sub><b>chaptergy</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/Philip-Mooney">
<img src="https://avatars0.githubusercontent.com/u/48624631?s=460&v=4" width="80" alt=""/>
<br /><sub><b>Philip Mooney</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/WaterCalm">
<img src="https://avatars1.githubusercontent.com/u/23502129?s=400&v=4" width="80" alt=""/>
<br /><sub><b>WaterCalm</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/lebrou34">
<img src="https://avatars1.githubusercontent.com/u/16373103?s=460&v=4" width="80" alt=""/>
<br /><sub><b>lebrou34</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/lightglitch">
<img src="https://avatars0.githubusercontent.com/u/196953?s=460&v=4" width="80" alt=""/>
<br /><sub><b>Mário Franco</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/klutchell">
<img src="https://avatars3.githubusercontent.com/u/20458272?s=460&v=4" width="80" alt=""/>
<br /><sub><b>Kyle Harding</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/ahgraber">
<img src="https://avatars.githubusercontent.com/u/24922003?s=460&u=8376c9f00af9b6057ba4d2fb03b4f1b20a75277f&v=4" width="80" alt=""/>
<br /><sub><b>Alex Graber</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center">
<a href="https://github.com/MooBaloo">
<img src="https://avatars.githubusercontent.com/u/9493496?s=460&v=4" width="80" alt=""/>
<br /><sub><b>MooBaloo</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/Shuro">
<img src="https://avatars.githubusercontent.com/u/944030?s=460&v=4" width="80" alt=""/>
<br /><sub><b>Shuro</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/lorisbergeron">
<img src="https://avatars.githubusercontent.com/u/51918567?s=460&u=778e4ff284b7d7304450f98421c99f79298371fb&v=4" width="80" alt=""/>
<br /><sub><b>Loris Bergeron</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/hepelayo">
<img src="https://avatars.githubusercontent.com/u/8243119?v=4" width="80" alt=""/>
<br /><sub><b>hepelayo</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/jonasled">
<img src="https://avatars.githubusercontent.com/u/46790650?v=4" width="80" alt=""/>
<br /><sub><b>Jonas Leder</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/stegmannb">
<img src="https://avatars.githubusercontent.com/u/12850482?v=4" width="80" alt=""/>
<br /><sub><b>Bastian Stegmann</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/Stealthii">
<img src="https://avatars.githubusercontent.com/u/998920?v=4" width="80" alt=""/>
<br /><sub><b>Stealthii</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center">
<a href="https://github.com/thegamingninja">
<img src="https://avatars.githubusercontent.com/u/8020534?v=4" width="80" alt=""/>
<br /><sub><b>THEGamingninja</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/italobb">
<img src="https://avatars.githubusercontent.com/u/1801687?v=4" width="80" alt=""/>
<br /><sub><b>Italo Borssatto</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/GurjinderSingh">
<img src="https://avatars.githubusercontent.com/u/3470709?v=4" width="80" alt=""/>
<br /><sub><b>Gurjinder Singh</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/phantomski77">
<img src="https://avatars.githubusercontent.com/u/69464125?v=4" width="80" alt=""/>
<br /><sub><b>David Dosoudil</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/ijaron">
<img src="https://avatars.githubusercontent.com/u/5156472?v=4" width="80" alt=""/>
<br /><sub><b>ijaron</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/nielscil">
<img src="https://avatars.githubusercontent.com/u/9073152?v=4" width="80" alt=""/>
<br /><sub><b>Niels Bouma</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/ogarai">
<img src="https://avatars.githubusercontent.com/u/2949572?v=4" width="80" alt=""/>
<br /><sub><b>Orko Garai</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center">
<a href="https://github.com/baruffaldi">
<img src="https://avatars.githubusercontent.com/u/36949?v=4" width="80" alt=""/>
<br /><sub><b>Filippo Baruffaldi</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/bikram990">
<img src="https://avatars.githubusercontent.com/u/6782131?v=4" width="80" alt=""/>
<br /><sub><b>Bikramjeet Singh</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/razvanstoica89">
<img src="https://avatars.githubusercontent.com/u/28236583?v=4" width="80" alt=""/>
<br /><sub><b>Razvan Stoica</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/psharma04">
<img src="https://avatars.githubusercontent.com/u/22587474?v=4" width="80" alt=""/>
<br /><sub><b>RBXII3</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/demize">
<img src="https://avatars.githubusercontent.com/u/264914?v=4" width="80" alt=""/>
<br /><sub><b>demize</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/PUP-Loki">
<img src="https://avatars.githubusercontent.com/u/75944209?v=4" width="80" alt=""/>
<br /><sub><b>PUP-Loki</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/DSorlov">
<img src="https://avatars.githubusercontent.com/u/8133650?v=4" width="80" alt=""/>
<br /><sub><b>Daniel Sörlöv</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center">
<a href="https://github.com/Theyooo">
<img src="https://avatars.githubusercontent.com/u/58510131?v=4" width="80" alt=""/>
<br /><sub><b>Theyooo</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/mrdink">
<img src="https://avatars.githubusercontent.com/u/514751?v=4" width="80" alt=""/>
<br /><sub><b>Justin Peacock</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/ChrisTracy">
<img src="https://avatars.githubusercontent.com/u/58871574?v=4" width="80" alt=""/>
<br /><sub><b>Chris Tracy</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/Fuechslein">
<img src="https://avatars.githubusercontent.com/u/15112818?v=4" width="80" alt=""/>
<br /><sub><b>Fuechslein</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/nightah">
<img src="https://avatars.githubusercontent.com/u/3339418?v=4" width="80" alt=""/>
<br /><sub><b>Amir Zarrinkafsh</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/gabbe">
<img src="https://avatars.githubusercontent.com/u/156397?v=4" width="80" alt=""/>
<br /><sub><b>gabbe</b></sub>
</a>
</td>
<td align="center">
<a href="https://github.com/bmbvenom">
<img src="https://avatars.githubusercontent.com/u/20530371?v=4" width="80" alt=""/>
<br /><sub><b>bmbvenom</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center">
<a href="https://github.com/FMeinicke">
<img src="https://avatars.githubusercontent.com/u/42121639?v=4" width="80" alt=""/>
<br /><sub><b>Florian Meinicke</b></sub>
</a>
</td>
</tr>
</table>
<!-- markdownlint-enable -->
<!-- prettier-ignore-end -->

View File

@@ -2,7 +2,6 @@ const express = require('express');
const bodyParser = require('body-parser');
const fileUpload = require('express-fileupload');
const compression = require('compression');
const config = require('./lib/config');
const log = require('./logger').express;
/**
@@ -25,7 +24,7 @@ app.enable('trust proxy', ['loopback', 'linklocal', 'uniquelocal']);
app.enable('strict routing');
// pretty print JSON when not live
if (config.debug()) {
if (process.env.NODE_ENV !== 'production') {
app.set('json spaces', 2);
}
@@ -41,12 +40,13 @@ app.use(function (req, res, next) {
}
res.set({
'X-XSS-Protection': '1; mode=block',
'X-Content-Type-Options': 'nosniff',
'X-Frame-Options': x_frame_options,
'Cache-Control': 'no-cache, no-store, max-age=0, must-revalidate',
Pragma: 'no-cache',
Expires: 0
'Strict-Transport-Security': 'includeSubDomains; max-age=631138519; preload',
'X-XSS-Protection': '1; mode=block',
'X-Content-Type-Options': 'nosniff',
'X-Frame-Options': x_frame_options,
'Cache-Control': 'no-cache, no-store, max-age=0, must-revalidate',
Pragma: 'no-cache',
Expires: 0
});
next();
});
@@ -66,7 +66,7 @@ app.use(function (err, req, res, next) {
}
};
if (config.debug() || (req.baseUrl + req.path).includes('nginx/certificates')) {
if (process.env.NODE_ENV === 'development' || (req.baseUrl + req.path).includes('nginx/certificates')) {
payload.debug = {
stack: typeof err.stack !== 'undefined' && err.stack ? err.stack.split('\n') : null,
previous: err.previous
@@ -75,7 +75,7 @@ app.use(function (err, req, res, next) {
// Not every error is worth logging - but this is good for now until it gets annoying.
if (typeof err.stack !== 'undefined' && err.stack) {
if (config.debug()) {
if (process.env.NODE_ENV === 'development') {
log.debug(err.stack);
} else if (typeof err.public == 'undefined' || !err.public) {
log.warn(err.message);

View File

@@ -1,27 +1,33 @@
const config = require('./lib/config');
const config = require('config');
if (!config.has('database')) {
throw new Error('Database config does not exist! Please read the instructions: https://nginxproxymanager.com/setup/');
throw new Error('Database config does not exist! Please read the instructions: https://github.com/jc21/nginx-proxy-manager/blob/master/doc/INSTALL.md');
}
function generateDbConfig() {
const cfg = config.get('database');
if (cfg.engine === 'knex-native') {
return cfg.knex;
}
return {
client: cfg.engine,
connection: {
host: cfg.host,
user: cfg.user,
password: cfg.password,
database: cfg.name,
port: cfg.port
},
migrations: {
tableName: 'migrations'
}
};
if (config.database.engine === 'knex-native') {
return config.database.knex;
} else
return {
client: config.database.engine,
connection: {
host: config.database.host,
user: config.database.user,
password: config.database.password,
database: config.database.name,
port: config.database.port
},
migrations: {
tableName: 'migrations'
}
};
}
module.exports = require('knex')(generateDbConfig());
let data = generateDbConfig();
if (typeof config.database.version !== 'undefined') {
data.version = config.database.version;
}
module.exports = require('knex')(data);
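For orientation, the MySQL branch of `generateDbConfig()` above boils down to a knex initialisation like the following sketch (the connection values mirror the placeholder credentials from the README's compose example, not required settings):

```js
// Hand-expanded equivalent of the MySQL path in generateDbConfig().
const db = require('knex')({
    client: 'mysql',
    connection: {
        host: 'db',        // DB_MYSQL_HOST
        port: 3306,        // DB_MYSQL_PORT
        user: 'npm',       // DB_MYSQL_USER
        password: 'npm',   // DB_MYSQL_PASSWORD
        database: 'npm',   // DB_MYSQL_NAME
    },
    migrations: {
        tableName: 'migrations',
    },
});

module.exports = db;
```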

View File

@@ -3,6 +3,9 @@
const logger = require('./logger').global;
async function appStart () {
// Create config file db settings if environment variables have been set
await createDbConfigFromEnvironment();
const migrate = require('./migrate');
const setup = require('./setup');
const app = require('./app');
@@ -39,6 +42,89 @@ async function appStart () {
});
}
async function createDbConfigFromEnvironment() {
return new Promise((resolve, reject) => {
const envMysqlHost = process.env.DB_MYSQL_HOST || null;
const envMysqlPort = process.env.DB_MYSQL_PORT || null;
const envMysqlUser = process.env.DB_MYSQL_USER || null;
const envMysqlName = process.env.DB_MYSQL_NAME || null;
const envSqliteFile = process.env.DB_SQLITE_FILE || null;
if ((envMysqlHost && envMysqlPort && envMysqlUser && envMysqlName) || envSqliteFile) {
const fs = require('fs');
const filename = (process.env.NODE_CONFIG_DIR || './config') + '/' + (process.env.NODE_ENV || 'default') + '.json';
let configData = {};
try {
configData = require(filename);
} catch (err) {
// do nothing
}
if (configData.database && configData.database.engine && !configData.database.fromEnv) {
logger.info('Manual db configuration already exists, skipping config creation from environment variables');
resolve();
return;
}
if (envMysqlHost && envMysqlPort && envMysqlUser && envMysqlName) {
const newConfig = {
fromEnv: true,
engine: 'mysql',
host: envMysqlHost,
port: envMysqlPort,
user: envMysqlUser,
password: process.env.DB_MYSQL_PASSWORD,
name: envMysqlName,
};
if (JSON.stringify(configData.database) === JSON.stringify(newConfig)) {
// Config is unchanged, skip overwrite
resolve();
return;
}
logger.info('Generating MySQL db configuration from environment variables');
configData.database = newConfig;
} else {
const newConfig = {
fromEnv: true,
engine: 'knex-native',
knex: {
client: 'sqlite3',
connection: {
filename: envSqliteFile
},
useNullAsDefault: true
}
};
if (JSON.stringify(configData.database) === JSON.stringify(newConfig)) {
// Config is unchanged, skip overwrite
resolve();
return;
}
logger.info('Generating Sqlite db configuration from environment variables');
configData.database = newConfig;
}
// Write config
fs.writeFile(filename, JSON.stringify(configData, null, 2), (err) => {
if (err) {
logger.error('Could not write db config to config file: ' + filename);
reject(err);
} else {
logger.info('Wrote db configuration to config file: ' + filename);
resolve();
}
});
} else {
resolve();
}
});
}
try {
appStart();
} catch (err) {

View File

@@ -3,13 +3,13 @@ const fs = require('fs');
const batchflow = require('batchflow');
const logger = require('../logger').access;
const error = require('../lib/error');
const utils = require('../lib/utils');
const accessListModel = require('../models/access_list');
const accessListAuthModel = require('../models/access_list_auth');
const accessListClientModel = require('../models/access_list_client');
const proxyHostModel = require('../models/proxy_host');
const internalAuditLog = require('./audit-log');
const internalNginx = require('./nginx');
const utils = require('../lib/utils');
function omissions () {
return ['is_deleted'];
@@ -27,13 +27,13 @@ const internalAccessList = {
.then((/*access_data*/) => {
return accessListModel
.query()
.omit(omissions())
.insertAndFetch({
name: data.name,
satisfy_any: data.satisfy_any,
pass_auth: data.pass_auth,
owner_user_id: access.token.getUserId(1)
})
.then(utils.omitRow(omissions()));
});
})
.then((row) => {
data.id = row.id;
@@ -218,7 +218,7 @@ const internalAccessList = {
// re-fetch with expansions
return internalAccessList.get(access, {
id: data.id,
expand: ['owner', 'items', 'clients', 'proxy_hosts.[certificate,access_list.[clients,items]]']
expand: ['owner', 'items', 'clients', 'proxy_hosts.access_list.[clients,items]']
}, true /* <- skip masking */);
})
.then((row) => {
@@ -256,31 +256,35 @@ const internalAccessList = {
.joinRaw('LEFT JOIN `proxy_host` ON `proxy_host`.`access_list_id` = `access_list`.`id` AND `proxy_host`.`is_deleted` = 0')
.where('access_list.is_deleted', 0)
.andWhere('access_list.id', data.id)
.allowGraph('[owner,items,clients,proxy_hosts.[certificate,access_list.[clients,items]]]')
.allowEager('[owner,items,clients,proxy_hosts.[*, access_list.[clients,items]]]')
.omit(['access_list.is_deleted'])
.first();
if (access_data.permission_visibility !== 'all') {
query.andWhere('access_list.owner_user_id', access.token.getUserId(1));
}
if (typeof data.expand !== 'undefined' && data.expand !== null) {
query.withGraphFetched('[' + data.expand.join(', ') + ']');
}
return query.then(utils.omitRow(omissions()));
})
.then((row) => {
if (!row) {
throw new error.ItemNotFoundError(data.id);
}
if (!skip_masking && typeof row.items !== 'undefined' && row.items) {
row = internalAccessList.maskItems(row);
}
// Custom omissions
if (typeof data.omit !== 'undefined' && data.omit !== null) {
row = _.omit(row, data.omit);
query.omit(data.omit);
}
if (typeof data.expand !== 'undefined' && data.expand !== null) {
query.eager('[' + data.expand.join(', ') + ']');
}
return query;
})
.then((row) => {
if (row) {
if (!skip_masking && typeof row.items !== 'undefined' && row.items) {
row = internalAccessList.maskItems(row);
}
return _.omit(row, omissions());
} else {
throw new error.ItemNotFoundError(data.id);
}
return row;
});
},
@@ -377,7 +381,8 @@ const internalAccessList = {
.joinRaw('LEFT JOIN `proxy_host` ON `proxy_host`.`access_list_id` = `access_list`.`id` AND `proxy_host`.`is_deleted` = 0')
.where('access_list.is_deleted', 0)
.groupBy('access_list.id')
.allowGraph('[owner,items,clients]')
.omit(['access_list.is_deleted'])
.allowEager('[owner,items,clients]')
.orderBy('access_list.name', 'ASC');
if (access_data.permission_visibility !== 'all') {
@@ -392,10 +397,10 @@ const internalAccessList = {
}
if (typeof expand !== 'undefined' && expand !== null) {
query.withGraphFetched('[' + expand.join(', ') + ']');
query.eager('[' + expand.join(', ') + ']');
}
return query.then(utils.omitRows(omissions()));
return query;
})
.then((rows) => {
if (rows) {
@@ -502,7 +507,7 @@ const internalAccessList = {
if (typeof item.password !== 'undefined' && item.password.length) {
logger.info('Adding: ' + item.username);
utils.execFile('/usr/bin/htpasswd', ['-b', htpasswd_file, item.username, item.password])
utils.exec('/usr/bin/htpasswd -b "' + htpasswd_file + '" "' + item.username + '" "' + item.password + '"')
.then((/*result*/) => {
next();
})
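A side note on the two `htpasswd` invocations above: the `utils.execFile` variant passes its arguments as an array straight to the binary, with no shell in between, while the `utils.exec` variant builds one shell command string, so a username or password containing quotes or `$( )` sequences is parsed by the shell before `htpasswd` ever sees it. A small illustration (a sketch; the values are made up):

```js
const utils = require('../lib/utils');

const htpasswd_file = '/data/access/1';        // as in the code above
const username      = 'alice';
const password      = 'p@ss "word" $(whoami)'; // hostile-looking input

// String form: the shell interprets the embedded quotes and $( )
// before htpasswd runs.
// utils.exec('/usr/bin/htpasswd -b "' + htpasswd_file + '" "' + username + '" "' + password + '"');

// Array form: every argument reaches htpasswd verbatim; nothing is
// shell-parsed.
utils.execFile('/usr/bin/htpasswd', ['-b', htpasswd_file, username, password]);
```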

View File

@@ -19,7 +19,7 @@ const internalAuditLog = {
.orderBy('created_on', 'DESC')
.orderBy('id', 'DESC')
.limit(100)
.allowGraph('[user]');
.allowEager('[user]');
// Query is used for searching
if (typeof search_query === 'string') {
@@ -29,7 +29,7 @@ const internalAuditLog = {
}
if (typeof expand !== 'undefined' && expand !== null) {
query.withGraphFetched('[' + expand.join(', ') + ']');
query.eager('[' + expand.join(', ') + ']');
}
return query;

View File

@@ -1,24 +1,20 @@
const _ = require('lodash');
const fs = require('fs');
const https = require('https');
const tempWrite = require('temp-write');
const moment = require('moment');
const logger = require('../logger').ssl;
const config = require('../lib/config');
const error = require('../lib/error');
const utils = require('../lib/utils');
const certificateModel = require('../models/certificate');
const dnsPlugins = require('../global/certbot-dns-plugins');
const internalAuditLog = require('./audit-log');
const internalNginx = require('./nginx');
const internalHost = require('./host');
const archiver = require('archiver');
const path = require('path');
const { isArray } = require('lodash');
const letsencryptStaging = config.useLetsencryptStaging();
const _ = require('lodash');
const fs = require('fs');
const tempWrite = require('temp-write');
const moment = require('moment');
const logger = require('../logger').ssl;
const error = require('../lib/error');
const utils = require('../lib/utils');
const certificateModel = require('../models/certificate');
const dnsPlugins = require('../global/certbot-dns-plugins');
const internalAuditLog = require('./audit-log');
const internalNginx = require('./nginx');
const internalHost = require('./host');
const letsencryptStaging = process.env.NODE_ENV !== 'production';
const letsencryptConfig = '/etc/letsencrypt.ini';
const certbotCommand = 'certbot';
const archiver = require('archiver');
const path = require('path');
function omissions() {
return ['is_deleted'];
@@ -48,8 +44,6 @@ const internalCertificate = {
const cmd = certbotCommand + ' renew --non-interactive --quiet ' +
'--config "' + letsencryptConfig + '" ' +
'--work-dir "/tmp/letsencrypt-lib" ' +
'--logs-dir "/tmp/letsencrypt-log" ' +
'--preferred-challenges "dns,http" ' +
'--disable-hook-validation ' +
(letsencryptStaging ? '--staging' : '');
@@ -120,13 +114,13 @@ const internalCertificate = {
data.owner_user_id = access.token.getUserId(1);
if (data.provider === 'letsencrypt') {
data.nice_name = data.domain_names.join(', ');
data.nice_name = data.domain_names.sort().join(', ');
}
return certificateModel
.query()
.insertAndFetch(data)
.then(utils.omitRow(omissions()));
.omit(omissions())
.insertAndFetch(data);
})
.then((certificate) => {
if (certificate.provider === 'letsencrypt') {
@@ -175,7 +169,6 @@ const internalCertificate = {
// 3. Generate the LE config
return internalNginx.generateLetsEncryptRequestConfig(certificate)
.then(internalNginx.reload)
.then(async() => await new Promise((r) => setTimeout(r, 5000)))
.then(() => {
// 4. Request cert
return internalCertificate.requestLetsEncryptSsl(certificate);
@@ -273,8 +266,8 @@ const internalCertificate = {
return certificateModel
.query()
.omit(omissions())
.patchAndFetchById(row.id, data)
.then(utils.omitRow(omissions()))
.then((saved_row) => {
saved_row.meta = internalCertificate.cleanMeta(saved_row.meta);
data.meta = internalCertificate.cleanMeta(data.meta);
@@ -292,7 +285,7 @@ const internalCertificate = {
meta: _.omit(data, ['expires_on']) // this prevents json circular reference because expires_on might be raw
})
.then(() => {
return saved_row;
return _.omit(saved_row, omissions());
});
});
});
@@ -317,28 +310,30 @@ const internalCertificate = {
.query()
.where('is_deleted', 0)
.andWhere('id', data.id)
.allowGraph('[owner]')
.allowEager('[owner]')
.first();
if (access_data.permission_visibility !== 'all') {
query.andWhere('owner_user_id', access.token.getUserId(1));
}
if (typeof data.expand !== 'undefined' && data.expand !== null) {
query.withGraphFetched('[' + data.expand.join(', ') + ']');
}
return query.then(utils.omitRow(omissions()));
})
.then((row) => {
if (!row) {
throw new error.ItemNotFoundError(data.id);
}
// Custom omissions
if (typeof data.omit !== 'undefined' && data.omit !== null) {
row = _.omit(row, data.omit);
query.omit(data.omit);
}
if (typeof data.expand !== 'undefined' && data.expand !== null) {
query.eager('[' + data.expand.join(', ') + ']');
}
return query;
})
.then((row) => {
if (row) {
return _.omit(row, omissions());
} else {
throw new error.ItemNotFoundError(data.id);
}
return row;
});
},
@@ -468,7 +463,8 @@ const internalCertificate = {
.query()
.where('is_deleted', 0)
.groupBy('id')
.allowGraph('[owner]')
.omit(['is_deleted'])
.allowEager('[owner]')
.orderBy('nice_name', 'ASC');
if (access_data.permission_visibility !== 'all') {
@@ -478,15 +474,15 @@ const internalCertificate = {
// Query is used for searching
if (typeof search_query === 'string') {
query.where(function () {
this.where('nice_name', 'like', '%' + search_query + '%');
this.where('name', 'like', '%' + search_query + '%');
});
}
if (typeof expand !== 'undefined' && expand !== null) {
query.withGraphFetched('[' + expand.join(', ') + ']');
query.eager('[' + expand.join(', ') + ']');
}
return query.then(utils.omitRows(omissions()));
return query;
});
},
@@ -663,6 +659,7 @@ const internalCertificate = {
meta: _.clone(row.meta) // Prevent the update method from changing this value that we'll use later
})
.then((certificate) => {
console.log('ROWMETA:', row.meta);
certificate.meta = row.meta;
return internalCertificate.writeCustomCert(certificate);
});
@@ -835,10 +832,8 @@ const internalCertificate = {
requestLetsEncryptSsl: (certificate) => {
logger.info('Requesting Let\'sEncrypt certificates for Cert #' + certificate.id + ': ' + certificate.domain_names.join(', '));
const cmd = certbotCommand + ' certonly ' +
const cmd = certbotCommand + ' certonly --non-interactive ' +
'--config "' + letsencryptConfig + '" ' +
'--work-dir "/tmp/letsencrypt-lib" ' +
'--logs-dir "/tmp/letsencrypt-log" ' +
'--cert-name "npm-' + certificate.id + '" ' +
'--agree-tos ' +
'--authenticator webroot ' +
@@ -873,19 +868,13 @@ const internalCertificate = {
logger.info(`Requesting Let'sEncrypt certificates via ${dns_plugin.display_name} for Cert #${certificate.id}: ${certificate.domain_names.join(', ')}`);
const credentialsLocation = '/etc/letsencrypt/credentials/credentials-' + certificate.id;
// Escape single quotes and backslashes
const escapedCredentials = certificate.meta.dns_provider_credentials.replaceAll('\'', '\\\'').replaceAll('\\', '\\\\');
const credentialsCmd = 'mkdir -p /etc/letsencrypt/credentials 2> /dev/null; echo \'' + escapedCredentials + '\' > \'' + credentialsLocation + '\' && chmod 600 \'' + credentialsLocation + '\'';
// we call `. /opt/certbot/bin/activate` (`.` is alternative to `source` in dash) to access certbot venv
const prepareCmd = '. /opt/certbot/bin/activate && pip install --no-cache-dir --user ' + dns_plugin.package_name + (dns_plugin.version_requirement || '') + ' ' + dns_plugin.dependencies + ' && deactivate';
const credentialsCmd = 'mkdir -p /etc/letsencrypt/credentials 2> /dev/null; echo \'' + certificate.meta.dns_provider_credentials.replace('\'', '\\\'') + '\' > \'' + credentialsLocation + '\' && chmod 600 \'' + credentialsLocation + '\'';
const prepareCmd = 'pip install ' + dns_plugin.package_name + '==' + dns_plugin.package_version + ' ' + dns_plugin.dependencies;
// Whether the plugin has a --<name>-credentials argument
const hasConfigArg = certificate.meta.dns_provider !== 'route53';
let mainCmd = certbotCommand + ' certonly ' +
'--config "' + letsencryptConfig + '" ' +
'--work-dir "/tmp/letsencrypt-lib" ' +
'--logs-dir "/tmp/letsencrypt-log" ' +
let mainCmd = certbotCommand + ' certonly --non-interactive ' +
'--cert-name "npm-' + certificate.id + '" ' +
'--agree-tos ' +
'--email "' + certificate.meta.letsencrypt_email + '" ' +
@@ -980,13 +969,10 @@ const internalCertificate = {
renewLetsEncryptSsl: (certificate) => {
logger.info('Renewing Let\'sEncrypt certificates for Cert #' + certificate.id + ': ' + certificate.domain_names.join(', '));
const cmd = certbotCommand + ' renew --force-renewal ' +
const cmd = certbotCommand + ' renew --force-renewal --non-interactive ' +
'--config "' + letsencryptConfig + '" ' +
'--work-dir "/tmp/letsencrypt-lib" ' +
'--logs-dir "/tmp/letsencrypt-log" ' +
'--cert-name "npm-' + certificate.id + '" ' +
'--preferred-challenges "dns,http" ' +
'--no-random-sleep-on-renew ' +
'--disable-hook-validation ' +
(letsencryptStaging ? '--staging' : '');
@@ -1012,13 +998,9 @@ const internalCertificate = {
logger.info(`Renewing Let'sEncrypt certificates via ${dns_plugin.display_name} for Cert #${certificate.id}: ${certificate.domain_names.join(', ')}`);
let mainCmd = certbotCommand + ' renew ' +
'--config "' + letsencryptConfig + '" ' +
'--work-dir "/tmp/letsencrypt-lib" ' +
'--logs-dir "/tmp/letsencrypt-log" ' +
let mainCmd = certbotCommand + ' renew --non-interactive ' +
'--cert-name "npm-' + certificate.id + '" ' +
'--disable-hook-validation ' +
'--no-random-sleep-on-renew ' +
'--disable-hook-validation' +
(letsencryptStaging ? ' --staging' : '');
// Prepend the path to the credentials file as an environment variable
@@ -1044,8 +1026,7 @@ const internalCertificate = {
revokeLetsEncryptSsl: (certificate, throw_errors) => {
logger.info('Revoking Let\'sEncrypt certificates for Cert #' + certificate.id + ': ' + certificate.domain_names.join(', '));
const mainCmd = certbotCommand + ' revoke ' +
'--config "' + letsencryptConfig + '" ' +
const mainCmd = certbotCommand + ' revoke --non-interactive ' +
'--cert-path "/etc/letsencrypt/live/npm-' + certificate.id + '/fullchain.pem" ' +
'--delete-after-revoke ' +
(letsencryptStaging ? '--staging' : '');
@@ -1138,94 +1119,6 @@ const internalCertificate = {
} else {
return Promise.resolve();
}
},
testHttpsChallenge: async (access, domains) => {
await access.can('certificates:list');
if (!isArray(domains)) {
throw new error.InternalValidationError('Domains must be an array of strings');
}
if (domains.length === 0) {
throw new error.InternalValidationError('No domains provided');
}
// Create a test challenge file
const testChallengeDir = '/data/letsencrypt-acme-challenge/.well-known/acme-challenge';
const testChallengeFile = testChallengeDir + '/test-challenge';
fs.mkdirSync(testChallengeDir, {recursive: true});
fs.writeFileSync(testChallengeFile, 'Success', {encoding: 'utf8'});
async function performTestForDomain (domain) {
logger.info('Testing http challenge for ' + domain);
const url = `http://${domain}/.well-known/acme-challenge/test-challenge`;
const formBody = `method=G&url=${encodeURI(url)}&bodytype=T&requestbody=&headername=User-Agent&headervalue=None&locationid=1&ch=false&cc=false`;
const options = {
method: 'POST',
headers: {
'Content-Type': 'application/x-www-form-urlencoded',
'Content-Length': Buffer.byteLength(formBody)
}
};
const result = await new Promise((resolve) => {
const req = https.request('https://www.site24x7.com/tools/restapi-tester', options, function (res) {
let responseBody = '';
res.on('data', (chunk) => responseBody = responseBody + chunk);
res.on('end', function () {
const parsedBody = JSON.parse(responseBody + '');
if (res.statusCode !== 200) {
logger.warn(`Failed to test HTTP challenge for domain ${domain}`, res);
resolve(undefined);
}
resolve(parsedBody);
});
});
// Make sure to write the request body.
req.write(formBody);
req.end();
req.on('error', function (e) { logger.warn(`Failed to test HTTP challenge for domain ${domain}`, e);
resolve(undefined); });
});
if (!result) {
// Some error occurred while trying to get the data
return 'failed';
} else if (`${result.responsecode}` === '200' && result.htmlresponse === 'Success') {
// Server exists and has responded with the correct data
return 'ok';
} else if (`${result.responsecode}` === '200') {
// Server exists but has responded with wrong data
logger.info(`HTTP challenge test failed for domain ${domain} because of invalid returned data:`, result.htmlresponse);
return 'wrong-data';
} else if (`${result.responsecode}` === '404') {
// Server exists but responded with a 404
logger.info(`HTTP challenge test failed for domain ${domain} because code 404 was returned`);
return '404';
} else if (`${result.responsecode}` === '0' || (typeof result.reason === 'string' && result.reason.toLowerCase() === 'host unavailable')) {
// Server does not exist at domain
logger.info(`HTTP challenge test failed for domain ${domain} the host was not found`);
return 'no-host';
} else {
// Other errors
logger.info(`HTTP challenge test failed for domain ${domain} because code ${result.responsecode} was returned`);
return `other:${result.responsecode}`;
}
}
const results = {};
for (const domain of domains){
results[domain] = await performTestForDomain(domain);
}
// Remove the test challenge file
fs.unlinkSync(testChallengeFile);
return results;
}
};
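A hedged usage sketch for the `testHttpsChallenge` helper above (the `access` object is the permission token the API layer normally supplies; the domains are arbitrary examples):

```js
// Hypothetical call site; `access` comes from the authenticated request.
const results = await internalCertificate.testHttpsChallenge(access, [
    'example.com',
    'www.example.com',
]);
// `results` maps each domain to one of the outcomes returned above, e.g.:
// { 'example.com': 'ok', 'www.example.com': 'no-host' }
```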

View File

@@ -1,6 +1,5 @@
const _ = require('lodash');
const error = require('../lib/error');
const utils = require('../lib/utils');
const deadHostModel = require('../models/dead_host');
const internalHost = require('./host');
const internalNginx = require('./nginx');
@@ -50,8 +49,8 @@ const internalDeadHost = {
return deadHostModel
.query()
.insertAndFetch(data)
.then(utils.omitRow(omissions()));
.omit(omissions())
.insertAndFetch(data);
})
.then((row) => {
if (create_certificate) {
@@ -219,28 +218,31 @@ const internalDeadHost = {
.query()
.where('is_deleted', 0)
.andWhere('id', data.id)
.allowGraph('[owner,certificate]')
.allowEager('[owner,certificate]')
.first();
if (access_data.permission_visibility !== 'all') {
query.andWhere('owner_user_id', access.token.getUserId(1));
}
if (typeof data.expand !== 'undefined' && data.expand !== null) {
query.withGraphFetched('[' + data.expand.join(', ') + ']');
}
return query.then(utils.omitRow(omissions()));
})
.then((row) => {
if (!row) {
throw new error.ItemNotFoundError(data.id);
}
// Custom omissions
if (typeof data.omit !== 'undefined' && data.omit !== null) {
row = _.omit(row, data.omit);
query.omit(data.omit);
}
if (typeof data.expand !== 'undefined' && data.expand !== null) {
query.eager('[' + data.expand.join(', ') + ']');
}
return query;
})
.then((row) => {
if (row) {
row = internalHost.cleanRowCertificateMeta(row);
return _.omit(row, omissions());
} else {
throw new error.ItemNotFoundError(data.id);
}
return row;
});
},
@@ -402,7 +404,8 @@ const internalDeadHost = {
.query()
.where('is_deleted', 0)
.groupBy('id')
.allowGraph('[owner,certificate]')
.omit(['is_deleted'])
.allowEager('[owner,certificate]')
.orderBy('domain_names', 'ASC');
if (access_data.permission_visibility !== 'all') {
@@ -417,10 +420,10 @@ const internalDeadHost = {
}
if (typeof expand !== 'undefined' && expand !== null) {
query.withGraphFetched('[' + expand.join(', ') + ']');
query.eager('[' + expand.join(', ') + ']');
}
return query.then(utils.omitRows(omissions()));
return query;
})
.then((rows) => {
if (typeof expand !== 'undefined' && expand !== null && expand.indexOf('certificate') !== -1) {

View File

@@ -2,16 +2,13 @@ const https = require('https');
const fs = require('fs');
const logger = require('../logger').ip_ranges;
const error = require('../lib/error');
const utils = require('../lib/utils');
const internalNginx = require('./nginx');
const { Liquid } = require('liquidjs');
const CLOUDFRONT_URL = 'https://ip-ranges.amazonaws.com/ip-ranges.json';
const CLOUDFARE_V4_URL = 'https://www.cloudflare.com/ips-v4';
const CLOUDFARE_V6_URL = 'https://www.cloudflare.com/ips-v6';
const regIpV4 = /^(\d+\.?){4}\/\d+/;
const regIpV6 = /^(([\da-fA-F]+)?:)+\/\d+/;
const internalIpRanges = {
interval_timeout: 1000 * 60 * 60 * 6, // 6 hours
@@ -77,14 +74,14 @@ const internalIpRanges = {
return internalIpRanges.fetchUrl(CLOUDFARE_V4_URL);
})
.then((cloudfare_data) => {
let items = cloudfare_data.split('\n').filter((line) => regIpV4.test(line));
let items = cloudfare_data.split('\n');
ip_ranges = [... ip_ranges, ... items];
})
.then(() => {
return internalIpRanges.fetchUrl(CLOUDFARE_V6_URL);
})
.then((cloudfare_data) => {
let items = cloudfare_data.split('\n').filter((line) => regIpV6.test(line));
let items = cloudfare_data.split('\n');
ip_ranges = [... ip_ranges, ... items];
})
.then(() => {
@@ -119,7 +116,10 @@ const internalIpRanges = {
* @returns {Promise}
*/
generateConfig: (ip_ranges) => {
const renderEngine = utils.getRenderEngine();
let renderEngine = new Liquid({
root: __dirname + '/../templates/'
});
return new Promise((resolve, reject) => {
let template = null;
let filename = '/etc/nginx/conf.d/include/ip_ranges.conf';

View File

@@ -1,9 +1,10 @@
const _ = require('lodash');
const fs = require('fs');
const logger = require('../logger').nginx;
const config = require('../lib/config');
const utils = require('../lib/utils');
const error = require('../lib/error');
const _ = require('lodash');
const fs = require('fs');
const logger = require('../logger').nginx;
const utils = require('../lib/utils');
const error = require('../lib/error');
const { Liquid } = require('liquidjs');
const debug_mode = process.env.NODE_ENV !== 'production' || !!process.env.DEBUG;
const internalNginx = {
@@ -28,9 +29,7 @@ const internalNginx = {
.then(() => {
// Nginx is OK
// We're deleting this config regardless.
// Don't throw errors, as the file may not exist at all
// Delete the .err file too
return internalNginx.deleteConfig(host_type, host, false, true);
return internalNginx.deleteConfig(host_type, host); // Don't throw errors, as the file may not exist at all
})
.then(() => {
return internalNginx.generateConfig(host_type, host);
@@ -65,7 +64,7 @@ const internalNginx = {
}
});
if (config.debug()) {
if (debug_mode) {
logger.error('Nginx test failed:', valid_lines.join('\n'));
}
@@ -81,9 +80,6 @@ const internalNginx = {
.patch({
meta: combined_meta
})
.then(() => {
internalNginx.renameConfigAsError(host_type, host);
})
.then(() => {
return internalNginx.deleteConfig(host_type, host, true);
});
@@ -101,7 +97,7 @@ const internalNginx = {
* @returns {Promise}
*/
test: () => {
if (config.debug()) {
if (debug_mode) {
logger.info('Testing Nginx configuration');
}
@@ -125,10 +121,13 @@ const internalNginx = {
* @returns {String}
*/
getConfigName: (host_type, host_id) => {
host_type = host_type.replace(new RegExp('-', 'g'), '_');
if (host_type === 'default') {
return '/data/nginx/default_host/site.conf';
}
return '/data/nginx/' + internalNginx.getFileFriendlyHostType(host_type) + '/' + host_id + '.conf';
return '/data/nginx/' + host_type + '/' + host_id + '.conf';
},
/**
@@ -137,6 +136,8 @@ const internalNginx = {
* @returns {Promise}
*/
renderLocations: (host) => {
//logger.info('host = ' + JSON.stringify(host, null, 2));
return new Promise((resolve, reject) => {
let template;
@@ -147,7 +148,9 @@ const internalNginx = {
return;
}
const renderEngine = utils.getRenderEngine();
let renderer = new Liquid({
root: __dirname + '/../templates/'
});
let renderedLocations = '';
const locationRendering = async () => {
@ -165,8 +168,10 @@ const internalNginx = {
locationCopy.forward_path = `/${splitted.join('/')}`;
}
//logger.info('locationCopy = ' + JSON.stringify(locationCopy, null, 2));
// eslint-disable-next-line
renderedLocations += await renderEngine.parseAndRender(template, locationCopy);
renderedLocations += await renderer.parseAndRender(template, locationCopy);
}
};
@@ -182,20 +187,24 @@ const internalNginx = {
* @returns {Promise}
*/
generateConfig: (host_type, host) => {
const nice_host_type = internalNginx.getFileFriendlyHostType(host_type);
host_type = host_type.replace(new RegExp('-', 'g'), '_');
if (config.debug()) {
logger.info('Generating ' + nice_host_type + ' Config:', JSON.stringify(host, null, 2));
if (debug_mode) {
logger.info('Generating ' + host_type + ' Config:', host);
}
const renderEngine = utils.getRenderEngine();
// logger.info('host = ' + JSON.stringify(host, null, 2));
let renderEngine = new Liquid({
root: __dirname + '/../templates/'
});
return new Promise((resolve, reject) => {
let template = null;
let filename = internalNginx.getConfigName(nice_host_type, host.id);
let filename = internalNginx.getConfigName(host_type, host.id);
try {
template = fs.readFileSync(__dirname + '/../templates/' + nice_host_type + '.conf', {encoding: 'utf8'});
template = fs.readFileSync(__dirname + '/../templates/' + host_type + '.conf', {encoding: 'utf8'});
} catch (err) {
reject(new error.ConfigurationError(err.message));
return;
@@ -205,7 +214,7 @@ const internalNginx = {
let origLocations;
// Manipulate the data a bit before sending it to the template
if (nice_host_type !== 'default') {
if (host_type !== 'default') {
host.use_default_location = true;
if (typeof host.advanced_config !== 'undefined' && host.advanced_config) {
host.use_default_location = !internalNginx.advancedConfigHasDefaultLocation(host.advanced_config);
@@ -239,7 +248,7 @@ const internalNginx = {
.then((config_text) => {
fs.writeFileSync(filename, config_text, {encoding: 'utf8'});
if (config.debug()) {
if (debug_mode) {
logger.success('Wrote config:', filename, config_text);
}
@@ -249,7 +258,7 @@ const internalNginx = {
resolve(true);
})
.catch((err) => {
if (config.debug()) {
if (debug_mode) {
logger.warn('Could not write ' + filename + ':', err.message);
}
@@ -268,11 +277,13 @@ const internalNginx = {
* @returns {Promise}
*/
generateLetsEncryptRequestConfig: (certificate) => {
if (config.debug()) {
if (debug_mode) {
logger.info('Generating LetsEncrypt Request Config:', certificate);
}
const renderEngine = utils.getRenderEngine();
let renderEngine = new Liquid({
root: __dirname + '/../templates/'
});
return new Promise((resolve, reject) => {
let template = null;
@@ -292,14 +303,14 @@ const internalNginx = {
.then((config_text) => {
fs.writeFileSync(filename, config_text, {encoding: 'utf8'});
if (config.debug()) {
if (debug_mode) {
logger.success('Wrote config:', filename, config_text);
}
resolve(true);
})
.catch((err) => {
if (config.debug()) {
if (debug_mode) {
logger.warn('Could not write ' + filename + ':', err.message);
}
@@ -308,58 +319,33 @@ const internalNginx = {
});
},
/**
* A simple wrapper around unlinkSync that writes to the logger
*
* @param {String} filename
*/
deleteFile: (filename) => {
logger.debug('Deleting file: ' + filename);
try {
fs.unlinkSync(filename);
} catch (err) {
logger.debug('Could not delete file:', JSON.stringify(err, null, 2));
}
},
/**
*
* @param {String} host_type
* @returns String
*/
getFileFriendlyHostType: (host_type) => {
return host_type.replace(new RegExp('-', 'g'), '_');
},
/**
* This removes the temporary nginx config file generated by `generateLetsEncryptRequestConfig`
*
* @param {Object} certificate
* @param {Boolean} [throw_errors]
* @returns {Promise}
*/
deleteLetsEncryptRequestConfig: (certificate) => {
const config_file = '/data/nginx/temp/letsencrypt_' + certificate.id + '.conf';
return new Promise((resolve/*, reject*/) => {
internalNginx.deleteFile(config_file);
resolve();
});
},
deleteLetsEncryptRequestConfig: (certificate, throw_errors) => {
return new Promise((resolve, reject) => {
try {
let config_file = '/data/nginx/temp/letsencrypt_' + certificate.id + '.conf';
/**
* @param {String} host_type
* @param {Object} [host]
* @param {Boolean} [delete_err_file]
* @returns {Promise}
*/
deleteConfig: (host_type, host, delete_err_file) => {
const config_file = internalNginx.getConfigName(internalNginx.getFileFriendlyHostType(host_type), typeof host === 'undefined' ? 0 : host.id);
const config_file_err = config_file + '.err';
if (debug_mode) {
logger.warn('Deleting nginx config: ' + config_file);
}
return new Promise((resolve/*, reject*/) => {
internalNginx.deleteFile(config_file);
if (delete_err_file) {
internalNginx.deleteFile(config_file_err);
fs.unlinkSync(config_file);
} catch (err) {
if (debug_mode) {
logger.warn('Could not delete config:', err.message);
}
if (throw_errors) {
reject(err);
}
}
resolve();
});
},
@@ -367,20 +353,32 @@ const internalNginx = {
/**
* @param {String} host_type
* @param {Object} [host]
* @param {Boolean} [throw_errors]
* @returns {Promise}
*/
renameConfigAsError: (host_type, host) => {
const config_file = internalNginx.getConfigName(internalNginx.getFileFriendlyHostType(host_type), typeof host === 'undefined' ? 0 : host.id);
const config_file_err = config_file + '.err';
deleteConfig: (host_type, host, throw_errors) => {
host_type = host_type.replace(new RegExp('-', 'g'), '_');
return new Promise((resolve/*, reject*/) => {
fs.unlink(config_file, () => {
// ignore result, continue
fs.rename(config_file, config_file_err, () => {
// also ignore result, as this is a debugging informative file anyway
resolve();
});
});
return new Promise((resolve, reject) => {
try {
let config_file = internalNginx.getConfigName(host_type, typeof host === 'undefined' ? 0 : host.id);
if (debug_mode) {
logger.warn('Deleting nginx config: ' + config_file);
}
fs.unlinkSync(config_file);
} catch (err) {
if (debug_mode) {
logger.warn('Could not delete config:', err.message);
}
if (throw_errors) {
reject(err);
}
}
resolve();
});
},
@ -401,12 +399,13 @@ const internalNginx = {
/**
* @param {String} host_type
* @param {Array} hosts
* @param {Boolean} [throw_errors]
* @returns {Promise}
*/
bulkDeleteConfigs: (host_type, hosts) => {
bulkDeleteConfigs: (host_type, hosts, throw_errors) => {
let promises = [];
hosts.map(function (host) {
promises.push(internalNginx.deleteConfig(host_type, host, true));
promises.push(internalNginx.deleteConfig(host_type, host, throw_errors));
});
return Promise.all(promises);
@ -416,8 +415,8 @@ const internalNginx = {
* @param {string} config
* @returns {boolean}
*/
advancedConfigHasDefaultLocation: function (cfg) {
return !!cfg.match(/^(?:.*;)?\s*?location\s*?\/\s*?{/im);
advancedConfigHasDefaultLocation: function (config) {
return !!config.match(/^(?:.*;)?\s*?location\s*?\/\s*?{/im);
},
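advancedConfigHasDefaultLocation only changes its parameter name between the branches; the regex is identical. A quick illustration of what it accepts (a top-level "location / {", even after another directive on the same line) and rejects:
// Same regex as above, lifted out for demonstration.
const hasDefault = (cfg) => !!cfg.match(/^(?:.*;)?\s*?location\s*?\/\s*?{/im);
console.log(hasDefault('location / {\n    return 404;\n}'));  // true
console.log(hasDefault('gzip on; location / {'));             // true (after a directive)
console.log(hasDefault('location /api/ {\n    gzip on;\n}')); // false (sub-path only)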
/**

View File

@ -1,6 +1,5 @@
const _ = require('lodash');
const error = require('../lib/error');
const utils = require('../lib/utils');
const proxyHostModel = require('../models/proxy_host');
const internalHost = require('./host');
const internalNginx = require('./nginx');
@ -50,8 +49,8 @@ const internalProxyHost = {
return proxyHostModel
.query()
.insertAndFetch(data)
.then(utils.omitRow(omissions()));
.omit(omissions())
.insertAndFetch(data);
})
.then((row) => {
if (create_certificate) {
@ -171,7 +170,6 @@ const internalProxyHost = {
.query()
.where({id: data.id})
.patch(data)
.then(utils.omitRow(omissions()))
.then((saved_row) => {
// Add to audit log
return internalAuditLog.add(access, {
@ -181,7 +179,7 @@ const internalProxyHost = {
meta: data
})
.then(() => {
return saved_row;
return _.omit(saved_row, omissions());
});
});
})
@ -225,29 +223,31 @@ const internalProxyHost = {
.query()
.where('is_deleted', 0)
.andWhere('id', data.id)
.allowGraph('[owner,access_list,access_list.[clients,items],certificate]')
.allowEager('[owner,access_list,access_list.[clients,items],certificate]')
.first();
if (access_data.permission_visibility !== 'all') {
query.andWhere('owner_user_id', access.token.getUserId(1));
}
if (typeof data.expand !== 'undefined' && data.expand !== null) {
query.withGraphFetched('[' + data.expand.join(', ') + ']');
}
return query.then(utils.omitRow(omissions()));
})
.then((row) => {
if (!row) {
throw new error.ItemNotFoundError(data.id);
}
row = internalHost.cleanRowCertificateMeta(row);
// Custom omissions
if (typeof data.omit !== 'undefined' && data.omit !== null) {
row = _.omit(row, data.omit);
query.omit(data.omit);
}
if (typeof data.expand !== 'undefined' && data.expand !== null) {
query.eager('[' + data.expand.join(', ') + ']');
}
return query;
})
.then((row) => {
if (row) {
row = internalHost.cleanRowCertificateMeta(row);
return _.omit(row, omissions());
} else {
throw new error.ItemNotFoundError(data.id);
}
return row;
});
},
@ -409,7 +409,8 @@ const internalProxyHost = {
.query()
.where('is_deleted', 0)
.groupBy('id')
.allowGraph('[owner,access_list,certificate]')
.omit(['is_deleted'])
.allowEager('[owner,access_list,certificate]')
.orderBy('domain_names', 'ASC');
if (access_data.permission_visibility !== 'all') {
@ -424,10 +425,10 @@ const internalProxyHost = {
}
if (typeof expand !== 'undefined' && expand !== null) {
query.withGraphFetched('[' + expand.join(', ') + ']');
query.eager('[' + expand.join(', ') + ']');
}
return query.then(utils.omitRows(omissions()));
return query;
})
.then((rows) => {
if (typeof expand !== 'undefined' && expand !== null && expand.indexOf('certificate') !== -1) {
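Most of the churn in this proxy-host file is Objection.js API drift between the branches: one side chains the old eager()/allowEager()/QueryBuilder.omit() methods, the other uses withGraphFetched()/allowGraph(), where QueryBuilder.omit() no longer exists, and strips columns afterwards via the utils helpers. A minimal sketch of the helper-based style, assuming Objection 3 and the module paths used in this diff:
const _ = require('lodash');
const proxyHostModel = require('../models/proxy_host'); // path as in this diff
// utils.omitRow is just a curried lodash omit, so it drops straight into .then():
const omitRow = (omissions) => (row) => _.omit(row, omissions);
proxyHostModel
    .query()
    .where('is_deleted', 0)
    .allowGraph('[owner,certificate]')
    .withGraphFetched('[owner]')
    .first()
    .then(omitRow(['is_deleted'])) // bookkeeping columns never reach the API
    .then((row) => console.log(row));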

View File

@ -1,6 +1,5 @@
const _ = require('lodash');
const error = require('../lib/error');
const utils = require('../lib/utils');
const redirectionHostModel = require('../models/redirection_host');
const internalHost = require('./host');
const internalNginx = require('./nginx');
@ -50,8 +49,8 @@ const internalRedirectionHost = {
return redirectionHostModel
.query()
.insertAndFetch(data)
.then(utils.omitRow(omissions()));
.omit(omissions())
.insertAndFetch(data);
})
.then((row) => {
if (create_certificate) {
@ -66,8 +65,9 @@ const internalRedirectionHost = {
.then(() => {
return row;
});
} else {
return row;
}
return row;
})
.then((row) => {
// re-fetch with cert
@ -218,29 +218,31 @@ const internalRedirectionHost = {
.query()
.where('is_deleted', 0)
.andWhere('id', data.id)
.allowGraph('[owner,certificate]')
.allowEager('[owner,certificate]')
.first();
if (access_data.permission_visibility !== 'all') {
query.andWhere('owner_user_id', access.token.getUserId(1));
}
if (typeof data.expand !== 'undefined' && data.expand !== null) {
query.withGraphFetched('[' + data.expand.join(', ') + ']');
}
return query.then(utils.omitRow(omissions()));
})
.then((row) => {
if (!row) {
throw new error.ItemNotFoundError(data.id);
}
row = internalHost.cleanRowCertificateMeta(row);
// Custom omissions
if (typeof data.omit !== 'undefined' && data.omit !== null) {
row = _.omit(row, data.omit);
query.omit(data.omit);
}
if (typeof data.expand !== 'undefined' && data.expand !== null) {
query.eager('[' + data.expand.join(', ') + ']');
}
return query;
})
.then((row) => {
if (row) {
row = internalHost.cleanRowCertificateMeta(row);
return _.omit(row, omissions());
} else {
throw new error.ItemNotFoundError(data.id);
}
return row;
});
},
@ -402,7 +404,8 @@ const internalRedirectionHost = {
.query()
.where('is_deleted', 0)
.groupBy('id')
.allowGraph('[owner,certificate]')
.omit(['is_deleted'])
.allowEager('[owner,certificate]')
.orderBy('domain_names', 'ASC');
if (access_data.permission_visibility !== 'all') {
@ -417,10 +420,10 @@ const internalRedirectionHost = {
}
if (typeof expand !== 'undefined' && expand !== null) {
query.withGraphFetched('[' + expand.join(', ') + ']');
query.eager('[' + expand.join(', ') + ']');
}
return query.then(utils.omitRows(omissions()));
return query;
})
.then((rows) => {
if (typeof expand !== 'undefined' && expand !== null && expand.indexOf('certificate') !== -1) {

View File

@ -1,6 +1,5 @@
const _ = require('lodash');
const error = require('../lib/error');
const utils = require('../lib/utils');
const streamModel = require('../models/stream');
const internalNginx = require('./nginx');
const internalAuditLog = require('./audit-log');
@ -28,8 +27,8 @@ const internalStream = {
return streamModel
.query()
.insertAndFetch(data)
.then(utils.omitRow(omissions()));
.omit(omissions())
.insertAndFetch(data);
})
.then((row) => {
// Configure nginx
@ -72,8 +71,8 @@ const internalStream = {
return streamModel
.query()
.omit(omissions())
.patchAndFetchById(row.id, data)
.then(utils.omitRow(omissions()))
.then((saved_row) => {
return internalNginx.configure(streamModel, 'stream', saved_row)
.then(() => {
@ -89,7 +88,7 @@ const internalStream = {
meta: data
})
.then(() => {
return saved_row;
return _.omit(saved_row, omissions());
});
});
});
@ -114,28 +113,30 @@ const internalStream = {
.query()
.where('is_deleted', 0)
.andWhere('id', data.id)
.allowGraph('[owner]')
.allowEager('[owner]')
.first();
if (access_data.permission_visibility !== 'all') {
query.andWhere('owner_user_id', access.token.getUserId(1));
}
if (typeof data.expand !== 'undefined' && data.expand !== null) {
query.withGraphFetched('[' + data.expand.join(', ') + ']');
}
return query.then(utils.omitRow(omissions()));
})
.then((row) => {
if (!row) {
throw new error.ItemNotFoundError(data.id);
}
// Custom omissions
if (typeof data.omit !== 'undefined' && data.omit !== null) {
row = _.omit(row, data.omit);
query.omit(data.omit);
}
if (typeof data.expand !== 'undefined' && data.expand !== null) {
query.eager('[' + data.expand.join(', ') + ']');
}
return query;
})
.then((row) => {
if (row) {
return _.omit(row, omissions());
} else {
throw new error.ItemNotFoundError(data.id);
}
return row;
});
},
@ -297,7 +298,8 @@ const internalStream = {
.query()
.where('is_deleted', 0)
.groupBy('id')
.allowGraph('[owner]')
.omit(['is_deleted'])
.allowEager('[owner]')
.orderBy('incoming_port', 'ASC');
if (access_data.permission_visibility !== 'all') {
@ -312,10 +314,10 @@ const internalStream = {
}
if (typeof expand !== 'undefined' && expand !== null) {
query.withGraphFetched('[' + expand.join(', ') + ']');
query.eager('[' + expand.join(', ') + ']');
}
return query.then(utils.omitRows(omissions()));
return query;
});
},

View File

@ -24,7 +24,7 @@ module.exports = {
return userModel
.query()
.where('email', data.identity.toLowerCase().trim())
.where('email', data.identity)
.andWhere('is_deleted', 0)
.andWhere('is_disabled', 0)
.first()
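The single changed line above canonicalises the login lookup: the submitted identity is lowercased and trimmed before it is compared against the email column, so ' John@Example.com ' still matches a row stored as john@example.com. In isolation:
// Normalise the submitted identity once, before querying:
const identity = ' John@Example.com ';
console.log(identity.toLowerCase().trim()); // 'john@example.com'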

View File

@ -1,6 +1,5 @@
const _ = require('lodash');
const error = require('../lib/error');
const utils = require('../lib/utils');
const userModel = require('../models/user');
const userPermissionModel = require('../models/user_permission');
const authModel = require('../models/auth');
@ -36,8 +35,8 @@ const internalUser = {
return userModel
.query()
.insertAndFetch(data)
.then(utils.omitRow(omissions()));
.omit(omissions())
.insertAndFetch(data);
})
.then((user) => {
if (auth) {
@ -141,8 +140,11 @@ const internalUser = {
return userModel
.query()
.omit(omissions())
.patchAndFetchById(user.id, data)
.then(utils.omitRow(omissions()));
.then((saved_user) => {
return _.omit(saved_user, omissions());
});
})
.then(() => {
return internalUser.get(access, {id: data.id});
@ -184,24 +186,26 @@ const internalUser = {
.query()
.where('is_deleted', 0)
.andWhere('id', data.id)
.allowGraph('[permissions]')
.allowEager('[permissions]')
.first();
if (typeof data.expand !== 'undefined' && data.expand !== null) {
query.withGraphFetched('[' + data.expand.join(', ') + ']');
}
return query.then(utils.omitRow(omissions()));
})
.then((row) => {
if (!row) {
throw new error.ItemNotFoundError(data.id);
}
// Custom omissions
if (typeof data.omit !== 'undefined' && data.omit !== null) {
row = _.omit(row, data.omit);
query.omit(data.omit);
}
if (typeof data.expand !== 'undefined' && data.expand !== null) {
query.eager('[' + data.expand.join(', ') + ']');
}
return query;
})
.then((row) => {
if (row) {
return _.omit(row, omissions());
} else {
throw new error.ItemNotFoundError(data.id);
}
return row;
});
},
@ -318,7 +322,8 @@ const internalUser = {
.query()
.where('is_deleted', 0)
.groupBy('id')
.allowGraph('[permissions]')
.omit(['is_deleted'])
.allowEager('[permissions]')
.orderBy('name', 'ASC');
// Query is used for searching
@ -330,10 +335,10 @@ const internalUser = {
}
if (typeof expand !== 'undefined' && expand !== null) {
query.withGraphFetched('[' + expand.join(', ') + ']');
query.eager('[' + expand.join(', ') + ']');
}
return query.then(utils.omitRows(omissions()));
return query;
});
},

View File

@ -55,8 +55,8 @@ module.exports = function (token_string) {
.where('id', token_data.attrs.id)
.andWhere('is_deleted', 0)
.andWhere('is_disabled', 0)
.allowGraph('[permissions]')
.withGraphFetched('[permissions]')
.allowEager('[permissions]')
.eager('[permissions]')
.first()
.then((user) => {
if (user) {

View File

@ -1,184 +0,0 @@
const fs = require('fs');
const NodeRSA = require('node-rsa');
const logger = require('../logger').global;
const keysFile = '/data/keys.json';
let instance = null;
// 1. Load from config file first (not recommended anymore)
// 2. Use config env variables next
const configure = () => {
const filename = (process.env.NODE_CONFIG_DIR || './config') + '/' + (process.env.NODE_ENV || 'default') + '.json';
if (fs.existsSync(filename)) {
let configData;
try {
configData = require(filename);
} catch (err) {
// do nothing
}
if (configData && configData.database) {
logger.info(`Using configuration from file: ${filename}`);
instance = configData;
instance.keys = getKeys();
return;
}
}
const envMysqlHost = process.env.DB_MYSQL_HOST || null;
const envMysqlUser = process.env.DB_MYSQL_USER || null;
const envMysqlName = process.env.DB_MYSQL_NAME || null;
if (envMysqlHost && envMysqlUser && envMysqlName) {
// we have enough mysql creds to go with mysql
logger.info('Using MySQL configuration');
instance = {
database: {
engine: 'mysql',
host: envMysqlHost,
port: process.env.DB_MYSQL_PORT || 3306,
user: envMysqlUser,
password: process.env.DB_MYSQL_PASSWORD,
name: envMysqlName,
},
keys: getKeys(),
};
return;
}
const envSqliteFile = process.env.DB_SQLITE_FILE || '/data/database.sqlite';
logger.info(`Using Sqlite: ${envSqliteFile}`);
instance = {
database: {
engine: 'knex-native',
knex: {
client: 'sqlite3',
connection: {
filename: envSqliteFile
},
useNullAsDefault: true
}
},
keys: getKeys(),
};
};
const getKeys = () => {
// Get keys from file
if (!fs.existsSync(keysFile)) {
generateKeys();
} else if (process.env.DEBUG) {
logger.info('Keys file exists OK');
}
try {
return require(keysFile);
} catch (err) {
logger.error('Could not read JWT key pair from config file: ' + keysFile, err);
process.exit(1);
}
};
const generateKeys = () => {
logger.info('Creating a new JWT key pair...');
// Now create the keys and save them in the config.
const key = new NodeRSA({ b: 2048 });
key.generateKeyPair();
const keys = {
key: key.exportKey('private').toString(),
pub: key.exportKey('public').toString(),
};
// Write keys config
try {
fs.writeFileSync(keysFile, JSON.stringify(keys, null, 2));
} catch (err) {
logger.error('Could not write JWT key pair to config file: ' + keysFile + ': ' + err.message);
process.exit(1);
}
logger.info('Wrote JWT key pair to config file: ' + keysFile);
};
module.exports = {
/**
*
* @param {string} key e.g. 'database' or 'database.engine'
* @returns {boolean}
*/
has: function(key) {
instance === null && configure();
const keys = key.split('.');
let level = instance;
let has = true;
keys.forEach((keyItem) =>{
if (typeof level[keyItem] === 'undefined') {
has = false;
} else {
level = level[keyItem];
}
});
return has;
},
/**
* Gets a specific key from the top level
*
* @param {string} key
* @returns {*}
*/
get: function (key) {
instance === null && configure();
if (key && typeof instance[key] !== 'undefined') {
return instance[key];
}
return instance;
},
/**
* Is this a sqlite configuration?
*
* @returns {boolean}
*/
isSqlite: function () {
instance === null && configure();
return instance.database.knex && instance.database.knex.client === 'sqlite3';
},
/**
* Are we running in debug mode?
*
* @returns {boolean}
*/
debug: function () {
return !!process.env.DEBUG;
},
/**
* Returns a public key
*
* @returns {string}
*/
getPublicKey: function () {
instance === null && configure();
return instance.keys.pub;
},
/**
* Returns a private key
*
* @returns {string}
*/
getPrivateKey: function () {
instance === null && configure();
return instance.keys.key;
},
/**
* @returns {boolean}
*/
useLetsencryptStaging: function () {
return !!process.env.LE_STAGING;
}
};
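The deleted lib/config.js above is what replaces the config npm package elsewhere in this diff: resolution is a JSON config file if one exists (legacy), then the DB_MYSQL_* variables, then a sqlite fallback, with the JWT keypair persisted to /data/keys.json instead of the config file. A condensed sketch of that precedence, with env names taken from the file and the require path assumed:
// 1. ${NODE_CONFIG_DIR}/${NODE_ENV}.json            (legacy, discouraged)
// 2. DB_MYSQL_HOST + DB_MYSQL_USER + DB_MYSQL_NAME  -> mysql engine
// 3. DB_SQLITE_FILE (default /data/database.sqlite) -> knex-native sqlite3
process.env.DB_MYSQL_HOST = 'db';
process.env.DB_MYSQL_USER = 'npm';
process.env.DB_MYSQL_NAME = 'npm';
const config = require('./lib/config'); // path assumed
console.log(config.get('database').engine); // 'mysql'
console.log(config.debug());                // true only when DEBUG is set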

View File

@ -1,8 +1,4 @@
const _ = require('lodash');
const exec = require('child_process').exec;
const execFile = require('child_process').execFile;
const { Liquid } = require('liquidjs');
const logger = require('../logger').global;
const exec = require('child_process').exec;
module.exports = {
@ -20,82 +16,5 @@ module.exports = {
}
});
});
},
/**
* @param {String} cmd
* @param {Array} args
* @returns {Promise}
*/
execFile: function (cmd, args) {
logger.debug('CMD: ' + cmd + ' ' + (args ? args.join(' ') : ''));
return new Promise((resolve, reject) => {
execFile(cmd, args, function (err, stdout, /*stderr*/) {
if (err && typeof err === 'object') {
reject(err);
} else {
resolve(stdout.trim());
}
});
});
},
/**
* Used in objection query builder
*
* @param {Array} omissions
* @returns {Function}
*/
omitRow: function (omissions) {
/**
* @param {Object} row
* @returns {Object}
*/
return (row) => {
return _.omit(row, omissions);
};
},
/**
* Used in objection query builder
*
* @param {Array} omissions
* @returns {Function}
*/
omitRows: function (omissions) {
/**
* @param {Array} rows
* @returns {Object}
*/
return (rows) => {
rows.forEach((row, idx) => {
rows[idx] = _.omit(row, omissions);
});
return rows;
};
},
/**
* @returns {Object} Liquid render engine
*/
getRenderEngine: function () {
const renderEngine = new Liquid({
root: __dirname + '/../templates/'
});
/**
* nginxAccessRule expects the object given to have 2 properties:
*
* directive string
* address string
*/
renderEngine.registerFilter('nginxAccessRule', (v) => {
if (typeof v.directive !== 'undefined' && typeof v.address !== 'undefined' && v.directive && v.address) {
return `${v.directive} ${v.address};`;
}
return '';
});
return renderEngine;
}
};
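getRenderEngine() above centralises the Liquid setup and registers the nginxAccessRule filter used by the access templates: given an object with directive and address it emits one nginx rule, otherwise an empty string. The filter in isolation, assuming liquidjs is installed:
const { Liquid } = require('liquidjs');
const engine = new Liquid();
engine.registerFilter('nginxAccessRule', (v) => {
    if (typeof v.directive !== 'undefined' && typeof v.address !== 'undefined' && v.directive && v.address) {
        return `${v.directive} ${v.address};`;
    }
    return '';
});
engine.parseAndRender('{{ client | nginxAccessRule }}', {
    client: { directive: 'allow', address: '10.0.0.0/8' },
}).then(console.log); // "allow 10.0.0.0/8;"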

View File

@ -5,7 +5,7 @@ const definitions = require('../../schema/definitions.json');
RegExp.prototype.toJSON = RegExp.prototype.toString;
const ajv = require('ajv')({
verbose: true,
verbose: true, //process.env.NODE_ENV === 'development',
allErrors: true,
format: 'full', // strict regexes for format checks
coerceTypes: true,

View File

@ -0,0 +1,48 @@
const migrate_name = 'openid_connect';
const logger = require('../logger').migrate;
/**
* Migrate
*
* @see http://knexjs.org/#Schema
*
* @param {Object} knex
* @param {Promise} Promise
* @returns {Promise}
*/
exports.up = function (knex/*, Promise*/) {
logger.info('[' + migrate_name + '] Migrating Up...');
return knex.schema.table('proxy_host', function (proxy_host) {
proxy_host.integer('openidc_enabled').notNull().unsigned().defaultTo(0);
proxy_host.text('openidc_redirect_uri').notNull().defaultTo('');
proxy_host.text('openidc_discovery').notNull().defaultTo('');
proxy_host.text('openidc_auth_method').notNull().defaultTo('');
proxy_host.text('openidc_client_id').notNull().defaultTo('');
proxy_host.text('openidc_client_secret').notNull().defaultTo('');
})
.then(() => {
logger.info('[' + migrate_name + '] proxy_host Table altered');
});
};
/**
* Undo Migrate
*
* @param {Object} knex
* @param {Promise} Promise
* @returns {Promise}
*/
exports.down = function (knex/*, Promise*/) {
return knex.schema.table('proxy_host', function (proxy_host) {
proxy_host.dropColumn('openidc_enabled');
proxy_host.dropColumn('openidc_redirect_uri');
proxy_host.dropColumn('openidc_discovery');
proxy_host.dropColumn('openidc_auth_method');
proxy_host.dropColumn('openidc_client_id');
proxy_host.dropColumn('openidc_client_secret');
})
.then(() => {
logger.info('[' + migrate_name + '] proxy_host Table altered');
});
};
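The migration above adds the six OIDC columns with zero/empty defaults so existing proxy_host rows remain valid, and the down() mirror keeps it reversible. A sketch of driving it and inspecting the result with knex; the knexfile path is assumed:
const knex = require('knex')(require('./knexfile')); // knexfile assumed
knex.migrate.latest()
    .then(() => knex('proxy_host').columnInfo())
    .then((info) => {
        // e.g. [ 'openidc_enabled', 'openidc_redirect_uri', ... ]
        console.log(Object.keys(info).filter((c) => c.startsWith('openidc_')));
    })
    .finally(() => knex.destroy());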

View File

@ -0,0 +1,40 @@
const migrate_name = 'openid_allowed_users';
const logger = require('../logger').migrate;
/**
* Migrate
*
* @see http://knexjs.org/#Schema
*
* @param {Object} knex
* @param {Promise} Promise
* @returns {Promise}
*/
exports.up = function (knex/*, Promise*/) {
logger.info('[' + migrate_name + '] Migrating Up...');
return knex.schema.table('proxy_host', function (proxy_host) {
proxy_host.integer('openidc_restrict_users_enabled').notNull().unsigned().defaultTo(0);
proxy_host.json('openidc_allowed_users').notNull().defaultTo([]);
})
.then(() => {
logger.info('[' + migrate_name + '] proxy_host Table altered');
});
};
/**
* Undo Migrate
*
* @param {Object} knex
* @param {Promise} Promise
* @returns {Promise}
*/
exports.down = function (knex/*, Promise*/) {
return knex.schema.table('proxy_host', function (proxy_host) {
proxy_host.dropColumn('openidc_restrict_users_enabled');
proxy_host.dropColumn('openidc_allowed_users');
})
.then(() => {
logger.info('[' + migrate_name + '] proxy_host Table altered');
});
};

View File

@ -1,50 +0,0 @@
const migrate_name = 'stream_domain';
const logger = require('../logger').migrate;
const internalNginx = require('../internal/nginx');
async function regenerateDefaultHost(knex) {
const row = await knex('setting').select('*').where('id', 'default-site').first();
if (!row) {
return Promise.resolve();
}
return internalNginx.deleteConfig('default')
.then(() => {
return internalNginx.generateConfig('default', row);
})
.then(() => {
return internalNginx.test();
})
.then(() => {
return internalNginx.reload();
});
}
/**
* Migrate
*
* @see http://knexjs.org/#Schema
*
* @param {Object} knex
* @param {Promise} Promise
* @returns {Promise}
*/
exports.up = function (knex) {
logger.info('[' + migrate_name + '] Migrating Up...');
return regenerateDefaultHost(knex);
};
/**
* Undo Migrate
*
* @param {Object} knex
* @param {Promise} Promise
* @returns {Promise}
*/
exports.down = function (knex) {
logger.info('[' + migrate_name + '] Migrating Down...');
return regenerateDefaultHost(knex);
};

View File

@ -50,6 +50,7 @@ class AccessList extends Model {
},
modify: function (qb) {
qb.where('user.is_deleted', 0);
qb.omit(['id', 'created_on', 'modified_on', 'is_deleted', 'email', 'roles']);
}
},
items: {
@ -58,6 +59,9 @@ class AccessList extends Model {
join: {
from: 'access_list.id',
to: 'access_list_auth.access_list_id'
},
modify: function (qb) {
qb.omit(['id', 'created_on', 'modified_on', 'access_list_id', 'meta']);
}
},
clients: {
@ -66,6 +70,9 @@ class AccessList extends Model {
join: {
from: 'access_list.id',
to: 'access_list_client.access_list_id'
},
modify: function (qb) {
qb.omit(['id', 'created_on', 'modified_on', 'access_list_id', 'meta']);
}
},
proxy_hosts: {
@ -77,10 +84,19 @@ class AccessList extends Model {
},
modify: function (qb) {
qb.where('proxy_host.is_deleted', 0);
qb.omit(['is_deleted', 'meta']);
}
}
};
}
get satisfy() {
return this.satisfy_any ? 'satisfy any' : 'satisfy all';
}
get passauth() {
return this.pass_auth ? '' : 'proxy_set_header Authorization "";';
}
}
module.exports = AccessList;
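The satisfy and passauth getters added above turn two integer columns into ready-to-print nginx fragments, which is what lets the Liquid templates later in this diff interpolate {{ access_list.satisfy }} and {{ access_list.passauth }} directly. Their behaviour, with minimal stand-ins for the model:
const accessList = {
    satisfy_any: 1,
    pass_auth: 0,
    get satisfy() { return this.satisfy_any ? 'satisfy any' : 'satisfy all'; },
    get passauth() { return this.pass_auth ? '' : 'proxy_set_header Authorization "";'; },
};
console.log(accessList.satisfy);  // 'satisfy any'
console.log(accessList.passauth); // 'proxy_set_header Authorization "";'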

View File

@ -45,6 +45,7 @@ class AccessListAuth extends Model {
},
modify: function (qb) {
qb.where('access_list.is_deleted', 0);
qb.omit(['created_on', 'modified_on', 'is_deleted', 'access_list_id']);
}
}
};

View File

@ -45,10 +45,15 @@ class AccessListClient extends Model {
},
modify: function (qb) {
qb.where('access_list.is_deleted', 0);
qb.omit(['created_on', 'modified_on', 'is_deleted', 'access_list_id']);
}
}
};
}
get rule() {
return `${this.directive} ${this.address}`;
}
}
module.exports = AccessListClient;

View File

@ -43,6 +43,9 @@ class AuditLog extends Model {
join: {
from: 'audit_log.user_id',
to: 'user.id'
},
modify: function (qb) {
qb.omit(['id', 'created_on', 'modified_on', 'roles']);
}
}
};

View File

@ -74,6 +74,9 @@ class Auth extends Model {
},
filter: {
is_deleted: 0
},
modify: function (qb) {
qb.omit(['is_deleted']);
}
}
};

View File

@ -63,6 +63,7 @@ class Certificate extends Model {
},
modify: function (qb) {
qb.where('user.is_deleted', 0);
qb.omit(['id', 'created_on', 'modified_on', 'is_deleted', 'email', 'roles']);
}
}
};

View File

@ -59,6 +59,7 @@ class DeadHost extends Model {
},
modify: function (qb) {
qb.where('user.is_deleted', 0);
qb.omit(['id', 'created_on', 'modified_on', 'is_deleted', 'email', 'roles']);
}
},
certificate: {
@ -70,6 +71,7 @@ class DeadHost extends Model {
},
modify: function (qb) {
qb.where('certificate.is_deleted', 0);
qb.omit(['id', 'created_on', 'modified_on', 'is_deleted']);
}
}
};

View File

@ -1,13 +1,13 @@
const db = require('../db');
const config = require('../lib/config');
const config = require('config');
const Model = require('objection').Model;
Model.knex(db);
module.exports = function () {
if (config.isSqlite()) {
// eslint-disable-next-line
return Model.raw("datetime('now','localtime')");
if (config.database.knex && config.database.knex.client === 'sqlite3') {
return Model.raw('datetime(\'now\',\'localtime\')');
} else {
return Model.raw('NOW()');
}
return Model.raw('NOW()');
};
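Both sides of the now-helper diff answer the same problem: sqlite has no NOW(), so the default-timestamp expression must be datetime('now','localtime') there. A usage sketch in a model hook; the helper filename is assumed:
const now = require('./now_helper'); // helper shown above; filename assumed
class ExampleModel {
    $beforeInsert() {
        // Raw SQL default: NOW() on mysql, datetime('now','localtime') on sqlite.
        this.created_on = now();
        this.modified_on = now();
    }
}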

View File

@ -20,12 +20,23 @@ class ProxyHost extends Model {
this.domain_names = [];
}
// Default for openidc_allowed_users
if (typeof this.openidc_allowed_users === 'undefined') {
this.openidc_allowed_users = [];
}
// Default for meta
if (typeof this.meta === 'undefined') {
this.meta = {};
}
// Openidc defaults
if (typeof this.openidc_auth_method === 'undefined') {
this.openidc_auth_method = 'client_secret_post';
}
this.domain_names.sort();
this.openidc_allowed_users.sort();
}
$beforeUpdate () {
@ -35,6 +46,11 @@ class ProxyHost extends Model {
if (typeof this.domain_names !== 'undefined') {
this.domain_names.sort();
}
// Sort openidc_allowed_users
if (typeof this.openidc_allowed_users !== 'undefined') {
this.openidc_allowed_users.sort();
}
}
static get name () {
@ -46,7 +62,7 @@ class ProxyHost extends Model {
}
static get jsonAttributes () {
return ['domain_names', 'meta', 'locations'];
return ['domain_names', 'meta', 'locations', 'openidc_allowed_users'];
}
static get relationMappings () {
@ -60,6 +76,7 @@ class ProxyHost extends Model {
},
modify: function (qb) {
qb.where('user.is_deleted', 0);
qb.omit(['id', 'created_on', 'modified_on', 'is_deleted', 'email', 'roles']);
}
},
access_list: {
@ -71,6 +88,7 @@ class ProxyHost extends Model {
},
modify: function (qb) {
qb.where('access_list.is_deleted', 0);
qb.omit(['id', 'created_on', 'modified_on', 'is_deleted']);
}
},
certificate: {
@ -82,6 +100,7 @@ class ProxyHost extends Model {
},
modify: function (qb) {
qb.where('certificate.is_deleted', 0);
qb.omit(['id', 'created_on', 'modified_on', 'is_deleted']);
}
}
};
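The hook changes above guarantee that every proxy host carries a sorted allow-list and a concrete auth method even when the API payload omits them, and the jsonAttributes addition lets the new array round-trip through the JSON column. A stand-alone stand-in for the insert hook:
// Mirrors the $beforeInsert defaults added above:
const applyInsertDefaults = (row) => {
    if (typeof row.openidc_allowed_users === 'undefined') {
        row.openidc_allowed_users = [];
    }
    if (typeof row.openidc_auth_method === 'undefined') {
        row.openidc_auth_method = 'client_secret_post';
    }
    row.domain_names.sort();
    row.openidc_allowed_users.sort();
    return row;
};
console.log(applyInsertDefaults({ domain_names: ['b.example.com', 'a.example.com'] }));
// { domain_names: [ 'a.example.com', 'b.example.com' ],
//   openidc_allowed_users: [], openidc_auth_method: 'client_secret_post' }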

View File

@ -1,4 +1,3 @@
// Objection Docs:
// http://vincit.github.io/objection.js/
@ -60,6 +59,7 @@ class RedirectionHost extends Model {
},
modify: function (qb) {
qb.where('user.is_deleted', 0);
qb.omit(['id', 'created_on', 'modified_on', 'is_deleted', 'email', 'roles']);
}
},
certificate: {
@ -71,6 +71,7 @@ class RedirectionHost extends Model {
},
modify: function (qb) {
qb.where('certificate.is_deleted', 0);
qb.omit(['id', 'created_on', 'modified_on', 'is_deleted']);
}
}
};

View File

@ -46,6 +46,7 @@ class Stream extends Model {
},
modify: function (qb) {
qb.where('user.is_deleted', 0);
qb.omit(['id', 'created_on', 'modified_on', 'is_deleted', 'email', 'roles']);
}
}
};

View File

@ -6,36 +6,44 @@
const _ = require('lodash');
const jwt = require('jsonwebtoken');
const crypto = require('crypto');
const config = require('../lib/config');
const error = require('../lib/error');
const logger = require('../logger').global;
const ALGO = 'RS256';
let public_key = null;
let private_key = null;
function checkJWTKeyPair() {
if (!public_key || !private_key) {
let config = require('config');
public_key = config.get('jwt.pub');
private_key = config.get('jwt.key');
}
}
module.exports = function () {
let token_data = {};
const self = {
let self = {
/**
* @param {Object} payload
* @returns {Promise}
*/
create: (payload) => {
if (!config.getPrivateKey()) {
logger.error('Private key is empty!');
}
// sign with RSA SHA256
const options = {
let options = {
algorithm: ALGO,
expiresIn: payload.expiresIn || '1d'
};
payload.jti = crypto.randomBytes(12)
.toString('base64')
.substring(-8);
.substr(-8);
checkJWTKeyPair();
return new Promise((resolve, reject) => {
jwt.sign(payload, config.getPrivateKey(), options, (err, token) => {
jwt.sign(payload, private_key, options, (err, token) => {
if (err) {
reject(err);
} else {
@ -54,15 +62,13 @@ module.exports = function () {
* @returns {Promise}
*/
load: function (token) {
if (!config.getPublicKey()) {
logger.error('Public key is empty!');
}
return new Promise((resolve, reject) => {
checkJWTKeyPair();
try {
if (!token || token === null || token === 'null') {
reject(new error.AuthError('Empty token'));
} else {
jwt.verify(token, config.getPublicKey(), {ignoreExpiration: false, algorithms: [ALGO]}, (err, result) => {
jwt.verify(token, public_key, {ignoreExpiration: false, algorithms: [ALGO]}, (err, result) => {
if (err) {
if (err.name === 'TokenExpiredError') {
@ -77,6 +83,8 @@ module.exports = function () {
// Hack: some tokens out in the wild have a scope of 'all' instead of 'user'.
// For 30 days at least, we need to replace 'all' with user.
if ((typeof token_data.scope !== 'undefined' && _.indexOf(token_data.scope, 'all') !== -1)) {
//console.log('Warning! Replacing "all" scope with "user"');
token_data.scope = ['user'];
}
@ -126,7 +134,7 @@ module.exports = function () {
* @returns {Integer}
*/
getUserId: (default_value) => {
const attrs = self.get('attrs');
let attrs = self.get('attrs');
if (attrs && typeof attrs.id !== 'undefined' && attrs.id) {
return attrs.id;
}
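Both token variants sign RS256 JWTs with a short random jti; they differ only in where the keypair lives (the lib/config accessors versus the config package) and in substring versus the deprecated substr. Note that substring(-8) clamps the negative index to 0 and keeps the whole base64 string, whereas substr(-8) kept only the last eight characters. A self-contained round-trip sketch, assuming jsonwebtoken and node-rsa are installed:
const jwt = require('jsonwebtoken');
const crypto = require('crypto');
const NodeRSA = require('node-rsa');
const pair = new NodeRSA({ b: 2048 }); // throwaway keypair for the sketch
const privateKey = pair.exportKey('private');
const publicKey = pair.exportKey('public');
const payload = {
    scope: ['user'],
    jti: crypto.randomBytes(12).toString('base64').substring(-8),
};
const token = jwt.sign(payload, privateKey, { algorithm: 'RS256', expiresIn: '1d' });
console.log(jwt.verify(token, publicKey, { algorithms: ['RS256'] }).scope); // [ 'user' ]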

View File

@ -43,6 +43,9 @@ class User extends Model {
join: {
from: 'user.id',
to: 'user_permission.user_id'
},
modify: function (qb) {
qb.omit(['id', 'created_on', 'modified_on', 'user_id']);
}
}
};

View File

@ -10,22 +10,29 @@
"bcrypt": "^5.0.0",
"body-parser": "^1.19.0",
"compression": "^1.7.4",
"express": "^4.17.3",
"config": "^3.3.1",
"diskdb": "^0.1.17",
"express": "^4.17.1",
"express-fileupload": "^1.1.9",
"gravatar": "^1.8.0",
"html-entities": "^1.2.1",
"json-schema-ref-parser": "^8.0.0",
"jsonwebtoken": "^9.0.0",
"knex": "2.4.2",
"liquidjs": "10.6.1",
"jsonwebtoken": "^8.5.1",
"knex": "^0.20.13",
"liquidjs": "^9.11.10",
"lodash": "^4.17.21",
"moment": "^2.29.4",
"moment": "^2.24.0",
"mysql": "^2.18.1",
"node-rsa": "^1.0.8",
"objection": "3.0.1",
"nodemon": "^2.0.2",
"objection": "^2.1.3",
"path": "^0.12.7",
"signale": "1.4.0",
"sqlite3": "5.1.6",
"temp-write": "^4.0.0"
"pg": "^7.12.1",
"restler": "^3.4.0",
"signale": "^1.4.0",
"sqlite3": "^4.1.1",
"temp-write": "^4.0.0",
"unix-timestamp": "^0.2.0"
},
"signale": {
"displayDate": true,
@ -34,9 +41,8 @@
"author": "Jamie Curnow <jc@jc21.com>",
"license": "MIT",
"devDependencies": {
"eslint": "^8.36.0",
"eslint": "^6.8.0",
"eslint-plugin-align-assignments": "^1.1.2",
"nodemon": "^2.0.2",
"prettier": "^2.0.4"
}
}

View File

@ -68,32 +68,6 @@ router
.catch(next);
});
/**
* Test HTTP challenge for domains
*
* /api/nginx/certificates/test-http
*/
router
.route('/test-http')
.options((req, res) => {
res.sendStatus(204);
})
.all(jwtdecode())
/**
* GET /api/nginx/certificates/test-http
*
* Test HTTP challenge for domains
*/
.get((req, res, next) => {
internalCertificate.testHttpsChallenge(res.locals.access, JSON.parse(req.query.domains))
.then((result) => {
res.status(200)
.send(result);
})
.catch(next);
});
/**
* Specific certificate
*
@ -235,6 +209,7 @@ router
.catch(next);
});
/**
* Download LE Certs
*

View File

@ -153,7 +153,7 @@
"example": "john@example.com",
"format": "email",
"type": "string",
"minLength": 6,
"minLength": 8,
"maxLength": 100
},
"password": {
@ -235,6 +235,43 @@
"description": "Should we cache assets",
"example": true,
"type": "boolean"
},
"openidc_enabled": {
"description": "Is OpenID Connect authentication enabled",
"example": true,
"type": "boolean"
},
"openidc_redirect_uri": {
"type": "string"
},
"openidc_discovery": {
"type": "string"
},
"openidc_auth_method": {
"type": "string",
"pattern": "^(client_secret_basic|client_secret_post)$"
},
"openidc_client_id": {
"type": "string"
},
"openidc_client_secret": {
"type": "string"
},
"openidc_restrict_users_enabled": {
"description": "Only allow a specific set of OpenID Connect emails to access the resource",
"example": true,
"type": "boolean"
},
"openidc_allowed_users": {
"type": "array",
"minItems": 0,
"items": {
"type": "string",
"description": "Email Address",
"example": "john@example.com",
"format": "email",
"minLength": 1
}
}
}
}
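The openidc_* definitions added above constrain the auth method to the two client-secret flows and require non-empty strings in the allow-list. A sketch of validating a payload against that shape with ajv, mirroring the options used earlier in this diff (the email format check is left out to keep the sketch dependency-free):
const Ajv = require('ajv');
const ajv = new Ajv({ allErrors: true, coerceTypes: true });
const schema = {
    type: 'object',
    properties: {
        openidc_auth_method: {
            type: 'string',
            pattern: '^(client_secret_basic|client_secret_post)$',
        },
        openidc_allowed_users: {
            type: 'array',
            items: { type: 'string', minLength: 1 },
        },
    },
};
console.log(ajv.validate(schema, {
    openidc_auth_method: 'client_secret_post',
    openidc_allowed_users: ['john@example.com'],
})); // true
console.log(ajv.validate(schema, { openidc_auth_method: 'private_key_jwt' })); // false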

View File

@ -157,17 +157,6 @@
"targetSchema": {
"type": "boolean"
}
},
{
"title": "Test HTTP Challenge",
"description": "Tests whether the HTTP challenge should work",
"href": "/nginx/certificates/{definitions.identity.example}/test-http",
"access": "private",
"method": "GET",
"rel": "info",
"http_header": {
"$ref": "../examples.json#/definitions/auth_header"
}
}
]
}

View File

@ -64,6 +64,30 @@
"advanced_config": {
"type": "string"
},
"openidc_enabled": {
"$ref": "../definitions.json#/definitions/openidc_enabled"
},
"openidc_redirect_uri": {
"$ref": "../definitions.json#/definitions/openidc_redirect_uri"
},
"openidc_discovery": {
"$ref": "../definitions.json#/definitions/openidc_discovery"
},
"openidc_auth_method": {
"$ref": "../definitions.json#/definitions/openidc_auth_method"
},
"openidc_client_id": {
"$ref": "../definitions.json#/definitions/openidc_client_id"
},
"openidc_client_secret": {
"$ref": "../definitions.json#/definitions/openidc_client_secret"
},
"openidc_restrict_users_enabled": {
"$ref": "../definitions.json#/definitions/openidc_restrict_users_enabled"
},
"openidc_allowed_users": {
"$ref": "../definitions.json#/definitions/openidc_allowed_users"
},
"enabled": {
"$ref": "../definitions.json#/definitions/enabled"
},
@ -161,6 +185,30 @@
"advanced_config": {
"$ref": "#/definitions/advanced_config"
},
"openidc_enabled": {
"$ref": "#/definitions/openidc_enabled"
},
"openidc_redirect_uri": {
"$ref": "#/definitions/openidc_redirect_uri"
},
"openidc_discovery": {
"$ref": "#/definitions/openidc_discovery"
},
"openidc_auth_method": {
"$ref": "#/definitions/openidc_auth_method"
},
"openidc_client_id": {
"$ref": "#/definitions/openidc_client_id"
},
"openidc_client_secret": {
"$ref": "#/definitions/openidc_client_secret"
},
"openidc_restrict_users_enabled": {
"$ref": "#/definitions/openidc_restrict_users_enabled"
},
"openidc_allowed_users": {
"$ref": "#/definitions/openidc_allowed_users"
},
"enabled": {
"$ref": "#/definitions/enabled"
},
@ -251,6 +299,30 @@
"advanced_config": {
"$ref": "#/definitions/advanced_config"
},
"openidc_enabled": {
"$ref": "#/definitions/openidc_enabled"
},
"openidc_redirect_uri": {
"$ref": "#/definitions/openidc_redirect_uri"
},
"openidc_discovery": {
"$ref": "#/definitions/openidc_discovery"
},
"openidc_auth_method": {
"$ref": "#/definitions/openidc_auth_method"
},
"openidc_client_id": {
"$ref": "#/definitions/openidc_client_id"
},
"openidc_client_secret": {
"$ref": "#/definitions/openidc_client_secret"
},
"openidc_restrict_users_enabled": {
"$ref": "#/definitions/openidc_restrict_users_enabled"
},
"openidc_allowed_users": {
"$ref": "#/definitions/openidc_allowed_users"
},
"enabled": {
"$ref": "#/definitions/enabled"
},
@ -324,6 +396,30 @@
"advanced_config": {
"$ref": "#/definitions/advanced_config"
},
"openidc_enabled": {
"$ref": "#/definitions/openidc_enabled"
},
"openidc_redirect_uri": {
"$ref": "#/definitions/openidc_redirect_uri"
},
"openidc_discovery": {
"$ref": "#/definitions/openidc_discovery"
},
"openidc_auth_method": {
"$ref": "#/definitions/openidc_auth_method"
},
"openidc_client_id": {
"$ref": "#/definitions/openidc_client_id"
},
"openidc_client_secret": {
"$ref": "#/definitions/openidc_client_secret"
},
"openidc_restrict_users_enabled": {
"$ref": "#/definitions/openidc_restrict_users_enabled"
},
"openidc_allowed_users": {
"$ref": "#/definitions/openidc_allowed_users"
},
"enabled": {
"$ref": "#/definitions/enabled"
},

View File

@ -1,4 +1,6 @@
const config = require('./lib/config');
const fs = require('fs');
const NodeRSA = require('node-rsa');
const config = require('config');
const logger = require('./logger').setup;
const certificateModel = require('./models/certificate');
const userModel = require('./models/user');
@ -7,6 +9,62 @@ const utils = require('./lib/utils');
const authModel = require('./models/auth');
const settingModel = require('./models/setting');
const dns_plugins = require('./global/certbot-dns-plugins');
const debug_mode = process.env.NODE_ENV !== 'production' || !!process.env.DEBUG;
/**
* Creates a new JWT RSA keypair if one is not already set in the config
*
* @returns {Promise}
*/
const setupJwt = () => {
return new Promise((resolve, reject) => {
// Now go and check if the jwt RSA keys have been created and if not, create them
if (!config.has('jwt') || !config.has('jwt.key') || !config.has('jwt.pub')) {
logger.info('Creating a new JWT key pair...');
// jwt keys are not configured properly
const filename = config.util.getEnv('NODE_CONFIG_DIR') + '/' + (config.util.getEnv('NODE_ENV') || 'default') + '.json';
let config_data = {};
try {
config_data = require(filename);
} catch (err) {
// do nothing
if (debug_mode) {
logger.debug(filename + ' config file could not be required');
}
}
// Now create the keys and save them in the config.
let key = new NodeRSA({ b: 2048 });
key.generateKeyPair();
config_data.jwt = {
key: key.exportKey('private').toString(),
pub: key.exportKey('public').toString(),
};
// Write config
fs.writeFile(filename, JSON.stringify(config_data, null, 2), (err) => {
if (err) {
logger.error('Could not write JWT key pair to config file: ' + filename);
reject(err);
} else {
logger.info('Wrote JWT key pair to config file: ' + filename);
delete require.cache[require.resolve('config')];
resolve();
}
});
} else {
// JWT key pair exists
if (debug_mode) {
logger.debug('JWT Keypair already exists');
}
resolve();
}
});
};
/**
* Creates a default admin user if one doesn't already exist in the database
@ -61,8 +119,8 @@ const setupDefaultUser = () => {
.then(() => {
logger.info('Initial admin setup completed');
});
} else if (config.debug()) {
logger.info('Admin user setup not required');
} else if (debug_mode) {
logger.debug('Admin user setup not required');
}
});
};
@ -93,8 +151,8 @@ const setupDefaultSettings = () => {
logger.info('Default settings added');
});
}
if (config.debug()) {
logger.info('Default setting setup not required');
if (debug_mode) {
logger.debug('Default setting setup not required');
}
});
};
@ -116,22 +174,20 @@ const setupCertbotPlugins = () => {
certificates.map(function (certificate) {
if (certificate.meta && certificate.meta.dns_challenge === true) {
const dns_plugin = dns_plugins[certificate.meta.dns_provider];
const dns_plugin = dns_plugins[certificate.meta.dns_provider];
const packages_to_install = `${dns_plugin.package_name}==${dns_plugin.package_version} ${dns_plugin.dependencies}`;
const packages_to_install = `${dns_plugin.package_name}${dns_plugin.version_requirement || ''} ${dns_plugin.dependencies}`;
if (plugins.indexOf(packages_to_install) === -1) plugins.push(packages_to_install);
// Make sure credentials file exists
const credentials_loc = '/etc/letsencrypt/credentials/credentials-' + certificate.id;
// Escape single quotes and backslashes
const escapedCredentials = certificate.meta.dns_provider_credentials.replaceAll('\'', '\\\'').replaceAll('\\', '\\\\');
const credentials_cmd = '[ -f \'' + credentials_loc + '\' ] || { mkdir -p /etc/letsencrypt/credentials 2> /dev/null; echo \'' + escapedCredentials + '\' > \'' + credentials_loc + '\' && chmod 600 \'' + credentials_loc + '\'; }';
const credentials_cmd = '[ -f \'' + credentials_loc + '\' ] || { mkdir -p /etc/letsencrypt/credentials 2> /dev/null; echo \'' + certificate.meta.dns_provider_credentials.replace('\'', '\\\'') + '\' > \'' + credentials_loc + '\' && chmod 600 \'' + credentials_loc + '\'; }';
promises.push(utils.exec(credentials_cmd));
}
});
if (plugins.length) {
const install_cmd = '. /opt/certbot/bin/activate && pip install --no-cache-dir --user ' + plugins.join(' ') + ' && deactivate';
const install_cmd = 'pip install ' + plugins.join(' ');
promises.push(utils.exec(install_cmd));
}
@ -167,7 +223,8 @@ const setupLogrotation = () => {
};
module.exports = function () {
return setupDefaultUser()
return setupJwt()
.then(setupDefaultUser)
.then(setupDefaultSettings)
.then(setupCertbotPlugins)
.then(setupLogrotation);

View File

@ -1,25 +0,0 @@
{% if access_list_id > 0 %}
{% if access_list.items.length > 0 %}
# Authorization
auth_basic "Authorization required";
auth_basic_user_file /data/access/{{ access_list_id }};
{% if access_list.pass_auth == 0 %}
proxy_set_header Authorization "";
{% endif %}
{% endif %}
# Access Rules: {{ access_list.clients | size }} total
{% for client in access_list.clients %}
{{client | nginxAccessRule}}
{% endfor %}
deny all;
# Access checks must...
{% if access_list.satisfy_any == 1 %}
satisfy any;
{% else %}
satisfy all;
{% endif %}
{% endif %}

View File

@ -1,14 +1,36 @@
location {{ path }} {
set $upstream {{ forward_scheme }}://{{ forward_host }}:{{ forward_port }}{{ forward_path }};
proxy_set_header Host $host;
proxy_set_header X-Forwarded-Scheme $scheme;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Real-IP $remote_addr;
proxy_pass {{ forward_scheme }}://{{ forward_host }}:{{ forward_port }}{{ forward_path }};
proxy_pass $upstream;
{% if access_list_id > 0 %}
{% if access_list.items.length > 0 %}
# Authorization
auth_basic "Authorization required";
auth_basic_user_file /data/access/{{ access_list_id }};
{{ access_list.passauth }}
{% endif %}
# Access Rules
{% for client in access_list.clients %}
{{- client.rule -}};
{% endfor %}deny all;
# Access checks must...
{% if access_list.satisfy %}
{{ access_list.satisfy }};
{% endif %}
{% endif %}
{% include "_access.conf" %}
{% include "_assets.conf" %}
{% include "_exploits.conf" %}
{% include "_forced_ssl.conf" %}
{% include "_hsts.conf" %}

View File

@ -0,0 +1,47 @@
{% if openidc_enabled == 1 or openidc_enabled == true -%}
access_by_lua_block {
local openidc = require("resty.openidc")
local opts = {
redirect_uri = "{{- openidc_redirect_uri -}}",
discovery = "{{- openidc_discovery -}}",
token_endpoint_auth_method = "{{- openidc_auth_method -}}",
client_id = "{{- openidc_client_id -}}",
client_secret = "{{- openidc_client_secret -}}",
scope = "openid email profile"
}
local res, err = openidc.authenticate(opts)
if err then
ngx.status = 500
ngx.say(err)
ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR)
end
{% if openidc_restrict_users_enabled == 1 or openidc_restrict_users_enabled == true -%}
local function contains(table, val)
for i=1,#table do
if table[i] == val then
return true
end
end
return false
end
local allowed_users = {
{% for user in openidc_allowed_users %}
"{{ user }}",
{% endfor %}
}
if not contains(allowed_users, res.id_token.email) then
ngx.exit(ngx.HTTP_FORBIDDEN)
end
{% endif -%}
ngx.req.set_header("X-OIDC-SUB", res.id_token.sub)
ngx.req.set_header("X-OIDC-EMAIL", res.id_token.email)
ngx.req.set_header("X-OIDC-NAME", res.id_token.name)
}
{% endif %}
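The template above wraps the protected location in an access_by_lua_block driving lua-resty-openidc: authenticate against the discovery document, fail closed with a 500 on error, optionally enforce the email allow-list, then pass sub/email/name upstream as X-OIDC-* headers. A sketch of rendering it from the backend with the same Liquid setup; every host value below is hypothetical:
const { Liquid } = require('liquidjs');
const engine = new Liquid({ root: __dirname + '/../templates/' }); // root as in this diff
engine.renderFile('_openid_connect.conf', {
    openidc_enabled: true,
    openidc_redirect_uri: 'https://app.example.com/redirect_uri',
    openidc_discovery: 'https://idp.example.com/.well-known/openid-configuration',
    openidc_auth_method: 'client_secret_post',
    openidc_client_id: 'npm',
    openidc_client_secret: 'hypothetical-secret',
    openidc_restrict_users_enabled: true,
    openidc_allowed_users: ['john@example.com'],
}).then(console.log); // emits the access_by_lua_block shown above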

View File

@ -7,9 +7,9 @@
server {
listen 80 default;
{% if ipv6 -%}
listen [::]:80 default;
listen [::]:80;
{% else -%}
#listen [::]:80 default;
#listen [::]:80;
{% endif %}
server_name default-host.localhost;
access_log /data/logs/default-host_access.log combined;

View File

@ -30,8 +30,29 @@ proxy_http_version 1.1;
location / {
{% include "_access.conf" %}
{% include "_hsts.conf" %}
{% if access_list_id > 0 %}
{% if access_list.items.length > 0 %}
# Authorization
auth_basic "Authorization required";
auth_basic_user_file /data/access/{{ access_list_id }};
{{ access_list.passauth }}
{% endif %}
# Access Rules
{% for client in access_list.clients %}
{{- client.rule -}};
{% endfor %}deny all;
# Access checks must...
{% if access_list.satisfy %}
{{ access_list.satisfy }};
{% endif %}
{% endif %}
{% include "_openid_connect.conf" %}
{% include "_hsts.conf" %}
{% if allow_websocket_upgrade == 1 or allow_websocket_upgrade == true %}
proxy_set_header Upgrade $http_upgrade;

File diff suppressed because it is too large.

View File

@ -3,7 +3,7 @@
# This file assumes that the frontend has been built using ./scripts/frontend-build
FROM jc21/nginx-full:certbot-node
FROM nginxproxymanager/nginx-full:node
ARG TARGETPLATFORM
ARG BUILD_VERSION
@ -25,7 +25,7 @@ RUN echo "fs.file-max = 65535" > /etc/sysctl.conf \
&& rm -rf /var/lib/apt/lists/*
# s6 overlay
COPY docker/scripts/install-s6 /tmp/install-s6
COPY scripts/install-s6 /tmp/install-s6
RUN /tmp/install-s6 "${TARGETPLATFORM}" && rm -f /tmp/install-s6
EXPOSE 80 81 443
@ -35,17 +35,16 @@ COPY frontend/dist /app/frontend
COPY global /app/global
WORKDIR /app
RUN yarn install \
&& yarn cache clean
RUN yarn install
# add late to limit cache-busting by modifications
COPY docker/rootfs /
# Remove frontend service not required for prod, dev nginx config as well
RUN rm -rf /etc/services.d/frontend /etc/nginx/conf.d/dev.conf \
&& chmod 644 /etc/logrotate.d/nginx-proxy-manager \
&& pip uninstall --yes setuptools \
&& pip install --no-cache-dir "setuptools==58.0.0"
RUN rm -rf /etc/services.d/frontend /etc/nginx/conf.d/dev.conf
# Change permission of logrotate config file
RUN chmod 644 /etc/logrotate.d/nginx-proxy-manager
VOLUME [ "/data", "/etc/letsencrypt" ]
ENTRYPOINT [ "/init" ]

View File

@ -1,4 +1,4 @@
FROM jc21/nginx-full:certbot-node
FROM nginxproxymanager/nginx-full:node
LABEL maintainer="Jamie Curnow <jc@jc21.com>"
ENV S6_LOGGING=0 \
@ -7,7 +7,7 @@ ENV S6_LOGGING=0 \
RUN echo "fs.file-max = 65535" > /etc/sysctl.conf \
&& apt-get update \
&& apt-get install -y jq python3-pip logrotate \
&& apt-get install -y certbot jq python3-pip logrotate \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
@ -21,8 +21,9 @@ RUN rm -f /etc/nginx/conf.d/production.conf
RUN chmod 644 /etc/logrotate.d/nginx-proxy-manager
# s6 overlay
COPY scripts/install-s6 /tmp/install-s6
RUN /tmp/install-s6 "${TARGETPLATFORM}" && rm -f /tmp/install-s6
RUN curl -L -o /tmp/s6-overlay-amd64.tar.gz "https://github.com/just-containers/s6-overlay/releases/download/v1.22.1.0/s6-overlay-amd64.tar.gz" \
&& tar -xzf /tmp/s6-overlay-amd64.tar.gz -C /
EXPOSE 80 81 443
ENTRYPOINT [ "/init" ]

View File

@ -1,18 +1,17 @@
# WARNING: This is a CI docker-compose file used for building and testing of the entire app, it should not be used for production.
version: '3.8'
version: "3"
services:
fullstack-mysql:
image: "${IMAGE}:ci-${BUILD_NUMBER}"
image: ${IMAGE}:ci-${BUILD_NUMBER}
environment:
DEBUG: 'true'
LE_STAGING: 'true'
NODE_ENV: "development"
FORCE_COLOR: 1
DB_MYSQL_HOST: 'db'
DB_MYSQL_PORT: '3306'
DB_MYSQL_USER: 'npm'
DB_MYSQL_PASSWORD: 'npm'
DB_MYSQL_NAME: 'npm'
DB_MYSQL_HOST: "db"
DB_MYSQL_PORT: 3306
DB_MYSQL_USER: "npm"
DB_MYSQL_PASSWORD: "npm"
DB_MYSQL_NAME: "npm"
volumes:
- npm_data:/data
expose:
@ -27,12 +26,11 @@ services:
timeout: 3s
fullstack-sqlite:
image: "${IMAGE}:ci-${BUILD_NUMBER}"
image: ${IMAGE}:ci-${BUILD_NUMBER}
environment:
DEBUG: 'true'
LE_STAGING: 'true'
NODE_ENV: "development"
FORCE_COLOR: 1
DB_SQLITE_FILE: '/data/mydb.sqlite'
DB_SQLITE_FILE: "/data/database.sqlite"
volumes:
- npm_data:/data
expose:
@ -47,26 +45,26 @@ services:
db:
image: jc21/mariadb-aria
environment:
MYSQL_ROOT_PASSWORD: 'npm'
MYSQL_DATABASE: 'npm'
MYSQL_USER: 'npm'
MYSQL_PASSWORD: 'npm'
MYSQL_ROOT_PASSWORD: "npm"
MYSQL_DATABASE: "npm"
MYSQL_USER: "npm"
MYSQL_PASSWORD: "npm"
volumes:
- db_data:/var/lib/mysql
cypress-mysql:
image: "${IMAGE}-cypress:ci-${BUILD_NUMBER}"
image: ${IMAGE}-cypress:ci-${BUILD_NUMBER}
build:
context: ../test/
dockerfile: cypress/Dockerfile
environment:
CYPRESS_baseUrl: 'http://fullstack-mysql:81'
CYPRESS_baseUrl: "http://fullstack-mysql:81"
volumes:
- cypress-logs:/results
command: cypress run --browser chrome --config-file=${CYPRESS_CONFIG:-cypress/config/ci.json}
cypress-sqlite:
image: "${IMAGE}-cypress:ci-${BUILD_NUMBER}"
image: ${IMAGE}-cypress:ci-${BUILD_NUMBER}
build:
context: ../test/
dockerfile: cypress/Dockerfile

View File

@ -1,7 +1,6 @@
# WARNING: This is a DEVELOPMENT docker-compose file, it should not be used for production.
version: '3.8'
version: "3.5"
services:
npm:
image: nginxproxymanager:dev
container_name: npm_core
@ -15,19 +14,14 @@ services:
networks:
- nginx_proxy_manager
environment:
PUID: 1000
PGID: 1000
NODE_ENV: "development"
FORCE_COLOR: 1
# specifically for dev:
DEBUG: 'true'
DEVELOPMENT: 'true'
LE_STAGING: 'true'
# db:
DB_MYSQL_HOST: 'db'
DB_MYSQL_PORT: '3306'
DB_MYSQL_USER: 'npm'
DB_MYSQL_PASSWORD: 'npm'
DB_MYSQL_NAME: 'npm'
DEVELOPMENT: "true"
DB_MYSQL_HOST: "db"
DB_MYSQL_PORT: 3306
DB_MYSQL_USER: "npm"
DB_MYSQL_PASSWORD: "npm"
DB_MYSQL_NAME: "npm"
# DB_SQLITE_FILE: "/data/database.sqlite"
# DISABLE_IPV6: "true"
volumes:
@ -43,18 +37,29 @@ services:
db:
image: jc21/mariadb-aria
container_name: npm_db
ports:
- 33306:3306
networks:
- nginx_proxy_manager
environment:
MYSQL_ROOT_PASSWORD: 'npm'
MYSQL_DATABASE: 'npm'
MYSQL_USER: 'npm'
MYSQL_PASSWORD: 'npm'
MYSQL_ROOT_PASSWORD: "npm"
MYSQL_DATABASE: "npm"
MYSQL_USER: "npm"
MYSQL_PASSWORD: "npm"
volumes:
- db_data:/var/lib/mysql
swagger:
image: "swaggerapi/swagger-ui:latest"
container_name: npm_swagger
ports:
- 3001:80
networks:
- nginx_proxy_manager
environment:
URL: "http://127.0.0.1:3081/api/schema"
PORT: "80"
depends_on:
- npm
volumes:
npm_data:
name: npm_core_data

View File

@ -1,29 +0,0 @@
#!/bin/bash
set -e
CYAN='\E[1;36m'
BLUE='\E[1;34m'
YELLOW='\E[1;33m'
RED='\E[1;31m'
RESET='\E[0m'
export CYAN BLUE YELLOW RED RESET
log_info () {
echo -e "${BLUE} ${CYAN}$1${RESET}"
}
log_error () {
echo -e "${RED} $1${RESET}"
}
# The `run` file will only execute 1 line so this helps keep things
# logically separated
log_fatal () {
echo -e "${RED}--------------------------------------${RESET}"
echo -e "${RED}ERROR: $1${RESET}"
echo -e "${RED}--------------------------------------${RESET}"
/run/s6/basedir/bin/halt
exit 1
}

View File

@ -0,0 +1,46 @@
#!/bin/bash
# This command reads the `DISABLE_IPV6` env var and will either enable
# or disable ipv6 in all nginx configs based on this setting.
# Lowercase
DISABLE_IPV6=$(echo "${DISABLE_IPV6:-}" | tr '[:upper:]' '[:lower:]')
CYAN='\E[1;36m'
BLUE='\E[1;34m'
YELLOW='\E[1;33m'
RED='\E[1;31m'
RESET='\E[0m'
FOLDER=$1
if [ "$FOLDER" == "" ]; then
echo -e "${RED} $0 requires an absolute folder path as the first argument!${RESET}"
echo -e "${YELLOW} ie: $0 /data/nginx${RESET}"
exit 1
fi
FILES=$(find "$FOLDER" -type f -name "*.conf")
if [ "$DISABLE_IPV6" == "true" ] || [ "$DISABLE_IPV6" == "on" ] || [ "$DISABLE_IPV6" == "1" ] || [ "$DISABLE_IPV6" == "yes" ]; then
# IPV6 is disabled
echo "Disabling IPV6 in hosts"
echo -e "${BLUE} ${CYAN}Disabling IPV6 in hosts: ${YELLOW}${FOLDER}${RESET}"
# Iterate over configs and run the regex
for FILE in $FILES
do
echo -e " ${BLUE} ${YELLOW}${FILE}${RESET}"
sed -E -i 's/^([^#]*)listen \[::\]/\1#listen [::]/g' "$FILE"
done
else
# IPV6 is enabled
echo -e "${BLUE} ${CYAN}Enabling IPV6 in hosts: ${YELLOW}${FOLDER}${RESET}"
# Iterate over configs and run the regex
for FILE in $FILES
do
echo -e " ${BLUE} ${YELLOW}${FILE}${RESET}"
sed -E -i 's/^(\s*)#listen \[::\]/\1listen [::]/g' "$FILE"
done
fi

View File

@ -0,0 +1,2 @@
*
!.gitignore

View File

@ -0,0 +1,3 @@
*
!.gitignore
!*.sh

View File

@ -0,0 +1,7 @@
#!/usr/bin/with-contenv bash
set -e
mkdir -p /data/logs
echo "Changing ownership of /data/logs to $(id -u):$(id -g)"
chown -R "$(id -u):$(id -g)" /data/logs

View File

@ -0,0 +1,29 @@
#!/usr/bin/with-contenv bash
# ref: https://github.com/linuxserver/docker-baseimage-alpine/blob/master/root/etc/cont-init.d/01-envfile
# in s6, environmental variables are written as text files for s6 to monitor
# search through full-path filenames for files ending in "__FILE"
for FILENAME in $(find /var/run/s6/container_environment/ | grep "__FILE$"); do
echo "[secret-init] Evaluating ${FILENAME##*/} ..."
# set SECRETFILE to the contents of the full-path textfile
SECRETFILE=$(cat ${FILENAME})
# SECRETFILE=${FILENAME}
# echo "[secret-init] Set SECRETFILE to ${SECRETFILE}" # DEBUG - rm for prod!
# if SECRETFILE exists / is not null
if [[ -f ${SECRETFILE} ]]; then
# strip the appended "__FILE" from environmental variable name ...
STRIPFILE=$(echo ${FILENAME} | sed "s/__FILE//g")
# echo "[secret-init] Set STRIPFILE to ${STRIPFILE}" # DEBUG - rm for prod!
# ... and set value to contents of secretfile
# since s6 uses text files, this is effectively "export ..."
printf $(cat ${SECRETFILE}) > ${STRIPFILE}
# echo "[secret-init] Set ${STRIPFILE##*/} to $(cat ${STRIPFILE})" # DEBUG - rm for prod!"
echo "[secret-init] Success! ${STRIPFILE##*/} set from ${FILENAME##*/}"
else
echo "[secret-init] cannot find secret in ${FILENAME}"
fi
done

View File

@ -0,0 +1,2 @@
*
!.gitignore

View File

@ -3,4 +3,3 @@ non-interactive = True
webroot-path = /data/letsencrypt-acme-challenge
key-type = ecdsa
elliptic-curve = secp384r1
preferred-chain = ISRG Root X1

View File

@ -30,9 +30,11 @@ server {
set $port "443";
server_name localhost;
access_log /data/logs/fallback_access.log standard;
access_log /data/logs/fallback-access.log standard;
error_log /dev/null crit;
ssl_reject_handshake on;
ssl_certificate /data/nginx/dummycert.pem;
ssl_certificate_key /data/nginx/dummykey.pem;
include conf.d/include/ssl-ciphers.conf;
return 444;
}

View File

@ -1,4 +1,4 @@
location ~* ^.*\.(css|js|jpe?g|gif|png|webp|woff|eot|ttf|svg|ico|css\.map|js\.map)$ {
location ~* ^.*\.(css|js|jpe?g|gif|png|woff|eot|ttf|svg|ico|css\.map|js\.map)$ {
if_modified_since off;
# use the public cache

View File

@ -2,7 +2,7 @@ add_header X-Served-By $host;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-Scheme $scheme;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Real-IP $remote_addr;
proxy_pass $forward_scheme://$server:$port$request_uri;
proxy_pass $forward_scheme://$server:$port;

View File

@ -1,6 +1,7 @@
# run nginx in foreground
daemon off;
pid /run/nginx/nginx.pid;
user root;
# Set number of worker processes automatically based on number of CPU cores.
worker_processes auto;
@ -14,7 +15,7 @@ error_log /data/logs/fallback_error.log warn;
include /etc/nginx/modules/*.conf;
events {
include /data/nginx/custom/events[.]conf;
worker_connections 1024;
}
http {
@ -42,6 +43,16 @@ http {
proxy_cache_path /var/lib/nginx/cache/public levels=1:2 keys_zone=public-cache:30m max_size=192m;
proxy_cache_path /var/lib/nginx/cache/private levels=1:2 keys_zone=private-cache:5m max_size=1024m;
lua_package_path '~/lua/?.lua;;';
lua_ssl_trusted_certificate /etc/ssl/certs/ca-certificates.crt;
lua_ssl_verify_depth 5;
# cache for discovery metadata documents
lua_shared_dict discovery 1m;
# cache for JWKs
lua_shared_dict jwks 1m;
log_format proxy '[$time_local] $upstream_cache_status $upstream_status $status - $request_method $scheme $host "$request_uri" [Client $remote_addr] [Length $body_bytes_sent] [Gzip $gzip_ratio] [Sent-to $server] "$http_user_agent" "$http_referer"';
log_format standard '[$time_local] $status - $request_method $scheme $host "$request_uri" [Client $remote_addr] [Length $body_bytes_sent] [Gzip $gzip_ratio] "$http_user_agent" "$http_referer"';

View File

@ -1,22 +0,0 @@
#!/command/with-contenv bash
# shellcheck shell=bash
set -e
. /bin/common.sh
log_info 'Starting backend ...'
if [ "$DEVELOPMENT" == "true" ]; then
cd /app || exit 1
# If yarn install fails: add --verbose --network-concurrency 1
s6-setuidgid npmuser yarn install
exec s6-setuidgid npmuser bash -c 'export HOME=/tmp/npmuserhome;node --max_old_space_size=250 --abort_on_uncaught_exception node_modules/nodemon/bin/nodemon.js'
else
cd /app || exit 1
while :
do
s6-setuidgid npmuser bash -c 'export HOME=/tmp/npmuserhome;node --abort_on_uncaught_exception --max_old_space_size=250 index.js'
sleep 1
done
fi

View File

@ -1 +0,0 @@
longrun

View File

@ -1,21 +0,0 @@
#!/command/with-contenv bash
# shellcheck shell=bash
set -e
# This service is DEVELOPMENT only.
if [ "$DEVELOPMENT" == "true" ]; then
. /bin/common.sh
cd /app/frontend || exit 1
log_info 'Starting frontend ...'
HOME=/tmp/npmuserhome
export HOME
mkdir -p /app/frontend/dist
chown -R npmuser:npmuser /app/frontend/dist
# If yarn install fails: add --verbose --network-concurrency 1
s6-setuidgid npmuser yarn install
exec s6-setuidgid npmuser yarn watch
else
exit 0
fi

View File

@ -1 +0,0 @@
longrun

View File

@ -1,10 +0,0 @@
#!/command/with-contenv bash
# shellcheck shell=bash
set -e
. /bin/common.sh
log_info 'Starting nginx ...'
exec s6-setuidgid npmuser nginx

View File

@ -1 +0,0 @@
longrun

View File

@ -1,18 +0,0 @@
#!/command/with-contenv bash
# shellcheck shell=bash
set -e
. /bin/common.sh
if [ "$(id -u)" != "0" ]; then
log_fatal "This docker container must be run as root, do not specify a user.\nYou can specify PUID and PGID env vars to run processes as that user and group after initialization."
fi
. /etc/s6-overlay/s6-rc.d/prepare/10-npmuser.sh
. /etc/s6-overlay/s6-rc.d/prepare/20-paths.sh
. /etc/s6-overlay/s6-rc.d/prepare/30-ownership.sh
. /etc/s6-overlay/s6-rc.d/prepare/40-dynamic.sh
. /etc/s6-overlay/s6-rc.d/prepare/50-ipv6.sh
. /etc/s6-overlay/s6-rc.d/prepare/60-secrets.sh
. /etc/s6-overlay/s6-rc.d/prepare/90-banner.sh

View File

@ -1,25 +0,0 @@
#!/command/with-contenv bash
# shellcheck shell=bash
set -e
PUID=${PUID:-911}
PGID=${PGID:-911}
log_info 'Configuring npmuser ...'
groupmod -g 1000 users || exit 1
if id -u npmuser; then
# user already exists
usermod -u "${PUID}" npmuser || exit 1
else
# Add npmuser user
useradd -u "${PUID}" -U -d /tmp/npmuserhome -s /bin/false npmuser || exit 1
fi
usermod -G users npmuser || exit 1
groupmod -o -g "${PGID}" npmuser || exit 1
# Home for npmuser
mkdir -p /tmp/npmuserhome
chown -R npmuser:npmuser /tmp/npmuserhome

View File

@ -1,41 +0,0 @@
#!/command/with-contenv bash
# shellcheck shell=bash
set -e
log_info 'Checking paths ...'
# Ensure /data is mounted
if [ ! -d '/data' ]; then
log_fatal '/data is not mounted! Check your docker configuration.'
fi
# Ensure /etc/letsencrypt is mounted
if [ ! -d '/etc/letsencrypt' ]; then
log_fatal '/etc/letsencrypt is not mounted! Check your docker configuration.'
fi
# Create required folders
mkdir -p \
/data/nginx \
/data/custom_ssl \
/data/logs \
/data/access \
/data/nginx/default_host \
/data/nginx/default_www \
/data/nginx/proxy_host \
/data/nginx/redirection_host \
/data/nginx/stream \
/data/nginx/dead_host \
/data/nginx/temp \
/data/letsencrypt-acme-challenge \
/run/nginx \
/tmp/nginx/body \
/var/log/nginx \
/var/lib/nginx/cache/public \
/var/lib/nginx/cache/private \
/var/cache/nginx/proxy_temp
touch /var/log/nginx/error.log || true
chmod 777 /var/log/nginx/error.log || true
chmod -R 777 /var/cache/nginx || true
chmod 644 /etc/logrotate.d/nginx-proxy-manager

View File

@ -1,21 +0,0 @@
#!/command/with-contenv bash
# shellcheck shell=bash
set -e
log_info 'Setting ownership ...'
# root
chown root /tmp/nginx
# npmuser
chown -R npmuser:npmuser \
/data \
/etc/letsencrypt \
/etc/nginx \
/run/nginx \
/tmp/nginx \
/var/cache/nginx \
/var/lib/logrotate \
/var/lib/nginx \
/var/log/nginx

View File

@ -1,17 +0,0 @@
#!/command/with-contenv bash
# shellcheck shell=bash
set -e
log_info 'Dynamic resolvers ...'
DISABLE_IPV6=$(echo "${DISABLE_IPV6:-}" | tr '[:upper:]' '[:lower:]')
# Dynamically generate resolvers file, if resolver is IPv6, enclose in `[]`
# thanks @tfmm
if [ "$DISABLE_IPV6" == "true" ] || [ "$DISABLE_IPV6" == "on" ] || [ "$DISABLE_IPV6" == "1" ] || [ "$DISABLE_IPV6" == "yes" ];
then
echo resolver "$(awk 'BEGIN{ORS=" "} $1=="nameserver" { sub(/%.*$/,"",$2); print ($2 ~ ":")? "["$2"]": $2}' /etc/resolv.conf) ipv6=off valid=10s;" > /etc/nginx/conf.d/include/resolvers.conf
else
echo resolver "$(awk 'BEGIN{ORS=" "} $1=="nameserver" { sub(/%.*$/,"",$2); print ($2 ~ ":")? "["$2"]": $2}' /etc/resolv.conf) valid=10s;" > /etc/nginx/conf.d/include/resolvers.conf
fi

View File

@ -1,36 +0,0 @@
#!/bin/bash
# This command reads the `DISABLE_IPV6` env var and will either enable
# or disable ipv6 in all nginx configs based on this setting.
log_info 'IPv6 ...'
# Lowercase
DISABLE_IPV6=$(echo "${DISABLE_IPV6:-}" | tr '[:upper:]' '[:lower:]')
process_folder () {
FILES=$(find "$1" -type f -name "*.conf")
SED_REGEX=
if [ "$DISABLE_IPV6" == "true" ] || [ "$DISABLE_IPV6" == "on" ] || [ "$DISABLE_IPV6" == "1" ] || [ "$DISABLE_IPV6" == "yes" ]; then
# IPV6 is disabled
echo "Disabling IPV6 in hosts in: $1"
SED_REGEX='s/^([^#]*)listen \[::\]/\1#listen [::]/g'
else
# IPV6 is enabled
echo "Enabling IPV6 in hosts in: $1"
SED_REGEX='s/^(\s*)#listen \[::\]/\1listen [::]/g'
fi
for FILE in $FILES
do
echo "- ${FILE}"
sed -E -i "$SED_REGEX" "$FILE"
done
# ensure the files are still owned by the npmuser
chown -R npmuser:npmuser "$1"
}
process_folder /etc/nginx/conf.d
process_folder /data/nginx

View File

@ -1,30 +0,0 @@
#!/command/with-contenv bash
# shellcheck shell=bash
set -e
# in s6, environmental variables are written as text files for s6 to monitor
# search through full-path filenames for files ending in "__FILE"
log_info 'Docker secrets ...'
for FILENAME in $(find /var/run/s6/container_environment/ | grep "__FILE$"); do
echo "[secret-init] Evaluating ${FILENAME##*/} ..."
# set SECRETFILE to the contents of the full-path textfile
SECRETFILE=$(cat "${FILENAME}")
# if SECRETFILE exists / is not null
if [[ -f "${SECRETFILE}" ]]; then
# strip the appended "__FILE" from environmental variable name ...
STRIPFILE=$(echo "${FILENAME}" | sed "s/__FILE//g")
# echo "[secret-init] Set STRIPFILE to ${STRIPFILE}" # DEBUG - rm for prod!
# ... and set value to contents of secretfile
# since s6 uses text files, this is effectively "export ..."
printf '%s' "$(cat "${SECRETFILE}")" > "${STRIPFILE}"
# echo "[secret-init] Set ${STRIPFILE##*/} to $(cat ${STRIPFILE})" # DEBUG - rm for prod!"
echo "Success: ${STRIPFILE##*/} set from ${FILENAME##*/}"
else
echo "Cannot find secret in ${FILENAME}"
fi
done

View File

@ -1,17 +0,0 @@
#!/command/with-contenv bash
# shellcheck shell=bash
set -e
echo
echo "-------------------------------------
_ _ ____ __ __
| \ | | _ \| \/ |
| \| | |_) | |\/| |
| |\ | __/| | | |
|_| \_|_| |_| |_|
-------------------------------------
User UID: $(id -u npmuser)
User GID: $(id -g npmuser)
-------------------------------------
"

View File

@ -1 +0,0 @@
oneshot

View File

@ -1,2 +0,0 @@
# shellcheck shell=bash
/etc/s6-overlay/s6-rc.d/prepare/00-all.sh

View File

@ -0,0 +1,6 @@
#!/usr/bin/execlineb -S1
if { s6-test ${1} -ne 0 }
if { s6-test ${1} -ne 256 }
s6-svscanctl -t /var/run/s6/services

View File

@ -0,0 +1,12 @@
#!/usr/bin/with-contenv bash
# This service is DEVELOPMENT only.
if [ "$DEVELOPMENT" == "true" ]; then
cd /app/frontend || exit 1
# If yarn install fails: add --verbose --network-concurrency 1
yarn install
yarn watch
else
exit 0
fi

View File

@ -0,0 +1,3 @@
#!/usr/bin/with-contenv bash
s6-svscanctl -t /var/run/s6/services

View File

@ -0,0 +1,19 @@
#!/usr/bin/with-contenv bash
mkdir -p /data/letsencrypt-acme-challenge
cd /app || echo
if [ "$DEVELOPMENT" == "true" ]; then
cd /app || exit 1
# If yarn install fails: add --verbose --network-concurrency 1
yarn install
node --max_old_space_size=250 --abort_on_uncaught_exception node_modules/nodemon/bin/nodemon.js
else
cd /app || exit 1
while :
do
node --abort_on_uncaught_exception --max_old_space_size=250 index.js
sleep 1
done
fi

View File

@ -0,0 +1 @@
/bin/true

View File

@ -0,0 +1,49 @@
#!/usr/bin/with-contenv bash
# Create required folders
mkdir -p /tmp/nginx/body \
/run/nginx \
/var/log/nginx \
/data/nginx \
/data/custom_ssl \
/data/logs \
/data/access \
/data/nginx/default_host \
/data/nginx/default_www \
/data/nginx/proxy_host \
/data/nginx/redirection_host \
/data/nginx/stream \
/data/nginx/dead_host \
/data/nginx/temp \
/var/lib/nginx/cache/public \
/var/lib/nginx/cache/private \
/var/cache/nginx/proxy_temp
touch /var/log/nginx/error.log && chmod 777 /var/log/nginx/error.log && chmod -R 777 /var/cache/nginx
chown root /tmp/nginx
# Dynamically generate resolvers file, if resolver is IPv6, enclose in `[]`
# thanks @tfmm
echo resolver "$(awk 'BEGIN{ORS=" "} $1=="nameserver" {print ($2 ~ ":")? "["$2"]": $2}' /etc/resolv.conf);" > /etc/nginx/conf.d/include/resolvers.conf
# Generate dummy self-signed certificate.
if [ ! -f /data/nginx/dummycert.pem ] || [ ! -f /data/nginx/dummykey.pem ]
then
echo "Generating dummy SSL certificate..."
openssl req \
-new \
-newkey rsa:2048 \
-days 3650 \
-nodes \
-x509 \
-subj '/O=localhost/OU=localhost/CN=localhost' \
-keyout /data/nginx/dummykey.pem \
-out /data/nginx/dummycert.pem
echo "Complete"
fi
# Handle IPV6 settings
/bin/handle-ipv6-setting /etc/nginx/conf.d
/bin/handle-ipv6-setting /data/nginx
exec nginx

View File

@ -18,14 +18,14 @@ services running on this Docker host:
```yml
networks:
default:
external: true
name: scoobydoo
external:
name: scoobydoo
```
Let's look at a Portainer example:
```yml
version: '3.8'
version: '3'
services:
portainer:
@ -38,8 +38,8 @@ services:
networks:
default:
external: true
name: scoobydoo
external:
name: scoobydoo
```
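The external network has to exist before either stack is brought up; if it doesn't yet, create it once on the Docker host:
```bash
# Create the shared network that both stacks attach to as "external".
docker network create scoobydoo
```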
Now in the NPM UI you can create a proxy host with `portainer` as the hostname,
@ -60,14 +60,14 @@ healthcheck:
timeout: 3s
```
## Docker File Secrets
## Docker Secrets
This image supports the use of Docker secrets to import from files and keep sensitive usernames or passwords from being passed or preserved in plaintext.
This image supports the use of Docker secrets to import from file and keep sensitive usernames or passwords from being passed or preserved in plaintext.
You can set any environment variable from a file by appending `__FILE` (double-underscore FILE) to the environment variable name.
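The secret file itself is just a single-line text file; a minimal sketch of creating one on the host (the directory name and value are illustrative):
```bash
# The file's only content is the secret value.
mkdir -p .secrets
printf 'changeme' > .secrets/MYSQL_PWD
chmod 600 .secrets/MYSQL_PWD   # keep it readable only by the owner
```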
```yml
version: '3.8'
version: "3.7"
secrets:
# Secrets are single-line text files where the sole content is the secret
@ -96,7 +96,9 @@ services:
# DB_MYSQL_PASSWORD: "npm" # use secret instead
DB_MYSQL_PASSWORD__FILE: /run/secrets/MYSQL_PWD
DB_MYSQL_NAME: "npm"
# If you would rather use Sqlite, remove all DB_MYSQL_* lines above
# If you would rather use Sqlite uncomment this
# and remove all DB_MYSQL_* lines above
# DB_SQLITE_FILE: "/data/database.sqlite"
# Uncomment this if IPv6 is not enabled on your host
# DISABLE_IPV6: 'true'
volumes:
@ -106,7 +108,6 @@ services:
- MYSQL_PWD
depends_on:
- db
db:
image: jc21/mariadb-aria
restart: unless-stopped
@ -150,7 +151,6 @@ You can add your custom configuration snippet files at `/data/nginx/custom` as f
- `/data/nginx/custom/root.conf`: Included at the very end of nginx.conf
- `/data/nginx/custom/http_top.conf`: Included at the top of the main http block
- `/data/nginx/custom/http.conf`: Included at the end of the main http block
- `/data/nginx/custom/events.conf`: Included at the end of the events block
- `/data/nginx/custom/stream.conf`: Included at the end of the main stream block
- `/data/nginx/custom/server_proxy.conf`: Included at the end of every proxy server block
- `/data/nginx/custom/server_redirect.conf`: Included at the end of every redirection server block
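For example, to drop a snippet into one of the include points listed above; a minimal sketch, assuming the header name and value are placeholders and the file is picked up on the next nginx reload:
```bash
# Included at the end of every proxy server block:
mkdir -p /data/nginx/custom
echo 'add_header X-Served-By "nginx-proxy-manager";' > /data/nginx/custom/server_proxy.conf
```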
@ -172,3 +172,26 @@ value by specifying it as a Docker environment variable. The default if not spec
X_FRAME_OPTIONS: "sameorigin"
...
```
## OpenID Connect SSO
You can secure any of your proxy hosts with OpenID Connect authentication, providing single sign-on (SSO) from an identity provider such as Azure AD or Keycloak. OpenID Connect support is implemented with the [`lua-resty-openidc`](https://github.com/zmartzone/lua-resty-openidc) library running on [`OpenResty`](https://github.com/openresty/openresty).
You will need a few things to get started with OpenID Connect:
- A registered application with your identity provider; the provider will give you a `Client ID` and a `Client Secret`. Public OpenID Connect applications (without a client secret) are not yet supported.
- A redirect URL the users are sent to after they log in with the identity provider. This can be any unused URL under the proxy host, such as `https://<proxy host url>/private/callback`; the server takes care of capturing that URL and redirecting you to the proxy host root. You will need to add this URL to the list of allowed redirect URLs for the application you registered with your identity provider.
- The well-known discovery endpoint of the identity provider you want to use. This is a URL, usually of the form `https://<provider URL>/.well-known/openid-configuration`, which you can verify with the sketch below.
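A minimal sketch for verifying that endpoint, assuming `curl` is available (the provider URL is a placeholder):
```bash
# Placeholder URL -- substitute your identity provider's domain.
# A healthy provider returns a JSON document listing its endpoints
# (authorization_endpoint, token_endpoint, jwks_uri, ...).
curl -s "https://login.example.com/.well-known/openid-configuration"
```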
Once you have all of this, you can configure the proxy host with OpenID Connect authentication.
You can also add some rudimentary access control through a list of allowed emails, in case your identity provider doesn't let you do that. If this option is enabled, any email not on that list will be denied access to the proxied host.
The proxy adds some headers based on the authentication result from the identity provider:
- `X-OIDC-SUB`: The subject identifier; according to the OpenID Connect spec, `A locally unique and never reassigned identifier within the Issuer for the End-User`.
- `X-OIDC-EMAIL`: The email of the user that logged in, as specified in the `id_token` returned from the identity provider. This is the same value that is checked against the allowed-email list.
- `X-OIDC-NAME`: The user's name claim from the `id_token`; note that not all ID tokens necessarily contain this claim.

View File

@ -21,6 +21,3 @@ Your best bet is to ask the [Reddit community for support](https://www.reddit.co
Gitter is best reserved for project contributors asking for help with internals, code reviews, etc.
## When adding username and password access control to a proxy host, I can no longer log in to the app.
Having an Access Control List (ACL) with username and password requires the browser to always send these credentials in the `Authorization` header on each request. If your proxied app also requires authentication (like Nginx Proxy Manager itself), the app will most likely use the `Authorization` header to transmit its own credentials too, as this is the standardized header meant for this kind of information. However, sending multiple headers of the same name is not allowed by the [internet standard](https://www.rfc-editor.org/rfc/rfc7230#section-3.2.2), and almost no apps support multiple values in the `Authorization` header, so one of the two logins will break. This can only be fixed by removing one of the logins or by changing the app to use a different, non-standard header for authorization.
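To make the clash concrete; a minimal sketch with `curl` against a placeholder host:
```bash
# The ACL credentials occupy the request's one Authorization header:
#   Authorization: Basic <base64 of aclUser:aclPass>
curl -u aclUser:aclPass https://app.example.com/
# A proxied app expecting its own credentials (e.g. a bearer token)
# in that same header now has nowhere standard to receive them.
```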

View File

@ -16,7 +16,7 @@
"alphanum-sort": "^1.0.2",
"ansi-colors": "^4.1.1",
"ansi-escapes": "^4.3.1",
"ansi-html": "^0.0.8",
"ansi-html": "^0.0.7",
"ansi-regex": "^5.0.0",
"ansi-styles": "^4.2.1",
"anymatch": "^3.1.1",
@ -213,7 +213,7 @@
"etag": "^1.8.1",
"eventemitter3": "^4.0.4",
"events": "^3.2.0",
"eventsource": "^2.0.2",
"eventsource": "^1.0.7",
"evp_bytestokey": "^1.0.3",
"execa": "^4.0.3",
"expand-brackets": "^4.0.0",
@ -357,7 +357,7 @@
"jsbn": "^1.1.0",
"jsesc": "^3.0.1",
"json-parse-better-errors": "^1.0.2",
"json-schema": "^0.4.0",
"json-schema": "^0.2.5",
"json-schema-traverse": "^0.4.1",
"json-stringify-safe": "^5.0.1",
"json3": "^3.3.3",
@ -394,7 +394,7 @@
"map-age-cleaner": "^0.1.3",
"map-cache": "^0.2.2",
"map-visit": "^1.0.0",
"markdown-it": "^12.3.2",
"markdown-it": "^11.0.0",
"markdown-it-anchor": "^5.3.0",
"markdown-it-chain": "^1.3.0",
"markdown-it-container": "^3.0.0",
@ -434,7 +434,7 @@
"neo-async": "^2.6.2",
"nice-try": "^2.0.1",
"no-case": "^3.0.3",
"node-forge": "^1.0.0",
"node-forge": "^0.10.0",
"node-libs-browser": "^2.2.1",
"node-releases": "^1.1.60",
"nopt": "^4.0.3",
@ -443,7 +443,7 @@
"normalize-url": "^5.1.0",
"npm-run-path": "^4.0.1",
"nprogress": "^0.2.0",
"nth-check": "^2.0.1",
"nth-check": "^1.0.2",
"num2fraction": "^1.2.2",
"number-is-nan": "^2.0.0",
"oauth-sign": "^0.9.0",
@ -612,7 +612,7 @@
"serve-index": "^1.9.1",
"serve-static": "^1.14.1",
"set-blocking": "^2.0.0",
"set-value": "^4.0.1",
"set-value": "^3.0.2",
"setimmediate": "^1.0.5",
"setprototypeof": "^1.2.0",
"sha.js": "^2.4.11",

View File

@ -1,44 +1,6 @@
# Full Setup Instructions
## Running the App
Create a `docker-compose.yml` file:
```yml
version: '3.8'
services:
app:
image: 'jc21/nginx-proxy-manager:latest'
restart: unless-stopped
ports:
# These ports are in format <host-port>:<container-port>
- '80:80' # Public HTTP Port
- '443:443' # Public HTTPS Port
- '81:81' # Admin Web Port
# Add any other Stream port you want to expose
# - '21:21' # FTP
# Uncomment the next line if you uncomment anything in the section
# environment:
# Uncomment this if you want to change the location of
# the SQLite DB file within the container
# DB_SQLITE_FILE: "/data/database.sqlite"
# Uncomment this if IPv6 is not enabled on your host
# DISABLE_IPV6: 'true'
volumes:
- ./data:/data
- ./letsencrypt:/etc/letsencrypt
```
Then:
```bash
docker-compose up -d
```
## Using MySQL / MariaDB Database
## MySQL Database
If you opt for the MySQL configuration you will have to provide the database server yourself. You can also use MariaDB. Here are the minimum supported versions:
@ -48,31 +10,41 @@ If you opt for the MySQL configuration you will have to provide the database ser
It's also easy to run your database in another Docker container and link it as part of the Docker stack, which is what the following examples do.
Here is an example of what your `docker-compose.yml` will look like when using a MariaDB container:
::: warning
When using a `mariadb` database, the NPM configuration file should still use the `mysql` engine!
:::
## Running the App
Via `docker-compose`:
```yml
version: '3.8'
version: "3"
services:
app:
image: 'jc21/nginx-proxy-manager:latest'
restart: unless-stopped
ports:
# These ports are in format <host-port>:<container-port>
- '80:80' # Public HTTP Port
- '443:443' # Public HTTPS Port
- '81:81' # Admin Web Port
# Public HTTP Port:
- '80:80'
# Public HTTPS Port:
- '443:443'
# Admin Web Port:
- '81:81'
# Add any other Stream port you want to expose
# - '21:21' # FTP
environment:
# Unix user and group IDs, optional
PUID: 1000
PGID: 1000
# Mysql/Maria connection parameters:
# These are the settings to access your db
DB_MYSQL_HOST: "db"
DB_MYSQL_PORT: 3306
DB_MYSQL_USER: "npm"
DB_MYSQL_PASSWORD: "npm"
DB_MYSQL_NAME: "npm"
# If you would rather use Sqlite uncomment this
# and remove all DB_MYSQL_* lines above
# DB_SQLITE_FILE: "/data/database.sqlite"
# Uncomment this if IPv6 is not enabled on your host
# DISABLE_IPV6: 'true'
volumes:
@ -80,7 +52,6 @@ services:
- ./letsencrypt:/etc/letsencrypt
depends_on:
- db
db:
image: 'jc21/mariadb-aria:latest'
restart: unless-stopped
@ -93,11 +64,13 @@ services:
- ./data/mysql:/var/lib/mysql
```
::: warning
_Please note, that `DB_MYSQL_*` environment variables will take precedent over `DB_SQLITE_*` variables. So if you keep the MySQL variables, you will not be able to use Sqlite._
Please note, that `DB_MYSQL_*` environment variables will take precedent over `DB_SQLITE_*` variables. So if you keep the MySQL variables, you will not be able to use SQLite.
Then:
:::
```bash
docker-compose up -d
```
## Running on Raspberry PI / ARM devices
@ -111,23 +84,74 @@ you don't have to worry about doing anything special and you can follow the comm
Check out the [dockerhub tags](https://hub.docker.com/r/jc21/nginx-proxy-manager/tags)
for a list of supported architectures and if you want one that doesn't exist,
[create a feature request](https://github.com/NginxProxyManager/nginx-proxy-manager/issues/new?assignees=&labels=enhancement&template=feature_request.md&title=).
[create a feature request](https://github.com/jc21/nginx-proxy-manager/issues/new?assignees=&labels=enhancement&template=feature_request.md&title=).
Also, if you don't already know how, follow [this guide to install docker and docker-compose](https://manre-universe.net/how-to-run-docker-and-docker-compose-on-raspbian/)
on Raspbian.
Please note that the `jc21/mariadb-aria:latest` image might have some problems on some ARM devices; if you want a separate database container, use the `yobasystems/alpine-mariadb:latest` image.
Via `docker-compose`:
```yml
version: "3"
services:
app:
image: 'jc21/nginx-proxy-manager:latest'
restart: unless-stopped
ports:
# Public HTTP Port:
- '80:80'
# Public HTTPS Port:
- '443:443'
# Admin Web Port:
- '81:81'
environment:
# These are the settings to access your db
DB_MYSQL_HOST: "db"
DB_MYSQL_PORT: 3306
DB_MYSQL_USER: "changeuser"
DB_MYSQL_PASSWORD: "changepass"
DB_MYSQL_NAME: "npm"
# If you would rather use Sqlite uncomment this
# and remove all DB_MYSQL_* lines above
# DB_SQLITE_FILE: "/data/database.sqlite"
# Uncomment this if IPv6 is not enabled on your host
# DISABLE_IPV6: 'true'
volumes:
- ./data/nginx-proxy-manager:/data
- ./letsencrypt:/etc/letsencrypt
depends_on:
- db
db:
image: yobasystems/alpine-mariadb:latest
restart: unless-stopped
environment:
MYSQL_ROOT_PASSWORD: "changeme"
MYSQL_DATABASE: "npm"
MYSQL_USER: "changeuser"
MYSQL_PASSWORD: "changepass"
volumes:
- ./data/mariadb:/var/lib/mysql
```
_Please note, that `DB_MYSQL_*` environment variables will take precedent over `DB_SQLITE_*` variables. So if you keep the MySQL variables, you will not be able to use Sqlite._
Then:
```bash
docker-compose up -d
```
## Initial Run
After the app is running for the first time, the following will happen:
1. GPG keys will be generated and saved in the data folder
2. The database will initialize with table structures
1. The database will initialize with table structures
2. GPG keys will be generated and saved in the configuration file
3. A default admin user will be created
This process can take a couple of minutes depending on your machine.
## Default Administrator User
```
@ -137,3 +161,49 @@ Password: changeme
Immediately after logging in with this default user you will be asked to modify your details and change your password.
## Configuration File
::: warning
This section is meant for advanced users
:::
If you would like more control over the database settings, you can define a custom config JSON file.
Here's an example of the `sqlite` configuration as it is generated from the environment variables:
```json
{
"database": {
"engine": "knex-native",
"knex": {
"client": "sqlite3",
"connection": {
"filename": "/data/database.sqlite"
},
"useNullAsDefault": true
}
}
}
```
You can modify the `knex` object with your custom configuration, but note that not all knex database clients are necessarily installed in the image.
Once you've created your configuration file, you can mount it to `/app/config/production.json` inside your container using:
```
[...]
services:
app:
image: 'jc21/nginx-proxy-manager:latest'
[...]
volumes:
- ./config.json:/app/config/production.json
[...]
[...]
```
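Since a malformed file can prevent the app from starting, it may be worth validating the JSON before mounting it; a minimal sketch, assuming `python3` is available on the host:
```bash
# Exits non-zero and prints the parse error if config.json is invalid.
python3 -m json.tool config.json > /dev/null && echo 'config.json is valid JSON'
```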
**Note:** After the first run of the application, the config file will be altered to include generated encryption keys unique to your installation.
These keys affect the login and session management of the application. If these keys change for any reason, all users will be logged out.

View File

@ -1,6 +1,6 @@
# Third Party
As this software gains popularity it's common to see it integrated with other platforms. Please be aware that unless specifically mentioned in the documentation of those
As this software gains popularity it's common to see it integrated with other platforms. Please be aware that unless specifically mentioned in the documenation of those
integrations, they are *not supported* by me.
Known integrations:

Some files were not shown because too many files have changed in this diff