* Remove obsolete scripts

* Update README
Lutchy Horace 2021-01-26 03:10:15 -05:00
parent 2500470a1a
commit c0e6080a66
18 changed files with 10 additions and 1199 deletions

README
View File

@@ -4,11 +4,18 @@ README
 This repository is a collection of scripts that I utilize throughout my infrastructure.
-You can follow issue tracking here https://redmine.lhprojects.net/projects/scripts
+You can follow issue tracking here https://gitlab.lhprojects.net/lhprojects-information-network/scripts
 Scripts
 ----------
-backup_files.sh: Script I use to rsync files to off-site storage center. You can
-costomize the configuration via backup.conf and then appending it as an argument
-during the script execution.
+| Command | Description |
+| --- | --- |
+| backup.sh | Script I use to rsync files to the off-site storage center. You can customize the configuration via backup.conf and then append it as an argument during script execution (see the example below). |
+| cachet_notifyV2.php | A script to update an issue on the Cachet website. |
+| convertPTR2InvidiualDomains.php | Migrate PTR records from a single zone to individual zones. |
+| createVhosts.sh | Modern VHOST script to create VHOSTs on RHEL systems. |
+| beep-notify.sh | Attempts to emit a beeping sound through the system speaker. |
+| rsync.sh | An rsync script to sync data to remote servers. |
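For example, a customized configuration can be passed to backup.sh like this (a hypothetical invocation based on the description above; backup.sh itself is not part of this diff):

```bash
# Customize a copy of backup.conf, then append it as the script's argument.
cp backup.conf my-backup.conf
./backup.sh my-backup.conf
```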

View File

@@ -1,30 +0,0 @@
<?php
/* Init Classes */
class GetOpts
{
    private $longOpts = [];
    function __construct()
    {
    }
    public function setLongOpt(string $opt, array $callback)
    {
        $this->longOpts[$opt] = $callback;
    }
}
class CheckDomain
{
    public function host(string $domain)
    {
        echo $domain;
    }
}
// host() is an instance method, so call it on an object rather than statically
(new CheckDomain)->host('lhprojects.net');
/* Get command line options */
$args = new GetOpts;

View File

@@ -1,6 +0,0 @@
NGINX_USER=nginx #Unused
NGINX_DIR=/etc/nginx
NGINX_CONF_D=$NGINX_DIR/conf.d
NGINX_SVC_RESTART='rc-service nginx reload'
#NGINX_LISTEN_IP=192.168.1.128
VHOSTS_DIR=/srv/vhosts

View File

@@ -1,176 +0,0 @@
#!/bin/bash
# README
# This script is specifically designed to work on Alpine Linux but
# may work with other distributions by including a $HOME/.enable_sites.conf.
#
# See example .enable_sites.conf.example in this repo.
#
# This script will create a vhost under Nginx
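#
# Example invocation (hypothetical arguments and script name; see the usage function below):
#   DRY_RUN=1 ./enable_site.sh example.com disable-php enable-le
# With DRY_RUN set, the generated Nginx config is written to /tmp/example.com.conf
# and the certbot request and nginx reload are skipped.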
DEBUG=0
set -e
[ $DEBUG -gt 1 ] && set -x
function usage {
echo -e "$0 domain.tld [disable-php|false] [enable-le]"
}
function cleanup {
test -n "$DRY_RUN" && rm $VHOST_CONF_FILE
}
function reload_nginx {
echo "${_DRY_RUN}Reloading Nginx"
if [ -z "$DRY_RUN" ]; then
nginx -t
$NGINX_SVC_RESTART
else
echo "Skipping..."
fi
}
# Init variables
_conf=$HOME/.enable_site.conf
_cwd="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
_bootstrap=${_cwd}/bootstrap.sh
# Init script
if test -f $_bootstrap; then
source $_bootstrap 2> /dev/null
else
echo "Unable to parse BOOTSTRAP: $_bootstrap"
exit 1
fi
test -f $_conf && source $_conf || debug "Unable to parse configuration: $_conf, using defaults"
NGINX_USER=${NGINX_USER:-nginx}
NGINX_DIR=${NGINX_DIR:-/etc/nginx}
NGINX_CONF_D=$NGINX_DIR/conf.d
NGINX_SVC_RESTART=${NGINX_SVC_RESTART:-rc-service nginx reload}
NGINX_SSL_DIR=$NGINX_DIR/ssl
VHOSTS_DIR=${VHOSTS_DIR:-/srv/vhosts}
SSL_CRT=/etc/nginx/webserver.crt
SSL_KEY=/etc/nginx/webserver.key
if test -z "$1"; then
usage
exit 0
fi
# VHOST dir
_vhost_dir=$VHOSTS_DIR/$1/htdocs
if [ -z "$DRY_RUN" ]; then
# Check domain is a valid domain
host $1 &> /dev/null || err "Invalid domain: $1"
echo "Creating $_vhost_dir"
mkdir -p $_vhost_dir
else
echo "DRY_RUN detected"
_DRY_RUN="DRY_RUN: "
echo "${_DRY_RUN}Creating $_vhost_dir"
fi
# Check if we should enable php
[ "$2" != "true" ] && [ "$2" != "disable-php" ] && _enable_php='include php.conf;'
echo "${_DRY_RUN}Creating NGINX configuration for $1"
VHOST_CONF_FILE=$NGINX_CONF_D/$1.conf
if [ -n "$DRY_RUN" ]; then
VHOST_CONF_FILE=/tmp/$1.conf
debug "${_DRY_RUN}Redirecting to $VHOST_CONF_FILE"
fi
# Set listen ip if provided
test -n "$NGINX_LISTEN_IP" && _v_listen_ip="$NGINX_LISTEN_IP:"
# set default listening port to 80
_v_listen=${_v_listen_ip}80
# Redirect plain-text to SSL
if [ "$3" = "enable-le" ]; then
# Change default listening port to 443
_v_listen="${_v_listen_ip}443 ssl"
# set ssl configuration
_v_ssl=$(cat << EOF
ssl_certificate ssl/$1.pem;
ssl_certificate_key ssl/$1.key;
EOF
)
# write the plain-text virtual host so
# we authenticate with Let's encrypt and
# redirect plain-text to SSL
cat << EOF > $VHOST_CONF_FILE
server {
listen ${_v_listen_ip}80;
server_name $1;
root $_vhost_dir;
location /.well-known {
autoindex on;
}
location / {
return 302 https://$1\$request_uri;
}
}
EOF
reload_nginx
echo "${_DRY_RUN}Requesting a Let's Encrypt certificate"
if [ -z "$DRY_RUN" ]; then
certbot certonly --webroot --webroot-path=$_vhost_dir -d $1
fi
_le_path=/etc/letsencrypt/live
_le_crt="$_le_path/$1/fullchain.pem $NGINX_SSL_DIR/$1.pem"
_le_key="$_le_path/$1/privkey.pem $NGINX_SSL_DIR/$1.key"
echo "${_DRY_RUN}Creating symlink $_le_crt"
[ -z "$DRY_RUN" ] && ln -s $_le_crt
echo "${_DRY_RUN}Creating symlink $_le_key"
[ -z "$DRY_RUN" ] && ln -s $_le_key
fi
cat << EOF >> $VHOST_CONF_FILE
server {
listen $_v_listen;
server_name $1;
root $_vhost_dir;
index index.php index.html;
error_log /var/log/nginx/$1.error.log;
access_log /var/log/nginx/$1.access.log main;
$_v_ssl
location / {
}
$_enable_php
}
EOF
if [ -n "$DRY_RUN" ]; then
echo -e "${_DRY_RUN}I would have wrote this: \n"
cat $VHOST_CONF_FILE
fi
reload_nginx
echo "Success!"
cleanup

View File

@@ -1,110 +0,0 @@
<?php
function deny($code, $reason) {
switch($code) {
case 401:
$header = '401 Unauthorized';
header('WWW-Authenticate: Basic realm="handleLXCAppArmorProfiles"');
break;
case 403:
$header = '403 Forbidden';
break;
case 503:
$header = '503 Service Unavailable';
break;
}
header('HTTP/1.0 ' . $header);
print "$code $reason";
exit(1);
}
function cleanup() {
# Do intelligent cleanup!
# Remove the temporary file(s) of every uploaded form field.
foreach($_FILES as $fileInfo) {
if (isset($fileInfo['tmp_name'])) {
if (is_countable($fileInfo['tmp_name'])) {
foreach($fileInfo['tmp_name'] as $fileName) {
@unlink($fileName);
}
} else {
@unlink($fileInfo['tmp_name']);
}
}
}
}
# Check if SAPI is cli
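# Example CLI run (hypothetical password): php handleLXCAppArmorProfiles.php hash 's3cret'
# prints the hash and writes "tar.lxc-apparmor-profiles-user:<hash>" to .htpasswd.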
if ('cli' === php_sapi_name()) {
if ('hash' === @$argv[1] && !empty($argv[2])) {
# We're going to hash the password, print the result,
# and write it to .htpasswd
$pw = password_hash($argv[2], PASSWORD_DEFAULT);
echo $pw;
file_put_contents('.htpasswd', "tar.lxc-apparmor-profiles-user:$pw");
} else {
print "Nothing to do here! Exiting...";
exit(0);
}
} else {
if (!file_exists('.htpasswd')) {
header('HTTP/1.0 503 Service Unavailable');
print '503 Service Unavailable';
exit(1);
}
$auth_creds = file_get_contents('.htpasswd');
$auth_creds = explode(':', $auth_creds);
if (!isset($_SERVER['PHP_AUTH_USER'])) {
deny(401, 'No authorize headers sent!');
} else {
$user = $_SERVER['PHP_AUTH_USER'];
$result = password_verify($_SERVER['PHP_AUTH_PW'], $auth_creds[1]);
if (true === $result && $auth_creds[0] === $user) {
if ('ProcessUpload' === $_SERVER['HTTP_X_APPARMOR_STATE']) {
# Process upload.
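# A matching upload request (as issued by tar.lxc-apparmor-profiles.sh) looks roughly like:
#   curl -H 'X-AppArmor-State: ProcessUpload' -H "X-Tar-Hash: <sha256>" \
#        -F "apparmor-profiles=@apparmor-profiles.tgz" --user "user:pass" https://<server>/handleLXCAppArmorProfiles.php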
$uploadedFileTmp = $_FILES['apparmor-profiles']['tmp_name'];
# Check if multiple files were uploaded!
if (is_countable($_FILES['apparmor-profiles']['tmp_name'])) {
cleanup();
deny(403, 'Multiple Uploads not supported!');
}
# Check file mime type is accepted
$finfo = new finfo(FILEINFO_MIME);
$mime = $finfo->file($uploadedFileTmp);
if ('application/x-gzip; charset=binary' !== $mime) {
# Clean up tmp file
cleanup();
deny(403, 'Forbidden mime-type: ' . $mime);
}
# Check if the hash matches what we were given
$uploadedHash = hash_file('sha256', $uploadedFileTmp);
if ($_SERVER['HTTP_X_TAR_HASH'] !== $uploadedHash) {
cleanup();
deny(403, 'File hash doesn\'t match!');
}
$dest = dirname(__FILE__) . '/apparmor/' . $_FILES['apparmor-profiles']['name'];
$result = @move_uploaded_file($uploadedFileTmp, $dest);
if (false === $result) {
cleanup();
deny(503, 'Error processing upload');
} else {
file_put_contents("$dest.sha256", hash_file('sha256', $dest));
cleanup();
echo '200 OK';
}
}
} else {
cleanup();
deny(401, 'Unauthorized');
}
}
}

View File

@@ -1,194 +0,0 @@
#!/bin/bash
LOG=/tmp/pulp_consumer_install.log
PYTHON_RHSM_URL=http://mirror.centos.org/centos/7/os/x86_64/Packages/python-rhsm-1.15.4-5.el7.x86_64.rpm
ERROR_MSG="Check $LOG log"
. /etc/os-release
export LOG
export ERROR_MSG
function debug {
if [ -n "$DEBUG" ]; then
echo "DEBUG: $1"
fi
}
function run_cmd {
debug "Executing: $1..."
$1 >> $LOG 2>&1
if [ $? != 0 ]; then
echo "Error: Failed running command '$1'. $ERROR_MSG"
exit 1;
fi
}
echo ""
echo "This script will now install pulp_consumer"
echo ""
export TMPDIR=`mktemp -d /tmp/pulp_consumer.XXXXXX`
echo -e "Creating temp directory $TMPDIR...\n"
OSVER=$(echo $VERSION | awk '{print int($1+0.5)}') # Bash can only compare whole numbers
if [ $ID = 'ol' ] && [ $OSVER -ge 7 ]; then
echo "Note: python-rhsm has been remove from Oracle Linux 7. Pulling"
echo " python-rhsm from mirror.centos.org"
echo "Installing $PYTHON_RHSM_URL now..."
run_cmd "yum install -y $PYTHON_RHSM_URL"
fi
echo "Retrieving pulp repo..."
run_cmd "wget https://repos.fedorapeople.org/repos/pulp/pulp/rhel-pulp.repo -O /etc/yum.repos.d/rhel-pulp.repo"
echo "Installing pulp_consumer and its dependencies..."
run_cmd 'yum groupinstall -y pulp-consumer-qpid'
echo "Installing ssl packages for pulp_consumer message query (qpid)..."
run_cmd 'yum install -y qpid-cpp-server-ssl'
echo ""
echo "Extracting embedded files..."
echo ""
cat > $TMPDIR/archive.enc <<'_ENC'
H4sIAGy7J1cAA+06a4+jSJLzGan/A9JKN7PyTBkwfrWuT+JpYxtsDPi1arV4g8GA
eRjwr79MsKtc1T0zq5Vm905yqLvLzoyMiIx3ZJcZR1lxstMXM46cn/4awAAMSBL+
xId97PEn/Ijjg/5PODHEcZIgsSHAw/uDIfYTiv1F8ryDIsv1FEV/Covc9OoXL051
0/4B3p/t/z+Fv6GqZ6OZned+5GaoH6G552eo44c2qqfgbxiiZnw62VFuW6hRo5bt
6EWY/4rqkQVw7YfdVyqZF5dwD/nbHR296GFhZ2BRz9FVESYoc3M8lAl9cBwtfcjJ
i+PMRn0HjeIczRLb9B0fUPbs1H5BkFbYBB7P7PQCDkOv9d0i1XM/jpC/AQwvzvLP
4Cf6HS7cifST/YKiwgcGv7a3boRsJbnLncdoFpuBnb+4dn6n8MvfXwCHJE4fOYFv
aJLGF98COmg0s+YU1SlClFoJAC3N9G9JYfxYNrAR+iYa2DVaZODCTpyiegGIRLlv
NpeDHPXE/5aktuNXb1QgE8gBbTcgGiDoO/W3LAtbNMXO2/uBy/B6CBQMPtw1Z9+M
eLMGVAvYbUm05sobB4Fi/pyhirJATRtcFZDLfDeyrYYFcAwdzVMQTEB4KHic+nnd
bG2ptSRIk89QjLxVzaMo4HNjCt1p7F7HBTBA5kEEoInWV0o/99DU1kM00bOsjFMr
Q+O0of4KelSjMRAV6DL1L3oOqEVAi6dWeajQSpynceSGNSDWuq0FxAWcAttOWrky
u7m/mhZ24+LvmdyEggoAfgMMc3dpGCqtOn5HFWgcNYosbeNuc3jwPflmI2vuDO1o
AofRc+9uayAd+KOjcA1KordRGjvQelCWSIcs76wZ6p2cvwKNtacsH9w+j9MaHr1h
Nyw+nEB/afQeJ3YEXOm3LK8BN0/PPBjq9Sn0owBSjd7pA03AzaBcIESQf7Q3/Yo0
Qdk4/EvogSA5AgGyFz/K72GEkmTvvYOjXYjeBSsPsYN27dzsJoHfbt69ttvy6d7Q
XkAYIQ9B0Hram0LRL2908jDrQvHBv/pvRhFZof1ipjkCkw31LgCb7DK36+wtQk92
lumu/X2k3kQGcjwEqkK9uuafxPl3qaI5+0+kCOQf71e+Pgjye7oD+43C/lzLj+qF
+rll7nvav907Du3PjdBmuw0XoGTt16+vKHeyyD2vh3ErNHTL1M7iIjWBE97i5jVB
NT6f1cBpTy1DuwKen4Fz2Tfg2W86eyT3evom09uZRt12En+DdH98GLLfa2KDlvlN
4DRCnPQIGL8J+OQWsSc/TeP0W+hn+e8LA+m9xSAQLdf9CCZGyKSlgEIKDZeH/PLA
754h3MSFtv2Dm1txGYWxDvPcZDWB3gOzYJw+0GmL0S2ptFUbfYcIq28jAYhM8DEy
3w6DivAD1TTJCwTVPyXW7qWPjd8nnj8QsKHWOMQPDvxLgvrWtz+W9b29yoYJXGtK
06tnCey7NOjfhXpkAVnC/uGND/x259E41YM7NDw4EQX3iKGiXlk9ZhHYhTUabEg+
SPDSVoyGKPj5oHB45FFhVpFCdqntAqdLXzNJ431NnH39PsrQbpGl3dA3PqSIN7R3
cdVmlLo4vTQGeLGaY82XH0TNd9gfPf19gvoN7P0G9x697mMOa+K+e2P4aPLfS3Y/
MturCX6D6y+JfWqS19o24vgtD35u0hKogyf/1h+ums9AHxAPlHAnh1bUzQAWDz8C
E0gYZrD7Se0z6JLz1mssO9Qf6kd7ul1Ffzn5UQE8v6my7c7XN6avBa8lgfbadP0h
CcZFnhR5m0STOAy/OQ37yKy/+dG3DHRIkZW1/KXiZICDwFFvy6hh56VtR3eJs7Yc
wR4MyNCGTpO4gMZt2JaAngvyyF7j99YEwWOWnuvwwkDJRmh/M+MwTr/vXJ1752r5
GcS7jSYAF7UzU08gyUZ8GzIpUz35lsffcqiRSL91wqDv/7gBgyO/dXug4QUO3KAk
MCSaSQA2fE3CKdK0HVQs2BU5rze5U3qB5Bspf23W22kCTFQNy/aY33YPL3cJm9WH
dPCqZ9PTwaAJSGcA0c9BVAGVA2XZb9K12RqNoMigGWuKbGvTr39oURT/qOtGAT9S
Wnud98KiI6ztjlyojbYFAuK0fpSZnv2Y34w0DsCFtPXitvWCcn7Tpf+cm8nPsCf9
GbRoP4PWDF6wVU5z8j6AAY01qC8/mO5u1OEi+svtwOf2K/jmw24cqP/ehf797urp
9ySa6a3V/nfs+4Mh0R69vGcfJ9DPgQtRorxqN9F2wPxA4TZUQU96Q731SyDjRtl7
mRqc1/UbyY0e+taNZdYUuZ/PiW+1Kkx1w/Dz0/nn73XXILWsTB2mrTc+v9wv8HdU
N7I4LHL7Ybh4LD3vJ4NmQITiNJ8BId+CLWde3ypZOxE2ir0xbhLOe+avyvuO9SNj
WOL+6x3vpveFAr41uvY9FFqeDQs4urTKeEj2Ten7mNUbXTZUfadNNjeL3VwIhtWr
l39Fbh6OAq999cd2hAF+gj86yZtlUWgE5Kb+DwUHboHZoxk6HvT0Q6xm+3U8WaVx
U91hUUvzNgTvQ3abj9ud1kRtlWnbiBxGbtIev7nhrZ682QcMZyBsYH6M7psPNO/9
yR+RRf5x+/z1gT5KkBjyn370esIrtJ731/Jo3n8H2O+8/2IETry+/xIYAdbhQzDx
fP/9d8BvEGhuIkgow61VgRcYSuWaVUQUBKZzZRjKXrpUKdCUK/DUfN+XelrJyvvZ
PD4I3sWUKJlb0DJVrq7cQqSCCYVrHO2JzCdksxErjqWWtCttwHGVxvhsv515xpar
WJVatOuxSk9p3JhSFXulpHZNVOloFn5CDhPPs1jK5kusko4ULrJcJanyFezrzdr1
/ZrIjirmSs1aKnv1E0KFG1VcUyXr7tmNLAtcadHGhPf3W9wTZaxkymZ9zpXr0tpW
pSiP4O3gGsuVhxrIsF3H+51cui7nixQ2YZTzRBGMHitzNCVrFEUKNFtScH9OxUBP
Mruza38pWq6JL2biJ8RjzxK7HZZZn8WycaJpeib6zrawKmbr7hzMZvlhRxhzpFxb
2o7tdDbZJawXCdvvnDiF7H9CZsN6HItT2lqvzQOGc9vLuleWcjiKjqlV1cpwEAfT
zdiyD9l6sp9kXoSbPrvAlUISygttfUISv+xXWne0H0qpS42GnL0MFv4uYaIZq1rK
bNIfu7FyEZVjHjEJNrR6U1cJNMPCllktiJNPyJybq+SWcCJvHpRWGS2WQV/mEiWc
Z6tJYHWP7ki4poORSFYH+YwpWk7SIXOIO9O+cnRi6hNCLC/lwRAmyqUnY9vrtVyw
lUzomWqd2UN9Jqnhzo9VZbWYEd6ESgZWtRxn4izYhrO92ckAhWx6iM8ydqBckaYo
7kd+yAI7AAfSSEbad3CPxpcesap0bjSqT8tPyFWQ7Yt4HVmqur+mRpKFmjczDmrU
uS62y021uVyKqbhQZtsL37G66fR63ZhbRrg4ix1DDrFPSM/ojU/LeDwyJNPmq0Th
aWKqDUTM0y+r2WVruatgJUmxPOoE9YifrsP9qqtuqcXay00zH31C9t2kOuyH/GiJ
J4uTXAynq1pwnGka0vvzeaWu+yI2dRZ6n8KH+4zuM/uhM+5jAxMXtsQK4z4h2Jkj
5pmbcYeNzGIT1WUMfZeqvq7Onc2hF1f8IDFGx3nJ9MzrlpXTzgVbnlZVtlxfQny6
AbcQp3Ob6HFGEB/E+liPOaLfx3z9aq3p3aJMbYnYEasg5vOdYUXCaLEcdIiZRK7N
TkSk8RekSRKcxH6fOP7TSe0J/zS89ZR/HY8/qf8DHBt+qP99nMSf9f/fAbTuolSe
p74B23ME/i+UkwKXsMJaap+8GgdpNuCjaDi3a4H9jOIsig/QcR8lcHQ8Qoc0Sg3Q
EYkOGbTHoRSOcgzKDFCMQrEh2uujXA8djFGKQDECRQCNB6af0f+W4ofv/4M8NCWr
tbABeQWdc/vXpoS7yKDIsrBVCM5e4E/GJQaKLsdTlMLQcxMUYuVIuRx1K8YnYhSH
tarFeWd2RjZsrqx1111rDqV2D10iwvfnckWeyWoE0r+2SaaqbA0Hy4RmVzsm6vV2
3aIzPztihM/qyQXhWLLgonVUjxjGok5pfeyWkTP21py/cbeOEkrCbLo6OUJ58phQ
3lLTw6yLzTTxMpUls+og+mZ3jWqX582ZIZurSN2QKrszszzQ5+NrZ6xrE3kslDgp
EWl3Vyn48hrOI72ec4KQVtYOEYarMaOeNMqk59Rlndj9xIjX+fzsbmtBqsfX6VjH
td727HU7UmlPc6pcr2y7o6aUPNUVGmFru5qtDnS0PirxRd921LrTX0yN3SSv12wq
d3CNoPNNalwTNRU2w7IMBspRrNOex6THrYJM7bPFzAjrVoYZUHIp7drXzwLth8Ek
kAhB5TvRWMCn0051Haiumu6lkXV2CrwqxwkyrHh7KelLmSaoQbRfnDJ147NGzGPM
lacHVowJcmziy7JWKmzqnZjQGo+s/Yoy5DHvOntk3J8lAm3a04MVrroOOZbL+QkL
DC2gfFsMeH17uopUdpb7pu+Sa/xa0upGrbK1b1tX9TxBhP4loo4xodFGmV7ImPOu
bs9RnC2pduZDjd8t/R5x4eU0sIjcwtwoCI2RdEiu3StR7K5gWq+VrY2FwcLkjBCU
XLlfdWYZm121wLcn/WI6cS+sN51w1wvWP8v0RNK3kkTugiU2GRD5EPFVS0jIdaRR
qbEbYpMonq2qeDfuzLZVJirT2Ot6uwz0XOWcdmX2qh6j3O2NMHLQKThyzyPkIWOC
wCrMUO+PruOZwp0PiSUm7qk3mzI7aufk9Ohsx0V9Ha4w36iYdXXIJG2+KOuKlFzE
7Zld8rDoYQPPG+1qPlekYxFfR1pyHmwG1W5OdOanjjOZRHY4Gzjzracbskt2NHE8
5zJ1ixQlHbCB1/eu8ZFgNVWeCl173pfdVuS6j3VpcxMP51y16Qwcpg5lzpPHhptk
XFBmGiKpah2GG6kyymUXXwkcJWiiFvX5GTVi9Z7JVNhsMCuH29VqKhURNXLkATHu
c/X4SBk8SSKOwtkac0qOO4KO+J0z1IzR9jzjqA4buGnBMwscV4JM8jVizKtKQWmj
YVhORtYm57tOt4OU84JYdYjuDorM0sMVcAtKEXhirgaKHGIdj9cniT2yi4lW2sxm
71OH3A6EI96bn+TRGRlcMOD2a2wxlAIQEQIfG8Xa704HRzqeVWdLGgxjSpZ3rD0u
tKM6MU+7zZDoDjsmKHmEvkDWfid08PNltdvLu/EqcnQb0wqPHnQrNqpz4mT2t1Kg
Hib8ZLCY8EJ6zcWjZU+peEJTwlVFkrynrX2nM1FWcoQb7LWOu+ds2dvKTDUc9lP/
LM4jY7YxzIEmHBYuv173hpXFk+KptGaujpw347WiLC4aqZdyvxeC76fASvs0v02d
WE2ScrefUuPxWpyb6gr3kxF3Aaki4y5pQs1NDvGOIrEkN3RwqOd6XkWqoygOI16l
akUrxG5oJbPd2lsmQGRKUnc+GcrRelKPTGum76YRgexPPD3aDPqiYnUODGF246Q3
ATblo4ryhu5ZZtSpsB342NEOOkeip5qMK4zcsXc6HfZugCEjfiuOzkwwksQBKyaE
y+vcdSrvDwwemYtFnS54pWMv+H1vrDGipA3c0swnZBXxbGEQozWyHJ8SIgqtXkcW
c9CwD5fayZKChz73u1r0HymeWWHAX5/40mW+aEpXUb9QoW7oJ727/NI8FTLSlxtb
P8sKO/0jxNS24C+h/NkjwBg+Alid+yMATR0/1t6MB/W4K8pZybSj84QrZxvtysmI
SJHNiwBDi1N5W3nA0vle5SSQnNt1T+TM6SYzWW4Fp2u4RlUiCJNNoE94DBFpcseq
HCGyWimqHC6xYiWGMVzrfVj77gkCPkAg//ILxEnK9O2mQCwW3F8RPg6b9G3YXFFw
X46ZZvBcGkbt72aS4vfIU6It8RmPnBmaX4/o1Tjq6s7GT6munx6nZX/Hr7eBJdG9
SzwIOHFszh3HcjpD8pyMakeLBH0kr3xSQayJsyiFGcbol/mqy1hdgufAhEofxnMN
cwOzcxgdGZ2bb+g9ZQVRVw7lemRR+HXaTzbO0kHmlLYpw5wuO5a080TJqmtiHae9
1RBP5AOLHdmd29MvY4dfaMDzz1YqxLLrX3jcPGage0GWB5kuuZgZT8LhKSy8CXY+
04bg9hY90cHTjWrp52mX7NGkjbE0N+4P+8uMpqxE4FazIRcg42BiTaS5P06M/mox
DDIrN/XejFvm7HCj7if4Nr8sw2Xm7S4soyZz8Tq/2Ny8kA6CNUgwAYkOGEOVoKtT
f+B91BJYgKMOks9Pj5tVwXv2an502dVoPyOwMwFqEpJZlCf2lF5emd6p7J2jUbRW
u9zGUPeuxOdTcnsMy+x4IU21Rx3ly6iz63hqfxYls/HG1YeIfu1Oqzk/yeSV0dnr
4j7MjaE13IJbScNppyh0UBePDuVNhY7iBY51yWmgRZEoJosVP18ihyNdLbA5MffI
fmcXyLs62PbdHqk56ygc7C6b3XZ7mJ56uOhgu8zYbGpu5sxIsa8W+mZh4kiaTY7Y
oq/sQ0XMwgW236dbsxjMWKJgdFK8XNxsaZzmm7rHzzozfsLPR6C9iwhHLDhO4HYI
FRzXbDg70Ecjtxerk6JF426Zn9IwC6iVp9RkZYx6Kq1z3YV5zaZe5pKJtDyZ8pfn
xP+EJzzhCU94whOe8IQnPOEJT3jCE57whCc84QlPeMITnvCEJzzhCU94whOe8IQn
/B+D/wUWlo//AFAAAA==
_ENC
debug "Creating $TMPDIR/files..."
mkdir $TMPDIR/files
debug "Extracting archive to $TMPDIR/files..."
cat $TMPDIR/archive.enc | openssl base64 -d | tar xz -C $TMPDIR/files
echo "Done"
ca_file=$TMPDIR/files/ca.crt
cl_file=$TMPDIR/files/client.crt
cf_file=$TMPDIR/files/consumer.conf
dest=/etc/pki/pulp/qpid
debug "Creating $dest..."
run_cmd "mkdir $dest"
echo "Copying $ca_file to $dest..."
run_cmd "cp $ca_file $dest"
echo "Copying $cl_file to $dest..."
run_cmd "cp $cl_file $dest"
conf_dest=/etc/pulp/consumer
echo "Copying $cf_file to $conf_dest..."
run_cmd "mv $conf_dest/consumer.conf $conf_dest/consumer.conf.orig"
run_cmd "cp $cf_file $conf_dest"
echo "Enabling systemd pulp-agent service..."
run_cmd "systemctl enable goferd"
run_cmd "systemctl start goferd"
debug "Cleaning up..."
run_cmd "rm -fR $TMPDIR"

View File

@@ -1,21 +0,0 @@
#!/usr/bin/env bash
#
# Auto renew letsencrypt certs
#
HOSTNAME=`hostname`
EMAIL="alerts@lhprojects.net"
OUTPUT=$(letsencrypt renew)
LE_STATUS=$?
echo $OUTPUT >> /var/log/letsencrypt/renew.log
if [ "$LE_STATUS" != 0 ]; then
SUBJECT="CRON: Letsencrypt automated renewal failed on ${HOSTNAME}"
else
SUBJECT="CRON: Renewing all letsencrypt certs on ${HOSTNAME}"
fi
echo $OUTPUT | mail -s "$SUBJECT" $EMAIL
# Restart nginx
systemctl restart nginx

View File

@@ -1,13 +0,0 @@
#!/bin/bash
# Utility script to mount ecryptfs on my PVE boxes
# This script is executed via systemd service file
PASSPHRASE_FILE=$HOME/.ecryptfs/passphrase
if [ ! -e ${PASSPHRASE_FILE} ];then
echo "EXITING: ${PASSPHRASE_FILE} missing!"
exit 1
fi
PASSPHRASE=$(cat $PASSPHRASE_FILE)
/usr/bin/printf "%s" "${PASSPHRASE}" | /usr/bin/ecryptfs-add-passphrase --fnek -
/sbin/mount.ecryptfs_private dump

View File

@@ -1,12 +0,0 @@
#!/bin/bash
#pulp-admin consumer group bind --consumer-id=$1 --repo-id=ecryptfs-x86_64
#pulp-admin consumer group bind --consumer-id=$1 --repo-id=hhvm-x86_64-base
pulp-admin consumer bind --consumer-id=$1 --repo-id=oraclelinux7-x86_64-uek3
pulp-admin consumer bind --consumer-id=$1 --repo-id=oraclelinux7-x86_64-optional-latest
pulp-admin consumer bind --consumer-id=$1 --repo-id=oraclelinux7-x86_64-addons
pulp-admin consumer bind --consumer-id=$1 --repo-id=epel7-x86_64-base
pulp-admin consumer bind --consumer-id=$1 --repo-id=oraclelinux7-x86_64-latest
#pulp-admin consumer group bind --consumer-id=$1 --repo-id=icinga-x86_64-stable-release
#pulp-admin consumer group bind --consumer-id=$1 --repo-id=ngtech-x86_64

View File

@@ -1,65 +0,0 @@
#!/bin/bash
# This script will run a pulp command to update consumers and reboot them
CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
BOOTSTRAP=${CURR_DIR}/bootstrap.sh
# Arguments that require values
declare -A arr_args_req_value=()
#######################################
function usage
{
echo "Usage: ${0}"
echo " --consumer-group-id"
echo " Pulp consumer group id"
echo " -r | --reboot"
echo " Should the consumer require reboot after update"
echo " -h | --help"
echo " Show this usage"
exit 0
}
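# Example (hypothetical group id):
#   <this-script> --consumer-group-id el7-consumers --reboot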
if [ ! -e ${BOOTSTRAP} ];then
echo 'EXITING: bootstrap.sh must be in the same directory as this script'
exit 1
fi
. $BOOTSTRAP
# Check command line
if [[ -z "${@}" ]]; then
usage
fi
echo "${@}" | grep 'consumer-group-id' > /dev/null
if [ $? != 0 ]; then
echo 'Error: A consumer group id is required!'
exit
fi
# Process command line arguments
arr_args_req_value['consumer-group-id']=true
while [[ $# > 0 ]]; do
key="$1"
case $key in
--consumer-group-id)
check_values arr_args_req_value 'consumer-group-id' 'A consumer group id is required' "$2"
_consumer_group_id=$2
shift;;
-r|--reboot)
reboot_arg='--reboot'
shift;;
-h|--help)
usage
shift;;
*)
_default=$key
shift;;
esac
done
pulp-admin rpm consumer group package update --consumer-group-id=${_consumer_group_id} --all $reboot_arg

View File

@@ -1,21 +0,0 @@
#!/bin/bash
# Script to sync pulp repositories
# This script will be run manually every week
# BETA BETA BETA
CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
BOOTSTRAP=${CURR_DIR}/bootstrap.sh
if [ ! -e ${BOOTSTRAP} ];then
echo 'EXITING: bootstrap.sh must be in the same directory as this script'
exit 1
fi
. $BOOTSTRAP
repo_ids="oraclelinux7-x86_64-latest epel7-x86_64-base oraclelinux7-x86_64-addons oraclelinux7-x86_64-optional-latest oraclelinux7-x86_64-uek3 hhvm-x86_64-base webmin-noarch-base icinga-x86_64-stable-release ngtech-x86_64"
for repo_id in $repo_ids; do
pulp-admin rpm repo sync run --repo-id=$repo_id
done

View File

@@ -1,21 +0,0 @@
# Sample Configuration file
#
# Where to log my output?
LOG=/tmp/repo_sync.log
# Where to download my repository packages?
REPO_DIR=repos
# Syntax:
# reponame:repopath
#
# Retrieve the repo name from 'yum repolist' and set the repo
# path you get from a first run of reposync.
#
# Note: The path appears to differ between repos, which is why
# you need to run reposync first to get the path.
REPOS=(
repo1:repo1_path
repo2:repo2_path
)

View File

@@ -1,44 +0,0 @@
#!/bin/bash
# Script to sync repos
CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
BOOTSTRAP=${CURR_DIR}/bootstrap.sh
CONFIG_FILE=${CURR_DIR}/repo_sync.conf
YUM_LOCK=/var/run/yum.pid
if [ ! -e ${BOOTSTRAP} ];then
echo 'EXITING: bootstrap.sh must be in the same directory as this script'
exit 1
fi
. $BOOTSTRAP
if [ ! -e ${CONFIG_FILE} ]; then
echo 'EXITING: repo_sync.conf must be in the same directory as this script'
exit 1
fi
. $CONFIG_FILE
debug "Current directory: $PWD"
debug "Changing directory to ${REPO_DIR}"
test ! -d $REPO_DIR && err "${REPO_DIR} directory doesn't exist!"
cd $REPO_DIR
debug "Current directory: $PWD"
for repo_id in "${!REPOS[@]}"; do
## Avoid running while yum is already running
if test -e $YUM_LOCK; then
err "Yum appears to be running, if this is not the case\nplease delete $YUM_LOCK"
fi
IFS=":" read -r -a array <<< "${REPOS[$repo_id]}"
name=${array[0]}
path=${array[1]}
echo "Processing repository '${name}'..."
run_cmd "reposync" "-n --repoid=${name}"
_path=${REPO_DIR}/$path
run_cmd "createrepo" "--update ${_path}"
unset name
unset path
done

View File

@@ -1,20 +0,0 @@
#
# SquidBlocker Updater Configuration
#
# Note: This updater only supports the shallalist blacklist
#
# Blacklist url to download and update SquidBlocker with
#
blacklist_url='http://dl.lhprojects.net/static_files/shallalist.tar.gz' #Recommended, updated once a day
# In situations where you have multiple SquidBlocker instances running,
# you can specify a URL for each instance
## First instance
sb_url[1]='http://username:password@127.0.0.1:8081/db/set_batch/'
blacklists[1]='warez spyware redirector hacking dynamic costtraps adv tracker' #Blacklist to import
## Second instance
sb_url[2]='http://username2:password2@127.0.0.1:8080/db/set_batch/'
blacklists[2]='warez adv redirector costtraps dynamic hacking spyware porn tracker anonvpn'

View File

@@ -1,110 +0,0 @@
#!/usr/bin/env bash
#
# Define a function to cleanup working directory
#
function cleanup {
rm -rf "$WORK_DIR"
echo "Deleted temp working directory $WORK_DIR"
}
trap cleanup EXIT INT
#
# Load configuration information
#
## Regular one
#source /etc/sysconfig/sbblupdater
## Debug statement
source ./sbblupdater.conf
#
# Create TEMP directory
#
echo 'Creating TEMP directory...'
WORK_DIR=`mktemp -d -p /tmp bl_staging.XXXXXXX`
cd ${WORK_DIR}
if [ "${DEBUG}" = '1' ]; then
curr_wd=`pwd`
echo "DEBUG: Current directory: ${curr_wd}"
fi
#
# Download blacklist
#
## Debug variable
#blacklist_url=https://www.lhprojects.net/blacklist/shallalist.tar.gz
echo "Downloading blacklist from ${blacklist_url}"
## Regular one
wget -nv ${blacklist_url}
exit_status=$?
if [ ${exit_status} != 0 ]; then
echo "ERROR: Unable to download blacklist"
exit 1
fi
#
# Extract tar
#
tar xvf shallalist.tar.gz > /dev/null
if [ "${DEBUG}" = '1' ]; then
curr_wd=`pwd`
echo "DEBUG: Current directory: ${curr_wd}"
fi
#
# Create domain and url list for each instance then import into SquidBlocker
#
sb_int=0
empty_vars=0
while true; do
## If there are more than 5 consecutive empty sb_url variables, exit the loop
if [ $empty_vars -gt 5 ]; then
break
fi
if [ -z "${sb_url[sb_int]}" ]; then
((empty_vars++))
((sb_int++))
continue
fi
echo "Creating blacklists for instance ${sb_int}"
domains_file=`mktemp domains.XXXX`; domains_file="${WORK_DIR}/${domains_file}"
urls_file=`mktemp urls.XXXX`; urls_file="${WORK_DIR}/${urls_file}"
for bl in ${blacklists[sb_int]}; do
cat "BL/${bl}/domains" >> ${domains_file}
cat "BL/${bl}/urls" >> ${urls_file}
if [ "${DEBUG}" = '1' ]; then
linecount=`wc -l < ${domains_file}`
printf "DEBUG: %s has %d domains for instance ${sb_int}\n" ${domains_file} ${linecount}
linecount=`wc -l < ${urls_file}`
printf "DEBUG: %s has %d urls for instance ${sb_int}\n" ${urls_file} ${linecount}
fi
done
echo "Importing blacklist into instance ${sb_int}..."
curl -i -X POST -H "Content-Type: multipart/form-data" \
-F "prefix=dom:" \
-F "val=1" \
-F "listfile=@${domains_file}" \
${sb_url[sb_int]}
curl -i -X POST -H "Content-Type: multipart/form-data" \
-F "prefix=url:http://" \
-F "val=1" \
-F "listfile=@${urls_file}" \
${sb_url[sb_int]}
rm -f ${domains_file}
rm -f ${urls_file}
((sb_int++))
done

View File

@@ -1,53 +0,0 @@
#!/bin/bash
#
# This will start multiple tcptunnel instances
function _shutdown
{
for i in {1..6} ; do
if [ -z "${PIDS[i]}" ]; then
# assume we are at the end of the list
break;
fi
kill ${PIDS[i]} > /dev/null 2>&1;
done
exit 0
}
# Trap signals
trap "_shutdown" SIGINT SIGTERM
# This is for debug
source tcptunnel.conf
# Source configuration file
#source /etc/sysconfig/tcptunnel
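# A minimal tcptunnel.conf sketch (hypothetical values; the loop below expects
# parallel arrays indexed 0..5 plus the path to the tcptunnel binary):
#   TCPTUNNEL_BIN=/usr/bin/tcptunnel
#   BIND_ADDRESS[0]=192.0.2.10
#   LOCAL_PORT[0]=8080
#   REMOTE_HOST[0]=internal.example.net
#   REMOTE_PORT[0]=80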
TCPTUNNEL_STARTED=0
NPID=0
for i in {0..5} ; do
if [ -z "${BIND_ADDRESS[$i]}" ]; then
continue
fi
${TCPTUNNEL_BIN} --local-port=${LOCAL_PORT[i]} --remote-port=${REMOTE_PORT[i]} --remote-host=${REMOTE_HOST[i]} --bind-address=${BIND_ADDRESS[i]} &
# Store pid
((NPID++))
PIDS[NPID]=$!
disown ${PIDS[NPID]}
if [ $? = 0 ]; then
((TCPTUNNEL_STARTED++))
fi
done
if [ $TCPTUNNEL_STARTED = 0 ]; then
echo "No TCP Tunnels were started"
exit 255
fi
echo "${TCPTUNNEL_STARTED} tcptunnels were started successfully"
sleep infinity

View File

@@ -1,299 +0,0 @@
#!/usr/bin/env bash
#
# This script will tar lxc-pve AppArmor profiles and
# upload the archive to a remote download server.
#
# Path to auth file that contains username and password.
# Example
# username=authuser
# password=authpw
auth_file=$HOME/.tar.lxc-apparmor-profiles.authfile
# Do not edit anything below this line
set -e
_curl_opts='--silent'
_wget_opts='--quiet'
_ssh_opts='-q'
if [ -n "$DEBUG" ] && [[ $DEBUG -ge 2 ]]; then
set -x
_tar_opts='-v'
_curl_opts='--show-error --verbose'
_wget_opts='--verbose'
_ssh_opts='-vv'
fi
function debug {
if test -n "$DEBUG"; then
echo "[DEBUG] $1"
fi
}
function cleanup {
debug "Cleaning up temporary directory..."
if test -d $_tmp_dir; then
rm -r $_tmp_dir
fi
}
function getfilehash {
echo $(sha256sum $1|awk '{print $1}')
}
function _wget {
wget $_wget_opts $1 2> /dev/null || true
}
function httpdownload {
_wget $_dl_filedir/$_dl_filename
_wget $_dl_filedir/$_dl_filesha256
if test ! -f $_dl_filename || test ! -f $_dl_filename.sha256; then
echo "ERROR: Failed to download files"; exit 1;
fi
# TODO: rename $_dl_filename to $_tar_filename
_file_hash=$(getfilehash $_dl_filename)
_remote_hash=$(cat $_dl_filename.sha256)
if [ $_file_hash != $_remote_hash ]; then
echo "Downloaded file corrupted!"
exit 1
fi
}
if test -n "$TESTING"; then
_dl_server=http://localhost:8080
else
# This is a temporary location that may become
# permanent; dl.lhprojects.net is not configured
# to execute PHP scripts
_dl_server=https://www.lhprojects.net
fi
_tar_filename=apparmor-profiles.tgz
_dl_scriptname=handleLXCAppArmorProfiles.php
_dl_filedir=$_dl_server/apparmor
_dl_filename=$_tar_filename
_rsync_server=pve1.lhprojects.int
_tmp_dir=/tmp/.tar.lxc-apparmor-profiles
_rsync_user=apparmor_rsync
_remote_script_path=/usr/local/bin/tar.lxc-apparmor-profiles
function usage
{
echo "Usage: ${0}"
echo "This script can upload an archive of LXC AppArmor profiles to a remote HTTP"
echo "server via handleLXCAppArmorProfiles.php. Alternatively, you can use --download-scp."
echo ""
echo " --download"
echo " Download and extract archive using http via handleLXCAppArmorProfiles.php"
echo " NOTE: You need to upload the archive to a middleman HTTP server prior to using --download."
echo " --download-scp"
echo " Same as above however use scp for file transfer."
echo " NOTE: the remote SSH server must have $_remote_script_path installed"
echo " --download-test"
echo " Same as --download, but extract in temp directory"
echo " -h | --help"
echo " Show this usage"
exit 0
}
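# Typical round trip (hypothetical hosts): run this script with no arguments on the
# PVE node to archive the /etc/apparmor.d LXC profiles and upload them via
# handleLXCAppArmorProfiles.php, then run it with --download on the target host
# to fetch and extract the profiles.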
# We're going to attempt to create a tmp dir
# and set the _tmp_dir variable
if test -d $_tmp_dir && test ! -O $_tmp_dir; then
debug "ERROR: We don't own $_tmp_dir!"
_old_tmp_dir=$_tmp_dir
for i in {1..5}; do
__tmp_dir=$_tmp_dir$i
if test -d $__tmp_dir && test -O $__tmp_dir || test ! -d $__tmp_dir; then
debug "Setting _tmp_dir to '$__tmp_dir'"
_tmp_dir=$__tmp_dir
break
fi
done
if [[ $_tmp_dir = $_old_tmp_dir ]]; then
echo "ERROR: Unable to set a tmp dir"
exit 1
fi
fi
_staging_dir=$_tmp_dir/stg
case "$1" in
-h | --help )
usage
;;
--download | --download-scp)
cleanup
mkdir -p $_staging_dir
cd $_tmp_dir
if [ $1 = '--download-scp' ]; then
echo "Archiving remote AppArmor profiles..."
_result=$(ssh $_ssh_opts $_rsync_user@$_rsync_server || true)
if [[ $_result != '200 OK' ]]; then
echo "ERROR: Something went wrong: REMOTE $_result"
cleanup
exit 1
fi
echo "Downloading archive..."
scp $_ssh_opts $_rsync_user@$_rsync_server:$_tar_filename .
scp $_ssh_opts $_rsync_user@$_rsync_server:$_tar_filename.sha256 .
if test ! -f $_tar_filename || test ! -f $_tar_filename.sha256; then
echo "ERROR: Failed to download files"; exit 1;
fi
_file_hash=$(getfilehash $_tar_filename)
_remote_hash=$(cat $_tar_filename.sha256)
if [ $_file_hash != $_remote_hash ]; then
echo "Downloaded file corrupted!"
exit 1
fi
else
httpdownload
fi
echo "Extracting archive..."
cd /
tar $_tar_opts -xf $_tmp_dir/$_dl_filename .
cleanup
exit 0
;;
--download-test )
echo "This is only a download test and archive extract"
cleanup
mkdir -p $_staging_dir
cd $_tmp_dir
httpdownload
cd $_staging_dir
tar $_tar_opts -xf ../$_dl_filename .
echo "Download test completed successfully"
cleanup
exit 0
;;
* )
# Check if running under a SSH connection
if [ -n "$SSH_CLIENT" ] && [ -z "$1" ] && [ $USER != 'root' ]; then
echo "Running from SSH connection and no command arguments given"
echo "Exiting"
exit 1
fi
# Init
_tar_file=$_tmp_dir/$_tar_filename
_user_home=$(eval echo "~$_rsync_user")
_scp_remote=0
if test -n "$1" && [ $1 = '--scp-remote' ]; then
debug "Remote SCP enabled, not uploading..."
_scp_remote=1
# Determine if we need to run scp instead
case "$SSH_ORIGINAL_COMMAND" in
"scp -f $_tar_filename" | "scp -f $_tar_filename" | "scp -f $_tar_filename.sha256" | "scp -v -f $_tar_filename.sha256" )
cd $_user_home
# Replace script with scp command
eval "$SSH_ORIGINAL_COMMAND"
exit 0
;;
esac
fi
cleanup
mkdir -p $_staging_dir
debug "Copying AppArmor profiles over to tmp dir..."
read -r -d '' _profile_files <<- EOF || true
/etc/apparmor.d
/etc/apparmor.d/abstractions
/etc/apparmor.d/abstractions/lxc
/etc/apparmor.d/abstractions/lxc/container-base
/etc/apparmor.d/abstractions/lxc/start-container
/etc/apparmor.d/lxc
/etc/apparmor.d/lxc/lxc-default
/etc/apparmor.d/lxc/lxc-default-cgns
/etc/apparmor.d/lxc/lxc-default-with-mounting
/etc/apparmor.d/lxc/lxc-default-with-nesting
/etc/apparmor.d/lxc-containers
EOF
# Read file list
while read -r file; do
if test -d $file;
then
_dest=$_staging_dir/.$file
debug "Creating directory: $_dest"
mkdir -p $_dest
elif test -f $file
then
_src=$file
_dest=$_staging_dir/.$file
debug "Copying: $_src to $_dest"
cp $_src $_dest
fi
done <<< "$_profile_files"
debug "Changing directory to $_tmp_dir"
cd $_staging_dir
if [ $PWD != $_staging_dir ]; then
echo "Failed to change directory!"
exit 1
fi
# Tar files
tar $_tar_opts -czf $_tar_file .
# Get file hash
_file_hash=$(getfilehash $_tar_file)
if [ $_scp_remote -ne 0 ]; then
debug "Moving: $_tar_file to $_user_home"
mv -f $_tar_file $_user_home/
echo $_file_hash > $_user_home/$_tar_filename.sha256
echo "200 OK"
cleanup
exit 0
elif test -n "$1"; then
echo "ERROR: Invalid argument: $1"
cleanup
exit 1
fi
# Get auth creds
if test ! -f $auth_file; then
echo "ERROR: Auth file '$auth_file' not found!"
cleanup
exit 1
fi
source $auth_file
_result=$(curl $_curl_opts -H 'X-AppArmor-State: ProcessUpload' -H "X-Tar-Hash: $_file_hash" -F "apparmor-profiles=@$_tmp_dir/$_tar_filename" --user "$username:$password" $_dl_server/$_dl_scriptname)
if [[ $_result = '200 OK' ]]; then
echo 'Successfully uploaded archive!'
else
echo "Something went wrong: $_result"
fi
;;
esac

View File

@@ -1 +0,0 @@
#!/bin/bash