Initial commit

This commit is contained in:
Lutchy Horace 2021-01-21 16:58:32 -05:00
parent cb24cc9b9b
commit 2500470a1a
32 changed files with 2660 additions and 0 deletions

1
.htpasswd Normal file

@ -0,0 +1 @@
tar.lxc-apparmor-profiles-user:$2y$10$DAJYpLDBPo8ErO6UMUCNpOh7RACBkwk0knlmP/3JKWE3XlfV2fOxW

5
.vscode/extensions.json vendored Normal file

@ -0,0 +1,5 @@
{
"recommendations": [
"onecentlin.php-productive-pack"
]
}

22
.vscode/launch.json vendored Normal file

@ -0,0 +1,22 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "Listen for XDebug",
"type": "php",
"request": "launch",
"port": 9000
},
{
"name": "Launch currently open script",
"type": "php",
"request": "launch",
"program": "${file}",
"cwd": "${fileDirname}",
"port": 9000
}
]
}

14
README Normal file

@ -0,0 +1,14 @@
##########
README
##########
This repository is a collection of scripts that I utilize throughout my infrastructure.
You can follow issue tracking here: https://redmine.lhprojects.net/projects/scripts
Scripts
----------
backup_files.sh: Script I use to rsync files to an off-site storage location. You can
customize the configuration via backup.conf and then pass it as an argument
when executing the script.
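For example (a sketch; adjust the path to wherever you copied backup.conf.example):

    ./backup_files.sh /path/to/backup.conf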

143
autoattach Executable file

@ -0,0 +1,143 @@
#!/usr/bin/env bash
#
# Auto attach to tmux
#
CONF_FILE=$HOME/.autoattach.conf
###########################
function write_setting
{
if ! sed -i -E "s/^$1=.*/$1=$2/" $CONF_FILE 2> /dev/null; then
echo "$1=$2" >> $CONF_FILE
fi
}
function attach_session
{
if [ -n "${TMUX}" ]; then
exit 1
fi
tmux attach -t $1 2> /dev/null
# Check for exit status
if [ $? -gt 0 ]; then
echo "There is no tmux session '$1'"
if [ -z "${AUTO_CREATE}" ]; then
read -p "Do you want me to auto create sessions by default? (y/n): "
if [ "${REPLY}" = "y" ]; then
write_setting 'AUTO_CREATE' '1'
AUTO_CREATE=1
else
write_setting 'AUTO_CREATE' '0'
fi
fi
if [ "${AUTO_CREATE}" = "1" ]; then
tmux new-session -s $1 2> /dev/null
exit 0
fi
echo 'Nothing to do, exiting...'
exit 1
fi
}
function usage
{
echo "Usage: ${0}"
echo " -c | --clear"
echo " Delete configuration file"
echo " -s | --set auto_create on"
echo " Set configuration variable"
echo " auto_create on/off"
echo " Auto create session on start if session is missing"
echo " -h | --help"
echo " Show this usage"
exit 0
}
# Load configuration file
test -f $CONF_FILE && . $CONF_FILE
# Detect platform
case "$OSTYPE" in
freebsd* )
echo "Freebsb detected!!"
echo "Long options is disabled!!"
OPTS=`getopt chgs: $*`
set_opts_exit_status=$?
PLATFORM='Freebsd' ;;
#if [ $? -ne 0 ]; then
# echo 'Usage: ...'
# exit 2
#fi
* )
OPTS=$(getopt -o chgs: -l clear,help,set:,get: -n 'autoattach' -- "$@")
set_opts_exit_status=$?
PLATFORM='Linux' ;;
esac
if [ "$set_opts_exit_status" -gt '0' ]; then
echo 'Failed to set command line arguments'
exit 1;
fi
eval set -- "$OPTS"
CLEAR=false
SET=false
while true; do
case "$1" in
-c | --clear ) CLEAR=true; shift ;;
-s | --set )
case "$2" in
'auto_create' )
case "$4" in
'on' )
echo 'Setting auto_create to 1'
write_setting 'AUTO_CREATE' '1'
shift;;
'off' )
echo 'Setting auto_create to 0'
write_setting 'AUTO_CREATE' '0'
shift;;
esac
shift;;
*)
echo "Failed to set configuration $2 $4"
exit 1
esac
exit 0
shift ;;
-h | --help ) usage; shift ;;
-- ) shift; break ;;
* ) break ;;
esac
done
# Clear default window
if [ $CLEAR = true ]; then
echo "Deleting configuration..."
rm -f $CONF_FILE
exit 0
fi
if [ -n "${SET}" -a $SET != false ]; then
echo "Updating configuration..."
write_setting $SET
exit 0
fi
# Check if this a drop down terminal from env
if [ -n "${DROP_DOWN_TERMINAL}" ]; then
attach_session drop_down
else
attach_session regular
fi
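A usage sketch, with the session names and the DROP_DOWN_TERMINAL switch taken from the script above (where you call it from, e.g. a shell rc file or a terminal profile, is up to you):

    # Regular terminal: attaches to (or offers to create) the "regular" session
    ./autoattach

    # Drop-down terminal profile: export the variable the script checks to target "drop_down"
    DROP_DOWN_TERMINAL=1 ./autoattach

    # Enable session auto-creation without being prompted
    ./autoattach --set auto_create on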

15
backup.conf.example Normal file

@ -0,0 +1,15 @@
# **
#
# Backup files configuration
FROM='backup@example.com'
EMAIL='alerts@example.com'
LOG='/var/log/backup.log'
RSYNC_OPTIONS='--stats -rtc'
RSYNC_LOCAL_PATH='/backup/dir///./'
RSYNC_REMOTE_PATH='username@rsync.example.com:'
NUM_BACKUPS=5
BACKUP_DIR=/path/to/dir/to/backup
BACKUP_DEST=/destination/of/backup/files
B_PREFIX=devel_repo

77
backup.sh Executable file

@ -0,0 +1,77 @@
#!/usr/bin/env bash
#
# Tar directory files
#
CONF=$1
CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
BOOTSTRAP=${CURR_DIR}/bootstrap.sh
## Check for perl regex support in grep
echo 'test' | grep -P 'test' > /dev/null 2>&1
if [ $? != 0 ]; then
echo "EXITING: your distribution of grep binary doesn't support -P"
exit 1
fi
if [ ! -e ${BOOTSTRAP} ];then
echo 'EXITING: bootstrap.sh must be in the same directory as this script'
exit 1
fi
source ${BOOTSTRAP}
if [ -z "${CONF}" ] || [ ! -e ${CONF} ];then
echo 'EXITING: missing configuration file'
exit 1
fi
source ${CONF}
## Check if we exceeded number of back up files
FILES=$(ls -r ${BACKUP_DEST})
i=0
RM=''
for f in $FILES
do
FILE=$(echo $f | grep -P "(${B_PREFIX}\..*\.tgz)$")
if [ -n "${FILE}" ]; then
FILE=${BACKUP_DEST}/${FILE}
((i++))
if [ $i -gt ${NUM_BACKUPS} ]; then
if [ -z "${RM}" ]; then
RM="rm -f ${FILE}"
else
RM="${RM} ${FILE}"
fi
fi
fi
done
if [ -n "${RM}" ]; then
echo 'CRON: Removing old backups..'
debug "Executing: ${RM}"
${RM}
else
echo 'CRON: Did not remove any old backups this run...'
fi
## Run backup
echo "CRON: Backing up files now..."
BASENAME=$(basename ${BACKUP_DIR})
cd ${BACKUP_DIR}
cd ..
### Debug
debug "Current directory: "`pwd`
tar -czf ${BACKUP_DEST}/${B_PREFIX}.`date +%m-%d-%Y`.tgz ${BASENAME} >> ${LOG} 2>&1
if [ $? != 0 ]; then
echo "CRON: Failed to backup files"
send_notification "CRON: Unable to backup directory on ${HOSTNAME}" "DATE: {$TODAY}\n\nUnable to backup directory: ${BACKUP_DIR}"
exit 1
else
echo "CRON: Successfully backed up files"
send_notification "CRON: Successfully backed up directory on ${HOSTNAME}" "DATE: $TODAY\n\n Successfully backed up directory: ${BACKUP_DIR}"
exit 0
fi
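The CRON: prefixes suggest the script is meant to run from cron; a hypothetical crontab entry (paths are placeholders) would be:

    # Nightly tarball at 02:30, passing the configuration file as the first argument
    30 2 * * * /opt/scripts/backup.sh /opt/scripts/backup.conf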

62
beep-notify.sh Executable file

@ -0,0 +1,62 @@
#!/bin/bash
#
# Icinga2 Notification
#
# This will beep the system speaker when a problem occurs
DEBUG_VARS_PATH=".beep-notify.vars"
if [ -e ${DEBUG_VARS_PATH} ]; then
. ${DEBUG_VARS_PATH}
fi
function _beep {
if [ -z "${RPID}" ]; then
# We need to store our subshell pid
#
if [ "${BASH_VERSINFO[0]}" -lt 4 ]; then
MYPID=$( : ; bash -c 'echo $PPID' )
else
MYPID=$BASHPID
fi
echo $MYPID > $PID_PATH
while true; do
sudo /bin/beep -f 1000 -n -f 2000 -n -f 1500 ## Only ROOT can access /dev/tty0
if [ $? != 0 ]; then ## If we can't beep, no need to keep trying
exit 1 ## No TTY to output error to, just exit
fi
sleep 2
done
fi
}
PID_PATH=/tmp/beep-notify.pid
RPID=
if [ -e ${PID_PATH} ]; then
if [ -f /proc/$(cat ${PID_PATH})/status ]; then
RPID=$(cat ${PID_PATH})
fi
fi
case $NOTIFICATIONTYPE in
PROBLEM)
if [ "${HOSTSTATE}" = "DOWN" ]; then
echo "OK HOST DOWN - INITIATING SYSTEM BEEP"
_beep &
exit 0
elif [ "${SERVICESTATE}" = "CRITICAL" ]; then
echo "OK SERVICE CRITICAL - INITIATING SYSTEM BEEP"
_beep &
exit 0
fi
;;
ACKNOWLEDGEMENT|RECOVERY)
echo "OK ${NOTIFICATIONTYPE} - KILLING SYSTEM BEEP"
kill -SIGTERM $RPID 2> /dev/null
rm -f $PID_PATH
exit 0
;;
esac
exit 1
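The script is driven entirely by Icinga2's notification environment variables; a quick manual test sketch (variable names taken from the case statement above) is:

    # Simulate a host-down problem notification (starts the beep loop in the background)
    NOTIFICATIONTYPE=PROBLEM HOSTSTATE=DOWN ./beep-notify.sh

    # Simulate the recovery (kills the beep loop and removes the pid file)
    NOTIFICATIONTYPE=RECOVERY ./beep-notify.sh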

89
bootstrap.sh Normal file

@ -0,0 +1,89 @@
#!/usr/bin/env bash
#
# Collection of reusable functions and variables
#
TODAY=`date`
HOSTNAME=`hostname`
function debug
{
if [ "${DEBUG}" = '1' ]; then
echo -e "DEBUG: $1"
fi
}
function err
{
echo -e "FATAL ERROR: $1"
exit 1
}
function send_notification
{
opt=""
if [ -n "$FROM" ]; then
local mailcmd=$(readlink -f `which mail`)
if [ "${mailcmd}" = "/usr/bin/bsd-mailx" ]; then
opt="-a 'From: ${FROM}'"
else
opt="-r $FROM"
fi
fi
echo "Sending notification."
echo -e "$2" | mail $opt -s "$1" ${EMAIL}
}
function check_values
{
a_name=$1[@]
a_array=("${!a_name}")
if [ -z ${!a_name+x} ]; then
return
fi
if [ ${a_array[$2]} == true ]; then
if [ "x${4}" = 'x' ]; then
echo $3
exit 1
fi
fi
}
function run_cmd {
debug "Executing: '$1 $2'"
if [ ! -e $LOG ]; then
touch $LOG
fi
## Check if command exists on system
if ! command -v $1; then
err "$1: command not found"
fi
## Check if debug is set and send command output to shell
## Some commands send animated text that can corrupt the log
if test -n "${DEBUG}"; then
debug "DEBUG variable set, redirecting command output to console"
$1 $2
if [ $? -gt 0 ]; then
err "Failed running command '$1 $2'"
fi
else
$1 $2 2>> $LOG
# FIXME: This logic might need re-work?
if [ $? -gt 0 ]; then
echo "ERROR: Failed running command '$1 $2'."
echo "Displaying last 20 lines of ${LOG}..."
echo
tail $LOG
echo
exit 1
fi
fi
}
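bootstrap.sh is meant to be sourced by the other scripts in this commit; a minimal consumer sketch (the rsync arguments, log path, and e-mail address are placeholders) looks like:

    #!/usr/bin/env bash
    source "$(dirname "${BASH_SOURCE[0]}")/bootstrap.sh"
    DEBUG=1
    LOG=/tmp/example.log
    EMAIL=alerts@example.com
    # run_cmd takes the command and its arguments as two separate strings
    run_cmd "rsync" "-a /srv/data/ /backup/data/"
    send_notification "Sync finished on ${HOSTNAME}" "Completed at ${TODAY}"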

190
cachet_notify Executable file

@ -0,0 +1,190 @@
#!/usr/bin/php5
<?php
if ($argc != 6) {
echo 'Usage: ' . basename(__FILE__) . ' cachet_component service_name service_state service_state_type service_output' . "\n";
exit(1);
}
$api_path = dirname(__FILE__) . '/.api_key';
if ( is_file($api_path) ) {
$api = parse_ini_file($api_path);
} else {
exit(1);
}
$cachet_url = $api['url'];
$api_key = $api['key'];
$incident_prefix = '[Icinga2]';
$cachet_notify_subscribers = true; // Enable subscriber notification for incident creation and updates
$cachet_component = $argv[1];
$service_name = $argv[2];
$service_status = $argv[3];
$service_status_type = $argv[4];
$service_output = $argv[5];
define('CATCHET_STATUS_INVESTIGATING', 1);
define('CATCHET_STATUS_IDENTIFIED', 2);
define('CATCHET_STATUS_WATCHING', 3);
define('CATCHET_STATUS_FIXED', 4);
define('CATCHET_COMP_OPERATIONAL', 1);
define('CATCHET_COMP_PERFISSUES', 2);
define('CATCHET_COMP_PARTIALOUTAGE', 3);
define('CATCHET_COMP_MAJOROUTAGE', 4);
function cachet_query($api_part, $action = 'GET', $data = null) {
global $api_key, $cachet_url;
$ch = curl_init();
curl_setopt($ch, CURLOPT_URL, $cachet_url . $api_part);
curl_setopt($ch, CURLOPT_SSL_VERIFYHOST, false);
curl_setopt($ch, CURLOPT_SSL_VERIFYPEER, false);
if (in_array($action, array('GET', 'POST', 'PUT'))) {
curl_setopt($ch, CURLOPT_CUSTOMREQUEST, $action);
}
if ($data !== null && is_array($data)) {
$ch_data = http_build_query($data);
curl_setopt($ch, CURLOPT_POST, 1);
curl_setopt($ch, CURLOPT_POSTFIELDS, $ch_data);
}
$ch_headers = array(
'X-Cachet-Token: ' . $api_key
);
curl_setopt($ch, CURLOPT_HTTPHEADER, $ch_headers);
curl_setopt($ch, CURLOPT_HEADER, false); // Don't return headers
curl_setopt($ch, CURLOPT_RETURNTRANSFER, true); // Return body
$http_body = curl_exec($ch);
$http_code = curl_getinfo($ch, CURLINFO_HTTP_CODE);
curl_close($ch);
return array('code' => $http_code, 'body' => json_decode($http_body));
}
/* Find Cachet component ID */
$result = cachet_query('components');
if ($result['code'] != 200) {
echo 'Can\'t query components' . "\n";
exit(1);
}
$cachet_component_id = false;
foreach ($result['body']->data as $component) {
if ($cachet_component == $component->name) { // We nailed it
$cachet_component_id = $component->id;
break; // Yes, bad.
}
}
if ($cachet_component_id === false) {
echo 'Can\'t find component "' . $cachet_component . '"' . "\n";
exit(1);
}
/*
Determine what to do:
- if PROBLEM and SOFT then don't cry just yet
- if PROBLEM and HARD then create incident
- if RECOVERY and SOFT then update incident
- if RECOVERY and HARD then update incident
PROBLEM = !OK = (WARNING | CRITICAL | UNKNOWN)
RECOVERY = OK
*/
if ($service_status != 'OK' && $service_status_type == 'SOFT') { // Hope it will be back soon
echo 'KO SOFT: not doing anything' . "\n";
exit(0);
} elseif ($service_status != 'OK' && $service_status_type == 'HARD') { // Something went wrong, let's notify
echo 'KO HARD: creating incident' . "\n";
$query = array(
'name' => $incident_prefix . ' ' . $service_name,
'message' => $service_output,
'status' => CATCHET_STATUS_INVESTIGATING,
'component_id' => $cachet_component_id,
'component_status' => CATCHET_COMP_MAJOROUTAGE,
'notify' => $cachet_notify_subscribers,
);
$result = cachet_query('incidents', 'POST', $query);
if ($result['code'] != 200) {
echo 'Can\'t create incident' . "\n";
//exit(1);
}
} elseif ($service_status == 'OK' && $service_status_type == 'SOFT') { // Recovery underway
echo 'OK SOFT: updating incident' . "\n";
/* Get the incident ID */
$results = cachet_query('incidents');
if ($results['code'] != 200) {
echo 'Can\'t get incidents' . "\n";
exit(1);
}
$cachet_incident_id = false;
foreach ($results['body']->data as $incident) {
if ($incident->name == $incident_prefix . ' ' . $service_name) {
$cachet_incident_id = $incident->id;
break; // Yes, bad.
}
}
if ($cachet_incident_id === false) {
echo 'Can\'t find incident "' . $incident_prefix . ' ' . $service_name . '"' . "\n";
exit(1);
}
/* Update the incident */
$query = array(
'name' => $incident_prefix . ' ' . $service_name,
'message' => $service_output,
'status' => CATCHET_STATUS_WATCHING,
'component_id' => $cachet_component_id,
'component_status' => CATCHET_COMP_PARTIALOUTAGE,
'notify' => $cachet_notify_subscribers,
);
$result = cachet_query('incidents/' . $cachet_incident_id, 'PUT', $query);
if ($result['code'] != 200) {
echo 'Can\'t update incident' . "\n";
exit(1);
}
} elseif ($service_status == 'OK' && $service_status_type == 'HARD') { // Recovery completed
echo 'OK HARD: updating incident' . "\n";
/* Get the incident ID */
$results = cachet_query('incidents');
if ($results['code'] != 200) {
echo 'Can\'t get incidents' . "\n";
exit(1);
}
$cachet_incident_id = false;
foreach ($results['body']->data as $incident) {
if ($incident->name == $incident_prefix . ' ' . $service_name) {
$cachet_incident_id = $incident->id;
break; // Yes, bad.
}
}
if ($cachet_incident_id === false) {
echo 'Can\'t find incident "' . $incident_prefix . ' ' . $service_name . '"' . "\n";
exit(1);
}
/* Update the incident */
$query = array(
'name' => $incident_prefix . ' ' . $service_name,
'message' => $service_output,
'status' => CATCHET_STATUS_FIXED,
'component_id' => $cachet_component_id,
'component_status' => CATCHET_COMP_OPERATIONAL,
'notify' => $cachet_notify_subscribers,
);
$result = cachet_query('incidents/' . $cachet_incident_id, 'PUT', $query);
if ($result['code'] != 200) {
echo 'Can\'t update incident' . "\n";
exit(1);
}
} else {
echo 'Bad arguments' . "\n";
exit(1);
}
exit(0);
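A sketch of the pieces the script expects (key names come from the parse_ini_file() call above; all values are placeholders):

    # .api_key, an INI file next to the script:
    #   url = "https://status.example.com/api/v1/"
    #   key = "your-cachet-api-token"

    # Invocation, mirroring the usage line printed when $argc != 6:
    ./cachet_notify "Mail" "smtp" CRITICAL HARD "SMTP CRITICAL - connection refused"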

343
cachet_notifyV2.php Executable file

@ -0,0 +1,343 @@
#!/usr/bin/env php
<?php
class Component
{
public $id = null;
public $name = null;
public $type = null;
public $status = null;
public $state = null;
public $state_type = null;
public function __construct($name, $type, $state, $state_type) {
$this->id = $this->generateSerial();
$this->name = $name;
$this->type = $type;
$this->state = $state;
$this->state_type = $state_type;
}
private function generateSerial() {
$tokens = 'ABCDE0123456789';
$serial = '';
for ($j = 0; $j < 7; $j++) {
$serial .= $tokens[rand(0, 14)];
}
return $serial;
}
}
class Cachet
{
public $body = null;
public $code = null;
private $api_key = null;
private $api_url = null;
private $cache = null;
private $cache_path = '/tmp/.cachet_notifyV2.cache';
const CATCHET_STATUS_INVESTIGATING = 1;
const CATCHET_STATUS_IDENTIFIED = 2;
const CATCHET_STATUS_WATCHING = 3;
const CATCHET_STATUS_FIXED = 4;
const CATCHET_COMP_OPERATIONAL = 1;
const CATCHET_COMP_PERF_ISSUES = 2;
const CATCHET_COMP_PARTIAL_OUTAGE = 3;
const CATCHET_COMP_MAJOR_OUTAGE = 4;
const MSG_HOST_DOWN = 'Automated incident created by Icinga2. Host \'%s\' is currently experiencing an outage and the system administrator has been notified';
const MSG_SERVICE_DOWN = 'Automated incident created by Icinga2. Service \'%s\' is currently experiencing an outage and the system administrator has been notified';
const MSG_HOST_UP = 'Automated incident created by Icinga2. Host \'%s\' has recovered from its outage';
const MSG_SERVICE_UP = 'Automated incident created by Icinga2. Service \'%s\' has recovered from its outage';
public function __construct() {
global $argv;
// Get API information
$search_paths = array(dirname($argv[0]), '/tmp', dirname(__FILE__));
foreach($search_paths as $path) {
$api_key_path = $path . '/.cachet_api_key';
if (file_exists($api_key_path)) {
$api_info = parse_ini_file($api_key_path);
$this->api_key = $api_info['key'];
$this->api_url = $api_info['url'];
}
}
// Load cache if it exists
if (file_exists($this->cache_path)) {
$this->cache = json_decode(file_get_contents($this->cache_path));
} else {
$this->cache = new stdClass();
}
}
public function cache_store($name, $value) {
$this->cache->$name->expires = time() + 86400; // Expires in 24h?
$this->cache->$name->data = $value;
file_put_contents($this->cache_path, json_encode($this->cache, JSON_PRETTY_PRINT));
}
public function cache_get($name) {
if (!empty($this->cache->$name)) {
if ($this->cache->$name->expires > time()) {
return $this->cache->$name->data;
}
}
return null;
}
public function newOutage($component) {
$previous_outage = $this->findOutage($component->name);
if (!empty($previous_outage)) { // Abort if an outage is already being tracked for this component
debug('Cachet::newOutage() Previous outage detected, aborting new outage');
return;
}
$component->status = 'outage'; // Set outage status
$this->trackOutage($component); // Now we track outage
// Create incident
$msg = ($component->type == 'host') ?
sprintf(self::MSG_HOST_DOWN, $component->name) :
sprintf(self::MSG_SERVICE_DOWN, $component->name);
$this->createIncident($component->name, "{$component->name} {$component->id}", $msg);
}
public function endOutage($component) {
// Track outage
$component->status = 'operational';
$this->trackOutage($component);
// Create incident
$msg = ($component->type == 'host') ?
sprintf(self::MSG_HOST_UP, $component->name) :
sprintf(self::MSG_SERVICE_UP, $component->name);
$this->createIncident($component->name, "{$component->name} {$component->id}", $msg, self::CATCHET_STATUS_FIXED, self::CATCHET_COMP_OPERATIONAL);
}
public function trackOutage($component) {
$tracker = new stdClass();
$tracker->{$component->id}->expires = time() + 3600;
$tracker->{$component->id}->data = $component;
$this->cache_store('tracker', $tracker);
}
public function findOutage($name) {
$tracker = $this->cache_get('tracker');
if (!empty($tracker)) {
foreach($tracker as $id => $component) {
if ($component->expires > time()) { // Check if the outage is within the outage timeframe
if ($name == $component->data->name && $component->data->status == 'outage') {
return $component->data; // Return object
}
} else {
// Let us do some cleanup
unset($tracker->{$component->data->id});
$this->cache_store('tracker', $tracker);
}
}
}
return null;
}
public function createIncident($name, $title, $msg, $status = self::CATCHET_STATUS_IDENTIFIED, $component_status = self::CATCHET_COMP_MAJOR_OUTAGE) {
global $incident_prefix, $cachet_notify_subscribers;
$cachet_component = $this->getComponent($name);
$data = array(
'name' => "$incident_prefix $title",
'message' => "Alert: $msg",
'status' => $status,
'component_id' => $cachet_component->id,
'component_status' => $component_status,
'notify' => $cachet_notify_subscribers,
);
$this->curl('incidents', 'POST', $data);
if ($this->code != 200) {
$error_msg = $this->body->errors[0]->detail;
throw new Exception('Unable to create ticket: ' . $error_msg, $this->code);
}
}
public function updateIncident($component, $msg) {
global $incident_prefix, $cachet_notify_subscribers;
$incident_info = $this->getIncident("$incident_prefix $component");
$data = array(
'name' => "$incident_prefix $component",
'message' => $msg,
'status' => self::CATCHET_STATUS_FIXED,
'component_id' => $incident_info->component_id,
'component_status' => self::CATCHET_COMP_OPERATIONAL,
'notify' => $cachet_notify_subscribers,
);
$this->curl("incidents/{$incident_info->id}", 'PUT', $data);
if ($this->code != 200) {
$error_msg = $this->body->errors[0]->detail;
throw new Exception('Unable to update ticket: ' . $error_msg, $this->code);
}
}
private function getComponent($name) {
$components = $this->cache_get('components');
if (empty($components)) {
$this->curl('components');
$components = $this->body->data;
// Let's cache this request
$this->cache_store('components', $components);
}
foreach ($components as $component) {
if ($name == $component->name) { // We nailed it
return $component;
}
}
return false;
}
private function getIncident($name) {
$incidents = $this->cache_get('incidents');
if (empty($incidents)) {
$this->curl('incidents');
$incidents = $this->body->data;
// Let's cache this request
$this->cache_store('incidents', $incidents);
}
foreach ($incidents as $incident) {
if ($name == $incident->name) { // We nailed it
return $incident;
}
}
return false;
}
private function curl($component, $action = 'GET', $data = null) {
$ch = curl_init();
curl_setopt($ch, CURLOPT_URL, $this->api_url . $component);
curl_setopt($ch, CURLOPT_SSL_VERIFYHOST, false);
curl_setopt($ch, CURLOPT_SSL_VERIFYPEER, false);
curl_setopt($ch, CURLOPT_CUSTOMREQUEST, $action);
if ($data !== null) {
$ch_data = http_build_query($data);
curl_setopt($ch, CURLOPT_POST, 1);
curl_setopt($ch, CURLOPT_POSTFIELDS, http_build_query($data));
}
$ch_headers = array('X-Cachet-Token: ' . $this->api_key);
curl_setopt($ch, CURLOPT_HTTPHEADER, $ch_headers);
curl_setopt($ch, CURLOPT_HEADER, false); // Don't return headers
curl_setopt($ch, CURLOPT_RETURNTRANSFER, true); // Return body
$body = curl_exec($ch); // For PHP 5.4.x support
if (empty($body)) {
throw new Exception('Curl failed processing request: ' . curl_error($ch), curl_errno($ch));
}
$this->body = json_decode($body);
$this->code = curl_getinfo($ch, CURLINFO_HTTP_CODE);
curl_close($ch);
}
}
function debug($msg)
{
global $debug;
if ($debug == true) {
write_log(LOG_DEBUG, "DEBUG: $msg");
}
}
function logmsg($msg)
{
write_log(LOG_INFO, $msg);
}
function write_log($log_priority, $log_msg)
{
openlog('cachet_notifyV2', LOG_PID | LOG_PERROR, LOG_LOCAL1);
syslog($log_priority, $log_msg);
closelog();
}
// This is for me to debug
//include '.cachet_vars.php';
// Config
$incident_prefix = '[Icinga2]';
$cachet_notify_subscribers = true;
$debug = false;
$cachet = new Cachet();
// Let us process the event type accordingly
switch ($_ENV['EVENT_TYPE']) {
case 'HOST':
debug("EVENT_TYPE{HOST} detected");
if ($_ENV['HOST_STATE'] == 'DOWN' && $_ENV['HOST_STATE_TYPE'] == 'HARD') {
$component = new Component($_ENV['COMPONENT_ID'], 'host', $_ENV['HOST_STATE'], $_ENV['HOST_STATE_TYPE']);
logmsg("{$_ENV['COMPONENT_ID']}: KO HOST DOWN: Creating new outage");
$cachet->newOutage($component);
} elseif ($_ENV['HOST_STATE'] == 'UP' && $_ENV['HOST_STATE_TYPE'] == 'HARD') {
// We're going to check whether this component had a previous outage
$component = $cachet->findOutage($_ENV['COMPONENT_ID']);
if (!empty($component)) {
$component->state = $_ENV['HOST_STATE'];
$component->state_type = $_ENV['HOST_STATE_TYPE'];
logmsg("{$_ENV['COMPONENT_ID']}: OK HOST UP: Recovering from outage");
$cachet->endOutage($component);
} else {
debug("No incident was triggered for this EVENT_TYPE{HOST} for COMPONENT_ID{{$_ENV['COMPONENT_ID']}} with HOST_STATE{{$_ENV['HOST_STATE']}} and HOST_STATE_TYPE{{$_ENV['HOST_STATE_TYPE']}}.");
debug('No previous outage was found for this component.');
}
} else {
debug("No incident was triggered for this EVENT_TYPE{HOST} for COMPONENT_ID{{$_ENV['COMPONENT_ID']}} with HOST_STATE{{$_ENV['HOST_STATE']}} and HOST_STATE_TYPE{{$_ENV['HOST_STATE_TYPE']}}");
}
break;
case 'SERVICE':
debug("EVENT_TYPE{SERVICE} detected");
if ($_ENV['SERVICE_STATE'] == 'CRITICAL' && $_ENV['SERVICE_STATE_TYPE'] == 'HARD') {
$component = new Component($_ENV['COMPONENT_ID'], 'service', $_ENV['SERVICE_STATE'], $_ENV['SERVICE_STATE_TYPE']);
logmsg("{$_ENV['COMPONENT_ID']}: KO SERVICE CRITICAL: Creating new outage");
$cachet->newOutage($component);
} elseif ($_ENV['SERVICE_STATE'] == 'OK' && $_ENV['SERVICE_STATE_TYPE'] == 'HARD') {
// We're going to check whether this component had a previous outage
$component = $cachet->findOutage($_ENV['COMPONENT_ID']);
if (!empty($component)) {
$component->state = $_ENV['SERVICE_STATE'];
$component->state_type = $_ENV['SERVICE_STATE_TYPE'];
logmsg("{$_ENV['COMPONENT_ID']}: OK SERVICE OK: Recovering from outage");
$cachet->endOutage($component);
} else {
debug("No incident was triggered for this EVENT_TYPE{SERVICE} for COMPONENT_ID{{$_ENV['COMPONENT_ID']}} with SERVICE_STATE{{$_ENV['SERVICE_STATE']}} and SERVICE_STATE_TYPE{{$_ENV['SERVICE_STATE_TYPE']}}");
debug('No previous outage was found for this component.');
}
} else {
debug("No incident was triggered for this EVENT_TYPE{SERVICE} for COMPONENT_ID{{$_ENV['COMPONENT_ID']}} with SERVICE_STATE{{$_ENV['SERVICE_STATE']}} and SERVICE_STATE_TYPE{{$_ENV['SERVICE_STATE_TYPE']}}");
}
break;
default :
throw new Exception('No Event Type detected!');
}
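Unlike the first version, V2 is driven by environment variables; a manual test sketch (variable names from the switch above, values hypothetical, and assuming the CLI's variables_order includes E so $_ENV is populated) is:

    # .cachet_api_key is searched for next to the script, in /tmp, or next to the source file,
    # and is parsed with parse_ini_file(), so it uses the same url/key layout as .api_key above.
    # COMPONENT_ID must match a component name defined in Cachet.

    # Host outage, then recovery:
    EVENT_TYPE=HOST COMPONENT_ID="web01" HOST_STATE=DOWN HOST_STATE_TYPE=HARD ./cachet_notifyV2.php
    EVENT_TYPE=HOST COMPONENT_ID="web01" HOST_STATE=UP HOST_STATE_TYPE=HARD ./cachet_notifyV2.php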


@ -0,0 +1,275 @@
<?php
$servername = 'mysql.ct';
$u = 'powerdns_usr';
$p = 'QYVdcqxK4XBVlWLM';
$db = 'powerdns_db';
$conn = new mysqli($servername, $u, $p, $db);
if (empty($conn->connect_error) === false) {
die('Connection failed: '.$conn->connect_error);
}
// Functions
class sqlQuery {
private $conn;
private $re;
function __construct($conn)
{
$this->conn = $conn;
}
private function getError()
{
return $this->conn->error;
}
private function getErrno()
{
return $this->conn->errno;
}
function exec(string $sql, bool $ignoreResultSet = false)
{
$ret = $this->conn->query($sql);
if ($ret === false && $this->getErrno() !== 0) {
$errMsg = $this->getError();
throw new Exception('Error Processing SQL query: '.$errMsg, 1);
}
if ($ret instanceof mysqli_result) {
throw new Exception('sqlQuery->exec(): Unexpected result set.', 1);
}
return true;
}
function query(string $sql)
{
if ($this->re instanceof mysqli_result) {
// Do not modify the result set
throw new Exception('sqlQuery->query(): We already contain a result set.', 1);
}
$this->re = $this->conn->query($sql);
if ($this->re === false) {
$errMsg = $this->getError();
throw new Exception('Error Processing SQL query: '.$errMsg, 1);
}
if (($this->re instanceof mysqli_result) === false) {
throw new Exception('sqlQuery->query(): This query didn\'t return any result set', 1);
}
return true;
}
function longQuery(string $sql)
{
if ($this->conn->multi_query($sql) === false) {
throw new Exception('Error Processing SQL query: '.$this->getError(), 1);
}
}
function fetchRow()
{
$obj = $this->re;
if (($obj instanceof mysqli_result) === false) {
throw new Exception('Error fetching row: There\'s no result set.', 1);
}
return $this->re->fetch_assoc();
}
function fetchAll()
{
$obj = $this->re;
if (($obj instanceof mysqli_result) === false) {
throw new Exception('Error fetching results: There\'s no result set.', 1);
}
return $this->re->fetch_all(MYSQLI_ASSOC);
}
function getNumRows()
{
$obj = &$this->re;
if ($obj instanceof mysqli_result) {
return $this->re->num_rows;
}
return false;
}
function getAffectedRows()
{
$obj = &$this->conn;
if ($obj instanceof mysqli) {
return $obj->affected_rows;
}
return false;
}
function freeResult()
{
$obj = $this->re;
if ($obj instanceof mysqli_result) {
$this->re->free_result();
$this->re = null;
}
}
}
echo <<<HERE
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!! Now converting PTR records to their !!
!! individual zones. !!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
\n
HERE;
usleep(500);
$sqlQuery = new sqlQuery($conn);
// DNS SOA Serial format YYYYmmddss.
// Domain table format
// id,name,master,last_check,type,notified_serial,account
// We're going to create domains for each subnet up to 10.0.9.0
// Query for PTR Domains
try {
$sqlQuery->query("SELECT `id`,`name`,`type` FROM `domains` WHERE `name` LIKE '%.0.10.in-addr.arpa'");
} catch (Exception $e) {
echo 'Error occurred on line '.__LINE__.': '.$e->getMessage()."\n";
exit(1);
}
$rows = $sqlQuery->fetchAll();
$sqlQuery->freeResult();
foreach ($rows as $row) {
$ptrDomains[] = ['id' => $row['id'], 'name' => $row['name']];
// While we are here, change ZONE from NATIVE to MASTER
if ($row['type'] === 'NATIVE') {
echo "DETECTED: Domain '{$row['name']}' is NATIVE, switching domain to MASTER.\n";
try {
$sqlQuery->exec("UPDATE `domains` SET `type` = 'MASTER' WHERE `id` = '{$row['id']}'");
} catch (Exception $e) {
echo 'Error occurred on line '.__LINE__.': '.$e->getMessage()."\n";
exit(1);
}
}
}
// Insert the missing records into the database
$found = false;
for ($net = 0; $net <= 10; $net++) {
$subnet = $net.'.0.10.in-addr.arpa';
foreach ($ptrDomains as $value) {
if ($value['name'] === $subnet) {
$found = true;
}
}
if ($found === false) {
echo "Domain record '$subnet' doesn't exists. Creating...\n";
try {
$sqlQuery->exec("INSERT INTO domains (name,type) VALUES ('$subnet','MASTER')");
$sqlQuery->query("SELECT `id`,`name` FROM `domains` WHERE `name` = '$subnet'");
} catch (Exception $e) {
echo 'Error occurred on line '.__LINE__.': '.$e->getMessage()."\n";
exit(1);
}
$row = $sqlQuery->fetchRow();
$sqlQuery->freeResult();
$ptrDomains[] = ['id' => $row['id'], 'name' => $row['name']];
}
$found = false;
}
// Create SOA and NS records
foreach ($ptrDomains as $value) {
echo "Checking for SOA record for zone '{$value['name']}'.\n";
try {
$sqlQuery->query("SELECT `domain_id`,`type`,`content` FROM `records` WHERE `type` = 'SOA' AND `domain_id` = '{$value['id']}'");
} catch (Exception $e) {
echo 'Error occurred on line '.__LINE__.': '.$e->getMessage()."\n";
exit(1);
}
if ($sqlQuery->getNumRows() === 0) {
echo "Missing SOA record, creating...\n";
$serial = date('Ymds');
$soaRecord = "0.dns.ct admin+dns.lhpmail.us $serial 60 60 60 60";
$sql = "INSERT INTO `records` (domain_id,name,type,content,ttl,prio) VALUES ('{$value['id']}','{$value['name']}','SOA','$soaRecord',10,0)";
try {
$sqlQuery->exec($sql);
} catch (Exception $e) {
echo 'Error occurred on line '.__LINE__.': '.$e->getMessage()."\n";
exit(1);
}
}
$sqlQuery->freeResult();
echo "Checking for NS records for zone '{$value['name']}'.\n";
$dnsNames = ['0.dns.ct', '1.dns.ct'];
foreach($dnsNames as $name) {
try {
$sqlQuery->query("SELECT `type`,`content` FROM `records` WHERE `content` = '$name' AND `type` = 'NS' AND `domain_id` = '{$value['id']}'");
} catch (Exception $e) {
echo 'Error occurred on line '.__LINE__.': '.$e->getMessage()."\n";
exit(1);
}
if ($sqlQuery->getNumRows() === 0) {
echo "Missing NS record '$name' for zone '{$value['name']}'. Creating...\n";
$sql = "INSERT INTO `records` (domain_id,name,type,content,ttl,prio) VALUES ('{$value['id']}','{$value['name']}','NS','$name',10,0)";
$sqlQuery->exec($sql);
}
$sqlQuery->freeResult();
}
echo "Associating related PTR records to new ZONE '{$value['name']}' from old ZONE.\n";
$sql = <<<HERE
UPDATE
`records` AS `dest`,
(
SELECT `id`
FROM `domains`
WHERE `name` = '{$value['name']}'
) AS `src`
SET
`dest`.`domain_id` = `src`.`id`
WHERE
`dest`.`name` LIKE '%.{$value['name']}';
HERE;
try {
$sqlQuery->exec($sql);
} catch (Exception $e) {
echo 'Error occurred on line '.__LINE__.': '.$e->getMessage()."\n";
exit(1);
}
echo 'Number of rows affected: '.$sqlQuery->getAffectedRows()."\n";
}
echo "Deleting '0.10.in-addr.arpa' ZONE and related PTR records.\n";
$sql = <<<HERE
DELETE `domains`,`records` FROM `domains`
INNER JOIN
`records` ON `records`.`domain_id` = 8
WHERE
`domains`.`id` = 8;
HERE;
$sqlQuery->exec($sql);
$conn->close();

143
createVhosts.sh Executable file

@ -0,0 +1,143 @@
#!/usr/bin/env bash
#
# Create Vhosts on VPS3
#
DEBUG=0
set -e
function usage
{
echo "Usage: ${0}"
echo " --domain domain.tld"
echo " Domain to use when creating vhost"
echo " --proxypass x.x.x.x"
echo " IP of the backend server to pass traffic to"
echo " --listenip x.x.x.x"
echo " IP to bind to when listening"
echo " --desc x.x.x.x"
echo " Description of VHosts"
echo " -h | --help"
echo " Show this usage"
exit 0
}
function stop_nginx
{
systemctl stop nginx
}
function start_nginx
{
systemctl start nginx
}
function get_cert
{
/root/.acme.sh/acme.sh --issue --domain $_domain --standalone --cert-file /etc/nginx/ssl/${_domain}.crt --key-file /etc/nginx/ssl/${_domain}.key --fullchain-file /etc/nginx/ssl/${_domain}-fullchain.crt
}
_cwd="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
_bootstrap=${_cwd}/bootstrap.sh
# Init script
if test -f $_bootstrap; then
source $_bootstrap 2> /dev/null
else
echo "Unable to parse BOOTSTRAP: $_bootstrap"
exit 1
fi
OPTS=$(getopt -o h -l domain:,proxypass:,listenip:,desc: -n 'createVhosts' -- "$@")
if [ "$?" -gt '0' ]; then
echo 'Failed to set command line arguments'
exit 1;
fi
eval set -- "$OPTS"
_domain=false
_proxyip=false
_listenip=false
while true; do
case "$1" in
--domain )
_domain=$2
shift ;;
--proxypass )
_proxyip=$2
shift ;;
--listenip )
_listenip=$2
shift ;;
--desc )
_desc=$2
shift ;;
-h | --help ) usage; shift ;;
-- ) shift; break ;;
* ) shift;;
esac
done
if [[ $_domain = false ]]; then
err "You must set domain"
fi
if [[ $_proxyip = false ]]; then
err "You must set the proxy pass IP"
fi
if [[ $_listenip = false ]]; then
err "You must set listen ip"
fi
echo "Creating Nginx Vhosts..."
_vhost_conf_file=/etc/nginx/conf.d/${_domain}.conf
cat << EOF > $_vhost_conf_file
#### Description
## Type: HTTP
## VHost: $_domain
## $_desc
server {
listen ${_listenip}:80;
server_name $_domain;
location /.well-known {
autoindex on;
}
location / {
return 302 https://${_domain}\$request_uri;
}
}
server {
listen ${_listenip}:443 http2 ssl;
server_name $_domain;
error_log /var/log/nginx/${_domain}.error.log;
access_log /var/log/nginx/${_domain}.access.log main;
ssl_certificate ssl/${_domain}-fullchain.crt;
ssl_certificate_key ssl/${_domain}.key;
location / {
proxy_pass https://${_proxyip}:443;
include proxy_params;
}
}
EOF
echo "Setting permissions on conf file..."
setfacl -m user:sysadmin:rw $_vhost_conf_file
echo "Stopping Nginx..."
stop_nginx
echo "Retrieving Let's Encrypt Certificate..."
get_cert
echo "Starting Nginx..."
start_nginx
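A hypothetical invocation (option names from the getopt spec above; the domain, addresses, and description are placeholders):

    ./createVhosts.sh --domain app.example.com --proxypass 10.0.0.12 --listenip 203.0.113.10 --desc "Reverse proxy to the app VM"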

30
enableSite.php Normal file

@ -0,0 +1,30 @@
<?php
/* Init Classes */
class GetOpts
{
private $longOpts = [];
function __construct()
{
}
public function setLongOpt(string $opt, array $callback)
{
$this->longOpts[$opt] = $callback;
}
}
class CheckDomain
{
public function host(string $domain)
{
echo $domain;
}
}
(new CheckDomain())->host('lhprojects.net');
/* Get command line options */
$args = new GetOpts;

6
enable_site.conf.example Normal file

@ -0,0 +1,6 @@
NGINX_USER=nginx #Unused
NGINX_DIR=/etc/nginx
NGINX_CONF_D=$NGINX_DIR/conf.d
NGINX_SVC_RESTART='rc-service nginx reload'
#NGINX_LISTEN_IP=192.168.1.128
VHOSTS_DIR=/srv/vhosts

176
enable_site.sh Executable file

@ -0,0 +1,176 @@
#!/bin/bash
# README
# This script is specifically designed to work on Alpine Linux but
# may work with other distributions by including a $HOME/.enable_site.conf.
#
# See the example enable_site.conf.example in this repo.
#
# This script will create a vhost under Nginx
DEBUG=0
set -e
[ $DEBUG -gt 1 ] && set -x
function usage {
echo -e "$0 domain.tld [disable-php|false] [enable-le]"
}
function cleanup {
test -n "$DRY_RUN" && rm $VHOST_CONF_FILE
}
function reload_nginx {
echo "${_DRY_RUN}Reloading Nginx"
if [ -z "$DRY_RUN" ]; then
nginx -t
$NGINX_SVC_RESTART
else
echo "Skipping..."
fi
}
# Init variables
_conf=$HOME/.enable_site.conf
_cwd="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
_bootstrap=${_cwd}/bootstrap.sh
# Init script
if test -f $_bootstrap; then
source $_bootstrap 2> /dev/null
else
echo "Unable to parse BOOTSTRAP: $_bootstrap"
exit 1
fi
test -f $_conf && source $_conf || debug "Unable to parse configuration: $_conf, using defaults"
NGINX_USER=${NGINX_USER:-nginx}
NGINX_DIR=${NGINX_DIR:-/etc/nginx}
NGINX_CONF_D=$NGINX_DIR/conf.d
NGINX_SVC_RESTART=${NGINX_SVC_RESTART:-rc-service nginx reload}
NGINX_SSL_DIR=$NGINX_DIR/ssl
VHOSTS_DIR=${VHOSTS_DIR:-/srv/vhosts}
SSL_CRT=/etc/nginx/webserver.crt
SSL_KEY=/etc/nginx/webserver.key
if test -z "$1"; then
usage
exit 0
fi
# VHOST dir
_vhost_dir=$VHOSTS_DIR/$1/htdocs
if [ -z "$DRY_RUN" ]; then
# Check domain is a valid domain
host $1 &> /dev/null || err "Invalid domain: $1"
echo "Creating $_vhost_dir"
mkdir -p $_vhost_dir
else
echo "DRY_RUN detected"
_DRY_RUN="DRY_RUN: "
echo "${_DRY_RUN}Creating $_vhost_dir"
fi
# Check if we should enable php
[ "$2" != "true" ] && [ "$2" != "disable-php" ] && _enable_php='include php.conf;'
echo "${_DRY_RUN}Creating NGINX configuration for $1"
VHOST_CONF_FILE=$NGINX_CONF_D/$1.conf
if [ -n "$DRY_RUN" ]; then
VHOST_CONF_FILE=/tmp/$1.conf
debug "${_DRY_RUN}Redirecting to $VHOST_CONF_FILE"
fi
# Set listen ip if provided
test -n "$NGINX_LISTEN_IP" && _v_listen_ip="$NGINX_LISTEN_IP:"
# set default listening port to 80
_v_listen=${_v_listen_ip}80
# Redirect plain-text to SSL
if [ "$3" = "enable-le" ]; then
# Change default listening port to 443
_v_listen="${_v_listen_ip}443 ssl"
# set ssl configuration
_v_ssl=$(cat << EOF
ssl_certificate ssl/$1.pem;
ssl_certificate_key ssl/$1.key;
EOF
)
# write the plain-text virtual host so
# we authenticate with Let's encrypt and
# redirect plain-text to SSL
cat << EOF > $VHOST_CONF_FILE
server {
listen ${_v_listen_ip}80;
server_name $1;
root $_vhost_dir;
location /.well-known {
autoindex on;
}
location / {
return 302 https://$1\$request_uri;
}
}
EOF
reload_nginx
echo "${_DRY_RUN}Requesting a Let's Encrypt certificate"
if [ -z "$DRY_RUN" ]; then
certbot certonly --webroot --webroot-path=$_vhost_dir -d $1
fi
_le_path=/etc/letsencrypt/live
_le_crt="$_le_path/$1/fullchain.pem $NGINX_SSL_DIR/$1.pem"
_le_key="$_le_path/$1/privkey.pem $NGINX_SSL_DIR/$1.key"
echo "${_DRY_RUN}Creating symlink $_le_crt"
[ -z "$DRY_RUN" ] && ln -s $_le_crt
echo "${_DRY_RUN}Creating symlink $_le_key"
[ -z "$DRY_RUN" ] && ln -s $_le_key
fi
cat << EOF >> $VHOST_CONF_FILE
$rd
server {
listen $_v_listen;
server_name $1;
root $_vhost_dir;
index index.php index.html;
error_log /var/log/nginx/$1.error.log;
access_log /var/log/nginx/$1.access.log main;
$_v_ssl
location / {
}
$_enable_php
}
EOF
if [ -n "$DRY_RUN" ]; then
echo -e "${_DRY_RUN}I would have wrote this: \n"
cat $VHOST_CONF_FILE
fi
reload_nginx
echo "Success!"
cleanup
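A usage sketch (arguments per the usage() function; DRY_RUN is the environment switch the script already checks):

    # Preview the generated configuration without touching Nginx
    DRY_RUN=1 ./enable_site.sh example.com

    # Create the vhost with PHP enabled and a Let's Encrypt certificate
    ./enable_site.sh example.com false enable-le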


@ -0,0 +1,110 @@
<?php
function deny($code, $reason) {
switch($code) {
case 401:
$header = '401 Unauthorized';
header('WWW-Authenticate: Basic realm="handleLXCAppArmorProfiles"');
break;
case 403:
$header = '403 Forbidden';
break;
case 503:
$header = '503 Service Unavailable';
break;
}
header('HTTP/1.0 ' . $header);
print "$code $reason";
exit(1);
}
function cleanup() {
# Do intelligent cleanup!
# Remove every uploaded temp file, whether the field holds a single file or an array of files
foreach($_FILES as $fieldName => $fileInfo) {
foreach($fileInfo as $key => $value) {
if ('tmp_name' === $key) {
if (is_countable($value)) {
foreach($value as $fileName) {
@unlink($fileName);
}
} else {
@unlink($value);
}
}
}
}
}
# Check if SAPI is cli
if ('cli' === php_sapi_name()) {
if ('hash' === @$argv[1] && !empty($argv[2])) {
# We going to hash a password and return the result
# and write to file
$pw = password_hash($argv[2], PASSWORD_DEFAULT);
echo $pw;
file_put_contents('.htpasswd', "tar.lxc-apparmor-profiles-user:$pw");
} else {
print "Nothing to do here! Exiting...";
exit(0);
}
} else {
if (!file_exists('.htpasswd')) {
header('HTTP/1.0 503 Service Unavailable');
print '503 Service Unavailable';
exit(1);
}
$auth_creds = file_get_contents('.htpasswd');
$auth_creds = explode(':', $auth_creds);
if (!isset($_SERVER['PHP_AUTH_USER'])) {
deny(401, 'No authorization headers sent!');
} else {
$user = $_SERVER['PHP_AUTH_USER'];
$result = password_verify($_SERVER['PHP_AUTH_PW'], $auth_creds[1]);
if (true === $result && $auth_creds[0] === $user) {
if ('ProcessUpload' === $_SERVER['HTTP_X_APPARMOR_STATE']) {
# Process upload.
$uploadedFileTmp = $_FILES['apparmor-profiles']['tmp_name'];
# Check if multiple files were uploaded!
if (is_countable($_FILES['apparmor-profiles']['tmp_name'])) {
cleanup();
deny(403, 'Multiple Uploads not supported!');
}
# Check file mime type is accepted
$finfo = new finfo(FILEINFO_MIME);
$mime = $finfo->file($uploadedFileTmp);
if ('application/x-gzip; charset=binary' !== $mime) {
# Clean up tmp file
cleanup();
deny(403, 'Forbidden mime-type: ' . $mime);
}
# Check if the hash matches what we were given
$uploadedHash = hash_file('sha256', $uploadedFileTmp);
if ($_SERVER['HTTP_X_TAR_HASH'] !== $uploadedHash) {
cleanup();
deny(403, 'File hash doesn\'t match!');
}
$dest = dirname(__FILE__) . '/apparmor/' . $_FILES['apparmor-profiles']['name'];
$result = @move_uploaded_file($uploadedFileTmp, $dest);
if (false === $result) {
cleanup();
deny(503, 'Error processing upload');
} else {
file_put_contents("$dest.sha256", hash_file('sha256', $dest));
cleanup();
echo '200 OK';
}
}
} else {
cleanup();
deny(401, 'Unauthorized');
}
}
}
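The handler expects HTTP basic auth against .htpasswd, an X-APPARMOR-STATE header, the tarball's SHA-256 in X-TAR-HASH, and a gzip tarball in the apparmor-profiles field; a client-side sketch (the script's own filename is not shown in this commit, so handle_upload.php, the endpoint URL, and the archive name are assumptions):

    # Generate/refresh the .htpasswd entry via the CLI branch above
    php handle_upload.php hash 'secret'

    # Push a profile tarball (field and header names taken from the PHP above)
    curl -u tar.lxc-apparmor-profiles-user:secret \
      -H "X-APPARMOR-STATE: ProcessUpload" \
      -H "X-TAR-HASH: $(sha256sum lxc-apparmor-profiles.tar.gz | awk '{print $1}')" \
      -F "apparmor-profiles=@lxc-apparmor-profiles.tar.gz" \
      https://example.com/handle_upload.php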

194
install_pulp_consumer.sh Executable file

@ -0,0 +1,194 @@
#!/bin/bash
LOG=/tmp/pulp_consumer_install.log
PYTHON_RHSM_URL=http://mirror.centos.org/centos/7/os/x86_64/Packages/python-rhsm-1.15.4-5.el7.x86_64.rpm
ERROR_MSG="Check $LOG log"
. /etc/os-release
export LOG
export ERROR_MSG
function debug {
if [ -n "$DEBUG" ]; then
echo "DEBUG: $1"
fi
}
function run_cmd {
debug "Executing: $1..."
$1 >> $LOG 2>&1
if [ $? != 0 ]; then
echo "Error: Failed running command '$1'. $ERROR_MSG"
exit 1;
fi
}
echo ""
echo "This script will now install pulp_consumer"
echo ""
export TMPDIR=`mktemp -d /tmp/pulp_consumer.XXXXXX`
echo -e "Creating temp directory $TMPDIR...\n"
OSVER=$(echo $VERSION | awk '{print int($1+0.5)}') # Bash can only compare whole numbers
if [ $ID = 'ol' ] && [ $OSVER -ge 7 ]; then
echo "Note: python-rhsm has been remove from Oracle Linux 7. Pulling"
echo " python-rhsm from mirror.centos.org"
echo "Installing $PYTHON_RHSM_URL now..."
run_cmd "yum install -y $PYTHON_RHSM_URL"
fi
echo "Retrieving pulp repo..."
run_cmd "wget https://repos.fedorapeople.org/repos/pulp/pulp/rhel-pulp.repo -O /etc/yum.repos.d/rhel-pulp.repo"
echo "Installing pulp_consumer and its dependencies..."
run_cmd 'yum groupinstall -y pulp-consumer-qpid'
echo "Installing ssl packages for pulp_consumer message query (qpid)..."
run_cmd 'yum install -y qpid-cpp-server-ssl'
echo ""
echo "Extracting embedded files..."
echo ""
cat > $TMPDIR/archive.enc <<'_ENC'
H4sIAGy7J1cAA+06a4+jSJLzGan/A9JKN7PyTBkwfrWuT+JpYxtsDPi1arV4g8GA
eRjwr79MsKtc1T0zq5Vm905yqLvLzoyMiIx3ZJcZR1lxstMXM46cn/4awAAMSBL+
xId97PEn/Ijjg/5PODHEcZIgsSHAw/uDIfYTiv1F8ryDIsv1FEV/Covc9OoXL051
0/4B3p/t/z+Fv6GqZ6OZned+5GaoH6G552eo44c2qqfgbxiiZnw62VFuW6hRo5bt
6EWY/4rqkQVw7YfdVyqZF5dwD/nbHR296GFhZ2BRz9FVESYoc3M8lAl9cBwtfcjJ
i+PMRn0HjeIczRLb9B0fUPbs1H5BkFbYBB7P7PQCDkOv9d0i1XM/jpC/AQwvzvLP
4Cf6HS7cifST/YKiwgcGv7a3boRsJbnLncdoFpuBnb+4dn6n8MvfXwCHJE4fOYFv
aJLGF98COmg0s+YU1SlClFoJAC3N9G9JYfxYNrAR+iYa2DVaZODCTpyiegGIRLlv
NpeDHPXE/5aktuNXb1QgE8gBbTcgGiDoO/W3LAtbNMXO2/uBy/B6CBQMPtw1Z9+M
eLMGVAvYbUm05sobB4Fi/pyhirJATRtcFZDLfDeyrYYFcAwdzVMQTEB4KHic+nnd
bG2ptSRIk89QjLxVzaMo4HNjCt1p7F7HBTBA5kEEoInWV0o/99DU1kM00bOsjFMr
Q+O0of4KelSjMRAV6DL1L3oOqEVAi6dWeajQSpynceSGNSDWuq0FxAWcAttOWrky
u7m/mhZ24+LvmdyEggoAfgMMc3dpGCqtOn5HFWgcNYosbeNuc3jwPflmI2vuDO1o
AofRc+9uayAd+KOjcA1KordRGjvQelCWSIcs76wZ6p2cvwKNtacsH9w+j9MaHr1h
Nyw+nEB/afQeJ3YEXOm3LK8BN0/PPBjq9Sn0owBSjd7pA03AzaBcIESQf7Q3/Yo0
Qdk4/EvogSA5AgGyFz/K72GEkmTvvYOjXYjeBSsPsYN27dzsJoHfbt69ttvy6d7Q
XkAYIQ9B0Hram0LRL2908jDrQvHBv/pvRhFZof1ipjkCkw31LgCb7DK36+wtQk92
lumu/X2k3kQGcjwEqkK9uuafxPl3qaI5+0+kCOQf71e+Pgjye7oD+43C/lzLj+qF
+rll7nvav907Du3PjdBmuw0XoGTt16+vKHeyyD2vh3ErNHTL1M7iIjWBE97i5jVB
NT6f1cBpTy1DuwKen4Fz2Tfg2W86eyT3evom09uZRt12En+DdH98GLLfa2KDlvlN
4DRCnPQIGL8J+OQWsSc/TeP0W+hn+e8LA+m9xSAQLdf9CCZGyKSlgEIKDZeH/PLA
754h3MSFtv2Dm1txGYWxDvPcZDWB3gOzYJw+0GmL0S2ptFUbfYcIq28jAYhM8DEy
3w6DivAD1TTJCwTVPyXW7qWPjd8nnj8QsKHWOMQPDvxLgvrWtz+W9b29yoYJXGtK
06tnCey7NOjfhXpkAVnC/uGND/x259E41YM7NDw4EQX3iKGiXlk9ZhHYhTUabEg+
SPDSVoyGKPj5oHB45FFhVpFCdqntAqdLXzNJ431NnH39PsrQbpGl3dA3PqSIN7R3
cdVmlLo4vTQGeLGaY82XH0TNd9gfPf19gvoN7P0G9x697mMOa+K+e2P4aPLfS3Y/
MturCX6D6y+JfWqS19o24vgtD35u0hKogyf/1h+ums9AHxAPlHAnh1bUzQAWDz8C
E0gYZrD7Se0z6JLz1mssO9Qf6kd7ul1Ffzn5UQE8v6my7c7XN6avBa8lgfbadP0h
CcZFnhR5m0STOAy/OQ37yKy/+dG3DHRIkZW1/KXiZICDwFFvy6hh56VtR3eJs7Yc
wR4MyNCGTpO4gMZt2JaAngvyyF7j99YEwWOWnuvwwkDJRmh/M+MwTr/vXJ1752r5
GcS7jSYAF7UzU08gyUZ8GzIpUz35lsffcqiRSL91wqDv/7gBgyO/dXug4QUO3KAk
MCSaSQA2fE3CKdK0HVQs2BU5rze5U3qB5Bspf23W22kCTFQNy/aY33YPL3cJm9WH
dPCqZ9PTwaAJSGcA0c9BVAGVA2XZb9K12RqNoMigGWuKbGvTr39oURT/qOtGAT9S
Wnud98KiI6ztjlyojbYFAuK0fpSZnv2Y34w0DsCFtPXitvWCcn7Tpf+cm8nPsCf9
GbRoP4PWDF6wVU5z8j6AAY01qC8/mO5u1OEi+svtwOf2K/jmw24cqP/ehf797urp
9ySa6a3V/nfs+4Mh0R69vGcfJ9DPgQtRorxqN9F2wPxA4TZUQU96Q731SyDjRtl7
mRqc1/UbyY0e+taNZdYUuZ/PiW+1Kkx1w/Dz0/nn73XXILWsTB2mrTc+v9wv8HdU
N7I4LHL7Ybh4LD3vJ4NmQITiNJ8BId+CLWde3ypZOxE2ir0xbhLOe+avyvuO9SNj
WOL+6x3vpveFAr41uvY9FFqeDQs4urTKeEj2Ten7mNUbXTZUfadNNjeL3VwIhtWr
l39Fbh6OAq999cd2hAF+gj86yZtlUWgE5Kb+DwUHboHZoxk6HvT0Q6xm+3U8WaVx
U91hUUvzNgTvQ3abj9ud1kRtlWnbiBxGbtIev7nhrZ682QcMZyBsYH6M7psPNO/9
yR+RRf5x+/z1gT5KkBjyn370esIrtJ731/Jo3n8H2O+8/2IETry+/xIYAdbhQzDx
fP/9d8BvEGhuIkgow61VgRcYSuWaVUQUBKZzZRjKXrpUKdCUK/DUfN+XelrJyvvZ
PD4I3sWUKJlb0DJVrq7cQqSCCYVrHO2JzCdksxErjqWWtCttwHGVxvhsv515xpar
WJVatOuxSk9p3JhSFXulpHZNVOloFn5CDhPPs1jK5kusko4ULrJcJanyFezrzdr1
/ZrIjirmSs1aKnv1E0KFG1VcUyXr7tmNLAtcadHGhPf3W9wTZaxkymZ9zpXr0tpW
pSiP4O3gGsuVhxrIsF3H+51cui7nixQ2YZTzRBGMHitzNCVrFEUKNFtScH9OxUBP
Mruza38pWq6JL2biJ8RjzxK7HZZZn8WycaJpeib6zrawKmbr7hzMZvlhRxhzpFxb
2o7tdDbZJawXCdvvnDiF7H9CZsN6HItT2lqvzQOGc9vLuleWcjiKjqlV1cpwEAfT
zdiyD9l6sp9kXoSbPrvAlUISygttfUISv+xXWne0H0qpS42GnL0MFv4uYaIZq1rK
bNIfu7FyEZVjHjEJNrR6U1cJNMPCllktiJNPyJybq+SWcCJvHpRWGS2WQV/mEiWc
Z6tJYHWP7ki4poORSFYH+YwpWk7SIXOIO9O+cnRi6hNCLC/lwRAmyqUnY9vrtVyw
lUzomWqd2UN9Jqnhzo9VZbWYEd6ESgZWtRxn4izYhrO92ckAhWx6iM8ydqBckaYo
7kd+yAI7AAfSSEbad3CPxpcesap0bjSqT8tPyFWQ7Yt4HVmqur+mRpKFmjczDmrU
uS62y021uVyKqbhQZtsL37G66fR63ZhbRrg4ix1DDrFPSM/ojU/LeDwyJNPmq0Th
aWKqDUTM0y+r2WVruatgJUmxPOoE9YifrsP9qqtuqcXay00zH31C9t2kOuyH/GiJ
J4uTXAynq1pwnGka0vvzeaWu+yI2dRZ6n8KH+4zuM/uhM+5jAxMXtsQK4z4h2Jkj
5pmbcYeNzGIT1WUMfZeqvq7Onc2hF1f8IDFGx3nJ9MzrlpXTzgVbnlZVtlxfQny6
AbcQp3Ob6HFGEB/E+liPOaLfx3z9aq3p3aJMbYnYEasg5vOdYUXCaLEcdIiZRK7N
TkSk8RekSRKcxH6fOP7TSe0J/zS89ZR/HY8/qf8DHBt+qP99nMSf9f/fAbTuolSe
p74B23ME/i+UkwKXsMJaap+8GgdpNuCjaDi3a4H9jOIsig/QcR8lcHQ8Qoc0Sg3Q
EYkOGbTHoRSOcgzKDFCMQrEh2uujXA8djFGKQDECRQCNB6af0f+W4ofv/4M8NCWr
tbABeQWdc/vXpoS7yKDIsrBVCM5e4E/GJQaKLsdTlMLQcxMUYuVIuRx1K8YnYhSH
tarFeWd2RjZsrqx1111rDqV2D10iwvfnckWeyWoE0r+2SaaqbA0Hy4RmVzsm6vV2
3aIzPztihM/qyQXhWLLgonVUjxjGok5pfeyWkTP21py/cbeOEkrCbLo6OUJ58phQ
3lLTw6yLzTTxMpUls+og+mZ3jWqX582ZIZurSN2QKrszszzQ5+NrZ6xrE3kslDgp
EWl3Vyn48hrOI72ec4KQVtYOEYarMaOeNMqk59Rlndj9xIjX+fzsbmtBqsfX6VjH
td727HU7UmlPc6pcr2y7o6aUPNUVGmFru5qtDnS0PirxRd921LrTX0yN3SSv12wq
d3CNoPNNalwTNRU2w7IMBspRrNOex6THrYJM7bPFzAjrVoYZUHIp7drXzwLth8Ek
kAhB5TvRWMCn0051Haiumu6lkXV2CrwqxwkyrHh7KelLmSaoQbRfnDJ147NGzGPM
lacHVowJcmziy7JWKmzqnZjQGo+s/Yoy5DHvOntk3J8lAm3a04MVrroOOZbL+QkL
DC2gfFsMeH17uopUdpb7pu+Sa/xa0upGrbK1b1tX9TxBhP4loo4xodFGmV7ImPOu
bs9RnC2pduZDjd8t/R5x4eU0sIjcwtwoCI2RdEiu3StR7K5gWq+VrY2FwcLkjBCU
XLlfdWYZm121wLcn/WI6cS+sN51w1wvWP8v0RNK3kkTugiU2GRD5EPFVS0jIdaRR
qbEbYpMonq2qeDfuzLZVJirT2Ot6uwz0XOWcdmX2qh6j3O2NMHLQKThyzyPkIWOC
wCrMUO+PruOZwp0PiSUm7qk3mzI7aufk9Ohsx0V9Ha4w36iYdXXIJG2+KOuKlFzE
7Zld8rDoYQPPG+1qPlekYxFfR1pyHmwG1W5OdOanjjOZRHY4Gzjzracbskt2NHE8
5zJ1ixQlHbCB1/eu8ZFgNVWeCl173pfdVuS6j3VpcxMP51y16Qwcpg5lzpPHhptk
XFBmGiKpah2GG6kyymUXXwkcJWiiFvX5GTVi9Z7JVNhsMCuH29VqKhURNXLkATHu
c/X4SBk8SSKOwtkac0qOO4KO+J0z1IzR9jzjqA4buGnBMwscV4JM8jVizKtKQWmj
YVhORtYm57tOt4OU84JYdYjuDorM0sMVcAtKEXhirgaKHGIdj9cniT2yi4lW2sxm
71OH3A6EI96bn+TRGRlcMOD2a2wxlAIQEQIfG8Xa704HRzqeVWdLGgxjSpZ3rD0u
tKM6MU+7zZDoDjsmKHmEvkDWfid08PNltdvLu/EqcnQb0wqPHnQrNqpz4mT2t1Kg
Hib8ZLCY8EJ6zcWjZU+peEJTwlVFkrynrX2nM1FWcoQb7LWOu+ds2dvKTDUc9lP/
LM4jY7YxzIEmHBYuv173hpXFk+KptGaujpw347WiLC4aqZdyvxeC76fASvs0v02d
WE2ScrefUuPxWpyb6gr3kxF3Aaki4y5pQs1NDvGOIrEkN3RwqOd6XkWqoygOI16l
akUrxG5oJbPd2lsmQGRKUnc+GcrRelKPTGum76YRgexPPD3aDPqiYnUODGF246Q3
ATblo4ryhu5ZZtSpsB342NEOOkeip5qMK4zcsXc6HfZugCEjfiuOzkwwksQBKyaE
y+vcdSrvDwwemYtFnS54pWMv+H1vrDGipA3c0swnZBXxbGEQozWyHJ8SIgqtXkcW
c9CwD5fayZKChz73u1r0HymeWWHAX5/40mW+aEpXUb9QoW7oJ727/NI8FTLSlxtb
P8sKO/0jxNS24C+h/NkjwBg+Alid+yMATR0/1t6MB/W4K8pZybSj84QrZxvtysmI
SJHNiwBDi1N5W3nA0vle5SSQnNt1T+TM6SYzWW4Fp2u4RlUiCJNNoE94DBFpcseq
HCGyWimqHC6xYiWGMVzrfVj77gkCPkAg//ILxEnK9O2mQCwW3F8RPg6b9G3YXFFw
X46ZZvBcGkbt72aS4vfIU6It8RmPnBmaX4/o1Tjq6s7GT6munx6nZX/Hr7eBJdG9
SzwIOHFszh3HcjpD8pyMakeLBH0kr3xSQayJsyiFGcbol/mqy1hdgufAhEofxnMN
cwOzcxgdGZ2bb+g9ZQVRVw7lemRR+HXaTzbO0kHmlLYpw5wuO5a080TJqmtiHae9
1RBP5AOLHdmd29MvY4dfaMDzz1YqxLLrX3jcPGage0GWB5kuuZgZT8LhKSy8CXY+
04bg9hY90cHTjWrp52mX7NGkjbE0N+4P+8uMpqxE4FazIRcg42BiTaS5P06M/mox
DDIrN/XejFvm7HCj7if4Nr8sw2Xm7S4soyZz8Tq/2Ny8kA6CNUgwAYkOGEOVoKtT
f+B91BJYgKMOks9Pj5tVwXv2an502dVoPyOwMwFqEpJZlCf2lF5emd6p7J2jUbRW
u9zGUPeuxOdTcnsMy+x4IU21Rx3ly6iz63hqfxYls/HG1YeIfu1Oqzk/yeSV0dnr
4j7MjaE13IJbScNppyh0UBePDuVNhY7iBY51yWmgRZEoJosVP18ihyNdLbA5MffI
fmcXyLs62PbdHqk56ygc7C6b3XZ7mJ56uOhgu8zYbGpu5sxIsa8W+mZh4kiaTY7Y
oq/sQ0XMwgW236dbsxjMWKJgdFK8XNxsaZzmm7rHzzozfsLPR6C9iwhHLDhO4HYI
FRzXbDg70Ecjtxerk6JF426Zn9IwC6iVp9RkZYx6Kq1z3YV5zaZe5pKJtDyZ8pfn
xP+EJzzhCU94whOe8IQnPOEJT3jCE57whCc84QlPeMITnvCEJzzhCU94whOe8IQn
/B+D/wUWlo//AFAAAA==
_ENC
debug "Creating $TMPDIR/files..."
mkdir $TMPDIR/files
debug "Extracting archive to $TMPDIR/files..."
cat $TMPDIR/archive.enc | openssl base64 -d | tar xz -C $TMPDIR/files
echo "Done"
ca_file=$TMPDIR/files/ca.crt
cl_file=$TMPDIR/files/client.crt
cf_file=$TMPDIR/files/consumer.conf
dest=/etc/pki/pulp/qpid
debug "Creating $dest..."
run_cmd "mkdir $dest"
echo "Copying $ca_file to $dest..."
run_cmd "cp $ca_file $dest"
echo "Copying $cl_file to $dest..."
run_cmd "cp $cl_file $dest"
conf_dest=/etc/pulp/consumer
echo "Copying $cf_file to $conf_dest..."
run_cmd "mv $conf_dest/consumer.conf $conf_dest/consumer.conf.orig"
run_cmd "cp $cf_file $conf_dest"
echo "Enabling systemd pulp-agent service..."
run_cmd "systemctl enable goferd"
run_cmd "systemctl start goferd"
debug "Cleaning up..."
run_cmd "rm -fR $TMPDIR"

21
le_renew_cron.sh Executable file

@ -0,0 +1,21 @@
#!/usr/bin/env bash
#
# Auto renew letsencrypt certs
#
HOSTNAME=`hostname`
EMAIL="alerts@lhprojects.net"
OUTPUT=$(letsencrypt renew)
LE_STATUS=$?
echo $OUTPUT >> /var/log/letsencrypt/renew.log
if [ "$LE_STATUS" != 0 ]; then
SUBJECT="CRON: Letsencrypt automated renewal failed on ${HOSTNAME}"
else
SUBJECT="CRON: Renewing all letsencrypt certs on ${HOSTNAME}"
fi
echo $OUTPUT | mail -s "$SUBJECT" $EMAIL
# Restart nginx
systemctl restart nginx

13
mount_ecryptfs.sh Executable file

@ -0,0 +1,13 @@
#!/bin/bash
# Utility script to mount ecryptfs on my PVE boxes
# This script is executed via systemd service file
PASSPHRASE_FILE=$HOME/.ecryptfs/passphrase
if [ ! -e ${PASSPHRASE_FILE} ];then
echo "EXITING: ${PASSPHRASE_FILE} missing!"
exit 1
fi
PASSPHRASE=$(cat $PASSPHRASE_FILE)
/usr/bin/printf "%s" "${PASSPHRASE}" | /usr/bin/ecryptfs-add-passphrase --fnek -
/sbin/mount.ecryptfs_private dump
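One-time setup sketch for the passphrase file the script reads (path from the script; the umask choice is an assumption):

    mkdir -p "$HOME/.ecryptfs"
    (umask 077; printf '%s' 'your-mount-passphrase' > "$HOME/.ecryptfs/passphrase")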

12
pulp_bound_repo_to_consumer.sh Executable file

@ -0,0 +1,12 @@
#!/bin/bash
#pulp-admin consumer group bind --consumer-id=$1 --repo-id=ecryptfs-x86_64
#pulp-admin consumer group bind --consumer-id=$1 --repo-id=hhvm-x86_64-base
pulp-admin consumer bind --consumer-id=$1 --repo-id=oraclelinux7-x86_64-uek3
pulp-admin consumer bind --consumer-id=$1 --repo-id=oraclelinux7-x86_64-optional-latest
pulp-admin consumer bind --consumer-id=$1 --repo-id=oraclelinux7-x86_64-addons
pulp-admin consumer bind --consumer-id=$1 --repo-id=epel7-x86_64-base
pulp-admin consumer bind --consumer-id=$1 --repo-id=oraclelinux7-x86_64-latest
#pulp-admin consumer group bind --consumer-id=$1 --repo-id=icinga-x86_64-stable-release
#pulp-admin consumer group bind --consumer-id=$1 --repo-id=ngtech-x86_64

65
pulp_group_update.sh Executable file

@ -0,0 +1,65 @@
#!/bin/bash
# This script will run a pulp command to update consumers and reboot them
CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
BOOTSTRAP=${CURR_DIR}/bootstrap.sh
# Arguments that require values
declare -A arr_args_req_value=()
#######################################
function usage
{
echo "Usage: ${0}"
echo " --consumer-group-id"
echo " Pulp consumer group id"
echo " -r | --reboot"
echo " Should the consumer require reboot after update"
echo " -h | --help"
echo " Show this usage"
exit 0
}
if [ ! -e ${BOOTSTRAP} ];then
echo 'EXITING: bootstrap.sh must be in the same directory as this script'
exit 1
fi
. $BOOTSTRAP
# Check command line
if [[ -z "${@}" ]]; then
usage
fi
echo "${@}" | grep 'consumer-group-id' > /dev/null
if [ $? != 0 ]; then
echo 'Error: A consumer group id is required!'
exit
fi
# Process command line arguments
arr_args_req_value['consumer-group-id']=true
while [[ $# > 0 ]]; do
key="$1"
case $key in
--consumer-group-id)
check_values arr_args_req_value 'consumer-group-id' 'A consumer group id is required' "$2"
_consumer_group_id=$2
shift;;
-r|--reboot)
reboot_arg='--reboot'
shift;;
-h|--help)
usage
shift;;
*)
_default=$key
shift;;
esac
done
pulp-admin rpm consumer group package update --consumer-group-id=${_consumer_group_id} --all $reboot_arg
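An invocation sketch (the group id is a placeholder):

    ./pulp_group_update.sh --consumer-group-id webservers --reboot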

21
pulp_sync.sh Executable file

@ -0,0 +1,21 @@
#!/bin/bash
# Script to sync pulp repositories
# This script will be run manually every week
# BETA BETA BETA
CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
BOOTSTRAP=${CURR_DIR}/bootstrap.sh
if [ ! -e ${BOOTSTRAP} ];then
echo 'EXITING: bootstrap.sh must be in the same directory as this script'
exit 1
fi
. $BOOTSTRAP
repo_ids="oraclelinux7-x86_64-latest epel7-x86_64-base oraclelinux7-x86_64-addons oraclelinux7-x86_64-optional-latest oraclelinux7-x86_64-uek3 hhvm-x86_64-base webmin-noarch-base icinga-x86_64-stable-release ngtech-x86_64"
for repo_id in $repo_ids; do
pulp-admin rpm repo sync run --repo-id=$repo_id
done

21
repo_sync.conf.example Normal file

@ -0,0 +1,21 @@
# Sample Configuration file
#
# Where to log my output?
LOG=/tmp/repo_sync.log
# Where to download my repository packages?
REPO_DIR=repos
# Syntax:
# reponame:repopath
#
# Retrieve the repo name from 'yum repolist' and set the repo
# path you get from a first run of reposync.
#
# Note: The path differs between repos, which is why you need to
# run reposync once first to discover it.
REPOS=(
repo1:repo1_path
repo2:repo2_path
)

44
repo_sync.sh Executable file

@ -0,0 +1,44 @@
#!/bin/bash
# Script to sync repos
CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
BOOTSTRAP=${CURR_DIR}/bootstrap.sh
CONFIG_FILE=${CURR_DIR}/repo_sync.conf
YUM_LOCK=/var/run/yum.pid
if [ ! -e ${BOOTSTRAP} ];then
echo 'EXITING: bootstrap.sh must be in the same directory as this script'
exit 1
fi
. $BOOTSTRAP
if [ ! -e ${CONFIG_FILE} ]; then
echo 'EXITING: repo_sync.conf must be in the same directory as this script'
exit 1
fi
. $CONFIG_FILE
debug "Current directory: $PWD"
debug "Changing directory to ${REPO_DIR}"
test ! -d $REPO_DIR && err "${REPO_DIR} directory doesn't exist!"
cd $REPO_DIR
debug "Current directory: $PWD"
for repo_id in "${!REPOS[@]}"; do
## Avoid running while yum is already running
if test -e $YUM_LOCK; then
err "Yum appears to be running, if this is not the case\nplease delete $YUM_LOCK"
fi
IFS=":" read -r -a array <<< "${REPOS[$repo_id]}"
name=${array[0]}
path=${array[1]}
echo "Processing repository '${name}'..."
run_cmd "reposync" "-n --repoid=${name}"
_path=${REPO_DIR}/$path
run_cmd "createrepo" "--update ${_path}"
unset name
unset path
done

52
rsync.sh Executable file

@ -0,0 +1,52 @@
#!/usr/bin/env bash
#
# Backup automation to off-site storage service
#
#
# Note: This script is not portable
# License: Anyone is free to use and modify it
#
function send_notification
{
echo "Sending notification."
echo $2 | mail -s "$1" ${EMAIL}
}
CONF=$1
TODAY=`date`
HOSTNAME=`hostname -f`
CURR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
BOOTSTRAP=${CURR_DIR}/bootstrap.sh
if [ ! -e $CONF ] || [ -z "${CONF}" ];then
echo 'EXITING: Missing configuration file'
exit 1
fi
source $CONF
if [ ! -e $BOOTSTRAP ];then
echo 'EXITING: bootstrap.sh must be in the same directory as this script'
exit 1
fi
source $BOOTSTRAP
echo "RYSNC CRON: Syncing local directory to remote server"
run_cmd "rsync ${RSYNC_OPTIONS} ${RSYNC_LOCAL_PATH} ${RSYNC_REMOTE_PATH}"
if [ $? != 0 ]; then
echo "RSYNC CRON: Failed to sync files..."
echo "RSYNC CRON: Additional information stored in ${LOG}..."
send_notification "CRON: Failed to upload files to off-site storage on ${HOSTNAME}" "Failed to upload ${RSYNC_LOCAL_PATH} to off-site storage on ${TODAY}"
exit 1
fi
echo "Completed..."
send_notification "CRON: Uploaded files to off-site storage successfully on ${HOSTNAME}" "Completed uploading ${RSYNC_LOCAL_PATH} to off-site storage on ${TODAY}"
exit 0

20
sbblupdater.conf.example Normal file

@ -0,0 +1,20 @@
#
# SquidBlocker Updater Configuration
#
# Note: This updater only supports the shallalist blacklist
#
# Blacklist url to download and update SquidBlocker with
#
blacklist_url='http://dl.lhprojects.net/static_files/shallalist.tar.gz' #Recommended, updated once a day
# In situations where you have multiple SquidBlocker instances running,
# you can specify a URL for each instance.
## First instance
sb_url[1]='http://username:password@127.0.0.1:8081/db/set_batch/'
blacklists[1]='warez spyware redirector hacking dynamic costtraps adv tracker' #Blacklists to import
## Second instance
sb_url[2]='http://username2:password2@127.0.0.1:8080/db/set_batch/'
blacklists[2]='warez adv redirector costtraps dynamic hacking spyware porn tracker anonvpn'

110
sblocker_updater.sh Executable file

@ -0,0 +1,110 @@
#!/usr/bin/env bash
#
# Define a function to cleanup working directory
#
function cleanup {
rm -rf "$WORK_DIR"
echo "Deleted temp working directory $WORK_DIR"
}
trap cleanup EXIT INT
#
# Load configuration information
#
## Regular one
#source /etc/sysconfig/sbblupdater
## Debug statement
source ./sbblupdater.conf
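#
# The debug output below is gated on the DEBUG variable; a hedged example of
# enabling it for a single run (run from this directory, since the script
# sources ./sbblupdater.conf relative to the current directory):
#
#   DEBUG=1 ./sblocker_updater.sh
#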
#
# Create TEMP directory
#
echo 'Creating TEMP directory...'
WORK_DIR=`mktemp -d -p /tmp bl_staging.XXXXXXX`
cd ${WORK_DIR}
if [ "${DEBUG}" = '1' ]; then
curr_wd=`pwd`
echo "DEBUG: Current directory: ${curr_wd}"
fi
#
# Download blacklist
#
## Debug variable
#blacklist_url=https://www.lhprojects.net/blacklist/shallalist.tar.gz
echo "Downloading blacklist from ${blacklist_url}"
## Regular one
wget -nv ${blacklist_url}
exit_status=$?
if [ ${exit_status} != 0 ]; then
echo "ERROR: Unable to download blacklist"
exit 1
fi
#
# Extract tar
#
tar xvf shallalist.tar.gz > /dev/null
if [ "${DEBUG}" = '1' ]; then
curr_wd=`pwd`
echo "DEBUG: Current directory: ${curr_wd}"
fi
#
# Create domain and url list for each instance then import into SquidBlocker
#
sb_int=0
empty_vars=0
while true; do
## If there are more than 5 consecutive empty sb_url variables, exit the loop
if [ $empty_vars -gt 5 ]; then
break
fi
if [ -z "${sb_url[sb_int]}" ]; then
((empty_vars++))
((sb_int++))
continue
fi
echo "Creating blacklists for instance ${sb_int}"
domains_file=`mktemp domains.XXXX`; domains_file="${WORK_DIR}/${domains_file}"
urls_file=`mktemp urls.XXXX`; urls_file="${WORK_DIR}/${urls_file}"
for bl in ${blacklists[sb_int]}; do
cat "BL/${bl}/domains" >> ${domains_file}
cat "BL/${bl}/urls" >> ${urls_file}
if [ "${DEBUG}" = '1' ]; then
# Read from stdin so wc prints only the count (otherwise the filename is appended)
linecount=`wc -l < ${domains_file}`
printf "DEBUG: %s has %d domains for instance ${sb_int}\n" ${domains_file} ${linecount}
linecount=`wc -l < ${urls_file}`
printf "DEBUG: %s has %d urls for instance ${sb_int}\n" ${urls_file} ${linecount}
fi
done
echo "Importing blacklist into instance ${sb_int}..."
curl -i -X POST -H "Content-Type: multipart/form-data" \
-F "prefix=dom:" \
-F "val=1" \
-F "listfile=@${domains_file}" \
${sb_url[sb_int]}
curl -i -X POST -H "Content-Type: multipart/form-data" \
-F "prefix=url:http://" \
-F "val=1" \
-F "listfile=@${urls_file}" \
${sb_url[sb_int]}
rm -f ${domains_file}
rm -f ${urls_file}
((sb_int++))
done

53
start-tcptunnel.sh Executable file

@ -0,0 +1,53 @@
#!/bin/bash
#
# This will start multiple tcptunnel instances
function _shutdown
{
for i in {1..6} ; do
if [ -z "${PIDS[i]}" ]; then
# assume we are at the end of the list
break;
fi
kill ${PIDS[i]} > /dev/null 2>&1;
done
exit 0
}
# Trap signals
trap "_shutdown" SIGINT SIGTERM
# This is for debug
source tcptunnel.conf
# Source configuration file
#source /etc/sysconfig/tcptunnel
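# The sourced file is expected to define TCPTUNNEL_BIN plus the parallel
# arrays used below (indexes 0-5); a minimal sketch with hypothetical values:
#
#   TCPTUNNEL_BIN=/usr/local/bin/tcptunnel
#   BIND_ADDRESS[0]=127.0.0.1
#   LOCAL_PORT[0]=2222
#   REMOTE_HOST[0]=host.example.com
#   REMOTE_PORT[0]=22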
TCPTUNNEL_STARTED=0
NPID=0
for i in {0..5} ; do
if [ -z "${BIND_ADDRESS[$i]}" ]; then
continue
fi
${TCPTUNNEL_BIN} --local-port=${LOCAL_PORT[i]} --remote-port=${REMOTE_PORT[i]} --remote-host=${REMOTE_HOST[i]} --bind-address=${BIND_ADDRESS[i]} &
# Store pid
((NPID++))
PIDS[NPID]=$!
disown ${PIDS[NPID]}
if [ $? = 0 ]; then
((TCPTUNNEL_STARTED++))
fi
done
if [ $TCPTUNNEL_STARTED = 0 ]; then
echo "No TCP Tunnels were started"
exit 255
fi
echo "${TCPTUNNEL_STARTED} tcptunnels were started successfully"
sleep infinity

299
tar.lxc-apparmor-profiles Executable file

@ -0,0 +1,299 @@
#!/usr/bin/env bash
#
# This script will tar lxc-pve AppArmor profiles and
# upload archive to remote download server.
#
# Path to auth file that contains username and password.
# Example
# username=authuser
# password=authpw
auth_file=$HOME/.tar.lxc-apparmor-profiles.authfile
# Do not edit anything below this line
set -e
_curl_opts='--silent'
_wget_opts='--quiet'
_ssh_opts='-q'
if [ -n "$DEBUG" ] && [[ $DEBUG -ge 2 ]]; then
set -x
_tar_opts='-v'
_curl_opts='--show-error --verbose'
_wget_opts='--verbose'
_ssh_opts='-vv'
fi
function debug {
if test -n "$DEBUG"; then
echo "[DEBUG] $1"
fi
}
function cleanup {
debug "Cleaning up temporary directory..."
if test -d $_tmp_dir; then
rm -r $_tmp_dir
fi
}
function getfilehash {
echo $(sha256sum $1|awk '{print $1}')
}
function _wget {
wget $_wget_opts $1 2> /dev/null || true
}
function httpdownload {
_wget $_dl_filedir/$_dl_filename
_wget $_dl_filedir/$_dl_filename.sha256
if test ! -f $_dl_filename || test ! -f $_dl_filename.sha256; then
echo "ERROR: Failed to download files"; exit 1;
fi
# TODO: rename $_dl_filename to $_tar_filename
_file_hash=$(getfilehash $_dl_filename)
_remote_hash=$(cat $_dl_filename.sha256)
if [ $_file_hash != $_remote_hash ]; then
echo "Downloaded file corrupted!"
exit 1
fi
}
if test -n "$TESTING"; then
_dl_server=http://localhost:8080
else
# This is a temporary location that may become
# permanent; dl.lhprojects.net is not configured
# to execute PHP scripts.
_dl_server=https://www.lhprojects.net
fi
_tar_filename=apparmor-profiles.tgz
_dl_scriptname=handleLXCAppArmorProfiles.php
_dl_filedir=$_dl_server/apparmor
_dl_filename=$_tar_filename
_rsync_server=pve1.lhprojects.int
_tmp_dir=/tmp/.tar.lxc-apparmor-profiles
_rsync_user=apparmor_rsync
_remote_script_path=/usr/local/bin/tar.lxc-apparmor-profiles
function usage
{
echo "Usage: ${0}"
echo "This script can upload an archive of LXC AppArmor profiles to a remote HTTP"
echo "server via handleLXCAppArmorProfiles.php. Alternatively, you can use --download-scp."
echo ""
echo " --download"
echo " Download and extract archive using http via handleLXCAppArmorProfiles.php"
echo " NOTE: You need to upload the archive to a middleman HTTP server prior to using --download."
echo " --download-scp"
echo " Same as above however use scp for file transfer."
echo " NOTE: the remote SSH server must have $_remote_script_path installed"
echo " --download-test"
echo " Same as --download, but extract in temp directory"
echo " -h | --help"
echo " Show this usage"
exit 0
}
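# Hedged usage examples (paths and hosts are whatever your environment uses;
# DEBUG and TESTING are plain environment variables checked above, not options):
#
#   ./tar.lxc-apparmor-profiles                 # archive local profiles and upload via HTTP
#   ./tar.lxc-apparmor-profiles --download      # fetch and extract the archive over HTTP
#   ./tar.lxc-apparmor-profiles --download-scp  # fetch via scp from the configured rsync server
#   DEBUG=2 ./tar.lxc-apparmor-profiles --download-test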
# We are going to attempt to create a tmp dir
# and set the _tmp_dir variable
if test -d $_tmp_dir && test ! -O $_tmp_dir; then
debug "ERROR: We don't own $_tmp_dir!"
_old_tmp_dir=$_tmp_dir
for i in {1..5}; do
__tmp_dir=$_tmp_dir$i
if test -d $__tmp_dir && test -O $__tmp_dir || test ! -d $__tmp_dir; then
debug "Setting _tmp_dir to '$__tmp_dir'"
_tmp_dir=$__tmp_dir
break
fi
done
if [[ $_tmp_dir = $_old_tmp_dir ]]; then
echo "ERROR: Unable to set a tmp dir"
exit 1
fi
fi
_staging_dir=$_tmp_dir/stg
case "$1" in
-h | --help )
usage
;;
--download | --download-scp)
cleanup
mkdir -p $_staging_dir
cd $_tmp_dir
if [ $1 = '--download-scp' ]; then
echo "Archiving remote AppArmor profiles..."
_result=$(ssh $_ssh_opts $_rsync_user@$_rsync_server || true)
if [[ $_result != '200 OK' ]]; then
echo "ERROR: Something went wrong: REMOTE $_result"
cleanup
exit 1
fi
echo "Downloading archive..."
scp $_ssh_opts $_rsync_user@$_rsync_server:$_tar_filename .
scp $_ssh_opts $_rsync_user@$_rsync_server:$_tar_filename.sha256 .
if test ! -f $_tar_filename || test ! -f $_tar_filename.sha256; then
echo "ERROR: Failed to download files"; exit 1;
fi
_file_hash=$(getfilehash $_tar_filename)
_remote_hash=$(cat $_tar_filename.sha256)
if [ $_file_hash != $_remote_hash ]; then
echo "Downloaded file corrupted!"
exit 1
fi
else
httpdownload
fi
echo "Extracting archive..."
cd /
tar $_tar_opts -xf $_tmp_dir/$_dl_filename .
cleanup
exit 0
;;
--download-test )
echo "This is only a download test and archive extract"
cleanup
mkdir -p $_staging_dir
cd $_tmp_dir
httpdownload
cd $_staging_dir
tar $_tar_opts -xf ../$_dl_filename .
echo "Download test completed successfully"
cleanup
exit 0
;;
* )
# Check if running under a SSH connection
if [ -n "$SSH_CLIENT" ] && [ -z "$1" ] && [ $USER != 'root' ]; then
echo "Running from SSH connection and no command arguments given"
echo "Exiting"
exit 1
fi
# Init
_tar_file=$_tmp_dir/$_tar_filename
_user_home=$(eval echo "~$_rsync_user")
_scp_remote=0
if test -n "$1" && [ $1 = '--scp-remote' ]; then
debug "Remote SCP enabled, not uploading..."
_scp_remote=1
# Determine if we need to run scp instead
case "$SSH_ORIGINAL_COMMAND" in
"scp -f $_tar_filename" | "scp -f $_tar_filename" | "scp -f $_tar_filename.sha256" | "scp -v -f $_tar_filename.sha256" )
cd $_user_home
# Replace script with scp command
eval "$SSH_ORIGINAL_COMMAND"
exit 0
;;
esac
fi
cleanup
mkdir -p $_staging_dir
debug "Copying AppArmor profiles over to tmp dir..."
read -r -d '' _profile_files <<- EOF || true
/etc/apparmor.d
/etc/apparmor.d/abstractions
/etc/apparmor.d/abstractions/lxc
/etc/apparmor.d/abstractions/lxc/container-base
/etc/apparmor.d/abstractions/lxc/start-container
/etc/apparmor.d/lxc
/etc/apparmor.d/lxc/lxc-default
/etc/apparmor.d/lxc/lxc-default-cgns
/etc/apparmor.d/lxc/lxc-default-with-mounting
/etc/apparmor.d/lxc/lxc-default-with-nesting
/etc/apparmor.d/lxc-containers
EOF
# Read file list
while read -r file; do
if test -d $file;
then
_dest=$_staging_dir/.$file
debug "Creating directory: $_dest"
mkdir -p $_dest
elif test -f $file
then
_src=$file
_dest=$_staging_dir/.$file
debug "Copying: $_src to $_dest"
cp $_src $_dest
fi
done <<< "$_profile_files"
debug "Changing directory to $_tmp_dir"
cd $_staging_dir
if [ $PWD != $_staging_dir ]; then
echo "Failed to change directory!"
exit 1
fi
# Tar files
tar $_tar_opts -czf $_tar_file .
# Get file hash
_file_hash=$(getfilehash $_tar_file)
if [ $_scp_remote -ne 0 ]; then
debug "Moving: $_tar_file to $_user_home"
mv -f $_tar_file $_user_home/
echo $_file_hash > $_user_home/$_tar_filename.sha256
echo "200 OK"
cleanup
exit 0
elif test -n "$1"; then
echo "ERROR: Invalid argument: $1"
cleanup
exit 1
fi
# Get auth creds
if test ! -f $auth_file; then
echo "ERROR: Auth file '$auth_file' not found!"
cleanup
exit 1
fi
source $auth_file
_result=$(curl $_curl_opts -H 'X-AppArmor-State: ProcessUpload' -H "X-Tar-Hash: $_file_hash" -F "apparmor-profiles=@$_tmp_dir/$_tar_filename" --user "$username:$password" $_dl_server/$_dl_scriptname)
if [[ $_result = '200 OK' ]]; then
echo 'Successfully uploaded archive!'
else
echo "Something went wrong: $_result"
fi
;;
esac

1
tcpbintest.sh Executable file

@ -0,0 +1 @@
#!/bin/bash


@ -0,0 +1,33 @@
#### Description
## Type: HTTP
## VHost: timesheet.lhprojects.net
## Time Keeping Website
server {
listen 194.242.3.57:80;
server_name timesheet.lhprojects.net;
root ;
location /.well-known {
autoindex on;
}
location / {
return 302 https://timesheet.lhprojects.net$request_uri;
}
}
server {
listen 194.242.3.57:443 http2 ssl;
server_name timesheet.lhprojects.net;
error_log /var/log/nginx/timesheet.lhprojects.net.error.log;
access_log /var/log/nginx/timesheet.lhprojects.net.access.log main;
ssl_certificate ssl/timesheet.lhprojects.net-fullchain.crt;
ssl_certificate_key ssl/timesheet.lhprojects.net.key;
location / {
proxy_pass https://10.0.6.25:443;
include proxy_params;
}
}