This commit is contained in:
cutemeli
2025-12-22 10:35:30 +00:00
parent 0bfc6c8425
commit 5ce7ca2c5d
38927 changed files with 0 additions and 4594700 deletions

View File

@@ -1,73 +0,0 @@
#!/bin/bash -e
# Free IP Geolocation by DB-IP: https://db-ip.com/db/download/ip-to-country-lite
# Per-data-source state directory, derived from this script's own file name.
VAR_D="/usr/local/psa/var/modules/firewall/geoip/$(basename "$0").d"
# Print an error message (prefixed with the script name) to stderr and abort.
die()
{
    echo "$(basename "$0"): $*" >&2
    exit 1
}
# Source settings persisted by a previous --fetch, if any exist.
load_settings()
{
    if [ -s "$VAR_D/settings.sh" ]; then
        . "$VAR_D/settings.sh"
    fi
}
# Persist current settings into the working directory for later load_settings.
save_settings()
{
    printf ': ${DOWNLOAD_TIMEOUT:=%s}\n' "$DOWNLOAD_TIMEOUT" > "./settings.sh"
}
# Succeed only when a previously fetched database is present and non-empty.
exists()
{
    test -s "$VAR_D/db.sqlite3"
}
# Download the monthly DB-IP country CSV and rebuild the local SQLite DB.
# All work happens in a fresh temporary directory; $VAR_D is replaced only
# after the new DB passes a sanity check, so a failed download never
# clobbers existing data.
fetch()
{
    load_settings
    # Drop leftover temporary directories from earlier interrupted runs.
    rm -rf "$VAR_D".*
    mkdir -p "`dirname "$VAR_D"`"
    local tgt_d=
    tgt_d="`mktemp -d "$VAR_D.XXXXXX"`"
    chmod 700 "$tgt_d"
    cd "$tgt_d"
    # 'now - 1 day' guards against the current month's DB not being published yet.
    local url="https://download.db-ip.com/free/dbip-country-lite-`date -d 'now - 1 day' --utc +%Y-%m`.csv.gz"
    date --utc --rfc-3339=seconds > updated-at
    save_settings
    # NOTE(review): the pipeline's status is gzip's, not curl's; a curl failure
    # only surfaces via gzip choking on truncated input ('-e' then aborts).
    curl ${DOWNLOAD_TIMEOUT:+-m "$DOWNLOAD_TIMEOUT"} -fsSL "$url" | gzip -cd > dbip.csv
    sqlite3 db.sqlite3 <<-EOT
CREATE TABLE ips (ip_from TEXT, ip_to TEXT, country_iso_code TEXT);
CREATE INDEX ips_code ON ips (country_iso_code);
.mode csv
.import dbip.csv ips
EOT
    # Sanity check: refuse to install an empty database.
    [ "`sqlite3 db.sqlite3 'SELECT count(*) FROM ips;'`" -gt 0 ] || die "Downloaded DB is empty"
    rm -f dbip.csv
    # Swap the fresh data into place.
    rm -rf "$VAR_D"
    mv -fT "$tgt_d" "$VAR_D"
}
# Print "ip_from-ip_to" ranges for the given ISO country code, one per line.
# The argument is validated before being interpolated into the SQL statement
# below, so arbitrary input can no longer be spliced into the query.
list()
{
    local country="$1"
    # Require exactly two ASCII letters; anything else would otherwise reach
    # the SQL string verbatim (injection risk).
    [[ "$country" =~ ^[A-Za-z]{2}$ ]] ||
        die "--list requires a single 2-letter ISO country code argument"
    sqlite3 "$VAR_D/db.sqlite3" \
        "SELECT ip_from || '-' || ip_to FROM ips WHERE country_iso_code = '$country';"
}
# Dispatch the data-source contract commands (see the ipsets script's --help).
command="$1"
case "$command" in
--exists)
    exists
    ;;
--fetch)
    fetch
    ;;
--list)
    list "$2"
    ;;
*)
    die "Unknown command: '$command'"
    ;;
esac

View File

@@ -1,10 +0,0 @@
#!/bin/bash -e
# GeoIP2 data created by MaxMind: https://www.maxmind.com . Requires license key.
# Updated twice weekly, every Tuesday and Friday.
# Each account (license key) is limited to 2000 total direct downloads in a 24 hour period.
# Reuse the maxmind-lite implementation, only switching the edition to the
# commercial GeoIP2 database and using this script's own state directory.
VAR_D="/usr/local/psa/var/modules/firewall/geoip/$(basename "$0").d"
EDITION_TYPE="GeoIP2"
. "$(dirname "$0")/maxmind-lite"

View File

@@ -1,106 +0,0 @@
#!/bin/bash -e
# Free but less accurate GeoLite2 data created by MaxMind: https://www.maxmind.com . Requires license key.
# Updated twice weekly, every Tuesday and Friday.
# Each account (license key) is limited to 2000 total direct downloads in a 24 hour period.
# VAR_D and EDITION_TYPE may be pre-set by a wrapper (see the 'maxmind' script).
: "${VAR_D:=/usr/local/psa/var/modules/firewall/geoip/$(basename "$0").d}"
: "${EDITION_TYPE:=GeoLite2}"
EDITION_ID="$EDITION_TYPE-Country-CSV"
# Print an error message (prefixed with the script name) to stderr and abort.
die()
{
    echo "$(basename "$0"): $*" >&2
    exit 1
}
# Source settings persisted by a previous --fetch, if any exist.
load_settings()
{
    if [ -s "$VAR_D/settings.sh" ]; then
        . "$VAR_D/settings.sh"
    fi
}
# Persist current settings into the working directory for later load_settings.
save_settings()
{
    {
        printf ': ${DOWNLOAD_TIMEOUT:=%s}\n' "$DOWNLOAD_TIMEOUT"
        printf ': ${LICENSE_KEY:=%s}\n' "$LICENSE_KEY"
    } > "./settings.sh"
}
# Succeed only when a previously fetched database is present and non-empty.
exists()
{
    test -s "$VAR_D/db.sqlite3"
}
# Download the MaxMind country CSV bundle and rebuild the local SQLite DB.
# All work happens in a fresh temporary directory; $VAR_D is replaced only
# after the new DB passes a sanity check, so a failed download never
# clobbers existing data.
fetch()
{
    load_settings
    [ "$EDITION_TYPE" = "GeoLite2" -o "$EDITION_TYPE" = "GeoIP2" ] ||
        die "Unsupported MaxMind EDITION_TYPE='$EDITION_TYPE'"
    [ -n "$LICENSE_KEY" ] ||
        die "Missing MaxMind LICENSE_KEY"
    # Drop leftover temporary directories from earlier interrupted runs.
    rm -rf "$VAR_D".*
    mkdir -p "`dirname "$VAR_D"`"
    local tgt_d=
    tgt_d="`mktemp -d "$VAR_D.XXXXXX"`"
    chmod 700 "$tgt_d"
    cd "$tgt_d"
    # Database structure: https://dev.maxmind.com/geoip/docs/databases/city-and-country
    local url="https://download.maxmind.com/app/geoip_download?edition_id=$EDITION_ID&license_key=$LICENSE_KEY&suffix=zip"
    date --utc --rfc-3339=seconds > updated-at
    save_settings
    curl ${DOWNLOAD_TIMEOUT:+-m "$DOWNLOAD_TIMEOUT"} -fsSL "$url" -o dbip-csv.zip
    # -j: flatten the archive's inner directory; -q: quiet
    unzip -jq dbip-csv.zip
    sqlite3 db.sqlite3 <<-EOT
-- CentOS 7 has SQLite 3.7.17, which doesn't auto-create tables based on CSV header
CREATE TABLE blocks_ipv4 (network TEXT, geoname_id TEXT, registered_country_geoname_id TEXT,
represented_country_geoname_id TEXT, is_anonymous_proxy TEXT, is_satellite_provider TEXT);
CREATE TABLE blocks_ipv6 (network TEXT, geoname_id TEXT, registered_country_geoname_id TEXT,
represented_country_geoname_id TEXT, is_anonymous_proxy TEXT, is_satellite_provider TEXT);
CREATE TABLE locations_en(geoname_id TEXT, locale_code TEXT, continent_code TEXT,
continent_name TEXT, country_iso_code TEXT, country_name TEXT, is_in_european_union TEXT);
CREATE INDEX blocks_ipv4_geoname_id ON blocks_ipv4 (geoname_id);
CREATE INDEX blocks_ipv6_geoname_id ON blocks_ipv6 (geoname_id);
CREATE INDEX locations_en_code_geoname_id ON locations_en (country_iso_code, geoname_id);
.mode csv
.import $EDITION_TYPE-Country-Blocks-IPv4.csv blocks_ipv4
.import $EDITION_TYPE-Country-Blocks-IPv6.csv blocks_ipv6
.import $EDITION_TYPE-Country-Locations-en.csv locations_en
-- Since the tables already existed on import, header is also imported as data, remove it
DELETE FROM blocks_ipv4 WHERE geoname_id = 'geoname_id';
DELETE FROM blocks_ipv6 WHERE geoname_id = 'geoname_id';
DELETE FROM locations_en WHERE geoname_id = 'geoname_id';
EOT
    # Sanity check: refuse to install a database with any empty table.
    for table in blocks_ipv4 blocks_ipv6 locations_en; do
        [ "`sqlite3 db.sqlite3 "SELECT count(*) FROM $table;"`" -gt 0 ] ||
            die "Table '$table' in the downloaded DB is empty"
    done
    rm -f dbip-csv.zip *.csv
    # Swap the fresh data into place.
    rm -rf "$VAR_D"
    mv -fT "$tgt_d" "$VAR_D"
}
# Print CIDR networks (IPv4 then IPv6) for the given ISO country code.
# The argument is validated before being interpolated into the SQL statement
# below, so arbitrary input can no longer be spliced into the query.
list()
{
    local country="$1"
    # Require exactly two ASCII letters; anything else would otherwise reach
    # the SQL string verbatim (injection risk).
    [[ "$country" =~ ^[A-Za-z]{2}$ ]] ||
        die "--list requires a single 2-letter ISO country code argument"
    sqlite3 "$VAR_D/db.sqlite3" "
SELECT network FROM blocks_ipv4 b INNER JOIN locations_en l ON b.geoname_id = l.geoname_id
WHERE l.country_iso_code = '$country';
SELECT network FROM blocks_ipv6 b INNER JOIN locations_en l ON b.geoname_id = l.geoname_id
WHERE l.country_iso_code = '$country';
"
}
# Dispatch the data-source contract commands (see the ipsets script's --help).
command="$1"
case "$command" in
--exists)
    exists
    ;;
--fetch)
    fetch
    ;;
--list)
    list "$2"
    ;;
*)
    die "Unknown command: '$command'"
    ;;
esac

View File

@@ -1,357 +0,0 @@
#!/usr/local/psa/bin/py3-python -IS
""" ipset management for country filtering in firewall. """
import argparse
import ipaddress
import json
import logging
import os
import subprocess
import sys
import textwrap
log = logging.getLogger('ipsets')

# Directory containing this script; data-source scripts live in geoip/ below it.
SBIN_D = os.path.dirname(os.path.abspath(__file__))
# Extension state directory; fetched GeoIP data and settings live below it.
VAR_D = "/usr/local/psa/var/modules/firewall"
DATA_SOURCE_BIN_D = os.path.join(SBIN_D, 'geoip')
DATA_SOURCE_VAR_D = os.path.join(VAR_D, 'geoip')
SETTINGS_PATH = os.path.join(DATA_SOURCE_VAR_D, 'settings.json')
# All managed ipsets are named "<prefix><4|6>-<COUNTRY>" (see ipset_name()).
IPSET_PREFIX = "plesk-ip"
def set_up_logging(verbosity):
    """ Set up logging based on --verbose count and PLESK_DEBUG environment. """
    verbosity = verbosity or 0
    if verbosity >= 4 or os.getenv('PLESK_DEBUG'):
        # Maximum verbosity requested explicitly or via environment.
        level = logging.DEBUG
    else:
        levels = (logging.CRITICAL, logging.ERROR, logging.WARNING, logging.INFO)
        level = levels[verbosity] if verbosity < len(levels) else logging.CRITICAL
    logging.basicConfig(level=level, format='[%(asctime)s] %(levelname)8s %(message)s')
def parse_args():
    """ Parse command line arguments: exactly one command plus its options. """
    # Shown verbatim under --help; documents the contract that every
    # --data-source script must implement.
    epilog = f"""\
environment variables:
DOWNLOAD_TIMEOUT Data source download timeout, seconds
LICENSE_KEY Data source license key (e.g. for 'maxmind')
PLESK_DEBUG Set logging verbosity to maximum
data source contract:
Each --data-source value is an executable script with the following commands:
--exists Returns 0 only when the GeoIP data exists locally
(i.e. previous --fetch was successful).
--fetch Fetches GeoIP data from a remote source, preprocesses it,
and stores it locally. May use and store additional
environment variables, such as LICENSE_KEY. Such variables
may be absent on subsequent calls. Store data under
{DATA_SOURCE_VAR_D}/$data_source.d .
Avoid clobbering data on upstream errors.
--list ZZ Prints IP ranges or CIDR networks for both IPv4 and IPv6,
which are mapped to the country code ZZ, each on a separate
line. Order does not matter. Should use only local data,
but may use remote data (not recommended). Output examples:
127.0.0.0/8
192.0.0.0-192.0.0.255
fe80::/10
2001:db8::-2001:db8:ffff:ffff:ffff:ffff:ffff:ffff
"""
    parser = argparse.ArgumentParser(description="Manage ipsets for country filtering in the firewall",
                                     epilog=textwrap.dedent(epilog),
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    # Exactly one command is required per invocation.
    commands = parser.add_mutually_exclusive_group(required=True)
    commands.add_argument('--configure', action='store_true',
                          help="Set up country ipsets. Create local GeoIP DB if missing, "
                               "persist settings, recreate country ipsets.")
    commands.add_argument('--update', action='store_true',
                          help="Update local GeoIP DB from a remote source, then recreate all "
                               "country ipsets. Use from a cron job.")
    commands.add_argument('--recreate', action='store_true',
                          help="Create missing and remove unused country ipsets. "
                               "Use from a firewall script.")
    parser.add_argument('-v', '--verbose', action='count', default=0,
                        help="Increase logging verbosity, can be specified multiple times.")
    parser.add_argument('-f', '--force', action='store_true',
                        help="Recreate all country ipsets instead of only missing and extra ones. "
                             "With --configure will also recreate local GeoIP DB and "
                             "update its settings.")
    # type= callbacks below both validate and normalize the raw values.
    parser.add_argument('-d', '--data-source', metavar='NAME', required=True, type=type_data_source,
                        help="Data source name. Each data source is a script under "
                             f"{DATA_SOURCE_BIN_D} or {DATA_SOURCE_VAR_D}, e.g. 'maxmind'.")
    parser.add_argument('-c', '--countries', nargs='*', metavar='ZZ', type=type_country_code,
                        help="List of 2-letter ISO 3166 country codes.")
    args = parser.parse_args()
    return args
def type_data_source(data_source):
    """ Type caster and checker for --data-source.

    Resolves the name to the first matching executable under the built-in or
    the var data-source directory, in that order.
    """
    search_dirs = (DATA_SOURCE_BIN_D, DATA_SOURCE_VAR_D)
    for candidate in (os.path.join(d, data_source) for d in search_dirs):
        if os.access(candidate, os.X_OK):
            return candidate
    raise argparse.ArgumentTypeError(f"Unsupported data source: {data_source!r}")
def type_country_code(code):
    """ Type caster and checker for --countries: uppercase 2-letter codes only. """
    is_valid = len(code) == 2 and code.isalpha() and code.isupper()
    if not is_valid:
        raise argparse.ArgumentTypeError(f"Not a 2-letter ISO 3166 country code: {code!r}")
    return code
def log_geoip_data_dir(data_source):
    """ Just logs expected data source local storage directory (by convention). """
    name = os.path.basename(data_source)
    expected_dir = os.path.join(DATA_SOURCE_VAR_D, name + ".d")
    log.debug("Data directory for %r data source is expected to be %r", name, expected_dir)
def has_geoip_data(data_source):
    """ Returns True if GeoIP data is already fetched. """
    log.debug("Checking for GeoIP data existence via %r", data_source)
    result = subprocess.run([data_source, '--exists'])
    return result.returncode == 0
def fetch_geoip_data(data_source):
    """ Refetches GeoIP data from a remote source; raises on failure. """
    log.info("Fetching GeoIP data via %r", data_source)
    subprocess.run([data_source, '--fetch'], check=True)
def list_geoip_data(data_source, country_code):
    """ Lists GeoIP data for the country_code (assuming it is fetched).
    Data is a list of IP ranges or CIDR networks for both IPv4 and IPv6.
    """
    log.debug("Listing GeoIP data for %r via %r", country_code, data_source)
    result = subprocess.run([data_source, '--list', country_code], check=True,
                            stdout=subprocess.PIPE, universal_newlines=True)
    return result.stdout.split()
def geoip_data_to_networks(entries):
    """ Generator of IPv4Network and IPv6Network objects from a list of ranges or networks.
    >>> list(geoip_data_to_networks(['10.0.0.0/24', 'fe80::/10']))
    [IPv4Network('10.0.0.0/24'), IPv6Network('fe80::/10')]
    >>> list(geoip_data_to_networks(['10.0.0.0-10.0.0.19', '::-::3']))
    [IPv4Network('10.0.0.0/28'), IPv4Network('10.0.0.16/30'), IPv6Network('::/126')]
    >>> list(geoip_data_to_networks(['127.0.0.1', '::1']))
    [IPv4Network('127.0.0.1/32'), IPv6Network('::1/128')]
    >>> list(geoip_data_to_networks(['invalid']))
    Traceback (most recent call last):
    ...
    ValueError: 'invalid' does not appear to be an IPv4 or IPv6 network
    >>> list(geoip_data_to_networks(['from-to']))
    Traceback (most recent call last):
    ...
    ValueError: 'from' does not appear to be an IPv4 or IPv6 address
    """
    for entry in entries:
        first, dash, last = entry.partition('-')
        if dash:
            # "A-B" range: expand into the minimal covering set of CIDR networks.
            start = ipaddress.ip_address(first)
            end = ipaddress.ip_address(last)
            yield from ipaddress.summarize_address_range(start, end)
        else:
            # Already a CIDR network (or a bare address, treated as /32 or /128).
            yield ipaddress.ip_network(entry)
def list_existing_ipset_names():
    """ Lists ipsets from the system. """
    log.debug("Listing existing ipset names from system")
    result = subprocess.run(["ipset", "list", "-name"], check=True,
                            stdout=subprocess.PIPE, universal_newlines=True)
    names = result.stdout.split()
    log.debug("Got ipset names: %r", names)
    return names
def round_to_power_of_2(x):
    """ Returns value rounded to the next nearest non-negative power of 2.
    >>> round_to_power_of_2(0)
    1
    >>> round_to_power_of_2(1)
    1
    >>> round_to_power_of_2(32)
    32
    >>> round_to_power_of_2(1000)
    1024
    """
    if x < 1:
        return 1
    return 1 << (x - 1).bit_length()
def create_ipset(ipset_name, ip_version, num_elements=0):
    """ Creates ipset ipset_name for ip_version with num_elements estimate. """
    # Account for possible growth due to updates, use a value that will not change often
    num_elements = round_to_power_of_2(int(num_elements * 1.5))
    # Only pass maxelem when we need more than ipset's 65536 default, so that
    # 'create -exist' on an unchanged set stays a clean no-op.
    maxelem_args = ["maxelem", str(num_elements)] if num_elements > 65536 else []
    family_args = ["family", "inet" if str(ip_version) != '6' else "inet6"]
    cmd = ["ipset", "create", ipset_name, "hash:net", "-exist"] + family_args + maxelem_args
    try:
        log.debug("Creating %r ipset: %r", ipset_name, cmd)
        subprocess.check_call(cmd)
    except Exception as ex:
        # A likely cause is the set already existing with different parameters
        # (e.g. a grown maxelem); destroy it and retry once.
        log.warning("Failed to create %r ipset from the first try, possibly 'maxelem' changed, "
                    "will try recreating: %s",
                    ipset_name, ex)
        try:
            destroy_ipset(ipset_name)
        except Exception as ex:
            # Destroy fails while iptables rules still reference the set.
            log.debug("Destroying %r ipset failed, likely due to existing references", ipset_name)
            raise RuntimeError(f"Cannot recreate ipset {ipset_name!r}: {ex} "
                               "Try stopping the plesk-firewall.service first.") from ex
        log.debug("Creating new %r ipset: %r", ipset_name, cmd)
        subprocess.check_call(cmd)
def destroy_ipset(ipset_name):
    """ Destroys ipset_name. This will fail if it is referenced by any iptables rules. """
    log.debug("Destroying %r ipset", ipset_name)
    subprocess.run(["ipset", "destroy", ipset_name], check=True)
def update_ipset(ipset_name, networks):
    """ Replaces networks in ipset_name via a single 'ipset restore' batch. """
    commands = [f"flush {ipset_name}"]
    commands.extend(f"add {ipset_name} {net}" for net in networks)
    log.debug("Updating %r ipset networks, %d entries", ipset_name, len(networks))
    subprocess.run(["ipset", "restore"], check=True, universal_newlines=True,
                   input="\n".join(commands))
def ipset_name(country_code, ip_version):
    """ Returns ipset name for the country_code and ip_version (4 or 6). """
    return f"{IPSET_PREFIX}{ip_version}-{country_code}"
def recreate_ipsets(data_source, countries, recreate_all=False):
    """ Recreates ipsets for the countries, using data_source.
    By default, only missing ipsets are created and unused are removed.
    If recreate_all, all ipsets are recreated.
    """
    existing_ipsets = set(list_existing_ipset_names())
    log.debug("Checking for missing ipsets (recreate_all=%r)", recreate_all)
    required_ipsets = set()
    for country_code in countries:
        v4_name, v6_name = ipset_name(country_code, 4), ipset_name(country_code, 6)
        required_ipsets.add(v4_name)
        required_ipsets.add(v6_name)
        if not recreate_all and v4_name in existing_ipsets and v6_name in existing_ipsets:
            log.debug("Skip recreating already existing ipsets for %r country: %r, %r",
                      country_code, v4_name, v6_name)
            continue
        log.info("Creating and populating ipsets for %r country: %r, %r",
                 country_code, v4_name, v6_name)
        # Split the data source output per address family: an ipset holds
        # either inet or inet6 entries, never both.
        v4_nets, v6_nets = [], []
        for net in geoip_data_to_networks(list_geoip_data(data_source, country_code)):
            if net.version == 4:
                v4_nets.append(net)
            elif net.version == 6:
                v6_nets.append(net)
            else:
                raise RuntimeError(f"Network {net} is neither IPv4 nor IPv6")
        # Create (sized to the data) first, then replace the contents.
        create_ipset(v4_name, 4, len(v4_nets))
        create_ipset(v6_name, 6, len(v6_nets))
        update_ipset(v4_name, v4_nets)
        update_ipset(v6_name, v6_nets)
    log.debug("Checking for unused ipsets")
    for name in existing_ipsets:
        try:
            # Only touch our own sets; leave foreign ipsets alone.
            if name.startswith(IPSET_PREFIX) and name not in required_ipsets:
                log.info("Destroying unused ipset: %r", name)
                destroy_ipset(name)
        except Exception as ex:
            # Best effort: a set still referenced by iptables cannot be
            # destroyed right now; a later run will retry.
            log.warning("Cannot remove ipset %r, will try next time: %s", name, ex)
def store_settings(countries):
    """ Stores settings for subsequent calls. """
    log.debug("Storing settings into %r", SETTINGS_PATH)
    payload = {'countries': sorted(countries)}
    os.makedirs(os.path.dirname(SETTINGS_PATH), 0o755, exist_ok=True)
    with open(SETTINGS_PATH, 'w') as handle:
        handle.write(json.dumps(payload))
        handle.write("\n")
def fetch_settings():
    """ Fetches previously stored settings (the persisted country list). """
    log.debug("Fetching settings from %r", SETTINGS_PATH)
    try:
        with open(SETTINGS_PATH, 'r') as handle:
            settings = json.load(handle)
        log.debug("Fetched settings: %r", settings)
        return settings['countries']
    except Exception as ex:
        # Wrap any failure (missing file, bad JSON, missing key) uniformly.
        raise RuntimeError(f"Cannot read persisted settings from {SETTINGS_PATH!r}: {ex}") from ex
def configure(data_source, countries, recreate_all=False):
    """ Sets up countries ipsets from the data_source. Stores settings (countries, for data source). """
    need_fetch = recreate_all or not has_geoip_data(data_source)
    if need_fetch:
        fetch_geoip_data(data_source)
    selected = countries if countries else []
    store_settings(selected)
    recreate_ipsets(data_source, selected, recreate_all)
def update(data_source, countries):
    """ Updates data from the data_source, then updates countries ipsets. """
    fetch_geoip_data(data_source)
    # Fall back to the persisted country list when none was given.
    target_countries = fetch_settings() if countries is None else countries
    recreate_ipsets(data_source, target_countries, recreate_all=True)
def recreate(data_source, countries, recreate_all=False):
    """ Recreates missing countries ipsets and removes unused ones, uses data from the data_source. """
    # Fall back to the persisted country list when none was given.
    target_countries = countries if countries is not None else fetch_settings()
    recreate_ipsets(data_source, target_countries, recreate_all)
def main():
    """ Entry point: parse arguments and dispatch the selected command.

    The commands are declared mutually exclusive by argparse, so exactly one
    branch below runs per invocation.
    """
    args = parse_args()
    set_up_logging(args.verbose)
    log.debug("Options: %s", args)
    log_geoip_data_dir(args.data_source)
    if args.update:
        update(args.data_source, args.countries)
    elif args.recreate:
        recreate(args.data_source, args.countries, args.force)
    elif args.configure:
        configure(args.data_source, args.countries, args.force)
if __name__ == '__main__':
    try:
        main()
    except Exception as ex:
        # Short message for the operator on stderr; the full traceback is
        # only emitted at DEBUG verbosity.
        print(f"{ex}", file=sys.stderr)
        log.error("%s", ex)
        log.debug("This exception happened at:", exc_info=sys.exc_info())
        sys.exit(1)
# vim: ft=python

View File

@@ -1,445 +0,0 @@
#!/usr/local/psa/bin/py3-python -IS
""" Safe firewall rules activation and feature checks. This is a 'safeact' replacement. """
import argparse
import atexit
import errno
import logging
import os
import select
import shutil
import signal
import stat
import subprocess
import sys
import textwrap
import time
from datetime import datetime
log = logging.getLogger('rules')

PLESKRC_BIN = "/usr/local/psa/admin/sbin/pleskrc"
# Well-known file locations used by the activate/confirm handshake.
VAR_D = "/usr/local/psa/var/modules/firewall"
""" extension var directory """
SCRIPT_NEW = os.path.join(VAR_D, "firewall-new.sh")
""" new set of firewall rules """
SCRIPT_ACTIVE = os.path.join(VAR_D, "firewall-active.sh")
""" previous (active) set of firewall rules """
SCRIPT_EMERGENCY = os.path.join(VAR_D, "firewall-emergency.sh")
""" emergency set of firewall rules - ones that disable firewall """
PIPE_PATH = os.path.join(VAR_D, "confirm.pipe")
""" interprocess communication named pipe (fifo) """
ROLLBACK_FLAG = os.path.join(VAR_D, "rollback.flag")
""" "new firewall rules turned out to be bad" flag """
# Timing knobs for the confirmation handshake.
DEFAULT_CONFIRM_INTERVAL = 15
""" default confirmation timeout, in seconds """
MINIMAL_CONFIRM_INTERVAL = 5
""" minimal time the code will actually await confirmation token, in seconds """
MINIMAL_SCRIPT_TIMEOUT = 5
""" minimal time the code will allow a subprocess to execute, in seconds """
class ConfirmFailed(RuntimeError):
    """ Raised when rules activation is not confirmed: no matching token arrived
    before the timeout, or the new rules were already rolled back. """
    pass
def set_up_logging(verbosity):
    """ Set up logging based on --verbose count and PLESK_DEBUG environment. """
    verbosity = verbosity or 0
    if verbosity >= 4 or os.getenv('PLESK_DEBUG'):
        # Maximum verbosity requested explicitly or via environment.
        level = logging.DEBUG
    else:
        levels = (logging.CRITICAL, logging.ERROR, logging.WARNING, logging.INFO)
        level = levels[verbosity] if verbosity < len(levels) else logging.CRITICAL
    logging.basicConfig(level=level, format='[%(asctime)s] %(levelname)8s %(message)s')
def parse_args():
    """ Parse command line arguments: exactly one command plus its options. """
    # Shown verbatim under --help; documents the environment contract.
    epilog = f"""\
environment variables:
PHP_SAFEACT_TOKEN Activation token
PHP_SAFEACT_CONFIRM_INTERVAL Confirmation timeout (default: {DEFAULT_CONFIRM_INTERVAL})
(activation and rollback each take at most this time,
but system will actually wait for confirmation token
for at least {MINIMAL_CONFIRM_INTERVAL} seconds, which may
increase the effective timeout, which may be
additionally increased due to misbehaving child
processes by up to {3 * MINIMAL_SCRIPT_TIMEOUT} seconds)
PLESK_DEBUG Set logging verbosity to maximum
"""
    parser = argparse.ArgumentParser(description="Activate firewall rules or check its features safely",
                                     epilog=textwrap.dedent(epilog),
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-v', '--verbose', action='count', default=0,
                        help="Increase logging verbosity, can be specified multiple times.")
    # Exactly one command is required per invocation.
    commands = parser.add_mutually_exclusive_group(required=True)
    commands.add_argument('--activate', action='store_true',
                          help="Activate new rules. Synchronous.")
    commands.add_argument('--confirm', action='store_true',
                          help="Commit activation of the new rules. Should be invoked from a new "
                               "SSH session or web/app server worker process to ensure an existing "
                               "network connection is not re-used.")
    commands.add_argument('--try-enable-features', action='store_true',
                          help="Probe iptables features support. This will both check and "
                               "try to enable the specified features.")
    act_opts = parser.add_argument_group("--activate arguments")
    act_opts.add_argument('--rules-file', type=argparse.FileType('r'), default='-', metavar='PATH',
                          help="New rules script (default: %(default)s, i.e. STDIN)")
    cfm_opts = parser.add_argument_group("--confirm arguments")
    cfm_opts.add_argument('--wait', action='store_true',
                          help="Wait for the activation process to appear")
    try_opts = parser.add_argument_group("--try-enable-features arguments")
    try_opts.add_argument('--iptables', default='/usr/sbin/iptables',
                          help="iptables binary path (default: %(default)s)")
    try_opts.add_argument('--table', default='filter',
                          help="iptables table name (default: %(default)s)")
    try_opts.add_argument('--rule', default='-L',
                          help="iptables rule options (default: %(default)s), "
                               "use the default to check table and/or binary availability")
    args = parser.parse_args()
    return args
def get_token():
    """ Returns activation token string from PHP_SAFEACT_TOKEN; raises if unusable. """
    token = os.getenv('PHP_SAFEACT_TOKEN', '').strip()
    if not token:
        raise RuntimeError("Activation token is absent")
    # Writes to pipes are atomic only up to certain system-specific limit (at least 512)
    limit = select.PIPE_BUF - 1
    if len(token) >= limit:
        raise RuntimeError(f"Activation token is too long: {len(token)} characters")
    return token
def get_confirm_timeout():
    """ Returns confirmation timeout as int, taken from the environment. """
    raw = os.getenv('PHP_SAFEACT_CONFIRM_INTERVAL')
    if not raw:
        return DEFAULT_CONFIRM_INTERVAL
    value = int(raw)
    if value <= 0:
        raise ValueError(f"Confirmation timeout is too small: {value}")
    return value
def rm_f(path):
    """ Equivalent of 'rm -f' for a file path: a missing file is not an error.

    Any other OSError (e.g. permission denied, path is a directory) still
    propagates to the caller.
    """
    try:
        log.debug("rm -f %r", path)
        os.unlink(path)
    except FileNotFoundError:
        # FileNotFoundError is exactly OSError with errno == ENOENT; catching
        # it directly is clearer than the manual errno comparison.
        pass
def verify_script_perms(path):
    """ Checks that script file looks to be OK (regular, non-empty, root-owned, 0700). """
    log.debug("Checking %r script attributes", path)
    info = os.lstat(path)
    # Checked in order; the first failing requirement is reported.
    requirements = (
        (stat.S_ISREG(info.st_mode), "The script is not a regular file"),
        (info.st_size != 0, "The script is empty"),
        (info.st_uid == 0, "The script is not owned by root"),
        (info.st_mode == (stat.S_IFREG | 0o700), "The script has permissions other than 0700"),
    )
    for ok, message in requirements:
        if not ok:
            raise ValueError(f"{path}: {message}")
def try_restart_service(service, timeout):
    """ Restarts the service if it is already running. """
    # Never give a child process less than the minimal execution budget.
    effective_timeout = max(timeout, MINIMAL_SCRIPT_TIMEOUT)
    log.debug("Trying to restart %r service with timeout=%s", service, effective_timeout)
    subprocess.run([PLESKRC_BIN, service, 'try-restart'], check=True, timeout=effective_timeout)
def is_service_running(service):
    """ Returns whether the given service is running. """
    log.debug("Checking %r service status", service)
    return subprocess.run([PLESKRC_BIN, service, 'status']).returncode == 0
def execute_rules_script(script, timeout):
    """ Executes script within a given timeout. """
    # Never give a child process less than the minimal execution budget.
    effective_timeout = max(timeout, MINIMAL_SCRIPT_TIMEOUT)
    # Never leak the activation token into the rules script environment.
    env = dict(os.environ)
    env.pop('PHP_SAFEACT_TOKEN', None)
    log.debug("Executing script %r with timeout=%s", script, effective_timeout)
    subprocess.check_call([script], timeout=effective_timeout, env=env)
def apply_rules(script, cutoff_timestamp, confirm=True):
    """ Applies rules script and (optionally) waits for confirmation until cutoff_timestamp.
    On success links the script into active configuration.
    """
    log.info("Trying to apply rules from %r until %s, %s confirmation",
             script, datetime.fromtimestamp(cutoff_timestamp), "with" if confirm else "without")
    execute_rules_script(script, cutoff_timestamp - time.time())
    if confirm:
        # This is required to ensure that there are no outstanding connections to browser
        # and any new connections are allowed by firewall.
        try:
            try_restart_service('sw-cp-server', cutoff_timestamp - time.time())
            if is_service_running('nginx'):
                log.debug("Nginx looks to be the frontend web server")
                try_restart_service('nginx', cutoff_timestamp - time.time())
            else:
                log.debug("Apache looks to be the frontend web server")
                try_restart_service('apache', cutoff_timestamp - time.time())
        except subprocess.TimeoutExpired as ex:
            # Best effort: a slow restart must not abort the activation outright.
            log.warning(f"{ex}. Will attempt to wait for confirmation anyway.")
            log.debug("This exception happened at:", exc_info=sys.exc_info())
        expected_token = get_token()
        # Always leave the confirming side a minimal window, even if the
        # restarts above consumed most of the time budget.
        cutoff_timestamp = max(cutoff_timestamp, time.time() + MINIMAL_CONFIRM_INTERVAL)
        log.debug("Waiting for a matching activation token on %r until %s",
                  PIPE_PATH, datetime.fromtimestamp(cutoff_timestamp))
        # Open w/o blocking to ensure open doesn't block w/o writers present
        with os.fdopen(os.open(PIPE_PATH, os.O_RDONLY | os.O_NONBLOCK), 'r') as pipe:
            # Also keep the pipe open for writing, otherwise after the first read select()
            # will immediately return with only EOF available to read
            # (this normally indicates absence or writers).
            with open(PIPE_PATH, 'wb'):
                timeout = cutoff_timestamp - time.time()
                while timeout > 0 and select.select([pipe], [], [], timeout)[0]:
                    token = pipe.readline().strip()
                    if token == expected_token:
                        log.info("Received matching activation token")
                        break
                    log.debug("Received non-matching activation token: %r", token)
                    timeout = cutoff_timestamp - time.time()
                else:
                    # Loop finished without break: timed out, or select() saw
                    # nothing readable before the deadline.
                    raise ConfirmFailed("Did not receive a matching activation token "
                                        "before confirmation timeout")
    if script != SCRIPT_ACTIVE:
        log.debug("Setting %r as the active configuration %r", script, SCRIPT_ACTIVE)
        # Previously files were hardlinked, but we don't really need strict atomicity here
        # and hardlinks may cause issues if somebody decides to meddle with the files manually
        # (e.g. emergency may be hardlinked into active and may be updated due to copy into active)
        rm_f(SCRIPT_ACTIVE)
        log.debug("cp -Pa %r %r", script, SCRIPT_ACTIVE)
        shutil.copy2(script, SCRIPT_ACTIVE, follow_symlinks=False)
    else:
        log.debug("Rules from %r are already the active configuration", script)
def try_create_pipe(path, stale_timestamp):
    """ Creates a pipe if it doesn't exist, removes it if it is too old. Otherwise returns False. """
    try:
        ctime = os.path.getctime(path)
        if ctime < stale_timestamp:
            # A pipe this old means a previous activation died without cleanup.
            log.info("Removing stale named pipe %r created at %s", path, datetime.fromtimestamp(ctime))
            os.unlink(path)
        else:
            # A recent pipe: another activation is presumably still in progress.
            return False
    except OSError as ex:
        # ENOENT (no pipe yet) is the normal case; anything else is a real error.
        if ex.errno != errno.ENOENT:
            raise
    log.debug("Creating named pipe %r and setting up atexit handler", path)
    os.mkfifo(path, 0o600)
    # Registered per created pipe so normal process exit always cleans up.
    @atexit.register
    def remove_pipe():
        log.debug("Removing named pipe %r on exit", path)
        rm_f(path)
    return True
def rollback():
    """ Rolls back to some working configuration. """
    log.info("Rolling back to working configuration")
    # The flag lets a late --confirm detect that the new rules are gone.
    log.debug("touch %r", ROLLBACK_FLAG)
    with open(ROLLBACK_FLAG, 'wb'):
        pass
    try:
        try:
            log.info("Trying to roll back from new to active configuration")
            cutoff_timestamp = time.time() + get_confirm_timeout()
            apply_rules(SCRIPT_ACTIVE, cutoff_timestamp)
        except ConfirmFailed as ex:
            raise ConfirmFailed(
                "Connectivity failure occurred with both the new and rollback (previous) firewall configurations, "
                "indicating that both configurations are faulty.") from ex
    except Exception as ex:
        # Last resort: apply the no-firewall configuration without asking
        # for confirmation, so the machine stays reachable.
        log.info("Trying to roll back from active to emergency configuration")
        apply_rules(SCRIPT_EMERGENCY, 0, confirm=False)
        raise ConfirmFailed(f"{ex} "
                            "As an emergency measure, "
                            "the firewall was disabled and a configuration without firewall rules was applied. "
                            "To resolve the issue, correct the firewall rules and re-enable the firewall.")
def activate(rules_file):
    """ Activates new rules supplied via rules_file. """
    rm_f(ROLLBACK_FLAG)
    timeout = get_confirm_timeout()
    start_timestamp = time.time()
    cutoff_timestamp = start_timestamp + timeout
    # Assume other activations use the same timeout
    stale_timestamp = start_timestamp - 2.1 * timeout
    log.info("Activating with token=%r, timeout=%s", get_token(), timeout)
    log.debug("Setting up signal handlers to ensure cleanup")
    # Route TERM/HUP/QUIT through the INT handler so atexit cleanup still runs.
    for signum in (signal.SIGTERM, signal.SIGHUP, signal.SIGQUIT):
        signal.signal(signum, signal.getsignal(signal.SIGINT))
    log.debug("Trying to create named pipe %r, until %s, file older than %s is considered stale",
              PIPE_PATH,
              datetime.fromtimestamp(cutoff_timestamp),
              datetime.fromtimestamp(stale_timestamp))
    while time.time() < cutoff_timestamp:
        if try_create_pipe(PIPE_PATH, stale_timestamp):
            log.debug("Pipe created")
            break
        time.sleep(0.5)
    else:
        log.debug("Could not create pipe")
        raise RuntimeError("Previous rules activation didn't finish before confirmation timeout")
    log.info("Writing new rules from %r into %r", rules_file.name, SCRIPT_NEW)
    rm_f(SCRIPT_NEW)
    log.debug("cat > %r", SCRIPT_NEW)
    # Created 0700 from the start so verify_script_perms accepts it later.
    with os.fdopen(os.open(SCRIPT_NEW, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o700), 'w') as script_new:
        shutil.copyfileobj(rules_file, script_new)
    # The rollback target must be sane before risking the new rules.
    verify_script_perms(SCRIPT_ACTIVE)
    try:
        log.info("Trying to apply new configuration")
        apply_rules(SCRIPT_NEW, cutoff_timestamp)
        rm_f(SCRIPT_NEW)
    except Exception:
        rollback()
        raise
def confirm(wait=False):
    """ Confirms rules activation (new ones or during rollback). """
    token = get_token()
    if wait:
        # Budget for activation plus a possible rollback, each padded for up
        # to three misbehaving child processes (see the parse_args epilog).
        timeout = max(get_confirm_timeout(), MINIMAL_CONFIRM_INTERVAL)
        cutoff_timestamp = time.time() + 2 * (timeout + 3 * MINIMAL_SCRIPT_TIMEOUT)
    else:
        cutoff_timestamp = time.time()
    log.info("Confirming with token=%r until %s", token, datetime.fromtimestamp(cutoff_timestamp))
    while True:
        try:
            # Open w/o creating the pipe/file if it doesn't exist ([Errno 2] No such file or directory)
            # Open w/o blocking if no readers are present ([Errno 6] No such device or address)
            with os.fdopen(os.open(PIPE_PATH, os.O_WRONLY | os.O_APPEND | os.O_NONBLOCK), 'w') as pipe:
                log.debug("Writing activation token to %r", PIPE_PATH)
                pipe.write(token + "\n")
                break
        except Exception as ex:
            if time.time() >= cutoff_timestamp:
                raise ConfirmFailed("Too late to confirm: no rules activation process") from ex
            log.debug(f"No activation process yet, continue to wait: {ex}")
            time.sleep(0.5)
    if os.path.lexists(ROLLBACK_FLAG):
        # The activation side already gave up and rolled back.
        raise ConfirmFailed("Too late to confirm: new rules were rolled back")
def try_enable_features(iptables, table, rule):
    """
    Checks if desired iptables features are enabled. Tries to enable them if not.
    On modern systems iptables is capable of dynamically loading required kernel
    modules. This is convenient, misleading and maybe even dangerous at the same time
    ( http://backstage.soundcloud.com/2012/08/shoot-yourself-in-the-foot-with-iptables-and-kmod-auto-loading/ ).
    Since we don't want to meddle with kernel modules for obvious reasons, we use
    iptables itself to check features support. As a side effect such checks may trigger
    kernel module loading. Checks are isolated in a separate temporary chain, that
    nobody refers to.
    This approach has an added advantage of checking whether real iptables rules would
    work, not some "support" per se. Practice shows that the latter may be misleading
    and result in bugs. Therefore if you're not sure <rule> works on a given system,
    just call this command with the given <rule>.
    <rule> is <rule-specification> in terms of iptables(8). Specifying <target> as part
    of it is not required and not particularly useful. <rule> can also be '-L' to check
    table and/or binary availability.
    """
    if rule == '-L':
        # listing is "safe"
        log.info("Checking feature: iptables=%r, table=%r, rule=%r", iptables, table, rule)
        subprocess.check_call([iptables, '-t', table, rule, '-n'])
    else:
        # everything else is isolated in a temporary chain
        chain = "plesk-fw-tmp-chain"
        log.info("Checking feature: iptables=%r, table=%r, rule=%r, chain=%r",
                 iptables, table, rule, chain)
        def remove_chain():
            # Flush entries, zero counters, then delete the chain itself.
            subprocess.check_call([iptables, '-t', table, '-F', chain])
            subprocess.check_call([iptables, '-t', table, '-Z', chain])
            subprocess.check_call([iptables, '-t', table, '-X', chain])
        def create_chain():
            subprocess.check_call([iptables, '-t', table, '-N', chain])
        def append_rule(rule_args):
            subprocess.check_call([iptables, '-t', table, '-A', chain] + rule_args)
        try:
            remove_chain()
        except Exception as ex:
            # Failure is OK here - it means chain didn't exist
            log.debug("During initial %r chain removal: %s", chain, ex)
        create_chain()
        append_rule(rule.split())
        remove_chain()
def main():
    """ Entry point: parse arguments and dispatch the selected command.

    The commands are declared mutually exclusive by argparse, so exactly one
    branch below runs per invocation.
    """
    args = parse_args()
    set_up_logging(args.verbose)
    log.debug("Options: %s", args)
    if args.try_enable_features:
        try_enable_features(args.iptables, args.table, args.rule)
    elif args.confirm:
        confirm(args.wait)
    elif args.activate:
        activate(args.rules_file)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        log.debug("Interrupted:", exc_info=sys.exc_info())
        sys.exit(2)
    except Exception as ex:
        # Error messages belong on stderr (matching the sibling 'ipsets'
        # script); stdout may be consumed/parsed by callers.
        print(f"{ex}", file=sys.stderr)
        log.error("%s", ex)
        log.debug("This exception happened at:", exc_info=sys.exc_info())
        sys.exit(1)
# vim: ft=python