This commit is contained in:
cutemeli
2025-12-22 10:35:30 +00:00
parent 0bfc6c8425
commit 5ce7ca2c5d
38927 changed files with 0 additions and 4594700 deletions

View File

@@ -1,73 +0,0 @@
#!/bin/bash -e
# Free IP Geolocation by DB-IP: https://db-ip.com/db/download/ip-to-country-lite
VAR_D="/usr/local/psa/var/modules/firewall/geoip/`basename "$0"`.d"
die()
{
	# Report a fatal error (prefixed with the script name) on stderr and abort.
	printf '%s: %s\n' "$(basename "$0")" "$*" >&2
	exit 1
}
load_settings()
{
	# Source persisted settings (e.g. DOWNLOAD_TIMEOUT) when the file is non-empty.
	if [ -s "$VAR_D/settings.sh" ]; then
		. "$VAR_D/settings.sh"
	fi
}
save_settings()
{
# Persist current settings into ./settings.sh (caller cd's into the new data
# dir first). The escaped '\$' keeps the ':=' expansion for load time, so an
# environment override on a later run still wins over the stored value.
cat > "./settings.sh" <<-EOT
: \${DOWNLOAD_TIMEOUT:=$DOWNLOAD_TIMEOUT}
EOT
}
exists()
{
	# Succeed only when a non-empty local DB file is present.
	test -s "$VAR_D/db.sqlite3"
}
fetch()
{
# Download the monthly DB-IP country CSV, import it into a fresh SQLite DB in a
# private temp directory, then swap that directory into "$VAR_D".
load_settings
# Clean up temp dirs ("$VAR_D.XXXXXX") left behind by interrupted fetches.
rm -rf "$VAR_D".*
mkdir -p "`dirname "$VAR_D"`"
local tgt_d=
tgt_d="`mktemp -d "$VAR_D.XXXXXX"`"
chmod 700 "$tgt_d"
cd "$tgt_d"
# "now - 1 day" so that on the 1st of a month (before the new dump appears)
# the previous month's file is still requested.
local url="https://download.db-ip.com/free/dbip-country-lite-`date -d 'now - 1 day' --utc +%Y-%m`.csv.gz"
date --utc --rfc-3339=seconds > updated-at
save_settings
# NOTE(review): no 'set -o pipefail', so a curl failure is only caught
# indirectly (gzip fails on truncated/empty input under 'bash -e').
curl ${DOWNLOAD_TIMEOUT:+-m "$DOWNLOAD_TIMEOUT"} -fsSL "$url" | gzip -cd > dbip.csv
sqlite3 db.sqlite3 <<-EOT
CREATE TABLE ips (ip_from TEXT, ip_to TEXT, country_iso_code TEXT);
CREATE INDEX ips_code ON ips (country_iso_code);
.mode csv
.import dbip.csv ips
EOT
# Sanity check: never install an empty DB over existing data.
[ "`sqlite3 db.sqlite3 'SELECT count(*) FROM ips;'`" -gt 0 ] || die "Downloaded DB is empty"
rm -f dbip.csv
# Replace the old data directory with the freshly built one.
rm -rf "$VAR_D"
mv -fT "$tgt_d" "$VAR_D"
}
list()
{
	# Print "ip_from-ip_to" ranges for one country code, one per line.
	local country="$1"
	[ -n "$country" ] || die "--list requires a single 2-letter ISO country code argument"
	# Validate the shape before interpolating into SQL: rules out injection
	# and catches typos early.
	case "$country" in
	[A-Za-z][A-Za-z]) ;;
	*) die "Not a 2-letter ISO country code: '$country'" ;;
	esac
	sqlite3 "$VAR_D/db.sqlite3" \
		"SELECT ip_from || '-' || ip_to FROM ips WHERE country_iso_code = '$country';"
}
# Command dispatcher implementing the data source contract
# (--exists / --fetch / --list ZZ).
case "$1" in
--exists) exists ;;
--fetch) fetch ;;
--list) list "$2" ;;
*) die "Unknown command: '$1'" ;;
esac

View File

@@ -1,10 +0,0 @@
#!/bin/bash -e
# GeoIP2 data created by MaxMind: https://www.maxmind.com . Requires license key.
# Updated twice weekly, every Tuesday and Friday.
# Each account (license key) is limited to 2000 total direct downloads in a 24 hour period.
# Thin wrapper: preset the data directory and edition type, then delegate all
# logic (including the command dispatcher) to the sibling "maxmind-lite" script.
VAR_D="/usr/local/psa/var/modules/firewall/geoip/`basename "$0"`.d"
EDITION_TYPE="GeoIP2"
. "`dirname "$0"`/maxmind-lite"

View File

@@ -1,106 +0,0 @@
#!/bin/bash -e
# Free but less accurate GeoLite2 data created by MaxMind: https://www.maxmind.com . Requires license key.
# Updated twice weekly, every Tuesday and Friday.
# Each account (license key) is limited to 2000 total direct downloads in a 24 hour period.
# VAR_D and EDITION_TYPE may be preset by a wrapper (see the "maxmind" script);
# they default to a directory named after this script and the GeoLite2 edition.
: ${VAR_D:="/usr/local/psa/var/modules/firewall/geoip/`basename "$0"`.d"}
: ${EDITION_TYPE:="GeoLite2"}
EDITION_ID="$EDITION_TYPE-Country-CSV"
die()
{
	# Write "<script>: <message>" to stderr and terminate with failure.
	echo "$(basename "$0"): $*" >&2
	exit 1
}
load_settings()
{
	# Pull in persisted settings (LICENSE_KEY etc.) if the file is non-empty.
	if [ -s "$VAR_D/settings.sh" ]; then
		. "$VAR_D/settings.sh"
	fi
}
save_settings()
{
# Persist settings into ./settings.sh in the current (new data) directory.
# The escaped '\$' defers the ':=' expansion to load time, so environment
# overrides on later runs still take precedence over the stored values.
cat > "./settings.sh" <<-EOT
: \${DOWNLOAD_TIMEOUT:=$DOWNLOAD_TIMEOUT}
: \${LICENSE_KEY:=$LICENSE_KEY}
EOT
}
exists()
{
	# True only when a non-empty local SQLite DB has been built.
	test -s "$VAR_D/db.sqlite3"
}
fetch()
{
# Download the MaxMind Country CSV bundle, import it into a fresh SQLite DB
# in a private temp directory, then swap that directory into "$VAR_D".
load_settings
[ "$EDITION_TYPE" = "GeoLite2" -o "$EDITION_TYPE" = "GeoIP2" ] ||
die "Unsupported MaxMind EDITION_TYPE='$EDITION_TYPE'"
[ -n "$LICENSE_KEY" ] ||
die "Missing MaxMind LICENSE_KEY"
# Clean up temp dirs left behind by interrupted fetches.
rm -rf "$VAR_D".*
mkdir -p "`dirname "$VAR_D"`"
local tgt_d=
tgt_d="`mktemp -d "$VAR_D.XXXXXX"`"
chmod 700 "$tgt_d"
cd "$tgt_d"
# Database structure: https://dev.maxmind.com/geoip/docs/databases/city-and-country
# NOTE(review): LICENSE_KEY travels in the URL, so it is briefly visible in
# the process list via curl's argv.
local url="https://download.maxmind.com/app/geoip_download?edition_id=$EDITION_ID&license_key=$LICENSE_KEY&suffix=zip"
date --utc --rfc-3339=seconds > updated-at
save_settings
curl ${DOWNLOAD_TIMEOUT:+-m "$DOWNLOAD_TIMEOUT"} -fsSL "$url" -o dbip-csv.zip
# -j: junk the zip's inner directory, -q: quiet.
unzip -jq dbip-csv.zip
sqlite3 db.sqlite3 <<-EOT
-- CentOS 7 has SQLite 3.7.17, which doesn't auto-create tables based on CSV header
CREATE TABLE blocks_ipv4 (network TEXT, geoname_id TEXT, registered_country_geoname_id TEXT,
represented_country_geoname_id TEXT, is_anonymous_proxy TEXT, is_satellite_provider TEXT);
CREATE TABLE blocks_ipv6 (network TEXT, geoname_id TEXT, registered_country_geoname_id TEXT,
represented_country_geoname_id TEXT, is_anonymous_proxy TEXT, is_satellite_provider TEXT);
CREATE TABLE locations_en(geoname_id TEXT, locale_code TEXT, continent_code TEXT,
continent_name TEXT, country_iso_code TEXT, country_name TEXT, is_in_european_union TEXT);
CREATE INDEX blocks_ipv4_geoname_id ON blocks_ipv4 (geoname_id);
CREATE INDEX blocks_ipv6_geoname_id ON blocks_ipv6 (geoname_id);
CREATE INDEX locations_en_code_geoname_id ON locations_en (country_iso_code, geoname_id);
.mode csv
.import $EDITION_TYPE-Country-Blocks-IPv4.csv blocks_ipv4
.import $EDITION_TYPE-Country-Blocks-IPv6.csv blocks_ipv6
.import $EDITION_TYPE-Country-Locations-en.csv locations_en
-- Since the tables already existed on import, header is also imported as data, remove it
DELETE FROM blocks_ipv4 WHERE geoname_id = 'geoname_id';
DELETE FROM blocks_ipv6 WHERE geoname_id = 'geoname_id';
DELETE FROM locations_en WHERE geoname_id = 'geoname_id';
EOT
# Sanity check: refuse to install a DB with any empty table.
for table in blocks_ipv4 blocks_ipv6 locations_en; do
[ "`sqlite3 db.sqlite3 "SELECT count(*) FROM $table;"`" -gt 0 ] ||
die "Table '$table' in the downloaded DB is empty"
done
rm -f dbip-csv.zip *.csv
# Replace the old data directory with the freshly built one.
rm -rf "$VAR_D"
mv -fT "$tgt_d" "$VAR_D"
}
list()
{
	# Print CIDR networks (IPv4 first, then IPv6) for one country code.
	local country="$1"
	[ -n "$country" ] || die "--list requires a single 2-letter ISO country code argument"
	# Validate the shape before interpolating into SQL: rules out injection
	# and catches typos early.
	case "$country" in
	[A-Za-z][A-Za-z]) ;;
	*) die "Not a 2-letter ISO country code: '$country'" ;;
	esac
	sqlite3 "$VAR_D/db.sqlite3" "
	SELECT network FROM blocks_ipv4 b INNER JOIN locations_en l ON b.geoname_id = l.geoname_id
	WHERE l.country_iso_code = '$country';
	SELECT network FROM blocks_ipv6 b INNER JOIN locations_en l ON b.geoname_id = l.geoname_id
	WHERE l.country_iso_code = '$country';
	"
}
# Command dispatcher implementing the data source contract
# (--exists / --fetch / --list ZZ).
case "$1" in
--exists) exists ;;
--fetch) fetch ;;
--list) list "$2" ;;
*) die "Unknown command: '$1'" ;;
esac

View File

@@ -1,357 +0,0 @@
#!/usr/local/psa/bin/py3-python -IS
""" ipset management for country filtering in firewall. """
import argparse
import ipaddress
import json
import logging
import os
import subprocess
import sys
import textwrap
log = logging.getLogger('ipsets')
# Directory of this script; bundled data-source scripts live in 'geoip/' below it.
SBIN_D = os.path.dirname(os.path.abspath(__file__))
# Firewall extension's variable-data root.
VAR_D = "/usr/local/psa/var/modules/firewall"
DATA_SOURCE_BIN_D = os.path.join(SBIN_D, 'geoip')
DATA_SOURCE_VAR_D = os.path.join(VAR_D, 'geoip')
SETTINGS_PATH = os.path.join(DATA_SOURCE_VAR_D, 'settings.json')
# Country ipsets are named "<prefix><ip-version>-<country-code>", e.g. "plesk-ip4-DE".
IPSET_PREFIX = "plesk-ip"
def set_up_logging(verbosity):
""" Set up logging based on --verbose count and PLESK_DEBUG environment. """
verbosity = verbosity or 0
level = {
0: logging.CRITICAL,
1: logging.ERROR,
2: logging.WARNING,
3: logging.INFO,
4: logging.DEBUG,
}.get(verbosity, logging.CRITICAL)
if verbosity >= 4 or os.getenv('PLESK_DEBUG'):
level = logging.DEBUG
logging.basicConfig(level=level, format='[%(asctime)s] %(levelname)8s %(message)s')
def parse_args():
    """ Build the CLI parser and parse sys.argv; returns the argparse namespace. """
    # Free-form text appended after the generated options help; dedented below.
    epilog = f"""\
    environment variables:
      DOWNLOAD_TIMEOUT   Data source download timeout, seconds
      LICENSE_KEY        Data source license key (e.g. for 'maxmind')
      PLESK_DEBUG        Set logging verbosity to maximum

    data source contract:
      Each --data-source value is an executable script with the following commands:
      --exists    Returns 0 only when the GeoIP data exists locally
                  (i.e. previous --fetch was successful).
      --fetch     Fetches GeoIP data from a remote source, preprocesses it,
                  and stores it locally. May use and store additional
                  environment variables, such as LICENSE_KEY. Such variables
                  may be absent on subsequent calls. Store data under
                  {DATA_SOURCE_VAR_D}/$data_source.d .
                  Avoid clobbering data on upstream errors.
      --list ZZ   Prints IP ranges or CIDR networks for both IPv4 and IPv6,
                  which are mapped to the country code ZZ, each on a separate
                  line. Order does not matter. Should use only local data,
                  but may use remote data (not recommended). Output examples:
                    127.0.0.0/8
                    192.0.0.0-192.0.0.255
                    fe80::/10
                    2001:db8::-2001:db8:ffff:ffff:ffff:ffff:ffff:ffff
    """
    parser = argparse.ArgumentParser(description="Manage ipsets for country filtering in the firewall",
                                     epilog=textwrap.dedent(epilog),
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    # Exactly one command per invocation.
    commands = parser.add_mutually_exclusive_group(required=True)
    commands.add_argument('--configure', action='store_true',
                          help="Set up country ipsets. Create local GeoIP DB if missing, "
                               "persist settings, recreate country ipsets.")
    commands.add_argument('--update', action='store_true',
                          help="Update local GeoIP DB from a remote source, then recreate all "
                               "country ipsets. Use from a cron job.")
    commands.add_argument('--recreate', action='store_true',
                          help="Create missing and remove unused country ipsets. "
                               "Use from a firewall script.")
    parser.add_argument('-v', '--verbose', action='count', default=0,
                        help="Increase logging verbosity, can be specified multiple times.")
    parser.add_argument('-f', '--force', action='store_true',
                        help="Recreate all country ipsets instead of only missing and extra ones. "
                             "With --configure will also recreate local GeoIP DB and "
                             "update its settings.")
    parser.add_argument('-d', '--data-source', metavar='NAME', required=True, type=type_data_source,
                        help="Data source name. Each data source is a script under "
                             f"{DATA_SOURCE_BIN_D} or {DATA_SOURCE_VAR_D}, e.g. 'maxmind'.")
    # nargs='*': absent flag -> None (use persisted settings);
    # flag with no values -> [] (configure with no countries).
    parser.add_argument('-c', '--countries', nargs='*', metavar='ZZ', type=type_country_code,
                        help="List of 2-letter ISO 3166 country codes.")
    args = parser.parse_args()
    return args
def type_data_source(data_source):
    """ Type caster and checker for --data-source.

    Resolves the name to an executable under the built-in or the var
    data-source directory; raises ArgumentTypeError when none matches.
    """
    candidates = (os.path.join(directory, data_source)
                  for directory in (DATA_SOURCE_BIN_D, DATA_SOURCE_VAR_D))
    for path in candidates:
        if os.access(path, os.X_OK):
            return path
    raise argparse.ArgumentTypeError(f"Unsupported data source: {data_source!r}")
def type_country_code(code):
    """ Type caster and checker for --countries: two uppercase ASCII letters. """
    valid = len(code) == 2 and code.isalpha() and code.isupper()
    if not valid:
        raise argparse.ArgumentTypeError(f"Not a 2-letter ISO 3166 country code: {code!r}")
    return code
def log_geoip_data_dir(data_source):
    """ Just logs expected data source local storage directory (by convention). """
    name = os.path.basename(data_source)
    expected = os.path.join(DATA_SOURCE_VAR_D, name + ".d")
    log.debug("Data directory for %r data source is expected to be %r", name, expected)
def has_geoip_data(data_source):
    """ Returns True if GeoIP data is already fetched (per the --exists contract). """
    log.debug("Checking for GeoIP data existence via %r", data_source)
    result = subprocess.run([data_source, '--exists'])
    return result.returncode == 0
def fetch_geoip_data(data_source):
    """ Refetches GeoIP data from a remote source. """
    log.info("Fetching GeoIP data via %r", data_source)
    # check_call: a failed fetch must abort the caller rather than continue with stale data.
    subprocess.check_call([data_source, '--fetch'])
def list_geoip_data(data_source, country_code):
    """ Lists GeoIP data for the country_code (assuming it is fetched).
    Data is a list of IP ranges or CIDR networks for both IPv4 and IPv6.
    """
    log.debug("Listing GeoIP data for %r via %r", country_code, data_source)
    data = subprocess.check_output([data_source, '--list', country_code], universal_newlines=True)
    # One entry per line; split() also drops blank lines and the trailing newline.
    return data.split()
def geoip_data_to_networks(entries):
    """ Generator turning range/network strings into IPv4Network/IPv6Network objects.

    Entries shaped like "A-B" are inclusive address ranges and are summarized
    into one or more CIDR networks; any other entry is parsed as a single
    network (a bare address becomes a /32 or /128).

    >>> list(geoip_data_to_networks(['10.0.0.0/24', 'fe80::/10']))
    [IPv4Network('10.0.0.0/24'), IPv6Network('fe80::/10')]
    >>> list(geoip_data_to_networks(['10.0.0.0-10.0.0.19', '::-::3']))
    [IPv4Network('10.0.0.0/28'), IPv4Network('10.0.0.16/30'), IPv6Network('::/126')]
    >>> list(geoip_data_to_networks(['127.0.0.1', '::1']))
    [IPv4Network('127.0.0.1/32'), IPv6Network('::1/128')]
    """
    for item in entries:
        if '-' not in item:
            yield ipaddress.ip_network(item)
            continue
        lo_text, hi_text = item.split('-', maxsplit=1)
        lo = ipaddress.ip_address(lo_text)
        hi = ipaddress.ip_address(hi_text)
        yield from ipaddress.summarize_address_range(lo, hi)
def list_existing_ipset_names():
    """ Lists ipsets from the system ('ipset list -name'). """
    log.debug("Listing existing ipset names from system")
    output = subprocess.check_output(["ipset", "list", "-name"], universal_newlines=True)
    names = output.split()
    log.debug("Got ipset names: %r", names)
    return names
def round_to_power_of_2(x):
    """ Returns value rounded to the next nearest non-negative power of 2.
    >>> round_to_power_of_2(0)
    1
    >>> round_to_power_of_2(1)
    1
    >>> round_to_power_of_2(32)
    32
    >>> round_to_power_of_2(1000)
    1024
    """
    if x < 1:
        return 1
    power = 1
    while power < x:
        power *= 2
    return power
def create_ipset(ipset_name, ip_version, num_elements=0):
    """ Creates ipset ipset_name for ip_version with num_elements estimate. """
    # Account for possible growth due to updates, use a value that will not change often
    num_elements = round_to_power_of_2(int(num_elements * 1.5))
    # Only pass 'maxelem' above ipset's default capacity of 65536.
    maxelem_args = ["maxelem", str(num_elements)] if num_elements > 65536 else []
    family_args = ["family", "inet" if str(ip_version) != '6' else "inet6"]
    # '-exist' makes creation idempotent when an identical ipset is already present.
    cmd = ["ipset", "create", ipset_name, "hash:net", "-exist"] + family_args + maxelem_args
    try:
        log.debug("Creating %r ipset: %r", ipset_name, cmd)
        subprocess.check_call(cmd)
    except Exception as ex:
        # '-exist' still fails when parameters (e.g. maxelem) differ; drop and retry.
        log.warning("Failed to create %r ipset from the first try, possibly 'maxelem' changed, "
                    "will try recreating: %s",
                    ipset_name, ex)
        try:
            destroy_ipset(ipset_name)
        except Exception as ex:
            # Destroy fails while iptables rules still reference the set.
            log.debug("Destroying %r ipset failed, likely due to existing references", ipset_name)
            raise RuntimeError(f"Cannot recreate ipset {ipset_name!r}: {ex} "
                               "Try stopping the plesk-firewall.service first.") from ex
        log.debug("Creating new %r ipset: %r", ipset_name, cmd)
        subprocess.check_call(cmd)
def destroy_ipset(ipset_name):
    """ Destroys ipset_name. This will fail if it is referenced by any iptables rules. """
    log.debug("Destroying %r ipset", ipset_name)
    # check_call: callers rely on the exception to detect a still-referenced set.
    subprocess.check_call(["ipset", "destroy", ipset_name])
def update_ipset(ipset_name, networks):
    """ Replaces networks in ipset_name. """
    # 'ipset restore' applies flush + adds as one batch, keeping the window with
    # a partially-populated set as small as possible.
    stdin = "\n".join([f"flush {ipset_name}"] + [f"add {ipset_name} {net}" for net in networks])
    log.debug("Updating %r ipset networks, %d entries", ipset_name, len(networks))
    subprocess.run(["ipset", "restore"], check=True, universal_newlines=True, input=stdin)
def ipset_name(country_code, ip_version):
    """ Returns ipset name for the country_code and ip_version (4 or 6). """
    return f"{IPSET_PREFIX}{ip_version}-{country_code}"
def recreate_ipsets(data_source, countries, recreate_all=False):
    """ Recreates ipsets for the countries, using data_source.
    By default, only missing ipsets are created and unused are removed.
    If recreate_all, all ipsets are recreated.
    """
    existing_ipsets = set(list_existing_ipset_names())
    log.debug("Checking for missing ipsets (recreate_all=%r)", recreate_all)
    required_ipsets = set()
    for country_code in countries:
        v4_name, v6_name = ipset_name(country_code, 4), ipset_name(country_code, 6)
        required_ipsets.add(v4_name)
        required_ipsets.add(v6_name)
        if not recreate_all and v4_name in existing_ipsets and v6_name in existing_ipsets:
            log.debug("Skip recreating already existing ipsets for %r country: %r, %r",
                      country_code, v4_name, v6_name)
            continue
        log.info("Creating and populating ipsets for %r country: %r, %r",
                 country_code, v4_name, v6_name)
        # Split the country's networks by IP version; each version has its own ipset.
        v4_nets, v6_nets = [], []
        for net in geoip_data_to_networks(list_geoip_data(data_source, country_code)):
            if net.version == 4:
                v4_nets.append(net)
            elif net.version == 6:
                v6_nets.append(net)
            else:
                raise RuntimeError(f"Network {net} is neither IPv4 nor IPv6")
        # Create (sized to the element count) first, then replace contents in one batch.
        create_ipset(v4_name, 4, len(v4_nets))
        create_ipset(v6_name, 6, len(v6_nets))
        update_ipset(v4_name, v4_nets)
        update_ipset(v6_name, v6_nets)
    log.debug("Checking for unused ipsets")
    for name in existing_ipsets:
        try:
            if name.startswith(IPSET_PREFIX) and name not in required_ipsets:
                log.info("Destroying unused ipset: %r", name)
                destroy_ipset(name)
        except Exception as ex:
            # Best effort: an ipset still referenced by iptables cannot be destroyed now.
            log.warning("Cannot remove ipset %r, will try next time: %s", name, ex)
def store_settings(countries):
    """ Stores settings for subsequent calls. """
    log.debug("Storing settings into %r", SETTINGS_PATH)
    # Sorted for stable, diff-friendly file contents.
    data = {
        'countries': sorted(countries),
    }
    os.makedirs(os.path.dirname(SETTINGS_PATH), 0o755, exist_ok=True)
    with open(SETTINGS_PATH, 'w') as fd:
        json.dump(data, fd)
        fd.write("\n")
def fetch_settings():
    """ Fetches previously stored settings.

    Returns the persisted country-code list; raises RuntimeError when the
    settings file is missing or malformed (i.e. --configure never succeeded).
    """
    log.debug("Fetching settings from %r", SETTINGS_PATH)
    try:
        with open(SETTINGS_PATH, 'r') as fd:
            data = json.load(fd)
            log.debug("Fetched settings: %r", data)
            return data['countries']
    except Exception as ex:
        raise RuntimeError(f"Cannot read persisted settings from {SETTINGS_PATH!r}: {ex}") from ex
def configure(data_source, countries, recreate_all=False):
    """ Sets up countries ipsets from the data_source. Stores settings (countries, for data source). """
    # Fetch the GeoIP DB when forced or when no local data exists yet.
    if recreate_all or not has_geoip_data(data_source):
        fetch_geoip_data(data_source)
    selected = countries if countries else []
    store_settings(selected)
    recreate_ipsets(data_source, selected, recreate_all)
def update(data_source, countries):
    """ Updates data from the data_source, then updates countries ipsets. """
    fetch_geoip_data(data_source)
    # No explicit countries means: reuse the persisted --configure selection.
    selected = fetch_settings() if countries is None else countries
    recreate_ipsets(data_source, selected, recreate_all=True)
def recreate(data_source, countries, recreate_all=False):
    """ Recreates missing countries ipsets and removes unused ones, uses data from the data_source. """
    # No explicit countries means: reuse the persisted --configure selection.
    selected = fetch_settings() if countries is None else countries
    recreate_ipsets(data_source, selected, recreate_all)
def main():
    """ Entry point: parse arguments, set up logging, dispatch the chosen command. """
    args = parse_args()
    set_up_logging(args.verbose)
    log.debug("Options: %s", args)
    log_geoip_data_dir(args.data_source)
    # Exactly one command flag is set (enforced by argparse's exclusive group).
    dispatch = (
        (args.configure, lambda: configure(args.data_source, args.countries, args.force)),
        (args.update, lambda: update(args.data_source, args.countries)),
        (args.recreate, lambda: recreate(args.data_source, args.countries, args.force)),
    )
    for chosen, run in dispatch:
        if chosen:
            run()
            break
if __name__ == '__main__':
    try:
        main()
    except Exception as ex:
        # Short operator-facing message on stderr; traceback only at debug verbosity.
        print(f"{ex}", file=sys.stderr)
        log.error("%s", ex)
        log.debug("This exception happened at:", exc_info=sys.exc_info())
        sys.exit(1)
# vim: ft=python

View File

@@ -1,445 +0,0 @@
#!/usr/local/psa/bin/py3-python -IS
""" Safe firewall rules activation and feature checks. This is a 'safeact' replacement. """
import argparse
import atexit
import errno
import logging
import os
import select
import shutil
import signal
import stat
import subprocess
import sys
import textwrap
import time
from datetime import datetime
log = logging.getLogger('rules')
PLESKRC_BIN = "/usr/local/psa/admin/sbin/pleskrc"
VAR_D = "/usr/local/psa/var/modules/firewall"
""" extension var directory """
SCRIPT_NEW = os.path.join(VAR_D, "firewall-new.sh")
""" new set of firewall rules """
SCRIPT_ACTIVE = os.path.join(VAR_D, "firewall-active.sh")
""" previous (active) set of firewall rules """
SCRIPT_EMERGENCY = os.path.join(VAR_D, "firewall-emergency.sh")
""" emergency set of firewall rules - ones that disable firewall """
PIPE_PATH = os.path.join(VAR_D, "confirm.pipe")
""" interprocess communication named pipe (fifo) """
ROLLBACK_FLAG = os.path.join(VAR_D, "rollback.flag")
""" "new firewall rules turned out to be bad" flag """
DEFAULT_CONFIRM_INTERVAL = 15
""" default confirmation timeout, in seconds """
MINIMAL_CONFIRM_INTERVAL = 5
""" minimal time the code will actually await confirmation token, in seconds """
MINIMAL_SCRIPT_TIMEOUT = 5
""" minimal time the code will allow a subprocess to execute, in seconds """
class ConfirmFailed(RuntimeError):
    """ Raised when an activation could not be confirmed in time (no, late, or mismatched token). """
    pass
def set_up_logging(verbosity):
""" Set up logging based on --verbose count and PLESK_DEBUG environment. """
verbosity = verbosity or 0
level = {
0: logging.CRITICAL,
1: logging.ERROR,
2: logging.WARNING,
3: logging.INFO,
4: logging.DEBUG,
}.get(verbosity, logging.CRITICAL)
if verbosity >= 4 or os.getenv('PLESK_DEBUG'):
level = logging.DEBUG
logging.basicConfig(level=level, format='[%(asctime)s] %(levelname)8s %(message)s')
def parse_args():
    """ Build the CLI parser and parse sys.argv; returns the argparse namespace. """
    # Free-form text appended after the generated options help; dedented below.
    epilog = f"""\
    environment variables:
      PHP_SAFEACT_TOKEN             Activation token
      PHP_SAFEACT_CONFIRM_INTERVAL  Confirmation timeout (default: {DEFAULT_CONFIRM_INTERVAL})
                                    (activation and rollback each take at most this time,
                                    but system will actually wait for confirmation token
                                    for at least {MINIMAL_CONFIRM_INTERVAL} seconds, which may
                                    increase the effective timeout, which may be
                                    additionally increased due to misbehaving child
                                    processes by up to {3 * MINIMAL_SCRIPT_TIMEOUT} seconds)
      PLESK_DEBUG                   Set logging verbosity to maximum
    """
    parser = argparse.ArgumentParser(description="Activate firewall rules or check its features safely",
                                     epilog=textwrap.dedent(epilog),
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-v', '--verbose', action='count', default=0,
                        help="Increase logging verbosity, can be specified multiple times.")
    # Exactly one command per invocation.
    commands = parser.add_mutually_exclusive_group(required=True)
    commands.add_argument('--activate', action='store_true',
                          help="Activate new rules. Synchronous.")
    commands.add_argument('--confirm', action='store_true',
                          help="Commit activation of the new rules. Should be invoked from a new "
                               "SSH session or web/app server worker process to ensure an existing "
                               "network connection is not re-used.")
    commands.add_argument('--try-enable-features', action='store_true',
                          help="Probe iptables features support. This will both check and "
                               "try to enable the specified features.")
    act_opts = parser.add_argument_group("--activate arguments")
    act_opts.add_argument('--rules-file', type=argparse.FileType('r'), default='-', metavar='PATH',
                          help="New rules script (default: %(default)s, i.e. STDIN)")
    cfm_opts = parser.add_argument_group("--confirm arguments")
    cfm_opts.add_argument('--wait', action='store_true',
                          help="Wait for the activation process to appear")
    try_opts = parser.add_argument_group("--try-enable-features arguments")
    try_opts.add_argument('--iptables', default='/usr/sbin/iptables',
                          help="iptables binary path (default: %(default)s)")
    try_opts.add_argument('--table', default='filter',
                          help="iptables table name (default: %(default)s)")
    try_opts.add_argument('--rule', default='-L',
                          help="iptables rule options (default: %(default)s), "
                               "use the default to check table and/or binary availability")
    args = parser.parse_args()
    return args
def get_token():
    """ Returns activation token string.

    Raises RuntimeError when PHP_SAFEACT_TOKEN is missing/blank, or when the
    token is too long to be written atomically into the confirmation pipe.
    """
    token = os.getenv('PHP_SAFEACT_TOKEN', '').strip()
    if token == '':
        raise RuntimeError("Activation token is absent")
    # Writes to pipes are atomic only up to a system-specific limit (PIPE_BUF,
    # at least 512 bytes); keep one byte of headroom for the trailing newline.
    if len(token) > select.PIPE_BUF - 2:
        raise RuntimeError(f"Activation token is too long: {len(token)} characters")
    return token
def get_confirm_timeout():
    """ Returns confirmation timeout in seconds as a positive int.

    Taken from PHP_SAFEACT_CONFIRM_INTERVAL; falls back to the default when
    the variable is unset or empty. Raises ValueError for non-positive values.
    """
    raw = os.getenv('PHP_SAFEACT_CONFIRM_INTERVAL')
    if not raw:
        return DEFAULT_CONFIRM_INTERVAL
    value = int(raw)
    if value <= 0:
        raise ValueError(f"Confirmation timeout is too small: {value}")
    return value
def rm_f(path):
    """ Equivalent of 'rm -f' for a file path: a missing file is not an error. """
    log.debug("rm -f %r", path)
    try:
        os.unlink(path)
    except OSError as ex:
        # Only swallow "no such file"; propagate every other failure.
        if ex.errno != errno.ENOENT:
            raise
def verify_script_perms(path):
    """ Checks that script file looks to be OK.

    Raises ValueError unless path is a non-empty regular file owned by root
    with mode exactly 0700.
    """
    log.debug("Checking %r script attributes", path)
    # lstat: a symlink must fail the regular-file check rather than be followed.
    st = os.lstat(path)
    if not stat.S_ISREG(st.st_mode):
        raise ValueError(f"{path}: The script is not a regular file")
    if st.st_size == 0:
        raise ValueError(f"{path}: The script is empty")
    if st.st_uid != 0:
        raise ValueError(f"{path}: The script is not owned by root")
    # Exact-mode comparison: any extra bits (setuid, group/world access) are rejected.
    if st.st_mode != (stat.S_IFREG | 0o700):
        raise ValueError(f"{path}: The script has permissions other than 0700")
def try_restart_service(service, timeout):
    """ Restarts the service if it is already running ('try-restart'). """
    # Never give the child less than the minimal allowance.
    timeout = max(timeout, MINIMAL_SCRIPT_TIMEOUT)
    log.debug("Trying to restart %r service with timeout=%s", service, timeout)
    subprocess.check_call([PLESKRC_BIN, service, 'try-restart'], timeout=timeout)
def is_service_running(service):
    """ Returns whether the given service reports a running status. """
    log.debug("Checking %r service status", service)
    return subprocess.run([PLESKRC_BIN, service, 'status']).returncode == 0
def execute_rules_script(script, timeout):
    """ Executes script within a given timeout, with the activation token scrubbed from env. """
    timeout = max(timeout, MINIMAL_SCRIPT_TIMEOUT)
    # The token must never leak into the rules script's environment.
    env = dict(os.environ)
    env.pop('PHP_SAFEACT_TOKEN', None)
    log.debug("Executing script %r with timeout=%s", script, timeout)
    subprocess.check_call([script], timeout=timeout, env=env)
def apply_rules(script, cutoff_timestamp, confirm=True):
    """ Applies rules script and (optionally) waits for confirmation until cutoff_timestamp.
    On success links the script into active configuration.
    """
    log.info("Trying to apply rules from %r until %s, %s confirmation",
             script, datetime.fromtimestamp(cutoff_timestamp), "with" if confirm else "without")
    execute_rules_script(script, cutoff_timestamp - time.time())
    if confirm:
        # This is required to ensure that there are no outstanding connections to browser
        # and any new connections are allowed by firewall.
        try:
            try_restart_service('sw-cp-server', cutoff_timestamp - time.time())
            if is_service_running('nginx'):
                log.debug("Nginx looks to be the frontend web server")
                try_restart_service('nginx', cutoff_timestamp - time.time())
            else:
                log.debug("Apache looks to be the frontend web server")
                try_restart_service('apache', cutoff_timestamp - time.time())
        except subprocess.TimeoutExpired as ex:
            # A slow restart is not fatal: the confirmation step below decides.
            log.warning(f"{ex}. Will attempt to wait for confirmation anyway.")
            log.debug("This exception happened at:", exc_info=sys.exc_info())
        expected_token = get_token()
        # Always give the confirmer a minimal window, even if the cutoff passed already.
        cutoff_timestamp = max(cutoff_timestamp, time.time() + MINIMAL_CONFIRM_INTERVAL)
        log.debug("Waiting for a matching activation token on %r until %s",
                  PIPE_PATH, datetime.fromtimestamp(cutoff_timestamp))
        # Open w/o blocking to ensure open doesn't block w/o writers present
        with os.fdopen(os.open(PIPE_PATH, os.O_RDONLY | os.O_NONBLOCK), 'r') as pipe:
            # Also keep the pipe open for writing, otherwise after the first read select()
            # will immediately return with only EOF available to read
            # (this normally indicates absence or writers).
            with open(PIPE_PATH, 'wb'):
                timeout = cutoff_timestamp - time.time()
                while timeout > 0 and select.select([pipe], [], [], timeout)[0]:
                    token = pipe.readline().strip()
                    if token == expected_token:
                        log.info("Received matching activation token")
                        break
                    log.debug("Received non-matching activation token: %r", token)
                    timeout = cutoff_timestamp - time.time()
                else:
                    # Loop finished without 'break': timed out waiting for the token.
                    raise ConfirmFailed("Did not receive a matching activation token "
                                        "before confirmation timeout")
    if script != SCRIPT_ACTIVE:
        log.debug("Setting %r as the active configuration %r", script, SCRIPT_ACTIVE)
        # Previously files were hardlinked, but we don't really need strict atomicity here
        # and hardlinks may cause issues if somebody decides to meddle with the files manually
        # (e.g. emergency may be hardlinked into active and may be updated due to copy into active)
        rm_f(SCRIPT_ACTIVE)
        log.debug("cp -Pa %r %r", script, SCRIPT_ACTIVE)
        shutil.copy2(script, SCRIPT_ACTIVE, follow_symlinks=False)
    else:
        log.debug("Rules from %r are already the active configuration", script)
def try_create_pipe(path, stale_timestamp):
    """ Creates a pipe if it doesn't exist, removes it if it is too old. Otherwise returns False. """
    try:
        ctime = os.path.getctime(path)
        if ctime < stale_timestamp:
            # An old pipe means a previous activation died without cleanup.
            log.info("Removing stale named pipe %r created at %s", path, datetime.fromtimestamp(ctime))
            os.unlink(path)
        else:
            # A fresh pipe exists: another activation is still in progress.
            return False
    except OSError as ex:
        # ENOENT (no pipe yet) is the normal path; anything else is a real error.
        if ex.errno != errno.ENOENT:
            raise
    log.debug("Creating named pipe %r and setting up atexit handler", path)
    os.mkfifo(path, 0o600)
    # Registered only by the process that created the pipe, so it is removed
    # on any exit path of that process.
    @atexit.register
    def remove_pipe():
        log.debug("Removing named pipe %r on exit", path)
        rm_f(path)
    return True
def rollback():
    """ Rolls back to some working configuration.

    First re-applies the previously active rules (with confirmation); if that
    also fails, applies the emergency (firewall-off) rules unconditionally.
    Always raises: the caller is already on a failure path.
    """
    log.info("Rolling back to working configuration")
    # The flag tells a late '--confirm' that the new rules were abandoned.
    log.debug("touch %r", ROLLBACK_FLAG)
    with open(ROLLBACK_FLAG, 'wb'):
        pass
    try:
        try:
            log.info("Trying to roll back from new to active configuration")
            cutoff_timestamp = time.time() + get_confirm_timeout()
            apply_rules(SCRIPT_ACTIVE, cutoff_timestamp)
        except ConfirmFailed as ex:
            raise ConfirmFailed(
                "Connectivity failure occurred with both the new and rollback (previous) firewall configurations, "
                "indicating that both configurations are faulty.") from ex
    except Exception as ex:
        # Last resort: disable the firewall entirely, no confirmation required.
        log.info("Trying to roll back from active to emergency configuration")
        apply_rules(SCRIPT_EMERGENCY, 0, confirm=False)
        raise ConfirmFailed(f"{ex} "
                            "As an emergency measure, "
                            "the firewall was disabled and a configuration without firewall rules was applied. "
                            "To resolve the issue, correct the firewall rules and re-enable the firewall.")
def activate(rules_file):
    """ Activates new rules supplied via rules_file.

    Creates the confirmation pipe, writes the new rules script, applies it and
    waits for confirmation; rolls back on any failure.
    """
    rm_f(ROLLBACK_FLAG)
    timeout = get_confirm_timeout()
    start_timestamp = time.time()
    cutoff_timestamp = start_timestamp + timeout
    # Assume other activations use the same timeout
    stale_timestamp = start_timestamp - 2.1 * timeout
    log.info("Activating with token=%r, timeout=%s", get_token(), timeout)
    # Route common termination signals through the SIGINT handler so the
    # atexit pipe cleanup still runs.
    log.debug("Setting up signal handlers to ensure cleanup")
    for signum in (signal.SIGTERM, signal.SIGHUP, signal.SIGQUIT):
        signal.signal(signum, signal.getsignal(signal.SIGINT))
    log.debug("Trying to create named pipe %r, until %s, file older than %s is considered stale",
              PIPE_PATH,
              datetime.fromtimestamp(cutoff_timestamp),
              datetime.fromtimestamp(stale_timestamp))
    # Poll until the pipe can be created: an existing fresh pipe means another
    # activation is still running.
    while time.time() < cutoff_timestamp:
        if try_create_pipe(PIPE_PATH, stale_timestamp):
            log.debug("Pipe created")
            break
        time.sleep(0.5)
    else:
        log.debug("Could not create pipe")
        raise RuntimeError("Previous rules activation didn't finish before confirmation timeout")
    log.info("Writing new rules from %r into %r", rules_file.name, SCRIPT_NEW)
    rm_f(SCRIPT_NEW)
    log.debug("cat > %r", SCRIPT_NEW)
    # 0700 from the start, so verify_script_perms would accept it later.
    with os.fdopen(os.open(SCRIPT_NEW, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o700), 'w') as script_new:
        shutil.copyfileobj(rules_file, script_new)
    # Make sure the rollback target is usable before risking the new rules.
    verify_script_perms(SCRIPT_ACTIVE)
    try:
        log.info("Trying to apply new configuration")
        apply_rules(SCRIPT_NEW, cutoff_timestamp)
        rm_f(SCRIPT_NEW)
    except Exception:
        rollback()
        raise
def confirm(wait=False):
    """ Confirms rules activation (new ones or during rollback). """
    token = get_token()
    if wait:
        timeout = max(get_confirm_timeout(), MINIMAL_CONFIRM_INTERVAL)
        # Worst case: activation plus rollback, each padded for misbehaving children.
        cutoff_timestamp = time.time() + 2 * (timeout + 3 * MINIMAL_SCRIPT_TIMEOUT)
    else:
        cutoff_timestamp = time.time()
    log.info("Confirming with token=%r until %s", token, datetime.fromtimestamp(cutoff_timestamp))
    while True:
        try:
            # Open w/o creating the pipe/file if it doesn't exist ([Errno 2] No such file or directory)
            # Open w/o blocking if no readers are present ([Errno 6] No such device or address)
            with os.fdopen(os.open(PIPE_PATH, os.O_WRONLY | os.O_APPEND | os.O_NONBLOCK), 'w') as pipe:
                log.debug("Writing activation token to %r", PIPE_PATH)
                pipe.write(token + "\n")
            break
        except Exception as ex:
            if time.time() >= cutoff_timestamp:
                raise ConfirmFailed("Too late to confirm: no rules activation process") from ex
            log.debug(f"No activation process yet, continue to wait: {ex}")
            time.sleep(0.5)
    # The activation side sets this flag when it already gave up on the new rules.
    if os.path.lexists(ROLLBACK_FLAG):
        raise ConfirmFailed("Too late to confirm: new rules were rolled back")
def try_enable_features(iptables, table, rule):
    """
    Checks if desired iptables features are enabled. Tries to enable them if not.
    On modern systems iptables is capable of dynamically loading required kernel
    modules. This is convenient, misleading and maybe even dangerous at the same time
    ( http://backstage.soundcloud.com/2012/08/shoot-yourself-in-the-foot-with-iptables-and-kmod-auto-loading/ ).
    Since we don't want to meddle with kernel modules for obvious reasons, we use
    iptables itself to check features support. As a side effect such checks may trigger
    kernel module loading. Checks are isolated in a separate temporary chain, that
    nobody refers to.
    This approach has an added advantage of checking whether real iptables rules would
    work, not some "support" per se. Practice shows that the latter may be misleading
    and result in bugs. Therefore if you're not sure <rule> works on a given system,
    just call this command with the given <rule>.
    <rule> is <rule-specification> in terms of iptables(8). Specifying <target> as part
    of it is not required and not particularly useful. <rule> can also be '-L' to check
    table and/or binary availability.
    """
    if rule == '-L':
        # listing is "safe"
        log.info("Checking feature: iptables=%r, table=%r, rule=%r", iptables, table, rule)
        subprocess.check_call([iptables, '-t', table, rule, '-n'])
    else:
        # everything else is isolated in a temporary chain
        chain = "plesk-fw-tmp-chain"
        log.info("Checking feature: iptables=%r, table=%r, rule=%r, chain=%r",
                 iptables, table, rule, chain)
        def remove_chain():
            # Flush rules, zero counters, then delete the chain itself.
            subprocess.check_call([iptables, '-t', table, '-F', chain])
            subprocess.check_call([iptables, '-t', table, '-Z', chain])
            subprocess.check_call([iptables, '-t', table, '-X', chain])
        def create_chain():
            subprocess.check_call([iptables, '-t', table, '-N', chain])
        def append_rule(rule_args):
            subprocess.check_call([iptables, '-t', table, '-A', chain] + rule_args)
        try:
            remove_chain()
        except Exception as ex:
            # Failure is OK here - it means chain didn't exist
            log.debug("During initial %r chain removal: %s", chain, ex)
        create_chain()
        append_rule(rule.split())
        remove_chain()
def main():
    """Entry point: parse options, set up logging, run the selected action."""
    options = parse_args()
    set_up_logging(options.verbose)
    log.debug("Options: %s", options)
    if options.activate:
        activate(options.rules_file)
        return
    if options.confirm:
        confirm(options.wait)
        return
    if options.try_enable_features:
        try_enable_features(options.iptables, options.table, options.rule)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C: quiet exit; full traceback only at debug level.
        log.debug("Interrupted:", exc_info=sys.exc_info())
        sys.exit(2)
    except Exception as ex:
        # Show the message to the operator, keep details in the log.
        print(ex)
        log.error("%s", ex)
        log.debug("This exception happened at:", exc_info=sys.exc_info())
        sys.exit(1)
# vim: ft=python

View File

@@ -1,48 +0,0 @@
#!/bin/bash
# Copyright 1999-2023. Plesk International GmbH. All rights reserved.
# Program name for messages; "$0" is quoted so paths containing whitespace
# do not word-split (the original unquoted $0 was an SC2086 bug).
PN=$(basename "$0")

die ()
{
    # Print the message to stderr and abort with failure.
    echo "$@" 1>&2;
    exit 1
}
usage()
{
    # Print a short CLI synopsis on stdout.
    printf '%s\n' \
        "Usage: $PN <user> <working-dir> [<gitarg1> <gitarg2>...]" \
        "Execute git command with user rights in selected working directory"
}
# <user> and <working-dir> are mandatory; git arguments are optional.
# The original "-le 3" check contradicted the documented usage above and
# rejected valid two- and three-argument invocations.
if [ "$#" -lt 2 ]; then
    usage
    exit 1
fi

username="$1"
if ! id "$username" >/dev/null 2>&1; then
    die "Unknown user $username"
fi
shift

workdir="$1"
shift

# Locate git up front; checking the assignment directly avoids the fragile
# "$?"-after-assignment pattern (SC2181).
if ! git_cmd=$(command -v git); then
    die "Command git not found"
fi

export PATH="/usr/local/bin:/usr/local/sbin:/usr/bin:/bin:/usr/sbin:/sbin"
export GIT_SSH="/usr/local/psa/admin/sbin/modules/git/git_ssh"
export GIT_SSH_VARIANT=ssh
export PRESERVE_ENV="GIT_SSH,GIT_ASKPASS,GIT_PASS"

# Run under the target user's home.  $username has been validated against the
# passwd database above, so the tilde expansion via eval cannot inject.
HOME=$(eval echo ~"$username")
export HOME

umask 022
/usr/local/psa/admin/sbin/filemng "$username" exec "$workdir" "$git_cmd" "$@"

View File

@@ -1,73 +0,0 @@
#!/bin/bash
# Copyright 1999-2016. Parallels IP Holdings GmbH. All Rights Reserved.
usage()
{
    # Complain on stderr and abort with the conventional "bad usage" status.
    printf 'Usage: %s [-p <port>] <hostname> [<args>...]\n' "$0" >&2
    exit 2
}
update_known_hosts()
{
    # Ensure the target SSH server's public host key is present in a
    # per-port known-hosts file (~/.ssh/git_known_hosts[_with_port_N]),
    # gathering it with ssh-keyscan on first contact (trust on first use).
    # The arguments mirror the ssh command line: an optional leading
    # "-o <value>" pair is skipped via $offset, then an optional "-p <port>"
    # and the [user@]host operand are located by positional indirection.
    # Side effect: sets the global KNOWN_HOSTS consumed by the caller.
    local port= server= offset=0
    if [ "$1" = "-o" ]; then
        offset=2
    fi

    option_arg_num=$((offset+1))
    if [ "${!option_arg_num}" = "-p" ]; then
        port_offset=$((offset+2))
        server_offset=$((offset+3))
        port="${!port_offset}"
        server="${!server_offset}"
    else
        server_offset=$((offset+1))
        server="${!server_offset}"
    fi

    # Strip the "user@" prefix, if any.
    local hostname="`echo "$server" | cut -d@ -f2`"
    local server_addr="$hostname${port:+:$port}"

    KNOWN_HOSTS="`readlink -m ~/.ssh/git_known_hosts${port:+_with_port_$port}`"
    # Nothing to do when the key is already recorded for this host.
    if [ -f "$KNOWN_HOSTS" ] && [ -n "`ssh-keygen -F "$hostname" -f "$KNOWN_HOSTS"`" ]; then
        echo "Public key for the server at '$server_addr' is already known in '$KNOWN_HOSTS'." >&2
        return 0
    fi

    echo "Server at '$server_addr' is seen for the first time." >&2
    echo "Adding its public key to the list of known hosts in '$KNOWN_HOSTS'." >&2

    # -H hashes hostnames in the output, matching HashKnownHosts=yes used by
    # the ssh invocation at the bottom of this script.
    local key="`ssh-keyscan ${port:+-p $port} -H "$hostname"`"
    [ -n "$key" ] || {
        echo "Failed to gather public SSH host key for the '$server_addr'." >&2
        return 1
    }
    mkdir -p -m0700 "`dirname "$KNOWN_HOSTS"`"
    echo "$key" >> "$KNOWN_HOSTS" || {
        echo "Failed to add public SSH host key for the '$server_addr' into '$KNOWN_HOSTS'." >&2
        return 1
    }
    return 0
}
[ $# -ge 1 ] || usage

# update_known_hosts fills this global with the known-hosts file it used.
KNOWN_HOSTS=
update_known_hosts "$@"

[ -f "$KNOWN_HOSTS" ] || {
    echo "Known hosts file '$KNOWN_HOSTS' doesn't exist" >&2
    exit 1
}

# Always pin against the host keys gathered above; never prompt interactively.
if [ -z "$PLESK_SSH_KEY_PATH" ]
then
    ssh -o UserKnownHostsFile="$KNOWN_HOSTS" -o StrictHostKeyChecking=yes \
        -o HashKnownHosts=yes -o BatchMode=yes "$@"
else
    # A dedicated identity file was supplied by the environment.
    ssh -i "$PLESK_SSH_KEY_PATH" -o UserKnownHostsFile="$KNOWN_HOSTS" \
        -o StrictHostKeyChecking=yes -o HashKnownHosts=yes -o BatchMode=yes "$@"
fi

View File

@@ -1,8 +0,0 @@
#!/bin/bash
set -eu

# Install or remove the packages backing Git-over-HTTP support.
case "${1:-}" in
    remove) plesk sbin package --remove plesk-git-http || : ;;
    *)      plesk sbin package --install git plesk-git-http ;;
esac

View File

@@ -1,3 +0,0 @@
#!/bin/bash -e
### Copyright 1999-2025. WebPros International GmbH. All rights reserved.
# Recursively reset ownership to root:root, but only when the target
# directory exists.  Using `if` (instead of the original trailing
# `[ -d ] && chown`) keeps the script from exiting with status 1 under
# `-e` when the directory is absent.
if [ -d "$1" ]; then
    chown -R root:root "$1"
fi

View File

@@ -1,4 +0,0 @@
#!/bin/bash -e
### Copyright 1999-2024. WebPros International GmbH. All rights reserved.
# Print the glibc version: the last field of the banner line of `ldd`.
ldd --version | awk '$1 == "ldd" { print $NF }'

View File

@@ -1,89 +0,0 @@
#!/bin/bash
set -eu
die()
{
    # Emit the message on stderr and terminate with failure.
    printf '%s\n' "$*" >&2
    exit 1
}
# nodenv tree shipped with the extension and its system-wide install targets.
NODENV_SRC_PATH="/usr/local/psa/admin/plib/modules/nodejs/libexec/nodenv"
NODENV_DST_BASE_PATH="/usr/libexec"
NODENV_DST_PATH="$NODENV_DST_BASE_PATH/nodenv"
# Public entry point on PATH (symlink into $NODENV_DST_PATH).
NODENV_TARGET="/usr/bin/nodenv"
# systemd drop-in directory for httpd unit overrides (AlmaLinux 10 case).
HTTPD_SYSTEMD_CONF_DIR="/usr/lib/systemd/system/httpd.service.d"
install_nodenv()
{
    # Make every bundled nodenv helper executable, then publish the tree.
    chmod 755 "$NODENV_SRC_PATH"/*
    cp -rf "$NODENV_SRC_PATH" "$NODENV_DST_BASE_PATH"
    # Expose the binary on PATH unless something already occupies that name.
    [ -e "$NODENV_TARGET" ] || ln -sf "$NODENV_DST_PATH/nodenv" "$NODENV_TARGET"
    # Profile hook so interactive shells initialize nodenv.
    cp "$NODENV_SRC_PATH/nodenv-init-profile" /etc/profile.d/nodenv.sh
}
remove_nodenv()
{
    # Remove the /usr/bin/nodenv link only when it still points at our copy.
    # Declaration and assignment are split so a realpath failure (target
    # already gone) is not masked by `local`.
    local nodenv_realpath
    nodenv_realpath=$(realpath "$NODENV_TARGET" 2>/dev/null) || nodenv_realpath=
    if [ "$nodenv_realpath" = "$NODENV_DST_PATH/nodenv" ] ; then
        rm -f "$NODENV_TARGET"
    fi
    if [ -d "$NODENV_DST_PATH" ] ; then
        rm -rf "$NODENV_DST_PATH"
    fi
    # -f keeps removal idempotent; a missing profile hook previously aborted
    # the whole script under `set -eu`.
    rm -f /etc/profile.d/nodenv.sh
    # Drop the AlmaLinux 10 httpd override created by do_install.
    if [ "$OS_NAME" = "AlmaLinux" ] && [ "$OS_SHORT_VERSION" = "10" ] && [ -f "$HTTPD_SYSTEMD_CONF_DIR/nodejs.conf" ]; then
        rm -f "$HTTPD_SYSTEMD_CONF_DIR/nodejs.conf"
        /bin/systemctl --system daemon-reload
    fi
}
do_install()
{
    # Passenger is the app server used to run Node.js applications.
    local packages="passenger"

    [ "$OS_ARCH" = "x86_64" -o "$OS_ARCH" = "aarch64" ] || die "Unsupported architecture"
    # Pick the Apache module / libatomic package names per package manager.
    case "$OS_PKGTYPE" in
        rpm)
            packages+=" mod_passenger libatomic"
            ;;
        deb)
            packages+=" libapache2-mod-passenger-plesk libatomic1"
            ;;
        *)
            die "Unsupported package type $OS_PKGTYPE ($OS_NAME $OS_VERSION): only rpm and deb are supported"
            ;;
    esac
    plesk sbin package --install $packages

    # AlmaLinux 10 ships httpd with MemoryDenyWriteExecute enabled; relax it
    # via a drop-in — presumably required for Passenger-spawned Node processes
    # (writable+executable JIT pages) — TODO confirm.
    if [ "$OS_NAME" = "AlmaLinux" -a "$OS_SHORT_VERSION" = "10" ]; then
        mkdir -p "$HTTPD_SYSTEMD_CONF_DIR"
        cat > "$HTTPD_SYSTEMD_CONF_DIR/nodejs.conf" <<EOF
[Service]
MemoryDenyWriteExecute=no
EOF
        /bin/systemctl --system daemon-reload
    fi

    # Base directory where plesk-node packages place their interpreters.
    mkdir -p "/opt/plesk/node"
    install_nodenv
}
do_remove()
{
    # Only nodenv itself is torn down on removal.
    remove_nodenv
}
# Dispatch: "remove" tears down, anything else installs.
case "${1:-}" in
    remove) do_remove ;;
    *)      do_install ;;
esac

View File

@@ -1,7 +0,0 @@
#!/bin/bash -e
### Copyright 1999-2024. WebPros International GmbH. All rights reserved.
# Rewrite interpreter lines of every file under the tree "$1":
#   - "#!/usr/bin/env node"  -> the node bundled under "$1"
#   - python / env python    -> explicit python2
# "$1" is quoted: unquoted it broke on paths containing whitespace and was
# subject to accidental globbing (SC2086).
find "$1" -type f -print0 | xargs -0 sed -i \
    -e "1s,#!/usr/bin/env node,#!${1}/bin/node,g" \
    -e '1s,^#!\s*/usr/bin/python\($\|\s\),#!/usr/bin/python2\1,' \
    -e '1s,^#!\s*/usr/bin/env\s\+python\($\|\s\),#!/usr/bin/env python2\1,'

View File

@@ -1,4 +0,0 @@
#!/bin/bash -e
### Copyright 1999-2024. WebPros International GmbH. All rights reserved.
# Print the version of the interpreter at "$1" (quoted so a path with
# whitespace survives word splitting).
"$1" -v

View File

@@ -1,4 +0,0 @@
#!/bin/bash -e
### Copyright 1999-2024. WebPros International GmbH. All rights reserved.
$1 install -g $2

View File

@@ -1,10 +0,0 @@
#!/bin/bash -e
### Copyright 1999-2024. WebPros International GmbH. All rights reserved.
[ -d "$2" ] && mv "$2" "$2.back"
mkdir -m 755 "$2"
tar -xf "$1" -C "$2" --unlink-first --strip-components 1 --no-same-owner
find "$2" -type f -print0 | xargs -0 sed -i \
-e "1s,#!/usr/bin/env node,#!${2}/bin/node,g" \
-e '1s,^#!\s*/usr/bin/python\($\|\s\),#!/usr/bin/python2\1,' \
-e '1s,^#!\s*/usr/bin/env\s\+python\($\|\s\),#!/usr/bin/env python2\1,'
[ -d "$2.back" ] && rm -rf "$2.back"

View File

@@ -1,21 +0,0 @@
#!/usr/local/psa/admin/bin/php
<?php
// Notifier extension CLI helper: check connectivity to the given mail host
// on a list of ports and print the comma-separated ports that did NOT
// respond within the timeout (empty line when all are reachable).
require_once('sdk.php');
pm_Context::init('notifier');

if ($argc < 4) {
    $cmd = reset($argv);
    echo "Usage: {$cmd} <host> <ports> <timeout>\n";
    echo "e.g. {$cmd} smtp.gmail.com 25,587 3\n";
    exit(1);
}

$host = $argv[1];
// "25,587" -> array of port strings.
$ports = explode(',', $argv[2]);
// Timeout in seconds, per the usage example above.
$timeout = (int)$argv[3];

$checker = new PleskExt\Notifier\MailPorts\Checker($host, $ports, $timeout);
$list = $checker->run()->detectUnavailablePorts();
echo implode(',', $list) . "\n";
exit(0);

View File

@@ -1,4 +0,0 @@
#!/bin/bash
### Copyright 1999-2024. WebPros International GmbH. All rights reserved.
# Import the GPG key file "$1" only when the legacy key (1bb943db) is
# installed but the newer key (c74cd1d8) is not:
#   - `! rpm -q gpg-pubkey-1bb943db` succeeds when the old key is absent,
#     ending the chain with status 0 (nothing to do);
#   - otherwise `rpm -q gpg-pubkey-c74cd1d8` succeeds when the new key is
#     already present (again nothing to do);
#   - only "old key present AND new key missing" reaches `rpm --import`.
! rpm -q gpg-pubkey-1bb943db || rpm -q gpg-pubkey-c74cd1d8 || rpm --import "$1"

View File

@@ -1,44 +0,0 @@
#!/opt/plesk/python/3/bin/python
# Copyright 1999-2024. Plesk International GmbH. All rights reserved.

# Launcher for the Plesk Migrator backend CLI: adjusts effective group and
# umask so backend (root) and frontend (psaadm) can share files, then hands
# control to the migration CLI entry point.

import platform
import os
import sys

if platform.system() != 'Windows':
    # Configure migrator so:
    # - nobody except root and Plesk user under which migrator is running can read/modify the files.
    # - both backend (running as superuser) and frontend (running as "psaadm") could work with the files.
    # Expected permissions for files created by backend are:
    # rw-rw---- root psaadm
    # Expected permissions for directories created by backend are:
    # rwxrwx--- root psaadm
    # So, frontend could work with files by group "psadm", and backend could work with the files as superuser.
    import grp
    try:
        frontend_group_entry = grp.getgrnam('psaadm')
    except KeyError:
        # No "psaadm" group (e.g. incomplete Plesk install): keep default gid.
        frontend_group_entry = None
    if frontend_group_entry is not None:
        frontend_group_id = frontend_group_entry.gr_gid
        os.setegid(frontend_group_id)
    # Do not allow others to work with files
    os.umask(0o007)

# Resolve symlink chains by hand so the CLI learns the real script location.
# NOTE(review): os.readlink may return a RELATIVE target, which is not
# resolved against the link's directory here — confirm all links are absolute.
execution_path = __file__
while os.path.islink(execution_path):
    execution_path = os.readlink(execution_path)

base_dir = '/usr/local/psa/admin/plib/modules/panel-migrator/backend'
lib_dir = '/usr/local/psa/admin/plib/modules/panel-migrator/backend/lib'
var_dir = '/usr/local/psa/var/modules/panel-migrator'

# Make the bundled migrator libraries importable.
sys.path.extend([os.path.join(lib_dir, 'python')])

from parallels.core.cli.migration_cli import run

if __name__ == '__main__':
    sys.exit(run(base_dir, var_dir, execution_path, sys.argv[1:]))

View File

@@ -1,8 +0,0 @@
#!/bin/bash -e
### Copyright 1999-2017. Plesk International GmbH. All rights reserved.
# Lock down migrator data: root-owned, group psaadm, nothing for "other".
# `-exec … +` batches the chmod/chown invocations instead of one per entry.
find /usr/local/psa/var/modules/panel-migrator -type d -exec chmod 770 '{}' +
find /usr/local/psa/var/modules/panel-migrator -type f -exec chmod 660 '{}' +
find /usr/local/psa/var/modules/panel-migrator -exec chown root:psaadm '{}' +
# Related Plesk data trees must not be world-accessible either.
chmod -R o-rwx /var/lib/psa/dumps
chmod -R o-w /usr/local/psa/admin/share/pmmcli
chmod -R o-w /usr/local/psa/var/apspackages

View File

@@ -1,192 +0,0 @@
#!/bin/bash
### Copyright 1999-2025. Plesk International GmbH. All rights reserved.
set -e
ai_url="${REPOSITORY_URL:-https://autoinstall.plesk.com}"
gpg_key_url="$ai_url/plesk-2025-03-05.gpg"
python_package="plesk-py3"
modules=(
PyYAML==6.0 paramiko==3.4.0 dnspython==2.2.1 imapclient==3.0.1
)
modules_to_check=(
PyYAML paramiko dnspython imapclient
)
python_path="/opt/plesk/python/3/bin/pip"
export DEBIAN_FRONTEND=noninteractive
psa_version=()
IFS=" " read -r -a psa_version <<< "$(cat /usr/local/psa/version)"
if [ "$PLESK_DEBUG" != "1" ] ; then
yum_quiet="-q"
apt_quiet="-qq"
fi
remove_debian_migrator_repository()
{
    # Drop the temporary APT source created during bootstrap.
    rm -f /etc/apt/sources.list.d/plesk-migrator.list
}

remove_centos_migrator_repository()
{
    # Drop the temporary YUM repository created during bootstrap.
    rm -f /etc/yum.repos.d/plesk-migrator.repo
}

remove_debian()
{
    remove_debian_migrator_repository
    # Uninstall the bundled Python only when apt-get is actually available.
    command -v apt-get >/dev/null && apt-get remove -y $apt_quiet "$python_package"
}

remove_centos()
{
    remove_centos_migrator_repository
    command -v yum >/dev/null && yum remove -y $yum_quiet "$python_package"
}
# Defined before use: the original script invoked not_supported from the
# "remove" dispatch below before bash had read the function definition,
# so the unsupported-OS branch failed with "command not found".
not_supported()
{
    echo "Sorry, your operating system $1 is not supported" >&2
    exit 1
}

if [ "$1" = "remove" ] ; then
    case "${psa_version[1]}${psa_version[2]}" in
        Debian*|Ubuntu*) remove_debian ;;
        CentOS*|RedHatel*) remove_centos ;;
        *) not_supported "${psa_version[1]} ${psa_version[2]}" ;;
    esac
    exit 0
fi
check_installed_debian()
{
    # Succeed only when dpkg reports the package as installed.
    dpkg -s "$python_package" > /dev/null && return
    echo "Package $python_package was not installed" >&2
    exit 1
}

check_installed_centos()
{
    # Same check via the RPM database.
    rpm -q "$python_package" > /dev/null && return
    echo "Package $python_package was not installed" >&2
    exit 1
}

install_python_modules_by_pip()
{
    # Pin exact versions; silence pip's progress output.
    "$python_path" install "${modules[@]}" > /dev/null
}
check_installed_python_modules()
{
    # Verify every required module is visible to pip.  Uses `return` instead
    # of the original `exit`, which terminated the script even on success and
    # made the final "Done" message unreachable.
    local has_missing_modules=0
    local module
    for module in "${modules_to_check[@]}"; do
        if ! $python_path show "$module" > /dev/null
        then
            echo "Python module $module was not installed" >&2
            has_missing_modules=1
        fi
    done
    return $has_missing_modules
}
bootstrap_debian()
{
    # $1 = repo OS name ("debian"/"ubuntu"), $2 = codename,
    # $3 = the literal string "true"/"false": whether `plesk sbin repository`
    #      is available, $4 = repository path component,
    # $5 = non-empty to allow unauthenticated APT downloads.
    local os_name="$1"
    local os_version="$2"
    local plesk_can_manage_repos="$3"
    local url_path="$4"
    # Fix: compare against the literal "true".  The original bare
    # [[ $plesk_can_manage_repos ]] is true for the non-empty string "false"
    # as well, so the manual APT fallback below was unreachable.
    [ "$plesk_can_manage_repos" = true ] && remove_debian_migrator_repository
    local arch_spec=""
    case "$(uname -m)" in
        x86_64) arch_spec="[arch=amd64]" ;;
        aarch64) arch_spec="[arch=arm64]" ;;
    esac
    [[ -z "$5" ]] || echo 'APT::Get::AllowUnauthenticated "true";' > "/etc/apt/apt.conf.d/plesk-migrator"
    echo "Bootstrapping dependencies for $os_name $os_version..."
    if [ "$plesk_can_manage_repos" = true ]; then
        # Plesk manages the repository lifecycle itself.
        plesk sbin repository --create --persistent --source "deb $arch_spec $ai_url/$url_path $os_version all" --gpg-key "$gpg_key_url"
        plesk sbin package --install "$python_package"
        plesk sbin repository --delete --persistent --source "deb $arch_spec $ai_url/$url_path $os_version all"
    else
        # Older Plesk: maintain a throw-away sources.list entry by hand.
        echo "deb $arch_spec $ai_url/$url_path $os_version all" > /etc/apt/sources.list.d/plesk-migrator.list
        apt-get update $apt_quiet -o Dir::Etc::sourcelist="sources.list.d/plesk-migrator.list" \
            -o Dir::Etc::sourceparts="-" \
            -o APT::Get::List-Cleanup="0"
        apt-get install -y $apt_quiet -o APT::Install-Suggests=false -o APT::Install-Recommends=false "$python_package"
    fi
    check_installed_debian
    install_python_modules_by_pip
    check_installed_python_modules
}
bootstrap_redhat()
{
    # $1 = repo OS name, $2 = repo OS version ("7"/"el8"/...),
    # $3 = the literal string "true"/"false": whether `plesk sbin repository`
    #      is available, $4 = repository path component.
    local os_name="$1"
    local os_version="$2"
    local plesk_can_manage_repos="$3"
    local url_path="$4"
    # Fix: compare against the literal "true" — the original bare [[ $var ]]
    # was also true for "false", making the yum fallback below unreachable.
    [ "$plesk_can_manage_repos" = true ] && remove_centos_migrator_repository
    local os_arch
    os_arch="$(uname -m)"
    # Any non-64-bit architecture maps onto the i386 repository.
    [[ "$(uname -m)" =~ .*64$ ]] || os_arch='i386'
    echo "Bootstrapping dependencies for centos $os_version $os_arch..."
    if [ "$plesk_can_manage_repos" = true ]; then
        plesk sbin repository --create --persistent \
            --baseurl "$ai_url/$url_path/dist-rpm-$os_name-$os_version-$os_arch" \
            --gpg-key "$gpg_key_url"
        plesk sbin package --install "$python_package"
    else
        # Older Plesk: write the yum repo file directly.
        cat > /etc/yum.repos.d/plesk-migrator.repo <<EOF
[plesk-migrator]
name=Plesk packages for migrator
baseurl=$ai_url/$url_path/dist-rpm-$os_name-$os_version-$os_arch
enabled=1
gpgcheck=1
gpgkey=$gpg_key_url
EOF
        yum install -y $yum_quiet "$python_package"
    fi
    check_installed_centos
    install_python_modules_by_pip
    check_installed_python_modules
}
# Repository path component holding the migrator dependency packages.
pmm_repo="PMM_1.1.0"

# Split the dotted Plesk version to detect `plesk sbin repository` support
# (available since Plesk 17 / Onyx).
plesk_version=()
IFS="." read -r -a plesk_version <<< "${psa_version[0]}"
# NOTE(review): this yields the literal string "true" or "false"; consumers
# must compare against the string, not test for non-emptiness.
[[ ${plesk_version[0]} -ge 17 ]] && plesk_can_manage_repos=true || plesk_can_manage_repos=false

# Map the detected OS onto the matching bootstrap routine and repo labels.
case "${psa_version[1]}${psa_version[2]}" in
    Debian10*) bootstrap_debian "debian" "buster" $plesk_can_manage_repos "$pmm_repo" ;;
    Debian11*) bootstrap_debian "debian" "bullseye" $plesk_can_manage_repos "$pmm_repo" ;;
    Debian12*) bootstrap_debian "debian" "bookworm" $plesk_can_manage_repos "$pmm_repo" ;;
    Debian13*) bootstrap_debian "debian" "trixie" $plesk_can_manage_repos "$pmm_repo" ;;
    Ubuntu18.04) bootstrap_debian "ubuntu" "bionic" $plesk_can_manage_repos "$pmm_repo" ;;
    Ubuntu20.04) bootstrap_debian "ubuntu" "focal" $plesk_can_manage_repos "$pmm_repo" ;;
    Ubuntu22.04) bootstrap_debian "ubuntu" "jammy" $plesk_can_manage_repos "$pmm_repo" ;;
    Ubuntu24.04) bootstrap_debian "ubuntu" "noble" $plesk_can_manage_repos "$pmm_repo" ;;
    CentOS7*|RedHatel7*) bootstrap_redhat "CentOS" "7" $plesk_can_manage_repos "$pmm_repo" ;;
    RedHatel8*) bootstrap_redhat "RedHat" "el8" $plesk_can_manage_repos "$pmm_repo" ;;
    RedHatel9*) bootstrap_redhat "RedHat" "el9" $plesk_can_manage_repos "$pmm_repo" ;;
    RedHatel10*) bootstrap_redhat "RedHat" "el10" $plesk_can_manage_repos "$pmm_repo" ;;
    *) not_supported "${psa_version[1]} ${psa_version[2]}" ;;
esac
echo "Done"

View File

@@ -1,5 +0,0 @@
#!/bin/bash -e
### Copyright 1999-2017. Plesk International GmbH. All rights reserved.
# The panel user owns its migrator data directory; 755 (u=rwx,g=rx,o=rx)
# keeps it readable by everyone.
chown psaadm:psaadm /usr/local/psa/var/modules/panel-migrator/
chmod 755 /usr/local/psa/var/modules/panel-migrator/

View File

@@ -1,17 +0,0 @@
@echo off
rem Launcher for the Plesk Migrator backend on Windows: read the Plesk data
rem directory from the 32-bit registry view ("tokens=3*" keeps values that
rem contain spaces: %%i gets the first word, %%j the remainder).
for /f "skip=2 tokens=3*" %%i IN ('reg query "HKLM\SOFTWARE\Plesk\PSA Config\Config" /v PRODUCT_DATA_D /reg:32') do set plesk_data_path=%%i %%j
call :TRIM %plesk_data_path% plesk_data_path
set PLESK_EXTENSION_ID=panel-migrator
set plesk_migrator_var_path=%plesk_data_path%var\modules\panel-migrator
set plesk_python_path=%plesk_migrator_var_path%\python\python.exe
rem NOTE(review): %plesk_dir% is not set anywhere in this file — presumably
rem inherited from the caller's environment; confirm.
set plesk_migrator_backend_path=%plesk_dir%admin\plib\modules\panel-migrator\backend\plesk-migrator.py
"%plesk_python_path%" "%plesk_migrator_backend_path%" "%plesk_migrator_var_path%" %*
goto :EOF

rem Strip surrounding whitespace by re-expanding %1 into the variable
rem named by %2.
:TRIM
SET %2=%1
goto :EOF

View File

@@ -1,3 +0,0 @@
#!/bin/sh
# Discard accumulated PMM (backup/migration manager) logs.
rm -rf -- /var/log/plesk/PMM

View File

@@ -1,9 +0,0 @@
#!/usr/bin/env bash
### Copyright 1999-2025. WebPros International GmbH. All rights reserved.
main() {
systemctl daemon-reload
exit $?
}
main "$@"

View File

@@ -1,11 +0,0 @@
#!/usr/bin/env bash
### Copyright 1999-2025. WebPros International GmbH. All rights reserved.
main() {
local daemon="${1:?"Daemon name must be specified as the first parameter"}"
systemctl show "${daemon}" --property=LimitNOFILE
exit $?
}
main "$@"

View File

@@ -1,4 +0,0 @@
#!/bin/bash -e
### Copyright 2025 WebPros International GmbH. All rights reserved.
declare -r FILE="${1?File is not set}"
realpath "$FILE"

View File

@@ -1,4 +0,0 @@
#!/bin/bash -e
### Copyright 2025 WebPros International GmbH. All rights reserved.
declare -r FILE="${1?File is not set}"
stat -c %a "$FILE"

View File

@@ -1,2 +0,0 @@
:: Copyright 1999-2024. WebPros International GmbH. All rights reserved.
plesk installer %*

View File

@@ -1,6 +0,0 @@
:: Copyright 1999-2024. WebPros International GmbH. All rights reserved.
@echo off
rem Copyright 1999-2019. Plesk International GmbH. All rights reserved.
set args=%*
plesk bin %args%

View File

@@ -1,4 +0,0 @@
#!/bin/bash -e
### Copyright 1999-2024. WebPros International GmbH. All rights reserved.
plesk bin "$@"

View File

@@ -1,74 +0,0 @@
#!/bin/bash
set -eu
die ()
{
    # Report the message on stderr and stop with failure.
    printf '%s\n' "$*" >&2
    exit 1
}
do_install ()
{
    # Packages common to both package families; per-OS extras appended below.
    local packages="plesk-rbenv passenger"
    local plesk_ruby_version="1.5.0"
    # Note: EoL Ruby versions are still built but not installed by default
    local versions="3.2.9 3.3.9 3.4.5"
    local base_url="${REPOSITORY_URL:-https://autoinstall.plesk.com}"
    local gpg_key_url="$base_url/plesk-2025-03-05.gpg"

    [ "$OS_ARCH" = "x86_64" -o "$OS_ARCH" = "aarch64" ] || die "Unsupported architecture"
    case "$OS_PKGTYPE" in
        rpm)
            # Plesk's RPM repositories label CentOS 7 differently from el8+.
            local os_name
            local os_version
            if [ "$OS_SHORT_VERSION" = "7" ]; then
                os_name="CentOS"
                os_version="7"
            else
                os_name="RedHat"
                os_version="el$OS_SHORT_VERSION"
            fi
            plesk sbin repository --create --persistent \
                --baseurl "$base_url/RUBY_${plesk_ruby_version}/dist-rpm-$os_name-$os_version-$OS_ARCH" \
                --gpg-key "$gpg_key_url"
            packages+=" mod_passenger"
            ;;
        deb)
            local arch_spec=
            case "$OS_ARCH" in
                x86_64) arch_spec="[arch=amd64]" ;;
                aarch64) arch_spec="[arch=arm64]" ;;
            esac
            plesk sbin repository --create --persistent \
                --source "deb $arch_spec $base_url/RUBY_${plesk_ruby_version} $OS_CODENAME all" \
                --gpg-key "$gpg_key_url"
            packages+=" libapache2-mod-passenger-plesk"
            ;;
        *)
            die "Unsupported package type $OS_PKGTYPE ($OS_NAME $OS_VERSION): only rpm and deb are supported"
            ;;
    esac

    # One plesk-ruby package per supported interpreter version.
    for v in $versions; do
        packages+=" plesk-ruby$v"
    done

    plesk sbin package --install $packages
    plesk sbin httpd_modules_ctl --enable 'passenger'
    # since Plesk 18.0.57 the nginx passenger module is disabled by default
    # before Plesk 18.0.57 nginx_modules_ctl doesn't rule passenger module
    # and this is the reason why the error is skipped here
    plesk sbin nginx_modules_ctl --enable 'phusion-passenger' || true
}
# "remove" is intentionally a no-op; everything else installs.
case "${1:-}" in
    remove) : ;;
    *)      do_install ;;
esac

View File

@@ -1,9 +0,0 @@
#!/usr/bin/env bash
# Copyright 1999-2024. WebPros International GmbH. All rights reserved.
main() {
killall monit || :
killall wdcollect || :
}
main "$@"

View File

@@ -1,18 +0,0 @@
#!/usr/local/psa/bin/sw-engine-pleskrun
<?php
// Copyright 1999-2024. WebPros International GmbH. All rights reserved.
try {
require_once('sdk.php');
require_once("modules/watchdog/wdcplib.php");
wd__db_connect();
$database = Database::build();
$setup = new Setup($database->getParam(), $database->getDisk(), $database->getService(), pm_Bootstrap::getDbAdapter());
$setup->run();
} catch (Exception $e) {
echo $e->getMessage() . "\n";
exit(1);
}

View File

@@ -1,22 +0,0 @@
#!/usr/bin/env bash
# Copyright 1999-2024. WebPros International GmbH. All rights reserved.
main() {
local command="${1:?"Command must be specified as the first parameter"}"
case "$command" in
fix-permissions)
chown psaadm:psaadm -R /usr/local/psa/var/modules/watchdog/
exit $?
;;
drop-rkhunter)
killall rkhunter || :
exit 0
;;
*)
echo "Unknown command: ${command}" >&2
exit 1
;;
esac
}
main "$@"

View File

@@ -1,14 +0,0 @@
#!/bin/sh
# Copyright 1999-2024. WebPros International GmbH. All rights reserved.
df -P 2>/dev/null| grep '^/dev\|^vzfs[[:space:]]' | while read fs_entry; do
device=`echo "$fs_entry" | awk '{print $1}'`
fs=`echo "$fs_entry" | awk '{print $6}'`
if [ "$device" = "vzfs" ]; then
device="/dev/vzfs"
fi
[ -e "$device" ] || continue
echo "$device $fs"
done
exit 0

View File

@@ -1,4 +0,0 @@
#!/bin/sh
# Copyright 1999-2025. WebPros International GmbH. All rights reserved.
exec /usr/bin/monit "$@"

View File

@@ -1,432 +0,0 @@
#!/usr/local/psa/bin/sw-engine-pleskrun
<?php
// Copyright 1999-2024. WebPros International GmbH. All rights reserved.
use PleskExt\Watchdog\System\Services;
require_once('sdk.php');
require("modules/watchdog/wdlib.php");
//------------------------------------------------------------------------------
// Raised when the command line cannot be parsed; the top-level handler
// prints the message, shows usage() and exits with status 1.
class ParsCmdLineErr extends WDExc
{
}
//------------------------------------------------------------------------------
// Dispatch to main(); parse errors additionally print the usage reference.
try {
    main($argc, $argv);
} catch (ParsCmdLineErr $e) {
    echo $e->getMessage() . "\n";
    usage();
    exit(1);
} catch (Exception $e) {
    echo $e->getMessage() . "\n";
    exit(1);
}
exit(0);
//------------------------------------------------------------------------------
function main($argc, $argv)
{
    // Map of every accepted "--option" to whether it requires a "=value".
    $access_opts = array(
        'start' => false,
        'stop' => false,
        'restart' => false,
        'full-restart' => false,
        'start-monit' => false,
        'stop-monit' => false,
        'restart-monit' => false,
        'full-restart-monit' => false,
        'start-wdcollect' => false,
        'stop-wdcollect' => false,
        'restart-wdcollect' => false,
        'full-restart-wdcollect' => false,
        'adapt' => false,
        'regen-all' => false,
        'regen-monitrc' => false,
        'ping-monit' => false,
        'ping-wdcollect' => false,
        'plesk-name' => false,
        'monit-service' => true,
        'unmonit-service' => true,
        'start-service' => true,
        'stop-service' => true,
        'restart-service' => true,
        'service-status' => true,
        'service-monit-status' => true,
        'monit-disk' => true,
        'space-rate' => true,
        'inods-rate' => true,
        'command' => true,
        'unmonit-disk' => true,
        'disk-status' => true,
        'disk-monit-status' => true,
    );

    if (!is_plesk_configured()) {
        // non fatal error
        echo "Plesk is not configured. You should go to the Server Administration Panel and configure it.\n";
        return 0;
    }

    // $argv[0] is the program name, not an option.
    unset($argv[0]);
    $opts = parse_cmd_line($argv, $access_opts);

    wd__db_connect();

    $is_plesk_name = isset($opts['plesk-name']);
    // Thresholds are parsed and validated up front; defaults are 80 percent.
    $space_rate = $space_rate_unit = $inods_rate = $inods_rate_unit = null;
    i__set_space_rate($opts, $space_rate, $space_rate_unit);
    i__set_inods_rate($opts, $inods_rate, $inods_rate_unit);
    $command = !isset($opts['command']) ? null : $opts['command'];
    $services = new Services();

    // Exactly one action option is honored, in the priority order below.
    if (isset($opts['start']))
        start_wd();
    elseif (isset($opts['stop']))
        stop_wd();
    elseif (isset($opts['restart']))
        restart_wd();
    elseif (isset($opts['full-restart']))
        restart_wd(true);
    elseif (isset($opts['start-monit']))
        start_monit();
    elseif (isset($opts['stop-monit']))
        stop_monit();
    elseif (isset($opts['restart-monit']))
        restart_monit();
    elseif (isset($opts['full-restart-monit']))
        restart_monit(true);
    elseif (isset($opts['start-wdcollect']))
        start_wdcollect();
    elseif (isset($opts['stop-wdcollect']))
        stop_wdcollect();
    elseif (isset($opts['restart-wdcollect']))
        restart_wdcollect();
    elseif (isset($opts['full-restart-wdcollect']))
        restart_wdcollect(true);
    elseif (isset($opts['adapt']))
        restart_wd(true, true);
    elseif (isset($opts['regen-all'])) {
        create_monit_config();
    } elseif (isset($opts['regen-monitrc']))
        create_monit_config();
    elseif (isset($opts['ping-monit']))
        f_ping_monit();
    elseif (isset($opts['ping-wdcollect']))
        f_ping_wdcollect();
    elseif (isset($opts['monit-service']))
        monit_service(norm_service_name($services, $opts['monit-service'], $is_plesk_name));
    elseif (isset($opts['unmonit-service']))
        unmonit_service(norm_service_name($services, $opts['unmonit-service'], $is_plesk_name));
    elseif (isset($opts['service-status']))
        f_service_status(norm_service_name($services, $opts['service-status'], $is_plesk_name));
    elseif (isset($opts['service-monit-status']))
        f_service_monit_status(norm_service_name($services, $opts['service-monit-status'], $is_plesk_name));
    elseif (isset($opts['monit-disk'])) {
        // Extra validation for disk monitoring: percent thresholds must lie
        // in [20, 100]; any threshold must be positive; a custom command is
        // sanity-checked before being embedded into the monit config.
        if ('percent' === $space_rate_unit && (20 > $space_rate || 100 < $space_rate)) {
            throw new ParsCmdLineErr("Invalid value $space_rate for parameter 'space_rate'");
        }
        if ('percent' === $inods_rate_unit && (20 > $inods_rate || 100 < $inods_rate)) {
            throw new ParsCmdLineErr("Invalid value $inods_rate for parameter 'inods_rate'");
        }
        if (1 > $space_rate) {
            throw new ParsCmdLineErr("Invalid value $space_rate for parameter 'space_rate': Enter the positive number.");
        }
        if (1 > $inods_rate) {
            throw new ParsCmdLineErr("Invalid value $inods_rate for parameter 'inods_rate': Enter the positive number.");
        }
        if (!empty($command) && !isValidCommandForMonit($command)) {
            throw new ParsCmdLineErr("Impossible executable command $command: Enter command without the \" and which reference to executable file"); // to check
        }
        monit_disk($opts['monit-disk'], $space_rate, $space_rate_unit, $inods_rate, $inods_rate_unit, $command);
    }
    elseif (isset($opts['unmonit-disk']))
        unmonit_disk($opts['unmonit-disk']);
    elseif (isset($opts['disk-status']))
        f_disk_status($opts['disk-status']);
    elseif (isset($opts['disk-monit-status']))
        f_disk_monit_status($opts['disk-monit-status']);
    else
        usage();
}
function usage()
{
    // Human-readable reference for every option accepted by main().
    echo "Usage: wd <options>
Options are as follows:
--start Start all Watchdog services
--stop Stop Watchdog and all its services
--restart Restart Watchdog
--full-restart Rebuild configuration file and restart
--start-monit Start the Monit service
--stop-monit Stop the Monit service
--restart-monit Restart the Monit service
--full-restart-monit Rebuild configuration file and restart the Monit service
--start-wdcollect Start the wdcollect service
--stop-wdcollect Stop the wdcollect service
--restart-wdcollect Restart the wdcollect service
--full-restart-wdcollect Rebuild configuration file and restart the wdcollect service
--adapt Reconfigure the module and restart the monitoring service with new settings
--regen-all Create all configuration files
--regen-monitrc Create configuration file for the Monit service
--ping-monit Check via the network if the Monit service is working
--ping-wdcollect Check via the network if the the wdcollect service is working
--monit-service=<service> Enable monitoring service
--unmonit-service=<service> Disable monitoring service
--service-status=<service> Return status of monitored service
--service-monit-status=<service> Return the system monitoring status
[ --plesk-name ] Service name as shown in the Plesk control panel
--monit-disk=<device> Start monitoring a partition
[ --space-rate=<space_rate> ] (%, Gb, Mb)
[ --inods-rate=<inods_rate> ] (%, file)
[ --command=<command> ] Define a command to run when disk space usage reaches the defined threshold
--unmonit-disk Stop monitoring partition
--disk-status=<device> Return status of monitored partition
--disk-monit-status=<device> Return partition monitoring status\n";
}
function f_ping_monit()
{
    // Localized running/stopped verdict after waiting up to 30s for monit.
    $up = monit_wait_start(30);
    echo pm_Locale::lmsg($up ? 'DNT.statusRunning' : 'DNT.statusStopped') . "\n";
}

function f_ping_wdcollect()
{
    // Same network liveness probe for the wdcollect service.
    $up = wdcollect_wait_start(30);
    echo pm_Locale::lmsg($up ? 'DNT.statusRunning' : 'DNT.statusStopped') . "\n";
}

function f_service_status($srv_name)
{
    $status = i__get_service_status($srv_name);
    echo get_service_wd_status($status);
}

function f_service_monit_status($srv_name)
{
    $status = i__get_service_status($srv_name);
    echo get_service_monit_wd_status($status);
}

function f_disk_status($device)
{
    $status = i__get_disk_status($device);
    echo get_disk_wd_status($status);
}

function f_disk_monit_status($device)
{
    $status = i__get_disk_status($device);
    echo get_disk_monit_wd_status($status);
}
//------------------------------------------------------------------------------
function i__set_space_rate($opts, &$space_rate, &$space_rate_unit)
{
    // Parse --space-rate into (value, unit); defaults to 80 percent.
    if (!isset($opts['space-rate'])) {
        $space_rate = 80;
        $space_rate_unit = 'percent';
        return;
    }
    // sscanf fills the by-ref args and returns how many matched:
    // 1 = bare number (treated as percent), 2 = number plus unit suffix.
    $m = sscanf($opts['space-rate'], "%f%s", $space_rate, $space_rate_unit);
    if (1 > $m) {
        throw new ParsCmdLineErr("Disk space rate is set incorrectly");
    }
    $space_rate_unit = 1 == $m ? "percent" : i__norm_space_unit($space_rate_unit);
    if (false === $space_rate_unit) {
        throw new ParsCmdLineErr("Disk space rate is set incorrectly");
    }
    // Percent thresholds must lie in [20, 100]; absolute ones just positive.
    if ("percent" == $space_rate_unit && (20 > $space_rate || 100 < $space_rate)) {
        throw new ParsCmdLineErr("Disk space usage threshold has been set incorrectly: Enter a value from 20 to 100");
    }
    if (1 > $space_rate) {
        throw new ParsCmdLineErr("Disk space usage threshold has been set incorrectly: Enter a value greater than 0");
    }
}
function i__set_inods_rate($opts, &$inods_rate, &$inods_rate_unit)
{
    // Parse --inods-rate into (value, unit); defaults to 80 percent.
    if (!isset($opts['inods-rate'])) {
        $inods_rate = 80;
        $inods_rate_unit = 'percent';
        return;
    }
    // 1 = bare number (percent), 2 = number plus unit suffix.
    $m = sscanf($opts['inods-rate'], "%f%s", $inods_rate, $inods_rate_unit);
    if (1 > $m) {
        throw new ParsCmdLineErr("Files number rate is set incorrectly");
    }
    // Bare numbers count files; only "%"/"percent" is a valid suffix.
    $inods_rate_unit = 1 == $m ? "file" : i__norm_inods_unit($inods_rate_unit);
    if (false === $inods_rate_unit) {
        throw new ParsCmdLineErr("Files number rate is set incorrectly");
    }
    if ("percent" == $inods_rate_unit && (20 > $inods_rate || 100 < $inods_rate)) {
        throw new ParsCmdLineErr("Files number threshold has been set incorrectly: Enter a value from 20 to 100");
    }
    if (1 > $inods_rate) {
        throw new ParsCmdLineErr("Files number threshold has been set incorrectly: Enter a value greater than 0");
    }
}
function i__norm_space_unit($space_rate_unit)
{
    // Map each accepted alias (case-insensitive) onto its canonical unit
    // name; false signals an unknown unit.
    if ("%" == $space_rate_unit) {
        return "percent";
    }
    $aliases = array(
        'percent' => 'percent',
        'gb' => 'gigabyte', 'gigabyte' => 'gigabyte',
        'mb' => 'megabyte', 'megabyte' => 'megabyte',
        'kb' => 'kilobyte', 'kilobyte' => 'kilobyte',
        'b' => 'byte', 'byte' => 'byte',
    );
    $key = strtolower($space_rate_unit);
    return isset($aliases[$key]) ? $aliases[$key] : false;
}
function i__norm_inods_unit($inods_rate_unit)
{
    // Inode thresholds only support percentages; false for anything else.
    $is_percent = ("%" == $inods_rate_unit) || !stricmp("percent", $inods_rate_unit);
    return $is_percent ? "percent" : false;
}
function stricmp($s1, $s2)
{
    // Case-insensitive comparison over the longer of the two lengths,
    // so a mere prefix match is not treated as equality.
    $span = max(strlen($s1), strlen($s2));
    return strncasecmp($s1, $s2, $span);
}
function i__get_service_status($srv_name)
{
    // Look the service up in the monitoring status snapshot.
    $stats = get_monitoring_status();
    $entry = isset($stats['services'][$srv_name]) ? $stats['services'][$srv_name] : null;
    if (null === $entry) {
        throw new WDExc("You did not configure monitoring for the $srv_name service");
    }
    if (!is_service($entry['type'])) {
        throw new WDExc("$srv_name is not a service");
    }
    return $entry;
}
function i__get_disk_status($device)
{
    // Status entries are keyed without the leading slash of the device path.
    $device_name = (0 === strncmp($device, "/", 1)) ? substr($device, 1) : $device;
    $stats = get_monitoring_status();
    $entry = isset($stats['services'][$device_name]) ? $stats['services'][$device_name] : null;
    if (null === $entry) {
        throw new WDExc("You did not configure monitoring for the $device device");
    }
    if (!is_disk($entry['type'])) {
        throw new WDExc("$device is not a device");
    }
    return $entry;
}
function norm_service_name(Services $services, $service_name, $is_plesk_name)
{
    // Translate a Plesk panel service name into the watchdog-internal one;
    // without --plesk-name the input is already internal.
    if (!$is_plesk_name) {
        return $service_name;
    }
    $wd_name = $services->getWatchdogServiceName($service_name);
    if (null === $wd_name) {
        throw new WDExc("There is no $service_name service in Plesk control panel");
    }
    return $wd_name;
}
function parse_cmd_line($argv, $all_opts)
{
    // Validate at most 100 options; later duplicates override earlier ones.
    $opts = array();
    $parsed = 0;
    foreach ($argv as $arg) {
        if ($parsed >= 100) {
            break;
        }
        $opt = parse_option($arg);
        check_option_on_access($opt, $all_opts);
        $opts[$opt['name']] = $opt['value'];
        $parsed++;
    }
    return $opts;
}
/**
 * Validate a parsed option against the table of known options.
 *
 * $all_opts maps option name => truthy when the option requires an
 * argument. A boolean option value means "no argument was supplied"
 * (see parse_option). Raises ParsCmdLineErr on any mismatch.
 */
function check_option_on_access($opt, $all_opts)
{
    $name = $opt['name'];
    if (!isset($all_opts[$name])) {
        throw new ParsCmdLineErr("Unknown option '$name'");
    }
    $requires_arg = (bool) $all_opts[$name];
    $has_arg = !is_bool($opt['value']);
    if ($requires_arg && !$has_arg) {
        throw new ParsCmdLineErr("Option '$name' requires an argument");
    }
    if (!$requires_arg && $has_arg) {
        throw new ParsCmdLineErr("Option '$name' does not require arguments");
    }
}
/**
 * Parse a single "--name" or "--name=value" token.
 *
 * Returns array('name' => string, 'value' => string|true); the value is
 * boolean true when no "=value" part is present. Raises ParsCmdLineErr
 * for operands that do not start with "--" or are malformed.
 */
function parse_option($opt)
{
    if (strncmp("--", $opt, 2)) {
        throw new ParsCmdLineErr("Unknown operand '$opt'");
    }
    if (false !== ($p = strpos($opt, "=", 2))) {
        $name = substr($opt, 2, $p - 2);
        $val = substr($opt, $p + 1);
    } else {
        $name = substr($opt, 2);
        $val = true;
    }
    // Strict emptiness checks instead of empty(): empty("0") is true in
    // PHP, which wrongly rejected legitimate values such as "--retries=0".
    // The (string) cast also normalizes the pre-PHP8 substr() false result
    // for a bare "--" back into an empty string so it is still rejected.
    if ((string) $name === '' || (is_string($val) && $val === '')) {
        throw new ParsCmdLineErr("Option '$opt' is set up incorrectly");
    }
    return array('name' => $name, 'value' => $val);
}

View File

@@ -1,59 +0,0 @@
#!/usr/bin/env bash
# Copyright 1999-2024. WebPros International GmbH. All rights reserved.
# Refuse to act on anything but the two watchdog services.
# An empty name is accepted here; the caller checks for it separately.
validate_service() {
  local service="$1"
  case "$service" in
    "" | monit | wdcollect)
      ;;
    *)
      echo "Refusing to act on unknown service $service" >&2
      exit 2
      ;;
  esac
}
# Dispatch a systemd management action for the watchdog services.
#   $1 - action: start | stop | status | enabled | reload-systemd
#   $2 - service name (monit or wdcollect); not used by reload-systemd
# Exit status: propagates systemctl's status for start/stop; 1 on bad
# arguments; 2 (via validate_service) on an unknown service name.
main() {
  local action="${1:?"Command must be specified as the first parameter"}"
  local service="$2"
  if [[ "$action" != "reload-systemd" && -z "$service" ]]; then
    echo "Service must be specified as the second parameter" >&2
    exit 1
  fi
  case "$action" in
    start)
      validate_service "$service"
      # Ubuntu 18.04 does not support `enable --now` for init.d services.
      systemctl enable "${service}.service" && systemctl start "${service}.service"
      exit $?
      ;;
    stop)
      validate_service "$service"
      # Ubuntu 18.04 does not support `disable --now` for init.d services.
      systemctl disable "${service}.service" && systemctl stop "${service}.service"
      exit $?
      ;;
    status)
      # NOTE(review): status/enabled do not call validate_service — presumably
      # intentional so arbitrary units can be queried; confirm before changing.
      if systemctl is-active -q "${service}.service"; then
        echo "is active"
      else
        echo "is inactive"
      fi
      ;;
    enabled)
      if systemctl is-enabled -q "${service}.service"; then
        echo "is enabled"
      else
        echo "is disabled"
      fi
      ;;
    reload-systemd)
      systemctl daemon-reload
      ;;
    *)
      echo "Unknown command: ${action}" >&2
      # Fixed: the usage line previously listed the arguments in the wrong
      # order (service before action) and omitted enabled/reload-systemd.
      echo "Usage: $0 <start|stop|status|enabled|reload-systemd> [monit|wdcollect]" >&2
      exit 1
      ;;
  esac
}
main "$@"