
Merge branch 'staging' into staging

Patrick Schult
2023-12-11 16:28:05 +01:00
committed by GitHub
231 changed files with 15439 additions and 8892 deletions

View File

@@ -1,6 +1,6 @@
FROM alpine:3.17
LABEL maintainer "Andre Peters <andre.peters@servercow.de>"
LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
RUN apk upgrade --no-cache \
&& apk add --update --no-cache \

View File

@@ -213,11 +213,13 @@ while true; do
done
ADDITIONAL_WC_ARR+=('autodiscover' 'autoconfig')
+if [[ ${SKIP_IP_CHECK} != "y" ]]; then
# Start IP detection
log_f "Detecting IP addresses..."
IPV4=$(get_ipv4)
IPV6=$(get_ipv6)
log_f "OK: ${IPV4}, ${IPV6:-"0000:0000:0000:0000:0000:0000:0000:0000"}"
+fi
#########################################
# IP and webroot challenge verification #

View File

@@ -1,6 +1,6 @@
-FROM clamav/clamav:1.0_base
+FROM clamav/clamav:1.0.3_base
-LABEL maintainer "André Peters <andre.peters@servercow.de>"
+LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
RUN apk upgrade --no-cache \
&& apk add --update --no-cache \

View File

@@ -1,6 +1,6 @@
FROM alpine:3.17
LABEL maintainer "Andre Peters <andre.peters@servercow.de>"
LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
WORKDIR /app
@@ -13,9 +13,13 @@ RUN apk add --update --no-cache python3 \
fastapi \
uvicorn \
aiodocker \
-redis
+docker \
+aioredis
+RUN mkdir /app/modules
COPY docker-entrypoint.sh /app/
-COPY dockerapi.py /app/
+COPY main.py /app/main.py
+COPY modules/ /app/modules/
ENTRYPOINT ["/bin/sh", "/app/docker-entrypoint.sh"]
+CMD exec python main.py

View File

@@ -6,4 +6,4 @@
-subj /CN=dockerapi/O=mailcow \
-addext subjectAltName=DNS:dockerapi
-uvicorn --host 0.0.0.0 --port 443 --ssl-certfile=/app/dockerapi_cert.pem --ssl-keyfile=/app/dockerapi_key.pem dockerapi:app
+exec "$@"

View File

@@ -1,623 +0,0 @@
from fastapi import FastAPI, Response, Request
import aiodocker
import psutil
import sys
import re
import time
import os
import json
import asyncio
import redis
from datetime import datetime
containerIds_to_update = []
host_stats_isUpdating = False
app = FastAPI()
@app.get("/host/stats")
async def get_host_update_stats():
global host_stats_isUpdating
if host_stats_isUpdating == False:
print("start host stats task")
asyncio.create_task(get_host_stats())
host_stats_isUpdating = True
while True:
if redis_client.exists('host_stats'):
break
print("wait for host_stats results")
await asyncio.sleep(1.5)
print("host stats pulled")
stats = json.loads(redis_client.get('host_stats'))
return Response(content=json.dumps(stats, indent=4), media_type="application/json")
@app.get("/containers/{container_id}/json")
async def get_container(container_id : str):
if container_id and container_id.isalnum():
try:
for container in (await async_docker_client.containers.list()):
if container._id == container_id:
container_info = await container.show()
return Response(content=json.dumps(container_info, indent=4), media_type="application/json")
res = {
"type": "danger",
"msg": "no container found"
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
except Exception as e:
res = {
"type": "danger",
"msg": str(e)
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
else:
res = {
"type": "danger",
"msg": "no or invalid id defined"
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
@app.get("/containers/json")
async def get_containers():
containers = {}
try:
for container in (await async_docker_client.containers.list()):
container_info = await container.show()
containers.update({container_info['Id']: container_info})
return Response(content=json.dumps(containers, indent=4), media_type="application/json")
except Exception as e:
res = {
"type": "danger",
"msg": str(e)
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
@app.post("/containers/{container_id}/{post_action}")
async def post_containers(container_id : str, post_action : str, request: Request):
try :
request_json = await request.json()
except Exception as err:
request_json = {}
if container_id and container_id.isalnum() and post_action:
try:
"""Dispatch container_post api call"""
if post_action == 'exec':
if not request_json or not 'cmd' in request_json:
res = {
"type": "danger",
"msg": "cmd is missing"
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
if not request_json or not 'task' in request_json:
res = {
"type": "danger",
"msg": "task is missing"
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
api_call_method_name = '__'.join(['container_post', str(post_action), str(request_json['cmd']), str(request_json['task']) ])
else:
api_call_method_name = '__'.join(['container_post', str(post_action) ])
docker_utils = DockerUtils(async_docker_client)
api_call_method = getattr(docker_utils, api_call_method_name, lambda *args, **kwargs: Response(content=json.dumps({'type': 'danger', 'msg': 'container_post - unknown api call'}, indent=4), media_type="application/json"))
print("api call: %s, container_id: %s" % (api_call_method_name, container_id))
return await api_call_method(container_id, request_json)
except Exception as e:
print("error - container_post: %s" % str(e))
res = {
"type": "danger",
"msg": str(e)
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
else:
res = {
"type": "danger",
"msg": "invalid container id or missing action"
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
@app.post("/container/{container_id}/stats/update")
async def post_container_update_stats(container_id : str):
global containerIds_to_update
# start update task for container if no task is running
if container_id not in containerIds_to_update:
asyncio.create_task(get_container_stats(container_id))
containerIds_to_update.append(container_id)
while True:
if redis_client.exists(container_id + '_stats'):
break
await asyncio.sleep(1.5)
stats = json.loads(redis_client.get(container_id + '_stats'))
return Response(content=json.dumps(stats, indent=4), media_type="application/json")
class DockerUtils:
def __init__(self, docker_client):
self.docker_client = docker_client
# api call: container_post - post_action: stop
async def container_post__stop(self, container_id, request_json):
for container in (await self.docker_client.containers.list()):
if container._id == container_id:
await container.stop()
res = {
'type': 'success',
'msg': 'command completed successfully'
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
# api call: container_post - post_action: start
async def container_post__start(self, container_id, request_json):
for container in (await self.docker_client.containers.list()):
if container._id == container_id:
await container.start()
res = {
'type': 'success',
'msg': 'command completed successfully'
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
# api call: container_post - post_action: restart
async def container_post__restart(self, container_id, request_json):
for container in (await self.docker_client.containers.list()):
if container._id == container_id:
await container.restart()
res = {
'type': 'success',
'msg': 'command completed successfully'
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
# api call: container_post - post_action: top
async def container_post__top(self, container_id, request_json):
for container in (await self.docker_client.containers.list()):
if container._id == container_id:
ps_exec = await container.exec("ps")
async with ps_exec.start(detach=False) as stream:
ps_return = await stream.read_out()
exec_details = await ps_exec.inspect()
if exec_details["ExitCode"] == None or exec_details["ExitCode"] == 0:
res = {
'type': 'success',
'msg': ps_return.data.decode('utf-8')
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
else:
res = {
'type': 'danger',
'msg': ''
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
# api call: container_post - post_action: exec - cmd: mailq - task: delete
async def container_post__exec__mailq__delete(self, container_id, request_json):
if 'items' in request_json:
r = re.compile("^[0-9a-fA-F]+$")
filtered_qids = filter(r.match, request_json['items'])
if filtered_qids:
flagged_qids = ['-d %s' % i for i in filtered_qids]
sanitized_string = str(' '.join(flagged_qids))
for container in (await self.docker_client.containers.list()):
if container._id == container_id:
postsuper_r_exec = await container.exec(["/bin/bash", "-c", "/usr/sbin/postsuper " + sanitized_string])
return await exec_run_handler('generic', postsuper_r_exec)
# api call: container_post - post_action: exec - cmd: mailq - task: hold
async def container_post__exec__mailq__hold(self, container_id, request_json):
if 'items' in request_json:
r = re.compile("^[0-9a-fA-F]+$")
filtered_qids = filter(r.match, request_json['items'])
if filtered_qids:
flagged_qids = ['-h %s' % i for i in filtered_qids]
sanitized_string = str(' '.join(flagged_qids))
for container in (await self.docker_client.containers.list()):
if container._id == container_id:
postsuper_r_exec = await container.exec(["/bin/bash", "-c", "/usr/sbin/postsuper " + sanitized_string])
return await exec_run_handler('generic', postsuper_r_exec)
# api call: container_post - post_action: exec - cmd: mailq - task: cat
async def container_post__exec__mailq__cat(self, container_id, request_json):
if 'items' in request_json:
r = re.compile("^[0-9a-fA-F]+$")
filtered_qids = filter(r.match, request_json['items'])
if filtered_qids:
sanitized_string = str(' '.join(filtered_qids))
for container in (await self.docker_client.containers.list()):
if container._id == container_id:
postcat_exec = await container.exec(["/bin/bash", "-c", "/usr/sbin/postcat -q " + sanitized_string], user='postfix')
return await exec_run_handler('utf8_text_only', postcat_exec)
# api call: container_post - post_action: exec - cmd: mailq - task: unhold
async def container_post__exec__mailq__unhold(self, container_id, request_json):
if 'items' in request_json:
r = re.compile("^[0-9a-fA-F]+$")
filtered_qids = filter(r.match, request_json['items'])
if filtered_qids:
flagged_qids = ['-H %s' % i for i in filtered_qids]
sanitized_string = str(' '.join(flagged_qids))
for container in (await self.docker_client.containers.list()):
if container._id == container_id:
postsuper_r_exec = await container.exec(["/bin/bash", "-c", "/usr/sbin/postsuper " + sanitized_string])
return await exec_run_handler('generic', postsuper_r_exec)
# api call: container_post - post_action: exec - cmd: mailq - task: deliver
async def container_post__exec__mailq__deliver(self, container_id, request_json):
if 'items' in request_json:
r = re.compile("^[0-9a-fA-F]+$")
filtered_qids = filter(r.match, request_json['items'])
if filtered_qids:
flagged_qids = ['-i %s' % i for i in filtered_qids]
for container in (await self.docker_client.containers.list()):
if container._id == container_id:
for i in flagged_qids:
postsuper_r_exec = await container.exec(["/bin/bash", "-c", "/usr/sbin/postqueue " + i], user='postfix')
async with postsuper_r_exec.start(detach=False) as stream:
postsuper_r_return = await stream.read_out()
# todo: check each exit code
res = {
'type': 'success',
'msg': 'Scheduled immediate delivery'
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
# api call: container_post - post_action: exec - cmd: mailq - task: list
async def container_post__exec__mailq__list(self, container_id, request_json):
for container in (await self.docker_client.containers.list()):
if container._id == container_id:
mailq_exec = await container.exec(["/usr/sbin/postqueue", "-j"], user='postfix')
return await exec_run_handler('utf8_text_only', mailq_exec)
# api call: container_post - post_action: exec - cmd: mailq - task: flush
async def container_post__exec__mailq__flush(self, container_id, request_json):
for container in (await self.docker_client.containers.list()):
if container._id == container_id:
postsuper_r_exec = await container.exec(["/usr/sbin/postqueue", "-f"], user='postfix')
return await exec_run_handler('generic', postsuper_r_exec)
# api call: container_post - post_action: exec - cmd: mailq - task: super_delete
async def container_post__exec__mailq__super_delete(self, container_id, request_json):
for container in (await self.docker_client.containers.list()):
if container._id == container_id:
postsuper_r_exec = await container.exec(["/usr/sbin/postsuper", "-d", "ALL"])
return await exec_run_handler('generic', postsuper_r_exec)
# api call: container_post - post_action: exec - cmd: system - task: fts_rescan
async def container_post__exec__system__fts_rescan(self, container_id, request_json):
if 'username' in request_json:
for container in (await self.docker_client.containers.list()):
if container._id == container_id:
rescan_exec = await container.exec(["/bin/bash", "-c", "/usr/bin/doveadm fts rescan -u '" + request_json['username'].replace("'", "'\\''") + "'"], user='vmail')
async with rescan_exec.start(detach=False) as stream:
rescan_return = await stream.read_out()
exec_details = await rescan_exec.inspect()
if exec_details["ExitCode"] == None or exec_details["ExitCode"] == 0:
res = {
'type': 'success',
'msg': 'fts_rescan: rescan triggered'
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
else:
res = {
'type': 'warning',
'msg': 'fts_rescan error'
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
if 'all' in request_json:
for container in (await self.docker_client.containers.list()):
if container._id == container_id:
rescan_exec = await container.exec(["/bin/bash", "-c", "/usr/bin/doveadm fts rescan -A"], user='vmail')
async with rescan_exec.start(detach=False) as stream:
rescan_return = await stream.read_out()
exec_details = await rescan_exec.inspect()
if exec_details["ExitCode"] == None or exec_details["ExitCode"] == 0:
res = {
'type': 'success',
'msg': 'fts_rescan: rescan triggered'
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
else:
res = {
'type': 'warning',
'msg': 'fts_rescan error'
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
# api call: container_post - post_action: exec - cmd: system - task: df
async def container_post__exec__system__df(self, container_id, request_json):
if 'dir' in request_json:
for container in (await self.docker_client.containers.list()):
if container._id == container_id:
df_exec = await container.exec(["/bin/bash", "-c", "/bin/df -H '" + request_json['dir'].replace("'", "'\\''") + "' | /usr/bin/tail -n1 | /usr/bin/tr -s [:blank:] | /usr/bin/tr ' ' ','"], user='nobody')
async with df_exec.start(detach=False) as stream:
df_return = await stream.read_out()
print(df_return)
print(await df_exec.inspect())
exec_details = await df_exec.inspect()
if exec_details["ExitCode"] == None or exec_details["ExitCode"] == 0:
return df_return.data.decode('utf-8').rstrip()
else:
return "0,0,0,0,0,0"
# api call: container_post - post_action: exec - cmd: system - task: mysql_upgrade
async def container_post__exec__system__mysql_upgrade(self, container_id, request_json):
for container in (await self.docker_client.containers.list()):
if container._id == container_id:
sql_exec = await container.exec(["/bin/bash", "-c", "/usr/bin/mysql_upgrade -uroot -p'" + os.environ['DBROOT'].replace("'", "'\\''") + "'\n"], user='mysql')
async with sql_exec.start(detach=False) as stream:
sql_return = await stream.read_out()
exec_details = await sql_exec.inspect()
if exec_details["ExitCode"] == None or exec_details["ExitCode"] == 0:
matched = False
for line in sql_return.data.decode('utf-8').split("\n"):
if 'is already upgraded to' in line:
matched = True
if matched:
res = {
'type': 'success',
'msg': 'mysql_upgrade: already upgraded',
'text': sql_return.data.decode('utf-8')
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
else:
await container.restart()
res = {
'type': 'warning',
'msg': 'mysql_upgrade: upgrade was applied',
'text': sql_return.data.decode('utf-8')
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
else:
res = {
'type': 'error',
'msg': 'mysql_upgrade: error running command',
'text': sql_return.data.decode('utf-8')
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
# api call: container_post - post_action: exec - cmd: system - task: mysql_tzinfo_to_sql
async def container_post__exec__system__mysql_tzinfo_to_sql(self, container_id, request_json):
for container in (await self.docker_client.containers.list()):
if container._id == container_id:
sql_exec = await container.exec(["/bin/bash", "-c", "/usr/bin/mysql_tzinfo_to_sql /usr/share/zoneinfo | /bin/sed 's/Local time zone must be set--see zic manual page/FCTY/' | /usr/bin/mysql -uroot -p'" + os.environ['DBROOT'].replace("'", "'\\''") + "' mysql \n"], user='mysql')
async with sql_exec.start(detach=False) as stream:
sql_return = await stream.read_out()
exec_details = await sql_exec.inspect()
if exec_details["ExitCode"] == None or exec_details["ExitCode"] == 0:
res = {
'type': 'info',
'msg': 'mysql_tzinfo_to_sql: command completed successfully',
'text': sql_return.data.decode('utf-8')
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
else:
res = {
'type': 'error',
'msg': 'mysql_tzinfo_to_sql: error running command',
'text': sql_return.data.decode('utf-8')
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
# api call: container_post - post_action: exec - cmd: reload - task: dovecot
async def container_post__exec__reload__dovecot(self, container_id, request_json):
for container in (await self.docker_client.containers.list()):
if container._id == container_id:
reload_exec = await container.exec(["/bin/bash", "-c", "/usr/sbin/dovecot reload"])
return await exec_run_handler('generic', reload_exec)
# api call: container_post - post_action: exec - cmd: reload - task: postfix
async def container_post__exec__reload__postfix(self, container_id, request_json):
for container in (await self.docker_client.containers.list()):
if container._id == container_id:
reload_exec = await container.exec(["/bin/bash", "-c", "/usr/sbin/postfix reload"])
return await exec_run_handler('generic', reload_exec)
# api call: container_post - post_action: exec - cmd: reload - task: nginx
async def container_post__exec__reload__nginx(self, container_id, request_json):
for container in (await self.docker_client.containers.list()):
if container._id == container_id:
reload_exec = await container.exec(["/bin/sh", "-c", "/usr/sbin/nginx -s reload"])
return await exec_run_handler('generic', reload_exec)
# api call: container_post - post_action: exec - cmd: sieve - task: list
async def container_post__exec__sieve__list(self, container_id, request_json):
if 'username' in request_json:
for container in (await self.docker_client.containers.list()):
if container._id == container_id:
sieve_exec = await container.exec(["/bin/bash", "-c", "/usr/bin/doveadm sieve list -u '" + request_json['username'].replace("'", "'\\''") + "'"])
return await exec_run_handler('utf8_text_only', sieve_exec)
# api call: container_post - post_action: exec - cmd: sieve - task: print
async def container_post__exec__sieve__print(self, container_id, request_json):
if 'username' in request_json and 'script_name' in request_json:
for container in (await self.docker_client.containers.list()):
if container._id == container_id:
cmd = ["/bin/bash", "-c", "/usr/bin/doveadm sieve get -u '" + request_json['username'].replace("'", "'\\''") + "' '" + request_json['script_name'].replace("'", "'\\''") + "'"]
sieve_exec = await container.exec(cmd)
return await exec_run_handler('utf8_text_only', sieve_exec)
# api call: container_post - post_action: exec - cmd: maildir - task: cleanup
async def container_post__exec__maildir__cleanup(self, container_id, request_json):
if 'maildir' in request_json:
for container in (await self.docker_client.containers.list()):
if container._id == container_id:
sane_name = re.sub(r'\W+', '', request_json['maildir'])
cmd = ["/bin/bash", "-c", "if [[ -d '/var/vmail/" + request_json['maildir'].replace("'", "'\\''") + "' ]]; then /bin/mv '/var/vmail/" + request_json['maildir'].replace("'", "'\\''") + "' '/var/vmail/_garbage/" + str(int(time.time())) + "_" + sane_name + "'; fi"]
maildir_cleanup_exec = await container.exec(cmd, user='vmail')
return await exec_run_handler('generic', maildir_cleanup_exec)
# api call: container_post - post_action: exec - cmd: rspamd - task: worker_password
async def container_post__exec__rspamd__worker_password(self, container_id, request_json):
if 'raw' in request_json:
for container in (await self.docker_client.containers.list()):
if container._id == container_id:
cmd = "./set_worker_password.sh '" + request_json['raw'].replace("'", "'\\''") + "' 2> /dev/null"
rspamd_password_exec = await container.exec(cmd, user='_rspamd')
async with rspamd_password_exec.start(detach=False) as stream:
rspamd_password_return = await stream.read_out()
matched = False
if "OK" in rspamd_password_return.data.decode('utf-8'):
matched = True
await container.restart()
if matched:
res = {
'type': 'success',
'msg': 'command completed successfully'
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
else:
res = {
'type': 'danger',
'msg': 'command did not complete'
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
async def exec_run_handler(type, exec_obj):
async with exec_obj.start(detach=False) as stream:
exec_return = await stream.read_out()
if exec_return == None:
exec_return = ""
else:
exec_return = exec_return.data.decode('utf-8')
if type == 'generic':
exec_details = await exec_obj.inspect()
if exec_details["ExitCode"] == None or exec_details["ExitCode"] == 0:
res = {
"type": "success",
"msg": "command completed successfully"
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
else:
res = {
"type": "success",
"msg": "'command failed: " + exec_return
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
if type == 'utf8_text_only':
return Response(content=exec_return, media_type="text/plain")
async def get_host_stats(wait=5):
global host_stats_isUpdating
try:
system_time = datetime.now()
host_stats = {
"cpu": {
"cores": psutil.cpu_count(),
"usage": psutil.cpu_percent()
},
"memory": {
"total": psutil.virtual_memory().total,
"usage": psutil.virtual_memory().percent,
"swap": psutil.swap_memory()
},
"uptime": time.time() - psutil.boot_time(),
"system_time": system_time.strftime("%d.%m.%Y %H:%M:%S")
}
redis_client.set('host_stats', json.dumps(host_stats), ex=10)
except Exception as e:
res = {
"type": "danger",
"msg": str(e)
}
print(json.dumps(res, indent=4))
await asyncio.sleep(wait)
host_stats_isUpdating = False
async def get_container_stats(container_id, wait=5, stop=False):
global containerIds_to_update
if container_id and container_id.isalnum():
try:
for container in (await async_docker_client.containers.list()):
if container._id == container_id:
res = await container.stats(stream=False)
if redis_client.exists(container_id + '_stats'):
stats = json.loads(redis_client.get(container_id + '_stats'))
else:
stats = []
stats.append(res[0])
if len(stats) > 3:
del stats[0]
redis_client.set(container_id + '_stats', json.dumps(stats), ex=60)
except Exception as e:
res = {
"type": "danger",
"msg": str(e)
}
print(json.dumps(res, indent=4))
else:
res = {
"type": "danger",
"msg": "no or invalid id defined"
}
print(json.dumps(res, indent=4))
await asyncio.sleep(wait)
if stop == True:
# update task was called second time, stop
containerIds_to_update.remove(container_id)
else:
# call update task a second time
await get_container_stats(container_id, wait=0, stop=True)
if os.environ['REDIS_SLAVEOF_IP'] != "":
redis_client = redis.Redis(host=os.environ['REDIS_SLAVEOF_IP'], port=os.environ['REDIS_SLAVEOF_PORT'], db=0)
else:
redis_client = redis.Redis(host='redis-mailcow', port=6379, db=0)
async_docker_client = aiodocker.Docker(url='unix:///var/run/docker.sock')

View File

@@ -0,0 +1,260 @@
import os
import sys
import uvicorn
import json
import uuid
import async_timeout
import asyncio
import aioredis
import aiodocker
import docker
import logging
from logging.config import dictConfig
from fastapi import FastAPI, Response, Request
from modules.DockerApi import DockerApi
dockerapi = None
app = FastAPI()
# Define Routes
@app.get("/host/stats")
async def get_host_update_stats():
global dockerapi
if dockerapi.host_stats_isUpdating == False:
asyncio.create_task(dockerapi.get_host_stats())
dockerapi.host_stats_isUpdating = True
while True:
if await dockerapi.redis_client.exists('host_stats'):
break
await asyncio.sleep(1.5)
stats = json.loads(await dockerapi.redis_client.get('host_stats'))
return Response(content=json.dumps(stats, indent=4), media_type="application/json")
@app.get("/containers/{container_id}/json")
async def get_container(container_id : str):
global dockerapi
if container_id and container_id.isalnum():
try:
for container in (await dockerapi.async_docker_client.containers.list()):
if container._id == container_id:
container_info = await container.show()
return Response(content=json.dumps(container_info, indent=4), media_type="application/json")
res = {
"type": "danger",
"msg": "no container found"
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
except Exception as e:
res = {
"type": "danger",
"msg": str(e)
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
else:
res = {
"type": "danger",
"msg": "no or invalid id defined"
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
@app.get("/containers/json")
async def get_containers():
global dockerapi
containers = {}
try:
for container in (await dockerapi.async_docker_client.containers.list()):
container_info = await container.show()
containers.update({container_info['Id']: container_info})
return Response(content=json.dumps(containers, indent=4), media_type="application/json")
except Exception as e:
res = {
"type": "danger",
"msg": str(e)
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
@app.post("/containers/{container_id}/{post_action}")
async def post_containers(container_id : str, post_action : str, request: Request):
global dockerapi
try :
request_json = await request.json()
except Exception as err:
request_json = {}
if container_id and container_id.isalnum() and post_action:
try:
"""Dispatch container_post api call"""
if post_action == 'exec':
if not request_json or not 'cmd' in request_json:
res = {
"type": "danger",
"msg": "cmd is missing"
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
if not request_json or not 'task' in request_json:
res = {
"type": "danger",
"msg": "task is missing"
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
api_call_method_name = '__'.join(['container_post', str(post_action), str(request_json['cmd']), str(request_json['task']) ])
else:
api_call_method_name = '__'.join(['container_post', str(post_action) ])
api_call_method = getattr(dockerapi, api_call_method_name, lambda *args, **kwargs: Response(content=json.dumps({'type': 'danger', 'msg': 'container_post - unknown api call'}, indent=4), media_type="application/json"))
dockerapi.logger.info("api call: %s, container_id: %s" % (api_call_method_name, container_id))
return api_call_method(request_json, container_id=container_id)
except Exception as e:
dockerapi.logger.error("error - container_post: %s" % str(e))
res = {
"type": "danger",
"msg": str(e)
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
else:
res = {
"type": "danger",
"msg": "invalid container id or missing action"
}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
@app.post("/container/{container_id}/stats/update")
async def post_container_update_stats(container_id : str):
global dockerapi
# start update task for container if no task is running
if container_id not in dockerapi.containerIds_to_update:
asyncio.create_task(dockerapi.get_container_stats(container_id))
dockerapi.containerIds_to_update.append(container_id)
while True:
if await dockerapi.redis_client.exists(container_id + '_stats'):
break
await asyncio.sleep(1.5)
stats = json.loads(await dockerapi.redis_client.get(container_id + '_stats'))
return Response(content=json.dumps(stats, indent=4), media_type="application/json")
# Events
@app.on_event("startup")
async def startup_event():
global dockerapi
# Initialize a custom logger
logger = logging.getLogger("dockerapi")
logger.setLevel(logging.INFO)
# Configure the logger to output logs to the terminal
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter("%(levelname)s: %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.info("Init APP")
# Init redis client
if os.environ['REDIS_SLAVEOF_IP'] != "":
redis_client = redis = await aioredis.from_url(f"redis://{os.environ['REDIS_SLAVEOF_IP']}:{os.environ['REDIS_SLAVEOF_PORT']}/0")
else:
redis_client = redis = await aioredis.from_url("redis://redis-mailcow:6379/0")
# Init docker clients
sync_docker_client = docker.DockerClient(base_url='unix://var/run/docker.sock', version='auto')
async_docker_client = aiodocker.Docker(url='unix:///var/run/docker.sock')
dockerapi = DockerApi(redis_client, sync_docker_client, async_docker_client, logger)
logger.info("Subscribe to redis channel")
# Subscribe to redis channel
dockerapi.pubsub = redis.pubsub()
await dockerapi.pubsub.subscribe("MC_CHANNEL")
asyncio.create_task(handle_pubsub_messages(dockerapi.pubsub))
@app.on_event("shutdown")
async def shutdown_event():
global dockerapi
# Close docker connections
dockerapi.sync_docker_client.close()
await dockerapi.async_docker_client.close()
# Close redis
await dockerapi.pubsub.unsubscribe("MC_CHANNEL")
await dockerapi.redis_client.close()
# PubSub Handler
async def handle_pubsub_messages(channel: aioredis.client.PubSub):
global dockerapi
while True:
try:
async with async_timeout.timeout(60):
message = await channel.get_message(ignore_subscribe_messages=True, timeout=30)
if message is not None:
# Parse message
data_json = json.loads(message['data'].decode('utf-8'))
dockerapi.logger.info(f"PubSub Received - {json.dumps(data_json)}")
# Handle api_call
if 'api_call' in data_json:
# api_call: container_post
if data_json['api_call'] == "container_post":
if 'post_action' in data_json and 'container_name' in data_json:
try:
"""Dispatch container_post api call"""
request_json = {}
if data_json['post_action'] == 'exec':
if 'request' in data_json:
request_json = data_json['request']
if 'cmd' in request_json:
if 'task' in request_json:
api_call_method_name = '__'.join(['container_post', str(data_json['post_action']), str(request_json['cmd']), str(request_json['task']) ])
else:
dockerapi.logger.error("api call: task missing")
else:
dockerapi.logger.error("api call: cmd missing")
else:
dockerapi.logger.error("api call: request missing")
else:
api_call_method_name = '__'.join(['container_post', str(data_json['post_action'])])
if api_call_method_name:
api_call_method = getattr(dockerapi, api_call_method_name)
if api_call_method:
dockerapi.logger.info("api call: %s, container_name: %s" % (api_call_method_name, data_json['container_name']))
api_call_method(request_json, container_name=data_json['container_name'])
else:
dockerapi.logger.error("api call not found: %s, container_name: %s" % (api_call_method_name, data_json['container_name']))
except Exception as e:
dockerapi.logger.error("container_post: %s" % str(e))
else:
dockerapi.logger.error("api call: missing container_name, post_action or request")
else:
dockerapi.logger.error("Unknwon PubSub recieved - %s" % json.dumps(data_json))
else:
dockerapi.logger.error("Unknwon PubSub recieved - %s" % json.dumps(data_json))
await asyncio.sleep(0.0)
except asyncio.TimeoutError:
pass
if __name__ == '__main__':
uvicorn.run(
app,
host="0.0.0.0",
port=443,
ssl_certfile="/app/dockerapi_cert.pem",
ssl_keyfile="/app/dockerapi_key.pem",
log_level="info",
loop="none"
)
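
For reference, a hedged sketch of the publishing side of the MC_CHANNEL contract that handle_pubsub_messages() above parses. The message keys (api_call, post_action, container_name, request) come straight from the handler; the synchronous redis package and the container name are assumptions for illustration:

import json
import redis

r = redis.Redis(host="redis-mailcow", port=6379, db=0)
message = {
    "api_call": "container_post",
    "post_action": "exec",
    "container_name": "postfix-mailcow",  # hypothetical container name
    "request": {"cmd": "mailq", "task": "flush"}
}
# The handler resolves this to container_post__exec__mailq__flush()
r.publish("MC_CHANNEL", json.dumps(message))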

View File

@@ -0,0 +1,487 @@
import psutil
import sys
import traceback
import os
import re
import time
import json
import asyncio
import platform
from datetime import datetime
from fastapi import FastAPI, Response, Request
class DockerApi:
def __init__(self, redis_client, sync_docker_client, async_docker_client, logger):
self.redis_client = redis_client
self.sync_docker_client = sync_docker_client
self.async_docker_client = async_docker_client
self.logger = logger
self.host_stats_isUpdating = False
self.containerIds_to_update = []
# api call: container_post - post_action: stop
def container_post__stop(self, request_json, **kwargs):
if 'container_id' in kwargs:
filters = {"id": kwargs['container_id']}
elif 'container_name' in kwargs:
filters = {"name": kwargs['container_name']}
for container in self.sync_docker_client.containers.list(all=True, filters=filters):
container.stop()
res = { 'type': 'success', 'msg': 'command completed successfully'}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
# api call: container_post - post_action: start
def container_post__start(self, request_json, **kwargs):
if 'container_id' in kwargs:
filters = {"id": kwargs['container_id']}
elif 'container_name' in kwargs:
filters = {"name": kwargs['container_name']}
for container in self.sync_docker_client.containers.list(all=True, filters=filters):
container.start()
res = { 'type': 'success', 'msg': 'command completed successfully'}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
# api call: container_post - post_action: restart
def container_post__restart(self, request_json, **kwargs):
if 'container_id' in kwargs:
filters = {"id": kwargs['container_id']}
elif 'container_name' in kwargs:
filters = {"name": kwargs['container_name']}
for container in self.sync_docker_client.containers.list(all=True, filters=filters):
container.restart()
res = { 'type': 'success', 'msg': 'command completed successfully'}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
# api call: container_post - post_action: top
def container_post__top(self, request_json, **kwargs):
if 'container_id' in kwargs:
filters = {"id": kwargs['container_id']}
elif 'container_name' in kwargs:
filters = {"name": kwargs['container_name']}
for container in self.sync_docker_client.containers.list(all=True, filters=filters):
res = { 'type': 'success', 'msg': container.top()}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
# api call: container_post - post_action: stats
def container_post__stats(self, request_json, **kwargs):
if 'container_id' in kwargs:
filters = {"id": kwargs['container_id']}
elif 'container_name' in kwargs:
filters = {"name": kwargs['container_name']}
for container in self.sync_docker_client.containers.list(all=True, filters=filters):
for stat in container.stats(decode=True, stream=True):
res = { 'type': 'success', 'msg': stat}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
# api call: container_post - post_action: exec - cmd: mailq - task: delete
def container_post__exec__mailq__delete(self, request_json, **kwargs):
if 'container_id' in kwargs:
filters = {"id": kwargs['container_id']}
elif 'container_name' in kwargs:
filters = {"name": kwargs['container_name']}
if 'items' in request_json:
r = re.compile("^[0-9a-fA-F]+$")
filtered_qids = filter(r.match, request_json['items'])
if filtered_qids:
flagged_qids = ['-d %s' % i for i in filtered_qids]
sanitized_string = str(' '.join(flagged_qids))
for container in self.sync_docker_client.containers.list(filters=filters):
postsuper_r = container.exec_run(["/bin/bash", "-c", "/usr/sbin/postsuper " + sanitized_string])
return self.exec_run_handler('generic', postsuper_r)
# api call: container_post - post_action: exec - cmd: mailq - task: hold
def container_post__exec__mailq__hold(self, request_json, **kwargs):
if 'container_id' in kwargs:
filters = {"id": kwargs['container_id']}
elif 'container_name' in kwargs:
filters = {"name": kwargs['container_name']}
if 'items' in request_json:
r = re.compile("^[0-9a-fA-F]+$")
filtered_qids = filter(r.match, request_json['items'])
if filtered_qids:
flagged_qids = ['-h %s' % i for i in filtered_qids]
sanitized_string = str(' '.join(flagged_qids))
for container in self.sync_docker_client.containers.list(filters=filters):
postsuper_r = container.exec_run(["/bin/bash", "-c", "/usr/sbin/postsuper " + sanitized_string])
return self.exec_run_handler('generic', postsuper_r)
# api call: container_post - post_action: exec - cmd: mailq - task: cat
def container_post__exec__mailq__cat(self, request_json, **kwargs):
if 'container_id' in kwargs:
filters = {"id": kwargs['container_id']}
elif 'container_name' in kwargs:
filters = {"name": kwargs['container_name']}
if 'items' in request_json:
r = re.compile("^[0-9a-fA-F]+$")
filtered_qids = filter(r.match, request_json['items'])
if filtered_qids:
sanitized_string = str(' '.join(filtered_qids))
for container in self.sync_docker_client.containers.list(filters=filters):
postcat_return = container.exec_run(["/bin/bash", "-c", "/usr/sbin/postcat -q " + sanitized_string], user='postfix')
if not postcat_return:
postcat_return = 'err: invalid'
return self.exec_run_handler('utf8_text_only', postcat_return)
# api call: container_post - post_action: exec - cmd: mailq - task: unhold
def container_post__exec__mailq__unhold(self, request_json, **kwargs):
if 'container_id' in kwargs:
filters = {"id": kwargs['container_id']}
elif 'container_name' in kwargs:
filters = {"name": kwargs['container_name']}
if 'items' in request_json:
r = re.compile("^[0-9a-fA-F]+$")
filtered_qids = filter(r.match, request_json['items'])
if filtered_qids:
flagged_qids = ['-H %s' % i for i in filtered_qids]
sanitized_string = str(' '.join(flagged_qids))
for container in self.sync_docker_client.containers.list(filters=filters):
postsuper_r = container.exec_run(["/bin/bash", "-c", "/usr/sbin/postsuper " + sanitized_string])
return self.exec_run_handler('generic', postsuper_r)
# api call: container_post - post_action: exec - cmd: mailq - task: deliver
def container_post__exec__mailq__deliver(self, request_json, **kwargs):
if 'container_id' in kwargs:
filters = {"id": kwargs['container_id']}
elif 'container_name' in kwargs:
filters = {"name": kwargs['container_name']}
if 'items' in request_json:
r = re.compile("^[0-9a-fA-F]+$")
filtered_qids = filter(r.match, request_json['items'])
if filtered_qids:
flagged_qids = ['-i %s' % i for i in filtered_qids]
for container in self.sync_docker_client.containers.list(filters=filters):
for i in flagged_qids:
postqueue_r = container.exec_run(["/bin/bash", "-c", "/usr/sbin/postqueue " + i], user='postfix')
# todo: check each exit code
res = { 'type': 'success', 'msg': 'Scheduled immediate delivery'}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
# api call: container_post - post_action: exec - cmd: mailq - task: list
def container_post__exec__mailq__list(self, request_json, **kwargs):
if 'container_id' in kwargs:
filters = {"id": kwargs['container_id']}
elif 'container_name' in kwargs:
filters = {"name": kwargs['container_name']}
for container in self.sync_docker_client.containers.list(filters=filters):
mailq_return = container.exec_run(["/usr/sbin/postqueue", "-j"], user='postfix')
return self.exec_run_handler('utf8_text_only', mailq_return)
# api call: container_post - post_action: exec - cmd: mailq - task: flush
def container_post__exec__mailq__flush(self, request_json, **kwargs):
if 'container_id' in kwargs:
filters = {"id": kwargs['container_id']}
elif 'container_name' in kwargs:
filters = {"name": kwargs['container_name']}
for container in self.sync_docker_client.containers.list(filters=filters):
postqueue_r = container.exec_run(["/usr/sbin/postqueue", "-f"], user='postfix')
return self.exec_run_handler('generic', postqueue_r)
# api call: container_post - post_action: exec - cmd: mailq - task: super_delete
def container_post__exec__mailq__super_delete(self, request_json, **kwargs):
if 'container_id' in kwargs:
filters = {"id": kwargs['container_id']}
elif 'container_name' in kwargs:
filters = {"name": kwargs['container_name']}
for container in self.sync_docker_client.containers.list(filters=filters):
postsuper_r = container.exec_run(["/usr/sbin/postsuper", "-d", "ALL"])
return self.exec_run_handler('generic', postsuper_r)
# api call: container_post - post_action: exec - cmd: system - task: fts_rescan
def container_post__exec__system__fts_rescan(self, request_json, **kwargs):
if 'container_id' in kwargs:
filters = {"id": kwargs['container_id']}
elif 'container_name' in kwargs:
filters = {"name": kwargs['container_name']}
if 'username' in request_json:
for container in self.sync_docker_client.containers.list(filters=filters):
rescan_return = container.exec_run(["/bin/bash", "-c", "/usr/bin/doveadm fts rescan -u '" + request_json['username'].replace("'", "'\\''") + "'"], user='vmail')
if rescan_return.exit_code == 0:
res = { 'type': 'success', 'msg': 'fts_rescan: rescan triggered'}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
else:
res = { 'type': 'warning', 'msg': 'fts_rescan error'}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
if 'all' in request_json:
for container in self.sync_docker_client.containers.list(filters=filters):
rescan_return = container.exec_run(["/bin/bash", "-c", "/usr/bin/doveadm fts rescan -A"], user='vmail')
if rescan_return.exit_code == 0:
res = { 'type': 'success', 'msg': 'fts_rescan: rescan triggered'}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
else:
res = { 'type': 'warning', 'msg': 'fts_rescan error'}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
# api call: container_post - post_action: exec - cmd: system - task: df
def container_post__exec__system__df(self, request_json, **kwargs):
if 'container_id' in kwargs:
filters = {"id": kwargs['container_id']}
elif 'container_name' in kwargs:
filters = {"name": kwargs['container_name']}
if 'dir' in request_json:
for container in self.sync_docker_client.containers.list(filters=filters):
df_return = container.exec_run(["/bin/bash", "-c", "/bin/df -H '" + request_json['dir'].replace("'", "'\\''") + "' | /usr/bin/tail -n1 | /usr/bin/tr -s [:blank:] | /usr/bin/tr ' ' ','"], user='nobody')
if df_return.exit_code == 0:
return df_return.output.decode('utf-8').rstrip()
else:
return "0,0,0,0,0,0"
# api call: container_post - post_action: exec - cmd: system - task: mysql_upgrade
def container_post__exec__system__mysql_upgrade(self, request_json, **kwargs):
if 'container_id' in kwargs:
filters = {"id": kwargs['container_id']}
elif 'container_name' in kwargs:
filters = {"name": kwargs['container_name']}
for container in self.sync_docker_client.containers.list(filters=filters):
sql_return = container.exec_run(["/bin/bash", "-c", "/usr/bin/mysql_upgrade -uroot -p'" + os.environ['DBROOT'].replace("'", "'\\''") + "'\n"], user='mysql')
if sql_return.exit_code == 0:
matched = False
for line in sql_return.output.decode('utf-8').split("\n"):
if 'is already upgraded to' in line:
matched = True
if matched:
res = { 'type': 'success', 'msg':'mysql_upgrade: already upgraded', 'text': sql_return.output.decode('utf-8')}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
else:
container.restart()
res = { 'type': 'warning', 'msg':'mysql_upgrade: upgrade was applied', 'text': sql_return.output.decode('utf-8')}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
else:
res = { 'type': 'error', 'msg': 'mysql_upgrade: error running command', 'text': sql_return.output.decode('utf-8')}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
# api call: container_post - post_action: exec - cmd: system - task: mysql_tzinfo_to_sql
def container_post__exec__system__mysql_tzinfo_to_sql(self, request_json, **kwargs):
if 'container_id' in kwargs:
filters = {"id": kwargs['container_id']}
elif 'container_name' in kwargs:
filters = {"name": kwargs['container_name']}
for container in self.sync_docker_client.containers.list(filters=filters):
sql_return = container.exec_run(["/bin/bash", "-c", "/usr/bin/mysql_tzinfo_to_sql /usr/share/zoneinfo | /bin/sed 's/Local time zone must be set--see zic manual page/FCTY/' | /usr/bin/mysql -uroot -p'" + os.environ['DBROOT'].replace("'", "'\\''") + "' mysql \n"], user='mysql')
if sql_return.exit_code == 0:
res = { 'type': 'info', 'msg': 'mysql_tzinfo_to_sql: command completed successfully', 'text': sql_return.output.decode('utf-8')}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
else:
res = { 'type': 'error', 'msg': 'mysql_tzinfo_to_sql: error running command', 'text': sql_return.output.decode('utf-8')}
return Response(content=json.dumps(res, indent=4), media_type="application/json")
# api call: container_post - post_action: exec - cmd: reload - task: dovecot
def container_post__exec__reload__dovecot(self, request_json, **kwargs):
if 'container_id' in kwargs:
filters = {"id": kwargs['container_id']}
elif 'container_name' in kwargs:
filters = {"name": kwargs['container_name']}
for container in self.sync_docker_client.containers.list(filters=filters):
reload_return = container.exec_run(["/bin/bash", "-c", "/usr/sbin/dovecot reload"])
return self.exec_run_handler('generic', reload_return)
# api call: container_post - post_action: exec - cmd: reload - task: postfix
def container_post__exec__reload__postfix(self, request_json, **kwargs):
if 'container_id' in kwargs:
filters = {"id": kwargs['container_id']}
elif 'container_name' in kwargs:
filters = {"name": kwargs['container_name']}
for container in self.sync_docker_client.containers.list(filters=filters):
reload_return = container.exec_run(["/bin/bash", "-c", "/usr/sbin/postfix reload"])
return self.exec_run_handler('generic', reload_return)
# api call: container_post - post_action: exec - cmd: reload - task: nginx
def container_post__exec__reload__nginx(self, request_json, **kwargs):
if 'container_id' in kwargs:
filters = {"id": kwargs['container_id']}
elif 'container_name' in kwargs:
filters = {"name": kwargs['container_name']}
for container in self.sync_docker_client.containers.list(filters=filters):
reload_return = container.exec_run(["/bin/sh", "-c", "/usr/sbin/nginx -s reload"])
return self.exec_run_handler('generic', reload_return)
# api call: container_post - post_action: exec - cmd: sieve - task: list
def container_post__exec__sieve__list(self, request_json, **kwargs):
if 'container_id' in kwargs:
filters = {"id": kwargs['container_id']}
elif 'container_name' in kwargs:
filters = {"name": kwargs['container_name']}
if 'username' in request_json:
for container in self.sync_docker_client.containers.list(filters=filters):
sieve_return = container.exec_run(["/bin/bash", "-c", "/usr/bin/doveadm sieve list -u '" + request_json['username'].replace("'", "'\\''") + "'"])
return self.exec_run_handler('utf8_text_only', sieve_return)
# api call: container_post - post_action: exec - cmd: sieve - task: print
def container_post__exec__sieve__print(self, request_json, **kwargs):
if 'container_id' in kwargs:
filters = {"id": kwargs['container_id']}
elif 'container_name' in kwargs:
filters = {"name": kwargs['container_name']}
if 'username' in request_json and 'script_name' in request_json:
for container in self.sync_docker_client.containers.list(filters=filters):
cmd = ["/bin/bash", "-c", "/usr/bin/doveadm sieve get -u '" + request_json['username'].replace("'", "'\\''") + "' '" + request_json['script_name'].replace("'", "'\\''") + "'"]
sieve_return = container.exec_run(cmd)
return self.exec_run_handler('utf8_text_only', sieve_return)
# api call: container_post - post_action: exec - cmd: maildir - task: cleanup
def container_post__exec__maildir__cleanup(self, request_json, **kwargs):
if 'container_id' in kwargs:
filters = {"id": kwargs['container_id']}
elif 'container_name' in kwargs:
filters = {"name": kwargs['container_name']}
if 'maildir' in request_json:
for container in self.sync_docker_client.containers.list(filters=filters):
sane_name = re.sub(r'\W+', '', request_json['maildir'])
vmail_name = request_json['maildir'].replace("'", "'\\''")
cmd_vmail = "if [[ -d '/var/vmail/" + vmail_name + "' ]]; then /bin/mv '/var/vmail/" + vmail_name + "' '/var/vmail/_garbage/" + str(int(time.time())) + "_" + sane_name + "'; fi"
index_name = request_json['maildir'].split("/")
if len(index_name) > 1:
index_name = index_name[1].replace("'", "'\\''") + "@" + index_name[0].replace("'", "'\\''")
cmd_vmail_index = "if [[ -d '/var/vmail_index/" + index_name + "' ]]; then /bin/mv '/var/vmail_index/" + index_name + "' '/var/vmail/_garbage/" + str(int(time.time())) + "_" + sane_name + "_index'; fi"
cmd = ["/bin/bash", "-c", cmd_vmail + " && " + cmd_vmail_index]
else:
cmd = ["/bin/bash", "-c", cmd_vmail]
maildir_cleanup = container.exec_run(cmd, user='vmail')
return self.exec_run_handler('generic', maildir_cleanup)
# api call: container_post - post_action: exec - cmd: rspamd - task: worker_password
def container_post__exec__rspamd__worker_password(self, request_json, **kwargs):
if 'container_id' in kwargs:
filters = {"id": kwargs['container_id']}
elif 'container_name' in kwargs:
filters = {"name": kwargs['container_name']}
if 'raw' in request_json:
for container in self.sync_docker_client.containers.list(filters=filters):
cmd = "/usr/bin/rspamadm pw -e -p '" + request_json['raw'].replace("'", "'\\''") + "' 2> /dev/null"
cmd_response = self.exec_cmd_container(container, cmd, user="_rspamd")
matched = False
for line in cmd_response.split("\n"):
if '$2$' in line:
hash = line.strip()
hash_out = re.search('\$2\$.+$', hash).group(0)
rspamd_passphrase_hash = re.sub('[^0-9a-zA-Z\$]+', '', hash_out.rstrip())
rspamd_password_filename = "/etc/rspamd/override.d/worker-controller-password.inc"
cmd = '''/bin/echo 'enable_password = "%s";' > %s && cat %s''' % (rspamd_passphrase_hash, rspamd_password_filename, rspamd_password_filename)
cmd_response = self.exec_cmd_container(container, cmd, user="_rspamd")
if rspamd_passphrase_hash.startswith("$2$") and rspamd_passphrase_hash in cmd_response:
container.restart()
matched = True
if matched:
res = { 'type': 'success', 'msg': 'command completed successfully' }
self.logger.info('success changing Rspamd password')
return Response(content=json.dumps(res, indent=4), media_type="application/json")
else:
self.logger.error('failed changing Rspamd password')
res = { 'type': 'danger', 'msg': 'command did not complete' }
return Response(content=json.dumps(res, indent=4), media_type="application/json")
# Collect host stats
async def get_host_stats(self, wait=5):
try:
system_time = datetime.now()
host_stats = {
"cpu": {
"cores": psutil.cpu_count(),
"usage": psutil.cpu_percent()
},
"memory": {
"total": psutil.virtual_memory().total,
"usage": psutil.virtual_memory().percent,
"swap": psutil.swap_memory()
},
"uptime": time.time() - psutil.boot_time(),
"system_time": system_time.strftime("%d.%m.%Y %H:%M:%S"),
"architecture": platform.machine()
}
await self.redis_client.set('host_stats', json.dumps(host_stats), ex=10)
except Exception as e:
res = {
"type": "danger",
"msg": str(e)
}
await asyncio.sleep(wait)
self.host_stats_isUpdating = False
# Collect container stats
async def get_container_stats(self, container_id, wait=5, stop=False):
if container_id and container_id.isalnum():
try:
for container in (await self.async_docker_client.containers.list()):
if container._id == container_id:
res = await container.stats(stream=False)
if await self.redis_client.exists(container_id + '_stats'):
stats = json.loads(await self.redis_client.get(container_id + '_stats'))
else:
stats = []
stats.append(res[0])
if len(stats) > 3:
del stats[0]
await self.redis_client.set(container_id + '_stats', json.dumps(stats), ex=60)
except Exception as e:
res = {
"type": "danger",
"msg": str(e)
}
else:
res = {
"type": "danger",
"msg": "no or invalid id defined"
}
await asyncio.sleep(wait)
if stop == True:
# update task was called second time, stop
self.containerIds_to_update.remove(container_id)
else:
# call update task a second time
await self.get_container_stats(container_id, wait=0, stop=True)
def exec_cmd_container(self, container, cmd, user, timeout=2, shell_cmd="/bin/bash"):
def recv_socket_data(c_socket, timeout):
c_socket.setblocking(0)
total_data=[]
data=''
begin=time.time()
while True:
if total_data and time.time()-begin > timeout:
break
elif time.time()-begin > timeout*2:
break
try:
data = c_socket.recv(8192)
if data:
total_data.append(data.decode('utf-8'))
#change the beginning time for measurement
begin=time.time()
else:
#sleep for sometime to indicate a gap
time.sleep(0.1)
break
except:
pass
return ''.join(total_data)
try :
socket = container.exec_run([shell_cmd], stdin=True, socket=True, user=user).output._sock
if not cmd.endswith("\n"):
cmd = cmd + "\n"
socket.send(cmd.encode('utf-8'))
data = recv_socket_data(socket, timeout)
socket.close()
return data
except Exception as e:
self.logger.error("error - exec_cmd_container: %s" % str(e))
traceback.print_exc(file=sys.stdout)
def exec_run_handler(self, type, output):
if type == 'generic':
if output.exit_code == 0:
res = { 'type': 'success', 'msg': 'command completed successfully' }
return Response(content=json.dumps(res, indent=4), media_type="application/json")
else:
res = { 'type': 'danger', 'msg': 'command failed: ' + output.output.decode('utf-8') }
return Response(content=json.dumps(res, indent=4), media_type="application/json")
if type == 'utf8_text_only':
return Response(content=output.output.decode('utf-8'), media_type="text/plain")
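
A minimal sketch of driving this class directly, mirroring the wiring in main.py's startup_event(). Passing None for the redis and async docker clients is an assumption that only the synchronous container_post__* methods are exercised, and the container name is hypothetical:

import logging
import docker
from modules.DockerApi import DockerApi

logger = logging.getLogger("dockerapi")
sync_client = docker.DockerClient(base_url="unix://var/run/docker.sock", version="auto")
api = DockerApi(None, sync_client, None, logger)

# Dispatch by method name, exactly as post_containers() and the pubsub handler do
method = getattr(api, "container_post__exec__mailq__list")
response = method({}, container_name="postfix-mailcow")  # hypothetical name
print(response.body.decode())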

View File

@@ -1,10 +1,13 @@
FROM debian:bullseye-slim
LABEL maintainer "Andre Peters <andre.peters@servercow.de>"
LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
ARG DEBIAN_FRONTEND=noninteractive
ARG DOVECOT=2.3.19.1
# renovate: datasource=github-tags depName=dovecot/core versioning=semver-coerced extractVersion=(?<version>.*)$
ARG DOVECOT=2.3.21
# renovate: datasource=github-releases depName=tianon/gosu versioning=semver-coerced extractVersion=(?<version>.*)$
ARG GOSU_VERSION=1.16
ENV LC_ALL C
ENV GOSU_VERSION 1.14
# Add groups and users before installing Dovecot to not break compatibility
RUN groupadd -g 5000 vmail \
@@ -18,6 +21,7 @@ RUN groupadd -g 5000 vmail \
&& touch /etc/default/locale \
&& apt-get update \
&& apt-get -y --no-install-recommends install \
+build-essential \
apt-transport-https \
ca-certificates \
cpanminus \
@@ -58,6 +62,7 @@ RUN groupadd -g 5000 vmail \
libproc-processtable-perl \
libreadonly-perl \
libregexp-common-perl \
+libssl-dev \
libsys-meminfo-perl \
libterm-readkey-perl \
libtest-deep-perl \
@@ -107,6 +112,8 @@ RUN groupadd -g 5000 vmail \
&& apt-get autoclean \
&& rm -rf /var/lib/apt/lists/* \
&& rm -rf /tmp/* /var/tmp/* /root/.cache/
+# imapsync dependencies
+RUN cpan Crypt::OpenSSL::PKCS12
COPY trim_logs.sh /usr/local/bin/trim_logs.sh
COPY clean_q_aged.sh /usr/local/bin/clean_q_aged.sh

View File

@@ -159,7 +159,7 @@ function auth_password_verify(req, pass)
VALUES ("%s", 0, "%s", "%s")]], con:escape(req.service), con:escape(req.user), con:escape(req.real_rip)))
cur:close()
con:close()
-return dovecot.auth.PASSDB_RESULT_OK, "password=" .. pass
+return dovecot.auth.PASSDB_RESULT_OK, ""
end
row = cur:fetch (row, "a")
end
@@ -180,13 +180,13 @@ function auth_password_verify(req, pass)
if tostring(req.real_rip) == "__IPV4_SOGO__" then
cur:close()
con:close()
-return dovecot.auth.PASSDB_RESULT_OK, "password=" .. pass
+return dovecot.auth.PASSDB_RESULT_OK, ""
elseif row.has_prot_access == "1" then
con:execute(string.format([[REPLACE INTO sasl_log (service, app_password, username, real_rip)
VALUES ("%s", %d, "%s", "%s")]], con:escape(req.service), row.id, con:escape(req.user), con:escape(req.real_rip)))
cur:close()
con:close()
-return dovecot.auth.PASSDB_RESULT_OK, "password=" .. pass
+return dovecot.auth.PASSDB_RESULT_OK, ""
end
end
row = cur:fetch (row, "a")

View File

@@ -8492,6 +8492,7 @@ sub xoauth2
require HTML::Entities ;
require JSON ;
require JSON::WebToken::Crypt::RSA ;
require Crypt::OpenSSL::PKCS12;
require Crypt::OpenSSL::RSA ;
require Encode::Byte ;
require IO::Socket::SSL ;
@@ -8532,8 +8533,9 @@ sub xoauth2
$sync->{ debug } and myprint( "Service account: $iss\nKey file: $keyfile\nKey password: $keypass\n");
# Get private key from p12 file (would be better in perl...)
$key = `openssl pkcs12 -in "$keyfile" -nodes -nocerts -passin pass:$keypass -nomacver`;
# Get private key from p12 file
my $pkcs12 = Crypt::OpenSSL::PKCS12->new_from_file($keyfile);
$key = $pkcs12->private_key($keypass);
$sync->{ debug } and myprint( "Private key:\n$key\n");
}
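For comparison, the same PKCS#12 extraction sketched in Python using the cryptography package (path and password are placeholders, not values from this change):

from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.serialization import pkcs12

def private_key_pem(p12_path, password):
    # load the PKCS#12 container and return the private key as unencrypted PEM
    with open(p12_path, 'rb') as fh:
        key, cert, extra_certs = pkcs12.load_key_and_certificates(fh.read(), password)
    return key.private_bytes(serialization.Encoding.PEM,
                             serialization.PrivateFormat.PKCS8,
                             serialization.NoEncryption())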

View File

@@ -75,7 +75,8 @@ my $sth = $dbh->prepare("SELECT id,
custom_params,
subscribeall,
timeout1,
timeout2
timeout2,
dry
FROM imapsync
WHERE active = 1
AND is_running = 0
@@ -111,13 +112,16 @@ while ($row = $sth->fetchrow_arrayref()) {
$subscribeall = @$row[18];
$timeout1 = @$row[19];
$timeout2 = @$row[20];
$dry = @$row[21];
if ($enc1 eq "TLS") { $enc1 = "--tls1"; } elsif ($enc1 eq "SSL") { $enc1 = "--ssl1"; } else { undef $enc1; }
my $template = $run_dir . '/imapsync.XXXXXXX';
my $passfile1 = File::Temp->new(TEMPLATE => $template);
my $passfile2 = File::Temp->new(TEMPLATE => $template);
binmode( $passfile1, ":utf8" );
print $passfile1 "$password1\n";
print $passfile2 trim($master_pass) . "\n";
@@ -148,6 +152,7 @@ while ($row = $sth->fetchrow_arrayref()) {
"--host2", "localhost",
"--user2", $user2 . '*' . trim($master_user),
"--passfile2", $passfile2->filename,
($dry eq "1" ? ('--dry') : ()),
'--no-modulesversion',
'--noreleasecheck'];
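The ($dry eq "1" ? ('--dry') : ()) ternary splices an optional flag into the argument list; the equivalent pattern in Python would be roughly:

# optional flag spliced into an argument list (illustrative values)
dry = "1"
argv = ["--host2", "localhost",
        *(["--dry"] if dry == "1" else []),   # expands to nothing when dry is off
        "--no-modulesversion", "--noreleasecheck"]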

View File

@@ -1,5 +1,7 @@
FROM alpine:3.17
LABEL maintainer "Andre Peters <andre.peters@servercow.de>"
LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
WORKDIR /app
ENV XTABLES_LIBDIR /usr/lib/xtables
ENV PYTHON_IPTABLES_XTABLES_VERSION 12
@@ -14,10 +16,13 @@ RUN apk add --virtual .build-deps \
iptables \
ip6tables \
xtables-addons \
nftables \
tzdata \
py3-pip \
py3-nftables \
musl-dev \
&& pip3 install --ignore-installed --upgrade pip \
jsonschema \
python-iptables \
redis \
ipaddress \
@@ -26,5 +31,10 @@ RUN apk add --virtual .build-deps \
# && pip3 install --upgrade pip python-iptables==0.13.0 redis ipaddress dnspython \
COPY server.py /
CMD ["python3", "-u", "/server.py"]
COPY modules /app/modules
COPY main.py /app/
COPY ./docker-entrypoint.sh /app/
RUN chmod +x /app/docker-entrypoint.sh
CMD ["/bin/sh", "-c", "/app/docker-entrypoint.sh"]

View File

@@ -0,0 +1,29 @@
#!/bin/sh
backend=iptables
# probe both backends; $? is 0 when the probe succeeds
nft list table ip filter &>/dev/null
nftables_found=$?
iptables -L -n &>/dev/null
iptables_found=$?
# the backend whose probe succeeded (lower exit code) wins
if [ $nftables_found -lt $iptables_found ]; then
backend=nftables
fi
if [ $nftables_found -gt $iptables_found ]; then
backend=iptables
fi
# both probes succeeded: prefer the backend with the larger ruleset
if [ $nftables_found -eq 0 ] && [ $nftables_found -eq $iptables_found ]; then
nftables_lines=$(nft list ruleset | wc -l)
iptables_lines=$(iptables-save | wc -l)
if [ $nftables_lines -gt $iptables_lines ]; then
backend=nftables
else
backend=iptables
fi
fi
exec python -u /app/main.py $backend
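Restated as plain logic (a sketch, not shipped code): the probe with the lower exit code wins, and a tie between two working backends is broken by ruleset size.

def pick_backend(nft_rc, ipt_rc, nft_lines=0, ipt_lines=0):
    # nft_rc/ipt_rc: exit codes of the two probes; *_lines: ruleset sizes on a tie
    if nft_rc != ipt_rc:
        return 'nftables' if nft_rc < ipt_rc else 'iptables'
    if nft_rc == 0:
        return 'nftables' if nft_lines > ipt_lines else 'iptables'
    return 'iptables'  # both probes failed: keep the default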

View File

@@ -0,0 +1,469 @@
#!/usr/bin/env python3
import re
import os
import sys
import time
import atexit
import signal
import ipaddress
from collections import Counter
from random import randint
from threading import Thread
from threading import Lock
import redis
import json
import dns.resolver
import dns.exception
import uuid
from modules.Logger import Logger
from modules.IPTables import IPTables
from modules.NFTables import NFTables
# connect to redis
while True:
try:
redis_slaveof_ip = os.getenv('REDIS_SLAVEOF_IP', '')
redis_slaveof_port = os.getenv('REDIS_SLAVEOF_PORT', '')
if "".__eq__(redis_slaveof_ip):
r = redis.StrictRedis(host=os.getenv('IPV4_NETWORK', '172.22.1') + '.249', decode_responses=True, port=6379, db=0)
else:
r = redis.StrictRedis(host=redis_slaveof_ip, decode_responses=True, port=redis_slaveof_port, db=0)
r.ping()
except Exception as ex:
print('%s - trying again in 3 seconds' % (ex))
time.sleep(3)
else:
break
pubsub = r.pubsub()
# rename fail2ban to netfilter
if r.exists('F2B_LOG'):
r.rename('F2B_LOG', 'NETFILTER_LOG')
# globals
WHITELIST = []
BLACKLIST = []
bans = {}
quit_now = False
exit_code = 0
lock = Lock()
# init Logger
logger = Logger(r)
# init backend
backend = sys.argv[1]
if backend == "nftables":
logger.logInfo('Using NFTables backend')
tables = NFTables("MAILCOW", logger)
else:
logger.logInfo('Using IPTables backend')
tables = IPTables("MAILCOW", logger)
def refreshF2boptions():
global f2boptions
global quit_now
global exit_code
f2boptions = {}
if not r.get('F2B_OPTIONS'):
f2boptions['ban_time'] = r.get('F2B_BAN_TIME')
f2boptions['max_ban_time'] = r.get('F2B_MAX_BAN_TIME')
f2boptions['ban_time_increment'] = r.get('F2B_BAN_TIME_INCREMENT')
f2boptions['max_attempts'] = r.get('F2B_MAX_ATTEMPTS')
f2boptions['retry_window'] = r.get('F2B_RETRY_WINDOW')
f2boptions['netban_ipv4'] = r.get('F2B_NETBAN_IPV4')
f2boptions['netban_ipv6'] = r.get('F2B_NETBAN_IPV6')
else:
try:
f2boptions = json.loads(r.get('F2B_OPTIONS'))
except ValueError:
logger.logCrit('Error loading F2B options: F2B_OPTIONS is not json')
quit_now = True
exit_code = 2
verifyF2boptions(f2boptions)
r.set('F2B_OPTIONS', json.dumps(f2boptions, ensure_ascii=False))
def verifyF2boptions(f2boptions):
verifyF2boption(f2boptions,'ban_time', 1800)
verifyF2boption(f2boptions,'max_ban_time', 10000)
verifyF2boption(f2boptions,'ban_time_increment', True)
verifyF2boption(f2boptions,'max_attempts', 10)
verifyF2boption(f2boptions,'retry_window', 600)
verifyF2boption(f2boptions,'netban_ipv4', 32)
verifyF2boption(f2boptions,'netban_ipv6', 128)
verifyF2boption(f2boptions,'banlist_id', str(uuid.uuid4()))
verifyF2boption(f2boptions,'manage_external', 0)
def verifyF2boption(f2boptions, f2boption, f2bdefault):
f2boptions[f2boption] = f2boptions[f2boption] if f2boption in f2boptions and f2boptions[f2boption] is not None else f2bdefault
def refreshF2bregex():
global f2bregex
global quit_now
global exit_code
if not r.get('F2B_REGEX'):
f2bregex = {}
f2bregex[1] = r'mailcow UI: Invalid password for .+ by ([0-9a-f\.:]+)'
f2bregex[2] = r'Rspamd UI: Invalid password by ([0-9a-f\.:]+)'
f2bregex[3] = r'warning: .*\[([0-9a-f\.:]+)\]: SASL .+ authentication failed: (?!.*Connection lost to authentication server).+'
f2bregex[4] = r'warning: non-SMTP command from .*\[([0-9a-f\.:]+)]:.+'
f2bregex[5] = r'NOQUEUE: reject: RCPT from \[([0-9a-f\.:]+)].+Protocol error.+'
f2bregex[6] = r'-login: Disconnected.+ \(auth failed, .+\): user=.*, method=.+, rip=([0-9a-f\.:]+),'
f2bregex[7] = r'-login: Aborted login.+ \(auth failed .+\): user=.+, rip=([0-9a-f\.:]+), lip.+'
f2bregex[8] = r'-login: Aborted login.+ \(tried to use disallowed .+\): user=.+, rip=([0-9a-f\.:]+), lip.+'
f2bregex[9] = r'SOGo.+ Login from \'([0-9a-f\.:]+)\' for user .+ might not have worked'
f2bregex[10] = r'([0-9a-f\.:]+) \"GET \/SOGo\/.* HTTP.+\" 403 .+'
r.set('F2B_REGEX', json.dumps(f2bregex, ensure_ascii=False))
else:
try:
f2bregex = {}
f2bregex = json.loads(r.get('F2B_REGEX'))
except ValueError:
logger.logCrit('Error loading F2B options: F2B_REGEX is not json')
quit_now = True
exit_code = 2
def get_ip(address):
ip = ipaddress.ip_address(address)
if type(ip) is ipaddress.IPv6Address and ip.ipv4_mapped:
ip = ip.ipv4_mapped
if ip.is_private or ip.is_loopback:
return False
return ip
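Behaviour of get_ip in a nutshell (a sketch; the addresses are illustrative):

assert str(get_ip('::ffff:198.51.100.7')) == '198.51.100.7'  # v4-mapped v6 is unwrapped
assert get_ip('10.0.0.5') is False                           # private space is never banned
assert get_ip('::1') is False                                # loopback is never banned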
def ban(address):
global f2boptions
global lock
refreshF2boptions()
BAN_TIME = int(f2boptions['ban_time'])
BAN_TIME_INCREMENT = bool(f2boptions['ban_time_increment'])
MAX_ATTEMPTS = int(f2boptions['max_attempts'])
RETRY_WINDOW = int(f2boptions['retry_window'])
NETBAN_IPV4 = '/' + str(f2boptions['netban_ipv4'])
NETBAN_IPV6 = '/' + str(f2boptions['netban_ipv6'])
ip = get_ip(address)
if not ip: return
address = str(ip)
self_network = ipaddress.ip_network(address)
with lock:
temp_whitelist = set(WHITELIST)
if temp_whitelist:
for wl_key in temp_whitelist:
wl_net = ipaddress.ip_network(wl_key, False)
if wl_net.overlaps(self_network):
logger.logInfo('Address %s is whitelisted by rule %s' % (self_network, wl_net))
return
net = ipaddress.ip_network((address + (NETBAN_IPV4 if type(ip) is ipaddress.IPv4Address else NETBAN_IPV6)), strict=False)
net = str(net)
if not net in bans:
bans[net] = {'attempts': 0, 'last_attempt': 0, 'ban_counter': 0}
current_attempt = time.time()
if current_attempt - bans[net]['last_attempt'] > RETRY_WINDOW:
bans[net]['attempts'] = 0
bans[net]['attempts'] += 1
bans[net]['last_attempt'] = current_attempt
if bans[net]['attempts'] >= MAX_ATTEMPTS:
cur_time = int(round(time.time()))
NET_BAN_TIME = BAN_TIME if not BAN_TIME_INCREMENT else BAN_TIME * 2 ** bans[net]['ban_counter']
logger.logCrit('Banning %s for %d minutes' % (net, NET_BAN_TIME / 60))
if type(ip) is ipaddress.IPv4Address and int(f2boptions['manage_external']) != 1:
with lock:
tables.banIPv4(net)
elif int(f2boptions['manage_external']) != 1:
with lock:
tables.banIPv6(net)
r.hset('F2B_ACTIVE_BANS', '%s' % net, cur_time + NET_BAN_TIME)
else:
logger.logWarn('%d more attempts in the next %d seconds until %s is banned' % (MAX_ATTEMPTS - bans[net]['attempts'], RETRY_WINDOW, net))
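With the defaults from verifyF2boptions (ban_time=1800, max_ban_time=10000, increment enabled), repeat offenders see their ban time double per ban; a worked example:

BAN_TIME, MAX_BAN_TIME = 1800, 10000
for ban_counter in range(4):
    net_ban_time = BAN_TIME * 2 ** ban_counter
    # autopurge() unbans once the time since the last attempt exceeds
    # either net_ban_time or max_ban_time, so the effective cap is the minimum
    print(ban_counter, net_ban_time, min(net_ban_time, MAX_BAN_TIME))
# 0 1800 1800 / 1 3600 3600 / 2 7200 7200 / 3 14400 10000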
def unban(net):
global lock
if not net in bans:
logger.logInfo('%s is not banned, skipping unban and deleting from queue (if any)' % net)
r.hdel('F2B_QUEUE_UNBAN', '%s' % net)
return
logger.logInfo('Unbanning %s' % net)
if type(ipaddress.ip_network(net)) is ipaddress.IPv4Network:
with lock:
tables.unbanIPv4(net)
else:
with lock:
tables.unbanIPv6(net)
r.hdel('F2B_ACTIVE_BANS', '%s' % net)
r.hdel('F2B_QUEUE_UNBAN', '%s' % net)
if net in bans:
bans[net]['attempts'] = 0
bans[net]['ban_counter'] += 1
def permBan(net, unban=False):
global f2boptions
global lock
is_unbanned = False
is_banned = False
if type(ipaddress.ip_network(net, strict=False)) is ipaddress.IPv4Network:
with lock:
if unban:
is_unbanned = tables.unbanIPv4(net)
elif int(f2boptions['manage_external']) != 1:
is_banned = tables.banIPv4(net)
else:
with lock:
if unban:
is_unbanned = tables.unbanIPv6(net)
elif int(f2boptions['manage_external']) != 1:
is_banned = tables.banIPv6(net)
if is_unbanned:
r.hdel('F2B_PERM_BANS', '%s' % net)
logger.logCrit('Removed host/network %s from blacklist' % net)
elif is_banned:
r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time())))
logger.logCrit('Added host/network %s to blacklist' % net)
def clear():
global lock
logger.logInfo('Clearing all bans')
for net in bans.copy():
unban(net)
with lock:
tables.clearIPv4Table()
tables.clearIPv6Table()
r.delete('F2B_ACTIVE_BANS')
r.delete('F2B_PERM_BANS')
pubsub.unsubscribe()
def watch():
logger.logInfo('Watching Redis channel F2B_CHANNEL')
pubsub.subscribe('F2B_CHANNEL')
global quit_now
global exit_code
while not quit_now:
try:
for item in pubsub.listen():
refreshF2bregex()
for rule_id, rule_regex in f2bregex.items():
if item['data'] and item['type'] == 'message':
try:
result = re.search(rule_regex, item['data'])
except re.error:
result = False
if result:
addr = result.group(1)
ip = ipaddress.ip_address(addr)
if ip.is_private or ip.is_loopback:
continue
logger.logWarn('%s matched rule id %s (%s)' % (addr, rule_id, item['data']))
ban(addr)
except Exception as ex:
logger.logWarn('Error reading log line from pubsub: %s' % ex)
quit_now = True
exit_code = 2
def snat4(snat_target):
global lock
global quit_now
while not quit_now:
time.sleep(10)
with lock:
tables.snat4(snat_target, os.getenv('IPV4_NETWORK', '172.22.1') + '.0/24')
def snat6(snat_target):
global lock
global quit_now
while not quit_now:
time.sleep(10)
with lock:
tables.snat6(snat_target, os.getenv('IPV6_NETWORK', 'fd4d:6169:6c63:6f77::/64'))
def autopurge():
while not quit_now:
time.sleep(10)
refreshF2boptions()
BAN_TIME = int(f2boptions['ban_time'])
MAX_BAN_TIME = int(f2boptions['max_ban_time'])
BAN_TIME_INCREMENT = bool(f2boptions['ban_time_increment'])
MAX_ATTEMPTS = int(f2boptions['max_attempts'])
QUEUE_UNBAN = r.hgetall('F2B_QUEUE_UNBAN')
if QUEUE_UNBAN:
for net in QUEUE_UNBAN:
unban(str(net))
for net in bans.copy():
if bans[net]['attempts'] >= MAX_ATTEMPTS:
NET_BAN_TIME = BAN_TIME if not BAN_TIME_INCREMENT else BAN_TIME * 2 ** bans[net]['ban_counter']
TIME_SINCE_LAST_ATTEMPT = time.time() - bans[net]['last_attempt']
if TIME_SINCE_LAST_ATTEMPT > NET_BAN_TIME or TIME_SINCE_LAST_ATTEMPT > MAX_BAN_TIME:
unban(net)
def mailcowChainOrder():
global lock
global quit_now
global exit_code
while not quit_now:
time.sleep(10)
with lock:
quit_now, exit_code = tables.checkIPv4ChainOrder()
if quit_now: return
quit_now, exit_code = tables.checkIPv6ChainOrder()
def isIpNetwork(address):
try:
ipaddress.ip_network(address, False)
except ValueError:
return False
return True
def genNetworkList(list):
resolver = dns.resolver.Resolver()
hostnames = []
networks = []
for key in list:
if isIpNetwork(key):
networks.append(key)
else:
hostnames.append(key)
for hostname in hostnames:
hostname_ips = []
for rdtype in ['A', 'AAAA']:
try:
answer = resolver.resolve(qname=hostname, rdtype=rdtype, lifetime=3)
except dns.exception.Timeout:
logger.logInfo('Hostname %s timed out on resolve' % hostname)
break
except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
continue
except dns.exception.DNSException as dnsexception:
logger.logInfo('%s' % dnsexception)
continue
for rdata in answer:
hostname_ips.append(rdata.to_text())
networks.extend(hostname_ips)
return set(networks)
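For illustration, literal networks pass through unchanged while hostnames are resolved to their A/AAAA records (the resolved addresses below are placeholders):

entries = {'192.0.2.0/24': '1', 'mail.example.org': '1'}   # as stored in the redis hash
genNetworkList(entries)
# -> {'192.0.2.0/24', '198.51.100.7', '2001:db8::7'}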
def whitelistUpdate():
global lock
global quit_now
global WHITELIST
while not quit_now:
start_time = time.time()
list = r.hgetall('F2B_WHITELIST')
new_whitelist = []
if list:
new_whitelist = genNetworkList(list)
with lock:
if Counter(new_whitelist) != Counter(WHITELIST):
WHITELIST = new_whitelist
logger.logInfo('Whitelist was changed, it has %s entries' % len(WHITELIST))
time.sleep(60.0 - ((time.time() - start_time) % 60.0))
def blacklistUpdate():
global quit_now
global BLACKLIST
while not quit_now:
start_time = time.time()
list = r.hgetall('F2B_BLACKLIST')
new_blacklist = []
if list:
new_blacklist = genNetworkList(list)
if Counter(new_blacklist) != Counter(BLACKLIST):
addban = set(new_blacklist).difference(BLACKLIST)
delban = set(BLACKLIST).difference(new_blacklist)
BLACKLIST = new_blacklist
logger.logInfo('Blacklist was changed, it has %s entries' % len(BLACKLIST))
if addban:
for net in addban:
permBan(net=net)
if delban:
for net in delban:
permBan(net=net, unban=True)
time.sleep(60.0 - ((time.time() - start_time) % 60.0))
def quit(signum, frame):
global quit_now
quit_now = True
if __name__ == '__main__':
refreshF2boptions()
# In case a previous session was killed without cleanup
clear()
# Reinit MAILCOW chain
# Is called before threads start, no locking
logger.logInfo("Initializing mailcow netfilter chain")
tables.initChainIPv4()
tables.initChainIPv6()
watch_thread = Thread(target=watch)
watch_thread.daemon = True
watch_thread.start()
if os.getenv('SNAT_TO_SOURCE') and os.getenv('SNAT_TO_SOURCE') != 'n':
try:
snat_ip = os.getenv('SNAT_TO_SOURCE')
snat_ipo = ipaddress.ip_address(snat_ip)
if type(snat_ipo) is ipaddress.IPv4Address:
snat4_thread = Thread(target=snat4,args=(snat_ip,))
snat4_thread.daemon = True
snat4_thread.start()
except ValueError:
print(os.getenv('SNAT_TO_SOURCE') + ' is not a valid IPv4 address')
if os.getenv('SNAT6_TO_SOURCE') and os.getenv('SNAT6_TO_SOURCE') != 'n':
try:
snat_ip = os.getenv('SNAT6_TO_SOURCE')
snat_ipo = ipaddress.ip_address(snat_ip)
if type(snat_ipo) is ipaddress.IPv6Address:
snat6_thread = Thread(target=snat6,args=(snat_ip,))
snat6_thread.daemon = True
snat6_thread.start()
except ValueError:
print(os.getenv('SNAT6_TO_SOURCE') + ' is not a valid IPv6 address')
autopurge_thread = Thread(target=autopurge)
autopurge_thread.daemon = True
autopurge_thread.start()
mailcowchainwatch_thread = Thread(target=mailcowChainOrder)
mailcowchainwatch_thread.daemon = True
mailcowchainwatch_thread.start()
blacklistupdate_thread = Thread(target=blacklistUpdate)
blacklistupdate_thread.daemon = True
blacklistupdate_thread.start()
whitelistupdate_thread = Thread(target=whitelistUpdate)
whitelistupdate_thread.daemon = True
whitelistupdate_thread.start()
signal.signal(signal.SIGTERM, quit)
atexit.register(clear)
while not quit_now:
time.sleep(0.5)
sys.exit(exit_code)

View File

@@ -0,0 +1,213 @@
import iptc
import time
class IPTables:
def __init__(self, chain_name, logger):
self.chain_name = chain_name
self.logger = logger
def initChainIPv4(self):
if not iptc.Chain(iptc.Table(iptc.Table.FILTER), self.chain_name) in iptc.Table(iptc.Table.FILTER).chains:
iptc.Table(iptc.Table.FILTER).create_chain(self.chain_name)
for c in ['FORWARD', 'INPUT']:
chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), c)
rule = iptc.Rule()
rule.src = '0.0.0.0/0'
rule.dst = '0.0.0.0/0'
target = iptc.Target(rule, self.chain_name)
rule.target = target
if rule not in chain.rules:
chain.insert_rule(rule)
def initChainIPv6(self):
if not iptc.Chain(iptc.Table6(iptc.Table6.FILTER), self.chain_name) in iptc.Table6(iptc.Table6.FILTER).chains:
iptc.Table6(iptc.Table6.FILTER).create_chain(self.chain_name)
for c in ['FORWARD', 'INPUT']:
chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), c)
rule = iptc.Rule6()
rule.src = '::/0'
rule.dst = '::/0'
target = iptc.Target(rule, self.chain_name)
rule.target = target
if rule not in chain.rules:
chain.insert_rule(rule)
def checkIPv4ChainOrder(self):
filter_table = iptc.Table(iptc.Table.FILTER)
filter_table.refresh()
return self.checkChainOrder(filter_table)
def checkIPv6ChainOrder(self):
filter_table = iptc.Table6(iptc.Table6.FILTER)
filter_table.refresh()
return self.checkChainOrder(filter_table)
def checkChainOrder(self, filter_table):
err = False
exit_code = None
forward_chain = iptc.Chain(filter_table, 'FORWARD')
input_chain = iptc.Chain(filter_table, 'INPUT')
for chain in [forward_chain, input_chain]:
target_found = False
for position, item in enumerate(chain.rules):
if item.target.name == self.chain_name:
target_found = True
if position > 2:
self.logger.logCrit('Error in %s chain: %s target not found, restarting container' % (chain.name, self.chain_name))
err = True
exit_code = 2
if not target_found:
self.logger.logCrit('Error in %s chain: %s target not found, restarting container' % (chain.name, self.chain_name))
err = True
exit_code = 2
return err, exit_code
def clearIPv4Table(self):
self.clearTable(iptc.Table(iptc.Table.FILTER))
def clearIPv6Table(self):
self.clearTable(iptc.Table6(iptc.Table6.FILTER))
def clearTable(self, filter_table):
filter_table.autocommit = False
forward_chain = iptc.Chain(filter_table, "FORWARD")
input_chain = iptc.Chain(filter_table, "INPUT")
mailcow_chain = iptc.Chain(filter_table, self.chain_name)
if mailcow_chain in filter_table.chains:
for rule in mailcow_chain.rules:
mailcow_chain.delete_rule(rule)
for rule in forward_chain.rules:
if rule.target.name == self.chain_name:
forward_chain.delete_rule(rule)
for rule in input_chain.rules:
if rule.target.name == self.chain_name:
input_chain.delete_rule(rule)
filter_table.delete_chain(self.chain_name)
filter_table.commit()
filter_table.refresh()
filter_table.autocommit = True
def banIPv4(self, source):
chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), self.chain_name)
rule = iptc.Rule()
rule.src = source
target = iptc.Target(rule, "REJECT")
rule.target = target
if rule in chain.rules:
return False
chain.insert_rule(rule)
return True
def banIPv6(self, source):
chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), self.chain_name)
rule = iptc.Rule6()
rule.src = source
target = iptc.Target(rule, "REJECT")
rule.target = target
if rule in chain.rules:
return False
chain.insert_rule(rule)
return True
def unbanIPv4(self, source):
chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), self.chain_name)
rule = iptc.Rule()
rule.src = source
target = iptc.Target(rule, "REJECT")
rule.target = target
if rule not in chain.rules:
return False
chain.delete_rule(rule)
return True
def unbanIPv6(self, source):
chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), self.chain_name)
rule = iptc.Rule6()
rule.src = source
target = iptc.Target(rule, "REJECT")
rule.target = target
if rule not in chain.rules:
return False
chain.delete_rule(rule)
return True
def snat4(self, snat_target, source):
try:
table = iptc.Table('nat')
table.refresh()
chain = iptc.Chain(table, 'POSTROUTING')
table.autocommit = False
new_rule = self.getSnat4Rule(snat_target, source)
if not chain.rules:
# if there are no rules in the chain, insert the new rule directly
self.logger.logInfo(f'Added POSTROUTING rule for source network {new_rule.src} to SNAT target {snat_target}')
chain.insert_rule(new_rule)
else:
for position, rule in enumerate(chain.rules):
if not hasattr(rule.target, 'parameter'):
continue
match = all((
new_rule.get_src() == rule.get_src(),
new_rule.get_dst() == rule.get_dst(),
new_rule.target.parameters == rule.target.parameters,
new_rule.target.name == rule.target.name
))
if position == 0:
if not match:
self.logger.logInfo(f'Added POSTROUTING rule for source network {new_rule.src} to SNAT target {snat_target}')
chain.insert_rule(new_rule)
else:
if match:
self.logger.logInfo(f'Remove rule for source network {new_rule.src} to SNAT target {snat_target} from POSTROUTING chain at position {position}')
chain.delete_rule(rule)
table.commit()
table.autocommit = True
return True
except:
self.logger.logCrit('Error running SNAT4, retrying...')
return False
def snat6(self, snat_target, source):
try:
table = iptc.Table6('nat')
table.refresh()
chain = iptc.Chain(table, 'POSTROUTING')
table.autocommit = False
new_rule = self.getSnat6Rule(snat_target, source)
if new_rule not in chain.rules:
self.logger.logInfo('Added POSTROUTING rule for source network %s to SNAT target %s' % (new_rule.src, snat_target))
chain.insert_rule(new_rule)
else:
for position, item in enumerate(chain.rules):
if item == new_rule:
if position != 0:
chain.delete_rule(new_rule)
table.commit()
table.autocommit = True
except:
self.logger.logCrit('Error running SNAT6, retrying...')
def getSnat4Rule(self, snat_target, source):
rule = iptc.Rule()
rule.src = source
rule.dst = '!' + rule.src
target = rule.create_target("SNAT")
target.to_source = snat_target
match = rule.create_match("comment")
match.comment = f'{int(round(time.time()))}'
return rule
def getSnat6Rule(self, snat_target, source):
rule = iptc.Rule6()
rule.src = source
rule.dst = '!' + rule.src
target = rule.create_target("SNAT")
target.to_source = snat_target
return rule
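A minimal usage sketch of this backend (requires python-iptables and NET_ADMIN; the logger instance and network are assumptions for the example):

from modules.Logger import Logger
from modules.IPTables import IPTables

tables = IPTables("MAILCOW", logger)   # logger: a Logger instance backed by redis
tables.initChainIPv4()                 # ensure the chain and the FORWARD/INPUT jumps exist
tables.banIPv4('203.0.113.0/24')       # returns False if the REJECT rule is already present
tables.unbanIPv4('203.0.113.0/24')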

View File

@@ -0,0 +1,23 @@
import time
import json
class Logger:
def __init__(self, redis):
self.r = redis
def log(self, priority, message):
tolog = {}
tolog['time'] = int(round(time.time()))
tolog['priority'] = priority
tolog['message'] = message
self.r.lpush('NETFILTER_LOG', json.dumps(tolog, ensure_ascii=False))
print(message)
def logWarn(self, message):
self.log('warn', message)
def logCrit(self, message):
self.log('crit', message)
def logInfo(self, message):
self.log('info', message)
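Entries end up as JSON strings in the NETFILTER_LOG redis list; a consumer sketch (connection parameters omitted):

import json
import redis

r = redis.StrictRedis(decode_responses=True)
raw = r.rpop('NETFILTER_LOG')   # lpush + rpop drains oldest-first
if raw:
    entry = json.loads(raw)
    print(entry['time'], entry['priority'], entry['message'])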

View File

@@ -0,0 +1,495 @@
import nftables
import ipaddress
class NFTables:
def __init__(self, chain_name, logger):
self.chain_name = chain_name
self.logger = logger
self.nft = nftables.Nftables()
self.nft.set_json_output(True)
self.nft.set_handle_output(True)
self.nft_chain_names = {'ip': {'filter': {'input': '', 'forward': ''}, 'nat': {'postrouting': ''} },
'ip6': {'filter': {'input': '', 'forward': ''}, 'nat': {'postrouting': ''} } }
self.search_current_chains()
def initChainIPv4(self):
self.insert_mailcow_chains("ip")
def initChainIPv6(self):
self.insert_mailcow_chains("ip6")
def checkIPv4ChainOrder(self):
return self.checkChainOrder("ip")
def checkIPv6ChainOrder(self):
return self.checkChainOrder("ip6")
def checkChainOrder(self, filter_table):
err = False
exit_code = None
for chain in ['input', 'forward']:
chain_position = self.check_mailcow_chains(filter_table, chain)
if chain_position is None: continue
if chain_position is False:
self.logger.logCrit(f'MAILCOW target not found in {filter_table} {chain} table, restarting container to fix it...')
err = True
exit_code = 2
if chain_position > 0:
self.logger.logCrit(f'MAILCOW target is in position {chain_position} in the {filter_table} {chain} table, restarting container to fix it...')
err = True
exit_code = 2
return err, exit_code
def clearIPv4Table(self):
self.clearTable("ip")
def clearIPv6Table(self):
self.clearTable("ip6")
def clearTable(self, _family):
is_empty_dict = True
json_command = self.get_base_dict()
chain_handle = self.get_chain_handle(_family, "filter", self.chain_name)
# if there is no handle, the chain doesn't exist
if chain_handle is not None:
is_empty_dict = False
# flush chain
mailcow_chain = {'family': _family, 'table': 'filter', 'name': self.chain_name}
flush_chain = {'flush': {'chain': mailcow_chain}}
json_command["nftables"].append(flush_chain)
# remove the mailcow jump rules from the input and forward chains
chains_family = [self.nft_chain_names[_family]['filter']['input'],
self.nft_chain_names[_family]['filter']['forward'] ]
for chain_base in chains_family:
if not chain_base: continue
rules_handle = self.get_rules_handle(_family, "filter", chain_base)
if rules_handle is not None:
for r_handle in rules_handle:
is_empty_dict = False
mailcow_rule = {'family':_family,
'table': 'filter',
'chain': chain_base,
'handle': r_handle }
delete_rules = {'delete': {'rule': mailcow_rule} }
json_command["nftables"].append(delete_rules)
# remove chain
# after delete all rules referencing this chain
if chain_handle is not None:
mc_chain_handle = {'family':_family,
'table': 'filter',
'name': self.chain_name,
'handle': chain_handle }
delete_chain = {'delete': {'chain': mc_chain_handle} }
json_command["nftables"].append(delete_chain)
if is_empty_dict == False:
if self.nft_exec_dict(json_command):
self.logger.logInfo(f"Clear completed: {_family}")
def banIPv4(self, source):
ban_dict = self.get_ban_ip_dict(source, "ip")
return self.nft_exec_dict(ban_dict)
def banIPv6(self, source):
ban_dict = self.get_ban_ip_dict(source, "ip6")
return self.nft_exec_dict(ban_dict)
def unbanIPv4(self, source):
unban_dict = self.get_unban_ip_dict(source, "ip")
if not unban_dict:
return False
return self.nft_exec_dict(unban_dict)
def unbanIPv6(self, source):
unban_dict = self.get_unban_ip_dict(source, "ip6")
if not unban_dict:
return False
return self.nft_exec_dict(unban_dict)
def snat4(self, snat_target, source):
self.snat_rule("ip", snat_target, source)
def snat6(self, snat_target, source):
self.snat_rule("ip6", snat_target, source)
def nft_exec_dict(self, query: dict):
if not query: return False
rc, output, error = self.nft.json_cmd(query)
if rc != 0:
#self.logger.logCrit(f"Nftables Error: {error}")
return False
# Prevent returning False or empty string on commands that do not produce output
if rc == 0 and len(output) == 0:
return True
return output
def get_base_dict(self):
return {'nftables': [{ 'metainfo': { 'json_schema_version': 1} } ] }
def search_current_chains(self):
nft_chain_priority = {'ip': {'filter': {'input': None, 'forward': None}, 'nat': {'postrouting': None} },
'ip6': {'filter': {'input': None, 'forward': None}, 'nat': {'postrouting': None} } }
# Command: 'nft list chains'
_list = {'list' : {'chains': 'null'} }
command = self.get_base_dict()
command['nftables'].append(_list)
kernel_ruleset = self.nft_exec_dict(command)
if kernel_ruleset:
for _object in kernel_ruleset['nftables']:
chain = _object.get("chain")
if not chain: continue
_family = chain['family']
_table = chain['table']
_hook = chain.get("hook")
_priority = chain.get("prio")
_name = chain['name']
if _family not in self.nft_chain_names: continue
if _table not in self.nft_chain_names[_family]: continue
if _hook not in self.nft_chain_names[_family][_table]: continue
if _priority is None: continue
_saved_priority = nft_chain_priority[_family][_table][_hook]
if _saved_priority is None or _priority < _saved_priority:
# at this point, we know the chain has:
# hook and priority set
# and it has the lowest priority
nft_chain_priority[_family][_table][_hook] = _priority
self.nft_chain_names[_family][_table][_hook] = _name
def search_for_chain(self, kernel_ruleset: dict, chain_name: str):
found = False
for _object in kernel_ruleset["nftables"]:
chain = _object.get("chain")
if not chain:
continue
ch_name = chain.get("name")
if ch_name == chain_name:
found = True
break
return found
def get_chain_dict(self, _family: str, _name: str):
# nft (add | create) chain [<family>] <table> <name>
_chain_opts = {'family': _family, 'table': 'filter', 'name': _name }
_add = {'add': {'chain': _chain_opts} }
final_chain = self.get_base_dict()
final_chain["nftables"].append(_add)
return final_chain
def get_mailcow_jump_rule_dict(self, _family: str, _chain: str):
_jump_rule = self.get_base_dict()
_expr_opt=[]
_expr_counter = {'family': _family, 'table': 'filter', 'packets': 0, 'bytes': 0}
_counter_dict = {'counter': _expr_counter}
_expr_opt.append(_counter_dict)
_jump_opts = {'jump': {'target': self.chain_name} }
_expr_opt.append(_jump_opts)
_rule_params = {'family': _family,
'table': 'filter',
'chain': _chain,
'expr': _expr_opt,
'comment': "mailcow" }
_add_rule = {'insert': {'rule': _rule_params} }
_jump_rule["nftables"].append(_add_rule)
return _jump_rule
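For a detected input chain named INPUT (and the chain name MAILCOW this class is constructed with), get_mailcow_jump_rule_dict('ip', 'INPUT') produces a payload of this shape:

jump_rule = {
    'nftables': [
        {'metainfo': {'json_schema_version': 1}},
        {'insert': {'rule': {
            'family': 'ip', 'table': 'filter', 'chain': 'INPUT',
            'expr': [
                {'counter': {'family': 'ip', 'table': 'filter', 'packets': 0, 'bytes': 0}},
                {'jump': {'target': 'MAILCOW'}},
            ],
            'comment': 'mailcow',
        }}},
    ]
}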
def insert_mailcow_chains(self, _family: str):
nft_input_chain = self.nft_chain_names[_family]['filter']['input']
nft_forward_chain = self.nft_chain_names[_family]['filter']['forward']
# Command: 'nft list table <family> filter'
_table_opts = {'family': _family, 'name': 'filter'}
_list = {'list': {'table': _table_opts} }
command = self.get_base_dict()
command['nftables'].append(_list)
kernel_ruleset = self.nft_exec_dict(command)
if kernel_ruleset:
# chain
if not self.search_for_chain(kernel_ruleset, self.chain_name):
cadena = self.get_chain_dict(_family, self.chain_name)
if self.nft_exec_dict(cadena):
self.logger.logInfo(f"MAILCOW {_family} chain created successfully.")
input_jump_found, forward_jump_found = False, False
for _object in kernel_ruleset["nftables"]:
if not _object.get("rule"):
continue
rule = _object["rule"]
if nft_input_chain and rule["chain"] == nft_input_chain:
if rule.get("comment") and rule["comment"] == "mailcow":
input_jump_found = True
if nft_forward_chain and rule["chain"] == nft_forward_chain:
if rule.get("comment") and rule["comment"] == "mailcow":
forward_jump_found = True
if not input_jump_found:
command = self.get_mailcow_jump_rule_dict(_family, nft_input_chain)
self.nft_exec_dict(command)
if not forward_jump_found:
command = self.get_mailcow_jump_rule_dict(_family, nft_forward_chain)
self.nft_exec_dict(command)
def delete_nat_rule(self, _family:str, _chain: str, _handle:str):
delete_command = self.get_base_dict()
_rule_opts = {'family': _family,
'table': 'nat',
'chain': _chain,
'handle': _handle }
_delete = {'delete': {'rule': _rule_opts} }
delete_command["nftables"].append(_delete)
return self.nft_exec_dict(delete_command)
def snat_rule(self, _family: str, snat_target: str, source_address: str):
chain_name = self.nft_chain_names[_family]['nat']['postrouting']
# no postrouting chain, may occur if docker has ipv6 disabled.
if not chain_name: return
# Command: nft list chain <family> nat <chain_name>
_chain_opts = {'family': _family, 'table': 'nat', 'name': chain_name}
_list = {'list':{'chain': _chain_opts} }
command = self.get_base_dict()
command['nftables'].append(_list)
kernel_ruleset = self.nft_exec_dict(command)
if not kernel_ruleset:
return
rule_position = 0
rule_handle = None
rule_found = False
for _object in kernel_ruleset["nftables"]:
if not _object.get("rule"):
continue
rule = _object["rule"]
if not rule.get("comment") or not rule["comment"] == "mailcow":
rule_position += 1
continue
rule_found = True
rule_handle = rule["handle"]
break
dest_net = ipaddress.ip_network(source_address)
target_net = ipaddress.ip_network(snat_target)
if rule_found:
saddr_ip = rule["expr"][0]["match"]["right"]["prefix"]["addr"]
saddr_len = int(rule["expr"][0]["match"]["right"]["prefix"]["len"])
daddr_ip = rule["expr"][1]["match"]["right"]["prefix"]["addr"]
daddr_len = int(rule["expr"][1]["match"]["right"]["prefix"]["len"])
target_ip = rule["expr"][3]["snat"]["addr"]
saddr_net = ipaddress.ip_network(saddr_ip + '/' + str(saddr_len))
daddr_net = ipaddress.ip_network(daddr_ip + '/' + str(daddr_len))
current_target_net = ipaddress.ip_network(target_ip)
match = all((
dest_net == saddr_net,
dest_net == daddr_net,
target_net == current_target_net
))
try:
if rule_position == 0:
if not match:
# Position 0: it is a mailcow rule, but it does not have the same parameters
if self.delete_nat_rule(_family, chain_name, rule_handle):
self.logger.logInfo(f'Remove rule for source network {saddr_net} to SNAT target {target_net} from {_family} nat {chain_name} chain, rule does not match configured parameters')
else:
# Position > 0 and is mailcow rule
if self.delete_nat_rule(_family, chain_name, rule_handle):
self.logger.logInfo(f'Remove rule for source network {saddr_net} to SNAT target {target_net} from {_family} nat {chain_name} chain, rule is at position {rule_position}')
except:
self.logger.logCrit(f"Error running SNAT on {_family}, retrying..." )
else:
# rule not found
json_command = self.get_base_dict()
try:
snat_dict = {'snat': {'addr': str(target_net.network_address)} }
expr_counter = {'family': _family, 'table': 'nat', 'packets': 0, 'bytes': 0}
counter_dict = {'counter': expr_counter}
prefix_dict = {'prefix': {'addr': str(dest_net.network_address), 'len': int(dest_net.prefixlen)} }
payload_dict = {'payload': {'protocol': _family, 'field': "saddr"} }
match_dict1 = {'match': {'op': '==', 'left': payload_dict, 'right': prefix_dict} }
payload_dict2 = {'payload': {'protocol': _family, 'field': "daddr"} }
match_dict2 = {'match': {'op': '!=', 'left': payload_dict2, 'right': prefix_dict } }
expr_list = [
match_dict1,
match_dict2,
counter_dict,
snat_dict
]
rule_fields = {'family': _family,
'table': 'nat',
'chain': chain_name,
'comment': "mailcow",
'expr': expr_list }
insert_dict = {'insert': {'rule': rule_fields} }
json_command["nftables"].append(insert_dict)
if self.nft_exec_dict(json_command):
self.logger.logInfo(f'Added {_family} nat {chain_name} rule for source network {dest_net} to {target_net}')
except:
self.logger.logCrit(f"Error running SNAT on {_family}, retrying...")
def get_chain_handle(self, _family: str, _table: str, chain_name: str):
chain_handle = None
# Command: 'nft list chains {family}'
_list = {'list': {'chains': {'family': _family} } }
command = self.get_base_dict()
command['nftables'].append(_list)
kernel_ruleset = self.nft_exec_dict(command)
if kernel_ruleset:
for _object in kernel_ruleset["nftables"]:
if not _object.get("chain"):
continue
chain = _object["chain"]
if chain["family"] == _family and chain["table"] == _table and chain["name"] == chain_name:
chain_handle = chain["handle"]
break
return chain_handle
def get_rules_handle(self, _family: str, _table: str, chain_name: str):
rule_handle = []
# Command: 'nft list chain {family} {table} {chain_name}'
_chain_opts = {'family': _family, 'table': _table, 'name': chain_name}
_list = {'list': {'chain': _chain_opts} }
command = self.get_base_dict()
command['nftables'].append(_list)
kernel_ruleset = self.nft_exec_dict(command)
if kernel_ruleset:
for _object in kernel_ruleset["nftables"]:
if not _object.get("rule"):
continue
rule = _object["rule"]
if rule["family"] == _family and rule["table"] == _table and rule["chain"] == chain_name:
if rule.get("comment") and rule["comment"] == "mailcow":
rule_handle.append(rule["handle"])
return rule_handle
def get_ban_ip_dict(self, ipaddr: str, _family: str):
json_command = self.get_base_dict()
expr_opt = []
ipaddr_net = ipaddress.ip_network(ipaddr)
right_dict = {'prefix': {'addr': str(ipaddr_net.network_address), 'len': int(ipaddr_net.prefixlen) } }
left_dict = {'payload': {'protocol': _family, 'field': 'saddr'} }
match_dict = {'op': '==', 'left': left_dict, 'right': right_dict }
expr_opt.append({'match': match_dict})
counter_dict = {'counter': {'family': _family, 'table': "filter", 'packets': 0, 'bytes': 0} }
expr_opt.append(counter_dict)
expr_opt.append({'drop': "null"})
rule_dict = {'family': _family, 'table': "filter", 'chain': self.chain_name, 'expr': expr_opt}
base_dict = {'insert': {'rule': rule_dict} }
json_command["nftables"].append(base_dict)
return json_command
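As a concrete example (address illustrative), banIPv4('203.0.113.7') inserts a drop rule built from this JSON:

ban_rule = {
    'nftables': [
        {'metainfo': {'json_schema_version': 1}},
        {'insert': {'rule': {
            'family': 'ip', 'table': 'filter', 'chain': 'MAILCOW',
            'expr': [
                {'match': {'op': '==',
                           'left': {'payload': {'protocol': 'ip', 'field': 'saddr'}},
                           'right': {'prefix': {'addr': '203.0.113.7', 'len': 32}}}},
                {'counter': {'family': 'ip', 'table': 'filter', 'packets': 0, 'bytes': 0}},
                {'drop': 'null'},
            ],
        }}},
    ]
}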
def get_unban_ip_dict(self, ipaddr:str, _family: str):
json_command = self.get_base_dict()
# Command: 'nft list chain {s_family} filter MAILCOW'
_chain_opts = {'family': _family, 'table': 'filter', 'name': self.chain_name}
_list = {'list': {'chain': _chain_opts} }
command = self.get_base_dict()
command['nftables'].append(_list)
kernel_ruleset = self.nft_exec_dict(command)
rule_handle = None
if kernel_ruleset:
for _object in kernel_ruleset["nftables"]:
if not _object.get("rule"):
continue
rule = _object["rule"]["expr"][0]["match"]
left_opt = rule["left"]["payload"]
if not left_opt["protocol"] == _family:
continue
if not left_opt["field"] =="saddr":
continue
# ip currently banned
rule_right = rule["right"]
if isinstance(rule_right, dict):
current_rule_ip = rule_right["prefix"]["addr"] + '/' + str(rule_right["prefix"]["len"])
else:
current_rule_ip = rule_right
current_rule_net = ipaddress.ip_network(current_rule_ip)
# ip to ban
candidate_net = ipaddress.ip_network(ipaddr)
if current_rule_net == candidate_net:
rule_handle = _object["rule"]["handle"]
break
if rule_handle is not None:
mailcow_rule = {'family': _family, 'table': 'filter', 'chain': self.chain_name, 'handle': rule_handle}
delete_rule = {'delete': {'rule': mailcow_rule} }
json_command["nftables"].append(delete_rule)
else:
return False
return json_command
def check_mailcow_chains(self, family: str, chain: str):
position = 0
rule_found = False
chain_name = self.nft_chain_names[family]['filter'][chain]
if not chain_name: return None
_chain_opts = {'family': family, 'table': 'filter', 'name': chain_name}
_list = {'list': {'chain': _chain_opts}}
command = self.get_base_dict()
command['nftables'].append(_list)
kernel_ruleset = self.nft_exec_dict(command)
if kernel_ruleset:
for _object in kernel_ruleset["nftables"]:
if not _object.get("rule"):
continue
rule = _object["rule"]
if rule.get("comment") and rule["comment"] == "mailcow":
rule_found = True
break
position += 1
return position if rule_found else False

View File

@@ -1,587 +0,0 @@
#!/usr/bin/env python3
import re
import os
import sys
import time
import atexit
import signal
import ipaddress
from collections import Counter
from random import randint
from threading import Thread
from threading import Lock
import redis
import json
import iptc
import dns.resolver
import dns.exception
while True:
try:
redis_slaveof_ip = os.getenv('REDIS_SLAVEOF_IP', '')
redis_slaveof_port = os.getenv('REDIS_SLAVEOF_PORT', '')
if "".__eq__(redis_slaveof_ip):
r = redis.StrictRedis(host=os.getenv('IPV4_NETWORK', '172.22.1') + '.249', decode_responses=True, port=6379, db=0)
else:
r = redis.StrictRedis(host=redis_slaveof_ip, decode_responses=True, port=redis_slaveof_port, db=0)
r.ping()
except Exception as ex:
print('%s - trying again in 3 seconds' % (ex))
time.sleep(3)
else:
break
pubsub = r.pubsub()
WHITELIST = []
BLACKLIST= []
bans = {}
quit_now = False
exit_code = 0
lock = Lock()
def log(priority, message):
tolog = {}
tolog['time'] = int(round(time.time()))
tolog['priority'] = priority
tolog['message'] = message
r.lpush('NETFILTER_LOG', json.dumps(tolog, ensure_ascii=False))
print(message)
def logWarn(message):
log('warn', message)
def logCrit(message):
log('crit', message)
def logInfo(message):
log('info', message)
def refreshF2boptions():
global f2boptions
global quit_now
global exit_code
if not r.get('F2B_OPTIONS'):
f2boptions = {}
f2boptions['ban_time'] = int
f2boptions['max_attempts'] = int
f2boptions['retry_window'] = int
f2boptions['netban_ipv4'] = int
f2boptions['netban_ipv6'] = int
f2boptions['ban_time'] = r.get('F2B_BAN_TIME') or 1800
f2boptions['max_attempts'] = r.get('F2B_MAX_ATTEMPTS') or 10
f2boptions['retry_window'] = r.get('F2B_RETRY_WINDOW') or 600
f2boptions['netban_ipv4'] = r.get('F2B_NETBAN_IPV4') or 32
f2boptions['netban_ipv6'] = r.get('F2B_NETBAN_IPV6') or 128
r.set('F2B_OPTIONS', json.dumps(f2boptions, ensure_ascii=False))
else:
try:
f2boptions = {}
f2boptions = json.loads(r.get('F2B_OPTIONS'))
except ValueError:
print('Error loading F2B options: F2B_OPTIONS is not json')
quit_now = True
exit_code = 2
def refreshF2bregex():
global f2bregex
global quit_now
global exit_code
if not r.get('F2B_REGEX'):
f2bregex = {}
f2bregex[1] = 'mailcow UI: Invalid password for .+ by ([0-9a-f\.:]+)'
f2bregex[2] = 'Rspamd UI: Invalid password by ([0-9a-f\.:]+)'
f2bregex[3] = 'warning: .*\[([0-9a-f\.:]+)\]: SASL .+ authentication failed: (?!.*Connection lost to authentication server).+'
f2bregex[4] = 'warning: non-SMTP command from .*\[([0-9a-f\.:]+)]:.+'
f2bregex[5] = 'NOQUEUE: reject: RCPT from \[([0-9a-f\.:]+)].+Protocol error.+'
f2bregex[6] = '-login: Disconnected.+ \(auth failed, .+\): user=.*, method=.+, rip=([0-9a-f\.:]+),'
f2bregex[7] = '-login: Aborted login.+ \(auth failed .+\): user=.+, rip=([0-9a-f\.:]+), lip.+'
f2bregex[8] = '-login: Aborted login.+ \(tried to use disallowed .+\): user=.+, rip=([0-9a-f\.:]+), lip.+'
f2bregex[9] = 'SOGo.+ Login from \'([0-9a-f\.:]+)\' for user .+ might not have worked'
f2bregex[10] = '([0-9a-f\.:]+) \"GET \/SOGo\/.* HTTP.+\" 403 .+'
r.set('F2B_REGEX', json.dumps(f2bregex, ensure_ascii=False))
else:
try:
f2bregex = {}
f2bregex = json.loads(r.get('F2B_REGEX'))
except ValueError:
print('Error loading F2B options: F2B_REGEX is not json')
quit_now = True
exit_code = 2
if r.exists('F2B_LOG'):
r.rename('F2B_LOG', 'NETFILTER_LOG')
def mailcowChainOrder():
global lock
global quit_now
global exit_code
while not quit_now:
time.sleep(10)
with lock:
filter4_table = iptc.Table(iptc.Table.FILTER)
filter6_table = iptc.Table6(iptc.Table6.FILTER)
filter4_table.refresh()
filter6_table.refresh()
for f in [filter4_table, filter6_table]:
forward_chain = iptc.Chain(f, 'FORWARD')
input_chain = iptc.Chain(f, 'INPUT')
for chain in [forward_chain, input_chain]:
target_found = False
for position, item in enumerate(chain.rules):
if item.target.name == 'MAILCOW':
target_found = True
if position > 2:
logCrit('Error in %s chain order: MAILCOW on position %d, restarting container' % (chain.name, position))
quit_now = True
exit_code = 2
if not target_found:
logCrit('Error in %s chain: MAILCOW target not found, restarting container' % (chain.name))
quit_now = True
exit_code = 2
def ban(address):
global lock
refreshF2boptions()
BAN_TIME = int(f2boptions['ban_time'])
MAX_ATTEMPTS = int(f2boptions['max_attempts'])
RETRY_WINDOW = int(f2boptions['retry_window'])
NETBAN_IPV4 = '/' + str(f2boptions['netban_ipv4'])
NETBAN_IPV6 = '/' + str(f2boptions['netban_ipv6'])
ip = ipaddress.ip_address(address)
if type(ip) is ipaddress.IPv6Address and ip.ipv4_mapped:
ip = ip.ipv4_mapped
address = str(ip)
if ip.is_private or ip.is_loopback:
return
self_network = ipaddress.ip_network(address)
with lock:
temp_whitelist = set(WHITELIST)
if temp_whitelist:
for wl_key in temp_whitelist:
wl_net = ipaddress.ip_network(wl_key, False)
if wl_net.overlaps(self_network):
logInfo('Address %s is whitelisted by rule %s' % (self_network, wl_net))
return
net = ipaddress.ip_network((address + (NETBAN_IPV4 if type(ip) is ipaddress.IPv4Address else NETBAN_IPV6)), strict=False)
net = str(net)
if not net in bans or time.time() - bans[net]['last_attempt'] > RETRY_WINDOW:
bans[net] = { 'attempts': 0 }
active_window = RETRY_WINDOW
else:
active_window = time.time() - bans[net]['last_attempt']
bans[net]['attempts'] += 1
bans[net]['last_attempt'] = time.time()
active_window = time.time() - bans[net]['last_attempt']
if bans[net]['attempts'] >= MAX_ATTEMPTS:
cur_time = int(round(time.time()))
logCrit('Banning %s for %d minutes' % (net, BAN_TIME / 60))
if type(ip) is ipaddress.IPv4Address:
with lock:
chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW')
rule = iptc.Rule()
rule.src = net
target = iptc.Target(rule, "REJECT")
rule.target = target
if rule not in chain.rules:
chain.insert_rule(rule)
else:
with lock:
chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), 'MAILCOW')
rule = iptc.Rule6()
rule.src = net
target = iptc.Target(rule, "REJECT")
rule.target = target
if rule not in chain.rules:
chain.insert_rule(rule)
r.hset('F2B_ACTIVE_BANS', '%s' % net, cur_time + BAN_TIME)
else:
logWarn('%d more attempts in the next %d seconds until %s is banned' % (MAX_ATTEMPTS - bans[net]['attempts'], RETRY_WINDOW, net))
def unban(net):
global lock
if not net in bans:
logInfo('%s is not banned, skipping unban and deleting from queue (if any)' % net)
r.hdel('F2B_QUEUE_UNBAN', '%s' % net)
return
logInfo('Unbanning %s' % net)
if type(ipaddress.ip_network(net)) is ipaddress.IPv4Network:
with lock:
chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW')
rule = iptc.Rule()
rule.src = net
target = iptc.Target(rule, "REJECT")
rule.target = target
if rule in chain.rules:
chain.delete_rule(rule)
else:
with lock:
chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), 'MAILCOW')
rule = iptc.Rule6()
rule.src = net
target = iptc.Target(rule, "REJECT")
rule.target = target
if rule in chain.rules:
chain.delete_rule(rule)
r.hdel('F2B_ACTIVE_BANS', '%s' % net)
r.hdel('F2B_QUEUE_UNBAN', '%s' % net)
if net in bans:
del bans[net]
def permBan(net, unban=False):
global lock
if type(ipaddress.ip_network(net, strict=False)) is ipaddress.IPv4Network:
with lock:
chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW')
rule = iptc.Rule()
rule.src = net
target = iptc.Target(rule, "REJECT")
rule.target = target
if rule not in chain.rules and not unban:
logCrit('Add host/network %s to blacklist' % net)
chain.insert_rule(rule)
r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time())))
elif rule in chain.rules and unban:
logCrit('Remove host/network %s from blacklist' % net)
chain.delete_rule(rule)
r.hdel('F2B_PERM_BANS', '%s' % net)
else:
with lock:
chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), 'MAILCOW')
rule = iptc.Rule6()
rule.src = net
target = iptc.Target(rule, "REJECT")
rule.target = target
if rule not in chain.rules and not unban:
logCrit('Add host/network %s to blacklist' % net)
chain.insert_rule(rule)
r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time())))
elif rule in chain.rules and unban:
logCrit('Remove host/network %s from blacklist' % net)
chain.delete_rule(rule)
r.hdel('F2B_PERM_BANS', '%s' % net)
def quit(signum, frame):
global quit_now
quit_now = True
def clear():
global lock
logInfo('Clearing all bans')
for net in bans.copy():
unban(net)
with lock:
filter4_table = iptc.Table(iptc.Table.FILTER)
filter6_table = iptc.Table6(iptc.Table6.FILTER)
for filter_table in [filter4_table, filter6_table]:
filter_table.autocommit = False
forward_chain = iptc.Chain(filter_table, "FORWARD")
input_chain = iptc.Chain(filter_table, "INPUT")
mailcow_chain = iptc.Chain(filter_table, "MAILCOW")
if mailcow_chain in filter_table.chains:
for rule in mailcow_chain.rules:
mailcow_chain.delete_rule(rule)
for rule in forward_chain.rules:
if rule.target.name == 'MAILCOW':
forward_chain.delete_rule(rule)
for rule in input_chain.rules:
if rule.target.name == 'MAILCOW':
input_chain.delete_rule(rule)
filter_table.delete_chain("MAILCOW")
filter_table.commit()
filter_table.refresh()
filter_table.autocommit = True
r.delete('F2B_ACTIVE_BANS')
r.delete('F2B_PERM_BANS')
pubsub.unsubscribe()
def watch():
logInfo('Watching Redis channel F2B_CHANNEL')
pubsub.subscribe('F2B_CHANNEL')
global quit_now
global exit_code
while not quit_now:
try:
for item in pubsub.listen():
refreshF2bregex()
for rule_id, rule_regex in f2bregex.items():
if item['data'] and item['type'] == 'message':
try:
result = re.search(rule_regex, item['data'])
except re.error:
result = False
if result:
addr = result.group(1)
ip = ipaddress.ip_address(addr)
if ip.is_private or ip.is_loopback:
continue
logWarn('%s matched rule id %s (%s)' % (addr, rule_id, item['data']))
ban(addr)
except Exception as ex:
logWarn('Error reading log line from pubsub')
quit_now = True
exit_code = 2
def snat4(snat_target):
global lock
global quit_now
def get_snat4_rule():
rule = iptc.Rule()
rule.src = os.getenv('IPV4_NETWORK', '172.22.1') + '.0/24'
rule.dst = '!' + rule.src
target = rule.create_target("SNAT")
target.to_source = snat_target
match = rule.create_match("comment")
match.comment = f'{int(round(time.time()))}'
return rule
while not quit_now:
time.sleep(10)
with lock:
try:
table = iptc.Table('nat')
table.refresh()
chain = iptc.Chain(table, 'POSTROUTING')
table.autocommit = False
new_rule = get_snat4_rule()
for position, rule in enumerate(chain.rules):
match = all((
new_rule.get_src() == rule.get_src(),
new_rule.get_dst() == rule.get_dst(),
new_rule.target.parameters == rule.target.parameters,
new_rule.target.name == rule.target.name
))
if position == 0:
if not match:
logInfo(f'Added POSTROUTING rule for source network {new_rule.src} to SNAT target {snat_target}')
chain.insert_rule(new_rule)
else:
if match:
logInfo(f'Remove rule for source network {new_rule.src} to SNAT target {snat_target} from POSTROUTING chain at position {position}')
chain.delete_rule(rule)
table.commit()
table.autocommit = True
except:
print('Error running SNAT4, retrying...')
def snat6(snat_target):
global lock
global quit_now
def get_snat6_rule():
rule = iptc.Rule6()
rule.src = os.getenv('IPV6_NETWORK', 'fd4d:6169:6c63:6f77::/64')
rule.dst = '!' + rule.src
target = rule.create_target("SNAT")
target.to_source = snat_target
return rule
while not quit_now:
time.sleep(10)
with lock:
try:
table = iptc.Table6('nat')
table.refresh()
chain = iptc.Chain(table, 'POSTROUTING')
table.autocommit = False
if get_snat6_rule() not in chain.rules:
logInfo('Added POSTROUTING rule for source network %s to SNAT target %s' % (get_snat6_rule().src, snat_target))
chain.insert_rule(get_snat6_rule())
table.commit()
else:
for position, item in enumerate(chain.rules):
if item == get_snat6_rule():
if position != 0:
chain.delete_rule(get_snat6_rule())
table.commit()
table.autocommit = True
except:
print('Error running SNAT6, retrying...')
def autopurge():
while not quit_now:
time.sleep(10)
refreshF2boptions()
BAN_TIME = int(f2boptions['ban_time'])
MAX_ATTEMPTS = int(f2boptions['max_attempts'])
QUEUE_UNBAN = r.hgetall('F2B_QUEUE_UNBAN')
if QUEUE_UNBAN:
for net in QUEUE_UNBAN:
unban(str(net))
for net in bans.copy():
if bans[net]['attempts'] >= MAX_ATTEMPTS:
if time.time() - bans[net]['last_attempt'] > BAN_TIME:
unban(net)
def isIpNetwork(address):
try:
ipaddress.ip_network(address, False)
except ValueError:
return False
return True
def genNetworkList(list):
resolver = dns.resolver.Resolver()
hostnames = []
networks = []
for key in list:
if isIpNetwork(key):
networks.append(key)
else:
hostnames.append(key)
for hostname in hostnames:
hostname_ips = []
for rdtype in ['A', 'AAAA']:
try:
answer = resolver.resolve(qname=hostname, rdtype=rdtype, lifetime=3)
except dns.exception.Timeout:
logInfo('Hostname %s timedout on resolve' % hostname)
break
except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
continue
except dns.exception.DNSException as dnsexception:
logInfo('%s' % dnsexception)
continue
for rdata in answer:
hostname_ips.append(rdata.to_text())
networks.extend(hostname_ips)
return set(networks)
def whitelistUpdate():
global lock
global quit_now
global WHITELIST
while not quit_now:
start_time = time.time()
list = r.hgetall('F2B_WHITELIST')
new_whitelist = []
if list:
new_whitelist = genNetworkList(list)
with lock:
if Counter(new_whitelist) != Counter(WHITELIST):
WHITELIST = new_whitelist
logInfo('Whitelist was changed, it has %s entries' % len(WHITELIST))
time.sleep(60.0 - ((time.time() - start_time) % 60.0))
def blacklistUpdate():
global quit_now
global BLACKLIST
while not quit_now:
start_time = time.time()
list = r.hgetall('F2B_BLACKLIST')
new_blacklist = []
if list:
new_blacklist = genNetworkList(list)
if Counter(new_blacklist) != Counter(BLACKLIST):
addban = set(new_blacklist).difference(BLACKLIST)
delban = set(BLACKLIST).difference(new_blacklist)
BLACKLIST = new_blacklist
logInfo('Blacklist was changed, it has %s entries' % len(BLACKLIST))
if addban:
for net in addban:
permBan(net=net)
if delban:
for net in delban:
permBan(net=net, unban=True)
time.sleep(60.0 - ((time.time() - start_time) % 60.0))
def initChain():
# Is called before threads start, no locking
print("Initializing mailcow netfilter chain")
# IPv4
if not iptc.Chain(iptc.Table(iptc.Table.FILTER), "MAILCOW") in iptc.Table(iptc.Table.FILTER).chains:
iptc.Table(iptc.Table.FILTER).create_chain("MAILCOW")
for c in ['FORWARD', 'INPUT']:
chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), c)
rule = iptc.Rule()
rule.src = '0.0.0.0/0'
rule.dst = '0.0.0.0/0'
target = iptc.Target(rule, "MAILCOW")
rule.target = target
if rule not in chain.rules:
chain.insert_rule(rule)
# IPv6
if not iptc.Chain(iptc.Table6(iptc.Table6.FILTER), "MAILCOW") in iptc.Table6(iptc.Table6.FILTER).chains:
iptc.Table6(iptc.Table6.FILTER).create_chain("MAILCOW")
for c in ['FORWARD', 'INPUT']:
chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), c)
rule = iptc.Rule6()
rule.src = '::/0'
rule.dst = '::/0'
target = iptc.Target(rule, "MAILCOW")
rule.target = target
if rule not in chain.rules:
chain.insert_rule(rule)
if __name__ == '__main__':
# In case a previous session was killed without cleanup
clear()
# Reinit MAILCOW chain
initChain()
watch_thread = Thread(target=watch)
watch_thread.daemon = True
watch_thread.start()
if os.getenv('SNAT_TO_SOURCE') and os.getenv('SNAT_TO_SOURCE') != 'n':
try:
snat_ip = os.getenv('SNAT_TO_SOURCE')
snat_ipo = ipaddress.ip_address(snat_ip)
if type(snat_ipo) is ipaddress.IPv4Address:
snat4_thread = Thread(target=snat4,args=(snat_ip,))
snat4_thread.daemon = True
snat4_thread.start()
except ValueError:
print(os.getenv('SNAT_TO_SOURCE') + ' is not a valid IPv4 address')
if os.getenv('SNAT6_TO_SOURCE') and os.getenv('SNAT6_TO_SOURCE') != 'n':
try:
snat_ip = os.getenv('SNAT6_TO_SOURCE')
snat_ipo = ipaddress.ip_address(snat_ip)
if type(snat_ipo) is ipaddress.IPv6Address:
snat6_thread = Thread(target=snat6,args=(snat_ip,))
snat6_thread.daemon = True
snat6_thread.start()
except ValueError:
print(os.getenv('SNAT6_TO_SOURCE') + ' is not a valid IPv6 address')
autopurge_thread = Thread(target=autopurge)
autopurge_thread.daemon = True
autopurge_thread.start()
mailcowchainwatch_thread = Thread(target=mailcowChainOrder)
mailcowchainwatch_thread.daemon = True
mailcowchainwatch_thread.start()
blacklistupdate_thread = Thread(target=blacklistUpdate)
blacklistupdate_thread.daemon = True
blacklistupdate_thread.start()
whitelistupdate_thread = Thread(target=whitelistUpdate)
whitelistupdate_thread.daemon = True
whitelistupdate_thread.start()
signal.signal(signal.SIGTERM, quit)
atexit.register(clear)
while not quit_now:
time.sleep(0.5)
sys.exit(exit_code)

View File

@@ -1,5 +1,5 @@
FROM alpine:3.17
LABEL maintainer "Andre Peters <andre.peters@servercow.de>"
LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
WORKDIR /app

View File

@@ -1,12 +1,18 @@
FROM php:8.1-fpm-alpine3.17
LABEL maintainer "Andre Peters <andre.peters@servercow.de>"
FROM php:8.2-fpm-alpine3.17
LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
ENV APCU_PECL 5.1.22
ENV IMAGICK_PECL 3.7.0
ENV MAILPARSE_PECL 3.1.4
ENV MEMCACHED_PECL 3.2.0
ENV REDIS_PECL 5.3.7
ENV COMPOSER 2.4.4
# renovate: datasource=github-tags depName=krakjoe/apcu versioning=semver-coerced extractVersion=^v(?<version>.*)$
ARG APCU_PECL_VERSION=5.1.22
# renovate: datasource=github-tags depName=Imagick/imagick versioning=semver-coerced extractVersion=(?<version>.*)$
ARG IMAGICK_PECL_VERSION=3.7.0
# renovate: datasource=github-tags depName=php/pecl-mail-mailparse versioning=semver-coerced extractVersion=^v(?<version>.*)$
ARG MAILPARSE_PECL_VERSION=3.1.6
# renovate: datasource=github-tags depName=php-memcached-dev/php-memcached versioning=semver-coerced extractVersion=^v(?<version>.*)$
ARG MEMCACHED_PECL_VERSION=3.2.0
# renovate: datasource=github-tags depName=phpredis/phpredis versioning=semver-coerced extractVersion=(?<version>.*)$
ARG REDIS_PECL_VERSION=6.0.1
# renovate: datasource=github-tags depName=composer/composer versioning=semver-coerced extractVersion=(?<version>.*)$
ARG COMPOSER_VERSION=2.6.5
RUN apk add -U --no-cache autoconf \
aspell-dev \
@@ -46,6 +52,7 @@ RUN apk add -U --no-cache autoconf \
libxpm-dev \
libzip \
libzip-dev \
linux-headers \
make \
mysql-client \
openldap-dev \
@@ -55,11 +62,11 @@ RUN apk add -U --no-cache autoconf \
samba-client \
zlib-dev \
tzdata \
&& pecl install mailparse-${MAILPARSE_PECL} \
&& pecl install redis-${REDIS_PECL} \
&& pecl install memcached-${MEMCACHED_PECL} \
&& pecl install APCu-${APCU_PECL} \
&& pecl install imagick-${IMAGICK_PECL} \
&& pecl install APCu-${APCU_PECL_VERSION} \
&& pecl install imagick-${IMAGICK_PECL_VERSION} \
&& pecl install mailparse-${MAILPARSE_PECL_VERSION} \
&& pecl install memcached-${MEMCACHED_PECL_VERSION} \
&& pecl install redis-${REDIS_PECL_VERSION} \
&& docker-php-ext-enable apcu imagick memcached mailparse redis \
&& pecl clear-cache \
&& docker-php-ext-configure intl \
@@ -69,10 +76,10 @@ RUN apk add -U --no-cache autoconf \
--with-webp \
--with-xpm \
--with-avif \
&& docker-php-ext-install -j 4 exif gd gettext intl ldap opcache pcntl pdo pdo_mysql pspell soap sockets zip bcmath gmp \
&& docker-php-ext-install -j 4 exif gd gettext intl ldap opcache pcntl pdo pdo_mysql pspell soap sockets sysvsem zip bcmath gmp \
&& docker-php-ext-configure imap --with-imap --with-imap-ssl \
&& docker-php-ext-install -j 4 imap \
&& curl --silent --show-error https://getcomposer.org/installer | php -- --version=${COMPOSER} \
&& curl --silent --show-error https://getcomposer.org/installer | php -- --version=${COMPOSER_VERSION} \
&& mv composer.phar /usr/local/bin/composer \
&& chmod +x /usr/local/bin/composer \
&& apk del --purge autoconf \
@@ -93,6 +100,7 @@ RUN apk add -U --no-cache autoconf \
libxml2-dev \
libxpm-dev \
libzip-dev \
linux-headers \
make \
openldap-dev \
pcre-dev \

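With the version pins moved from ENV to renovate-managed build ARGs, a rebuilt image should carry the new PECL releases. A quick post-build sanity check (a sketch; the compose service name php-fpm-mailcow follows the usual mailcow layout, but verify it against your docker-compose.yml):

# Print the loaded extension versions; they should match the ARG pins above.
docker compose exec php-fpm-mailcow php -r 'foreach (["redis","apcu","mailparse","memcached","imagick"] as $e) echo $e, ": ", phpversion($e), PHP_EOL;'
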
View File

@@ -172,6 +172,24 @@ BEGIN
END;
//
DELIMITER ;
DROP EVENT IF EXISTS clean_sasl_log;
DELIMITER //
CREATE EVENT clean_sasl_log
ON SCHEDULE EVERY 1 DAY DO
BEGIN
DELETE sasl_log.* FROM sasl_log
LEFT JOIN (
SELECT username, service, MAX(datetime) AS lastdate
FROM sasl_log
GROUP BY username, service
) AS last ON sasl_log.username = last.username AND sasl_log.service = last.service
WHERE datetime < DATE_SUB(NOW(), INTERVAL 31 DAY) AND datetime < lastdate;
DELETE FROM sasl_log
WHERE username NOT IN (SELECT username FROM mailbox) AND
datetime < DATE_SUB(NOW(), INTERVAL 31 DAY);
END;
//
DELIMITER ;
EOF
fi
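
The new clean_sasl_log event prunes sasl_log in two passes: rows older than 31 days are deleted unless they are the newest entry for their username/service pair, and rows belonging to usernames without a mailbox are dropped once they pass the same age. To confirm the scheduler picked the event up (a sketch; the service name mysql-mailcow and the DBNAME/DBROOT variables from mailcow.conf are assumed):

source mailcow.conf
# \G prints one column per line, which is easier to read for SHOW EVENTS.
docker compose exec mysql-mailcow mysql -uroot -p"${DBROOT}" "${DBNAME}" \
  -e "SHOW EVENTS WHERE Name = 'clean_sasl_log'\G"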

View File

@@ -1,5 +1,5 @@
FROM debian:bullseye-slim
LABEL maintainer "Andre Peters <andre.peters@servercow.de>"
LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
ARG DEBIAN_FRONTEND=noninteractive
ENV LC_ALL C
@@ -17,10 +17,10 @@ RUN groupadd -g 102 postfix \
ca-certificates \
curl \
dirmngr \
dnsutils \
dnsutils \
gnupg \
libsasl2-modules \
mariadb-client \
mariadb-client \
perl \
postfix \
postfix-mysql \
@@ -32,7 +32,7 @@ RUN groupadd -g 102 postfix \
syslog-ng \
syslog-ng-core \
syslog-ng-mod-redis \
tzdata \
tzdata \
&& rm -rf /var/lib/apt/lists/* \
&& touch /etc/default/locale \
&& printf '#!/bin/bash\n/usr/sbin/postconf -c /opt/postfix/conf "$@"' > /usr/local/sbin/postconf \

View File

@@ -393,12 +393,101 @@ query = SELECT goto FROM spamalias
AND validity >= UNIX_TIMESTAMP()
EOF
sed -i '/User overrides/q' /opt/postfix/conf/main.cf
echo >> /opt/postfix/conf/main.cf
touch /opt/postfix/conf/extra.cf
sed -i '/myhostname/d' /opt/postfix/conf/extra.cf
echo -e "myhostname = ${MAILCOW_HOSTNAME}\n$(cat /opt/postfix/conf/extra.cf)" > /opt/postfix/conf/extra.cf
if [ ! -f /opt/postfix/conf/dns_blocklists.cf ]; then
cat <<EOF > /opt/postfix/conf/dns_blocklists.cf
# This file can be edited.
# Delete this file and restart postfix container to revert any changes.
postscreen_dnsbl_sites = wl.mailspike.net=127.0.0.[18;19;20]*-2
hostkarma.junkemailfilter.com=127.0.0.1*-2
list.dnswl.org=127.0.[0..255].0*-2
list.dnswl.org=127.0.[0..255].1*-4
list.dnswl.org=127.0.[0..255].2*-6
list.dnswl.org=127.0.[0..255].3*-8
ix.dnsbl.manitu.net*2
bl.spamcop.net*2
bl.suomispam.net*2
hostkarma.junkemailfilter.com=127.0.0.2*3
hostkarma.junkemailfilter.com=127.0.0.4*2
hostkarma.junkemailfilter.com=127.0.1.2*1
backscatter.spameatingmonkey.net*2
bl.ipv6.spameatingmonkey.net*2
bl.spameatingmonkey.net*2
b.barracudacentral.org=127.0.0.2*7
bl.mailspike.net=127.0.0.2*5
bl.mailspike.net=127.0.0.[10;11;12]*4
dnsbl.sorbs.net=127.0.0.10*8
dnsbl.sorbs.net=127.0.0.5*6
dnsbl.sorbs.net=127.0.0.7*3
dnsbl.sorbs.net=127.0.0.8*2
dnsbl.sorbs.net=127.0.0.6*2
dnsbl.sorbs.net=127.0.0.9*2
EOF
fi
DNSBL_CONFIG=$(grep -v '^#' /opt/postfix/conf/dns_blocklists.cf | grep '\S')
if [ ! -z "$DNSBL_CONFIG" ]; then
echo -e "\e[33mChecking if ASN for your IP is listed for Spamhaus Bad ASN List...\e[0m"
if [ -n "$SPAMHAUS_DQS_KEY" ]; then
echo -e "\e[32mDetected SPAMHAUS_DQS_KEY variable from mailcow.conf...\e[0m"
echo -e "\e[33mUsing DQS Blocklists from Spamhaus!\e[0m"
SPAMHAUS_DNSBL_CONFIG=$(cat <<EOF
${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net=127.0.0.[4..7]*6
${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net=127.0.0.[10;11]*8
${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net=127.0.0.3*4
${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net=127.0.0.2*3
postscreen_dnsbl_reply_map = texthash:/opt/postfix/conf/dnsbl_reply.map
EOF
cat <<EOF > /opt/postfix/conf/dnsbl_reply.map
# Autogenerated by mailcow, using Spamhaus DQS reply domains
${SPAMHAUS_DQS_KEY}.sbl.dq.spamhaus.net sbl.spamhaus.org
${SPAMHAUS_DQS_KEY}.xbl.dq.spamhaus.net xbl.spamhaus.org
${SPAMHAUS_DQS_KEY}.pbl.dq.spamhaus.net pbl.spamhaus.org
${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net zen.spamhaus.org
${SPAMHAUS_DQS_KEY}.dbl.dq.spamhaus.net dbl.spamhaus.org
${SPAMHAUS_DQS_KEY}.zrd.dq.spamhaus.net zrd.spamhaus.org
EOF
)
else
if [ -f "/opt/postfix/conf/dnsbl_reply.map" ]; then
rm /opt/postfix/conf/dnsbl_reply.map
fi
response=$(curl --connect-timeout 15 --max-time 30 -s -o /dev/null -w "%{http_code}" "https://asn-check.mailcow.email")
if [ "$response" -eq 503 ]; then
echo -e "\e[31mThe AS of your IP is listed as a banned AS from Spamhaus!\e[0m"
echo -e "\e[33mNo SPAMHAUS_DQS_KEY found... Skipping Spamhaus blocklists entirely!\e[0m"
SPAMHAUS_DNSBL_CONFIG=""
elif [ "$response" -eq 200 ]; then
echo -e "\e[32mThe AS of your IP is NOT listed as a banned AS from Spamhaus!\e[0m"
echo -e "\e[33mUsing the open Spamhaus blocklists.\e[0m"
SPAMHAUS_DNSBL_CONFIG=$(cat <<EOF
zen.spamhaus.org=127.0.0.[10;11]*8
zen.spamhaus.org=127.0.0.[4..7]*6
zen.spamhaus.org=127.0.0.3*4
zen.spamhaus.org=127.0.0.2*3
EOF
)
else
echo -e "\e[31mWe couldn't determine your AS... (maybe DNS/Network issue?) Response Code: $response\e[0m"
echo -e "\e[33mDeactivating Spamhaus DNS Blocklists to be on the safe site!\e[0m"
SPAMHAUS_DNSBL_CONFIG=""
fi
fi
fi
# Reset main.cf
sed -i '/Overrides/q' /opt/postfix/conf/main.cf
echo >> /opt/postfix/conf/main.cf
# Append postscreen dnsbl sites to main.cf
if [ ! -z "$DNSBL_CONFIG" ]; then
echo -e "${DNSBL_CONFIG}\n${SPAMHAUS_DNSBL_CONFIG}" >> /opt/postfix/conf/main.cf
fi
# Append user overrides
echo -e "\n# User Overrides" >> /opt/postfix/conf/main.cf
touch /opt/postfix/conf/extra.cf
sed -i '/\$myhostname/! { /myhostname/d }' /opt/postfix/conf/extra.cf
echo -e "myhostname = ${MAILCOW_HOSTNAME}\n$(cat /opt/postfix/conf/extra.cf)" > /opt/postfix/conf/extra.cf
cat /opt/postfix/conf/extra.cf >> /opt/postfix/conf/main.cf
if [ ! -f /opt/postfix/conf/custom_transport.pcre ]; then

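Each entry added above uses Postfix's postscreen_dnsbl_sites syntax, domain[=d.d.d.d]*weight: positive weights count against the client, the allowlist entries carry negative weights, and postscreen rejects once the sum reaches postscreen_dnsbl_threshold. To see what the container actually merged into main.cf at runtime (a sketch; the service name postfix-mailcow is assumed):

# postconf prints the effective value of each named parameter.
docker compose exec postfix-mailcow postconf postscreen_dnsbl_sites postscreen_dnsbl_threshold
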
View File

@@ -1,5 +1,5 @@
FROM debian:bullseye-slim
LABEL maintainer "Andre Peters <andre.peters@tinc.gmbh>"
LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
ARG DEBIAN_FRONTEND=noninteractive
ARG CODENAME=bullseye

View File

@@ -79,6 +79,9 @@ EOF
redis-cli -h redis-mailcow SLAVEOF NO ONE
fi
# Provide additional lua modules
ln -s /usr/lib/$(uname -m)-linux-gnu/liblua5.1-cjson.so.0.0.0 /usr/lib/rspamd/cjson.so
chown -R _rspamd:_rspamd /var/lib/rspamd \
/etc/rspamd/local.d \
/etc/rspamd/override.d \

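The new symlink exposes the distribution's lua-cjson build under the name Rspamd's Lua code loads. A trivial check that it does not dangle inside the container (a sketch; the service name rspamd-mailcow is assumed):

# -L dereferences the link; ls exits non-zero if the target is missing.
docker compose exec rspamd-mailcow ls -lL /usr/lib/rspamd/cjson.so
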
View File

@@ -1,10 +1,11 @@
FROM debian:bullseye-slim
LABEL maintainer "Andre Peters <andre.peters@servercow.de>"
LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
ARG DEBIAN_FRONTEND=noninteractive
ARG SOGO_DEBIAN_REPOSITORY=http://packages.sogo.nu/nightly/5/debian/
# renovate: datasource=github-releases depName=tianon/gosu versioning=semver-coerced extractVersion=^(?<version>.*)$
ARG GOSU_VERSION=1.16
ENV LC_ALL C
ENV GOSU_VERSION 1.14
# Prerequisites
RUN echo "Building from repository $SOGO_DEBIAN_REPOSITORY" \

View File

@@ -2,7 +2,8 @@ FROM solr:7.7-slim
USER root
ENV GOSU_VERSION 1.11
# renovate: datasource=github-releases depName=tianon/gosu versioning=semver-coerced extractVersion=(?<version>.*)$
ARG GOSU_VERSION=1.16
COPY solr.sh /
COPY solr-config-7.7.0.xml /

View File

@@ -1,6 +1,6 @@
FROM alpine:3.17
LABEL maintainer "Andre Peters <andre.peters@servercow.de>"
LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
RUN apk add --update --no-cache \
curl \
@@ -18,6 +18,11 @@ EXPOSE 53/udp 53/tcp
COPY docker-entrypoint.sh /docker-entrypoint.sh
# healthcheck (nslookup)
COPY healthcheck.sh /healthcheck.sh
RUN chmod +x /healthcheck.sh
HEALTHCHECK --interval=30s --timeout=10s CMD [ "/healthcheck.sh" ]
ENTRYPOINT ["/docker-entrypoint.sh"]
CMD ["/usr/sbin/unbound"]

View File

@@ -0,0 +1,12 @@
#!/bin/bash
nslookup mailcow.email 127.0.0.1 1> /dev/null
if [ $? -eq 0 ]; then
echo "DNS resolution is working!"
exit 0
else
echo "DNS resolution is not working correctly..."
echo "Maybe check your outbound firewall, as it needs to resolve DNS over TCP AND UDP!"
exit 1
fi
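
Since the Dockerfile registers this script via HEALTHCHECK, Docker records the outcome in the container state. To read it without scrolling logs (a sketch; the compose service name unbound-mailcow is assumed):

# Prints starting, healthy, or unhealthy based on recent /healthcheck.sh runs.
docker inspect --format '{{ .State.Health.Status }}' "$(docker compose ps -q unbound-mailcow)"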