08 พฤศจิกายน 2566

bind9 DNSSEC key with utimaco HSM

- ubuntu 20.04
- libssl 1.1.1f

---
apt -y install build-essential

apt -y install libssl-dev pkg-config
export PKG_CONFIG_PATH=/usr/lib/x86_64-linux-gnu/pkgconfig/

wget https://github.com/OpenSC/libp11/releases/download/libp11-0.4.12/libp11-0.4.12.tar.gz
tar -xzf libp11-0.4.12.tar.gz
cd libp11-0.4.12/
./configure prefix="/usr/local/libp11/"
make && make install
export LD_LIBRARY_PATH=/usr/local/libp11/lib/:$LD_LIBRARY_PATH

mkdir -p /opt/utimaco/bin
mkdir -p /opt/utimaco/lib
mkdir /etc/utimaco

cp csadm p11tool2 /opt/utimaco/bin/
chmod +x /opt/utimaco/bin/*
cp ADMIN.key /opt/utimaco/bin/
cp libcs_pkcs11_R3.so /opt/utimaco/lib/
cp cs_pkcs11_R3.cfg /etc/utimaco/

# openssl.conf (point OPENSSL_CONF at this file so openssl loads the pkcs11 engine)
cat << EOF > openssl.conf
openssl_conf = openssl_init

[openssl_init]
engines=engine_section

[engine_section]
pkcs11 = pkcs11_section

[pkcs11_section]
engine_id = pkcs11
dynamic_path = /usr/lib/x86_64-linux-gnu/engines-1.1/pkcs11.so
MODULE_PATH = /opt/utimaco/lib/libcs_pkcs11_R3.so
init = 0
EOF

systemctl disable systemd-resolved.service
systemctl stop systemd-resolved
rm /etc/resolv.conf

cat << EOF > /etc/resolv.conf
nameserver 192.168.1.1
EOF

add-apt-repository ppa:isc/bind
apt update
apt -y install bind9

/opt/utimaco/bin/p11tool2 slot=0 Label=bind-hsm Login=ADMIN,/opt/utimaco/bin/ADMIN.key InitToken=ask
/opt/utimaco/bin/p11tool2 slot=0 LoginSO=ask InitPin=ask

/opt/utimaco/bin/p11tool2 slot=0 LoginUser=ask PubKeyAttr=CKA_LABEL="ksk" PrvKeyAttr=CKA_LABEL="ksk" GenerateKeyPair=RSA
/opt/utimaco/bin/p11tool2 slot=0 LoginUser=ask PubKeyAttr=CKA_LABEL="zsk" PrvKeyAttr=CKA_LABEL="zsk" GenerateKeyPair=RSA

/opt/utimaco/bin/p11tool2 slot=0 LoginUser=ask ListObjects

dnssec-keyfromlabel -E pkcs11 -f KSK -a RSASHA256 -l "pkcs11:token=bind-hsm;object=ksk" example.net
dnssec-keyfromlabel -E pkcs11 -a RSASHA256 -l "pkcs11:token=bind-hsm;object=zsk" example.net
dnssec-signzone -E pkcs11 -S -o example.net db.example.net

27 ตุลาคม 2566

build postfix docker image for mikrotik chr

- smtp authen for send mail
- no mailbox only forward to another email

---
Dockerfile
FROM alpine:latest

# Install everything in one layer; --no-cache skips the local apk index, so no
# separate `apk update` or cache-cleanup layers are needed (removing files in a
# later RUN would not shrink the image anyway).
RUN apk add --no-cache \
      bash \
      ca-certificates \
      cyrus-sasl \
      cyrus-sasl-crammd5 \
      cyrus-sasl-login \
      iproute2 \
      mailx \
      postfix \
      postfix-pcre \
      rsyslog \
      supervisor \
      tzdata

# Documentation only: SMTP and submission ports.
EXPOSE 25/tcp
EXPOSE 587/tcp

COPY ./supervisord.conf /etc/supervisord.conf
COPY ./smtpd.pem /etc/postfix/certs/smtpd.pem
COPY ./docker-entrypoint.sh /docker-entrypoint.sh

RUN chmod +x /docker-entrypoint.sh

# The entrypoint script generates the Postfix configuration, then execs CMD.
ENTRYPOINT ["/docker-entrypoint.sh"]

CMD ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf"]


---
docker-entrypoint.sh
#!/bin/bash
# Entrypoint for the send-only Postfix relay container.
# Generates rsyslog/SASL/Postfix configuration at startup, creates one SASL
# account, then execs the container CMD (supervisord) as PID 1.

# Container timezone.
cp -f /usr/share/zoneinfo/Asia/Bangkok /etc/localtime
echo Asia/Bangkok > /etc/timezone

mkdir -p /var/spool/rsyslog/

# rsyslog: send everything to stdout so `docker logs` captures Postfix output.
# \$WorkDirectory is backslash-escaped so the literal text lands in the file.
cat <<EOF > /etc/rsyslog.conf
module(load="imuxsock")

\$WorkDirectory  /var/spool/rsyslog

*.*             -/dev/stdout
EOF

mkdir -p /etc/sasl2/

# Cyrus SASL config for smtpd: authenticate against the local sasldb.
cat <<EOF > /etc/sasl2/smtp.conf
pwcheck_method: auxprop
auxprop_plugin: sasldb
mech_list: PLAIN LOGIN CRAM-MD5 DIGEST-MD5
log_level: 7
EOF

# Virtual alias map: no local mailboxes — mail is forwarded onward.
echo "user@domain.tld forward@gmail.com" >> /etc/postfix/virtual
echo "admin@domain.tld root" >> /etc/postfix/virtual

postmap /etc/postfix/virtual

postconf -e "virtual_alias_maps = lmdb:/etc/postfix/virtual"

# Identity and basic SMTP settings (\$ escapes keep Postfix variables literal).
postconf -e "mydomain = domain.tld"
postconf -e "myhostname = mail.domain.tld"
postconf -e "mydestination = localhost, \$myhostname, \$mydomain"
postconf -e "inet_interfaces = all"
postconf -e "broken_sasl_auth_clients = yes"

# Outbound TLS: opportunistic.
postconf -e "smtp_tls_security_level=may"
postconf -e "smtp_tls_loglevel=1"

# Inbound policy: require HELO and SASL auth; reject unauthorized relaying.
postconf -e "smtpd_helo_required = yes"
postconf -e "smtpd_sasl_auth_enable = yes"
postconf -e "smtpd_sender_restrictions = reject_unknown_sender_domain, reject_unauthenticated_sender_login_mismatch, reject_known_sender_login_mismatch, permit_sasl_authenticated, permit"
postconf -e "smtpd_recipient_restrictions = permit_sasl_authenticated, reject_non_fqdn_helo_hostname, reject_non_fqdn_sender, reject_unknown_sender_domain, reject_non_fqdn_recipient, reject_unknown_recipient_domain, reject_unauth_destination"
postconf -e "smtpd_relay_restrictions = permit_sasl_authenticated, reject_unauth_destination"
postconf -e "smtpd_sasl_authenticated_header = yes"

# Inbound TLS: AUTH only over TLS; one PEM bundle serves as key, cert and CA.
postconf -e "smtpd_use_tls = yes"
postconf -e "smtpd_tls_auth_only = yes"
postconf -e "smtpd_tls_loglevel = 1"
postconf -e "smtpd_tls_cert_file=/etc/postfix/certs/smtpd.pem"
postconf -e "smtpd_tls_key_file=/etc/postfix/certs/smtpd.pem"
postconf -e "smtpd_tls_CAfile=/etc/postfix/certs/smtpd.pem"

postconf -e "smtputf8_enable = yes"

# Submission service (port 587): TLS mandatory, SASL-authenticated only.
postconf -M submission/inet="submission   inet   n   -   n   -   -   smtpd"
postconf -P "submission/inet/syslog_name=postfix/submission"
postconf -P "submission/inet/smtpd_tls_security_level=encrypt"
postconf -P "submission/inet/smtpd_sasl_auth_enable=yes"
postconf -P "submission/inet/smtpd_client_restrictions=permit_sasl_authenticated,reject_unauth_destination"
postconf -P "submission/inet/smtpd_recipient_restrictions=permit_sasl_authenticated,reject_unauth_destination"

# Create the single SASL account clients authenticate with.
# NOTE(review): credentials are hard-coded placeholders — inject them via
# environment variables or a secret instead of baking them into the image.
echo password | saslpasswd2 -p -c -u domain.tld username

# smtpd runs as user "postfix" and must be able to read the sasldb.
chown postfix /etc/sasl2/sasldb2

newaliases

# Replace the shell with CMD so supervisord becomes PID 1 and receives signals.
exec "$@"


---
smtpd.pem
-----BEGIN PRIVATE KEY-----
-----END PRIVATE KEY-----
-----BEGIN CERTIFICATE-----
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
-----END CERTIFICATE-----

---
supervisord.conf
[supervisord]
nodaemon = true
user = root

[program:rsyslog]
autorestart = true
command = /usr/sbin/rsyslogd -n
priority = 100
process_name = rsyslog
redirect_stderr = true
stdout_logfile = /dev/stdout
stdout_logfile_maxbytes = 0

[program:postfix]
autorestart = true
command = /usr/libexec/postfix/master -c /etc/postfix -d
process_name = postfix
redirect_stderr = true
stdout_logfile = /dev/stdout
stdout_logfile_maxbytes = 0


---
build.sh
#!/bin/bash
# Build the postfix image and export it as a tar archive for upload to the
# MikroTik CHR container runtime.
set -euo pipefail

docker buildx build -t postfix-alpine:latest .

docker save postfix-alpine:latest > postfix-alpine.tar


---
upload postfix-alpine.tar to chr

/container
add file=postfix-alpine.tar interface=veth-postfix hostname=mail.domain.tld dns=10.10.0.1 logging=yes

21 ตุลาคม 2566

secure docker with ufw

Docker and ufw
Uncomplicated Firewall (ufw) is a frontend that ships with Debian and Ubuntu, and it lets you manage firewall rules. Docker and ufw use iptables in ways that make them incompatible with each other.

When you publish a container's ports using Docker, traffic to and from that container gets diverted before it goes through the ufw firewall settings. Docker routes container traffic in the nat table, which means that packets are diverted before it reaches the INPUT and OUTPUT chains that ufw uses. Packets are routed before the firewall rules can be applied, effectively ignoring your firewall configuration.

Docker installs two custom iptables chains named DOCKER-USER and DOCKER, and it ensures that incoming packets are always checked by these two chains first. These chains are part of the FORWARD chain.


Solving UFW and Docker issues
This solution needs to modify only one UFW configuration file, all Docker configurations and options remain the default.

Modify the UFW configuration file /etc/ufw/after.rules and add the following rules at the end of the file:

# BEGIN UFW AND DOCKER
*filter
:ufw-user-forward - [0:0]
:ufw-docker-logging-deny - [0:0]
:DOCKER-USER - [0:0]
-A DOCKER-USER -j ufw-user-forward

-A DOCKER-USER -j RETURN -s 10.0.0.0/8
-A DOCKER-USER -j RETURN -s 172.16.0.0/12
-A DOCKER-USER -j RETURN -s 192.168.0.0/16

-A DOCKER-USER -p udp -m udp --sport 53 --dport 1024:65535 -j RETURN

-A DOCKER-USER -j ufw-docker-logging-deny -p tcp -m tcp --tcp-flags FIN,SYN,RST,ACK SYN -d 192.168.0.0/16
-A DOCKER-USER -j ufw-docker-logging-deny -p tcp -m tcp --tcp-flags FIN,SYN,RST,ACK SYN -d 10.0.0.0/8
-A DOCKER-USER -j ufw-docker-logging-deny -p tcp -m tcp --tcp-flags FIN,SYN,RST,ACK SYN -d 172.16.0.0/12
-A DOCKER-USER -j ufw-docker-logging-deny -p udp -m udp --dport 0:32767 -d 192.168.0.0/16
-A DOCKER-USER -j ufw-docker-logging-deny -p udp -m udp --dport 0:32767 -d 10.0.0.0/8
-A DOCKER-USER -j ufw-docker-logging-deny -p udp -m udp --dport 0:32767 -d 172.16.0.0/12

-A DOCKER-USER -j RETURN

-A ufw-docker-logging-deny -m limit --limit 3/min --limit-burst 10 -j LOG --log-prefix "[UFW DOCKER BLOCK] "
-A ufw-docker-logging-deny -j DROP

COMMIT
# END UFW AND DOCKER



ref:
https://docs.docker.com/network/packet-filtering-firewalls/
https://github.com/chaifeng/ufw-docker

docker build bind9 (authoritative) alpine for mikrotik container

docker build bind9 (authoritative) alpine for mikrotik container


# create Dockerfile
# Authoritative-only BIND 9 image for the MikroTik container runtime.
FROM alpine:latest

RUN apk add --no-cache bind

# Start from the authoritative sample config shipped with the alpine package.
RUN cp /etc/bind/named.conf.authoritative /etc/bind/named.conf

# Listen on all interfaces instead of loopback only.
RUN sed -i "s/127.0.0.1/any/g" /etc/bind/named.conf

# Zone files live here; "named" needs write access (journals, rndc reloads).
RUN mkdir -p /etc/bind/zone/ && chown named: /etc/bind/zone/

EXPOSE 53/tcp
EXPOSE 53/udp

# -g: run in foreground logging to stderr; -u: drop privileges to "named".
CMD ["named", "-c", "/etc/bind/named.conf", "-g", "-u", "named"]

# build image
docker buildx build -t bind9-alpine:latest .

# save image
docker save bind9-alpine:latest > bind9-alpine.tar

# upload image to mikrotik
echo 'put bind9-alpine.tar' | sftp user@mikrotik


---
mikrotik

/container config
set registry-url=https://registry-1.docker.io

/interface bridge
add name=Docker

/ip address
add address=10.0.0.1/24 interface=Docker

/ip firewall nat
add chain=srcnat src-address=10.0.0.0/24 action=masquerade

/interface veth
add address=10.0.0.10/24 gateway=10.0.0.1 name=veth-bind9

/interface bridge port
add bridge=Docker interface=veth-bind9

/container mounts
add name=bind9 src=/bind9 dst=/etc/bind/

/container
add interface=veth-bind9 file=bind9-alpine.tar mounts=bind9 logging=yes

#start container (check container number with command print)
start 0

# shell to container number 0
shell 0

# append zone config to /etc/bind/named.conf
cat >> /etc/bind/named.conf << 'EOF'
zone "domain.tld" IN {
    type master;
    file "/etc/bind/zone/db.domain.tld";
};
EOF

# create zone file db.domain.tld
cat > /etc/bind/zone/db.domain.tld << 'EOF'
$TTL 3600
$ORIGIN domain.tld.
@       SOA     ns1.domain.tld. dns.domain.tld. (
                2023102100    ; Serial
                28800              ; Refresh
                7200                ; Retry
                604800            ; Expire
                7200 )              ; Minimum

                NS      ns1.domain.tld.

                MX    10 mail.domain.tld.

                A    10.0.0.10
www        A    10.0.0.10
ns1          A    10.0.0.10
EOF

# reconfig bind
rndc reconfig

15 ตุลาคม 2566

proxmox install openwrt 23.05.4

---
proxmox shell

wget -O - https://downloads.openwrt.org/releases/23.05.4/targets/x86/64/openwrt-23.05.4-x86-64-generic-ext4-combined.img.gz | gunzip -c > openwrt.raw

qemu-img resize -f raw openwrt.raw 512M

mkdir /var/lib/vz/images/770

qemu-img convert -f raw -O qcow2 openwrt.raw /var/lib/vz/images/770/vm-770-disk-0.qcow2

chmod 540 /var/lib/vz/images/770/vm-770-disk-0.qcow2

qm create 770 --name OpenWrt --ostype l26 --cpu host --sockets 1 --cores 1 --memory 1024 --net0 virtio,bridge=vmbr1 --net1 virtio,bridge=vmbr0 --onboot yes --virtio0 local:770/vm-770-disk-0.qcow2

qm start 770

---
OpenWrt console

passwd

uci set firewall.@zone[1].input='ACCEPT'
uci commit
service firewall reload

ip a

02 ตุลาคม 2566

Mikrotik set NTP Client

/system ntp client servers add address=0.pool.ntp.org
/system ntp client servers add address=clock.nectec.or.th
/system ntp client servers add address=203.159.70.33

/system ntp client set enabled=yes

nginx docker with certbot docker (Let's Encrypt)

nginx docker with certbot docker (Let's Encrypt)

docker-compose.yaml
    image: nginx:latest
    container_name: nginx
    volumes:
      - ./tmp-acme_challenge:/tmp/acme_challenge
      - ./etc-letsencrypt:/etc/letsencrypt:ro
      - ./default.conf:/etc/nginx/conf.d/default.conf

default.conf

    location ^~ /.well-known/acme-challenge/ {
        allow all;
        root /tmp/acme_challenge;
    }


    ssl_certificate /etc/letsencrypt/live/domain.tld/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/domain.tld/privkey.pem;

# issue Let's Encrypt
docker run -it --rm -v /home/user/docker/nginx/etc-letsencrypt:/etc/letsencrypt -v /home/user/docker/nginx/tmp-acme_challenge:/tmp/acme_challenge certbot/certbot certonly  --expand --webroot -w /tmp/acme_challenge --text --agree-tos --no-eff-email --email me@domain.tld --verbose --keep-until-expiring --preferred-challenges=http -d domain.tld -d www.domain.tld
        

# renew cert
docker run -it --rm -v /home/user/docker/nginx/etc-letsencrypt:/etc/letsencrypt -v /home/user/docker/nginx/tmp-acme_challenge:/tmp/acme_challenge certbot/certbot renew

reference
- https://eff-certbot.readthedocs.io/en/stable/install.html#running-with-docker

reference
- https://eff-certbot.readthedocs.io/en/stable/install.html#running-with-docker

29 กันยายน 2566

Greenbone Community Containers 22.4

curl -fsSL get.docker.com | sh

sudo usermod -aG docker $USER

mkdir greenbone && cd greenbone

curl -fsSL https://greenbone.github.io/docs/latest/_static/docker-compose-22.4.yml -o docker-compose.yml

docker compose up -d

docker compose exec -u gvmd gvmd gvmd --user=admin --new-password=<password>

 

https://greenbone.github.io/docs/latest/22.4/container/index.html

26 กันยายน 2566

nginx docker with hosted acme.sh (Let's Encrypt & ZeroSSL)

curl https://get.acme.sh | sh -s email=me@domain.tld

docker-compose.yaml
    image: nginx:latest
    container_name: nginx
    volumes:
      - ./etc-nginx-certs/:/etc/nginx/certs/
      - ./tmp-acme_challenge:/tmp/acme_challenge
      - ./default.conf:/etc/nginx/conf.d/default.conf

default.conf

    location ^~ /.well-known/acme-challenge/ {
        allow all;
        root /tmp/acme_challenge;
    }

    ssl_certificate /etc/nginx/certs/domain.tld.crt;
    ssl_certificate_key /etc/nginx/certs/domain.tld.key;
    
# issue Let's Encrypt
acme.sh --issue --server letsencrypt  -d domain.tld -d www.domain.tld -w /home/user/docker/nginx/tmp-acme_challenge --home /home/user/docker/nginx/acme.sh

# issue ZeroSSL
acme.sh --register-account -m me@domain.tld --issue -d domain.tld -d www.domain.tld -w /home/user/docker/nginx/tmp-acme_challenge --home /home/user/docker/nginx/acme.sh

# install cert
acme.sh --install-cert -d domain.tld --home /home/user/docker/nginx/acme.sh --key-file /home/user/docker/nginx/etc-nginx-certs/domain.tld.key --fullchain-file /home/user/docker/nginx/etc-nginx-certs/domain.tld.crt --reloadcmd "docker exec nginx /etc/init.d/nginx reload"

# renew cert
acme.sh --cron --home /home/user/docker/nginx/acme.sh 
 
 
reference
- https://github.com/acmesh-official/acme.sh

25 กันยายน 2566

K3S install with Rancher Helm Chart

---
SRV01

curl -sfL https://get.k3s.io | INSTALL_K3S_CHANNEL=v1.26.9+k3s1 K3S_KUBECONFIG_MODE=644 sh -

curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash

helm repo add rancher-stable https://releases.rancher.com/server-charts/stable

kubectl create namespace cattle-system

helm repo add jetstack https://charts.jetstack.io

helm repo update

kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.0/cert-manager.crds.yaml

kubectl config view --raw > ~/.kube/config

chmod 600 ~/.kube/config

helm install \
  cert-manager jetstack/cert-manager \
  --namespace cert-manager \
  --create-namespace \
  --version v1.13.0

helm install rancher rancher-stable/rancher \
  --namespace cattle-system \
  --set hostname=rancher.my.org \
  --set bootstrapPassword=admin
  
kubectl -n cattle-system rollout status deploy/rancher

kubectl -n cattle-system get deploy rancher

sudo cat /var/lib/rancher/k3s/server/token

---
SRV02

curl -sfL https://get.k3s.io | INSTALL_K3S_CHANNEL=v1.26.9+k3s1 K3S_URL=https://<SRV01>:6443 K3S_TOKEN=<SRV01_TOKEN> sh -

28 เมษายน 2566

Mikrotik container + adguard home v0.107.29 (latest)

Mikrotik v7.8
AdGuard Home v0.107.29 (latest)

image from https://registry-1.docker.io "error response getting manifests: 404"

import image form PC instead (both PC and CHR using amd64)

$ docker pull adguard/adguardhome:latest
$ docker save adguard/adguardhome:latest -o adguard_home_0_107_29.tar

upload adguard_home_0_107_29.tar to your CHR


Create mount volume to keep config and data across image version upgrades
/container mounts
add dst=/opt/adguardhome/work name=adguard-work src=/adguard/work
add dst=/opt/adguardhome/conf name=adguard-conf src=/adguard/conf

/container
add file=adguard_home_0_107_29.tar interface=veth-adguard mounts=adguard-conf,adguard-work start-on-boot=yes logging=yes

04 เมษายน 2566

proxmox anywhere with mikrotik chr

Scenario
- mini pc with 1 NIC
- proxmox CE
- mikrotik CHR
- cloudflare tunnel

1. Install proxmox with static ip for existing network (192.168.80.20/24)

Network Device :  enp1s0
Linux Bridge : vmbr0

/etc/network/interfaces
iface enp1s0 inet manual

auto vmbr0
iface vmbr0 inet static
        address 192.168.80.20/24
        gateway 192.168.80.1
        bridge-ports enp1s0
        bridge-stp off
        bridge-fd 0

2. Add vmbr1 as LAN (10.80.0.20/24) and set vmbr0 as WAN

/etc/network/interfaces
iface enp1s0 inet manual

auto vmbr0
iface vmbr0 inet static
        address 192.168.80.20/24
        gateway 192.168.80.1
        bridge-ports enp1s0
        bridge-stp off
        bridge-fd 0
#WAN

auto vmbr1
iface vmbr1 inet static
        address 10.80.0.20/24
        bridge-ports none
        bridge-stp off
        bridge-fd 0
#LAN

3. Install Mikrotik CHR

ether1 : vmbr0 (dhcp client)
ether2 : vmbr1 (10.80.0.1/24)
masquerade out-interface ether1

4. Install cloudflare tunnel via proxmox node shell

Add public hostname with service https://10.80.0.20:8006 for proxmox
Add public hostname with service http://10.80.0.1 for mikrotik CHR

5. change proxmox default gateway to vmbr1

/etc/network/interfaces
iface enp1s0 inet manual

auto vmbr0
iface vmbr0 inet static
        address 192.168.80.20/24
        bridge-ports enp1s0
        bridge-stp off
        bridge-fd 0
#WAN

auto vmbr1
iface vmbr1 inet static
        address 10.80.0.20/24
        gateway 10.80.0.1
        bridge-ports none
        bridge-stp off
        bridge-fd 0
#LAN

6. Access proxmox with public hostname

7. Install other proxmox guest with vmbr1 and access with public hostname

24 มีนาคม 2566

Mikrotik CHR on ReadyIDC cloud

ACTIONS -> OPTIONS -> Rebuild virtual server
select CentOS

ACTIONS -> POWER -> Reboot in Recovery

sudo yum install unzip

mount -t tmpfs tmpfs /tmp/

cd /tmp

wget https://download.mikrotik.com/routeros/7.8/chr-7.8.img.zip

unzip chr-7.8.img.zip

dd if=chr-7.8.img of=/dev/vda bs=4M oflag=sync

mkdir /media/vda1

mount /dev/vda1 /media/vda1

mkdir -p /media/vda1/boot/grub2/

vi /media/vda1/boot/grub2/grub.cfg

setparams 'Grub 2'
set root=(hd0,msdos1)
chainloader +1

sync

umount /dev/vda1

echo 1 > /proc/sys/kernel/sysrq
echo b > /proc/sysrq-trigger

ACTIONS -> POWER -> Reboot Virtual Server

----- restore rsc and enable container ------

ip address add address=192.168.80.99/24 interface=ether1 

ip route add  gateway=192.168.80.1

system reset-configuration no-default=no skip-backup=yes keep-user=no

system backup load name=chr-back-20230323.backup

system device-mode update container=yes

ACTIONS -> POWER -> Shut Down Virtual Server
select Power OFF

17 กุมภาพันธ์ 2566

Mikrotik container + adguard + uptime kuma

Mikrotik  v7.7

adguardhome after this version have an error
 
 
/system/device-mode/update container=yes

/container config
set registry-url=https://registry-1.docker.io

/interface bridge
add name=Docker

/ip address
add address=10.0.0.1/24 interface=Docker

/ip firewall nat
add chain=srcnat src-address=10.0.0.0/24 action=masquerade

/interface veth
add address=10.0.0.10/24 gateway=10.0.0.1 name=veth-adguard

/interface bridge port
add bridge=Docker interface=veth-adguard

/container
add interface=veth-adguard remote-image=adguard/adguardhome:v0.107.23 start-on-boot=yes logging=yes

/interface veth
add address=10.0.0.11/24 gateway=10.0.0.1 name=veth-uptimekuma

/interface bridge port
add bridge=Docker interface=veth-uptimekuma

/container mounts
add dst=/app/data name=uptimekuma src=/kuma_data

/container
add interface=veth-uptimekuma mounts=uptimekuma remote-image=louislam/uptime-kuma start-on-boot=yes logging=yes

sftp to remove kuma_data/.type after first start

13 กุมภาพันธ์ 2566

docker-compose :: php mysql phpmyadmin

 ubuntu 22.04

file structure
docker-compose.yaml
www/Dockerfile
www/html/index.php
mysql/dbdata/


$ sudo apt update && sudo apt -y upgrade
$ sudo apt install docker-compose docker.io -y

$ vi docker-compose.yaml
version: "3.8"
services:

  www:
    build:
      context: ./www
      dockerfile: Dockerfile
    depends_on:
      - db
    volumes:
      - ./www/html:/var/www/html/
    ports:
      - "8080:80"
    networks:
      - my-network

  db:
    image: mysql:latest
    environment:
      MYSQL_ROOT_PASSWORD: xxxxxxxxx
    volumes:
      - ./mysql/dbdata:/var/lib/mysql/
    ports:
      - "3306:3306"
    networks:
      - my-network

  phpmyadmin:
    image: phpmyadmin/phpmyadmin:latest
    depends_on:
      - db
    environment:
      PMA_HOST: db
    ports:
      - "8081:80"
    networks:
      - my-network

networks:
  my-network:


$ vi www/Dockerfile
FROM php:8.2-apache

# Container timezone.
ENV TZ=Asia/Bangkok
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

# PHP-level default timezone.
RUN printf '[PHP]\ndate.timezone = "Asia/Bangkok"\n' > /usr/local/etc/php/conf.d/tzone.ini

RUN docker-php-ext-install mysqli && docker-php-ext-enable mysqli

# Pull in security updates and drop the apt lists in the same layer so they do
# not bloat the image. (Longer term, prefer bumping the php base image tag.)
RUN apt-get update \
    && apt-get upgrade -y \
    && rm -rf /var/lib/apt/lists/*

$ sudo docker-compose up -d

03 กุมภาพันธ์ 2566

PHP Cloudflare Turnstile

Turnstile is Cloudflare’s smart CAPTCHA alternative.

https://www.cloudflare.com/products/turnstile/

<?php
// Server-side verification of a Cloudflare Turnstile response.
// On POST with a "cf-turnstile-response" token, validate it against the
// siteverify API, print "success" or "error", and stop before the HTML form.
if (isset($_POST["cf-turnstile-response"])) {

    $captcha = $_POST['cf-turnstile-response'];
    $secretKey = "__Secret_Key__";
    $ip = $_SERVER['REMOTE_ADDR'];
    $url = 'https://challenges.cloudflare.com/turnstile/v0/siteverify';
    $data = array('secret' => $secretKey, 'response' => $captcha, 'remoteip' => $ip);

    $curl = curl_init();
    curl_setopt($curl, CURLOPT_URL, $url);
    curl_setopt($curl, CURLOPT_RETURNTRANSFER, true);
    curl_setopt($curl, CURLOPT_TIMEOUT, 10);
    curl_setopt($curl, CURLOPT_POST, true);
    // Encode as application/x-www-form-urlencoded; passing the raw array would
    // silently switch the request to multipart/form-data.
    curl_setopt($curl, CURLOPT_POSTFIELDS, http_build_query($data));
    $curlData = curl_exec($curl);
    curl_close($curl);

    // curl_exec() returns false on transport failure; json_decode(false)
    // would yield null and emit warnings below, so fail closed instead.
    if ($curlData === false) {
        echo "error";
        exit();
    }

    $result = json_decode($curlData, true);

    if (is_array($result) && !empty($result["success"])) {
        echo "success";
        exit();
    } else {
        echo "error";
        exit();
    }
}
?>
<!DOCTYPE html>
<html lang="en">
    <head>
        <title>Test Turnstile</title>
        <script src="https://challenges.cloudflare.com/turnstile/v0/api.js" async defer></script>
    </head>
    <body>
        <form method="POST" action="">
            <div>
                <!-- The following line controls and configures the Turnstile widget. -->
                <div class="cf-turnstile" data-sitekey="__Site_Key__" data-theme="light"></div>
                <!-- end. -->
            </div>
            <button  type="submit">Sign in</button>
        </form>
    </body>
</html>

26 มกราคม 2566

Mikrotik load balance with recursive route

RouterOS7

ether1 : 192.168.1.x/24
ether2 : 192.168.2.x/24
LAN : 192.168.25.1/24

ether1 ping check : 1.0.0.1
ether2 ping check : 8.8.4.4

/ip firewall mangle
add action=accept chain=prerouting dst-address=192.168.1.0/24
add action=accept chain=prerouting dst-address=192.168.2.0/24
add action=accept chain=prerouting dst-address=192.168.25.0/24
add action=mark-connection chain=input in-interface=ether1 new-connection-mark=wan1_input passthrough=yes
add action=mark-connection chain=input in-interface=ether2 new-connection-mark=wan2_input passthrough=yes
add action=mark-routing chain=output comment="WAN1 Input" connection-mark=wan1_input new-routing-mark=WAN1 passthrough=no
add action=mark-routing chain=output comment="WAN2 Input" connection-mark=wan2_input new-routing-mark=WAN2 passthrough=no
add action=mark-connection chain=prerouting in-interface=ether1 new-connection-mark=wan1_conn passthrough=yes
add action=mark-connection chain=prerouting in-interface=ether2 new-connection-mark=wan2_conn passthrough=yes
add action=mark-routing chain=prerouting connection-mark=wan1_conn in-interface=br-lan new-routing-mark=WAN1 passthrough=no
add action=mark-routing chain=prerouting connection-mark=wan2_conn in-interface=br-lan new-routing-mark=WAN2 passthrough=no
add action=mark-connection chain=prerouting comment=4/0 dst-address-type=!local in-interface=br-lan new-connection-mark=wan1_lb passthrough=yes per-connection-classifier=both-addresses:4/0
add action=mark-connection chain=prerouting comment=4/1 dst-address-type=!local in-interface=br-lan new-connection-mark=wan2_lb passthrough=yes per-connection-classifier=both-addresses:4/1
add action=mark-connection chain=prerouting comment=4/2 dst-address-type=!local in-interface=br-lan new-connection-mark=wan1_lb passthrough=yes per-connection-classifier=both-addresses:4/2
add action=mark-connection chain=prerouting comment=4/3 dst-address-type=!local in-interface=br-lan new-connection-mark=wan2_lb passthrough=yes per-connection-classifier=both-addresses:4/3
add action=mark-routing chain=prerouting connection-mark=wan1_lb in-interface=br-lan new-routing-mark=WAN1 passthrough=no
add action=mark-routing chain=prerouting connection-mark=wan2_lb in-interface=br-lan new-routing-mark=WAN2 passthrough=no

/queue simple
add dst=ether1 limit-at=1M/1M max-limit=10M/10M name=wan1 queue=pcq-upload-default/pcq-download-default target=192.168.25.0/24
add dst=ether2 limit-at=1M/1M max-limit=10M/10M name=wan2 queue=pcq-upload-default/pcq-download-default target=192.168.25.0/24

/ip route
add distance=1 dst-address=1.0.0.1/32 gateway=192.168.1.1 routing-table=main scope=10
add distance=1 dst-address=8.8.4.4/32 gateway=192.168.2.1 routing-table=main scope=10
add check-gateway=ping distance=1 dst-address=0.0.0.0/0 gateway=1.0.0.1 routing-table=main target-scope=30
add check-gateway=ping distance=1 dst-address=0.0.0.0/0 gateway=8.8.4.4 routing-table=main target-scope=30
add check-gateway=ping distance=1 dst-address=0.0.0.0/0 gateway=1.0.0.1 routing-table=WAN1 target-scope=30
add check-gateway=ping distance=1 dst-address=0.0.0.0/0 gateway=8.8.4.4 routing-table=WAN2 target-scope=30