geoip maxmind

https://github.com/maxmind/libmaxminddb
https://github.com/maxmind/MaxMind-DB-Reader-php

sudo apt install libmaxminddb0 libmaxminddb-dev mmdb-bin

wget https://github.com/maxmind/MaxMind-DB-Reader-php/archive/v1.8.0.tar.gz
tar xzf v1.8.0.tar.gz
cd MaxMind-DB-Reader-php-1.8.0/
cd ext
./configure --with-php-config=/usr/local/apache2/php/bin/php-config
make
make test
sudo make install

vi /usr/local/apache2/php/php.ini
extension=maxminddb.so
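
To confirm the extension loads and that a database can actually be queried, something like this should work (the PHP binary path matches the build above; the GeoLite2 database path is an assumption, use wherever the .mmdb file actually lives):

# check the PHP extension is loaded
/usr/local/apache2/php/bin/php -m | grep maxminddb

# query a database directly with mmdblookup from the mmdb-bin package
mmdblookup --file /usr/share/GeoIP/GeoLite2-City.mmdb --ip 8.8.8.8 country names en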

openssl rsa public/private keys

kubectl -n hcr create secret tls hcr-tls-secret \
--cert=hcr.homlish.net.2020-10-27.cert.pem \
--key=hcr.homlish.net.2020-10-27.key.pem

kubectl -n default create secret tls test-tls-secret \
--cert=jbox-api.local.homlish.net.2022-02-13.cert.pem \
--key=jbox-api.local.homlish.net.2022-02-13.key.pem

# generate private
openssl genrsa -out private-key2.pem 4096
cp private-key2.pem private-key2-no-lf.pem
awk 'NF {sub(/\r/, ""); printf "%s\\n",$0;}' private-key2-no-lf.pem > private-key2-no-lf.txt

# generate public
openssl rsa -in private-key.pem -outform PEM -pubout -out public.pem
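
A quick sanity check that the public key really belongs to the private key is to compare the moduli; the two digests should be identical:

openssl rsa -noout -modulus -in private-key.pem | openssl md5
openssl rsa -pubin -noout -modulus -in public.pem | openssl md5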

# remove linefeeds for kubernetes
awk 'NF {sub(/\r/, ""); printf "%s\\n",$0;}' private-key.pem
awk 'NF {sub(/\r/, ""); printf "%s\\n",$0;}' public.pem

Great, I can see the single-line output on the screen. It works if I use it in VS Code's launch.json.

How do I get it into Kubernetes?
$ kubectl create secret generic my-secret --from-file=ssh-privatekey=/path/to/.ssh/id_rsa --from-file=ssh-publickey=/path/to/.ssh/id_rsa.pub
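
To check what actually landed in the secret, pull a key back out and base64-decode it (secret and key names here match the example above):

kubectl get secret my-secret -o jsonpath='{.data.ssh-publickey}' | base64 --decode
kubectl get secret my-secret -o jsonpath='{.data.ssh-privatekey}' | base64 --decode | head -n 1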

awk 'NF {sub(/\r/, ""); printf "%s\\n",$0;}' jbox-api.local.homlish.net.2022-02-13.cert.pem > jbox-api.local.homlish.net.2022-02-13.cert.pem.txt
awk 'NF {sub(/\r/, ""); printf "%s\\n",$0;}' jbox-api.local.homlish.net.2022-02-13.key.pem > jbox-api.local.homlish.net.2022-02-13.key.pem.txt

k8s persistent volume

k patch pv imagesdev -p '{"spec":{"claimRef": null}}'
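
Clearing claimRef moves a Released PV back to Available so a new PVC can bind it; check the phase before and after:

kubectl get pv imagesdev -o jsonpath='{.status.phase}{"\n"}'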

k0:/home/phomlish/kubernetes/test-pv
k config set-context --current --namespace=kube-public

k apply -f test-image.yaml
k get pod shell-demo
k exec --stdin --tty shell-demo -- /bin/bash
k exec shell-demo -- env

k delete -f shell-demo.yaml

k get pod shell-demo -o wide
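
The manifests referenced above (test-image.yaml / shell-demo.yaml) aren't reproduced here; a minimal sketch of a shell-demo pod mounting the test PV might look roughly like this (image, mount path, and the PVC name are assumptions):

kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: shell-demo
spec:
  volumes:
  - name: test-volume
    persistentVolumeClaim:
      claimName: test-pvc        # assumed PVC bound to the test PV
  containers:
  - name: shell-demo
    image: nginx
    volumeMounts:
    - name: test-volume
      mountPath: /data
EOF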

k patch pv homlishca -p '{"spec":{"claimRef": null}}'

jenkins

https://www.jenkins.io/doc/book/installing/kubernetes/

jenkins: 10.11.168.251
jenkins-agent: 10.11.168.252

k create namespace jenkins

helm -n jenkins delete jenkins
k delete -f jenkins-persistent-volume.yaml
k create -f jenkins-persistent-volume.yaml
helm install jenkins -n jenkins -f jenkins-values.yaml jenkinsci/jenkins

finally got it working
http://10.11.168.251

printf $(kubectl get secret --namespace jenkins jenkins -o jsonpath="{.data.jenkins-admin-password}" | base64 --decode);echo

export POD_NAME=$(kubectl get pods --namespace jenkins -l "app.kubernetes.io/component=jenkins-master" -l "app.kubernetes.io/instance=jenkins" -o jsonpath="{.items[0].metadata.name}")
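
With POD_NAME set, the controller can also be reached directly over a port-forward while debugging:

kubectl --namespace jenkins port-forward $POD_NAME 8080:8080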

both worked:
lynx http://10.166.32.242:8080/login
lynx http://10.105.174.214:8080/login

want this to work:
lynx http://10.11.169.251/login

we need to create a LoadBalancer service with an annotation to match the Jenkins pod (a sketch of jenkins-service-ui.yaml follows the commands below)
helm -n jenkins delete jenkins
k delete -f jenkins-persistent-volume.yaml
k create -f jenkins-persistent-volume.yaml
helm install jenkins -n jenkins -f jenkins-values.yaml jenkinsci/jenkins

k delete -f jenkins-service-ui.yaml
k apply -f jenkins-service-ui.yaml
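
jenkins-service-ui.yaml isn't reproduced here. Roughly, it is a LoadBalancer service pinned to the address above and selecting the Jenkins controller pod by the chart's labels; a sketch, where the loadBalancerIP and the exact labels are assumptions taken from the notes above (an address-pool annotation could be used instead, depending on the load balancer):

kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Service
metadata:
  name: jenkins-ui
  namespace: jenkins
spec:
  type: LoadBalancer
  loadBalancerIP: 10.11.168.251    # assumed: the jenkins address noted above
  selector:
    app.kubernetes.io/component: jenkins-master
    app.kubernetes.io/instance: jenkins
  ports:
  - name: http
    port: 80
    targetPort: 8080
EOF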


apcupsd

multiple ups devices
https://wiki.debian.org/apcupsd

root@a6:/etc/apcupsd# lsusb
Bus 002 Device 001: ID 1d6b:0003 Linux Foundation 3.0 root hub
Bus 001 Device 002: ID 046d:c31c Logitech, Inc. Keyboard K120
Bus 001 Device 005: ID 051d:0002 American Power Conversion Uninterruptible Power Supply
Bus 001 Device 004: ID 051d:0002 American Power Conversion Uninterruptible Power Supply
Bus 001 Device 003: ID 046d:c077 Logitech, Inc. M105 Optical Mouse
Bus 001 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub

root@a6:/etc/apcupsd# udevadm info --attribute-walk --name=/dev/usb/hiddev0 | egrep 'manufacturer|product|serial'
ATTRS{manufacturer}=="American Power Conversion"
ATTRS{product}=="Back-UPS ES 600M1 FW:928.a8 .D USB FW:a8 "
ATTRS{serial}=="4B1903P08928 "
ATTRS{serial}=="0000:00:14.0"
ATTRS{product}=="xHCI Host Controller"
ATTRS{manufacturer}=="Linux 4.19.0-12-amd64 xhci-hcd"
root@a6:/etc/apcupsd# udevadm info --attribute-walk --name=/dev/usb/hiddev1 | egrep 'manufacturer|product|serial'
ATTRS{manufacturer}=="American Power Conversion"
ATTRS{product}=="Back-UPS 350 FW: 5.4.D USB FW: c1 "
ATTRS{serial}=="BB0236018154"
ATTRS{serial}=="0000:00:14.0"
ATTRS{manufacturer}=="Linux 4.19.0-12-amd64 xhci-hcd"
ATTRS{product}=="xHCI Host Controller"

root@a6:/etc/apcupsd# ls -l /dev/usb
total 0
crw------- 1 root root 180, 0 Jan 12 05:50 hiddev0
crw------- 1 root root 180, 1 Jan 12 05:50 hiddev1
lrwxrwxrwx 1 root root 7 Jan 12 05:50 ups-server -> hiddev1
lrwxrwxrwx 1 root root 7 Jan 12 05:50 ups-spare -> hiddev1
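
The ups-server/ups-spare symlinks come from udev rules keyed on the serial numbers shown by udevadm above; each apcupsd instance then points at its own symlink. A sketch (the rules file name and which serial maps to which name are assumptions; the serial may need to include trailing whitespace exactly as udevadm printed it):

cat > /etc/udev/rules.d/99-ups.rules <<'EOF'
# assumed mapping: adjust serials/names to match the udevadm output above
KERNEL=="hiddev*", ATTRS{serial}=="4B1903P08928 ", SYMLINK+="usb/ups-server"
KERNEL=="hiddev*", ATTRS{serial}=="BB0236018154", SYMLINK+="usb/ups-spare"
EOF
udevadm control --reload-rules && udevadm trigger

# each instance's apcupsd.conf (file layout per the Debian wiki, names assumed) then uses:
#   UPSCABLE usb
#   UPSTYPE usb
#   DEVICE /dev/usb/ups-server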


k8s cheatsheet

k get all --all-namespaces

k -n kube-system get configmap calico-config
k -n kube-system get configmap calico-config -o yaml

kubectl get clusterrolebindings system:node --all-namespaces -o json

k8s sandbox

FAILED!!!
I can access the 5 pods but not the service

https://kubernetes.io/docs/tutorials/stateless-application/expose-external-ip-address/

kubectl expose deployment hello-world --type=NodePort --name=example-service

I wanted to force a specific nodePort.
I wish I could have figured out a YAML for this, but ended up with the override below because my YAML always showed Endpoints: <none> (a YAML equivalent is sketched after the expose command).

so I used:
kubectl expose deployment hello-world --type=NodePort --name=example-service --overrides '{ "apiVersion": "v1","spec":{"ports":[{"port":8080,"protocol":"TCP","targetPort":8080,"nodePort":30031}]}}'
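
For reference, a YAML that should be equivalent to the override above; the selector is taken from the deployment labels shown below, everything else mirrors the expose command:

kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Service
metadata:
  name: example-service
spec:
  type: NodePort
  selector:
    app.kubernetes.io/name: load-balancer-example
  ports:
  - port: 8080
    targetPort: 8080
    nodePort: 30031
    protocol: TCP
EOF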

works from a cluster node (10.110.245.152 is the service's ClusterIP):
curl http://10.110.245.152:8080

but from a0, outside the cluster, the ClusterIP is not reachable and the same curl does not work:
curl http://10.110.245.152:8080

k describe deployment hello-world
Name:                   hello-world
Namespace:              default
CreationTimestamp:      Wed, 28 Oct 2020 03:05:08 -0400
Labels:                 app.kubernetes.io/name=load-balancer-example
Annotations:            deployment.kubernetes.io/revision: 1
Selector:               app.kubernetes.io/name=load-balancer-example
Replicas:               5 desired | 5 updated | 5 total | 5 available | 0 unavailable
StrategyType:           RollingUpdate
MinReadySeconds:        0
RollingUpdateStrategy:  25% max unavailable, 25% max surge
Pod Template:
  Labels:  app.kubernetes.io/name=load-balancer-example
  Containers:
   hello-world:
    Image:        gcr.io/google-samples/node-hello:1.0
    Port:         8080/TCP
    Host Port:    0/TCP
    Environment:  <none>
    Mounts:       <none>
  Volumes:        <none>
Conditions:
  Type           Status  Reason
  ----           ------  ------
  Progressing    True    NewReplicaSetAvailable
  Available      True    MinimumReplicasAvailable
OldReplicaSets:  <none>
NewReplicaSet:   hello-world-6df5659cb7 (5/5 replicas created)
Events:          <none>

kubectl expose deployment hello-world --type=NodePort --name=example-service

phomlish@k0:~$ kubectl describe services example-service
Name: example-service
Namespace: default
Labels: app.kubernetes.io/name=load-balancer-example
Annotations:
Selector: app.kubernetes.io/name=load-balancer-example
Type: NodePort
IP: 10.110.245.152
Port: 8080/TCP
TargetPort: 8080/TCP
NodePort: 30140/TCP
Endpoints: 10.166.32.215:8080,10.166.32.222:8080,10.166.32.233:8080 + 2 more…
Session Affinity: None
External Traffic Policy: Cluster
Events:

k get endpoints example-service -o yaml
apiVersion: v1
kind: Endpoints
metadata:
  annotations:
    endpoints.kubernetes.io/last-change-trigger-time: "2020-11-01T09:23:22Z"
  creationTimestamp: "2020-11-01T09:23:22Z"
  labels:
    app.kubernetes.io/name: load-balancer-example
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          .: {}
          f:endpoints.kubernetes.io/last-change-trigger-time: {}
        f:labels:
          .: {}
          f:app.kubernetes.io/name: {}
      f:subsets: {}
    manager: kube-controller-manager
    operation: Update
    time: "2020-11-01T09:23:22Z"
  name: example-service
  namespace: default
  resourceVersion: "1485713"
  selfLink: /api/v1/namespaces/default/endpoints/example-service
  uid: 27c316f9-57d6-413b-93ca-20458d875925
subsets:
- addresses:
  - ip: 10.166.32.215
    nodeName: k0
    targetRef:
      kind: Pod
      name: hello-world-6df5659cb7-fjqrc
      namespace: default
      resourceVersion: "1472538"
      uid: e870d96c-07a4-435f-a57c-88307b3dda3f
  - ip: 10.166.32.222
    nodeName: k0
    targetRef:
      kind: Pod
      name: hello-world-6df5659cb7-n9klg
      namespace: default
      resourceVersion: "1472588"
      uid: c2965c4c-c679-4519-8f5e-f4cca25b1942
  - ip: 10.166.32.233
    nodeName: k0
    targetRef:
      kind: Pod
      name: hello-world-6df5659cb7-5x6wb
      namespace: default
      resourceVersion: "1472473"
      uid: ff188c54-dab9-4d27-a301-06ea256c0588
  - ip: 10.166.32.236
    nodeName: k0
    targetRef:
      kind: Pod
      name: hello-world-6df5659cb7-9x9lx
      namespace: default
      resourceVersion: "1472726"
      uid: 55a09551-9136-4e23-aa14-c18fbada63f4
  - ip: 10.166.32.242
    nodeName: k0
    targetRef:
      kind: Pod
      name: hello-world-6df5659cb7-9fdzx
      namespace: default
      resourceVersion: "1472578"
      uid: abd01d7a-be6b-4aaa-900e-d8d2f54a1831
  ports:
  - port: 8080
    protocol: TCP

This was the earlier, broken attempt: its selector (app=load-balancer-example) does not match the pod label (app.kubernetes.io/name=load-balancer-example), which is why it never got endpoints:
Name: example-service
Namespace: default
Labels: name=load-balancer-example
Annotations:
Selector: app=load-balancer-example
Type: NodePort
IP: 10.97.241.235
Port: ihttpd 8080/TCP
TargetPort: 8080/TCP
NodePort: ihttpd 30163/TCP
Endpoints: <none>
Session Affinity: None
External Traffic Policy: Cluster
Events:

from a0, using a node IP and the service's NodePort instead:
curl http://10.11.1.70:30140
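
The NodePort and the node IPs to use from outside the cluster can be read back like this:

kubectl get svc example-service -o jsonpath='{.spec.ports[0].nodePort}{"\n"}'
kubectl get nodes -o wide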