Iperf - 1000BASE-LX SMF LC/LC Fiber Link Speed Test 
HOST A - SERVER

angelcool@2603-8000-6a00-5748-xxxx-xxxx-xxxx-xxxx:~$ date
Sun Mar 3 02:04:35 PM PST 2024

# IPv4
angelcool@2603-8000-6a00-5748-xxxx-xxxx-xxxx-xxxx:~$ iperf -s
------------------------------------------------------------
Server listening on TCP port 5001
TCP window size: 128 KByte (default)
------------------------------------------------------------
[ 1] local 192.168.1.184 port 5001 connected with 192.168.1.192 port 57642 (icwnd/mss/irtt=14/1448/515)
[ ID] Interval Transfer Bandwidth
[ 1] 0.00-10.01 sec 1.10 GBytes 941 Mbits/sec
angelcool@2603-8000-6a00-5748-xxxx-xxxx-xxxx-xxxx:~$
angelcool@2603-8000-6a00-5748-xxxx-xxxx-xxxx-xxxx:~$

# IPv6
angelcool@2603-8000-6a00-5748-xxxx-xxxx-xxxx-xxxx:~$ iperf -s -V
------------------------------------------------------------
Server listening on TCP port 5001
TCP window size: 128 KByte (default)
------------------------------------------------------------
[ 1] local 2603:8000:6a00:5748:xxxx:xxxx:xxxx:xxxx port 5001 connected with 2603:8000:6a00:5748:xxxx:xxxx:xxxx:xxxx port 56868 (icwnd/mss/irtt=13/1428/460)
[ ID] Interval Transfer Bandwidth
[ 1] 0.00-10.02 sec 1.08 GBytes 928 Mbits/sec
angelcool@2603-8000-6a00-5748-xxxx-xxxx-xxxx-xxxx:~$


HOST B - CLIENT

[acool@localhost ~]$
# IPv4
[acool@localhost ~]$ iperf -c 192.168.1.184
------------------------------------------------------------
Client connecting to 192.168.1.184, TCP port 5001
TCP window size: 16.0 KByte (default)
------------------------------------------------------------
[ 1] local 192.168.1.192 port 57642 connected with 192.168.1.184 port 5001 (icwnd/mss/irtt=14/1448/731)
[ ID] Interval Transfer Bandwidth
[ 1] 0.00-10.02 sec 1.10 GBytes 940 Mbits/sec

# IPv6
[acool@localhost ~]$ iperf -c 2603:8000:6a00:xxxx:xxxx:xxxx:xxxx
------------------------------------------------------------
Client connecting to 2603:8000:6a00:xxxx:xxxx:xxxx:xxxx, TCP port 5001
TCP window size: 16.0 KByte (default)
------------------------------------------------------------
[ 1] local 2603:8000:6a00:5748:: port 56868 connected with 2603:8000:6a00:5748:xxxx:xxxx:xxxx:xxxx port 5001 (icwnd/mss/irtt=13/1428/783)
[ ID] Interval Transfer Bandwidth
[ 1] 0.00-10.02 sec 1.08 GBytes 928 Mbits/sec
[acool@localhost ~]$
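
For reference, the same link can also be exercised with UDP; a sketch using iperf's -u and -b flags (not part of the session above):

# HOST A (server): iperf -s -u
[acool@localhost ~]$ iperf -c 192.168.1.184 -u -b 1000M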


Terraform: AWS VPC with IPv6 support
[acool@localhost EC2-VPC]$ 
[acool@localhost EC2-VPC]$ date
Sun Jul 4 06:19:34 PM PDT 2021
[acool@localhost EC2-VPC]$ cat /etc/redhat-release
Fedora release 33 (Thirty Three)
[acool@localhost EC2-VPC]$ aws --version
aws-cli/1.18.223 Python/3.9.5 Linux/5.12.13-200.fc33.x86_64 botocore/1.19.63
[acool@localhost EC2-VPC]$ terraform -v
Terraform v1.0.1
on linux_amd64
+ provider registry.terraform.io/hashicorp/aws v3.48.0
[acool@localhost EC2-VPC]$

The gist of this post:
 
[acool@localhost EC2-VPC]$
[acool@localhost EC2-VPC]$ cat main.tf
# extract public ssh key from private ssh key
# [acool@localhost EC2-VPC]$ ssh-keygen -y -f ./COOL_SSH_PRIVATEKEY.pem > COOL_SSH_PUBLICKEY.pub

// a.- set region to use
provider "aws" {
region = "us-east-2"
}

// b.- create ssh key pair
resource "aws_key_pair" "COOL_KEY_PAIR" {
key_name = "COOL_SSH_KEYPAIR"
public_key = "${file("./COOL_SSH_PUBLICKEY.pub")}"
}

// c.- create vpc resource
resource "aws_vpc" "COOL_VPC" {
enable_dns_support = true
enable_dns_hostnames = true
assign_generated_ipv6_cidr_block = true
cidr_block = "10.0.0.0/16"
}

// d.- create subnet
resource "aws_subnet" "COOL_VPC_SUBNET" {
vpc_id = "${aws_vpc.COOL_VPC.id}"
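// note: cidrsubnet("10.0.0.0/16", 4, 1) carves out 10.0.16.0/20 for IPv4;
// the ipv6 line below takes /64 subnet #1 out of the VPC's AWS-assigned /56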
cidr_block = "${cidrsubnet(aws_vpc.COOL_VPC.cidr_block, 4, 1)}"
map_public_ip_on_launch = true

ipv6_cidr_block = "${cidrsubnet(aws_vpc.COOL_VPC.ipv6_cidr_block, 8, 1)}"
assign_ipv6_address_on_creation = true
}

// e.- create internet gateway
resource "aws_internet_gateway" "COOL_GATEWAY" {
vpc_id = "${aws_vpc.COOL_VPC.id}"
}

// f.- create routing table
resource "aws_default_route_table" "COOL_VPC_ROUTING_TABLE" {
default_route_table_id = "${aws_vpc.COOL_VPC.default_route_table_id}"

route {
cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.COOL_GATEWAY.id}"
}

route {
ipv6_cidr_block = "::/0"
gateway_id = "${aws_internet_gateway.COOL_GATEWAY.id}"
}
}

// g.- associate the subnet with the default route table
resource "aws_route_table_association" "COOL_SUBNET_ROUTE_TABLE_ASSOCIATION" {
subnet_id = "${aws_subnet.COOL_VPC_SUBNET.id}"
route_table_id = "${aws_default_route_table.COOL_VPC_ROUTING_TABLE.id}"
}

// h.- create security group
resource "aws_security_group" "COOL_SECURITY_GROUP" {
name = "COOL_SECURITY_GROUP"
vpc_id = "${aws_vpc.COOL_VPC.id}"

ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}

ingress {
from_port = 22
to_port = 22
protocol = "tcp"
ipv6_cidr_blocks = ["::/0"]
}

// allow ping
ingress{
from_port = -1
to_port = -1
protocol = "icmp"
cidr_blocks = ["0.0.0.0/0"]
}

// allow ping
ingress{
from_port = -1
to_port = -1
protocol = "icmpv6"
ipv6_cidr_blocks = ["::/0"]
}

egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}

egress {
from_port = 0
to_port = 0
protocol = "-1"
ipv6_cidr_blocks = ["::/0"]
}
}

// i.- create EC2 instance
resource "aws_instance" "COOL_INSTANCE_APP01" {
ami = "ami-01d5ac8f5f8804300"
key_name = "COOL_SSH_KEYPAIR"
instance_type = "t2.micro"
subnet_id = "${aws_subnet.COOL_VPC_SUBNET.id}"
ipv6_address_count = 1
vpc_security_group_ids = ["${aws_security_group.COOL_SECURITY_GROUP.id}"]

tags = {
Name = "COOL_INSTANCE_APP01"
}

depends_on = [aws_internet_gateway.COOL_GATEWAY]
}

// j.- print instance IPs
output "COOL_INSTANCE_APP01_IPv4" {
value = "${aws_instance.COOL_INSTANCE_APP01.public_ip}"
}

output "COOL_INSTANCE_APP01_IPv6" {
value = ["${aws_instance.COOL_INSTANCE_APP01.ipv6_addresses}"]
}
[acool@localhost EC2-VPC]$
[acool@localhost EC2-VPC]$ terraform init
...
[acool@localhost EC2-VPC]$
[acool@localhost EC2-VPC]$ terraform apply
...
[acool@localhost EC2-VPC]$
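
Once the apply finishes, the outputs above can be read back and used to reach the instance; a quick sketch (assuming the AMI's default login user is centos, per the SSH note in the update below):

[acool@localhost EC2-VPC]$ terraform output COOL_INSTANCE_APP01_IPv4
[acool@localhost EC2-VPC]$ ssh -i ./COOL_SSH_PRIVATEKEY.pem centos@ip_address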


Happy 4th of July, 2021! And cheers!


UPDATE - November 9, 2021
Added an 'app_servers' variable to create multiple aws_instance resources.
Commit message: 'Added EIP and specified private ip addresses.'

main.tf :

# extract public ssh key from private ssh key
# [acool@localhost EC2-VPC]$ ssh-keygen -y -f ./COOL_SSH_PRIVATEKEY.pem > COOL_SSH_PUBLICKEY.pub

// set region to use
provider "aws" {
region = "us-east-2"
}

// create ssh key pair
resource "aws_key_pair" "COOL_KEY_PAIR" {
key_name = "COOL_SSH_KEYPAIR"
public_key = "${file("./COOL_SSH_PUBLICKEY.pub")}"
}

// create vpc resource
resource "aws_vpc" "COOL_VPC" {
enable_dns_support = true
enable_dns_hostnames = true
assign_generated_ipv6_cidr_block = true
cidr_block = "10.0.0.0/16"
}

// create subnet
resource "aws_subnet" "COOL_PVC_SUBNET" {
vpc_id = "${aws_vpc.COOL_VPC.id}"
cidr_block = "${cidrsubnet(aws_vpc.COOL_VPC.cidr_block, 4, 1)}"
map_public_ip_on_launch = true

ipv6_cidr_block = "${cidrsubnet(aws_vpc.COOL_VPC.ipv6_cidr_block, 8, 1)}"
assign_ipv6_address_on_creation = true
}

// create internet gateway
resource "aws_internet_gateway" "COOL_GATEWAY" {
vpc_id = "${aws_vpc.COOL_VPC.id}"
}

// create routing table
resource "aws_default_route_table" "COOL_VPC_ROUTING_TABLE" {
default_route_table_id = "${aws_vpc.COOL_VPC.default_route_table_id}"

route {
cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.COOL_GATEWAY.id}"
}

route {
ipv6_cidr_block = "::/0"
gateway_id = "${aws_internet_gateway.COOL_GATEWAY.id}"
}
}

// associate the subnet with the default route table
resource "aws_route_table_association" "COOL_SUBNET_ROUTE_TABLE_ASSOCIATION" {
subnet_id = "${aws_subnet.COOL_PVC_SUBNET.id}"
route_table_id = "${aws_default_route_table.COOL_VPC_ROUTING_TABLE.id}"
}

// create security group
resource "aws_security_group" "COOL_SECURITY_GROUP" {
name = "COOL_SECURITY_GROUP"
vpc_id = "${aws_vpc.COOL_VPC.id}"

ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}

ingress {
from_port = 22
to_port = 22
protocol = "tcp"
ipv6_cidr_blocks = ["::/0"]
}

// allow ping
ingress{
from_port = -1
to_port = -1
protocol = "icmp"
cidr_blocks = ["0.0.0.0/0"]
}

// allow ping
ingress{
from_port = -1
to_port = -1
protocol = "icmpv6"
ipv6_cidr_blocks = ["::/0"]
}

egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}

egress {
from_port = 0
to_port = 0
protocol = "-1"
ipv6_cidr_blocks = ["::/0"]
}
}

// server names
variable "app_servers" {
description = "name of app servers"
type = list(map(any))
default = [
{name:"COOL_LB01", ip:"10.0.16.4"},
{name:"COOL_LB02", ip:"10.0.16.5"},
{name:"COOL_APP01", ip:"10.0.16.6"},
{name:"COOL_APP02", ip:"10.0.16.7"},
]
}

// create EC2 instance
resource "aws_instance" "COOL_SERVERS" {
ami = "ami-01d5ac8f5f8804300"
key_name = "COOL_SSH_KEYPAIR"
instance_type = "t2.micro"
subnet_id = "${aws_subnet.COOL_PVC_SUBNET.id}"
ipv6_address_count = 1
vpc_security_group_ids = ["${aws_security_group.COOL_SECURITY_GROUP.id}"]
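// for_each turns the app_servers list into a map keyed by server name, so each
// instance is addressable as e.g. aws_instance.COOL_SERVERS["COOL_LB01"]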
for_each = {for server in var.app_servers: server.name => server}
private_ip = each.value["ip"]

tags = {
Name = each.value["name"]
}

depends_on = [aws_internet_gateway.COOL_GATEWAY]
}

// elastic IP
resource "aws_eip" "COOL_EIP" {
instance = aws_instance.COOL_SERVERS["COOL_LB01"].id
vpc = true
}

// print instance IPs
output "COOL_INSTANCE_APP01_IPv4" {
value = {for k, v in aws_instance.COOL_SERVERS: k => v.public_ip}
}

output "COOL_INSTANCE_APP01_IPv6" {
value = {for k, v in aws_instance.COOL_SERVERS: k => v.ipv6_addresses}
}

output "COOL_VPC_IPV6_BLOCK" {
value = aws_subnet.COOL_PVC_SUBNET.ipv6_cidr_block
}

// SSH to instance:
// [acool@localhost EC2-VPC]$ ssh -i ./COOL_SSH_PRIVATEKEY.pem centos@ip_address

// remove eip from COOL_LB01
// [acool@localhost EC2-VPC]$ aws ec2 disassociate-address --region us-east-2 --public-ip 3.131.249.150

// assign eip to COOL_LB02; adjust the instance id to match LB02. The same commands work to return the eip to LB01
// [acool@localhost EC2-VPC]$ aws ec2 associate-address --region us-east-2 --public-ip 3.131.249.150 --instance-id i-05a634252654b7b34



[ view entry ] ( 462 views )   |  print article
Terraform: AWS EC2 single instance example. 
[acool@localhost terraform-tests]$ terraform --version
Terraform v1.0.1
...
[acool@localhost terraform-tests]$ aws --version
aws-cli/1.18.223 Python/3.9.5 Linux/5.12.12-200.fc33.x86_64 botocore/1.19.63
...

The gist of this post:
[acool@localhost EC2-SINGLE-INSTANCE]$ cat main.tf 
provider "aws" {
region = "us-east-2"
}

// create ssh key
resource "tls_private_key" "COOL_SSH_PK" {
algorithm = "RSA"
rsa_bits = 4096
}

// create ssh key pair
resource "aws_key_pair" "COOL_KEY_PAIR" {
key_name = "COOL_SSH_KEYNAME"
public_key = tls_private_key.COOL_SSH_PK.public_key_openssh

provisioner "local-exec" { # Create "myKey.pem" to your computer!!
command = "echo '${tls_private_key.COOL_SSH_PK.private_key_pem}' > ./COOL_SSH_PK.pem"
}
}

// create aws ec2 instance
resource "aws_instance" "COOLAPP01" {
ami = "ami-01d5ac8f5f8804300"
instance_type = "t2.micro"
key_name = aws_key_pair.COOL_KEY_PAIR.key_name
vpc_security_group_ids = [aws_security_group.COOLAPP01_security_group.id]

tags = {
Name = "COOLAPP01_tag_name"
}
}

// create security group
resource "aws_security_group" "COOLAPP01_security_group" {

name="terraform_COOLAPP01_security_group"

// allow port 80 tcp
ingress{
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}

// allow port 22 tcp
ingress{
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}

// allow ping
ingress{
from_port = -1
to_port = -1
protocol = "icmp"
cidr_blocks = ["0.0.0.0/0"]
}

// allow all outbound traffic
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}

// TODO: enable IPV6

output "public_ip" {
value = aws_instance.COOLAPP01.public_ip
description = "public ip for COOLAPP01"
}
[acool@localhost EC2-SINGLE-INSTANCE]$
[acool@localhost EC2-SINGLE-INSTANCE]$ terraform apply
...
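
The local-exec provisioner writes COOL_SSH_PK.pem with default permissions, so tighten them before connecting; a minimal sketch (assuming the AMI's default user is centos):

[acool@localhost EC2-SINGLE-INSTANCE]$ chmod 400 ./COOL_SSH_PK.pem
[acool@localhost EC2-SINGLE-INSTANCE]$ ssh -i ./COOL_SSH_PK.pem centos@$(terraform output -raw public_ip)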


Happy 4th of July, 2021, y'all!!

Highly Available HAproxy Balancer with Keepalived 
We're gonna use Keepalived's VRRP feature.

The floating IP address will be 192.168.121.179.

Vagrantfile needed parameters:

config.vm.box = "centos/8"
config.vm.network "private_network", ip: "192.168.121.180"
config.vm.hostname = "lb01.localhost"

config.vm.box = "centos/8"
config.vm.network "private_network", ip: "192.168.121.181"
config.vm.hostname = "lb02.localhost"

config.vm.box = "centos/8"
config.vm.network "private_network", ip: "192.168.121.191"
config.vm.hostname = "app01.localhost"

config.vm.box = "centos/8"
config.vm.network "private_network", ip: "192.168.121.192"
config.vm.hostname = "app02.localhost"

------------------------------------------------------------------------
app01 and app02 will have nginx installed, serving its default welcome page.
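
Roughly, each app box was prepared like this (a sketch; the install itself isn't captured in the sessions below):

[vagrant@app01 ~]$ sudo dnf install nginx
[vagrant@app01 ~]$ sudo systemctl enable --now nginx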

angel@acool:~/Documents/haproxy-cluster$ date
Fri 21 May 2021 07:11:52 PM PDT
angel@acool:~/Documents/haproxy-cluster$ cat /etc/lsb-release
DISTRIB_ID=Ubuntu
DISTRIB_RELEASE=20.04
DISTRIB_CODENAME=focal
DISTRIB_DESCRIPTION="Ubuntu 20.04.2 LTS"
angel@acool:~/Documents/haproxy-cluster$
angel@acool:~/Documents/haproxy-cluster$ tree
.
├── app01
│   └── Vagrantfile
├── app02
│   └── Vagrantfile
├── lb01
│   └── Vagrantfile
├── lb02
│   └── Vagrantfile
└── NOTES.txt

4 directories, 5 files
angel@acool:~/Documents/haproxy-cluster$
angel@acool:~/Documents/haproxy-cluster$ sudo vagrant global-status
id name provider state directory
------------------------------------------------------------------------------
1553a24 default libvirt shutoff /home/angel/Documents/haproxy-cluster/lb01
3c33424 default libvirt shutoff /home/angel/Documents/haproxy-cluster/lb02
1d9af06 default libvirt shutoff /home/angel/Documents/haproxy-cluster/app01
5bc8220 default libvirt shutoff /home/angel/Documents/haproxy-cluster/app02
...
angel@acool:~/Documents/haproxy-cluster$
angel@acool:~/Documents/haproxy-cluster$
angel@acool:~/Documents/haproxy-cluster/lb01$ vagrant --version
Vagrant 2.2.6
angel@acool:~/Documents/haproxy-cluster$
angel@acool:~/Documents/haproxy-cluster$ cd lb01/
angel@acool:~/Documents/haproxy-cluster/lb01$ sudo vagrant up
...
angel@acool:~/Documents/haproxy-cluster/lb01$ sudo vagrant ssh
Last login: Sat May 22 02:08:45 2021 from 192.168.121.1
[vagrant@lb01 ~]$
[vagrant@lb01 ~]$ cat /etc/redhat-release
CentOS Linux release 8.3.2011
[vagrant@lb01 ~]$ sudo dnf install haproxy keepalived

[vagrant@lb01 ~]$ haproxy -v
HA-Proxy version 1.8.23 2019/11/25
Copyright 2000-2019 Willy Tarreau <willy@haproxy.org>

[vagrant@lb01 ~]$ keepalived --version
Keepalived v2.0.10 (11/12,2018)
...
[vagrant@lb01 ~]$
[vagrant@lb01 ~]$ cat /etc/sysctl.conf
...
net.ipv4.ip_nonlocal_bind=1
[vagrant@lb01 ~]$
[vagrant@lb01 ~]$ sudo sysctl -p
net.ipv4.ip_nonlocal_bind = 1
[vagrant@lb01 ~]$
[vagrant@lb01 ~]$
[vagrant@lb01 ~]$ cat /etc/haproxy/haproxy.cfg
...
## enable stats
listen stats
bind :9000
stats enable
stats uri /stats
stats refresh 10s
stats admin if LOCALHOST

## enable www frontend, bind floating ip address
frontend www
bind 192.168.121.179:80
mode http
default_backend www_servers

## enable www backend
backend www_servers
balance roundrobin
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk HEAD / HTTP/1.1\r\nHost:localhost
server app01 192.168.121.191:80 check
server app02 192.168.121.192:80 check

[vagrant@lb01 ~]$
[vagrant@lb01 ~]$ cat /etc/keepalived/keepalived.conf
vrrp_script chk_haproxy { # Requires keepalived-1.1.13
#script "killall -0 haproxy" # cheaper than pidof
script "pidof haproxy" # this one worked better for me.
interval 2 # check every 2 seconds
weight 2 # add 2 points of priority if OK
}
vrrp_instance VI_1 {
interface eth0
state MASTER
virtual_router_id 51
priority 101 # 101 on lb01, 100 on lb02
virtual_ipaddress {
192.168.121.179
}
track_script {
chk_haproxy
}
}
[vagrant@lb01 ~]$
[vagrant@lb01 ~]$ # this should be the end result: the floating ip should be listed.
[vagrant@lb01 ~]$ ip a |grep 179
inet 192.168.121.179/32 scope global eth0
[vagrant@lb01 ~]$
[vagrant@lb01 ~]$ # if you stop haproxy (or shut down lb01), lb02 should take over the floating ip!
[vagrant@lb01 ~]$ # when haproxy is back, lb01 will reclaim the floating ip, the end result is
[vagrant@lb01 ~]$ # the floating ip will be available even if lb01 goes down.
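
Why this works: with weight 2, lb01 advertises priority 101+2=103 and lb02 advertises 102 while haproxy runs on both; stop haproxy on lb01 and its priority drops to 101, below lb02's 102, so the VIP moves. A quick way to watch the failover (a sketch, not captured above):

[vagrant@lb01 ~]$ sudo systemctl stop haproxy
[vagrant@lb02 ~]$ ip a | grep 179 # the floating ip should now be listed on lb02
[vagrant@lb01 ~]$ sudo systemctl start haproxy
[vagrant@lb01 ~]$ ip a | grep 179 # and return here a few seconds later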


Cheers!

UPDATE: November 11, 2021 - Adding lb02 details to remove ambiguity when I revisit this post in the future.

[vagrant@lb02 ~]$ cat /etc/sysctl.conf
...
net.ipv4.ip_nonlocal_bind=1
[vagrant@lb02 ~]$


[vagrant@lb02 ~]$
[vagrant@lb02 ~]$
[vagrant@lb02 ~]$ cat /etc/keepalived/keepalived.conf
vrrp_script chk_haproxy { # Requires keepalived-1.1.13
#script "killall -0 haproxy" # cheaper than pidof
script "pidof haproxy"
interval 2 # check every 2 seconds
weight 2 # add 2 points of priority if OK
}
vrrp_instance VI_1 {
interface eth0
state MASTER
virtual_router_id 51
priority 100 # 101 on primary, 100 on secondary
virtual_ipaddress {
192.168.121.179
}
track_script {
chk_haproxy
}
}

[vagrant@lb02 ~]$


[vagrant@lb02 ~]$
[vagrant@lb02 ~]$
[vagrant@lb02 ~]$ cat /etc/haproxy/haproxy.cfg
#---------------------------------------------------------------------
# Example configuration for a possible web application. See the
# full configuration options online.
#
# https://www.haproxy.org/download/1.8/doc/configuration.txt
#
#---------------------------------------------------------------------

#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
# to have these messages end up in /var/log/haproxy.log you will
# need to:
#
# 1) configure syslog to accept network log events. This is done
# by adding the '-r' option to the SYSLOGD_OPTIONS in
# /etc/sysconfig/syslog
#
# 2) configure local2 events to go to the /var/log/haproxy.log
# file. A line like the following can be added to
# /etc/sysconfig/syslog
#
# local2.* /var/log/haproxy.log
#
log 127.0.0.1 local2

chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
daemon

# turn on stats unix socket
stats socket /var/lib/haproxy/stats

# utilize system-wide crypto-policies
ssl-default-bind-ciphers PROFILE=SYSTEM
ssl-default-server-ciphers PROFILE=SYSTEM

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
mode http
log global
option httplog
option dontlognull
option http-server-close
option forwardfor except 127.0.0.0/8
option redispatch
retries 3
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout http-keep-alive 10s
timeout check 10s
maxconn 3000

# ME: enable stats
listen stats
bind :9000
stats enable
stats uri /stats
stats refresh 10s
stats admin if LOCALHOST

# ME:
frontend www
bind 192.168.121.179:80
mode http
default_backend www_servers

# ME:
backend www_servers
balance roundrobin
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
option httpchk HEAD / HTTP/1.1\r\nHost:localhost
server app01 192.168.121.191:80 check
server app02 192.168.121.192:80 check

#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
frontend main
bind *:5000
acl url_static path_beg -i /static /images /javascript /stylesheets
acl url_static path_end -i .jpg .gif .png .css .js

use_backend static if url_static
default_backend app

#---------------------------------------------------------------------
# static backend for serving up images, stylesheets and such
#---------------------------------------------------------------------
backend static
balance roundrobin
server static 127.0.0.1:4331 check

#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend app
balance roundrobin
server app1 127.0.0.1:5001 check
server app2 127.0.0.1:5002 check
server app3 127.0.0.1:5003 check
server app4 127.0.0.1:5004 check
[vagrant@lb02 ~]$
[vagrant@lb02 ~]$





Docker: reference information for SWARMS, NODES, SERVICES, STACKS and NETWORKS 
[vagrant@box1 ~]$ date
Fri Dec 11 18:34:51 UTC 2020
[vagrant@box1 ~]$
[vagrant@box1 ~]$ docker --version
Docker version 20.10.0, build 7287ab3
[vagrant@box1 ~]$
[vagrant@box1 ~]$
[vagrant@box1 ~]$ ########## Docker SWARM info ##########
[vagrant@box1 ~]$ docker swarm

Usage: docker swarm COMMAND

Manage Swarm

Commands:
ca Display and rotate the root CA
init Initialize a swarm
join Join a swarm as a node and/or manager
join-token Manage join tokens
leave Leave the swarm
unlock Unlock swarm
unlock-key Manage the unlock key
update Update the swarm

Run 'docker swarm COMMAND --help' for more information on a command.
[vagrant@box1 ~]$
[vagrant@box1 ~]$
[vagrant@box1 ~]$ ########## Docker NODE info ##########
[vagrant@box1 ~]$ docker node

Usage: docker node COMMAND

Manage Swarm nodes

Commands:
demote Demote one or more nodes from manager in the swarm
inspect Display detailed information on one or more nodes
ls List nodes in the swarm
promote Promote one or more nodes to manager in the swarm
ps List tasks running on one or more nodes, defaults to current node
rm Remove one or more nodes from the swarm
update Update a node

Run 'docker node COMMAND --help' for more information on a command.
[vagrant@box1 ~]$
[vagrant@box1 ~]$
[vagrant@box1 ~]$ ########## Docker SERVICE info ##########
[vagrant@box1 ~]$ docker service

Usage: docker service COMMAND

Manage services

Commands:
create Create a new service
inspect Display detailed information on one or more services
logs Fetch the logs of a service or task
ls List services
ps List the tasks of one or more services
rm Remove one or more services
rollback Revert changes to a service's configuration
scale Scale one or multiple replicated services
update Update a service

Run 'docker service COMMAND --help' for more information on a command.
[vagrant@box1 ~]$
[vagrant@box1 ~]$
[vagrant@box1 ~]$ ########## Docker STACK info ##########
[vagrant@box1 ~]$ docker stack

Usage: docker stack [OPTIONS] COMMAND

Manage Docker stacks

Options:
--orchestrator string Orchestrator to use (swarm|kubernetes|all)

Commands:
deploy Deploy a new stack or update an existing stack
ls List stacks
ps List the tasks in the stack
rm Remove one or more stacks
services List the services in the stack

Run 'docker stack COMMAND --help' for more information on a command.
[vagrant@box1 ~]$
[vagrant@box1 ~]$
[vagrant@box1 ~]$ ########## Docker NETWORK info ##########
[vagrant@box1 ~]$ docker network

Usage: docker network COMMAND

Manage networks

Commands:
connect Connect a container to a network
create Create a network
disconnect Disconnect a container from a network
inspect Display detailed information on one or more networks
ls List networks
prune Remove all unused networks
rm Remove one or more networks

Run 'docker network COMMAND --help' for more information on a command.
[vagrant@box1 ~]$
[vagrant@box1 ~]$
[vagrant@box1 ~]$ ########## Everything available under the docker binary ##########
[vagrant@box1 ~]$
[vagrant@box1 ~]$ docker

Usage: docker [OPTIONS] COMMAND

A self-sufficient runtime for containers

Options:
--config string Location of client config files (default "/home/vagrant/.docker")
-c, --context string Name of the context to use to connect to the daemon (overrides DOCKER_HOST env var and default context set with "docker
context use")
-D, --debug Enable debug mode
-H, --host list Daemon socket(s) to connect to
-l, --log-level string Set the logging level ("debug"|"info"|"warn"|"error"|"fatal") (default "info")
--tls Use TLS; implied by --tlsverify
--tlscacert string Trust certs signed only by this CA (default "/home/vagrant/.docker/ca.pem")
--tlscert string Path to TLS certificate file (default "/home/vagrant/.docker/cert.pem")
--tlskey string Path to TLS key file (default "/home/vagrant/.docker/key.pem")
--tlsverify Use TLS and verify the remote
-v, --version Print version information and quit

Management Commands:
app* Docker App (Docker Inc., v0.9.1-beta3)
builder Manage builds
buildx* Build with BuildKit (Docker Inc., v0.4.2-docker)
config Manage Docker configs
container Manage containers
context Manage contexts
image Manage images
manifest Manage Docker image manifests and manifest lists
network Manage networks
node Manage Swarm nodes
plugin Manage plugins
secret Manage Docker secrets
service Manage services
stack Manage Docker stacks
swarm Manage Swarm
system Manage Docker
trust Manage trust on Docker images
volume Manage volumes

Commands:
attach Attach local standard input, output, and error streams to a running container
build Build an image from a Dockerfile
commit Create a new image from a container's changes
cp Copy files/folders between a container and the local filesystem
create Create a new container
diff Inspect changes to files or directories on a container's filesystem
events Get real time events from the server
exec Run a command in a running container
export Export a container's filesystem as a tar archive
history Show the history of an image
images List images
import Import the contents from a tarball to create a filesystem image
info Display system-wide information
inspect Return low-level information on Docker objects
kill Kill one or more running containers
load Load an image from a tar archive or STDIN
login Log in to a Docker registry
logout Log out from a Docker registry
logs Fetch the logs of a container
pause Pause all processes within one or more containers
port List port mappings or a specific mapping for the container
ps List containers
pull Pull an image or a repository from a registry
push Push an image or a repository to a registry
rename Rename a container
restart Restart one or more containers
rm Remove one or more containers
rmi Remove one or more images
run Run a command in a new container
save Save one or more images to a tar archive (streamed to STDOUT by default)
search Search the Docker Hub for images
start Start one or more stopped containers
stats Display a live stream of container(s) resource usage statistics
stop Stop one or more running containers
tag Create a tag TARGET_IMAGE that refers to SOURCE_IMAGE
top Display the running processes of a container
unpause Unpause all processes within one or more containers
update Update configuration of one or more containers
version Show the Docker version information
wait Block until one or more containers stop, then print their exit codes

Run 'docker COMMAND --help' for more information on a command.
To get more help with docker, check out guides at https://docs.docker.com/go/guides/
[vagrant@box1 ~]$
[vagrant@box1 ~]$
[vagrant@box1 ~]$


Nagios: Miscellaneous notes on installing and configuring Nagios. 
[acool@localhost ~]$ 
[acool@localhost ~]$ date
Mon 07 Dec 2020 05:45:53 PM PST
[acool@localhost ~]$
[acool@localhost ~]$ cat /etc/redhat-release
Fedora release 31 (Thirty One)
[acool@localhost ~]$
[acool@localhost ~]$ sudo dnf install httpd nagios nagios-common nagios-plugins-all
Last metadata expiration check: 0:36:12 ago on Mon 07 Dec 2020 05:09:52 PM PST.
Package httpd-2.4.46-1.fc31.x86_64 is already installed.
Package nagios-4.4.5-7.fc31.x86_64 is already installed.
Package nagios-common-4.4.5-7.fc31.x86_64 is already installed.
Package nagios-plugins-all-2.3.3-2.fc31.x86_64 is already installed.
Dependencies resolved.
Nothing to do.
Complete!
[acool@localhost ~]$
[acool@localhost ~]$ cat /etc/httpd/conf.d/nagios.conf
...
[acool@localhost ~]$
[acool@localhost ~]$ # default password for web ui nagiosadmin:nagiosadmin? I think yes.
[acool@localhost ~]$ ll /etc/nagios/
total 92
-rw-rw-r--. 1 root root 13699 Apr 7 2020 cgi.cfg
-rw-rw-r--. 1 root root 45886 Nov 4 23:23 nagios.cfg
-rw-r--r--. 1 root root 12839 Apr 29 2020 nrpe.cfg
drwxr-x---. 2 root nagios 4096 Nov 5 11:05 objects
-rw-r-----. 1 root apache 27 Apr 7 2020 passwd
drwxr-x---. 2 root nagios 4096 Nov 3 12:22 private
[acool@localhost ~]$
[acool@localhost ~]$ # http://localhost:8080/nagios/ should now load (adjust port as needed)


TODO: nagiosgraph. NEEDS TESTING!!

A.- Looks like we need this in commands.cfg :

define command {
command_name process-service-perfdata-for-nagiosgraph
command_line /usr/local/nagiosgraph/bin/insert.pl
}

B.- And this in templates.cfg :

define service {
name graphed-service
action_url /nagiosgraph/cgi-bin/show.cgi?host=$HOSTNAME$&service=$SERVICEDESC$' onMouseOver='showGraphPopup(this)' onMouseOut='hideGraphPopup()' rel='/nagiosgraph/cgi-bin/showgraph.cgi?host=$HOSTNAME$&service=$SERVICEDESC$&period=week&rrdopts=-w+450+-j
register 0
}

C.- Then we need to add 'graphed-service' to services in localhost.cfg for example:

# Define a service to "ping" the local machine
define service {

use local-service,graphed-service; Name of service template to use
host_name localhost
service_description PING
check_command check_ping!100.0,20%!500.0,60%
}

D.- Set these in /etc/nagios/nagios.cfg - NEEDS TO BE VERIFIED:

process_performance_data=1
service_perfdata_file=/tmp/perfdata.log
service_perfdata_file_template=$LASTSERVICECHECK$||$HOSTNAME$||$SERVICEDESC$||$SERVICEOUTPUT$||$SERVICEPERFDATA$
service_perfdata_file_mode=a
service_perfdata_file_processing_interval=30
service_perfdata_file_processing_command=process-service-perfdata-for-nagiosgraph
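
After wiring up A through D, restarting Nagios and watching the perfdata file should confirm data is flowing (untested, like the rest of this TODO):

[root@localhost ~]# systemctl restart nagios
[root@localhost ~]# tail -f /tmp/perfdata.log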

More hints :
[root@localhost nagiosgraph]# 
[root@localhost nagiosgraph]# grep -nri nagiosgraph /etc/httpd/
/etc/httpd/conf/httpd.conf:354:#### NAGIOSGRAPH #####
/etc/httpd/conf/httpd.conf:355:include /usr/local/nagiosgraph/etc/nagiosgraph-apache.conf
[root@localhost nagiosgraph]#


See nagiosgraph settings:

http://localhost:8080/nagiosgraph/cgi-b ... config.cgi


Solr: Starting Solr 4.7 for development purposes. 
[acool@localhost solr-4.7.0]$ date
Fri 25 Sep 2020 09:33:39 AM PDT
[acool@localhost solr-4.7.0]$
[acool@localhost solr-4.7.0]$
[acool@localhost solr-4.7.0]$ sudo yum install java-1.8.0-openjdk
...
[acool@localhost solr-4.7.0]$ java -version
openjdk version "1.8.0_265"
OpenJDK Runtime Environment (build 1.8.0_265-b01)
OpenJDK 64-Bit Server VM (build 25.265-b01, mixed mode)
[acool@localhost solr-4.7.0]$
[acool@localhost solr-4.7.0]$
[acool@localhost solr-4.7.0]$ ll
total 460
-rw-r--r--. 1 acool acool 362968 Feb 21 2014 CHANGES.txt
drwxr-xr-x. 12 acool acool 4096 Feb 21 2014 contrib
drwxrwxr-x. 4 acool acool 4096 Feb 1 2020 dist
drwxrwxr-x. 17 acool acool 4096 Feb 1 2020 docs
drwxr-xr-x. 15 acool acool 4096 Feb 2 2020 example
drwxr-xr-x. 2 acool acool 32768 Feb 1 2020 licenses
-rw-r--r--. 1 acool acool 12646 Feb 18 2014 LICENSE.txt
-rw-r--r--. 1 acool acool 26762 Feb 18 2014 NOTICE.txt
-rw-r--r--. 1 acool acool 5344 Feb 18 2014 README.txt
-rw-r--r--. 1 acool acool 686 Feb 18 2014 SYSTEM_REQUIREMENTS.txt
[acool@localhost solr-4.7.0]$
[acool@localhost solr-4.7.0]$
[acool@localhost solr-4.7.0]$ # Starting server
[acool@localhost solr-4.7.0]$ cd example/
[acool@localhost example]$
[acool@localhost example]$ java -jar start.jar
...
[acool@localhost example]$
[acool@localhost example]$ # http://localhost:8983/solr should now render the dashboard
[acool@localhost example]$
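
With the server running, the sample documents shipped under example/exampledocs can be indexed from a second terminal; a sketch using the post tool bundled with Solr 4.x:

[acool@localhost example]$ cd exampledocs/
[acool@localhost exampledocs]$ java -jar post.jar *.xml
[acool@localhost exampledocs]$ # http://localhost:8983/solr/collection1/select?q=*:* should now return documents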


12/7/2020 Sample query:
http://app01.example.com:8098/search/query/article_index?sort=score DESC
&q={!edismax}how to become a millionaire
&qf=authorName^6 objectId^4 headline^2 deck
&fq={!lucene}
edition:us
AND statusId:4
AND objectTypeId:(1 2 4 12 15)
AND publicationDateISO8601:[NOW-10YEAR TO NOW]
&qs=5
&bq=publicationDateISO8601:[NOW-2YEAR TO NOW]
&fl=*,score
&hl=true
&mm=3<80%
&wt=json
&rows=20
&start=0
&df=entspellcheck
&spellcheck=true
&spellcheck.q="how to become a millionaire"~10
&spellcheck.collate=true
&spellcheck.maxCollations=30
&spellcheck.maxCollationTries=30
&spellcheck.maxCollationEvaluations=30
&spellcheck.collateExtendedResults=true
&spellcheck.collateMaxCollectDocs=30
&spellcheck.count=10
&spellcheck.extendedResults=true
&spellcheck.maxResultsForSuggest=5
&spellcheck.alternativeTermCount=10
&spellcheck.accuracy=0.5


Docker: Swarm Demo 
In this demo I:

a) create 3 CentOS 7 vagrant VMs
b) install docker in each VM
c) create a Docker Swarm (Swarm mode) with one manager and 2 workers
d) create a service with the nginx image, update the service to use the httpd image, and update the replicas' memory limit

[acool@localhost docker-swarm-demo]$ date
Sat 22 Feb 2020 04:35:36 PM PST
[acool@localhost docker-swarm-demo]$ cat /etc/redhat-release
Fedora release 31 (Thirty One)
[acool@localhost docker-swarm-demo]$ vagrant --version
Vagrant 2.2.6
[acool@localhost docker-swarm-demo]$ tree
.
├── vagrant-box-1
│   └── Vagrantfile
├── vagrant-box-2
│   └── Vagrantfile
└── vagrant-box-3
└── Vagrantfile

3 directories, 3 files
[acool@localhost docker-swarm-demo]$
[acool@localhost docker-swarm-demo]$ cd vagrant-box-1
[acool@localhost vagrant-box-1]$ vagrant up
...
[acool@localhost vagrant-box-1]$ vagrant ssh
[vagrant@box1 ~]$
[vagrant@box1 ~]$ cat /etc/redhat-release
CentOS Linux release 7.6.1810 (Core)
[vagrant@box1 ~]$
[vagrant@box1 ~]$ ip address show eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 52:54:00:de:6e:43 brd ff:ff:ff:ff:ff:ff
inet 192.168.122.102/24 brd 192.168.122.255 scope global noprefixroute dynamic eth0
valid_lft 3307sec preferred_lft 3307sec
inet6 fe80::5054:ff:fede:6e43/64 scope link
valid_lft forever preferred_lft forever
[vagrant@box1 ~]$
[vagrant@box1 ~]$
[vagrant@box1 ~]$ sudo yum install docker
...
[vagrant@box1 ~]$ sudo systemctl start docker
[vagrant@box1 ~]$ sudo docker version
Client:
Version: 1.13.1
API version: 1.26
Package version: docker-1.13.1-108.git4ef4b30.el7.centos.x86_64
Go version: go1.10.3
Git commit: 4ef4b30/1.13.1
Built: Tue Jan 21 17:16:25 2020
OS/Arch: linux/amd64

Server:
Version: 1.13.1
API version: 1.26 (minimum version 1.12)
Package version: docker-1.13.1-108.git4ef4b30.el7.centos.x86_64
Go version: go1.10.3
Git commit: 4ef4b30/1.13.1
Built: Tue Jan 21 17:16:25 2020
OS/Arch: linux/amd64
Experimental: false
[vagrant@box1 ~]$
[vagrant@box1 ~]$
[vagrant@box1 ~]$ # disable firewall for the sake of keeping this demo simple
[vagrant@box1 ~]$ sudo systemctl disable firewalld.service
[vagrant@box1 ~]$

[acool@localhost docker-swarm-demo]$ # create box2 and box3 via vagrant

[vagrant@box2 ~]$
[vagrant@box2 ~]$ # install and start docker as previously shown in box1
[vagrant@box2 ~]$ # disable firewall as previously shown in box1
[vagrant@box2 ~]$
[vagrant@box2 ~]$ ip address show eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 52:54:00:e1:c4:f9 brd ff:ff:ff:ff:ff:ff
inet 192.168.122.27/24 brd 192.168.122.255 scope global noprefixroute dynamic eth0
valid_lft 3436sec preferred_lft 3436sec
inet6 fe80::5054:ff:fee1:c4f9/64 scope link
valid_lft forever preferred_lft forever
[vagrant@box2 ~]$

[vagrant@box3 ~]$
[vagrant@box3 ~]$ # install and start docker as previously shown in box1
[vagrant@box3 ~]$ # disable firewall as previously shown in box1
[vagrant@box3 ~]$
[vagrant@box3 ~]$ ip address show eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 52:54:00:18:5a:8c brd ff:ff:ff:ff:ff:ff
inet 192.168.122.88/24 brd 192.168.122.255 scope global noprefixroute dynamic eth0
valid_lft 3323sec preferred_lft 3323sec
inet6 fe80::5054:ff:fe18:5a8c/64 scope link
valid_lft forever preferred_lft forever
[vagrant@box3 ~]$
[vagrant@box3 ~]$
[vagrant@box3 ~]$ # make sure all boxes can ping each other
[vagrant@box3 ~]$ ping -c2 192.168.122.102
PING 192.168.122.102 (192.168.122.102) 56(84) bytes of data.
64 bytes from 192.168.122.102: icmp_seq=1 ttl=64 time=0.562 ms
64 bytes from 192.168.122.102: icmp_seq=2 ttl=64 time=0.619 ms

--- 192.168.122.102 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1000ms
rtt min/avg/max/mdev = 0.562/0.590/0.619/0.037 ms
[vagrant@box3 ~]$
[vagrant@box3 ~]$
[vagrant@box3 ~]$ ping -c2 192.168.122.27
PING 192.168.122.27 (192.168.122.27) 56(84) bytes of data.
64 bytes from 192.168.122.27: icmp_seq=1 ttl=64 time=0.457 ms
64 bytes from 192.168.122.27: icmp_seq=2 ttl=64 time=0.312 ms

--- 192.168.122.27 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1000ms
rtt min/avg/max/mdev = 0.312/0.384/0.457/0.075 ms
[vagrant@box3 ~]$



The gist of this demo:

[vagrant@box1 ~]$
[vagrant@box1 ~]$
[vagrant@box1 ~]$ sudo docker swarm init --advertise-addr 192.168.122.102
Swarm initialized: current node (325hn4zrumoinjslhiw3p9c1j) is now a manager.

To add a worker to this swarm, run the following command:

docker swarm join \
--token SWMTKN-1-1qm592qpo4j2ka5nxqx98vizi6z9dtag4rou49zxvrr7rww72g-agsgzbalcyw0c7saupqvk90sl \
192.168.122.102:2377

To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.

[vagrant@box1 ~]$
[vagrant@box1 ~]$
[vagrant@box1 ~]$


[vagrant@box2 ~]$
[vagrant@box2 ~]$
[vagrant@box2 ~]$ sudo docker swarm join \
> --token SWMTKN-1-1qm592qpo4j2ka5nxqx98vizi6z9dtag4rou49zxvrr7rww72g-agsgzbalcyw0c7saupqvk90sl \
> 192.168.122.102:2377
This node joined a swarm as a worker.
[vagrant@box2 ~]$
[vagrant@box2 ~]$


[vagrant@box3 ~]$
[vagrant@box3 ~]$
[vagrant@box3 ~]$ sudo docker swarm join \
> --token SWMTKN-1-1qm592qpo4j2ka5nxqx98vizi6z9dtag4rou49zxvrr7rww72g-agsgzbalcyw0c7saupqvk90sl \
> 192.168.122.102:2377
This node joined a swarm as a worker.
[vagrant@box3 ~]$
[vagrant@box3 ~]$


[vagrant@box1 ~]$
[vagrant@box1 ~]$ sudo docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS
325hn4zrumoinjslhiw3p9c1j * box1 Ready Active Leader
78uis92n6z7lg2glmsbkzuag0 box3 Ready Active
ehjej7f2ol2svf4nci0k9x4if box2 Ready Active
[vagrant@box1 ~]$
[vagrant@box1 ~]$
[vagrant@box1 ~]$
[vagrant@box1 ~]$ # let's create a service
[vagrant@box1 ~]$ sudo docker service create --replicas 5 -p 80:80 --name web nginx
ytr9c94iieku7akjlp1gsq8mt
[vagrant@box1 ~]$
[vagrant@box1 ~]$ sudo docker service ls
ID NAME MODE REPLICAS IMAGE
ytr9c94iieku web replicated 0/5 nginx:latest
[vagrant@box1 ~]$
[vagrant@box1 ~]$ sudo docker service ps web
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
n4n6xun4dlmn web.1 nginx:latest box2 Running Preparing 20 seconds ago
ks1cnh8oko1r web.2 nginx:latest box3 Running Running less than a second ago
lhqha4nd2sj2 web.3 nginx:latest box1 Running Preparing 20 seconds ago
dy48ok6b1clb web.4 nginx:latest box2 Running Preparing 20 seconds ago
81dkfenyjrbz web.5 nginx:latest box3 Running Running less than a second ago
[vagrant@box1 ~]$
[vagrant@box1 ~]$
[vagrant@box1 ~]$ # nginx should be available via any box ip in your browser: http://192.168.122.88/, http://192.168.122.27/ or http://192.168.122.102/
[vagrant@box1 ~]$
[vagrant@box1 ~]$ # we can try curl too
[vagrant@box1 ~]$
[vagrant@box1 ~]$ curl 192.168.122.102
...
[vagrant@box1 ~]$ curl 192.168.122.88
...
[vagrant@box1 ~]$ curl 192.168.122.27
...

[vagrant@box2 ~]$ # let's see how much memory each replica is assigned
[vagrant@box2 ~]$
[vagrant@box2 ~]$ sudo docker stats --no-stream
CONTAINER CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O PIDS
19467a26755f 0.00% 1.402 MiB / 487.1 MiB 0.29% 8.65 kB / 9.52 kB 0 B / 0 B 2
427cf3658a03 0.00% 1.383 MiB / 487.1 MiB 0.28% 4.65 kB / 2.86 kB 1.83 MB / 0 B 2
[vagrant@box2 ~]$
[vagrant@box2 ~]$

[vagrant@box1 ~]$ # let's update each replica's memory limit to 250M
[vagrant@box1 ~]$
[vagrant@box1 ~]$ sudo docker service update --limit-memory 250M web
web
[vagrant@box1 ~]$
[vagrant@box1 ~]$


[vagrant@box3 ~]$ # verify memory adjustment
[vagrant@box3 ~]$
[vagrant@box3 ~]$ sudo docker stats --no-stream
CONTAINER CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O PIDS
8990a7fa2489 0.00% 1.375 MiB / 250 MiB 0.55% 2.19 kB / 1.31 kB 0 B / 0 B 2
e6d71ec0caf8 0.00% 1.375 MiB / 250 MiB 0.55% 2.62 kB / 1.31 kB 0 B / 0 B 2
[vagrant@box3 ~]$

[vagrant@box1 ~]$
[vagrant@box1 ~]$ # let's update our service with a different image; we'll try httpd instead of nginx :)
[vagrant@box1 ~]$
[vagrant@box1 ~]$ sudo docker service update --image httpd web
web
[vagrant@box1 ~]$
[vagrant@box1 ~]$
[vagrant@box1 ~]$ sudo docker service ps web
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
opf7ks9q5rj4 web.1 httpd:latest box2 Running Starting less than a second ago
sbbs4g9shkzm \_ web.1 nginx:latest box2 Shutdown Shutdown 5 seconds ago
n4n6xun4dlmn \_ web.1 nginx:latest box2 Shutdown Shutdown 3 minutes ago
vvv6018iym4j web.2 nginx:latest box3 Running Running 3 minutes ago
ks1cnh8oko1r \_ web.2 nginx:latest box3 Shutdown Shutdown 3 minutes ago
nl0oddf682d3 web.3 nginx:latest box1 Running Running 3 minutes ago
lhqha4nd2sj2 \_ web.3 nginx:latest box1 Shutdown Shutdown 3 minutes ago
xgcgisnlz5kd web.4 nginx:latest box1 Running Running 3 minutes ago
dy48ok6b1clb \_ web.4 nginx:latest box2 Shutdown Shutdown 3 minutes ago
jw9btp4h734o web.5 nginx:latest box3 Running Running 3 minutes ago
81dkfenyjrbz \_ web.5 nginx:latest box3 Shutdown Shutdown 3 minutes ago
[vagrant@box1 ~]$
[vagrant@box1 ~]$ sudo docker service ls
ID NAME MODE REPLICAS IMAGE
ytr9c94iieku web replicated 5/5 httpd:latest
[vagrant@box1 ~]$
[vagrant@box1 ~]$ # all nodes should render apache httpd welcome message now!
[vagrant@box1 ~]$
[vagrant@box1 ~]$ # let's increase the number of replicas
[vagrant@box1 ~]$ sudo docker service scale web=8
web scaled to 8
[vagrant@box1 ~]$
[vagrant@box1 ~]$ sudo docker service ls
ID NAME MODE REPLICAS IMAGE
ytr9c94iieku web replicated 8/8 httpd:latest
[vagrant@box1 ~]$
[vagrant@box1 ~]$ exit
logout
Connection to 192.168.122.102 closed.
[acool@localhost vagrant-box-1]$
[acool@localhost vagrant-box-1]$
[acool@localhost vagrant-box-1]$
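
To back out the httpd update, or to tear the whole demo down, something like this should work on this Docker version (a sketch, not part of the session above):

[vagrant@box1 ~]$ sudo docker service update --rollback web
[vagrant@box1 ~]$ sudo docker service rm web
[vagrant@box2 ~]$ sudo docker swarm leave
[vagrant@box3 ~]$ sudo docker swarm leave
[vagrant@box1 ~]$ sudo docker swarm leave --force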


Enjoy!

Vagrant: Creating two CentOS VMs and pinging each other.
[acool@localhost ~]$ date
Fri 21 Feb 2020 02:53:59 PM PST
[acool@localhost ~]$
[acool@localhost ~]$ cat /etc/redhat-release
Fedora release 31 (Thirty One)
[acool@localhost ~]$
[acool@localhost ~]$ sudo dnf install vagrant-libvirt
...
[acool@localhost ~]$ vagrant --version
Vagrant 2.2.6
[acool@localhost ~]$
[acool@localhost ~]$ mkdir vagrant-box-1
[acool@localhost ~]$ cd vagrant-box-1/
[acool@localhost vagrant-box-1]$
[acool@localhost vagrant-box-1]$ vagrant init centos/7
A `Vagrantfile` has been placed in this directory. You are now
ready to `vagrant up` your first virtual environment! Please read
the comments in the Vagrantfile as well as documentation on
`vagrantup.com` for more information on using Vagrant.
[acool@localhost vagrant-box-1]$
[acool@localhost vagrant-box-1]$ vagrant up
...
[acool@localhost vagrant-box-1]$
[acool@localhost vagrant-box-1]$ vagrant status
Current machine states:

default running (libvirt)

The Libvirt domain is running. To stop this machine, you can run
`vagrant halt`. To destroy the machine, you can run `vagrant destroy`.
[acool@localhost vagrant-box-1]$
[acool@localhost vagrant-box-1]$ # you can now visually access this VM via "Boxes" which is like virt-manager
[acool@localhost vagrant-box-1]$
[acool@localhost vagrant-box-1]$
[acool@localhost vagrant-box-1]$ # or you can ssh into this box via vagrant
[acool@localhost vagrant-box-1]$ vagrant ssh
Last login: Fri Feb 21 23:05:54 2020 from 192.168.122.1
[vagrant@localhost ~]$
[vagrant@localhost ~]$ cat /etc/redhat-release
CentOS Linux release 7.6.1810 (Core)
[vagrant@localhost ~]$
[vagrant@localhost ~]$ ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 52:54:00:7f:e1:0c brd ff:ff:ff:ff:ff:ff
inet 192.168.122.194/24 brd 192.168.122.255 scope global noprefixroute dynamic eth0
valid_lft 3068sec preferred_lft 3068sec
inet6 fe80::5054:ff:fe7f:e10c/64 scope link
valid_lft forever preferred_lft forever
[vagrant@localhost ~]$
[vagrant@localhost ~]$
[vagrant@localhost ~]$ exit
logout
Connection to 192.168.122.194 closed.
[acool@localhost vagrant-box-1]$
[acool@localhost vagrant-box-1]$
[acool@localhost vagrant-box-1]$ # let's create another box
[acool@localhost vagrant-box-1]$ cd ../ && mkdir vagrant-box-2
[acool@localhost ~]$
[acool@localhost ~]$ cd vagrant-box-2
[acool@localhost vagrant-box-2]$
[acool@localhost vagrant-box-2]$ vagrant init centos/7
A `Vagrantfile` has been placed in this directory. You are now
ready to `vagrant up` your first virtual environment! Please read
the comments in the Vagrantfile as well as documentation on
`vagrantup.com` for more information on using Vagrant.
[acool@localhost vagrant-box-2]$
[acool@localhost vagrant-box-2]$ vagrant up
...
[acool@localhost vagrant-box-2]$
[acool@localhost vagrant-box-2]$ vagrant ssh
Last login: Fri Feb 21 23:17:20 2020 from 192.168.122.1
[vagrant@localhost ~]$
[vagrant@localhost ~]$ ip a show eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 52:54:00:69:74:25 brd ff:ff:ff:ff:ff:ff
inet 192.168.122.27/24 brd 192.168.122.255 scope global noprefixroute dynamic eth0
valid_lft 2908sec preferred_lft 2908sec
inet6 fe80::5054:ff:fe69:7425/64 scope link
valid_lft forever preferred_lft forever
[vagrant@localhost ~]$
[vagrant@localhost ~]$ # let's ping box-1 from box-2
[vagrant@localhost ~]$ ping -c 2 192.168.122.194
PING 192.168.122.194 (192.168.122.194) 56(84) bytes of data.
64 bytes from 192.168.122.194: icmp_seq=1 ttl=64 time=0.589 ms
64 bytes from 192.168.122.194: icmp_seq=2 ttl=64 time=0.548 ms

--- 192.168.122.194 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 999ms
rtt min/avg/max/mdev = 0.548/0.568/0.589/0.031 ms
[vagrant@localhost ~]$
[vagrant@localhost ~]$ cat /etc/redhat-release
CentOS Linux release 7.6.1810 (Core)
[vagrant@localhost ~]$ exit
logout
Connection to 192.168.122.27 closed.
[acool@localhost vagrant-box-2]$
[acool@localhost vagrant-box-2]$
[acool@localhost vagrant-box-2]$
[acool@localhost vagrant-box-2]$ # let's clean up our tests
[acool@localhost vagrant-box-2]$ vagrant destroy
...
[acool@localhost vagrant-box-1]$ vagrant destroy
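
Both boxes here take DHCP addresses from libvirt's default network; to pin the addresses instead (as the HAproxy post on this blog does), a private_network line can be added to each Vagrantfile, e.g. (the IP is just an example):

config.vm.network "private_network", ip: "192.168.122.50"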


Docker: CentOS 7 Fun. 
[aesteban@localhost ~]$ 
[aesteban@localhost ~]$
[aesteban@localhost ~]$ cat /etc/redhat-release
Fedora release 24 (Twenty Four)
[aesteban@localhost ~]$
[aesteban@localhost ~]$ sudo docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
[aesteban@localhost ~]$ sudo docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
[aesteban@localhost ~]$
[aesteban@localhost ~]$ sudo docker pull centos:7
Trying to pull repository docker.io/library/centos ...
7: Pulling from docker.io/library/centos

a02a4930cb5d: Pull complete
Digest: sha256:184e5f35598e333bfa7de10d8fb1cebb5ee4df5bc0f970bf2b1e7c7345136426
Status: Downloaded newer image for docker.io/centos:7
[aesteban@localhost ~]$
[aesteban@localhost ~]$ sudo docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
docker.io/centos 7 1e1148e4cc2c 6 days ago 201.8 MB
[aesteban@localhost ~]$ sudo docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
[aesteban@localhost ~]$ sudo docker run -d --privileged -p 80:80 docker.io/centos:7 /sbin/init
f0faf6197fbc696796333bfc81f25d537a1aba170b81f2076010222e84284b36
[aesteban@localhost ~]$
[aesteban@localhost ~]$ sudo docker exec -it f0faf6197fbc696796333bfc81f25d537a1aba170b81f2076010222e84284b36 bash
[root@f0faf6197fbc /]#
[root@f0faf6197fbc /]#
[root@f0faf6197fbc /]# yum install epel-release
...
[root@f0faf6197fbc /]#
[root@f0faf6197fbc /]#
[root@f0faf6197fbc /]# yum install nginx
...
[root@f0faf6197fbc /]#
[root@f0faf6197fbc /]#
[root@f0faf6197fbc /]# systemctl enable nginx
Created symlink from /etc/systemd/system/multi-user.target.wants/nginx.service to /usr/lib/systemd/system/nginx.service.
[root@f0faf6197fbc /]#
[root@f0faf6197fbc /]# systemctl start nginx
[root@f0faf6197fbc /]#
[root@f0faf6197fbc /]# systemctl status nginx
● nginx.service - The nginx HTTP and reverse proxy server
Loaded: loaded (/usr/lib/systemd/system/nginx.service; enabled; vendor preset: disabled)
Active: active (running) since Wed 2018-12-12 02:55:32 UTC; 4s ago
Process: 2643 ExecStart=/usr/sbin/nginx (code=exited, status=0/SUCCESS)
Process: 2642 ExecStartPre=/usr/sbin/nginx -t (code=exited, status=0/SUCCESS)
Process: 2641 ExecStartPre=/usr/bin/rm -f /run/nginx.pid (code=exited, status=0/SUCCESS)
Main PID: 2644 (nginx)
CGroup: /system.slice/docker-f0faf6197fbc696796333bfc81f25d537a1aba170b81f2076010222e84284b36.scope/system.slice/nginx.service
├─2644 nginx: master process /usr/sbin/nginx
├─2645 nginx: worker process
├─2646 nginx: worker process
├─2647 nginx: worker process
└─2648 nginx: worker process

Dec 12 02:55:31 f0faf6197fbc systemd[1]: Starting The nginx HTTP and reverse proxy server...
Dec 12 02:55:31 f0faf6197fbc nginx[2642]: nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
Dec 12 02:55:31 f0faf6197fbc nginx[2642]: nginx: configuration file /etc/nginx/nginx.conf test is successful
Dec 12 02:55:32 f0faf6197fbc systemd[1]: Started The nginx HTTP and reverse proxy server.
[root@f0faf6197fbc /]#
[root@f0faf6197fbc /]#
[root@f0faf6197fbc /]#
[root@f0faf6197fbc /]# exit
exit
[aesteban@localhost ~]$
[aesteban@localhost ~]$ # localhost should now be accessible in browser
[aesteban@localhost ~]$
[aesteban@localhost ~]$
[aesteban@localhost ~]$
[aesteban@localhost ~]$ sudo docker exec -it f0faf6197fbc systemctl status nginx
● nginx.service - The nginx HTTP and reverse proxy server
Loaded: loaded (/usr/lib/systemd/system/nginx.service; enabled; vendor preset: disabled)
Active: active (running) since Wed 2018-12-12 02:55:32 UTC; 1min 57s ago
Process: 2643 ExecStart=/usr/sbin/nginx (code=exited, status=0/SUCCESS)
Process: 2642 ExecStartPre=/usr/sbin/nginx -t (code=exited, status=0/SUCCESS)
Process: 2641 ExecStartPre=/usr/bin/rm -f /run/nginx.pid (code=exited, status=0/SUCCESS)
Main PID: 2644 (nginx)
CGroup: /system.slice/docker-f0faf6197fbc696796333bfc81f25d537a1aba170b81f2076010222e84284b36.scope/system.slice/nginx.service
├─2644 nginx: master process /usr/sbin/nginx
├─2645 nginx: worker process
├─2646 nginx: worker process
├─2647 nginx: worker process
└─2648 nginx: worker process

Dec 12 02:55:31 f0faf6197fbc systemd[1]: Starting The nginx HTTP and reverse proxy server...
Dec 12 02:55:31 f0faf6197fbc nginx[2642]: nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
Dec 12 02:55:31 f0faf6197fbc nginx[2642]: nginx: configuration file /etc/nginx/nginx.conf test is successful
Dec 12 02:55:32 f0faf6197fbc systemd[1]: Started The nginx HTTP and reverse proxy server.
[aesteban@localhost ~]$
[aesteban@localhost ~]$
[aesteban@localhost ~]$
[aesteban@localhost ~]$
[aesteban@localhost ~]$
[aesteban@localhost ~]$ sudo docker exec -it f0faf6197fbc bash
[root@f0faf6197fbc /]#
[root@f0faf6197fbc /]#
[root@f0faf6197fbc /]#
[root@f0faf6197fbc /]# systemctl status postfix
Unit postfix.service could not be found.
[root@f0faf6197fbc /]#
[root@f0faf6197fbc /]# systemctl status memcached
Unit memcached.service could not be found.
[root@f0faf6197fbc /]#
[root@f0faf6197fbc /]#
[root@f0faf6197fbc /]# # We can install postfix and memcached with yum using the same procedure!
[root@f0faf6197fbc /]#
[root@f0faf6197fbc /]# # Exercise Done :) !!
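
Back on the host, the exercise can be torn down like this (a sketch; the ids are the ones created above):

[root@f0faf6197fbc /]# exit
[aesteban@localhost ~]$ sudo docker stop f0faf6197fbc
[aesteban@localhost ~]$ sudo docker rm f0faf6197fbc
[aesteban@localhost ~]$ sudo docker rmi docker.io/centos:7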




