install containerd-based k8s using Ansible

# yum upgrade on all nodes
yum -y upgrade


# ssh keygen in master
ssh-keygen -b 4096 -f ~/.ssh/mysshkey_rsa


# copy the ssh key to worker1 and worker2
ssh-copy-id -i ~/.ssh/mysshkey_rsa.pub root@worker1
ssh-copy-id -i ~/.ssh/mysshkey_rsa.pub root@worker2
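
A quick sanity check (my addition, not in the original post): key-based login should now work without a password prompt.

ssh -i ~/.ssh/mysshkey_rsa root@worker1 hostname
ssh -i ~/.ssh/mysshkey_rsa root@worker2 hostname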


# set the hostname and update the hosts file -> run on each node
hostnamectl set-hostname master
hostnamectl set-hostname worker1
hostnamectl set-hostname worker2


# install ansible in master node
yum -y install epel-release 
yum -y install ansible

ansible --version


# edit the Ansible inventory file /etc/ansible/hosts
[masters]
control-plane ansible_host={MASTER IP} ansible_user=root

[workers]
worker1 ansible_host={WORKER1 IP} ansible_user=root
worker2 ansible_host={WORKER2 IP} ansible_user=root
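
Since the key was generated with a non-default file name (mysshkey_rsa), Ansible also needs to be told which private key to use. One way (my assumption; the original post doesn't show this) is an inventory variable:

[all:vars]
ansible_ssh_private_key_file=~/.ssh/mysshkey_rsa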


# verify Ansible connectivity
ansible all -m ping
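
On success each host answers with pong, roughly like this:

control-plane | SUCCESS => {
    "changed": false,
    "ping": "pong"
}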


# install-kube
ansible-playbook install-kube-v2.yml


# master-setup
ansible-playbook master-setup.yml


# worker-setup -> check the /etc/kube_join_command file
ansible-playbook worker-setup.yml

 

install-kube-v2.yml

---
- hosts: "masters, workers"
  remote_user: root
  become: yes
  become_method: sudo
  become_user: root
  gather_facts: yes
  connection: ssh

  tasks:
     - name: Stop and disable firewalld.
       service:
         name: firewalld
         state: stopped
         enabled: False       
  
     - name: disable SELinux
       shell: |
                sudo setenforce 0
                sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config

     - name: disable swap, as kubelet requires it to be off
       shell: |
               sudo swapoff -a
               sudo sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
               sudo mount -a


     - name: Create a kernel module config file for containerd, our container runtime
       shell: |
               cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
               overlay
               br_netfilter
               EOF

     - name: load the kernel modules required by containerd
       shell: |
               sudo modprobe overlay
               sudo modprobe br_netfilter
     
     - name: sysctl params required by setup, params persist across reboots
       shell: |
               cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
               net.bridge.bridge-nf-call-iptables  = 1
               net.bridge.bridge-nf-call-ip6tables = 1
               net.ipv4.ip_forward                 = 1
               EOF


     - name: apply the sysctl settings without a reboot
       command: sudo sysctl --system

# force ipv4 forwarding on
     - name: set proc/sys/net/ipv4/ip_forward
       shell: |
               sudo sysctl -w net.ipv4.ip_forward=1

     - name: install containerd, write its default config, and restart it
       shell: |
               sudo yum-config-manager     --add-repo     https://download.docker.com/linux/centos/docker-ce.repo
               sudo yum install -y containerd.io
               sudo mkdir -p /etc/containerd
               sudo containerd config default | sudo tee /etc/containerd/config.toml
               sudo systemctl restart containerd

     - name: Create a kube repo file
       file:
         path: "/etc/yum.repos.d/kubernetes.repo"
         state: "touch"

     - name: write repo information in kube repo file
       blockinfile:
         path: "/etc/yum.repos.d/kubernetes.repo"
         block: |
                [kubernetes]
                name=Kubernetes
                baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
                enabled=1
                gpgcheck=1
                repo_gpgcheck=1
                gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg

     - name: install kubernetes
       shell: |
               sudo yum install -y kubelet kubeadm kubectl
               sudo systemctl enable --now kubelet
               sudo systemctl start kubelet
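
After the playbook finishes, a quick ad-hoc check (my addition, not part of the playbook) confirms the prerequisites took effect on every node:

# kernel modules, ip forwarding, and service state on all nodes
ansible all -m shell -a "lsmod | grep -E 'overlay|br_netfilter'; sysctl net.ipv4.ip_forward"
ansible all -m shell -a "systemctl is-active containerd kubelet"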

 

master-setup.yml

- hosts: masters
  become: yes
  tasks:
    - name: start the cluster
      shell: kubeadm init --pod-network-cidr=192.168.0.0/16
      args:
        chdir: $HOME
      async: 60
      poll: 60

    - name: create a new directory to hold kube conf
      # ansible.builtin.file:
      #   path: /etc/kubernetes
      #   state: directory
      #   mode: '0755'
    
      become: yes
      become_user: root
      file:
        path: $HOME/.kube
        state: directory
        mode: 0755

    - name: copy configuration file to the newly created dir
      # become: true
      # become_user: root
      # ansible.builtin.copy:
      #   src: /etc/kubernetes/admin.conf
      #   dest: $HOME/.kube/config
      #   remote_src: true
    
      copy:
        src: /etc/kubernetes/admin.conf
        dest: $HOME/.kube/config
        remote_src: yes
        owner: root

    - name: set kubeconfig file permissions
      file:
        path: $HOME/.kube/config 
        owner: "{{ ansible_effective_user_id }}"
        group: "{{ ansible_effective_group_id }}"
        
    - name: Apply the calico manifest to init the pod network
      # args:
      #   chdir: $HOME
      # ansible.builtin.command: kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
    
      become: yes
      become_user: root
      shell: |
              curl https://raw.githubusercontent.com/projectcalico/calico/v3.24.5/manifests/calico.yaml -O
              kubectl apply -f calico.yaml 
      args:
        chdir: $HOME
        
    - name: Get the join command to be used by the worker
      become: yes
      become_user: root
      shell: kubeadm token create  --print-join-command
      register: kube_join_command

    - name: Save the join command to a local file
      become: yes
      local_action: copy content="{{ kube_join_command.stdout_lines[0] }}" dest="/etc/kube_join_command" mode=0777
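
Once master-setup.yml completes, a quick check on the master (my addition) shows the control plane coming up; the node turns Ready after the calico pods start:

kubectl get nodes
kubectl get pods -n kube-system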

 

worker-setup.yml

- hosts: workers
  become: yes
  gather_facts: yes

  tasks:
   - name: Fetch the join command we wrote to a file in the previous step and copy it to the worker node.
     become: yes
     copy:
       src: /etc/kube_join_command
       dest: /etc/kube_join_command
       mode: 0777

   - name: Join the Worker node to the cluster.
     become: yes
     command: sh /etc/kube_join_command
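
After the join, the workers should appear on the master; run this there, and both workers move to Ready once calico is running:

kubectl get nodes -o wide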

 

 

Troubleshooting

# If an error occurs, run kubeadm reset and then re-run the playbook.
kubeadm reset


# For the ipv4 error, run the commands below on the master and worker nodes, restart kubelet, and re-run the playbook
modprobe br_netfilter
echo 1 > /proc/sys/net/ipv4/ip_forward
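
Then restart kubelet and re-run the playbook:

systemctl restart kubelet
ansible-playbook install-kube-v2.yml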

 


Creating Ubuntu users with Ansible


1. Install Ansible -> check beforehand that Python is installed

# Method 1 - from the Ansible documentation
$ sudo apt update
$ sudo apt install software-properties-common
$ sudo apt-add-repository --yes --update ppa:ansible/ansible
$ sudo apt install ansible


# Method 2 - from the Ansible documentation
$ pip install ansible

 

2. Register hosts

$ vi /etc/ansible/hosts

# register hosts in the format below

[web]
{HOST IP 1}
{HOST IP 2}

3. Test the connection (this succeeds once the SSH keys from step 4 are in place)

# connectivity test
$ ansible all -m ping
$ ansible worker1 -m ping

 

4. Generate and copy the SSH key

# generate an SSH key
$ ssh-keygen -b 4096 -f ~/.ssh/id_rsa -N ""

# add the key to the local authorized_keys
$ cat ~/.ssh/id_rsa.pub | sudo tee -a ~/.ssh/authorized_keys

# copy the key to each node
$ ssh-copy-id -i ~/.ssh/id_rsa.pub <user>@<node_ip_address>
e.g.)
$ ssh-copy-id -i ~/.ssh/id_rsa.pub root@proxy
$ ssh-copy-id -i ~/.ssh/id_rsa.pub root@management
$ ssh-copy-id -i ~/.ssh/id_rsa.pub root@worker1
$ ssh-copy-id -i ~/.ssh/id_rsa.pub root@worker2

$ ssh-copy-id -i ~/.ssh/id_rsa.pub root@169.56.170.168

# restart sshd on every node
$ sudo systemctl restart sshd

 

5. Ping test result after copying the SSH key (screenshot of the pong responses omitted)

6. Write the playbook YAML

# create-user.yaml

- name: Add users
  hosts: all
  become: true
  tasks:
    - name: Create the user accounts
      user:
        name: "{{ item }}"
        shell: /bin/bash
      with_items: "{{ USER_NAME }}"
    - name: Set passwords
      user:
        name: "{{ item }}"
        password: "{{ PASSWORD | password_hash('sha512') }}"
      with_items: "{{ USER_NAME }}"
    - name: Add sudoers.d entries
      copy:
        content: |
          %{{item}} ALL=(ALL) NOPASSWD: ALL
        dest: "/etc/sudoers.d/{{item}}"
        owner: root
        group: root
        mode: 0440
        validate: "/usr/sbin/visudo -c -f '%s'"
      with_items: "{{ USER_NAME }}"
  vars:
    USER_NAME:
    - "user01"
    - "user02"
    - "user03"
    - "user04"
    - "user05"
    - "user06"
    - "user07"
    - "user08"
    - "user09"
    - "user10"
    - "user11"
    - "user12"
    - "user13"
    - "user14"
    - "user15"
    - "user16"
    - "user17"
    - "user18"
    - "user19"
    - "user20"
    - "user21"
    - "user22"
    - "user23"
    - "user24"
    - "user25"
    - "user26"
    - "user27"
    - "user28"
    - "user29"
    - "user30"


7. Run the playbook

# run the command
$ ansible-playbook create-user.yaml --extra-vars "PASSWORD=your_password"
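
To confirm the accounts exist on every host, an ad-hoc check (my addition) works:

$ ansible all -b -m command -a "id user01"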

# If the login prompt is '$' and history / arrow-key recall of previous commands don't work,
# the shell is running as /bin/sh --> open /etc/passwd and change sh to bash

8. Additional steps for ICP cluster access

  • Install cloudctl and kubectl as root, copy the client configuration from the ICP console, and run it; it connects normally
  • cloudctl login https://icp-cluster:8443
  • log in
  • kubectl commands can then be executed
  • cloudctl login -a https://icp-cluster:8443 --skip-ssl-validation

 
