root@ip-10-0-0-23:~# kubectl exec -it mydepl-5d6645bd5b-qg2g7 -c container2 -- bash
root@mydepl-5d6645bd5b-qg2g7:/# cd /etc/docker
root@mydepl-5d6645bd5b-qg2g7:/etc/docker# ls -lrt
total 8
-rw-r--r-- 1 root root 0 Aug 20 14:48 data.dat
-rw-r--r-- 1 root root 0 Aug 20 14:48 conta.pdf
-rw-r--r-- 1 root root 0 Aug 20 14:48 app.py
drwxr-xr-x 2 root root 4096 Aug 20 14:48 tcs
drwxr-xr-x 2 root root 4096 Aug 20 14:48 infosys
Container2: create some files
root@mydepl-5d6645bd5b-qg2g7:/etc/docker# touch jenkis.yaml docker.yaml subbu.txt
root@mydepl-5d6645bd5b-qg2g7:/etc/docker#
exit
command terminated with exit code 130
Container1: the files appeared automatically
root@ip-10-0-0-23:~# kubectl exec -it mydepl-5d6645bd5b-qg2g7 -c container1 -- bash
root@mydepl-5d6645bd5b-qg2g7:/# cd /opt/jenkins
root@mydepl-5d6645bd5b-qg2g7:/opt/jenkins# ls -lrt
total 8
-rw-r--r-- 1 root root 0 Aug 20 14:48 data.dat
-rw-r--r-- 1 root root 0 Aug 20 14:48 conta.pdf
-rw-r--r-- 1 root root 0 Aug 20 14:48 app.py
drwxr-xr-x 2 root root 4096 Aug 20 14:48 tcs
drwxr-xr-x 2 root root 4096 Aug 20 14:48 infosys
-rw-r--r-- 1 root root 0 Aug 20 14:51 subbu.txt
-rw-r--r-- 1 root root 0 Aug 20 14:51 jenkis.yaml
-rw-r--r-- 1 root root 0 Aug 20 14:51 docker.yaml
Step3: In the above example, container-to-container replication works while the pod is running. If the pod is deleted,
a new pod will be created, but the volume data will be lost.
--Pod deleted
root@ip-10-0-0-23:~# kubectl delete pod mydepl-5d6645bd5b-qg2g7
pod "mydepl-5d6645bd5b-qg2g7" deleted
-- Automatically a new pod is created
root@ip-10-0-0-23:~# kubectl get pod
NAME READY STATUS RESTARTS AGE
mydepl-5d6645bd5b-whtr8 2/2 Running 0 56s
-- We will go inside the container; no files exist in the volume
root@ip-10-0-0-23:~# kubectl exec -it mydepl-5d6645bd5b-whtr8 -c container1 -- bash
root@mydepl-5d6645bd5b-whtr8:/# cd /opt/jenkins/
root@mydepl-5d6645bd5b-whtr8:/opt/jenkins# ls
root@mydepl-5d6645bd5b-whtr8:/opt/jenkins#
Container2 data is also missing; we lost the data:
root@ip-10-0-0-23:~# kubectl exec -it mydepl-5d6645bd5b-whtr8 -c container2 -- bash
root@mydepl-5d6645bd5b-whtr8:/# cd /etc/docker
root@mydepl-5d6645bd5b-whtr8:/etc/docker# ls
2. HostPath
This volume type is an advanced version of the previous volume type, EmptyDir.
In EmptyDir, the data is stored in volumes that reside inside the pods only,
so the host machine does not have the data of the pods and containers.
The HostPath volume type helps to access the data of the pod or container
volumes from the host machine.
HostPath replicates the data of the volumes on the host machine, and if you
make changes from the host machine, the changes are reflected in the pod
volumes (if attached).
Step1: delete existing deployment
root@ip-10-0-0-23:~# kubectl delete deploy mydepl
deployment.apps "mydepl" deleted
Step2: In the previous manifest file, only the lines below are changed for hostPath
volumes:
- name: myvolume
hostPath:
path: /tmp/mydata/
root@ip-10-0-0-23:~# kubectl create -f manifest.yaml
deployment.apps/mydepl created
Container1: some files are created
root@ip-10-0-0-23:~# kubectl exec -it mydepl-57f4bc8d46-7dkqm -c container1 -- bash
root@mydepl-57f4bc8d46-7dkqm:/# cd /opt/jenkins/
root@mydepl-57f4bc8d46-7dkqm:/opt/jenkins# touch app.py jenkins.yaml java.jvm
root@mydepl-57f4bc8d46-7dkqm:/opt/jenkins# ls
app.py java.jvm jenkins.yaml
Container2: check whether the files arrived or not
root@ip-10-0-0-23:~# kubectl exec -it mydepl-57f4bc8d46-7dkqm -c container2 -- bash
root@mydepl-57f4bc8d46-7dkqm:/# cd /etc/docker/
root@mydepl-57f4bc8d46-7dkqm:/etc/docker# ls
app.py java.jvm jenkins.yaml
Step2: Delete the pod
root@ip-10-0-0-23:~# kubectl delete pod mydepl-57f4bc8d46-7dkqm
pod "mydepl-57f4bc8d46-7dkqm" deleted
root@ip-10-0-0-23:~# kubectl delete mydepl-57f4bc8d46-7dkqm
error: the server doesn't have a resource type "mydepl-57f4bc8d46-7dkqm"
root@ip-10-0-0-23:~# kubectl delete pod mydepl-57f4bc8d46-7dkqm
pod "mydepl-57f4bc8d46-7dkqm" deleted
root@ip-10-0-0-23:~# kubectl get po
NAME READY STATUS RESTARTS AGE
mydepl-57f4bc8d46-6fgfl 2/2 Running 0 46s
root@ip-10-0-0-23:~# kubectl exec -it mydepl-57f4bc8d46-6fgfl -c container1 -- bash
root@mydepl-57f4bc8d46-6fgfl:/# cd /opt/jenkins/
root@mydepl-57f4bc8d46-6fgfl:/opt/jenkins# ls
app.py java.jvm jenkins.yaml
root@mydepl-57f4bc8d46-6fgfl:/opt/jenkins#
exit
root@ip-10-0-0-23:~# kubectl exec -it mydepl-57f4bc8d46-6fgfl -c container2 -- bash
root@mydepl-57f4bc8d46-6fgfl:/# cd /etc/docker/
root@mydepl-57f4bc8d46-6fgfl:/etc/docker# ls
app.py java.jvm jenkins.yaml
root@mydepl-57f4bc8d46-6fgfl:/etc/docker# touch test nexflex google
root@mydepl-57f4bc8d46-6fgfl:/etc/docker#
exit
root@ip-10-0-0-23:~# kubectl delete pod mydepl-57f4bc8d46-6fgfl
pod "mydepl-57f4bc8d46-6fgfl" deleted
root@ip-10-0-0-23:~# kubectl get pod
NAME READY STATUS RESTARTS AGE
mydepl-57f4bc8d46-jww7m 2/2 Running 0 40s
root@ip-10-0-0-23:~# kubectl exec -it mydepl-57f4bc8d46-jww7m -c container1 -- bash
root@mydepl-57f4bc8d46-jww7m:/# cd /opt/jenkins/
root@mydepl-57f4bc8d46-jww7m:/opt/jenkins# ls
app.py google java.jvm jenkins.yaml nexflex test
Here we attached the hostPath to a node. Whenever a pod is deleted, a new one is created and attached to the volume from the hostPath. This works for a single node, but in real environments we use multiple nodes; since the hostPath is attached to a single node, pods scheduled on other nodes will not see the data.
If the node is deleted, or replaced by autoscaling, the data will also be lost. To overcome this issue we use:
PV
PVC
We will store the data in cloud EBS storage; for example, take 100 GB.
Persistent volume (PV-1) 15 GB
Persistent volume (PV-2) 10 GB
Persistent volume (PV-3) 45 GB
We have 30 GB left; it is reserved for future use.
Claim persistent volume
Suppose for pod creation I need 13 GB. For that we create a PVC (persistent volume claim). The PVC will look for a suitable PV among PV-1, PV-2, and PV-3; here PV-1 and PV-3 are suitable.
Before creating the pod, we need to create the PVC.
If you try to create a pod requesting 35 GB, the PVC searches for a suitable PV, finds no space available, goes into Pending state, and the pod cannot be created.
Step1: Create an EBS volume; based on the EBS volume ID we will create the PV. Elastic Block Store > Create volume >
Step2: copy the volumeid vol-08360ae5933a3ef0a
root@ip-10-0-0-23:~# vi pv.yaml
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv-1
spec:
capacity:
storage: 5Gi
accessModes:
- ReadWriteOnce
awsElasticBlockStore:
volumeID: vol-08360ae5933a3ef0a
fsType: ext4
persistentVolumeReclaimPolicy: Recycle
Step3:
-- Create PV-1 (the message below is just a warning)
root@ip-10-0-0-23:~# kubectl create -f pv.yaml
Warning: spec.persistentVolumeReclaimPolicy: The Recycle reclaim policy is deprecated. Instead, the recommended approach is to use dynamic provisioning.
persistentvolume/pv-1 created
Step4: Create one more PV-2
root@ip-10-0-0-23:~# vi pv.yaml
root@ip-10-0-0-23:~# cat pv.yaml
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv-2
spec:
capacity:
storage: 2Gi
accessModes:
- ReadWriteOnce
awsElasticBlockStore:
volumeID: vol-08360ae5933a3ef0a
fsType: ext4
persistentVolumeReclaimPolicy: Recycle
root@ip-10-0-0-23:~# kubectl create -f pv.yaml
Warning: spec.persistentVolumeReclaimPolicy: The Recycle reclaim policy is deprecated. Instead, the recommended approach is to use dynamic provisioning.
persistentvolume/pv-2 created
Step5: List the PVs; two PVs were created (pv-1, pv-2)
root@ip-10-0-0-23:~# kubectl get pv
root@ip-10-0-0-23:~#
Step6: Now create the PVC claim; write the pvc file
root@ip-10-0-0-23:~# vi pvc.yaml
root@ip-10-0-0-23:~# cat pvc.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mypvc-1
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 3Gi
root@ip-10-0-0-23:~# kubectl create -f pvc.yaml
persistentvolumeclaim/mypvc-1 created
root@ip-10-0-0-23:~# kubectl create -f pvc.yaml
persistentvolumeclaim/mypvc-1 created
root@ip-10-0-0-23:~# kubectl get pvc
-- Check which PV the claim was bound to
root@ip-10-0-0-23:~# kubectl get pv
We have claimed 3 GiB; the PV is bound.
Step7: Now we need to attached this volume(PVC) to po
Delete the existing deployment
root@ip-10-0-0-23:~# kubectl get deploy
NAME READY UP-TO-DATE AVAILABLE AGE
mydepl 1/1 1 1 148m
root@ip-10-0-0-23:~# kubectl delete deploy mydepl
deployment.apps "mydepl" deleted
root@ip-10-0-0-23:~# kubectl get deploy
No resources found in default namespace.
Step8: Create the deployment with the new volume; only the highlighted lines below need to change
root@ip-10-0-0-23:~# vi manifest.yaml
root@ip-10-0-0-23:~# cat manifest.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: mydepl
spec:
replicas: 1
selector:
matchLabels:
app: swiggy
template: # Moved to top-level under spec
metadata:
labels:
app: swiggy # Must match selector
spec:
containers:
- name: container1
image: nginx
command: ["/bin/bash", "-c", "while true; do echo welcome to DevOps class; sleep 5; done"]
volumeMounts:
- name: myvolume
mountPath: "/opt/jenkins"
ports:
- containerPort: 80
- name: container2
image: nginx
command: ["/bin/bash", "-c", "while true; do echo welcome to DevOps class; sleep 5; done"]
volumeMounts:
- name: myvolume
mountPath: "/etc/docker"
volumes:
- name: myvolume
persistentVolumeClaim:
claimName: pvc-1
root@ip-10-0-0-23:~# kubectl create -f manifest.yaml
deployment.apps/mydepl created
Step8: Check whether the pod was created or not
root@ip-10-0-0-23:~# kubectl get pod
NAME READY STATUS RESTARTS AGE
mydepl-7b4979dff6-m559p 0/2 Pending 0 47s
To identify the error: the describe output clearly shows "pvc-1" not found; we had actually named the PVC mypvc-1.
root@ip-10-0-0-23:~# kubectl describe pod mydepl-7b4979dff6-m559p
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning FailedScheduling 2m21s default-scheduler 0/1 nodes are available: persistentvolumeclaim "pvc-1" not found. preemption: 0/1 nodes are available: 1 Preemption is not helpful for scheduling.
Step9: Changed the PVC name; after the configuration change we need to apply it
persistentVolumeClaim:
claimName: mypvc-1
root@ip-10-0-0-23:~# kubectl apply -f manifest.yaml
root@ip-10-0-0-23:~# kubectl apply -f manifest.yaml
Warning: resource deployments/mydepl is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically.
deployment.apps/mydepl configured
Now the pod is in Running state
root@ip-10-0-0-23:~# kubectl get pod
NAME READY STATUS RESTARTS AGE
mydepl-587dc99798-9nr6z 2/2 Running 0 16s
root@ip-10-0-0-23:~# kubectl get pv
root@ip-10-0-0-23:~# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS VOLUMEATTRIBUTESCLASS AGE
mypvc-1 Bound pvc-8998df85-79c6-4df4-9241-3987bebe8519 3Gi RWO standard <unset> 30m
Step10: Go inside the pod, create some files, then delete the pod root@ip-10-0-0-23:~# kubectl exec -it mydepl-587dc99798-9nr6z -c container1 -- bash
root@mydepl-587dc99798-9nr6z:/# cd /opt/jenkins/
root@mydepl-587dc99798-9nr6z:/opt/jenkins# ls
Created some dummy file
root@mydepl-587dc99798-9nr6z:/opt/jenkins# touch app.py docker manifest.yaml pv
root@mydepl-587dc99798-9nr6z:/opt/jenkins# ls
app.py docker manifest.yaml pv
Step11: Let's delete the pod; a new pod is created automatically
root@ip-10-0-0-23:~# kubectl get po
NAME READY STATUS RESTARTS AGE
mydepl-587dc99798-9nr6z 2/2 Running 0 8m58s
root@ip-10-0-0-23:~# kubectl delete pod mydepl-587dc99798-9nr6z
pod "mydepl-587dc99798-9nr6z" deleted
root@ip-10-0-0-23:~# kubectl get po
NAME READY STATUS RESTARTS AGE
mydepl-587dc99798-smm8q 2/2 Running 0 58s
-- See here: I went into container2, and our files still exist
root@ip-10-0-0-23:~# kubectl exec -it mydepl-587dc99798-smm8q -c container2 -- bash
root@mydepl-587dc99798-smm8q:/# cd /etc/docker
root@mydepl-587dc99798-smm8q:/etc/docker# ls
app.py docker manifest.yaml pv
EBS keeps the data safe and secure even if the pod, node, or cluster is deleted.
Step12: Make sure that after deleting the cluster you delete the EBS volume as well
root@ip-10-0-0-23:~# kubectl get all
NAME READY STATUS RESTARTS AGE
pod/mydepl-587dc99798-smm8q 2/2 Running 0 7m20s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 4h24m
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/mydepl 1/1 1 1 24m
NAME DESIRED CURRENT READY AGE
replicaset.apps/mydepl-587dc99798 1 1 1 16m
replicaset.apps/mydepl-7b4979dff6 0 0 0 24m
root@ip-10-0-0-23:~# kubectl delete deploy mydepl
deployment.apps "mydepl" deleted
Understanding the Access Modes:
Access modes determine how many pods can
access a persistent volume (PV) or a persistent volume claim (PVC) simultaneously. There
are several access modes that can be set on a PV or PVC, including:
ReadWriteOnce: This
access mode allows the volume to be mounted read-write by a single node (commonly a single pod). This is the
most common access mode, and it's appropriate for use cases where a single pod needs
exclusive access to the storage.
ReadOnlyMany: This
access mode allows multiple pods to read from the PV or PVC, but does not allow
any of them to write to it. This access mode is useful for cases where many pods
need to read the same data, such as when serving a read-only database.
ReadWriteMany: This
access mode allows multiple pods to read and write to the PV or PVC simultaneously.
This mode is appropriate for use cases where many pods need to read and write
the same data, such as a distributed file system.
Execute:
This access mode would allow pods to execute the data on the PV or PVC but not
read or write to it, such as application code. (Note: Kubernetes itself defines only
ReadWriteOnce, ReadOnlyMany, ReadWriteMany, and ReadWriteOncePod — verify before relying on an "Execute" mode.)
--Thanks
No comments:
Post a Comment