How to create an ML pipeline for model retraining and inference [Kubernetes]

Solution

The idea is to share one GCE persistent disk between the two halves of the pipeline: a CronJob periodically writes its output to a PersistentVolume (standing in for a retraining step that produces model artifacts), and a separate Pod mounts the same PersistentVolumeClaim to read that output back (standing in for the inference side). The steps below create the disk, expose it as a PV/PVC, run the writer CronJob, and then read the data from a second Pod.
First, create the GCE persistent disk that will back the shared volume:

gcloud compute disks create pd-name --size 500GB --type pd-standard --zone us-central1-c
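If you want to confirm the disk exists before wiring it into Kubernetes, gcloud can describe it (same disk name and zone as above):

$ gcloud compute disks describe pd-name --zone us-central1-c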
Next, define a PersistentVolume backed by that disk, together with a PersistentVolumeClaim that the workloads will mount:

apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv
spec:
  storageClassName: "test"
  capacity:
    storage: 10G
  accessModes:
    - ReadWriteOnce
  claimRef:
    namespace: default
    name: pv-claim
  gcePersistentDisk:
    pdName: pd-name
    fsType: ext4
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pv-claim
spec:
  storageClassName: "test"
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10G  
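Assuming the PV and PVC above are saved together in one file, for example pv-pvc.yaml (the filename is arbitrary), apply it and confirm the claim binds to the volume:

$ kubectl apply -f pv-pvc.yaml
$ kubectl get pv,pvc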
The writer side of the pipeline is a CronJob that runs every minute and appends a timestamp to a file on the shared volume; the nginx image and the date command are placeholders for a real retraining image and training script. Note that CronJob is served from batch/v1 on current clusters (batch/v1beta1 was removed in Kubernetes 1.25):

apiVersion: batch/v1
kind: CronJob
metadata:
  name: cron
spec:
  schedule: "*/1 * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          volumes:
            - name: pv-storage
              persistentVolumeClaim:
                claimName: pv-claim
          containers:
            - name: container
              image: nginx
              volumeMounts:
                - mountPath: "/usr/data"
                  name: pv-storage
              command:
              - /bin/sh
              - -c
              - date >> /usr/data/msg.txt
          restartPolicy: OnFailure
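Assuming the CronJob is saved as cronjob.yaml, apply it and watch the per-minute runs appear:

$ kubectl apply -f cronjob.yaml
$ kubectl get cronjob cron
$ kubectl get jobs -w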
To read the data back (the inference side of the pipeline), mount the same claim in a separate Pod:

kind: Pod
apiVersion: v1
metadata:
  name: readpod
spec:
  volumes:
    - name: pv-storage
      persistentVolumeClaim:
       claimName: pv-claim
  containers:
    - name: read-container
      image: nginx
      volumeMounts:
        - mountPath: "/usr/data"
          name: pv-storage
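Assuming the Pod manifest is saved as readpod.yaml, create it and wait for it to become ready:

$ kubectl apply -f readpod.yaml
$ kubectl wait --for=condition=Ready pod/readpod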
Once the Pod is running, exec into it and inspect the file the CronJob has been writing:

$ kubectl exec -it readpod -- /bin/bash
$ cd /usr/data
$ cat msg.txt
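Each CronJob run appends one timestamp, so msg.txt grows by a line per minute. When you are done inspecting it, the reader Pod can be deleted without losing any data, since the file lives on the persistent disk:

$ kubectl delete pod readpod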