OpenShift 3 – build "cannot be started due to lack of disk space"

When trying to run a build in OpenShift, it fails immediately with a "cannot be started due to lack of disk space" error shown in the event viewer.

I can't figure out why OpenShift thinks I'm low on disk space when everything looks fine. Here are some details from the server.

# docker -v

    Docker version 1.8.2-el7, build a01dc02/1.8.2

# docker info


    Containers: 2
    Images: 36
    Storage Driver: devicemapper
     Pool Name: docker--vg-docker--pool
     Pool Blocksize: 524.3 kB
     Backing Filesystem: xfs
     Data file:
     Metadata file:
     Data Space Used: 1.923 GB
     Data Space Total: 13.72 GB
     Data Space Available: 11.8 GB
     Metadata Space Used: 688.1 kB
     Metadata Space Total: 37.75 MB
     Metadata Space Available: 37.06 MB
     Udev Sync Supported: true
     Deferred Removal Enabled: true
     Library Version: 1.02.107-RHEL7 (2015-12-01)
    Execution Driver: native-0.2
    Logging Driver: json-file
    Kernel Version: 3.10.0-229.14.1.el7.x86_64
    Operating System: Red Hat Enterprise Linux Server 7.1 (Maipo)
    CPUs: 4
    Total Memory: 7.64 GiB
    Name: oshift101.dev.omitted.ca
    ID: BGBJ:475D:NUO6:FORT:ZQQF:TZ4Z:QAX4:7AFK:VCCQ:7WYU:HNI2:5EAC
    WARNING: bridge-nf-call-iptables is disabled

# df -h


    Filesystem                 Size  Used Avail Use% Mounted on
    /dev/mapper/vg00-root     1014M  770M  245M  76% /
    devtmpfs                   3.9G     0  3.9G   0% /dev
    tmpfs                      3.9G     0  3.9G   0% /dev/shm
    tmpfs                      3.9G  121M  3.8G   4% /run
    tmpfs                      3.9G     0  3.9G   0% /sys/fs/cgroup
    /dev/mapper/vg00-usr       4.0G  1.4G  2.7G  33% /usr
    /dev/mapper/vg00-opt       509M   26M  483M   6% /opt
    /dev/mapper/vg00-tmp       4.0G   33M  4.0G   1% /tmp
    /dev/sda1                  509M  109M  400M  22% /boot
    /dev/mapper/vg00-var        12G  3.0G  9.1G  25% /var
    hnas01:/home               250G   83G  168G  33% /home
    hnas01:/docker-registry    512G  6.1G  506G   2% /dockerregistry
    hnas01:/opt-ignored/linux   20G   69M   20G   1% /opt/ignored/nfs
    tmpfs                      3.9G  4.0K  3.9G   1% /var/lib/origin/openshift.local.volumes/pods/19fb86f1-d9b1-11e5-bb64-005056969894/volumes/kubernetes.io~secret/builder-dockercfg-budmf-push
    tmpfs                      3.9G  8.0K  3.9G   1% /var/lib/origin/openshift.local.volumes/pods/19fb86f1-d9b1-11e5-bb64-005056969894/volumes/kubernetes.io~secret/name-ommitted-source
    tmpfs                      3.9G  8.0K  3.9G   1% /var/lib/origin/openshift.local.volumes/pods/19fb86f1-d9b1-11e5-bb64-005056969894/volumes/kubernetes.io~secret/builder-token-b79p6

# dmsetup status


    vg00-tmp: 0 8388608 linear
    vg00-swap0: 0 8388608 linear
    vg00-swap0: 8388608 8388608 linear
    docker--vg-docker--pool: 0 26804224 thin-pool 117 168/9216 3668/26176 - rw no_discard_passdown queue_if_no_space
    vg00-usr: 0 8388608 linear
    vg00-var: 0 8388608 linear
    vg00-var: 8388608 16777216 linear
    vg00-root: 0 2097152 linear
    docker--vg-docker--pool_tdata: 0 26804224 linear
    docker--vg-docker--pool_tmeta: 0 73728 linear
    vg00-opt: 0 1048576 linear

# dmsetup table


    vg00-tmp: 0 8388608 linear 8:2 16779264
    vg00-swap0: 0 8388608 linear 8:2 25167872
    vg00-swap0: 8388608 8388608 linear 8:2 36702208
    docker--vg-docker--pool: 0 26804224 thin-pool 253:6 253:7 1024 0 1 skip_block_zeroing
    vg00-usr: 0 8388608 linear 8:2 8390656
    vg00-var: 0 8388608 linear 8:2 2048
    vg00-var: 8388608 16777216 linear 8:2 45090816
    vg00-root: 0 2097152 linear 8:2 33556480
    docker--vg-docker--pool_tdata: 0 26804224 linear 8:3 75776
    docker--vg-docker--pool_tmeta: 0 73728 linear 8:3 2048
    vg00-opt: 0 1048576 linear 8:2 35653632
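
If I'm reading the thin-pool line correctly, the fields after "thin-pool" in dmsetup status are the transaction id, used/total metadata blocks, and used/total data blocks, and the data block size in the table above is 1024 sectors (512 KiB). That works out to roughly 3668 × 512 KiB ≈ 1.9 GB used out of 26176 × 512 KiB ≈ 13.7 GB, which agrees with the docker info numbers above, so the thin pool itself doesn't look anywhere near full either.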

# pvscan


    PV /dev/sda2   VG vg00        lvm2 [31.50 GiB / 2.00 GiB free]
    PV /dev/sda3   VG docker-vg   lvm2 [32.00 GiB / 19.14 GiB free]
    Total: 2 [63.49 GiB] / in use: 2 [63.49 GiB] / in no VG: 0 [0   ]

# lvscan


    ACTIVE            '/dev/docker-vg/docker-pool' [12.78 GiB] inherit
    ACTIVE            '/dev/vg00/var' [12.00 GiB] inherit
    ACTIVE            '/dev/vg00/usr' [4.00 GiB] inherit
    ACTIVE            '/dev/vg00/tmp' [4.00 GiB] inherit
    ACTIVE            '/dev/vg00/swap0' [8.00 GiB] inherit
    ACTIVE            '/dev/vg00/root' [1.00 GiB] inherit
    ACTIVE            '/dev/vg00/opt' [512.00 MiB] inherit

# cat /etc/sysconfig/docker-storage


    DOCKER_STORAGE_OPTIONS=--storage-driver devicemapper --storage-opt dm.fs=xfs --storage-opt dm.thinpooldev=/dev/mapper/docker--vg-docker--pool --storage-opt dm.use_deferred_removal=true

# mount


    proc on /proc type proc (rw,nosuid,nodev,noexec,relatime)
    sysfs on /sys type sysfs (rw,nosuid,nodev,noexec,relatime)
    devtmpfs on /dev type devtmpfs (rw,nosuid,size=3998872k,nr_inodes=999718,mode=755)
    securityfs on /sys/kernel/security type securityfs (rw,nosuid,nodev,noexec,relatime)
    tmpfs on /dev/shm type tmpfs (rw,nosuid,nodev)
    devpts on /dev/pts type devpts (rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000)
    tmpfs on /run type tmpfs (rw,nosuid,nodev,mode=755)
    tmpfs on /sys/fs/cgroup type tmpfs (rw,nosuid,nodev,noexec,mode=755)
    cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd)
    pstore on /sys/fs/pstore type pstore (rw,nosuid,nodev,noexec,relatime)
    cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,cpuset)
    cgroup on /sys/fs/cgroup/cpu,cpuacct type cgroup (rw,nosuid,nodev,noexec,relatime,cpuacct,cpu)
    cgroup on /sys/fs/cgroup/memory type cgroup (rw,nosuid,nodev,noexec,relatime,memory)
    cgroup on /sys/fs/cgroup/devices type cgroup (rw,nosuid,nodev,noexec,relatime,devices)
    cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,freezer)
    cgroup on /sys/fs/cgroup/net_cls type cgroup (rw,nosuid,nodev,noexec,relatime,net_cls)
    cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,blkio)
    cgroup on /sys/fs/cgroup/perf_event type cgroup (rw,nosuid,nodev,noexec,relatime,perf_event)
    cgroup on /sys/fs/cgroup/hugetlb type cgroup (rw,nosuid,nodev,noexec,relatime,hugetlb)
    configfs on /sys/kernel/config type configfs (rw,relatime)
    /dev/mapper/vg00-root on / type xfs (rw,relatime,attr2,inode64,noquota)
    /dev/mapper/vg00-usr on /usr type xfs (rw,relatime,attr2,inode64,noquota)
    systemd-1 on /proc/sys/fs/binfmt_misc type autofs (rw,relatime,fd=36,pgrp=1,timeout=300,minproto=5,maxproto=5,direct)
    debugfs on /sys/kernel/debug type debugfs (rw,relatime)
    mqueue on /dev/mqueue type mqueue (rw,relatime)
    hugetlbfs on /dev/hugepages type hugetlbfs (rw,relatime)
    /dev/mapper/vg00-opt on /opt type xfs (rw,relatime,attr2,inode64,noquota)
    /dev/mapper/vg00-tmp on /tmp type xfs (rw,relatime,attr2,inode64,noquota)
    /dev/sda1 on /boot type xfs (rw,relatime,attr2,inode64,noquota)
    /dev/mapper/vg00-var on /var type xfs (rw,relatime,attr2,inode64,noquota)
    hnas01:/home on /home type nfs (rw,relatime,vers=3,rsize=32768,wsize=32768,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=10.139.0.11,mountvers=3,mountport=4048,mountproto=tcp,local_lock=none,addr=10.139.0.11)
    hnas01:/docker-registry on /dockerregistry type nfs (ro,relatime,vers=3,rsize=32768,wsize=32768,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=10.139.0.11,mountvers=3,mountport=4048,mountproto=tcp,local_lock=none,addr=10.139.0.11)
    hnas01:/opt-ignored/linux on /opt/ignored/nfs type nfs (ro,relatime,vers=3,rsize=32768,wsize=32768,namlen=255,hard,proto=tcp,timeo=600,retrans=2,sec=sys,mountaddr=10.139.0.11,mountvers=3,mountport=4048,mountproto=tcp,local_lock=none,addr=10.139.0.11)
    binfmt_misc on /proc/sys/fs/binfmt_misc type binfmt_misc (rw,relatime)
    tmpfs on /var/lib/origin/openshift.local.volumes/pods/19fb86f1-d9b1-11e5-bb64-005056969894/volumes/kubernetes.io~secret/builder-dockercfg-budmf-push type tmpfs (rw,relatime)
    tmpfs on /var/lib/origin/openshift.local.volumes/pods/19fb86f1-d9b1-11e5-bb64-005056969894/volumes/kubernetes.io~secret/johnadacssh6-source type tmpfs (rw,relatime)
    tmpfs on /var/lib/origin/openshift.local.volumes/pods/19fb86f1-d9b1-11e5-bb64-005056969894/volumes/kubernetes.io~secret/builder-token-b79p6 type tmpfs (rw,relatime)

EDIT: Adding the following extra details.

Still having this problem. It looks like it might actually be an issue in Kubernetes, but I'm not sure.

I wrote the following Go program, based on what Kubernetes uses to determine whether there is enough disk space.

    package main

    import (
        "fmt"
        "os"
        "syscall"
    )

    func main() {
        pathArg := os.Args[1]
        stat := syscall.Statfs_t{}
        err := syscall.Statfs(pathArg, &stat)
        if err != nil {
            fmt.Println(err.Error())
            return
        }
        //bsize := stat.Bsize
        //fmt.Println(stat)
        s := fmt.Sprintf(` Statfs_t { Type %d Bsize %d Blocks %d Bfree %d Bavail %d Files %d Ffree %d Frsize %d Flags %d } `,
            stat.Type, stat.Bsize, stat.Blocks, stat.Bfree, stat.Bavail, stat.Files, stat.Ffree, stat.Frsize, stat.Flags)
        fmt.Println(s)
    }
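
(Assuming the source above is saved as fsinfo.go, it can be compiled into the fsinfo binary used below with the standard Go toolchain, e.g. go build -o fsinfo fsinfo.go.)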

Then I ran it with:

 for x in $(sudo df -h | grep -v Filesys| awk '{print $6}'); do echo "Running on $x"; sudo ./fsinfo $x; done; 

and got the following results, which also don't seem to show any disk space problem.

I'm wondering whether I should file a bug report against Kubernetes for this?

Running on /

 Statfs_t { Type 1481003842 Bsize 4096 Blocks 259584 Bfree 62600 Bavail 62600 Files 1048576 Ffree 878047 Frsize 4096 Flags 4128 } 

Running on /dev

 Statfs_t { Type 16914836 Bsize 4096 Blocks 999718 Bfree 999718 Bavail 999718 Files 999718 Ffree 999333 Frsize 4096 Flags 34 } 

Running on /dev/shm

 Statfs_t { Type 16914836 Bsize 4096 Blocks 1001444 Bfree 1001444 Bavail 1001444 Files 1001444 Ffree 1001443 Frsize 4096 Flags 38 } 

Running on /run

 Statfs_t { Type 16914836 Bsize 4096 Blocks 1001444 Bfree 923475 Bavail 923475 Files 1001444 Ffree 1000914 Frsize 4096 Flags 38 } 

Running on /sys/fs/cgroup

 Statfs_t { Type 16914836 Bsize 4096 Blocks 1001444 Bfree 1001444 Bavail 1001444 Files 1001444 Ffree 1001431 Frsize 4096 Flags 46 } 

Running on /usr

 Statfs_t { Type 1481003842 Bsize 4096 Blocks 1046016 Bfree 703392 Bavail 703392 Files 4194304 Ffree 4155732 Frsize 4096 Flags 4128 } 

Running on /opt

 Statfs_t { Type 1481003842 Bsize 4096 Blocks 130219 Bfree 123593 Bavail 123593 Files 524288 Ffree 524263 Frsize 4096 Flags 4128 } 

Running on /tmp

 Statfs_t { Type 1481003842 Bsize 4096 Blocks 1046016 Bfree 1037760 Bavail 1037760 Files 4194304 Ffree 4194273 Frsize 4096 Flags 4128 } 

Running on /boot

 Statfs_t { Type 1481003842 Bsize 4096 Blocks 130219 Bfree 102353 Bavail 102353 Files 524288 Ffree 523955 Frsize 4096 Flags 4128 } 

Running on /var

 Statfs_t { Type 1481003842 Bsize 4096 Blocks 3143168 Bfree 2416721 Bavail 2416721 Files 12582912 Ffree 12579830 Frsize 4096 Flags 4128 } 

Running on /home

 Statfs_t { Type 26985 Bsize 32768 Blocks 8192000 Bfree 5523429 Bavail 5523429 Files 2682388480 Ffree 1534765937 Frsize 32768 Flags 4128 } 

Running on /dockerregistry

 Statfs_t { Type 26985 Bsize 32768 Blocks 16777216 Bfree 16578608 Bavail 16578608 Files 1072955392 Ffree 659992996 Frsize 32768 Flags 4129 } 

Running on /opt/cbc/nfs

 Statfs_t { Type 26985 Bsize 32768 Blocks 655360 Bfree 653153 Bavail 653153 Files 1072955392 Ffree 659992996 Frsize 32768 Flags 4129 } 

Running on /var/lib/origin/openshift.local.volumes/pods/19fb86f1-d9b1-11e5-bb64-005056969894/volumes/kubernetes.io~secret/builder-dockercfg-bsamf-push

 Statfs_t { Type 16914836 Bsize 4096 Blocks 1001444 Bfree 1001443 Bavail 1001443 Files 1001444 Ffree 1001442 Frsize 4096 Flags 4128 } 

Running on /var/lib/origin/openshift.local.volumes/pods/19fb86f1-d9b1-11e5-bb64-005056969894/volumes/kubernetes.io~secret/omitted6-source

 Statfs_t { Type 16914836 Bsize 4096 Blocks 1001444 Bfree 1001442 Bavail 1001442 Files 1001444 Ffree 1001441 Frsize 4096 Flags 4128 } 

Running on /var/lib/origin/openshift.local.volumes/pods/19fb86f1-d9b1-11e5-bb64-005056969894/volumes/kubernetes.io~secret/builder-token-b7asp6

 Statfs_t { Type 16914836 Bsize 4096 Blocks 1001444 Bfree 1001442 Bavail 1001442 Files 1001444 Ffree 1001441 Frsize 4096 Flags 4128 } 
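
As a sanity check on these numbers, the available space should just be Bavail × Bsize; for / that is 62600 × 4096 ≈ 245 MiB, which matches the 245M that df reports above. Here is a minimal helper along those lines (my own sketch of how I'd interpret the fields, not the actual Kubernetes code):

    package main

    import (
        "fmt"
        "os"
        "syscall"
    )

    // Prints the available and total size for a filesystem, computed from
    // the same Statfs fields shown above (available = Bavail * Bsize).
    func main() {
        stat := syscall.Statfs_t{}
        if err := syscall.Statfs(os.Args[1], &stat); err != nil {
            fmt.Println(err.Error())
            return
        }
        avail := uint64(stat.Bavail) * uint64(stat.Bsize) // bytes usable by unprivileged processes
        total := uint64(stat.Blocks) * uint64(stat.Bsize) // total filesystem size in bytes
        fmt.Printf("%s: %.1f MiB available of %.1f MiB total\n",
            os.Args[1], float64(avail)/(1<<20), float64(total)/(1<<20))
    }

For / it prints roughly 244.5 MiB available of 1014 MiB total, i.e. the same 245M free / 76% used that df shows.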

# lvs

    # lvs -o +lv_major,lv_minor,lv_kernel_major,lv_kernel_minor,lv_size,seg_count,snap_percent,segtype,stripes,stripesize,chunksize,seg_start,seg_size
      LV          VG        Attr       LSize   Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert Maj Min KMaj KMin LSize   #Seg Snap% Type      #Str Stripe Chunk   Start  SSize
      docker-pool docker-vg twi-at---   12.78g             13.62  1.71                             -1  -1  253  8     12.78g    1       thin-pool    1      0 512.00k     0   12.78g
      opt         vg00      -wi-ao---- 512.00m                                                     -1  -1  253  5    512.00m    1       linear       1      0       0     0  512.00m
      root        vg00      -wi-ao----   1.00g                                                     -1  -1  253  1      1.00g    1       linear       1      0       0     0    1.00g
      swap0       vg00      -wi-ao----   8.00g                                                     -1  -1  253  2      8.00g    2       linear       1      0       0     0    4.00g
      swap0       vg00      -wi-ao----   8.00g                                                     -1  -1  253  2      8.00g    2       linear       1      0       0 4.00g    4.00g
      tmp         vg00      -wi-ao----   4.00g                                                     -1  -1  253  4      4.00g    1       linear       1      0       0     0    4.00g
      usr         vg00      -wi-ao----   4.00g                                                     -1  -1  253  0      4.00g    1       linear       1      0       0     0    4.00g
      var         vg00      -wi-ao----  12.00g                                                     -1  -1  253  3     12.00g    2       linear       1      0       0     0    4.00g
      var         vg00      -wi-ao----  12.00g                                                     -1  -1  253  3     12.00g    2       linear       1      0       0 4.00g    8.00g