#!/bin/bash
# Create a new TOS storage pool: partition the given block devices, build
# the core/swap/user md RAID arrays, layer LVM (pv/vg/lv) on the user
# array, format the logical volume and mount it under /Volume<N>.
# Progress and errors are written to a per-lv status file (see debugFile).

# Pull in shared helpers and configuration: script_get_lock/script_put_lock,
# check_name_exists, check_raid0_default_layout and the TOS_* variables
# (TOS_RAID_CORE, TOS_RAID_SWAP, TOS_BOOT_SIZE, ...) used below.
source /etc/tos/scripts/scripts
# Serialize against other storage scripts; abort if the lock is held.
script_get_lock >/dev/null
[ $? -ne 0 ] && exit 1

#########################
# The command line help #
#########################
# Print usage, release the script lock and exit with status 1.
# Fixes: plain `echo "\t..."` prints the backslash-t literally in bash,
# so use `echo -e` (as the rest of this script already does); the mdadm
# level is spelled "linear", not "liner".
display_help() {
  echo "Usage:$(basename "$0") -l[level] -b -s[0] -t[btrfs] -u[uuid] [devices]"
  echo -e "\tlevel: [0, 1, 5, single, 6, 10, linear]"
  echo -e "\tuuid: use uuidgen -t -r to generate."
  script_put_lock >/dev/null
  exit 1
}

# Option defaults ------------------------------------------------------
bitmap=0            # -b: request an internal write-intent bitmap
TOS_DEVICES=""      # space-separated member block devices (filled later)
TOS_DEVICES_NUMBER=0
TOS_FILESYSTEM=ext4 # -t: filesystem for the data volume
TOS_SIZEG=100%
# Generate a fresh volume uuid; -u overrides it below.
TOS_UUID=$(uuidgen -t -r)
TOS_LABEL="TOS_VOL_$(date +'%Y%m%d')"

# Parse command-line flags.
while getopts "s:l:bt:u:" opt; do
  case "$opt" in
  s) TOS_LV_SIZE="$OPTARG" ;; # logical volume size (0 = all free space)
  l) level="$OPTARG" ;;       # raid level for the user array
  b) bitmap=1 ;;              # enable the raid bitmap
  t) TOS_FILESYSTEM="$OPTARG" ;;
  u) TOS_UUID="$OPTARG" ;; # caller-supplied filesystem uuid
  esac
done

# parameters check
# System partition sizes from the raid config (read by the sourced helpers).
coresize=$(iniparse -c -f "${TOS_RAID_CONF}" -s tos -k coresize)
swapsize=$(iniparse -c -f "${TOS_RAID_CONF}" -s tos -k swapsize)

# Collect member devices: every argument that names an existing block
# device. Quoting "$@" keeps arguments intact even with spaces (the
# original unquoted $@ word-split them). Option arguments that are not
# block devices are filtered out by the -b test.
for item in "$@"; do
  [ ! -b "$item" ] && continue
  TOS_DEVICES="$TOS_DEVICES $item"
  TOS_DEVICES_NUMBER=$((TOS_DEVICES_NUMBER + 1))
done

# Need at least one usable device and an explicit raid level.
if [ "$TOS_DEVICES_NUMBER" -lt 1 ] || [ -z "$level" ]; then
  display_help
fi
# fix 4.19.165 bug
check_raid0_default_layout

# get storage root
TOS_POOL_ROOT=/tmp/storage
[ -e "$TOS_POOL_ROOT" ] || mkdir "$TOS_POOL_ROOT"
# get volume root
TOS_VOLUME_ROOT=/tmp/volume
[ -e "$TOS_VOLUME_ROOT" ] || mkdir "$TOS_VOLUME_ROOT"

# Pick the first free md slot among /dev/md0 .. /dev/md7 for the new
# user raid; both stay empty when all eight slots are taken.
TOS_RAID_NAME=""
TOS_RAID_SORT=""
for i in 0 1 2 3 4 5 6 7; do
  if [ ! -b "/dev/md${i}" ]; then
    TOS_RAID_SORT=$i
    TOS_RAID_NAME="/dev/md${i}"
    break
  fi
done

# The volume group is named after the md slot (vg0 .. vg7).
TOS_VG_NAME="vg${TOS_RAID_SORT}"
# remove any stale status entry for this vg
rm -f "$TOS_POOL_ROOT/$TOS_VG_NAME"

# get lvname: probe lv0, lv1, ... until check_name_exists returns 0
# (which appears to mean "this slot is free" - TODO confirm against the
# sourced helper).
TOS_KEY=0
while [ 1 ]; do
  TOS_LV_NAME=lv${TOS_KEY}
  check_name_exists ${TOS_KEY}
  [ $? -eq 0 ] && break
  let TOS_KEY=TOS_KEY+1
done
# device-mapper node the new logical volume will appear as
TOS_BLK=/dev/mapper/${TOS_VG_NAME}-${TOS_LV_NAME}
# mount point index is maintained in the volume config
TOS_MNT_PATH=/Volume$(iniparse -c -f /etc/volume/volume.conf -sort sort)

# storage pools max ... 8 - no free /dev/mdX slot was found above
if [ -z "$TOS_RAID_NAME" -o -z "$TOS_RAID_SORT" ]; then
  display_help
fi

baseparts=""
swapparts=""
userparts=""
# Status file for this operation. Lines written below look like
# "makemd:<blk>:<stage>:<uuid>:<errflag>:<progress-or-reason>".
debugFile="$TOS_VOLUME_ROOT/lv${TOS_KEY}"
process=0
rm -f $debugFile

echo "makemd:${TOS_BLK}:raid${level}:$TOS_UUID:0:${process}" >$debugFile

# Write a fresh GPT label on device $1 and carve the four standard TOS
# partitions: 1 boot, 2 core (system), 3 swap, 4 user data (rest of the
# disk). Sizes come from TOS_BOOT_SIZE / TOS_CORE_SIZE / TOS_SWAP_SIZE
# (sourced config). Returns 1 as soon as any parted step fails.
blkparted() {
  local blk="$1"
  local offset=0
  local offend=0

  parted -s "$blk" mktable gpt || return 1

  # partition 1: boot
  offend=$((offset + TOS_BOOT_SIZE))
  parted -s "$blk" mkpart primary ext2 0% "$offend" || return 1

  # partition 2: core system
  offset=$offend
  offend=$((offset + TOS_CORE_SIZE))
  parted -s "$blk" mkpart primary ext2 "$offset" "$offend" || return 1

  # partition 3: swap
  offset=$offend
  offend=$((offset + TOS_SWAP_SIZE))
  parted -s "$blk" mkpart primary ext2 "$offset" "$offend" || return 1

  # partition 4: user data, everything that is left
  offset=$offend
  parted -s "$blk" mkpart primary ext2 "$offset" 100% || return 1

  # rebuild /dev nodes so the new partitions become visible
  mdev -s
  return 0
}

# Prepare every member disk: ensure it carries the 4-partition TOS layout
# (re-partitioning with blkparted when needed), accumulate the core/swap/
# user partitions into baseparts/swapparts/userparts, and tune readahead.
# Advances the progress counter by an equal share per disk.
InitDisk() {
  local space=30
  # split the 0..30% progress range evenly across the disks
  if [ $TOS_DEVICES_NUMBER -gt 0 ]; then
    let space=30/$TOS_DEVICES_NUMBER
  fi
  for sd in $TOS_DEVICES; do
    local blk=$(basename ${sd})
    # layout per disk: 1=boot, 2=core(system), 3=swap, 4=user data
    # local bootblk=${sd}1
    local coreblk=${sd}2
    local swapblk=${sd}3
    local userblk=${sd}4
    baseparts="${baseparts} ${coreblk}"
    swapparts="${swapparts} ${swapblk}"
    userparts="${userparts} ${userblk}"
    let process=process+space
    echo "makemd:${TOS_BLK}:raid${level}:$TOS_UUID:0:${process}" >$debugFile
    # mdadm -W: wait for any resync on the core array before touching disks
    mdadm --misc -W ${TOS_RAID_CORE}

    # optimize disk read_ahead_kb
    echo 4096 >/sys/block/${blk}/queue/read_ahead_kb

    # count existing partitions via sysfs
    local part_number=$(ls -d /sys/block/${blk}/${blk}[0-9] | wc -l)
    if [ $part_number -ge 4 ]; then
      # disk already has the full layout; check core-raid membership
      mdadm -D ${TOS_RAID_CORE} | grep ${coreblk} >/dev/null
      if [ $? -ne 0 ]; then # not in md9
        local needpart=1
        if [ -b ${coreblk} ]; then
          # partition carries the TOS core signature: re-add it to the
          # array instead of re-partitioning the whole disk
          mdadm -E ${coreblk} | grep ${TOS_CORE_SIGN}
          if [ $? -eq 0 ]; then
            mdadm -r ${TOS_RAID_CORE} ${coreblk} >/dev/null
            mdadm -a ${TOS_RAID_CORE} ${coreblk} >/dev/null
            needpart=0
          fi
        fi
        # need to parted...
        if [ $needpart -eq 1 ]; then
          # detach the swap partition from its array before wiping the disk
          mdadm -f ${TOS_RAID_SWAP} ${swapblk} >/dev/null
          mdadm -r ${TOS_RAID_SWAP} ${swapblk} >/dev/null
          blkparted ${sd}
          if [ $? -ne 0 ]; then
            echo "makemd:${TOS_DEVICES}:raid${level}:$TOS_UUID:1:make_partion_failed" >$debugFile
            script_put_lock >/dev/null
            exit 1
          fi
        fi
      fi
    else
      # fewer than 4 partitions: pull any members out of the system
      # arrays, then lay down a fresh partition table
      if [ -b ${coreblk} ]; then
        mdadm -f ${TOS_RAID_CORE} ${coreblk} >/dev/null 2>&1
        mdadm -r ${TOS_RAID_CORE} ${coreblk} >/dev/null 2>&1
      fi
      if [ -b ${swapblk} ]; then
        mdadm -f ${TOS_RAID_SWAP} ${swapblk} >/dev/null 2>&1
        mdadm -r ${TOS_RAID_SWAP} ${swapblk} >/dev/null 2>&1
      fi
      blkparted ${sd}
    fi

    mkdir -p /boot/bootdisk >/dev/null
    # local bootlabel=$(e2label ${bootblk} 2>/dev/null)
    # if [ -z "${bootlabel}" ]; then
    #   /etc/tos/scripts/initboot "$sd"
    # fi
  done
}

# Ensure partition $2 is an active member of array $1: when mdadm -D does
# not list it, scrub its superblock and hot-add it back.
RaidFix() {
  local raid="$1"
  local blk="$2"
  local raidname
  raidname=$(basename "$raid")
  #cat /proc/mdstat | grep $raidname | grep ${blk}
  if ! mdadm -D "$raid" | grep "$blk" >/dev/null; then
    echo "insert $blk to ${raid}"
    mdadm -r "$raid" "$blk"
    mdadm --zero-superblock "$blk"
    mdadm -a "$raid" "$blk"
  fi
}

# Create a raid1 system array.
#   $1 - raid device node (e.g. /dev/md9)
#   $2 - array name (-N) used to identify it later
#   $3 - comma-separated member partitions
# On failure: write the error to the status file, drop the script lock
# and abort the whole script.
RaidCreate() {
  local raid="$1"
  local sign="$2"
  local disks="$3"
  # turn "a,b,c" into "a b c"; it must stay unquoted below so it
  # word-splits into individual member arguments
  local disklist="${disks//,/ }"
  # pipe "y" in so mdadm never stops to ask for confirmation
  echo -e "y\n" | mdadm -C "$raid" -l1 -c512 -n"$TOS_DEVICES_NUMBER" -N"$sign" $disklist --force
  if [ $? -ne 0 ]; then
    echo "makemd:${TOS_BLK}:raid${level}:$TOS_UUID:1:make_base_failed" >$debugFile
    script_put_lock >/dev/null
    exit 1
  fi
  # (removed a stray 'wait $!': mdadm ran in the foreground, so there was
  # no background job to wait for - the wait was a no-op)
}

# Publish the current progress to the status file, then block until array
# $1 finishes syncing (mdadm --misc -W waits for resync/recovery).
RaidBitmap() {
  local raid="$1"
  echo "makemd:${TOS_BLK}:raid${level}:$TOS_UUID:0:${process}" >$debugFile
  mdadm --misc -W "$raid"
  # mdadm --grow ${raid} --raid-devices=$TOS_SYS_NUMBER --force
  # mdadm --grow ${raid} --bitmap=internal --force
}

# init disks
InitDisk

# make base raid: core/system raid1 across partition 2 of every disk
if [ -b ${TOS_RAID_CORE} ]; then
  echo ${TOS_RAID_CORE} 'already created'
  # mdadm --grow ${TOS_RAID_CORE} --raid-devices=$TOS_SYS_NUMBER --force
  # array exists: re-insert any core partition that is not a member
  for i in $baseparts; do
    RaidFix ${TOS_RAID_CORE} ${i}
  done
else
  # fresh array: join the collected parts with commas and create it
  baselist=${baseparts// /,}
  RaidCreate ${TOS_RAID_CORE} ${TOS_CORE_SIGN} ${baselist}
  echo -e "y\n" | mke2fs -t ext4 -m .5 -b 4096 -E stride=128,stripe-width=256 ${TOS_RAID_CORE}
  sleep 2 # wait disk sync
fi
process=40
RaidBitmap ${TOS_RAID_CORE}

# make swap raid: raid1 across partition 3 of every disk
echo "create ${TOS_RAID_SWAP} devnum:${TOS_DEVICES_NUMBER} level:${level} device:${swapparts}"
if [ -b "${TOS_RAID_SWAP}" ]; then
  echo ${TOS_RAID_SWAP} 'already created'
  # mdadm --grow ${TOS_RAID_SWAP} --raid-devices=$TOS_SYS_NUMBER --force
  # array exists: re-insert any swap partition that fell out
  for i in $swapparts; do
    RaidFix "${TOS_RAID_SWAP}" "${i}"
  done
else
  swaplist=${swapparts// /,}
  RaidCreate "${TOS_RAID_SWAP}" "${TOS_SWAP_SIGN}" "${swaplist}"
  sleep 2 # wait disk sync
fi
process=50
RaidBitmap "${TOS_RAID_SWAP}"

# init swap raid: enable swap on the array unless it is already active.
# grep -q reads /proc/swaps directly (was a useless 'cat | grep').
if [ -b "${TOS_RAID_SWAP}" ]; then
  if ! grep -q "${TOS_RAID_SWAP}" /proc/swaps; then
    mkswap "${TOS_RAID_SWAP}" # format swapblock
    swapon "${TOS_RAID_SWAP}" # Enable swap
  fi
fi

# make user raid: the data array on partition 4 of every disk
process=60
echo "makemd:${TOS_BLK}:raid${level}:$TOS_UUID:0:${process}" >$debugFile
TOS_PLUGS="--chunk=128"
TOS_BITMAP=""
[ $bitmap -eq 1 ] && TOS_BITMAP="--bitmap=internal"

# A single disk is always raid1. Redundant levels (1/5/6/10) may carry
# the optional write-intent bitmap; other levels never do.
# Fix: the original used [ $level -eq ... ], which prints "integer
# expression expected" errors when level is non-numeric (single/linear);
# a case pattern handles numeric and named levels alike.
if [ "$TOS_DEVICES_NUMBER" -eq 1 ]; then
  TOS_PLUGS="$TOS_PLUGS $TOS_BITMAP --level=1"
else
  case "$level" in
  1 | 5 | 6 | 10) TOS_PLUGS="$TOS_PLUGS $TOS_BITMAP --level=$level" ;;
  *) TOS_PLUGS="$TOS_PLUGS --level=$level" ;;
  esac
fi
# $TOS_PLUGS and $userparts stay unquoted: they must word-split into
# separate mdadm arguments.
echo Y | mdadm --create $TOS_RAID_NAME $TOS_PLUGS -n$TOS_DEVICES_NUMBER -N${TOS_USER_SIGN} ${userparts} --force
# wait for create raid...
while [ 1 ]; do
  sleep 2
  [ -b $TOS_RAID_NAME ] && break
done
# persist the assembled arrays for the next boot
mdadm -Ds >/etc/mdadm.conf
process=70
echo "makemd:${TOS_BLK}:raid${level}:$TOS_UUID:0:${process}" >$debugFile

# optimization resync speed for raid5 & raid6...
mdname=$(basename $TOS_RAID_NAME)
cache_size=/sys/block/$mdname/md/stripe_cache_size
# stripe_cache_size only exists for striped levels, hence the -e guard
[ -e $cache_size ] && echo $TOS_TRIP_SIZE >$cache_size

# clean old vg & pv: if the md device still carries LVM metadata from a
# previous pool, remove that vg and wipe the pv label first.
pvs -o pv_name $TOS_LVM_OPTS 2>/dev/null | grep ${TOS_RAID_NAME}
if [ $? -eq 0 ]; then
  vg_uuid=$(pvs -o vg_uuid $TOS_LVM_OPTS ${TOS_RAID_NAME} 2>/dev/null)
  if [ ! -z "$vg_uuid" ]; then
    # rename to a temp name before removal (presumably so the old vg
    # cannot clash with the name we are about to reuse - TODO confirm)
    vgrename $vg_uuid vg_temp
    vgremove -f vg_temp
  fi
  pvremove -ff ${TOS_RAID_NAME}
fi

# pv create...
pvcreate -ff ${TOS_RAID_NAME} >/dev/null
# verify the pv actually exists before continuing
pvs $TOS_LVM_OPTS | grep "${TOS_RAID_NAME}" >/dev/null
if [ $? -ne 0 ]; then
  echo "makemd:${TOS_BLK}:raid${level}:$TOS_UUID:1:create_pv_failed" >$debugFile
  script_put_lock >/dev/null
  exit 1
fi
process=80
echo "makemd:${TOS_BLK}:raid${level}:$TOS_UUID:0:${process}" >$debugFile
# create volume group, tagged with a freshly generated system id
TOS_SYSID=$(uuidgen -t -r)
vgcreate -f --addtag ${TOS_SYSID} -y ${TOS_VG_NAME} ${TOS_RAID_NAME} >/dev/null
# verify the vg exists before continuing
vgs $TOS_LVM_OPTS | grep "${TOS_VG_NAME}" >/dev/null
if [ $? -ne 0 ]; then
  echo "makemd:${TOS_BLK}:raid${level}:$TOS_UUID:1:create_vg_failed" >$debugFile
  script_put_lock >/dev/null
  exit 1
fi
# modify the storage config
iniparse -a -f /etc/volume/storage.conf -s "$TOS_SYSID" -system

# create lvm volume
process=90
echo "makemd:${TOS_BLK}:raid${level}:$TOS_UUID:0:${process}" >$debugFile
if [ "${TOS_LV_SIZE}" = "0" ]; then
  # -s 0 means "use all free space in the vg"
  lvcreate -y -l 100%FREE -n $TOS_LV_NAME $TOS_VG_NAME >/dev/null
else
  # a plain integer size gets lvcreate's gigabyte suffix appended;
  # (the 2>/dev/null hides the -gt error when the value is non-numeric)
  if [ "$TOS_LV_SIZE" -gt 0 ] 2>/dev/null; then
    TOS_LV_SIZE="${TOS_LV_SIZE}g"
  fi
  # free space in the vg, e.g. "12.34g" once whitespace is stripped
  freeSpace=$(vgs -o vg_free --unit g $TOS_LVM_OPTS "${TOS_VG_NAME}" 2>/dev/null | sed s/[[:space:]]//g)
  # ${var:0:0-1} drops the trailing unit letter so compareSpace sees
  # bare numbers (exit 0 appears to mean "requested size fits" - TODO
  # confirm against /etc/tos/scripts/compareSpace)
  /etc/tos/scripts/compareSpace ${freeSpace:0:0-1} ${TOS_LV_SIZE:0:0-1}
  if [ $? -eq 0 ]; then
    lvcreate -y --size $TOS_LV_SIZE -n $TOS_LV_NAME $TOS_VG_NAME >/dev/null
  else
    # not enough room for the requested size: take all free space
    lvcreate -y -l 100%FREE -n $TOS_LV_NAME $TOS_VG_NAME >/dev/null
  fi
fi

sleep 1

# Format block device $2 with filesystem $1 (btrfs / xfs / anything else
# falls through to ext4), mount it on $TOS_MNT_PATH, and on a successful
# mount register the volume uuid in volume.conf (plus quota setup for
# xfs/ext4). Always re-runs the NAS base-layout script at the end.
formatDevice() {
  local fs="$1"
  local blk="$2"
  if [ "$fs" = "btrfs" ]; then
    mkfs.btrfs -f -L "$TOS_LABEL" -U "$TOS_UUID" "$blk"
    mount -t btrfs -o $MNTOPT_BTRFS "$blk" "$TOS_MNT_PATH"
    if [ $? -eq 0 ]; then
      iniparse -a -f /etc/volume/volume.conf -s "$TOS_UUID" -system
    fi
  elif [ "$fs" = "xfs" ]; then
    mkfs.xfs -f -b size=4096 -L "xfs_$(date +'%Y%m%d')" -m uuid="$TOS_UUID" "$blk"
    mount -t xfs -o $MNTOPT_XFS "$blk" "$TOS_MNT_PATH"
    if [ $? -eq 0 ]; then
      iniparse -a -f /etc/volume/volume.conf -s "$TOS_UUID" -system
      quotacheck -uvgmf "$TOS_MNT_PATH"
      quotaon -a
    fi
  else
    # default: ext4; pipe "y" in so mke2fs never blocks on a prompt
    echo -e "y\n" | mke2fs -t ext4 -m .5 -b 4096 -L "$TOS_LABEL" -U "$TOS_UUID" "$blk"
    # (removed a stray 'wait $!': mke2fs ran in the foreground, so there
    # was no background job to wait for - the wait was a no-op)
    mount -t ext4 -o $MNTOPT_EXT4 "$blk" "$TOS_MNT_PATH"
    if [ $? -eq 0 ]; then
      iniparse -a -f /etc/volume/volume.conf -s "$TOS_UUID" -system
      quotacheck -uvgmf "$TOS_MNT_PATH"
      quotaon -a
    fi
  fi
  # rebuild the standard NAS directory layout on the new volume
  /etc/init.d/nas/mkbase
}

# Final stage: format and mount the new logical volume, then report the
# overall result through the status file.
if [ -b $TOS_BLK ]; then
  [ ! -e $TOS_MNT_PATH ] && mkdir -m 777 -p $TOS_MNT_PATH
  # format block device...
  formatDevice "$TOS_FILESYSTEM" "$TOS_BLK"

  # verify the volume is actually mounted at TOS_MNT_PATH
  df-json | grep "$TOS_MNT_PATH$" >/dev/null
  # mount successful to init /usr/local/...
  if [ $? -eq 0 ]; then
    process=100
    echo "makemd:${TOS_BLK}:${TOS_MNT_PATH}:$TOS_UUID:0:${process}" >$debugFile
  else
    echo "makemd:${TOS_BLK}:${TOS_MNT_PATH}:$TOS_UUID:1:failed" >$debugFile
    script_put_lock >/dev/null
    exit 1
  fi
else
  # the device-mapper node never appeared: lvcreate failed
  echo "makemd:${TOS_BLK}:raid${level}:$TOS_UUID:1:create_lv_failed" >$debugFile
  script_put_lock >/dev/null
  exit 1
fi

# all process over
script_put_lock >/dev/null
exit 0
