#!/bin/sh
#
# A utility to manage core file bundling.
#

VERSION=0.4

OS_ID=$(sed -n 's/^ID=//p' /etc/os-release 2>/dev/null)

# Re-exec under bash when started with a non-bash /bin/sh (e.g. busybox ash
# on Alpine, where bash is installed on demand first).
# Bug fix: "$0" is quoted so a script path containing spaces survives the
# re-exec instead of being word-split.
if [ -z "${BASH_VERSION}" ]; then
  if [ "alpine" = "${OS_ID}" ]; then
    [ -f /bin/bash ] || apk add bash >/dev/null 2>&1
  fi

  exec /bin/bash "$0" "$@"
fi

#
# @descr:
# Provide information about a specific command usage.
#
# @usage:
# help <command>
#
#      <command>       Command to get help for
#
cmd_help() {
  # Show the @descr/@usage documentation block of a single command, then exit.
  # $1 may be abbreviated; hyphens map to underscores in function names.
  local cmd
  [ $# -gt 0 ] || usage

  # Resolve (and validate) the command name; fails on unknown/ambiguous input.
  cmd=$(get_cmd cmd_${1/-/_})

  echo
  echo -n " "
  get_cmd_info ${cmd} descr
  echo -n "  Usage:  ${MYNAME}"
  get_cmd_info ${cmd} usage
  exit 0
}

#
# @descr:
# Create an encrypted archive with files needed to successfully debug the core dump.
#
# @usage:
# create [--hsfile <hsfile>] [--docker-container <cid> | --docker-image <iid> [-v <vlist> ...]] [<force-options> ...] <core>
#
#          <core>        Path to the core file to create a bundle for
#
#          <hsfile>      Path to the hs_err log file left by the same VM process
#                        Default: hs_err file found in the same directory as the corefile
#
#          <cid>         Docker container ID of the container that dumped the core or of a similar container (if the original one
#                        is not available).
#
#          <iid>         Docker image ID of the image that can be used to create a container similar to one that dumped the core.
#
#          <vlist>       Docker volumes mount list (in the format of -v <src>:<dst>). Multiple -v options allowed.
#
#     <force-options>    %MYNAME% is interactive by default but you can force some/all actions by providing the following options:
#
#     --no-host-check    force bundling on the current host even if it differs from the host where coredump was produced
#        --no-preview    do not open an EDITOR to preview the list of files to bundle
#    --no-space-check    do not perform a free disk space check
#         --overwrite    overwrite the bundle if a file with the same name already exists
#
#
#  Please refer to the online documentation to get more information about bundling core files generated in a docker container.
#
cmd_create() {
  # Build an encrypted, password-protected bundle (core + executable +
  # shared libraries + logs + system info) for the given core file.
  # Three modes:
  #   * plain host bundling (the common path after the docker block),
  #   * --docker-image:     re-run this script inside a container of that image,
  #   * --docker-container: commit the container to a temporary image and
  #                         re-run this script inside it.
  local core_file=
  local hs_err_file=
  local bundlehost=
  local core_host=
  local docker_container=
  local docker_image=
  local docker_volumes=
  local docker_core=no

  # Force-flags parsed from the command line (see the @usage block above).
  local NO_HOST_CHECK=
  local NO_PREVIEW=
  local NO_SPACE_CHECK=
  local OVERWRITE=

  # Split options from positional arguments; unrecognized words are re-quoted
  # and re-installed as positional parameters by the eval/set below.
  local -a new_args
  while [ $# -gt 0 ]; do
    case $1 in
        --no-host-check) NO_HOST_CHECK=yes ;;
           --no-preview) NO_PREVIEW=yes ;;
       --no-space-check) NO_SPACE_CHECK=yes ;;
            --overwrite) OVERWRITE=yes ;;

               --hsfile) hs_err_file=$(get_option $@); shift ;;
     --docker-container) [ -z "${docker_image}" ] || fail "Please use either --docker-image or --docker-container, not both"
                         docker_container=$(get_option $@)
                         docker_core=yes
                         shift ;;
         --docker-image) [ -z "${docker_container}" ] || fail "Please use either --docker-image or --docker-container, not both"
                         docker_image=$(get_option $@)
                         docker_core=yes
                         shift ;;
                     -v) [ -n "${docker_image}" ] || fail "Docker volume mappings can be specified when using --docker-image only"
                         docker_volumes="${docker_volumes} -v $(get_option $@)"; shift ;;
                      *) new_args="$new_args \"$1\"" ;;
    esac
    shift
  done

  eval set -- ${new_args}

  [ $# -gt 0 ] || fail "No core file specified"
  [ $# -eq 1 ] || fail "Wrong number of arguments"
  [ -w "$(pwd)" ] || fail "Current directory is not writeable"

  # Unique suffix (this shell's PID) used for all /xxx.${SFX} mount points.
  SFX=$$

  # Docker mode: delegate the actual bundling to a copy of this script run
  # inside a suitable container, then exit.
  [ "no" == "${docker_core}" ] || {
    check_docker_permissions
    core_file="${1}"
    core_file_name=$(basename "${core_file}")

    if [ -n "${docker_image}" ]; then
      ${DOCKER} inspect -f '{{ .Id }}' ${docker_image} &>>${TEMPDIR}/docker.err || fail "
	Docker image '${docker_image}' cannot be accessed.
	$(format_file ${TEMPDIR}/docker.err)"

      local extra_opts=

      # For the docker_image case it is expected that files are accessible from the host - we just need to provide mappings to the files
      [ -f "${core_file}" ] || fail "Core file '${core_file}' not found"
      core_file_dir=$(cd $(dirname "${core_file}"); pwd -P)


      if [ -n "${hs_err_file}" ]; then
        [ -f "${hs_err_file}" ] || fail "File '${hs_err_file}' not found"
        hs_err_file_dir=$(cd $(dirname "${hs_err_file}"); pwd -P)
        hs_err_file_name=$(basename "${hs_err_file}")
        docker_volumes="${docker_volumes} -v ${hs_err_file_dir}:/hs_err.${SFX}"
        hs_err_file="/hs_err.${SFX}/${hs_err_file_name}"
      fi

      [ -z "${hs_err_file}" ] || extra_opts="${extra_opts} --hsfile ${hs_err_file}"

      # Re-run the bundler inside the container; the bundle lands in the
      # host's current directory via the /bundler_output mount.
      ${DOCKER} run -i --rm \
                -e BUNDLE_FROM_HOST=yes \
                -e BUNDLE_FROM_HOST_DIR=$(pwd) \
                -v $(pwd):/bundler_output.${SFX} \
                -v ${core_file_dir}:/core.${SFX} \
                -v ${MYDIR}:/bundler.${SFX} \
                   ${docker_volumes} \
                -w /bundler_output.${SFX} \
                "${docker_image}" \
                /bundler.${SFX}/${MYNAME} create ${extra_opts} /core.${SFX}/${core_file_name}

    elif [ -n "${docker_container}" ]; then
      # Dealing with the container ...
      ${DOCKER} inspect -f '{{ .Id }}' ${docker_container} &>>${TEMPDIR}/docker.err || fail "
	Docker container '${docker_container}' cannot be accessed.
	$(format_file ${TEMPDIR}/docker.err)

	You may choose to use another similar container to bundle the core.
	For that please start a container with the same volume mappings and pass its ID to ${MYNAME} with --docker-container <cid> parameter.
	An alternative option is to provide an image ID (with --docker-image <iid>) that ${docker_container} was created from.
	In the latter case volume mappings that were used to run the crashed container should also be provided with -v parameter(s).
	"

      # Reuse the crashed container's own volume mappings.
      docker_volumes=$(${DOCKER} inspect -f '{{ range .Mounts }} {{ print "-v " .Source ":" .Destination }} {{end}}' ${docker_container})

      if [ -f "${core_file}" ]; then
        core_file_dir=$(cd $(dirname "${core_file}"); pwd -P)
        docker_volumes="${docker_volumes} -v ${core_file_dir}:/core.${SFX}"
        core_file="/core.${SFX}/${core_file_name}"
      fi

      if [ -f "${hs_err_file}" ]; then
        hs_err_file_dir=$(cd $(dirname "${hs_err_file}"); pwd -P)
        hs_err_file_name=$(basename "${hs_err_file}")
        docker_volumes="${docker_volumes} -v ${hs_err_file_dir}:/hs_err.${SFX}"
        hs_err_file="/hs_err.${SFX}/${hs_err_file_name}"
      fi

      # Snapshot the container into a temporary image (removed in on_exit).
      docker_image=$(${DOCKER} commit -m coredump.${SFX} ${docker_container})
      DOCKER_IMAGE_TO_REMOVE=${docker_image}

      local extra_opts=
      [ -z "${hs_err_file}" ] || extra_opts="${extra_opts} --hsfile ${hs_err_file}"

      ${DOCKER} run -i --rm \
                -e BUNDLE_FROM_HOST=yes \
                -e BUNDLE_FROM_HOST_DIR=$(pwd) \
                -v $(pwd):/bundler_output.${SFX} \
                -v ${MYDIR}:/bundler.${SFX} \
                   ${docker_volumes} \
                -w /bundler_output.${SFX} \
                "${docker_image}" \
                /bundler.${SFX}/${MYNAME} create ${extra_opts} ${core_file}

    fi

    exit 0
  }

  # ---- Plain host bundling starts here ----

  [ -z "${hs_err_file}" ] || [ -r "${hs_err_file}" ] || fail "hs_err log file '${hs_err_file}' cannot be read$([ -z "${BUNDLE_FROM_HOST}" ] || echo ' (in the container)')."
  [ -z "${hs_err_file}" ] || hs_err_file=$(readlink -f "${hs_err_file}")

  [ -f "${1}" ] || fail "Core file '${1}' not found$([ -z "${BUNDLE_FROM_HOST}" ] || echo ' (in the container)')."
  [ -r "${1}" ] || fail "Core file '${1}' cannot be read. Check permissions."

  # Some space is used in /tmp for temporary files.
  [ "yes" == "${NO_SPACE_CHECK}" ] || {
    local FREE_TMP_SPACE=$(disk_free_space $(dirname ${TEMPDIR}))
    local REQUIRED_TMP_SPACE=500
    [ "${FREE_TMP_SPACE}" -gt "${REQUIRED_TMP_SPACE}" ] || fail "
      Not enough free space in $(dirname ${TEMPDIR}) to proceed.
      Available: ${FREE_TMP_SPACE} Kb.
      At least ${REQUIRED_TMP_SPACE} Kb required to proceed."
  }

  core_file=$(readlink -f "${1}")
  core_file_name=$(basename "${core_file}")

  # Try to learn the crashed process PID (from the core first, then the
  # hs_err log; the two are cross-checked further below).
  pid_from_core=$(extract_pid_from_core "${core_file}")

  # No hs_err given: look for hs_err_pid<pid>.log next to the core file.
  if [ -z "${hs_err_file}" -a -n "${pid_from_core}" ]; then
    hsfile="$(dirname ${core_file})/hs_err_pid${pid_from_core}.log"
    [ ! -r "${hsfile}" ] || hs_err_file=${hsfile}
  fi

  pid_from_hs_err=
  [ -z "${hs_err_file}" ] || pid_from_hs_err=$(extract_pid_from_hs_err "${hs_err_file}")

  if [ -n "${pid_from_core}" -a -n "${pid_from_hs_err}" ]; then
    [ "${pid_from_hs_err}" == "${pid_from_core}" ] || fail "hs_err log file refers to the process with pid == ${pid_from_hs_err}, while core file is for the process with pid == ${pid_from_core}"
  fi

  pid=
  [ -z "${pid_from_core}"   ] || pid=${pid_from_core}
  [ -z "${pid_from_hs_err}" ] || pid=${pid_from_hs_err}

  if [ -z "${pid}" ]; then
    warning "Unable to extract process ID from the core file. Consider using --hsfile <hsfile> parameter."
    pid=0000
  fi

  # Either hs_err_file is not found or it is valid (refers to the same PID).
  exe_file=$(extract_exe_file "${core_file}" "${hs_err_file}")

  core_host=$(extract_core_hostname "${core_file}" "${hs_err_file}" "${exe_file}")
  [ -n "${core_host}" ] || {
    warning "Failed to extract hostname from the core file."
    [ "yes" == "${NO_HOST_CHECK}" ] || [ "yes" == "$(prompt_yes_no "Do you want to proceed with bundling on the current host?" n)" ] || cancel_script
    core_host=FOUND_AT_UNKNOWN_HOSTNAME
  }

  # BUNDLE_FROM_HOST=yes indicates that the script was recursively started in container (from the host)
  # Bypass hostname validation in this case
  if [ "yes" == "${BUNDLE_FROM_HOST}" ]; then
    bundle_name=corelibs.${pid}_${core_host//-/_}
    core_host=$(uname -n)
  else
    [ "${core_host}" == "$(uname -n)" ] || {
      [ "${core_host}" == "FOUND_AT_UNKNOWN_HOSTNAME" ] || {
        # A '/docker/<id>' cgroup string in the core hints that the dump
        # came from a container; suggest the docker flags in that case.
        docker_container=$(od -S 50 -A n "${core_file}" | sed -n 's#^/docker/\([a-f0-9]*\)$#\1#p' | head -1)
        warning "Core file was dumped on host ${core_host} you are currently on $(uname -n)."
        warning "Important information may be missing from the bundle if continue on the current host."
        warning "It is highly recommended to perform bundling on ${core_host}."
        [ -z "${docker_container}" ] || {
          warning "The corefile '${core_file_name}' was likely dumped from docker container with id ${docker_container:0:16}."
          warning "If this container is still available, re-run with --docker-container ${docker_container:0:16}."
          warning "Otherwise use --docker-container / --docker-image flags to specify suitable docker environment."
        }
        [ "yes" == "${NO_HOST_CHECK}" ] || [ "yes" == "$(prompt_yes_no "Do you want to proceed with bundling on the current host?" n)" ] || cancel_script
      }
      bundle_host_suffix="_bundledon_$(uname -n)"
    }
    bundle_name=corelibs.${pid}_${core_host//-/_}${bundle_host_suffix//-/_}
  fi

  # Editor used for the interactive review step below.
  file_editor=${EDITOR:-vi}

  # Compose the review file: summary header plus the candidate file list.
  cat <<- EOF > ${TEMPDIR}/flist_to_review

	# Enter your comments (if any) above this line.
	$(editor_help ${file_editor})
	$(wrong_host_warning ${core_host})

	# You are about to create a bundle that contains the core file and number of other
	# files that could make it easier to find the root cause of the problem.

	# PID:           ${pid}
	# CORE HOST:     ${core_host}
	# CORE FILE:     ${core_file}
	# EXECUTABLE:    ${exe_file}

	# BUNDLE NAME:   ${bundle_name}


	# PLEASE REVIEW

	# Below is the list of files to be bundled.
	# Remove or comment out the files to be excluded.
	# Add any missing files to the bundle, if needed, by adding them to the list.
	# Save the file and exit the editor once done.

	$(list_executable "${exe_file}")
	$(list_log_files "${core_file}" "${hs_err_file}")
	$(list_shared_libraries "${core_file}" "${hs_err_file}")
	$(list_system_info_files)
	EOF

  [ "yes" == "${NO_PREVIEW}" ] || {
    cat <<- EOF >&$CFD


	The bundle file will contain the core file, log files, executable, dynamic libraries and some system information.
	For security reasons, the bundle will be encrypted.

	EOF

    response=$(prompt_yes_no "Do you want to review (and/or modify) list of files to be included before proceeding?" n)
    [ "cancel" != "${response}" ] || cancel_script
    [ "no" == "${response}" ] || ${file_editor} ${TEMPDIR}/flist_to_review || cancel_script
  }

  # Create comments file, if a user has left any comments; forcibly add the core file.
  # Also do not use absolute paths for /proc files (see below)
  {
    cat ${TEMPDIR}/flist_to_review | ${AWK} -v comments=${TEMPDIR}/comments '
      /^# Enter your comments/ { f = 1; next }
                 NF && f == 0 { print >> comments; next }
           /(file not found)/ { next }
                         /^#/ { next }
                           NF
    '
    [ ! -r "${TEMPDIR}/comments" ] || echo "comments"
  } | sort -u | sed "s#^/proc/#proc/#" > ${TEMPDIR}/reviewed_flist

  echo "${core_file}" >> ${TEMPDIR}/reviewed_flist

  # Add bundle-info and opencore.sh to the list (actual files are created later)
  echo .bundle-info > ${TEMPDIR}/flist
  echo opencore.sh >> ${TEMPDIR}/flist

  # Normalize paths and create a list of directories to create before extracting the bundle.
  # This is needed for paths that have '..', as gdb will still expect these paths as is (with dots).
  # We need to make sure that all directories exist for this path to be valid.
  cat ${TEMPDIR}/reviewed_flist | while read F; do
    echo "${F}" | tr '/' '\n' | ${AWK} '/\.\./{i--; next}{a[i++]=$0}END{for(k=0;k<i;k++){printf "%s/", a[k]}}' | sed 's#/$#\n#' >> ${TEMPDIR}/flist
    dirname "${F}" >> ${TEMPDIR}/dirlist
  done

  # Calculate the (uncompressed) size of the bundle
  local bundle_size=$(cat ${TEMPDIR}/flist | xargs du -Lb 2>/dev/null | ${AWK} '{ s += $1 } END { print int(s / 1024) }')

  #
  # Files from /proc should be copied to the temp directory first.
  # Otherwise tar will ignore them
  #
  mkdir ${TEMPDIR}/proc

  grep "^proc" ${TEMPDIR}/flist | while read F; do
    mkdir -p "${TEMPDIR}/$(dirname ${F})"
    cp /${F} "${TEMPDIR}/$(dirname ${F})"
  done

  # Create a helper file with information required to correctly extract the bundle
  # This file is also used to display content of the bundle
  cat <<- EOF > ${TEMPDIR}/.bundle-info
	Total Size: ${bundle_size}
	List of directories:
	$(sort -u ${TEMPDIR}/dirlist | sed -e "s#^#./${bundle_name}/#" -e 's#//*#/#g')

	List of files:
	$(cat ${TEMPDIR}/flist | sed -e "s#^#./${bundle_name}/#" -e 's#//*#/#g')
	EOF

  # Create a file with a simple script that makes it easier to open a file with gdb
  cat <<- EOF > ${TEMPDIR}/opencore.sh
	#!/bin/bash
	cd \$(dirname \$0)
	GDB=\${GDB:-\$(ls -1 /home/buildmaster/nightly/ZVM/dev/in_progress/latest/sandbox/azlinux/gdb*/bin/azlinuxgdb 2>/dev/null | head -1)}
	\${GDB} -ex "set sysroot ." -c ./${core_file} ./${exe_file}
	EOF
  chmod +x ${TEMPDIR}/opencore.sh

  # At this point, all files are known and ready to be packed.

  password_file="$(pwd -P)/${bundle_name}.password"
  encrypted_file="$(pwd -P)/${bundle_name}.tgz.gpg"
  encrypted_file_view=${encrypted_file}
  [ -z "${BUNDLE_FROM_HOST_DIR}" ] || encrypted_file_view="${BUNDLE_FROM_HOST_DIR}/${bundle_name}.tgz.gpg"
  [ ! -r "${encrypted_file}" ] || [ "yes" == "${OVERWRITE}" ] || [ "yes" == "$(prompt_yes_no "File $(basename ${encrypted_file}) exists. Overwrite?")" ] || cancel_script

  rm -rf "${encrypted_file}" "${password_file}"

  [ "yes" == "${NO_SPACE_CHECK}" ] || {
    local free_cwd_space=$(disk_free_space .)

    # core file can be compressed dramatically, but let's be conservative and
    # estimate the required space as 1/3 of the size of all the files.
    [ -z "${bundle_size}" -o -z "${free_cwd_space}" ] || [ $(( bundle_size / 3 )) -lt ${free_cwd_space} ] || {
      message ""
      warning "The core bundle will likely exceed the available file system space."
      warning "Available space:          ${free_cwd_space} Kb"
      warning "Uncompressed bundle size: ${bundle_size} Kb"
      [ "yes" == "$(prompt_yes_no "Do you really want to continue?" n)" ] || cancel_script
    }
  }

  # Create a random passphrase
  dd if=/dev/urandom bs=30 count=1 2>>${TEMPDIR}/dd.error | base64 > ${password_file}
  copy_azul_public_key ${TEMPDIR}/keyring/pubring.gpg

  GPG_AGENT_PID=$(start_gpg_agent ${TEMPDIR}/keyring)

  # Finally, create the bundle
  message "Creating an encrypted bundle: ${encrypted_file_view} ..."
  local success=yes

  # tar (dereferencing symlinks, prefixing every path with the bundle name)
  # -> gzip (fastest level) -> gpg (symmetric passphrase + Azul public key).
  ${TAR} -C ${TEMPDIR} -h --transform="s,^,${bundle_name}/," -c -T ${TEMPDIR}/flist 2>>${TEMPDIR}/tar.error | ${GZIP} -1 -c | \
  ${GPG} --output "${encrypted_file}" --passphrase-file "${password_file}" \
      --batch --no-tty --homedir=${TEMPDIR}/keyring \
      --no-permission-warning --no-armor --no-verbose \
      --compress-algo Uncompressed --trust-model always \
      -e -c -r "AA22AC698ADA2A6A7ADD0F2D394026C2F659E4E5" \
      2>>${TEMPDIR}/gpg.error || success=no

  [ "yes" == "${success}" ] || {
    rm -f "${encrypted_file}" "${password_file}"
    local nospace=$(fgrep -q "No space left on device" ${TEMPDIR}/gpg.error && echo yes || echo no)
    [ "no" == "${nospace}" ] || fail "No space left on device"
    [ -s "${TEMPDIR}/gpg.error" ] || fail "No space left on device"
    fail "$(cat ${TEMPDIR}/gpg.error)"
  }

  cat <<- EOF >&$CFD


	Encrypted bundle file has been created and is ready to be shared with Azul!
	File: ${encrypted_file_view}

	*** PLEASE DO NOT UPLOAD THE PASSWORD FILE TO AZUL ***
	Azul already has a private key which can decrypt the core bundle.
	The password is only for your use, in case YOU need to extract files from the bundle.
	To extract the file, use ${MYNAME} extract $(basename ${encrypted_file}).

	EOF
}

#
# @descr:
# Extract encrypted core bundle to the current directory.
#
# @usage:
# extract [--list-only] <encrypted-bundle>
#
#      <encrypted-bundle>          Path to the encrypted bundle (corelibs.xxxx.tgz.gpg)
#
cmd_extract() {
  # Decrypt and unpack a bundle produced by 'create' into the current
  # directory; with --list-only, only print the bundle's file list.
  # Decryption uses the side-by-side .password file when present, otherwise
  # the internal Azul keyring (in-house path below).
  local encrypted_file
  local password_file
  local listonly=no
  local gpg_extra_options=

  # Split --list-only from the positional bundle-file argument.
  local -a new_args
  while [ $# -gt 0 ]; do
    case $1 in
     --list-only) listonly=yes ;;
               *) new_args="$new_args \"$1\"" ;;
    esac
    shift
  done

  eval set -- ${new_args}

  encrypted_file="${1}"

  [ -n "${encrypted_file}" ] || fail "No encrypted bundle file specified"
  [ -r "${encrypted_file}" ] || fail "File '${encrypted_file}' cannot be read"
  [ -w $(pwd) ] || fail "Current directory is not writeable"

  # NOTE(review): NO_SPACE_CHECK is never set on the extract path (the option
  # parser above only knows --list-only), so these checks always run —
  # confirm whether --no-space-check was meant to be accepted here too.
  [ "yes" == "${NO_SPACE_CHECK}" ] || {
    # Some space is used in /tmp for temporary files.
    local FREE_TMP_SPACE=$(disk_free_space $(dirname ${TEMPDIR}))
    local REQUIRED_TMP_SPACE=1500
    [ "${FREE_TMP_SPACE}" -gt "${REQUIRED_TMP_SPACE}" ] || fail "
      Not enough free space in $(dirname ${TEMPDIR}) to proceed.
      Available: ${FREE_TMP_SPACE} Kb.
      At least ${REQUIRED_TMP_SPACE} Kb required to proceed."
  }

  encrypted_file="$(readlink -f "${encrypted_file}")"
  # Derive the expected password-file name from the bundle file name.
  password_file="${encrypted_file/tgz.gpg/password}"

  # reset password_file if there is no tgz.gpg in the bundle name
  [ "${encrypted_file}" != "${password_file}" ] || password_file=

  if [ -r "${password_file}" ]; then
    cat "${password_file}" > ${TEMPDIR}/passphrase
  else
    # Fall back to the internal Azul keyring (only available in-house).
    AZUL_KEYRING=/home/buildmaster/nightly/ZST/5.7.x-zst/in_progress/latest/sandbox/azulkeyring
    [ -d "${AZUL_KEYRING}" ] || fail "Password file '$(basename ${password_file})' not found in $(dirname ${password_file})."
    cp -r "${AZUL_KEYRING}" "${TEMPDIR}/keyring"
    sed -n "s/passphrase: //p" ${AZUL_KEYRING}/passphrase > ${TEMPDIR}/passphrase
    gpg_extra_options="--homedir=${TEMPDIR}/keyring"
  fi

  # dump first some bytes of the bundle to get .bundle-info
  dd if=${encrypted_file} of=${TEMPDIR}/bundle.tmp.gpg bs=1024 count=1000 &> ${TEMPDIR}/dd.error
  local bundle_info=$(decrypt "${TEMPDIR}/bundle.tmp.gpg" "${gpg_extra_options}" 2>>${TEMPDIR}/decrypt.error | ${TAR} ztf - 2>>${TEMPDIR}/untar.error | fgrep .bundle-info || true)

  if [ -n "${bundle_info}" ]; then
    decrypt "${TEMPDIR}/bundle.tmp.gpg" "${gpg_extra_options}" 2>>${TEMPDIR}/decrypt.error | ${TAR} zxf - -C ${TEMPDIR} "${bundle_info}" 2>>${TEMPDIR}/untar.error || true
    [ ! -r "${TEMPDIR}/${bundle_info}" ] || mv ${TEMPDIR}/${bundle_info} ${TEMPDIR}/bundle-info
  fi

  if [ "yes" == "${listonly}" ]; then
    if [ -r "${TEMPDIR}/bundle-info" ]; then
      # Fast path: the file list is recorded inside .bundle-info.
      cat "${TEMPDIR}/bundle-info" | ${AWK} 'f==1{if (NF) {print} else {exit}} /List of files:/{f=1}' | egrep -v "/opencore.sh$|/.bundle-info$"
    else
      # No .bundle-info: list by decrypting the whole archive.
      decrypt "${encrypted_file}" "${gpg_extra_options}" 2>>${TEMPDIR}/decrypt.error | ${TAR} ztf - 2>>${TEMPDIR}/untar.error
    fi
  else
    if [ "yes" != "${NO_SPACE_CHECK}" -a -r "${TEMPDIR}/bundle-info" ]; then
      # Verify there is room for the uncompressed bundle, then pre-create
      # the directory tree recorded at bundle-creation time.
      local required_space=$(cat "${TEMPDIR}/bundle-info" | ${AWK} '/Total Size:/ {print $NF; exit}')
      local free_cwd_space=$(disk_free_space .)
      [ -z "${required_space}" -o -z "${free_cwd_space}" ] || [ ${required_space} -lt ${free_cwd_space} ] || {
        message ""
        fail "Not enough space to extract the bundle. Extracted bundle size is: ${required_space} Kb, Available: ${free_cwd_space} Kb"
      }
      cat "${TEMPDIR}/bundle-info" | ${AWK} 'f==1{if (NF) {print} else {exit}} /List of directories:/{f=1}' | xargs mkdir -p
    fi

    message -n "Extracting data from ${encrypted_file} ... "
    decrypt "${encrypted_file}" "${gpg_extra_options}" 2>>${TEMPDIR}/decrypt.error | ${TAR} zxf - --no-same-owner --no-same-permissions 2>>${TEMPDIR}/untar.error || fail "Failed to extract ${encrypted_file}. Corrupted file?"
    message "Done"
  fi

  exit 0
}

decrypt() {
  # Decrypt bundle file $1 to stdout with gpg; the passphrase is fed on
  # stdin (--passphrase-fd 0). $2 carries optional extra gpg options
  # (e.g. --homedir for the Azul keyring).
  ${GPG} -d -q --batch --passphrase-fd 0 --no-random-seed-file ${2} -o - ${1} < ${TEMPDIR}/passphrase
}

start_gpg_agent() {
  # Start gpg-agent (when installed) with $1 as its home directory and echo
  # the agent PID parsed from its output so on_exit can kill it later.
  # Silently does nothing when GPG_AGENT is empty / not executable.
  [ ! -x "${GPG_AGENT}" ] || {
    ${GPG_AGENT} --homedir=${1} --quiet --use-standard-socket --daemon >${TEMPDIR}/gpg-agent.output 2>&1
    # Handles both '... started'-style and GPG_AGENT_INFO-style agent output;
    # colons are squashed to spaces so the PID lands in field 2 either way.
    cat ${TEMPDIR}/gpg-agent.output | tr '[:]' ' ' | ${AWK} '/started$/ || /^GPG_AGENT_INFO/ { print $2; exit }'
  }
}

copy_azul_public_key() {
  # Write Azul's public GPG key (embedded below, base64-encoded) to file $1,
  # creating the target directory first. The key is used as the bundle
  # encryption recipient in cmd_create.
  mkdir -p $(dirname ${1})
  cat <<- EOF | base64 -d > ${1}
mQENBFOX1oABCACXzlC41ydOByuV4gbilGyIsZRbMRSqt7NzzsdiHErgQZDZ4dY2nQH5GD+dJq8L
JMVRJ1L+P3dtjkEcY7w+IHNNEaqzMy4FpcoiTXrxNvbNtXQ9O2jX9x4ommG/pM4QrNexaH1CUcsf
yX8BGzcDAxHCICUVROUaAFqiEZBVTVJiLVwZwEWrW1QssBEdXMZ0aUDRyyqTD/oPDITWIjl1fGCR
9YiWlFVgMDYIdzrndwn/h6L4cBCAh8csuYBhONx6wR+7S+KYeLu1819inmyfPeSPJcdTtKcj0EEk
XDrUtyCdxpHvKXOopRmAo85f4f727ofSnLgjYi/G5TDjoOi1DbSRABEBAAG0SUF6dWwgU3lzdGVt
cyBJbmMgKGNvbnRhY3QgQXp1bCBTeXN0ZW1zIFN1cHBvcnQpIDxzdXBwb3J0QGF6dWxzeXN0ZW1z
LmNvbT6JATgEEwECACIFAlOX1oACGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEDlAJsL2
WeTlutkH/23x32mMvBYS+ttRZYgb5JUONt0YfIStuvGCdXX4Jcznf8i7HwidVYEa99xJOYkI0C42
WP4zZxbveq2S2peitZctzQ6TMPjOCvHJvG3jlrE/t9f609dbsN06yD9WiS5fimCHQqXSXsUUUYlH
2b4U99eZ5B+jvSZqeVP4bKdgM+s2FuPSeuiqNpo+K7QGrtoHRPJXpGceykLolY2pRtd+1dGjAlpI
au4AcqlItiMaTiuc/mT7Xgli+amj871W52jQjhGQ67l7SCID6fEXdp2HIQJlcuUJNCBuO/JMmpQv
zFJeArXoU632RpKsHuKAFuzkH/G0htVL0+xBz4bzlHmNhPiwAgADuQENBFOX1oABCACq3UIUwUQW
5eFV2la/5o7EMicI5fh8rDZNedg1hX5feDpg/KYXlKuBZ8W6f/g62+D/7hOETZ4rY2X+9og9lOPa
IjnsK4fSfPzpHhe8tZ6nMbi1JMUTP/hs5yd1SZb9Q/DBluGUI8nQWTrX1ppbiPHQGzpgif2KmAt2
H8zjBNIENNP15qiWaNQ/f6/1ejU/1UZzKAsOSMXRc/dU5J1neKbLgmgGaNsIH9sO+Rcu9wLjloZ4
j69DLuAhY//tGT6dqGPvtevbRI9ZpoGjHsjOTXLL/Q0V+TfEjZ0kwR4+3WQMYB/bN2sSas0kLBJ5
fL3XIyAezhG0vMr2b8jFlJ7xkI3zABEBAAGJAR8EGAECAAkFAlOX1oACGwwACgkQOUAmwvZZ5OWA
Ogf9HmehOk94zQ6FLb0BuuwpcQerAGiVzSKF2ktbNpaisAusKphzFdd8fCEhvkp9FvAiboYOXeEI
Z/AY+2c9OnvrEskJI84gIQqQcuh78POwDZd6c8vevUYBjTtoGM2RRGuv4+zlcZXUf0T0EfEljHel
eX2/K8cBuBDk0SlsIPku4KgBJzJmsdoBHHdYhXjPVma7Eb+3skjIyEYiCFhDtNlAj3K0hXoUmB4i
CB6AOmmraDpYl+lSjocpetAKgM3PM7at/B5KfBUqSrDPD1a2c6j6oDVKaDTNzBhd0MHpDbOJINxl
yE9B8eKyfbkmebtKkpp8hI5bQdUuyh7ASFEIXDvborACAAM=
	EOF
}

wrong_host_warning() {
  # Emit a warning banner when the core's origin host $1 is not the current
  # host; print nothing when the hosts match.
  if [ "${1}" != "$(uname -n)" ]; then
    printf '\n# WARNING! The core was dumped on "%s"!\n# WARNING! Important files may be missing or differ from the files on the current host!\n\n' "${1}"
  fi
}

editor_help() {
  # Print vi-specific exit tips for the review step; editors outside the
  # vi family (anything not matching 'vi'/'vim' in the name) get nothing.
  local ed="$(basename ${1})"
  [[ "${ed}" =~ vi[m]* ]] || return 0
  printf '\n# Tip: To terminate the bundling process now, exit %s with :cq <enter> command\n#      To proceed - use :q <enter> or :wq <enter>\n\n' "${ed}"
}

get_shared_libraries() {
  # List shared-library paths referenced by the core dump.
  # Prefers gdb's 'info sharedlibrary' output; without gdb it scans the core
  # for '*.so' path strings, cross-checking them against the hs_err log's
  # 'Dynamic libraries' section when one is readable.
  local core_file="$1"
  local hs_err_file="$2"

  {
    if [ -n "${GDB}" ]; then
      ${GDB} -q -c "${core_file}" <<< "info sharedlibrary" 2>&1 | \
      grep "^0x" | cut -d" " -f 6- | sed 's/^ *//'
    else
      # Will have to extract strings from core..
      od -S 10 -A n "${core_file}" | egrep "^/.*\.so" > ${TEMPDIR}/core.strings

      if [ -r "${hs_err_file}" ]; then
        # Executable ('..x.') mappings from the hs_err log, resolved through
        # readlink and keyed as '<realpath>:<path>' for the join below.
        cat "${hs_err_file}" | ${AWK} '/^Dynamic libraries/, /^$/{if ($2 ~ "..x."){print}}' | \
        cut -d" " -f 6- | sed 's/^ *//' | grep -v '^\[' | \
        while read F; do [ ! -r "${F}" ] || echo "$(readlink -f ${F}):${F}"; done | \
        sort -u > ${TEMPDIR}/hs_err.list

        grep "^/.*\.so" ${TEMPDIR}/core.strings | \
        while read F; do [ ! -r "${F}" -o -d "${F}" ] || echo "$(readlink -f ${F}):${F}"; done | \
        sort -u > ${TEMPDIR}/core.list

        # Keep only libraries present in BOTH lists; print the original path.
        join -t ":" -j 1 ${TEMPDIR}/hs_err.list ${TEMPDIR}/core.list | sed 's/.*://'
      else
        # No hs_err log: trust readable, non-directory '*.so' strings as-is.
        grep "^/.*\.so" ${TEMPDIR}/core.strings | \
        while read F; do [ ! -r "${F}" -o -d "${F}" ] || echo "${F}"; done
      fi
    fi
  }
}

extract_pid_from_core() {
  # Best-effort PID recovery from core file $1: first look for the embedded
  # 'sun.java.launcher.pid=<n>' marker string, then (when gdb is available)
  # ask gdb about the dumped process. Echoes an empty string on failure.
  local corefile="${1}"
  local found_pid
  found_pid=$(od -S 20 -A n "${corefile}" | grep -m1 sun.java.launcher.pid= | sed 's/.*=//')
  if [ -z "${found_pid}" ] && [ -n "${GDB}" ]; then
    found_pid=$(${GDB} -q -c "${corefile}" <<< "info inferior" 2>&1 | sed -n 's/.*process \([0-9]*\).*/\1/p')
  fi
  echo "${found_pid}"
}

extract_pid_from_hs_err() {
  # Pull the crashed process PID out of hs_err log $1 (first 'pid=<n>,'
  # marker wins). Aborts via fail() when the log carries no PID at all.
  local log_file="${1}"
  local log_pid
  log_pid=$(sed -n 's/.*pid=\([0-9]*\),.*/\1/p' "${log_file}" | head -1)
  if [ -z "${log_pid}" ]; then
    fail "Unable to get PID from hs_err log file '${log_file}'."
  fi
  echo "${log_pid}"
}

extract_core_hostname() {
  # Determine the hostname of the machine that produced the core dump.
  # Tries, in order: a HOSTNAME= string embedded in the core, gdb's view of
  # _utsname.nodename (slow; requires readable exe + core), and finally the
  # 'Host:' line of the hs_err log. Echoes nothing when all sources fail.
  local core_file="${1}"
  local hs_err_file="${2}"
  local exe_file="${3}"
  local core_hostname=

  [ -n "${core_hostname}" ] || core_hostname=$(od -S 10 -A n "${core_file}" | grep -m1 "^HOSTNAME=" | sed 's/HOSTNAME=//')
  [ -n "${core_hostname}" ] || if [ -n "${GDB}" -a -r "${exe_file}" -a -r "${core_file}" ]; then
    message -n "Extracting hostname from the core file. Could take a while ... "
    core_hostname=$(${GDB} -q -c "${core_file}" "${exe_file}" <<< "printf \"Host: %s\\n\", _utsname.nodename" 2>&1 | sed -n 's/.*Host: \(.*\)/\1/p')
    message "Done"
  fi
  [ -n "${core_hostname}" ] || [ ! -r "${hs_err_file}" ] || core_hostname=$(sed -n 's/^Host: \(.*\)/\1/p' "${hs_err_file}")

  echo ${core_hostname}
}

extract_exe_file() {
  # Determine the path of the executable that produced core file $1.
  # Sources, in order of preference:
  #   1. a '*/bin/java' or '*/lib/java' line in the hs_err log ($2, optional),
  #   2. a matching string embedded in the core itself,
  #   3. gdb's AT_EXECFN auxv entry (only when gdb is available).
  local core_file="${1}"
  local hs_err_file="${2}"
  local exe=

  [ -z "${hs_err_file}" ] || exe=$(grep -m 1 "/bin/java$\|/lib/java$" "${hs_err_file}" | cut -d " " -f 6- | sed 's/^ *//')
  [ "${exe}" != "" ] || exe=$(od -S 9 -A n "${core_file}" | grep -m 1 "bin/java$\|lib/java$")
  # Bug fix: the gdb fallback used to print straight to stdout instead of
  # assigning to 'exe', relying on command-substitution newline stripping
  # in the caller to merge the two outputs. Assign it explicitly.
  [ "${exe}" != "" ] || [ "${GDB}" == "" ] || exe=$(${GDB} -q -c "${core_file}" <<< "info auxv" 2>&1 | sed -n 's/.*AT_EXECFN.* "\(.*\)"$/\1/p')

  echo "${exe}"
}

out_section() {
  # Read file names from stdin and emit them sorted/deduplicated under a
  # '# <title>' header ($1), printed only when at least one entry arrives.
  # Unreadable entries are tagged ' (file not found)'; '..' and '.' path
  # components are folded away so the list comes out normalized.
  local header_done=
  local entry
  local note
  sort -u | while read entry; do
    if [ -z "${header_done}" ]; then
      echo
      echo "# ${1}"
      header_done=1
    fi
    if [ -r "${entry}" ]; then
      note=
    else
      note=" (file not found)"
    fi
    entry=$(echo "$entry" | tr '/' '\n' | ${AWK} '/\.\./{i--; next}{a[i++]=$0}END{for(k=0;k<i;k++){printf "%s/", a[k]}}' | sed -e 's#/\./#/#g' -e 's#/$##')
    echo "${entry}${note}"
  done
}

list_executable() {
  # Emit the "Executable" section of the review list (global exe_file).
  out_section "Executable" <<< "${exe_file}"
}

resolve_abs_path() {
  # Read paths from stdin; print absolute ones untouched and prefix
  # relative ones with the directory of reference file $1.
  local base_dir
  base_dir="$(dirname ${1})"
  local path
  while read path; do
    if [ "${path:0:1}" == "/" ]; then
      echo "${path}"
    else
      echo "${base_dir}/${path}"
    fi
  done
}

list_log_files() {
  # Emit the "Log files" review section: the hs_err log itself, a matching
  # hotspot.log (only when its recorded PID equals the hs_err PID), any log
  # files named by -XX flags on the jvm_args line, and gc logs (expanding
  # rotation suffixes when UseGCLogFileRotation was enabled).
  local core_file="${1}"
  local hs_err_file="${2}"
  local hotspot_log
  local gclog
  local use_rotated
  local pid_from_hs_err
  local pid_from_log
  {
    if [ -r "${hs_err_file}" ]; then
      echo "${hs_err_file}"

      # Detect if hotspot.log is available (for the same PID)
      hotspot_log="$(dirname ${hs_err_file})/hotspot.log"
      if [ -r "${hotspot_log}" ]; then
        pid_from_hs_err=$(extract_pid_from_hs_err "${hs_err_file}")
        pid_from_log=$(sed -n "s/.*hotspot_log.*process='\([0-9]*\)'.*/\1/p" "${hotspot_log}")
        [ "${pid_from_log}" != "${pid_from_hs_err}" ] || echo ${hotspot_log}
      fi

      # Extract different log files ...
      grep "^jvm_args:" "${hs_err_file}" | ${AWK} -F = -v RS=' ' '
          /-XX:ProfileLogIn|-XX:ProfileLogOut|-XX:ProfileReportFile|-XX:ProfileReportXMLFile/ { print $2 }
          /-XX:CompileCommandFile|-XX:LogFile|-XX:PerfDataSaveFile/ { print $2 }
        ' | resolve_abs_path "${hs_err_file}"

      # For gc logs also look at the UseGCLogFileRotation flag...
      gclog=$(grep "^jvm_args:" "${hs_err_file}" | tr ' ' '\n' | sed -n 's/-Xloggc://p' | resolve_abs_path "${hs_err_file}")
      if [ -n "${gclog}" ]; then
        # The last +/- UseGCLogFileRotation occurrence on the line wins.
        use_rotated=$(cat "${hs_err_file}" | ${AWK} -F = -v RS=' ' '/\+UseGCLogFileRotation/{use=1}/-UseGCLogFileRotation/{use=0}END{print use}')
        [ "${use_rotated}" == "1" -a -d "$(dirname ${gclog})" ] && ls ${gclog}* 2>/dev/null || echo "${gclog}"
      fi
    fi
  } | out_section "Log files"
}

list_shared_libraries() {
  # Emit the "Shared Libraries" review section (globals core_file /
  # hs_err_file). Bug fix: arguments are now quoted so paths containing
  # whitespace survive; an empty hs_err_file still behaves the same, since
  # get_shared_libraries only checks it with '[ -r ... ]'.
  get_shared_libraries "${core_file}" "${hs_err_file}" | out_section "Shared Libraries"
}

list_system_info_files() {
  # Static "System info files" review section: /proc files that capture
  # basic information about the system the bundle was made on.
  printf '%s\n' \
    '' \
    '# System info files' \
    '/proc/version' \
    '/proc/meminfo' \
    '/proc/modules' \
    '/proc/cpuinfo'
}

on_exit() {
  # EXIT/SIGTERM/SIGQUIT handler: stop the gpg-agent we started, drop the
  # temporary docker image made by 'create --docker-container', and remove
  # the temp directory unless --keep-tmp was requested.
  [ -z "${GPG_AGENT_PID}" ] || kill ${GPG_AGENT_PID}

  [ -z "${DOCKER_IMAGE_TO_REMOVE}" ] || ${DOCKER} rmi --no-prune ${DOCKER_IMAGE_TO_REMOVE} &>>${TEMPDIR}/docker.err

  # Remove temp directory
  if [ "${KEEP_TMP}" == "no" -a -d "${TEMPDIR}" ]; then
    rm -rf ${TEMPDIR}
  else
    warning "Temp directory ${TEMPDIR} has been kept undeleted. Do not forget to cleanup manually after use."
  fi
  message ""
}

get_option() {
  # Echo the value that follows option $1 on the command line; aborts via
  # fail() when the option has no argument.
  # Bug fix: "$2" is quoted so values containing glob characters are no
  # longer expanded against the current directory before being echoed.
  [ $# -gt 1 ] || fail "Option '$1' requires an argument."
  echo "$2"
}

get_cmd_info() {
  # Print the '@<section>' ($2: descr or usage) comment block that precedes
  # the cmd_$1() definition in this very script, with %MYNAME% expanded.
  # The awk collects comment text after the matching marker line and replays
  # it once it reaches the function definition line; blank lines reset the
  # buffer so only the block adjacent to the function survives.
  cat ${MYDIR}/${MYNAME} | ${AWK} -F"# " -v cmd="cmd_${1}() {" -v mrk="# @${2}:" '
        !NF { k = 1; next }
  $0 == mrk { f = 1; k = 1; next }
  $1 == cmd { for (j = 1; j < k; j++) { print i[j] }; exit}
     /^# @/ { f = 0 }
     f == 1 { $1 = ""; i[k++] = $0 }' | sed "s#%MYNAME%#${MYNAME}#g"
}

describe_commands() {
  # Print an aligned '<command> <description>' line for every cmd_* function
  # defined in this script, for the usage screen.
  # Improvement: the 'declare -f | sed' function scan is performed once and
  # reused for both passes instead of being recomputed.
  local cmd
  local cmd_descr
  local -a cmds
  local -i length

  cmds=($(declare -f | sed -n 's/^\(cmd_.*\)()/\1/p' | sort))

  # First pass: longest command name determines the column width.
  length=0
  for cmd in "${cmds[@]}"; do
    [ ${length} -gt ${#cmd} ] || length=${#cmd}
  done

  # Second pass: print each command with its @descr text.
  for cmd in "${cmds[@]}"; do
    cmd_descr=$(get_cmd_info ${cmd/cmd_/} descr)
    printf "        %-${length}s%s\n" "${cmd/cmd_/}" "${cmd_descr}"
  done
}

usage() {
  # Print the script's top-of-file banner, the general usage and the command
  # summary to the console fd, then exit successfully.
  # The inner awk extracts the leading comment block of this very file.
  cat <<- EOF >&$CFD

	$(cat ${MYDIR}/${MYNAME} | ${AWK} '!NF{exit}p==1{print}/^#$/{p=1}' | sed -e 's/^#//' -e 's/^ *//')

	Usage: ${MYNAME} -h|--help
	       ${MYNAME} <command> <args>

	Commands:
	$(describe_commands | sed "s#%MYNAME%#${MYNAME}#g")


	Use '${MYNAME} help <command>' or '${MYNAME} <command> -h|--help' to get info about a specific command.
	EOF
  exit 0
}

fail() {
  # Report a fatal error on the console fd and exit 1. The error prefix is
  # bold on a tty. For the 'help' command itself the "Run ... help" hint is
  # skipped to avoid a self-referential message.
  echo >&$CFD
  [ "yes" == "${has_tty}" ] && echo -en "\033[1mError:\033[0m " >&$CFD || echo -n "Error: " >&$CFD
  [ "help" != "${COMMAND}" ] || { echo "'${MYNAME} ${COMMAND}' failed: $@" >&$CFD; exit 1; }
  [ -z "${COMMAND}" ] || COMMAND=" ${COMMAND}"

  cat <<- EOF >&$CFD
	'${MYNAME}${COMMAND}' failed: $@

	Run '${MYNAME} help${COMMAND}' for more information
	EOF

  exit 1
}

warning() {
  # Print a warning to the console fd; the prefix is bold on a tty.
  if [ "yes" == "${has_tty}" ]; then
    echo -en "\033[1mWarning:\033[0m " >&$CFD
  else
    echo -n "Warning: " >&$CFD
  fi
  echo "$@" >&$CFD
}

message() {
  # Print a console message to the user-facing fd ($CFD). Arguments are
  # passed straight to echo, so 'message -n ...' suppresses the trailing
  # newline exactly like echo -n does.
  echo "$@" >&$CFD
}

format_file() {
  # Show file $1 re-wrapped to 120 columns and tab-indented when a fmt tool
  # is available; otherwise dump it unmodified.
  # Bug fix: the old code tested ${FMT} but then invoked the literal 'fmt'
  # from PATH, and mis-used 'a && b || c' as an if/else.
  if [ -x "${FMT}" ]; then
    "${FMT}" -w 120 < "$1" | sed 's/^/\t/'
  else
    cat "$1"
  fi
}

prompt_yes_no() {
  # Interactively ask a yes/no/cancel question and echo 'yes', 'no' or
  # 'cancel' (the caller captures stdout). An optional readable file as $1
  # is displayed first; $2 is the default answer (default 'y') applied when
  # the user just presses Enter. Re-prompts on unrecognized input.
  [ ! -r "$1" ] || { cat "$1" 1>&$CFD; shift; }
  local prompt=$1
  local default=${2:-y}
  local answer
  local choices="([Y]es / [N]o / [C]ancel) [default '${default}']"

  message ""
  read -p "${prompt} ${choices}: " answer 2>&$CFD
  while true; do
    case ${answer:-${default}} in
      [yY]* ) echo yes; break ;;
      [nN]* ) echo  no; break ;;
      [cC]* ) echo cancel; break ;;
    esac
    read -p "Please type ${choices}: " answer 2>&$CFD
  done
}

get_cmd() {
  # Resolve a (possibly abbreviated) command: $1 is 'cmd_<prefix>'. Echoes
  # the single matching command without the 'cmd_' prefix; aborts via fail()
  # when the prefix is ambiguous or matches nothing.
  # Bug fix: matches are appended with cmds+=() instead of indexing with an
  # uninitialized, non-local counter 'i' that leaked into (and could be
  # polluted by) the global scope across calls.
  local command
  local cmd
  local -a cmds=()

  # Collect every cmd_* function whose name starts with the requested prefix.
  for cmd in $(declare -f | sed -n 's/^\(cmd_.*\)()/\1/p'); do
    [[ $cmd =~ ^${1} ]] && cmds+=("$cmd") && command=$cmd
  done

  [ ${#cmds[@]} -gt 1 ] && fail "command '${1}' is ambiguous: ${cmds[@]/cmd_/}"
  [ ${#cmds[@]} -eq 0 ] && fail "" "unknown command '${1/cmd_/}'"
  echo ${command/cmd_/}
}

process_args() {
  # Top-level dispatcher: resolve the command name (abbreviations allowed,
  # hyphens map to underscores), strip global options (-h/--help shows the
  # command's help, --keep-tmp preserves TEMPDIR) and invoke cmd_<command>
  # with the remaining arguments.
  [ -z "$1" -o "$1" == "--help" -o "$1" == "-h" ] && usage

  COMMAND=$(get_cmd cmd_${1/-/_})
  shift

  local -a new_args

  while [ $# -gt 0 ]; do
    case $1 in
         -h|--help) cmd_help ${COMMAND} ;;
        --keep-tmp) KEEP_TMP=yes ;;
                 *) new_args="$new_args \"$1\"" ;;
    esac
    shift
  done

  # Re-install the pass-through words as positional parameters.
  eval set -- ${new_args}

  cmd_${COMMAND} $@

  exit $?
}

cancel_script() {
  # Report a user-initiated abort on the console and terminate the script
  # with a failure status (also installed as the SIGINT handler).
  local note
  for note in "" "Script cancelled by user"; do
    message "${note}"
  done
  exit 1
}

disk_free_space() {
  # Print the available space, in kilobytes, of the filesystem holding $1
  # (field 4 of the second line of POSIX 'df -Pk' output).
  local fs_path="${1}"
  df -Pk "${fs_path}" | ${AWK} '{getline; print $4}'
}

check_docker_permissions() {
  # Abort early when the docker daemon rejects us with a permission error
  # (e.g. the user is not in the docker group and not root).
  local can_use_docker=$(${DOCKER} ps -q 2>&1 | grep -q "permission denied" && echo no || echo yes)
  [ "yes" == "${can_use_docker}" ] || fail "
	You do not have enough permissions to use docker.
	Please make sure to start the bundler with enough privileges."
}

install_alpine() {
  # Best-effort, silent installation of the tools the bundler needs on
  # Alpine Linux (full coreutils, gpg + agent, GNU tar).
  /sbin/apk update > /dev/null 2>&1 && /sbin/apk add coreutils gpg gpg-agent tar > /dev/null 2>&1
}

install_gpg() {
  # Try to install gpg with whichever package manager is present (yum,
  # apt-get or zypper, in that order); silently give up when none is found.
  PKG_MGR=yum
  PKG_MGR=$(type -P ${PKG_MGR}) || PKG_MGR=apt-get
  PKG_MGR=$(type -P ${PKG_MGR}) || PKG_MGR=zypper
  PKG_MGR=$(type -P ${PKG_MGR}) || return 0

  echo Installing gpg using ${PKG_MGR}
  ${PKG_MGR} update -y &> /dev/null
  # zypper-based distros ship the binary in the gpg2 package.
  if [[ "${PKG_MGR}" =~ zypper$ ]]; then
      ${PKG_MGR} install -y gpg2 &> /dev/null
  else
      ${PKG_MGR} install -y gpg &> /dev/null
  fi
}


# Entry point
set -e

MYDIR=$(cd $(dirname $0) && pwd -P)
MYNAME=$(basename $0)

# C locale keeps tool output (od, sort, df, ...) parseable and reproducible.
export LC_ALL=C
TEMPDIR=$(mktemp -d /tmp/${MYNAME}_XXXXXX)

has_tty=yes
tty &>/dev/null || has_tty=no

# CFD: fd used for all user-facing output — the controlling tty when it is
# writable, stderr otherwise (so command substitutions stay clean).
[ "no" == "${has_tty}" ] && exec {CFD}>&2 || {
  { echo -n > $(tty); } &>/dev/null && { exec {CFD}>$(tty); } || exec {CFD}>&2
}

trap on_exit SIGTERM SIGQUIT EXIT
trap cancel_script SIGINT
KEEP_TMP=no

# Suppress the banner for the recursive in-container invocation.
[ -n "${BUNDLE_FROM_HOST}" ] || message "${MYNAME} version ${VERSION}"

#
# Setup / validate required tools
#

if [ "alpine" = "${OS_ID}" ]; then
   install_alpine
fi

GPG=/usr/bin/gpg
GPG=$(type -P ${GPG}) || GPG=gpg
type -P ${GPG} &>/dev/null || install_gpg
GPG=$(type -P ${GPG}) || fail "gpg not found and failed to install"

TAR=gtar
TAR=$(type -P ${TAR}) || TAR=tar
TAR=$(type -P ${TAR}) || fail "tar not found"

GZIP=pigz
GZIP=$(type -P ${GZIP}) || GZIP=gzip
GZIP=$(type -P ${GZIP}) || fail "gzip not found"

AWK=gawk
AWK=$(type -P ${AWK}) || AWK=awk
AWK=$(type -P ${AWK}) || fail "awk not found"

#
# Setup / validate optional tools
#

GPG_AGENT=/usr/bin/gpg-agent
GPG_AGENT=$(type -P ${GPG_AGENT}) || GPG_AGENT=gpg-agent
GPG_AGENT=$(type -P ${GPG_AGENT}) || GPG_AGENT=
GPG_AGENT_PID=

GDB=gdb
GDB=$(type -P ${GDB}) || GDB=
# Make sure gdb actually runs; treat a present-but-broken gdb as absent.
[ -z "${GDB}" ] || ${GDB} <<< "quit" &>/dev/null || GDB=

DOCKER=/usr/bin/docker
DOCKER=$(type -P ${DOCKER}) || DOCKER=docker
DOCKER=$(type -P ${DOCKER}) || DOCKER=

FMT=/usr/bin/fmt
FMT=$(type -P ${FMT}) || FMT=fmt
FMT=$(type -P ${FMT}) || FMT=

# Without a tty we cannot prompt — force all interactive decisions.
EXTRA_ARGS=
if [ "no" == "${has_tty}" ]; then
  EXTRA_ARGS="--no-host-check --no-preview --no-space-check --overwrite"
fi

process_args "$@" ${EXTRA_ARGS}

