ESMF_VMGetDefault Subroutine

private recursive subroutine ESMF_VMGetDefault(vm, keywordEnforcer, localPet, currentSsiPe, petCount, peCount, ssiCount, ssiMap, ssiMinPetCount, ssiMaxPetCount, ssiLocalPetCount, ssiLocalPet, ssiLocalDevCount, ssiLocalDevList, mpiCommunicator, pthreadsEnabledFlag, openMPEnabledFlag, ssiSharedMemoryEnabledFlag, esmfComm, rc)

Arguments

Type                       | Intent | Optional | Attributes  | Name
---------------------------|--------|----------|-------------|----------------------------
type(ESMF_VM)              | in     |          |             | vm
type(ESMF_KeywordEnforcer) |        | optional |             | keywordEnforcer
integer                    | out    | optional |             | localPet
integer                    | out    | optional |             | currentSsiPe
integer                    | out    | optional |             | petCount
integer                    | out    | optional |             | peCount
integer                    | out    | optional |             | ssiCount
integer                    | out    | optional | allocatable | ssiMap(:)
integer                    | out    | optional |             | ssiMinPetCount
integer                    | out    | optional |             | ssiMaxPetCount
integer                    | out    | optional |             | ssiLocalPetCount
integer                    | out    | optional |             | ssiLocalPet
integer                    | out    | optional |             | ssiLocalDevCount
integer                    | out    | optional | allocatable | ssiLocalDevList(:)
integer                    | out    | optional |             | mpiCommunicator
logical                    | out    | optional |             | pthreadsEnabledFlag
logical                    | out    | optional |             | openMPEnabledFlag
logical                    | out    | optional |             | ssiSharedMemoryEnabledFlag
character(len=:)           | out    | optional | allocatable | esmfComm
integer                    | out    | optional |             | rc

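Example

A minimal usage sketch (not part of the ESMF distribution): the entry point shown is the public ESMF_VMGet generic interface, for which ESMF_VMGetDefault is the specific implementation documented here. All output arguments are passed by keyword, as enforced by the keywordEnforcer dummy argument. Error handling is abbreviated.

  program vm_query_example
    use ESMF
    use mpi
    implicit none

    type(ESMF_VM) :: vm
    integer       :: localPet, petCount, ssiLocalPetCount
    integer       :: mpiComm, userComm, ierr, rc
    logical       :: ssiSharedMemoryEnabledFlag

    call ESMF_Initialize(rc=rc)
    call ESMF_VMGetGlobal(vm, rc=rc)

    ! Query basic layout information and the MPI communicator of the VM.
    call ESMF_VMGet(vm, localPet=localPet, petCount=petCount, &
      ssiLocalPetCount=ssiLocalPetCount, mpiCommunicator=mpiComm, &
      ssiSharedMemoryEnabledFlag=ssiSharedMemoryEnabledFlag, rc=rc)

    ! Duplicate the communicator before using it for user-level MPI
    ! communications, as recommended in the mpiCommunicator description below.
    call MPI_Comm_dup(mpiComm, userComm, ierr)

    if (localPet == 0) print *, "petCount=", petCount, &
      " ssiLocalPetCount=", ssiLocalPetCount, &
      " sharedMemory=", ssiSharedMemoryEnabledFlag

    call MPI_Comm_free(userComm, ierr)
    call ESMF_Finalize(rc=rc)
  end program vm_query_example
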
Source Code

  recursive subroutine ESMF_VMGetDefault(vm, keywordEnforcer, localPet, &
    currentSsiPe, petCount, peCount, ssiCount, ssiMap, ssiMinPetCount, ssiMaxPetCount, &
    ssiLocalPetCount, ssiLocalPet, ssiLocalDevCount, ssiLocalDevList, mpiCommunicator, &
    pthreadsEnabledFlag, openMPEnabledFlag, ssiSharedMemoryEnabledFlag, esmfComm, rc)
!
! !ARGUMENTS:
    type(ESMF_VM),        intent(in)            :: vm
    type(ESMF_KeywordEnforcer), optional        :: keywordEnforcer ! must use keywords below
    integer,              intent(out), optional :: localPet
    integer,              intent(out), optional :: currentSsiPe
    integer,              intent(out), optional :: petCount
    integer,              intent(out), optional :: peCount
    integer,              intent(out), optional :: ssiCount
    integer, allocatable, intent(out), optional :: ssiMap(:)
    integer,              intent(out), optional :: ssiMinPetCount
    integer,              intent(out), optional :: ssiMaxPetCount
    integer,              intent(out), optional :: ssiLocalPetCount
    integer,              intent(out), optional :: ssiLocalPet
    integer,              intent(out), optional :: ssiLocalDevCount
    integer, allocatable, intent(out), optional :: ssiLocalDevList(:)
    integer,              intent(out), optional :: mpiCommunicator
    logical,              intent(out), optional :: pthreadsEnabledFlag
    logical,              intent(out), optional :: openMPEnabledFlag
    logical,              intent(out), optional :: ssiSharedMemoryEnabledFlag
    character(:), allocatable, intent(out), optional :: esmfComm
    integer,              intent(out), optional :: rc
!
! !STATUS:
! \begin{itemize}
! \item\apiStatusCompatibleVersion{5.2.0r}
! \item\apiStatusModifiedSinceVersion{5.2.0r}
! \begin{description}
! \item[8.0.0] Added arguments {\tt ssiCount}, {\tt ssiMinPetCount}, 
!   {\tt ssiMaxPetCount}, and {\tt ssiLocalPetCount} to provide access 
!   to information about how the VM is mapped across the single system images
!   (SSIs) -- typically synonymous with nodes -- of the compute environment. This
!   information is useful when constructing custom petLists. \newline
!   Added argument {\tt ssiSharedMemoryEnabledFlag} that allows the user to 
!   query whether ESMF was compiled with support for shared memory 
!   access between PETs on the same SSI.
! \item[8.1.0] Added argument {\tt currentSsiPe} for easy query of the
!   current PE within the local SSI that is executing the request.\newline
!   Added argument {\tt ssiMap} for a convenient way to obtain a view
!   of the mapping of PETs to single system images across the entire VM.
! \item[8.2.0] Added argument {\tt esmfComm} to provide easy access to the
!   {\tt ESMF\_COMM} setting used by the ESMF installation.
! \item[8.6.0] Added arguments {\tt ssiLocalDevCount} and {\tt ssiLocalDevList}
!   to provide information about devices associated with the VM on the local
!   SSI. \newline
!   Added argument {\tt ssiLocalPet} to help with SSI specific assignment
!   between PET and device resources.
! \end{description}
! \end{itemize}
!
! !DESCRIPTION:
!   Get internal information about the specified {\tt ESMF\_VM} object.
!
!   The arguments are:
!   \begin{description}
!   \item[vm] 
!        Queried {\tt ESMF\_VM} object.
!   \item[{[localPet]}]
!        Upon return this holds the id of the local PET that issued this call.
!        The valid range of {\tt localPet} is $[0..petCount-1]$. A value of $-1$
!        is returned on PETs that are not active under the specified {\tt vm}.
!   \item[{[currentSsiPe]}]
!        Upon return this holds the id of the PE within the local SSI on which
!        the calling PET (i.e. {\tt localPet}) is currently executing. If the PET is
!        associated with a set of PEs, or PETs are not pinned, the returned
!        value might change each time the call is made.
!   \item[{[petCount]}]
!        Upon return this holds the number of PETs running under {\tt vm}.
!   \item[{[peCount]}]
!        Upon return this holds the number of PEs referenced by {\tt vm}.
!   \item[{[ssiCount]}]
!        Upon return this holds the number of single system images referenced 
!        by {\tt vm}.
!   \item[{[ssiMap]}]
!        Upon return this array is allocated and holds the single system image
!        id for each PET across the {\tt vm}. The size of {\tt ssiMap} is
!        equal to {\tt petCount}, with lower bound 0 and upper bound
!        {\tt petCount - 1}.
!   \item[{[ssiMinPetCount]}]
!        Upon return this holds the smallest number of PETs running in the same
!        single system image under {\tt vm}.
!   \item[{[ssiMaxPetCount]}]
!        Upon return this holds the largest number of PETs running in the same
!        single system image under {\tt vm}.
!   \item[{[ssiLocalPetCount]}]
!        Upon return this holds the number of PETs running in the same
!        single system image as {\tt localPet}.
!   \item[{[ssiLocalPet]}]
!        Upon return this holds the SSI local index of the executing
!        {\tt localPet}.
!   \item[{[ssiLocalDevCount]}]
!        Upon return this holds the number of devices associated with this VM
!        on the local single system image.
!   \item[{[ssiLocalDevList]}]
!        Upon return this array is allocated and holds the local device ids
!        of devices associated with this VM. The size of {\tt ssiLocalDevList}
!        is equal to {\tt ssiLocalDevCount}, with lower bound 0 and upper
!        bound {\tt ssiLocalDevCount - 1}. Local device ids can be used to
!        target specific devices using OpenMP, OpenACC, or similar device
!        APIs.
!   \item[{[mpiCommunicator]}]
!        Upon return this holds the MPI intra-communicator used by the 
!        specified {\tt ESMF\_VM} object. This communicator may be used for
!        user-level MPI communications. It is recommended that the user
!        duplicate the communicator via {\tt MPI\_Comm\_dup()} in order to
!        prevent any interference with ESMF communications.
!        {\tt MPI\_COMM\_NULL} is returned on PETs that are not active
!        under the specified {\tt vm}.
!   \item[{[pthreadsEnabledFlag]}]
!        \begin{description}
!        \item[{\tt .TRUE.}]
!             ESMF has been compiled with Pthreads, and the MPI environment
!             supports threading.
!        \item[{\tt .FALSE.}]
!             ESMF has {\em not} been compiled with Pthreads, or the MPI
!             environment does {\em not} support threading.
!        \end{description}
!   \item[{[openMPEnabledFlag]}]
!        \begin{description}
!        \item[{\tt .TRUE.}]
!             ESMF has been compiled with OpenMP.
!        \item[{\tt .FALSE.}]
!             ESMF has {\em not} been compiled with OpenMP.
!        \end{description}
!   \item[{[ssiSharedMemoryEnabledFlag]}]
!        \begin{description}
!        \item[{\tt .TRUE.}]
!             ESMF has been compiled to support shared memory access
!             between PETs that are on the same single system image (SSI).
!        \item[{\tt .FALSE.}]
!             ESMF has {\em not} been compiled to support shared memory access
!             between PETs that are on the same single system image (SSI).
!        \end{description}
!   \item[{[esmfComm]}]
!        Upon return this string is allocated to the appropriate size and holds
!        the exact value of the {\tt ESMF\_COMM} build environment variable used
!        by the ESMF installation. This provides a convenient way for the user
!        to determine the underlying MPI implementation.
!   \item[{[rc]}] 
!        Return code; equals {\tt ESMF\_SUCCESS} if there are no errors.
!   \end{description}
!
!EOP
!------------------------------------------------------------------------------
    type(ESMF_Logical)      :: pthreadsEnabledFlagArg         ! helper variable
    type(ESMF_Logical)      :: openMPEnabledFlagArg           ! helper variable
    type(ESMF_Logical)      :: ssiSharedMemoryEnabledFlagArg  ! helper variable
    integer                 :: petCountArg, ssiLocalDevCountArg ! helper variables
    integer                 :: i                 ! helper variable
    integer                 :: localrc  ! local return code
    character(len=40)       :: esmfCommArg
    type(ESMF_InterArray)   :: ssiLocalDevListArg       ! interface variable

    ! initialize return code; assume routine not implemented
    localrc = ESMF_RC_NOT_IMPL
    if (present(rc)) rc = ESMF_RC_NOT_IMPL

    ! Check init status of arguments
    ESMF_INIT_CHECK_DEEP(ESMF_VMGetInit, vm, rc)

    if (vm%this /= ESMF_NULL_POINTER) then
      ! Call into the C++ interface.
      call c_ESMC_VMGet(vm, localPet, currentSsiPe, petCountArg, peCount, &
        ssiCount, ssiMinPetCount, ssiMaxPetCount, ssiLocalPetCount, &
        ssiLocalPet, ssiLocalDevCountArg, mpiCommunicator, &
        pthreadsEnabledFlagArg, openMPEnabledFlagArg, &
        ssiSharedMemoryEnabledFlagArg, localrc)
      if (present(petCount)) petCount = petCountArg
      if (present (pthreadsEnabledFlag))  &
        pthreadsEnabledFlag = pthreadsEnabledFlagArg
      if (present (openMPEnabledFlag))  &
        openMPEnabledFlag = openMPEnabledFlagArg
      if (present (ssiSharedMemoryEnabledFlag))  &
        ssiSharedMemoryEnabledFlag = ssiSharedMemoryEnabledFlagArg
      if (ESMF_LogFoundError(localrc, ESMF_ERR_PASSTHRU, &
        ESMF_CONTEXT, rcToReturn=rc)) return
      if (present(ssiMap)) then
        allocate(ssiMap(0:petCountArg-1))
        do i=0, petCountArg-1
          call ESMF_VMGet(vm, pet=i, ssiId=ssiMap(i), rc=localrc)
          if (ESMF_LogFoundError(localrc, ESMF_ERR_PASSTHRU, &
            ESMF_CONTEXT, rcToReturn=rc)) return
        enddo
      endif
      if (present(ssiLocalDevCount)) ssiLocalDevCount = ssiLocalDevCountArg
      if (present(ssiLocalDevList)) then
        allocate(ssiLocalDevList(0:ssiLocalDevCountArg-1))
        ssiLocalDevListArg = ESMF_InterArrayCreate(ssiLocalDevList, rc=localrc)
        if (ESMF_LogFoundError(localrc, ESMF_ERR_PASSTHRU, &
          ESMF_CONTEXT, rcToReturn=rc)) return
        call c_ESMC_VMGetSsiLocalDevList(vm, ssiLocalDevListArg, localrc)
        if (ESMF_LogFoundError(localrc, ESMF_ERR_PASSTHRU, &
          ESMF_CONTEXT, rcToReturn=rc)) return
        call ESMF_InterArrayDestroy(ssiLocalDevListArg, rc=localrc)
        if (ESMF_LogFoundError(localrc, ESMF_ERR_PASSTHRU, &
          ESMF_CONTEXT, rcToReturn=rc)) return
      endif
    else
      ! Only very specific cases are supported for a NULL this pointer
      if (present(petCount) .or. present(peCount) .or. present(ssiCount) .or. &
        present(ssiMap) .or. present(ssiMinPetCount) .or. &
        present(ssiMaxPetCount) .or. present(ssiLocalPetCount) .or. &
        present(ssiLocalPet) .or. &
        present(ssiLocalDevCount) .or. present(ssiLocalDevList) .or. &
        present(pthreadsEnabledFlag) .or. present(openMPEnabledFlag) .or. &
        present(ssiSharedMemoryEnabledFlag)) then
        call ESMF_LogSetError(ESMF_RC_PTR_NULL, &
          msg="Not a valid pointer to VM", &
          ESMF_CONTEXT, rcToReturn=rc)
        return
      endif
      if (present(localPet)) then
        ! a value of -1 indicates that this PET is not active under the specified vm
        localPet = -1
      endif
      if (present(mpiCommunicator)) then
        call c_ESMC_VMGetMpiCommNull(mpiCommunicator, localrc)
        if (ESMF_LogFoundError(localrc, ESMF_ERR_PASSTHRU, &
          ESMF_CONTEXT, rcToReturn=rc)) return
      endif
    endif

    ! deal with esmfComm directly on the Fortran layer
    if (present(esmfComm)) then
      call c_esmc_initget_esmf_comm(esmfCommArg, localrc)
      if (ESMF_LogFoundError(localrc, ESMF_ERR_PASSTHRU, &
        ESMF_CONTEXT, rcToReturn=rc)) return
      esmfComm = trim(esmfCommArg)  ! implicit allocation of esmfComm
    endif

    ! return successfully
    if (present(rc)) rc = ESMF_SUCCESS

  end subroutine ESMF_VMGetDefault
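
Device Selection Example

A hedged sketch (not part of the ESMF distribution) of how the ssiLocalPet, ssiLocalDevCount, and ssiLocalDevList arguments might be combined to map each PET on an SSI to one of the devices visible on that SSI, here via OpenMP's omp_set_default_device. The subroutine name and the round-robin policy are illustrative assumptions only.

  subroutine assign_local_device(vm, rc)
    use ESMF
    use omp_lib
    implicit none
    type(ESMF_VM), intent(in)  :: vm
    integer,       intent(out) :: rc

    integer              :: ssiLocalPet, ssiLocalDevCount
    integer, allocatable :: ssiLocalDevList(:)

    ! Query the SSI-local PET index and the devices associated with the VM on
    ! this SSI (ssiLocalDevList is allocated with bounds 0:ssiLocalDevCount-1).
    call ESMF_VMGet(vm, ssiLocalPet=ssiLocalPet, &
      ssiLocalDevCount=ssiLocalDevCount, ssiLocalDevList=ssiLocalDevList, rc=rc)
    if (rc /= ESMF_SUCCESS) return

    if (ssiLocalDevCount > 0) then
      ! Round-robin the PETs of this SSI onto the locally visible devices.
      call omp_set_default_device(ssiLocalDevList(mod(ssiLocalPet, ssiLocalDevCount)))
    endif
  end subroutine assign_local_device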