author     Martin Liska <mliska@suse.cz>   2022-01-14 16:56:44 +0100
committer  Martin Liska <mliska@suse.cz>   2022-01-17 22:12:04 +0100
commit     5c69acb32329d49e58c26fa41ae74229a52b9106 (patch)
tree       ddb05f9d73afb6f998457d2ac4b720e3b3b60483 /gcc/fortran/trans-openmp.cc
parent     490e23032baaece71f2ec09fa1805064b150fbc2 (diff)
Rename .c files to .cc files.
gcc/ada/ChangeLog: * adadecode.c: Moved to... * adadecode.cc: ...here. * affinity.c: Moved to... * affinity.cc: ...here. * argv-lynxos178-raven-cert.c: Moved to... * argv-lynxos178-raven-cert.cc: ...here. * argv.c: Moved to... * argv.cc: ...here. * aux-io.c: Moved to... * aux-io.cc: ...here. * cio.c: Moved to... * cio.cc: ...here. * cstreams.c: Moved to... * cstreams.cc: ...here. * env.c: Moved to... * env.cc: ...here. * exit.c: Moved to... * exit.cc: ...here. * expect.c: Moved to... * expect.cc: ...here. * final.c: Moved to... * final.cc: ...here. * gcc-interface/cuintp.c: Moved to... * gcc-interface/cuintp.cc: ...here. * gcc-interface/decl.c: Moved to... * gcc-interface/decl.cc: ...here. * gcc-interface/misc.c: Moved to... * gcc-interface/misc.cc: ...here. * gcc-interface/targtyps.c: Moved to... * gcc-interface/targtyps.cc: ...here. * gcc-interface/trans.c: Moved to... * gcc-interface/trans.cc: ...here. * gcc-interface/utils.c: Moved to... * gcc-interface/utils.cc: ...here. * gcc-interface/utils2.c: Moved to... * gcc-interface/utils2.cc: ...here. * init.c: Moved to... * init.cc: ...here. * initialize.c: Moved to... * initialize.cc: ...here. * libgnarl/thread.c: Moved to... * libgnarl/thread.cc: ...here. * link.c: Moved to... * link.cc: ...here. * locales.c: Moved to... * locales.cc: ...here. * mkdir.c: Moved to... * mkdir.cc: ...here. * raise.c: Moved to... * raise.cc: ...here. * rtfinal.c: Moved to... * rtfinal.cc: ...here. * rtinit.c: Moved to... * rtinit.cc: ...here. * seh_init.c: Moved to... * seh_init.cc: ...here. * sigtramp-armdroid.c: Moved to... * sigtramp-armdroid.cc: ...here. * sigtramp-ios.c: Moved to... * sigtramp-ios.cc: ...here. * sigtramp-qnx.c: Moved to... * sigtramp-qnx.cc: ...here. * sigtramp-vxworks.c: Moved to... * sigtramp-vxworks.cc: ...here. * socket.c: Moved to... * socket.cc: ...here. * tracebak.c: Moved to... * tracebak.cc: ...here. * version.c: Moved to... * version.cc: ...here. * vx_stack_info.c: Moved to... * vx_stack_info.cc: ...here. gcc/ChangeLog: * adjust-alignment.c: Moved to... * adjust-alignment.cc: ...here. * alias.c: Moved to... * alias.cc: ...here. * alloc-pool.c: Moved to... * alloc-pool.cc: ...here. * asan.c: Moved to... * asan.cc: ...here. * attribs.c: Moved to... * attribs.cc: ...here. * auto-inc-dec.c: Moved to... * auto-inc-dec.cc: ...here. * auto-profile.c: Moved to... * auto-profile.cc: ...here. * bb-reorder.c: Moved to... * bb-reorder.cc: ...here. * bitmap.c: Moved to... * bitmap.cc: ...here. * btfout.c: Moved to... * btfout.cc: ...here. * builtins.c: Moved to... * builtins.cc: ...here. * caller-save.c: Moved to... * caller-save.cc: ...here. * calls.c: Moved to... * calls.cc: ...here. * ccmp.c: Moved to... * ccmp.cc: ...here. * cfg.c: Moved to... * cfg.cc: ...here. * cfganal.c: Moved to... * cfganal.cc: ...here. * cfgbuild.c: Moved to... * cfgbuild.cc: ...here. * cfgcleanup.c: Moved to... * cfgcleanup.cc: ...here. * cfgexpand.c: Moved to... * cfgexpand.cc: ...here. * cfghooks.c: Moved to... * cfghooks.cc: ...here. * cfgloop.c: Moved to... * cfgloop.cc: ...here. * cfgloopanal.c: Moved to... * cfgloopanal.cc: ...here. * cfgloopmanip.c: Moved to... * cfgloopmanip.cc: ...here. * cfgrtl.c: Moved to... * cfgrtl.cc: ...here. * cgraph.c: Moved to... * cgraph.cc: ...here. * cgraphbuild.c: Moved to... * cgraphbuild.cc: ...here. * cgraphclones.c: Moved to... * cgraphclones.cc: ...here. * cgraphunit.c: Moved to... * cgraphunit.cc: ...here. * collect-utils.c: Moved to... * collect-utils.cc: ...here. * collect2-aix.c: Moved to... 
* collect2-aix.cc: ...here. * collect2.c: Moved to... * collect2.cc: ...here. * combine-stack-adj.c: Moved to... * combine-stack-adj.cc: ...here. * combine.c: Moved to... * combine.cc: ...here. * common/common-targhooks.c: Moved to... * common/common-targhooks.cc: ...here. * common/config/aarch64/aarch64-common.c: Moved to... * common/config/aarch64/aarch64-common.cc: ...here. * common/config/alpha/alpha-common.c: Moved to... * common/config/alpha/alpha-common.cc: ...here. * common/config/arc/arc-common.c: Moved to... * common/config/arc/arc-common.cc: ...here. * common/config/arm/arm-common.c: Moved to... * common/config/arm/arm-common.cc: ...here. * common/config/avr/avr-common.c: Moved to... * common/config/avr/avr-common.cc: ...here. * common/config/bfin/bfin-common.c: Moved to... * common/config/bfin/bfin-common.cc: ...here. * common/config/bpf/bpf-common.c: Moved to... * common/config/bpf/bpf-common.cc: ...here. * common/config/c6x/c6x-common.c: Moved to... * common/config/c6x/c6x-common.cc: ...here. * common/config/cr16/cr16-common.c: Moved to... * common/config/cr16/cr16-common.cc: ...here. * common/config/cris/cris-common.c: Moved to... * common/config/cris/cris-common.cc: ...here. * common/config/csky/csky-common.c: Moved to... * common/config/csky/csky-common.cc: ...here. * common/config/default-common.c: Moved to... * common/config/default-common.cc: ...here. * common/config/epiphany/epiphany-common.c: Moved to... * common/config/epiphany/epiphany-common.cc: ...here. * common/config/fr30/fr30-common.c: Moved to... * common/config/fr30/fr30-common.cc: ...here. * common/config/frv/frv-common.c: Moved to... * common/config/frv/frv-common.cc: ...here. * common/config/gcn/gcn-common.c: Moved to... * common/config/gcn/gcn-common.cc: ...here. * common/config/h8300/h8300-common.c: Moved to... * common/config/h8300/h8300-common.cc: ...here. * common/config/i386/i386-common.c: Moved to... * common/config/i386/i386-common.cc: ...here. * common/config/ia64/ia64-common.c: Moved to... * common/config/ia64/ia64-common.cc: ...here. * common/config/iq2000/iq2000-common.c: Moved to... * common/config/iq2000/iq2000-common.cc: ...here. * common/config/lm32/lm32-common.c: Moved to... * common/config/lm32/lm32-common.cc: ...here. * common/config/m32r/m32r-common.c: Moved to... * common/config/m32r/m32r-common.cc: ...here. * common/config/m68k/m68k-common.c: Moved to... * common/config/m68k/m68k-common.cc: ...here. * common/config/mcore/mcore-common.c: Moved to... * common/config/mcore/mcore-common.cc: ...here. * common/config/microblaze/microblaze-common.c: Moved to... * common/config/microblaze/microblaze-common.cc: ...here. * common/config/mips/mips-common.c: Moved to... * common/config/mips/mips-common.cc: ...here. * common/config/mmix/mmix-common.c: Moved to... * common/config/mmix/mmix-common.cc: ...here. * common/config/mn10300/mn10300-common.c: Moved to... * common/config/mn10300/mn10300-common.cc: ...here. * common/config/msp430/msp430-common.c: Moved to... * common/config/msp430/msp430-common.cc: ...here. * common/config/nds32/nds32-common.c: Moved to... * common/config/nds32/nds32-common.cc: ...here. * common/config/nios2/nios2-common.c: Moved to... * common/config/nios2/nios2-common.cc: ...here. * common/config/nvptx/nvptx-common.c: Moved to... * common/config/nvptx/nvptx-common.cc: ...here. * common/config/or1k/or1k-common.c: Moved to... * common/config/or1k/or1k-common.cc: ...here. * common/config/pa/pa-common.c: Moved to... * common/config/pa/pa-common.cc: ...here. 
* common/config/pdp11/pdp11-common.c: Moved to... * common/config/pdp11/pdp11-common.cc: ...here. * common/config/pru/pru-common.c: Moved to... * common/config/pru/pru-common.cc: ...here. * common/config/riscv/riscv-common.c: Moved to... * common/config/riscv/riscv-common.cc: ...here. * common/config/rs6000/rs6000-common.c: Moved to... * common/config/rs6000/rs6000-common.cc: ...here. * common/config/rx/rx-common.c: Moved to... * common/config/rx/rx-common.cc: ...here. * common/config/s390/s390-common.c: Moved to... * common/config/s390/s390-common.cc: ...here. * common/config/sh/sh-common.c: Moved to... * common/config/sh/sh-common.cc: ...here. * common/config/sparc/sparc-common.c: Moved to... * common/config/sparc/sparc-common.cc: ...here. * common/config/tilegx/tilegx-common.c: Moved to... * common/config/tilegx/tilegx-common.cc: ...here. * common/config/tilepro/tilepro-common.c: Moved to... * common/config/tilepro/tilepro-common.cc: ...here. * common/config/v850/v850-common.c: Moved to... * common/config/v850/v850-common.cc: ...here. * common/config/vax/vax-common.c: Moved to... * common/config/vax/vax-common.cc: ...here. * common/config/visium/visium-common.c: Moved to... * common/config/visium/visium-common.cc: ...here. * common/config/xstormy16/xstormy16-common.c: Moved to... * common/config/xstormy16/xstormy16-common.cc: ...here. * common/config/xtensa/xtensa-common.c: Moved to... * common/config/xtensa/xtensa-common.cc: ...here. * compare-elim.c: Moved to... * compare-elim.cc: ...here. * config/aarch64/aarch64-bti-insert.c: Moved to... * config/aarch64/aarch64-bti-insert.cc: ...here. * config/aarch64/aarch64-builtins.c: Moved to... * config/aarch64/aarch64-builtins.cc: ...here. * config/aarch64/aarch64-c.c: Moved to... * config/aarch64/aarch64-c.cc: ...here. * config/aarch64/aarch64-d.c: Moved to... * config/aarch64/aarch64-d.cc: ...here. * config/aarch64/aarch64.c: Moved to... * config/aarch64/aarch64.cc: ...here. * config/aarch64/cortex-a57-fma-steering.c: Moved to... * config/aarch64/cortex-a57-fma-steering.cc: ...here. * config/aarch64/driver-aarch64.c: Moved to... * config/aarch64/driver-aarch64.cc: ...here. * config/aarch64/falkor-tag-collision-avoidance.c: Moved to... * config/aarch64/falkor-tag-collision-avoidance.cc: ...here. * config/aarch64/host-aarch64-darwin.c: Moved to... * config/aarch64/host-aarch64-darwin.cc: ...here. * config/alpha/alpha.c: Moved to... * config/alpha/alpha.cc: ...here. * config/alpha/driver-alpha.c: Moved to... * config/alpha/driver-alpha.cc: ...here. * config/arc/arc-c.c: Moved to... * config/arc/arc-c.cc: ...here. * config/arc/arc.c: Moved to... * config/arc/arc.cc: ...here. * config/arc/driver-arc.c: Moved to... * config/arc/driver-arc.cc: ...here. * config/arm/aarch-common.c: Moved to... * config/arm/aarch-common.cc: ...here. * config/arm/arm-builtins.c: Moved to... * config/arm/arm-builtins.cc: ...here. * config/arm/arm-c.c: Moved to... * config/arm/arm-c.cc: ...here. * config/arm/arm-d.c: Moved to... * config/arm/arm-d.cc: ...here. * config/arm/arm.c: Moved to... * config/arm/arm.cc: ...here. * config/arm/driver-arm.c: Moved to... * config/arm/driver-arm.cc: ...here. * config/avr/avr-c.c: Moved to... * config/avr/avr-c.cc: ...here. * config/avr/avr-devices.c: Moved to... * config/avr/avr-devices.cc: ...here. * config/avr/avr-log.c: Moved to... * config/avr/avr-log.cc: ...here. * config/avr/avr.c: Moved to... * config/avr/avr.cc: ...here. * config/avr/driver-avr.c: Moved to... * config/avr/driver-avr.cc: ...here. 
* config/avr/gen-avr-mmcu-specs.c: Moved to... * config/avr/gen-avr-mmcu-specs.cc: ...here. * config/avr/gen-avr-mmcu-texi.c: Moved to... * config/avr/gen-avr-mmcu-texi.cc: ...here. * config/bfin/bfin.c: Moved to... * config/bfin/bfin.cc: ...here. * config/bpf/bpf.c: Moved to... * config/bpf/bpf.cc: ...here. * config/bpf/coreout.c: Moved to... * config/bpf/coreout.cc: ...here. * config/c6x/c6x.c: Moved to... * config/c6x/c6x.cc: ...here. * config/cr16/cr16.c: Moved to... * config/cr16/cr16.cc: ...here. * config/cris/cris.c: Moved to... * config/cris/cris.cc: ...here. * config/csky/csky.c: Moved to... * config/csky/csky.cc: ...here. * config/darwin-c.c: Moved to... * config/darwin-c.cc: ...here. * config/darwin-d.c: Moved to... * config/darwin-d.cc: ...here. * config/darwin-driver.c: Moved to... * config/darwin-driver.cc: ...here. * config/darwin-f.c: Moved to... * config/darwin-f.cc: ...here. * config/darwin.c: Moved to... * config/darwin.cc: ...here. * config/default-c.c: Moved to... * config/default-c.cc: ...here. * config/default-d.c: Moved to... * config/default-d.cc: ...here. * config/dragonfly-d.c: Moved to... * config/dragonfly-d.cc: ...here. * config/epiphany/epiphany.c: Moved to... * config/epiphany/epiphany.cc: ...here. * config/epiphany/mode-switch-use.c: Moved to... * config/epiphany/mode-switch-use.cc: ...here. * config/epiphany/resolve-sw-modes.c: Moved to... * config/epiphany/resolve-sw-modes.cc: ...here. * config/fr30/fr30.c: Moved to... * config/fr30/fr30.cc: ...here. * config/freebsd-d.c: Moved to... * config/freebsd-d.cc: ...here. * config/frv/frv.c: Moved to... * config/frv/frv.cc: ...here. * config/ft32/ft32.c: Moved to... * config/ft32/ft32.cc: ...here. * config/gcn/driver-gcn.c: Moved to... * config/gcn/driver-gcn.cc: ...here. * config/gcn/gcn-run.c: Moved to... * config/gcn/gcn-run.cc: ...here. * config/gcn/gcn-tree.c: Moved to... * config/gcn/gcn-tree.cc: ...here. * config/gcn/gcn.c: Moved to... * config/gcn/gcn.cc: ...here. * config/gcn/mkoffload.c: Moved to... * config/gcn/mkoffload.cc: ...here. * config/glibc-c.c: Moved to... * config/glibc-c.cc: ...here. * config/glibc-d.c: Moved to... * config/glibc-d.cc: ...here. * config/h8300/h8300.c: Moved to... * config/h8300/h8300.cc: ...here. * config/host-darwin.c: Moved to... * config/host-darwin.cc: ...here. * config/host-hpux.c: Moved to... * config/host-hpux.cc: ...here. * config/host-linux.c: Moved to... * config/host-linux.cc: ...here. * config/host-netbsd.c: Moved to... * config/host-netbsd.cc: ...here. * config/host-openbsd.c: Moved to... * config/host-openbsd.cc: ...here. * config/host-solaris.c: Moved to... * config/host-solaris.cc: ...here. * config/i386/djgpp.c: Moved to... * config/i386/djgpp.cc: ...here. * config/i386/driver-i386.c: Moved to... * config/i386/driver-i386.cc: ...here. * config/i386/driver-mingw32.c: Moved to... * config/i386/driver-mingw32.cc: ...here. * config/i386/gnu-property.c: Moved to... * config/i386/gnu-property.cc: ...here. * config/i386/host-cygwin.c: Moved to... * config/i386/host-cygwin.cc: ...here. * config/i386/host-i386-darwin.c: Moved to... * config/i386/host-i386-darwin.cc: ...here. * config/i386/host-mingw32.c: Moved to... * config/i386/host-mingw32.cc: ...here. * config/i386/i386-builtins.c: Moved to... * config/i386/i386-builtins.cc: ...here. * config/i386/i386-c.c: Moved to... * config/i386/i386-c.cc: ...here. * config/i386/i386-d.c: Moved to... * config/i386/i386-d.cc: ...here. * config/i386/i386-expand.c: Moved to... * config/i386/i386-expand.cc: ...here. 
* config/i386/i386-features.c: Moved to... * config/i386/i386-features.cc: ...here. * config/i386/i386-options.c: Moved to... * config/i386/i386-options.cc: ...here. * config/i386/i386.c: Moved to... * config/i386/i386.cc: ...here. * config/i386/intelmic-mkoffload.c: Moved to... * config/i386/intelmic-mkoffload.cc: ...here. * config/i386/msformat-c.c: Moved to... * config/i386/msformat-c.cc: ...here. * config/i386/winnt-cxx.c: Moved to... * config/i386/winnt-cxx.cc: ...here. * config/i386/winnt-d.c: Moved to... * config/i386/winnt-d.cc: ...here. * config/i386/winnt-stubs.c: Moved to... * config/i386/winnt-stubs.cc: ...here. * config/i386/winnt.c: Moved to... * config/i386/winnt.cc: ...here. * config/i386/x86-tune-sched-atom.c: Moved to... * config/i386/x86-tune-sched-atom.cc: ...here. * config/i386/x86-tune-sched-bd.c: Moved to... * config/i386/x86-tune-sched-bd.cc: ...here. * config/i386/x86-tune-sched-core.c: Moved to... * config/i386/x86-tune-sched-core.cc: ...here. * config/i386/x86-tune-sched.c: Moved to... * config/i386/x86-tune-sched.cc: ...here. * config/ia64/ia64-c.c: Moved to... * config/ia64/ia64-c.cc: ...here. * config/ia64/ia64.c: Moved to... * config/ia64/ia64.cc: ...here. * config/iq2000/iq2000.c: Moved to... * config/iq2000/iq2000.cc: ...here. * config/linux.c: Moved to... * config/linux.cc: ...here. * config/lm32/lm32.c: Moved to... * config/lm32/lm32.cc: ...here. * config/m32c/m32c-pragma.c: Moved to... * config/m32c/m32c-pragma.cc: ...here. * config/m32c/m32c.c: Moved to... * config/m32c/m32c.cc: ...here. * config/m32r/m32r.c: Moved to... * config/m32r/m32r.cc: ...here. * config/m68k/m68k.c: Moved to... * config/m68k/m68k.cc: ...here. * config/mcore/mcore.c: Moved to... * config/mcore/mcore.cc: ...here. * config/microblaze/microblaze-c.c: Moved to... * config/microblaze/microblaze-c.cc: ...here. * config/microblaze/microblaze.c: Moved to... * config/microblaze/microblaze.cc: ...here. * config/mips/driver-native.c: Moved to... * config/mips/driver-native.cc: ...here. * config/mips/frame-header-opt.c: Moved to... * config/mips/frame-header-opt.cc: ...here. * config/mips/mips-d.c: Moved to... * config/mips/mips-d.cc: ...here. * config/mips/mips.c: Moved to... * config/mips/mips.cc: ...here. * config/mmix/mmix.c: Moved to... * config/mmix/mmix.cc: ...here. * config/mn10300/mn10300.c: Moved to... * config/mn10300/mn10300.cc: ...here. * config/moxie/moxie.c: Moved to... * config/moxie/moxie.cc: ...here. * config/msp430/driver-msp430.c: Moved to... * config/msp430/driver-msp430.cc: ...here. * config/msp430/msp430-c.c: Moved to... * config/msp430/msp430-c.cc: ...here. * config/msp430/msp430-devices.c: Moved to... * config/msp430/msp430-devices.cc: ...here. * config/msp430/msp430.c: Moved to... * config/msp430/msp430.cc: ...here. * config/nds32/nds32-cost.c: Moved to... * config/nds32/nds32-cost.cc: ...here. * config/nds32/nds32-fp-as-gp.c: Moved to... * config/nds32/nds32-fp-as-gp.cc: ...here. * config/nds32/nds32-intrinsic.c: Moved to... * config/nds32/nds32-intrinsic.cc: ...here. * config/nds32/nds32-isr.c: Moved to... * config/nds32/nds32-isr.cc: ...here. * config/nds32/nds32-md-auxiliary.c: Moved to... * config/nds32/nds32-md-auxiliary.cc: ...here. * config/nds32/nds32-memory-manipulation.c: Moved to... * config/nds32/nds32-memory-manipulation.cc: ...here. * config/nds32/nds32-pipelines-auxiliary.c: Moved to... * config/nds32/nds32-pipelines-auxiliary.cc: ...here. * config/nds32/nds32-predicates.c: Moved to... * config/nds32/nds32-predicates.cc: ...here. 
* config/nds32/nds32-relax-opt.c: Moved to... * config/nds32/nds32-relax-opt.cc: ...here. * config/nds32/nds32-utils.c: Moved to... * config/nds32/nds32-utils.cc: ...here. * config/nds32/nds32.c: Moved to... * config/nds32/nds32.cc: ...here. * config/netbsd-d.c: Moved to... * config/netbsd-d.cc: ...here. * config/netbsd.c: Moved to... * config/netbsd.cc: ...here. * config/nios2/nios2.c: Moved to... * config/nios2/nios2.cc: ...here. * config/nvptx/mkoffload.c: Moved to... * config/nvptx/mkoffload.cc: ...here. * config/nvptx/nvptx-c.c: Moved to... * config/nvptx/nvptx-c.cc: ...here. * config/nvptx/nvptx.c: Moved to... * config/nvptx/nvptx.cc: ...here. * config/openbsd-d.c: Moved to... * config/openbsd-d.cc: ...here. * config/or1k/or1k.c: Moved to... * config/or1k/or1k.cc: ...here. * config/pa/pa-d.c: Moved to... * config/pa/pa-d.cc: ...here. * config/pa/pa.c: Moved to... * config/pa/pa.cc: ...here. * config/pdp11/pdp11.c: Moved to... * config/pdp11/pdp11.cc: ...here. * config/pru/pru-passes.c: Moved to... * config/pru/pru-passes.cc: ...here. * config/pru/pru-pragma.c: Moved to... * config/pru/pru-pragma.cc: ...here. * config/pru/pru.c: Moved to... * config/pru/pru.cc: ...here. * config/riscv/riscv-builtins.c: Moved to... * config/riscv/riscv-builtins.cc: ...here. * config/riscv/riscv-c.c: Moved to... * config/riscv/riscv-c.cc: ...here. * config/riscv/riscv-d.c: Moved to... * config/riscv/riscv-d.cc: ...here. * config/riscv/riscv-shorten-memrefs.c: Moved to... * config/riscv/riscv-shorten-memrefs.cc: ...here. * config/riscv/riscv-sr.c: Moved to... * config/riscv/riscv-sr.cc: ...here. * config/riscv/riscv.c: Moved to... * config/riscv/riscv.cc: ...here. * config/rl78/rl78-c.c: Moved to... * config/rl78/rl78-c.cc: ...here. * config/rl78/rl78.c: Moved to... * config/rl78/rl78.cc: ...here. * config/rs6000/driver-rs6000.c: Moved to... * config/rs6000/driver-rs6000.cc: ...here. * config/rs6000/host-darwin.c: Moved to... * config/rs6000/host-darwin.cc: ...here. * config/rs6000/host-ppc64-darwin.c: Moved to... * config/rs6000/host-ppc64-darwin.cc: ...here. * config/rs6000/rbtree.c: Moved to... * config/rs6000/rbtree.cc: ...here. * config/rs6000/rs6000-c.c: Moved to... * config/rs6000/rs6000-c.cc: ...here. * config/rs6000/rs6000-call.c: Moved to... * config/rs6000/rs6000-call.cc: ...here. * config/rs6000/rs6000-d.c: Moved to... * config/rs6000/rs6000-d.cc: ...here. * config/rs6000/rs6000-gen-builtins.c: Moved to... * config/rs6000/rs6000-gen-builtins.cc: ...here. * config/rs6000/rs6000-linux.c: Moved to... * config/rs6000/rs6000-linux.cc: ...here. * config/rs6000/rs6000-logue.c: Moved to... * config/rs6000/rs6000-logue.cc: ...here. * config/rs6000/rs6000-p8swap.c: Moved to... * config/rs6000/rs6000-p8swap.cc: ...here. * config/rs6000/rs6000-pcrel-opt.c: Moved to... * config/rs6000/rs6000-pcrel-opt.cc: ...here. * config/rs6000/rs6000-string.c: Moved to... * config/rs6000/rs6000-string.cc: ...here. * config/rs6000/rs6000.c: Moved to... * config/rs6000/rs6000.cc: ...here. * config/rx/rx.c: Moved to... * config/rx/rx.cc: ...here. * config/s390/driver-native.c: Moved to... * config/s390/driver-native.cc: ...here. * config/s390/s390-c.c: Moved to... * config/s390/s390-c.cc: ...here. * config/s390/s390-d.c: Moved to... * config/s390/s390-d.cc: ...here. * config/s390/s390.c: Moved to... * config/s390/s390.cc: ...here. * config/sh/divtab-sh4-300.c: Moved to... * config/sh/divtab-sh4-300.cc: ...here. * config/sh/divtab-sh4.c: Moved to... * config/sh/divtab-sh4.cc: ...here. * config/sh/divtab.c: Moved to... 
* config/sh/divtab.cc: ...here. * config/sh/sh-c.c: Moved to... * config/sh/sh-c.cc: ...here. * config/sh/sh.c: Moved to... * config/sh/sh.cc: ...here. * config/sol2-c.c: Moved to... * config/sol2-c.cc: ...here. * config/sol2-cxx.c: Moved to... * config/sol2-cxx.cc: ...here. * config/sol2-d.c: Moved to... * config/sol2-d.cc: ...here. * config/sol2-stubs.c: Moved to... * config/sol2-stubs.cc: ...here. * config/sol2.c: Moved to... * config/sol2.cc: ...here. * config/sparc/driver-sparc.c: Moved to... * config/sparc/driver-sparc.cc: ...here. * config/sparc/sparc-c.c: Moved to... * config/sparc/sparc-c.cc: ...here. * config/sparc/sparc-d.c: Moved to... * config/sparc/sparc-d.cc: ...here. * config/sparc/sparc.c: Moved to... * config/sparc/sparc.cc: ...here. * config/stormy16/stormy16.c: Moved to... * config/stormy16/stormy16.cc: ...here. * config/tilegx/mul-tables.c: Moved to... * config/tilegx/mul-tables.cc: ...here. * config/tilegx/tilegx-c.c: Moved to... * config/tilegx/tilegx-c.cc: ...here. * config/tilegx/tilegx.c: Moved to... * config/tilegx/tilegx.cc: ...here. * config/tilepro/mul-tables.c: Moved to... * config/tilepro/mul-tables.cc: ...here. * config/tilepro/tilepro-c.c: Moved to... * config/tilepro/tilepro-c.cc: ...here. * config/tilepro/tilepro.c: Moved to... * config/tilepro/tilepro.cc: ...here. * config/v850/v850-c.c: Moved to... * config/v850/v850-c.cc: ...here. * config/v850/v850.c: Moved to... * config/v850/v850.cc: ...here. * config/vax/vax.c: Moved to... * config/vax/vax.cc: ...here. * config/visium/visium.c: Moved to... * config/visium/visium.cc: ...here. * config/vms/vms-c.c: Moved to... * config/vms/vms-c.cc: ...here. * config/vms/vms-f.c: Moved to... * config/vms/vms-f.cc: ...here. * config/vms/vms.c: Moved to... * config/vms/vms.cc: ...here. * config/vxworks-c.c: Moved to... * config/vxworks-c.cc: ...here. * config/vxworks.c: Moved to... * config/vxworks.cc: ...here. * config/winnt-c.c: Moved to... * config/winnt-c.cc: ...here. * config/xtensa/xtensa.c: Moved to... * config/xtensa/xtensa.cc: ...here. * context.c: Moved to... * context.cc: ...here. * convert.c: Moved to... * convert.cc: ...here. * coverage.c: Moved to... * coverage.cc: ...here. * cppbuiltin.c: Moved to... * cppbuiltin.cc: ...here. * cppdefault.c: Moved to... * cppdefault.cc: ...here. * cprop.c: Moved to... * cprop.cc: ...here. * cse.c: Moved to... * cse.cc: ...here. * cselib.c: Moved to... * cselib.cc: ...here. * ctfc.c: Moved to... * ctfc.cc: ...here. * ctfout.c: Moved to... * ctfout.cc: ...here. * data-streamer-in.c: Moved to... * data-streamer-in.cc: ...here. * data-streamer-out.c: Moved to... * data-streamer-out.cc: ...here. * data-streamer.c: Moved to... * data-streamer.cc: ...here. * dbgcnt.c: Moved to... * dbgcnt.cc: ...here. * dbxout.c: Moved to... * dbxout.cc: ...here. * dce.c: Moved to... * dce.cc: ...here. * ddg.c: Moved to... * ddg.cc: ...here. * debug.c: Moved to... * debug.cc: ...here. * df-core.c: Moved to... * df-core.cc: ...here. * df-problems.c: Moved to... * df-problems.cc: ...here. * df-scan.c: Moved to... * df-scan.cc: ...here. * dfp.c: Moved to... * dfp.cc: ...here. * diagnostic-color.c: Moved to... * diagnostic-color.cc: ...here. * diagnostic-show-locus.c: Moved to... * diagnostic-show-locus.cc: ...here. * diagnostic-spec.c: Moved to... * diagnostic-spec.cc: ...here. * diagnostic.c: Moved to... * diagnostic.cc: ...here. * dojump.c: Moved to... * dojump.cc: ...here. * dominance.c: Moved to... * dominance.cc: ...here. * domwalk.c: Moved to... * domwalk.cc: ...here. 
* double-int.c: Moved to... * double-int.cc: ...here. * dse.c: Moved to... * dse.cc: ...here. * dumpfile.c: Moved to... * dumpfile.cc: ...here. * dwarf2asm.c: Moved to... * dwarf2asm.cc: ...here. * dwarf2cfi.c: Moved to... * dwarf2cfi.cc: ...here. * dwarf2ctf.c: Moved to... * dwarf2ctf.cc: ...here. * dwarf2out.c: Moved to... * dwarf2out.cc: ...here. * early-remat.c: Moved to... * early-remat.cc: ...here. * edit-context.c: Moved to... * edit-context.cc: ...here. * emit-rtl.c: Moved to... * emit-rtl.cc: ...here. * errors.c: Moved to... * errors.cc: ...here. * et-forest.c: Moved to... * et-forest.cc: ...here. * except.c: Moved to... * except.cc: ...here. * explow.c: Moved to... * explow.cc: ...here. * expmed.c: Moved to... * expmed.cc: ...here. * expr.c: Moved to... * expr.cc: ...here. * fibonacci_heap.c: Moved to... * fibonacci_heap.cc: ...here. * file-find.c: Moved to... * file-find.cc: ...here. * file-prefix-map.c: Moved to... * file-prefix-map.cc: ...here. * final.c: Moved to... * final.cc: ...here. * fixed-value.c: Moved to... * fixed-value.cc: ...here. * fold-const-call.c: Moved to... * fold-const-call.cc: ...here. * fold-const.c: Moved to... * fold-const.cc: ...here. * fp-test.c: Moved to... * fp-test.cc: ...here. * function-tests.c: Moved to... * function-tests.cc: ...here. * function.c: Moved to... * function.cc: ...here. * fwprop.c: Moved to... * fwprop.cc: ...here. * gcc-ar.c: Moved to... * gcc-ar.cc: ...here. * gcc-main.c: Moved to... * gcc-main.cc: ...here. * gcc-rich-location.c: Moved to... * gcc-rich-location.cc: ...here. * gcc.c: Moved to... * gcc.cc: ...here. * gcov-dump.c: Moved to... * gcov-dump.cc: ...here. * gcov-io.c: Moved to... * gcov-io.cc: ...here. * gcov-tool.c: Moved to... * gcov-tool.cc: ...here. * gcov.c: Moved to... * gcov.cc: ...here. * gcse-common.c: Moved to... * gcse-common.cc: ...here. * gcse.c: Moved to... * gcse.cc: ...here. * genattr-common.c: Moved to... * genattr-common.cc: ...here. * genattr.c: Moved to... * genattr.cc: ...here. * genattrtab.c: Moved to... * genattrtab.cc: ...here. * genautomata.c: Moved to... * genautomata.cc: ...here. * gencfn-macros.c: Moved to... * gencfn-macros.cc: ...here. * gencheck.c: Moved to... * gencheck.cc: ...here. * genchecksum.c: Moved to... * genchecksum.cc: ...here. * gencodes.c: Moved to... * gencodes.cc: ...here. * genconditions.c: Moved to... * genconditions.cc: ...here. * genconfig.c: Moved to... * genconfig.cc: ...here. * genconstants.c: Moved to... * genconstants.cc: ...here. * genemit.c: Moved to... * genemit.cc: ...here. * genenums.c: Moved to... * genenums.cc: ...here. * generic-match-head.c: Moved to... * generic-match-head.cc: ...here. * genextract.c: Moved to... * genextract.cc: ...here. * genflags.c: Moved to... * genflags.cc: ...here. * gengenrtl.c: Moved to... * gengenrtl.cc: ...here. * gengtype-parse.c: Moved to... * gengtype-parse.cc: ...here. * gengtype-state.c: Moved to... * gengtype-state.cc: ...here. * gengtype.c: Moved to... * gengtype.cc: ...here. * genhooks.c: Moved to... * genhooks.cc: ...here. * genmatch.c: Moved to... * genmatch.cc: ...here. * genmddeps.c: Moved to... * genmddeps.cc: ...here. * genmddump.c: Moved to... * genmddump.cc: ...here. * genmodes.c: Moved to... * genmodes.cc: ...here. * genopinit.c: Moved to... * genopinit.cc: ...here. * genoutput.c: Moved to... * genoutput.cc: ...here. * genpeep.c: Moved to... * genpeep.cc: ...here. * genpreds.c: Moved to... * genpreds.cc: ...here. * genrecog.c: Moved to... * genrecog.cc: ...here. * gensupport.c: Moved to... 
* gensupport.cc: ...here. * gentarget-def.c: Moved to... * gentarget-def.cc: ...here. * genversion.c: Moved to... * genversion.cc: ...here. * ggc-common.c: Moved to... * ggc-common.cc: ...here. * ggc-none.c: Moved to... * ggc-none.cc: ...here. * ggc-page.c: Moved to... * ggc-page.cc: ...here. * ggc-tests.c: Moved to... * ggc-tests.cc: ...here. * gimple-builder.c: Moved to... * gimple-builder.cc: ...here. * gimple-expr.c: Moved to... * gimple-expr.cc: ...here. * gimple-fold.c: Moved to... * gimple-fold.cc: ...here. * gimple-iterator.c: Moved to... * gimple-iterator.cc: ...here. * gimple-laddress.c: Moved to... * gimple-laddress.cc: ...here. * gimple-loop-jam.c: Moved to... * gimple-loop-jam.cc: ...here. * gimple-low.c: Moved to... * gimple-low.cc: ...here. * gimple-match-head.c: Moved to... * gimple-match-head.cc: ...here. * gimple-pretty-print.c: Moved to... * gimple-pretty-print.cc: ...here. * gimple-ssa-backprop.c: Moved to... * gimple-ssa-backprop.cc: ...here. * gimple-ssa-evrp-analyze.c: Moved to... * gimple-ssa-evrp-analyze.cc: ...here. * gimple-ssa-evrp.c: Moved to... * gimple-ssa-evrp.cc: ...here. * gimple-ssa-isolate-paths.c: Moved to... * gimple-ssa-isolate-paths.cc: ...here. * gimple-ssa-nonnull-compare.c: Moved to... * gimple-ssa-nonnull-compare.cc: ...here. * gimple-ssa-split-paths.c: Moved to... * gimple-ssa-split-paths.cc: ...here. * gimple-ssa-sprintf.c: Moved to... * gimple-ssa-sprintf.cc: ...here. * gimple-ssa-store-merging.c: Moved to... * gimple-ssa-store-merging.cc: ...here. * gimple-ssa-strength-reduction.c: Moved to... * gimple-ssa-strength-reduction.cc: ...here. * gimple-ssa-warn-alloca.c: Moved to... * gimple-ssa-warn-alloca.cc: ...here. * gimple-ssa-warn-restrict.c: Moved to... * gimple-ssa-warn-restrict.cc: ...here. * gimple-streamer-in.c: Moved to... * gimple-streamer-in.cc: ...here. * gimple-streamer-out.c: Moved to... * gimple-streamer-out.cc: ...here. * gimple-walk.c: Moved to... * gimple-walk.cc: ...here. * gimple-warn-recursion.c: Moved to... * gimple-warn-recursion.cc: ...here. * gimple.c: Moved to... * gimple.cc: ...here. * gimplify-me.c: Moved to... * gimplify-me.cc: ...here. * gimplify.c: Moved to... * gimplify.cc: ...here. * godump.c: Moved to... * godump.cc: ...here. * graph.c: Moved to... * graph.cc: ...here. * graphds.c: Moved to... * graphds.cc: ...here. * graphite-dependences.c: Moved to... * graphite-dependences.cc: ...here. * graphite-isl-ast-to-gimple.c: Moved to... * graphite-isl-ast-to-gimple.cc: ...here. * graphite-optimize-isl.c: Moved to... * graphite-optimize-isl.cc: ...here. * graphite-poly.c: Moved to... * graphite-poly.cc: ...here. * graphite-scop-detection.c: Moved to... * graphite-scop-detection.cc: ...here. * graphite-sese-to-poly.c: Moved to... * graphite-sese-to-poly.cc: ...here. * graphite.c: Moved to... * graphite.cc: ...here. * haifa-sched.c: Moved to... * haifa-sched.cc: ...here. * hash-map-tests.c: Moved to... * hash-map-tests.cc: ...here. * hash-set-tests.c: Moved to... * hash-set-tests.cc: ...here. * hash-table.c: Moved to... * hash-table.cc: ...here. * hooks.c: Moved to... * hooks.cc: ...here. * host-default.c: Moved to... * host-default.cc: ...here. * hw-doloop.c: Moved to... * hw-doloop.cc: ...here. * hwint.c: Moved to... * hwint.cc: ...here. * ifcvt.c: Moved to... * ifcvt.cc: ...here. * inchash.c: Moved to... * inchash.cc: ...here. * incpath.c: Moved to... * incpath.cc: ...here. * init-regs.c: Moved to... * init-regs.cc: ...here. * input.c: Moved to... * input.cc: ...here. * internal-fn.c: Moved to... 
* internal-fn.cc: ...here. * intl.c: Moved to... * intl.cc: ...here. * ipa-comdats.c: Moved to... * ipa-comdats.cc: ...here. * ipa-cp.c: Moved to... * ipa-cp.cc: ...here. * ipa-devirt.c: Moved to... * ipa-devirt.cc: ...here. * ipa-fnsummary.c: Moved to... * ipa-fnsummary.cc: ...here. * ipa-icf-gimple.c: Moved to... * ipa-icf-gimple.cc: ...here. * ipa-icf.c: Moved to... * ipa-icf.cc: ...here. * ipa-inline-analysis.c: Moved to... * ipa-inline-analysis.cc: ...here. * ipa-inline-transform.c: Moved to... * ipa-inline-transform.cc: ...here. * ipa-inline.c: Moved to... * ipa-inline.cc: ...here. * ipa-modref-tree.c: Moved to... * ipa-modref-tree.cc: ...here. * ipa-modref.c: Moved to... * ipa-modref.cc: ...here. * ipa-param-manipulation.c: Moved to... * ipa-param-manipulation.cc: ...here. * ipa-polymorphic-call.c: Moved to... * ipa-polymorphic-call.cc: ...here. * ipa-predicate.c: Moved to... * ipa-predicate.cc: ...here. * ipa-profile.c: Moved to... * ipa-profile.cc: ...here. * ipa-prop.c: Moved to... * ipa-prop.cc: ...here. * ipa-pure-const.c: Moved to... * ipa-pure-const.cc: ...here. * ipa-ref.c: Moved to... * ipa-ref.cc: ...here. * ipa-reference.c: Moved to... * ipa-reference.cc: ...here. * ipa-split.c: Moved to... * ipa-split.cc: ...here. * ipa-sra.c: Moved to... * ipa-sra.cc: ...here. * ipa-utils.c: Moved to... * ipa-utils.cc: ...here. * ipa-visibility.c: Moved to... * ipa-visibility.cc: ...here. * ipa.c: Moved to... * ipa.cc: ...here. * ira-build.c: Moved to... * ira-build.cc: ...here. * ira-color.c: Moved to... * ira-color.cc: ...here. * ira-conflicts.c: Moved to... * ira-conflicts.cc: ...here. * ira-costs.c: Moved to... * ira-costs.cc: ...here. * ira-emit.c: Moved to... * ira-emit.cc: ...here. * ira-lives.c: Moved to... * ira-lives.cc: ...here. * ira.c: Moved to... * ira.cc: ...here. * jump.c: Moved to... * jump.cc: ...here. * langhooks.c: Moved to... * langhooks.cc: ...here. * lcm.c: Moved to... * lcm.cc: ...here. * lists.c: Moved to... * lists.cc: ...here. * loop-doloop.c: Moved to... * loop-doloop.cc: ...here. * loop-init.c: Moved to... * loop-init.cc: ...here. * loop-invariant.c: Moved to... * loop-invariant.cc: ...here. * loop-iv.c: Moved to... * loop-iv.cc: ...here. * loop-unroll.c: Moved to... * loop-unroll.cc: ...here. * lower-subreg.c: Moved to... * lower-subreg.cc: ...here. * lra-assigns.c: Moved to... * lra-assigns.cc: ...here. * lra-coalesce.c: Moved to... * lra-coalesce.cc: ...here. * lra-constraints.c: Moved to... * lra-constraints.cc: ...here. * lra-eliminations.c: Moved to... * lra-eliminations.cc: ...here. * lra-lives.c: Moved to... * lra-lives.cc: ...here. * lra-remat.c: Moved to... * lra-remat.cc: ...here. * lra-spills.c: Moved to... * lra-spills.cc: ...here. * lra.c: Moved to... * lra.cc: ...here. * lto-cgraph.c: Moved to... * lto-cgraph.cc: ...here. * lto-compress.c: Moved to... * lto-compress.cc: ...here. * lto-opts.c: Moved to... * lto-opts.cc: ...here. * lto-section-in.c: Moved to... * lto-section-in.cc: ...here. * lto-section-out.c: Moved to... * lto-section-out.cc: ...here. * lto-streamer-in.c: Moved to... * lto-streamer-in.cc: ...here. * lto-streamer-out.c: Moved to... * lto-streamer-out.cc: ...here. * lto-streamer.c: Moved to... * lto-streamer.cc: ...here. * lto-wrapper.c: Moved to... * lto-wrapper.cc: ...here. * main.c: Moved to... * main.cc: ...here. * mcf.c: Moved to... * mcf.cc: ...here. * mode-switching.c: Moved to... * mode-switching.cc: ...here. * modulo-sched.c: Moved to... * modulo-sched.cc: ...here. * multiple_target.c: Moved to... 
* multiple_target.cc: ...here. * omp-expand.c: Moved to... * omp-expand.cc: ...here. * omp-general.c: Moved to... * omp-general.cc: ...here. * omp-low.c: Moved to... * omp-low.cc: ...here. * omp-offload.c: Moved to... * omp-offload.cc: ...here. * omp-simd-clone.c: Moved to... * omp-simd-clone.cc: ...here. * opt-suggestions.c: Moved to... * opt-suggestions.cc: ...here. * optabs-libfuncs.c: Moved to... * optabs-libfuncs.cc: ...here. * optabs-query.c: Moved to... * optabs-query.cc: ...here. * optabs-tree.c: Moved to... * optabs-tree.cc: ...here. * optabs.c: Moved to... * optabs.cc: ...here. * opts-common.c: Moved to... * opts-common.cc: ...here. * opts-global.c: Moved to... * opts-global.cc: ...here. * opts.c: Moved to... * opts.cc: ...here. * passes.c: Moved to... * passes.cc: ...here. * plugin.c: Moved to... * plugin.cc: ...here. * postreload-gcse.c: Moved to... * postreload-gcse.cc: ...here. * postreload.c: Moved to... * postreload.cc: ...here. * predict.c: Moved to... * predict.cc: ...here. * prefix.c: Moved to... * prefix.cc: ...here. * pretty-print.c: Moved to... * pretty-print.cc: ...here. * print-rtl-function.c: Moved to... * print-rtl-function.cc: ...here. * print-rtl.c: Moved to... * print-rtl.cc: ...here. * print-tree.c: Moved to... * print-tree.cc: ...here. * profile-count.c: Moved to... * profile-count.cc: ...here. * profile.c: Moved to... * profile.cc: ...here. * read-md.c: Moved to... * read-md.cc: ...here. * read-rtl-function.c: Moved to... * read-rtl-function.cc: ...here. * read-rtl.c: Moved to... * read-rtl.cc: ...here. * real.c: Moved to... * real.cc: ...here. * realmpfr.c: Moved to... * realmpfr.cc: ...here. * recog.c: Moved to... * recog.cc: ...here. * ree.c: Moved to... * ree.cc: ...here. * reg-stack.c: Moved to... * reg-stack.cc: ...here. * regcprop.c: Moved to... * regcprop.cc: ...here. * reginfo.c: Moved to... * reginfo.cc: ...here. * regrename.c: Moved to... * regrename.cc: ...here. * regstat.c: Moved to... * regstat.cc: ...here. * reload.c: Moved to... * reload.cc: ...here. * reload1.c: Moved to... * reload1.cc: ...here. * reorg.c: Moved to... * reorg.cc: ...here. * resource.c: Moved to... * resource.cc: ...here. * rtl-error.c: Moved to... * rtl-error.cc: ...here. * rtl-tests.c: Moved to... * rtl-tests.cc: ...here. * rtl.c: Moved to... * rtl.cc: ...here. * rtlanal.c: Moved to... * rtlanal.cc: ...here. * rtlhash.c: Moved to... * rtlhash.cc: ...here. * rtlhooks.c: Moved to... * rtlhooks.cc: ...here. * rtx-vector-builder.c: Moved to... * rtx-vector-builder.cc: ...here. * run-rtl-passes.c: Moved to... * run-rtl-passes.cc: ...here. * sancov.c: Moved to... * sancov.cc: ...here. * sanopt.c: Moved to... * sanopt.cc: ...here. * sbitmap.c: Moved to... * sbitmap.cc: ...here. * sched-deps.c: Moved to... * sched-deps.cc: ...here. * sched-ebb.c: Moved to... * sched-ebb.cc: ...here. * sched-rgn.c: Moved to... * sched-rgn.cc: ...here. * sel-sched-dump.c: Moved to... * sel-sched-dump.cc: ...here. * sel-sched-ir.c: Moved to... * sel-sched-ir.cc: ...here. * sel-sched.c: Moved to... * sel-sched.cc: ...here. * selftest-diagnostic.c: Moved to... * selftest-diagnostic.cc: ...here. * selftest-rtl.c: Moved to... * selftest-rtl.cc: ...here. * selftest-run-tests.c: Moved to... * selftest-run-tests.cc: ...here. * selftest.c: Moved to... * selftest.cc: ...here. * sese.c: Moved to... * sese.cc: ...here. * shrink-wrap.c: Moved to... * shrink-wrap.cc: ...here. * simplify-rtx.c: Moved to... * simplify-rtx.cc: ...here. * sparseset.c: Moved to... * sparseset.cc: ...here. 
* spellcheck-tree.c: Moved to... * spellcheck-tree.cc: ...here. * spellcheck.c: Moved to... * spellcheck.cc: ...here. * sreal.c: Moved to... * sreal.cc: ...here. * stack-ptr-mod.c: Moved to... * stack-ptr-mod.cc: ...here. * statistics.c: Moved to... * statistics.cc: ...here. * stmt.c: Moved to... * stmt.cc: ...here. * stor-layout.c: Moved to... * stor-layout.cc: ...here. * store-motion.c: Moved to... * store-motion.cc: ...here. * streamer-hooks.c: Moved to... * streamer-hooks.cc: ...here. * stringpool.c: Moved to... * stringpool.cc: ...here. * substring-locations.c: Moved to... * substring-locations.cc: ...here. * symtab.c: Moved to... * symtab.cc: ...here. * target-globals.c: Moved to... * target-globals.cc: ...here. * targhooks.c: Moved to... * targhooks.cc: ...here. * timevar.c: Moved to... * timevar.cc: ...here. * toplev.c: Moved to... * toplev.cc: ...here. * tracer.c: Moved to... * tracer.cc: ...here. * trans-mem.c: Moved to... * trans-mem.cc: ...here. * tree-affine.c: Moved to... * tree-affine.cc: ...here. * tree-call-cdce.c: Moved to... * tree-call-cdce.cc: ...here. * tree-cfg.c: Moved to... * tree-cfg.cc: ...here. * tree-cfgcleanup.c: Moved to... * tree-cfgcleanup.cc: ...here. * tree-chrec.c: Moved to... * tree-chrec.cc: ...here. * tree-complex.c: Moved to... * tree-complex.cc: ...here. * tree-data-ref.c: Moved to... * tree-data-ref.cc: ...here. * tree-dfa.c: Moved to... * tree-dfa.cc: ...here. * tree-diagnostic.c: Moved to... * tree-diagnostic.cc: ...here. * tree-dump.c: Moved to... * tree-dump.cc: ...here. * tree-eh.c: Moved to... * tree-eh.cc: ...here. * tree-emutls.c: Moved to... * tree-emutls.cc: ...here. * tree-if-conv.c: Moved to... * tree-if-conv.cc: ...here. * tree-inline.c: Moved to... * tree-inline.cc: ...here. * tree-into-ssa.c: Moved to... * tree-into-ssa.cc: ...here. * tree-iterator.c: Moved to... * tree-iterator.cc: ...here. * tree-loop-distribution.c: Moved to... * tree-loop-distribution.cc: ...here. * tree-nested.c: Moved to... * tree-nested.cc: ...here. * tree-nrv.c: Moved to... * tree-nrv.cc: ...here. * tree-object-size.c: Moved to... * tree-object-size.cc: ...here. * tree-outof-ssa.c: Moved to... * tree-outof-ssa.cc: ...here. * tree-parloops.c: Moved to... * tree-parloops.cc: ...here. * tree-phinodes.c: Moved to... * tree-phinodes.cc: ...here. * tree-predcom.c: Moved to... * tree-predcom.cc: ...here. * tree-pretty-print.c: Moved to... * tree-pretty-print.cc: ...here. * tree-profile.c: Moved to... * tree-profile.cc: ...here. * tree-scalar-evolution.c: Moved to... * tree-scalar-evolution.cc: ...here. * tree-sra.c: Moved to... * tree-sra.cc: ...here. * tree-ssa-address.c: Moved to... * tree-ssa-address.cc: ...here. * tree-ssa-alias.c: Moved to... * tree-ssa-alias.cc: ...here. * tree-ssa-ccp.c: Moved to... * tree-ssa-ccp.cc: ...here. * tree-ssa-coalesce.c: Moved to... * tree-ssa-coalesce.cc: ...here. * tree-ssa-copy.c: Moved to... * tree-ssa-copy.cc: ...here. * tree-ssa-dce.c: Moved to... * tree-ssa-dce.cc: ...here. * tree-ssa-dom.c: Moved to... * tree-ssa-dom.cc: ...here. * tree-ssa-dse.c: Moved to... * tree-ssa-dse.cc: ...here. * tree-ssa-forwprop.c: Moved to... * tree-ssa-forwprop.cc: ...here. * tree-ssa-ifcombine.c: Moved to... * tree-ssa-ifcombine.cc: ...here. * tree-ssa-live.c: Moved to... * tree-ssa-live.cc: ...here. * tree-ssa-loop-ch.c: Moved to... * tree-ssa-loop-ch.cc: ...here. * tree-ssa-loop-im.c: Moved to... * tree-ssa-loop-im.cc: ...here. * tree-ssa-loop-ivcanon.c: Moved to... * tree-ssa-loop-ivcanon.cc: ...here. * tree-ssa-loop-ivopts.c: Moved to... 
* tree-ssa-loop-ivopts.cc: ...here. * tree-ssa-loop-manip.c: Moved to... * tree-ssa-loop-manip.cc: ...here. * tree-ssa-loop-niter.c: Moved to... * tree-ssa-loop-niter.cc: ...here. * tree-ssa-loop-prefetch.c: Moved to... * tree-ssa-loop-prefetch.cc: ...here. * tree-ssa-loop-split.c: Moved to... * tree-ssa-loop-split.cc: ...here. * tree-ssa-loop-unswitch.c: Moved to... * tree-ssa-loop-unswitch.cc: ...here. * tree-ssa-loop.c: Moved to... * tree-ssa-loop.cc: ...here. * tree-ssa-math-opts.c: Moved to... * tree-ssa-math-opts.cc: ...here. * tree-ssa-operands.c: Moved to... * tree-ssa-operands.cc: ...here. * tree-ssa-phiopt.c: Moved to... * tree-ssa-phiopt.cc: ...here. * tree-ssa-phiprop.c: Moved to... * tree-ssa-phiprop.cc: ...here. * tree-ssa-pre.c: Moved to... * tree-ssa-pre.cc: ...here. * tree-ssa-propagate.c: Moved to... * tree-ssa-propagate.cc: ...here. * tree-ssa-reassoc.c: Moved to... * tree-ssa-reassoc.cc: ...here. * tree-ssa-sccvn.c: Moved to... * tree-ssa-sccvn.cc: ...here. * tree-ssa-scopedtables.c: Moved to... * tree-ssa-scopedtables.cc: ...here. * tree-ssa-sink.c: Moved to... * tree-ssa-sink.cc: ...here. * tree-ssa-strlen.c: Moved to... * tree-ssa-strlen.cc: ...here. * tree-ssa-structalias.c: Moved to... * tree-ssa-structalias.cc: ...here. * tree-ssa-tail-merge.c: Moved to... * tree-ssa-tail-merge.cc: ...here. * tree-ssa-ter.c: Moved to... * tree-ssa-ter.cc: ...here. * tree-ssa-threadbackward.c: Moved to... * tree-ssa-threadbackward.cc: ...here. * tree-ssa-threadedge.c: Moved to... * tree-ssa-threadedge.cc: ...here. * tree-ssa-threadupdate.c: Moved to... * tree-ssa-threadupdate.cc: ...here. * tree-ssa-uncprop.c: Moved to... * tree-ssa-uncprop.cc: ...here. * tree-ssa-uninit.c: Moved to... * tree-ssa-uninit.cc: ...here. * tree-ssa.c: Moved to... * tree-ssa.cc: ...here. * tree-ssanames.c: Moved to... * tree-ssanames.cc: ...here. * tree-stdarg.c: Moved to... * tree-stdarg.cc: ...here. * tree-streamer-in.c: Moved to... * tree-streamer-in.cc: ...here. * tree-streamer-out.c: Moved to... * tree-streamer-out.cc: ...here. * tree-streamer.c: Moved to... * tree-streamer.cc: ...here. * tree-switch-conversion.c: Moved to... * tree-switch-conversion.cc: ...here. * tree-tailcall.c: Moved to... * tree-tailcall.cc: ...here. * tree-vect-data-refs.c: Moved to... * tree-vect-data-refs.cc: ...here. * tree-vect-generic.c: Moved to... * tree-vect-generic.cc: ...here. * tree-vect-loop-manip.c: Moved to... * tree-vect-loop-manip.cc: ...here. * tree-vect-loop.c: Moved to... * tree-vect-loop.cc: ...here. * tree-vect-patterns.c: Moved to... * tree-vect-patterns.cc: ...here. * tree-vect-slp-patterns.c: Moved to... * tree-vect-slp-patterns.cc: ...here. * tree-vect-slp.c: Moved to... * tree-vect-slp.cc: ...here. * tree-vect-stmts.c: Moved to... * tree-vect-stmts.cc: ...here. * tree-vector-builder.c: Moved to... * tree-vector-builder.cc: ...here. * tree-vectorizer.c: Moved to... * tree-vectorizer.cc: ...here. * tree-vrp.c: Moved to... * tree-vrp.cc: ...here. * tree.c: Moved to... * tree.cc: ...here. * tsan.c: Moved to... * tsan.cc: ...here. * typed-splay-tree.c: Moved to... * typed-splay-tree.cc: ...here. * ubsan.c: Moved to... * ubsan.cc: ...here. * valtrack.c: Moved to... * valtrack.cc: ...here. * value-prof.c: Moved to... * value-prof.cc: ...here. * var-tracking.c: Moved to... * var-tracking.cc: ...here. * varasm.c: Moved to... * varasm.cc: ...here. * varpool.c: Moved to... * varpool.cc: ...here. * vec-perm-indices.c: Moved to... * vec-perm-indices.cc: ...here. * vec.c: Moved to... * vec.cc: ...here. 
* vmsdbgout.c: Moved to... * vmsdbgout.cc: ...here. * vr-values.c: Moved to... * vr-values.cc: ...here. * vtable-verify.c: Moved to... * vtable-verify.cc: ...here. * web.c: Moved to... * web.cc: ...here. * xcoffout.c: Moved to... * xcoffout.cc: ...here. gcc/c-family/ChangeLog: * c-ada-spec.c: Moved to... * c-ada-spec.cc: ...here. * c-attribs.c: Moved to... * c-attribs.cc: ...here. * c-common.c: Moved to... * c-common.cc: ...here. * c-cppbuiltin.c: Moved to... * c-cppbuiltin.cc: ...here. * c-dump.c: Moved to... * c-dump.cc: ...here. * c-format.c: Moved to... * c-format.cc: ...here. * c-gimplify.c: Moved to... * c-gimplify.cc: ...here. * c-indentation.c: Moved to... * c-indentation.cc: ...here. * c-lex.c: Moved to... * c-lex.cc: ...here. * c-omp.c: Moved to... * c-omp.cc: ...here. * c-opts.c: Moved to... * c-opts.cc: ...here. * c-pch.c: Moved to... * c-pch.cc: ...here. * c-ppoutput.c: Moved to... * c-ppoutput.cc: ...here. * c-pragma.c: Moved to... * c-pragma.cc: ...here. * c-pretty-print.c: Moved to... * c-pretty-print.cc: ...here. * c-semantics.c: Moved to... * c-semantics.cc: ...here. * c-ubsan.c: Moved to... * c-ubsan.cc: ...here. * c-warn.c: Moved to... * c-warn.cc: ...here. * cppspec.c: Moved to... * cppspec.cc: ...here. * stub-objc.c: Moved to... * stub-objc.cc: ...here. gcc/c/ChangeLog: * c-aux-info.c: Moved to... * c-aux-info.cc: ...here. * c-convert.c: Moved to... * c-convert.cc: ...here. * c-decl.c: Moved to... * c-decl.cc: ...here. * c-errors.c: Moved to... * c-errors.cc: ...here. * c-fold.c: Moved to... * c-fold.cc: ...here. * c-lang.c: Moved to... * c-lang.cc: ...here. * c-objc-common.c: Moved to... * c-objc-common.cc: ...here. * c-parser.c: Moved to... * c-parser.cc: ...here. * c-typeck.c: Moved to... * c-typeck.cc: ...here. * gccspec.c: Moved to... * gccspec.cc: ...here. * gimple-parser.c: Moved to... * gimple-parser.cc: ...here. gcc/cp/ChangeLog: * call.c: Moved to... * call.cc: ...here. * class.c: Moved to... * class.cc: ...here. * constexpr.c: Moved to... * constexpr.cc: ...here. * cp-gimplify.c: Moved to... * cp-gimplify.cc: ...here. * cp-lang.c: Moved to... * cp-lang.cc: ...here. * cp-objcp-common.c: Moved to... * cp-objcp-common.cc: ...here. * cp-ubsan.c: Moved to... * cp-ubsan.cc: ...here. * cvt.c: Moved to... * cvt.cc: ...here. * cxx-pretty-print.c: Moved to... * cxx-pretty-print.cc: ...here. * decl.c: Moved to... * decl.cc: ...here. * decl2.c: Moved to... * decl2.cc: ...here. * dump.c: Moved to... * dump.cc: ...here. * error.c: Moved to... * error.cc: ...here. * except.c: Moved to... * except.cc: ...here. * expr.c: Moved to... * expr.cc: ...here. * friend.c: Moved to... * friend.cc: ...here. * g++spec.c: Moved to... * g++spec.cc: ...here. * init.c: Moved to... * init.cc: ...here. * lambda.c: Moved to... * lambda.cc: ...here. * lex.c: Moved to... * lex.cc: ...here. * mangle.c: Moved to... * mangle.cc: ...here. * method.c: Moved to... * method.cc: ...here. * name-lookup.c: Moved to... * name-lookup.cc: ...here. * optimize.c: Moved to... * optimize.cc: ...here. * parser.c: Moved to... * parser.cc: ...here. * pt.c: Moved to... * pt.cc: ...here. * ptree.c: Moved to... * ptree.cc: ...here. * rtti.c: Moved to... * rtti.cc: ...here. * search.c: Moved to... * search.cc: ...here. * semantics.c: Moved to... * semantics.cc: ...here. * tree.c: Moved to... * tree.cc: ...here. * typeck.c: Moved to... * typeck.cc: ...here. * typeck2.c: Moved to... * typeck2.cc: ...here. * vtable-class-hierarchy.c: Moved to... * vtable-class-hierarchy.cc: ...here. 
gcc/fortran/ChangeLog: * arith.c: Moved to... * arith.cc: ...here. * array.c: Moved to... * array.cc: ...here. * bbt.c: Moved to... * bbt.cc: ...here. * check.c: Moved to... * check.cc: ...here. * class.c: Moved to... * class.cc: ...here. * constructor.c: Moved to... * constructor.cc: ...here. * convert.c: Moved to... * convert.cc: ...here. * cpp.c: Moved to... * cpp.cc: ...here. * data.c: Moved to... * data.cc: ...here. * decl.c: Moved to... * decl.cc: ...here. * dependency.c: Moved to... * dependency.cc: ...here. * dump-parse-tree.c: Moved to... * dump-parse-tree.cc: ...here. * error.c: Moved to... * error.cc: ...here. * expr.c: Moved to... * expr.cc: ...here. * f95-lang.c: Moved to... * f95-lang.cc: ...here. * frontend-passes.c: Moved to... * frontend-passes.cc: ...here. * gfortranspec.c: Moved to... * gfortranspec.cc: ...here. * interface.c: Moved to... * interface.cc: ...here. * intrinsic.c: Moved to... * intrinsic.cc: ...here. * io.c: Moved to... * io.cc: ...here. * iresolve.c: Moved to... * iresolve.cc: ...here. * match.c: Moved to... * match.cc: ...here. * matchexp.c: Moved to... * matchexp.cc: ...here. * misc.c: Moved to... * misc.cc: ...here. * module.c: Moved to... * module.cc: ...here. * openmp.c: Moved to... * openmp.cc: ...here. * options.c: Moved to... * options.cc: ...here. * parse.c: Moved to... * parse.cc: ...here. * primary.c: Moved to... * primary.cc: ...here. * resolve.c: Moved to... * resolve.cc: ...here. * scanner.c: Moved to... * scanner.cc: ...here. * simplify.c: Moved to... * simplify.cc: ...here. * st.c: Moved to... * st.cc: ...here. * symbol.c: Moved to... * symbol.cc: ...here. * target-memory.c: Moved to... * target-memory.cc: ...here. * trans-array.c: Moved to... * trans-array.cc: ...here. * trans-common.c: Moved to... * trans-common.cc: ...here. * trans-const.c: Moved to... * trans-const.cc: ...here. * trans-decl.c: Moved to... * trans-decl.cc: ...here. * trans-expr.c: Moved to... * trans-expr.cc: ...here. * trans-intrinsic.c: Moved to... * trans-intrinsic.cc: ...here. * trans-io.c: Moved to... * trans-io.cc: ...here. * trans-openmp.c: Moved to... * trans-openmp.cc: ...here. * trans-stmt.c: Moved to... * trans-stmt.cc: ...here. * trans-types.c: Moved to... * trans-types.cc: ...here. * trans.c: Moved to... * trans.cc: ...here. gcc/go/ChangeLog: * go-backend.c: Moved to... * go-backend.cc: ...here. * go-lang.c: Moved to... * go-lang.cc: ...here. * gospec.c: Moved to... * gospec.cc: ...here. gcc/jit/ChangeLog: * dummy-frontend.c: Moved to... * dummy-frontend.cc: ...here. * jit-builtins.c: Moved to... * jit-builtins.cc: ...here. * jit-logging.c: Moved to... * jit-logging.cc: ...here. * jit-playback.c: Moved to... * jit-playback.cc: ...here. * jit-recording.c: Moved to... * jit-recording.cc: ...here. * jit-result.c: Moved to... * jit-result.cc: ...here. * jit-spec.c: Moved to... * jit-spec.cc: ...here. * jit-tempdir.c: Moved to... * jit-tempdir.cc: ...here. * jit-w32.c: Moved to... * jit-w32.cc: ...here. * libgccjit.c: Moved to... * libgccjit.cc: ...here. gcc/lto/ChangeLog: * common.c: Moved to... * common.cc: ...here. * lto-common.c: Moved to... * lto-common.cc: ...here. * lto-dump.c: Moved to... * lto-dump.cc: ...here. * lto-lang.c: Moved to... * lto-lang.cc: ...here. * lto-object.c: Moved to... * lto-object.cc: ...here. * lto-partition.c: Moved to... * lto-partition.cc: ...here. * lto-symtab.c: Moved to... * lto-symtab.cc: ...here. * lto.c: Moved to... * lto.cc: ...here. gcc/objc/ChangeLog: * objc-act.c: Moved to... * objc-act.cc: ...here. 
* objc-encoding.c: Moved to... * objc-encoding.cc: ...here. * objc-gnu-runtime-abi-01.c: Moved to... * objc-gnu-runtime-abi-01.cc: ...here. * objc-lang.c: Moved to... * objc-lang.cc: ...here. * objc-map.c: Moved to... * objc-map.cc: ...here. * objc-next-runtime-abi-01.c: Moved to... * objc-next-runtime-abi-01.cc: ...here. * objc-next-runtime-abi-02.c: Moved to... * objc-next-runtime-abi-02.cc: ...here. * objc-runtime-shared-support.c: Moved to... * objc-runtime-shared-support.cc: ...here. gcc/objcp/ChangeLog: * objcp-decl.c: Moved to... * objcp-decl.cc: ...here. * objcp-lang.c: Moved to... * objcp-lang.cc: ...here. libcpp/ChangeLog: * charset.c: Moved to... * charset.cc: ...here. * directives.c: Moved to... * directives.cc: ...here. * errors.c: Moved to... * errors.cc: ...here. * expr.c: Moved to... * expr.cc: ...here. * files.c: Moved to... * files.cc: ...here. * identifiers.c: Moved to... * identifiers.cc: ...here. * init.c: Moved to... * init.cc: ...here. * lex.c: Moved to... * lex.cc: ...here. * line-map.c: Moved to... * line-map.cc: ...here. * macro.c: Moved to... * macro.cc: ...here. * makeucnid.c: Moved to... * makeucnid.cc: ...here. * mkdeps.c: Moved to... * mkdeps.cc: ...here. * pch.c: Moved to... * pch.cc: ...here. * symtab.c: Moved to... * symtab.cc: ...here. * traditional.c: Moved to... * traditional.cc: ...here.
Diffstat (limited to 'gcc/fortran/trans-openmp.cc')
-rw-r--r--  gcc/fortran/trans-openmp.cc  7701
1 file changed, 7701 insertions, 0 deletions
diff --git a/gcc/fortran/trans-openmp.cc b/gcc/fortran/trans-openmp.cc
new file mode 100644
index 0000000..d363258
--- /dev/null
+++ b/gcc/fortran/trans-openmp.cc
@@ -0,0 +1,7701 @@
+/* OpenMP directive translation -- generate GCC trees from gfc_code.
+ Copyright (C) 2005-2022 Free Software Foundation, Inc.
+ Contributed by Jakub Jelinek <jakub@redhat.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "options.h"
+#include "tree.h"
+#include "gfortran.h"
+#include "gimple-expr.h"
+#include "trans.h"
+#include "stringpool.h"
+#include "fold-const.h"
+#include "gimplify.h" /* For create_tmp_var_raw. */
+#include "trans-stmt.h"
+#include "trans-types.h"
+#include "trans-array.h"
+#include "trans-const.h"
+#include "arith.h"
+#include "constructor.h"
+#include "gomp-constants.h"
+#include "omp-general.h"
+#include "omp-low.h"
+#include "memmodel.h" /* For MEMMODEL_ enums. */
+
+#undef GCC_DIAG_STYLE
+#define GCC_DIAG_STYLE __gcc_tdiag__
+#include "diagnostic-core.h"
+#undef GCC_DIAG_STYLE
+#define GCC_DIAG_STYLE __gcc_gfc__
+#include "attribs.h"
+#include "function.h"
+
+int ompws_flags;
+
+/* True if OpenMP should regard this DECL as being a scalar which has Fortran's
+ allocatable or pointer attribute. */
+
+bool
+gfc_omp_is_allocatable_or_ptr (const_tree decl)
+{
+ return (DECL_P (decl)
+ && (GFC_DECL_GET_SCALAR_POINTER (decl)
+ || GFC_DECL_GET_SCALAR_ALLOCATABLE (decl)));
+}
+
+/* True if the argument is an optional argument; except that false is also
+ returned for arguments with the value attribute (nonpointers) and for
+ assumed-shape variables (decl is a local variable containing arg->data).
+ Note that for 'procedure(), optional' false is returned, as that is
+ always a pointer and no additional indirection is needed.
+ Note that pvoid_type_node is for 'type(c_ptr), value' (and c_funloc). */
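+
+/* For instance, a dummy declared as 'integer, optional :: n' is passed by
+ reference and yields true here, while 'integer, value, optional :: n' is
+ passed by value with a separate hidden presence flag and yields false. */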
+
+static bool
+gfc_omp_is_optional_argument (const_tree decl)
+{
+ /* Note: VAR_DECL can occur with BIND(C) and array descriptors. */
+ return ((TREE_CODE (decl) == PARM_DECL || TREE_CODE (decl) == VAR_DECL)
+ && DECL_LANG_SPECIFIC (decl)
+ && TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE
+ && !VOID_TYPE_P (TREE_TYPE (TREE_TYPE (decl)))
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (decl))) != FUNCTION_TYPE
+ && GFC_DECL_OPTIONAL_ARGUMENT (decl));
+}
+
+/* Check whether this DECL belongs to a Fortran optional argument.
+ With 'for_present_check' set to false, decls which are optional parameters
+ themselves are returned as a tree - or NULL_TREE otherwise. Those decls are
+ always pointers. With 'for_present_check' set to true, the decl for checking
+ whether an argument is present is returned; for arguments with value
+ attribute this is the hidden argument and of BOOLEAN_TYPE. If the decl is
+ unrelated to optional arguments, NULL_TREE is returned. */
+
+tree
+gfc_omp_check_optional_argument (tree decl, bool for_present_check)
+{
+ if (!for_present_check)
+ return gfc_omp_is_optional_argument (decl) ? decl : NULL_TREE;
+
+ if (!DECL_LANG_SPECIFIC (decl))
+ return NULL_TREE;
+
+ tree orig_decl = decl;
+
+ /* For assumed-shape arrays, a local decl with arg->data is used. */
+ if (TREE_CODE (decl) != PARM_DECL
+ && (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (decl))
+ || GFC_ARRAY_TYPE_P (TREE_TYPE (decl))))
+ decl = GFC_DECL_SAVED_DESCRIPTOR (decl);
+
+ /* Note: With BIND(C), array descriptors are converted to a VAR_DECL. */
+ if (decl == NULL_TREE
+ || (TREE_CODE (decl) != PARM_DECL && TREE_CODE (decl) != VAR_DECL)
+ || !DECL_LANG_SPECIFIC (decl)
+ || !GFC_DECL_OPTIONAL_ARGUMENT (decl))
+ return NULL_TREE;
+
+ /* Scalars with VALUE attribute which are passed by value use a hidden
+ argument to denote the present status. They are passed as nonpointer type
+ with one exception: 'type(c_ptr), value' as 'void*'. */
+ /* Cf. trans-expr.c's gfc_conv_expr_present. */
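+ /* E.g. for 'integer, value, optional :: x' the hidden presence flag is the
+ artificial '_x' argument; the loop below finds it by walking the
+ function's DECL_ARGUMENTS. */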
+ if (TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE
+ || VOID_TYPE_P (TREE_TYPE (TREE_TYPE (decl))))
+ {
+ char name[GFC_MAX_SYMBOL_LEN + 2];
+ tree tree_name;
+
+ name[0] = '_';
+ strcpy (&name[1], IDENTIFIER_POINTER (DECL_NAME (decl)));
+ tree_name = get_identifier (name);
+
+ /* Walk function argument list to find the hidden arg. */
+ decl = DECL_ARGUMENTS (DECL_CONTEXT (decl));
+ for ( ; decl != NULL_TREE; decl = TREE_CHAIN (decl))
+ if (DECL_NAME (decl) == tree_name
+ && DECL_ARTIFICIAL (decl))
+ break;
+
+ gcc_assert (decl);
+ return decl;
+ }
+
+ return fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ orig_decl, null_pointer_node);
+}
+
+
+/* Return NULL_TREE if DECL is not an array descriptor; otherwise return the
+ tree used to access its 'data' component. With type_only = true, return the
+ type of that component without creating a new tree. */
+
+tree
+gfc_omp_array_data (tree decl, bool type_only)
+{
+ tree type = TREE_TYPE (decl);
+
+ if (POINTER_TYPE_P (type))
+ type = TREE_TYPE (type);
+
+ if (!GFC_DESCRIPTOR_TYPE_P (type))
+ return NULL_TREE;
+
+ if (type_only)
+ return GFC_TYPE_ARRAY_DATAPTR_TYPE (type);
+
+ if (POINTER_TYPE_P (TREE_TYPE (decl)))
+ decl = build_fold_indirect_ref (decl);
+
+ decl = gfc_conv_descriptor_data_get (decl);
+ STRIP_NOPS (decl);
+ return decl;
+}
+
+/* True if OpenMP should privatize what this DECL points to rather
+ than the DECL itself. */
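+
+/* For instance, a dummy argument 'integer :: i' arrives by reference, so it
+ is the pointed-to integer, not the reference itself, that must be
+ privatized. */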
+
+bool
+gfc_omp_privatize_by_reference (const_tree decl)
+{
+ tree type = TREE_TYPE (decl);
+
+ if (TREE_CODE (type) == REFERENCE_TYPE
+ && (!DECL_ARTIFICIAL (decl) || TREE_CODE (decl) == PARM_DECL))
+ return true;
+
+ if (TREE_CODE (type) == POINTER_TYPE
+ && gfc_omp_is_optional_argument (decl))
+ return true;
+
+ if (TREE_CODE (type) == POINTER_TYPE)
+ {
+ while (TREE_CODE (decl) == COMPONENT_REF)
+ decl = TREE_OPERAND (decl, 1);
+
+ /* Array POINTER/ALLOCATABLE have aggregate types; all user variables
+ that have POINTER_TYPE type and aren't scalar pointers, scalar
+ allocatables, Cray pointees or C pointers are supposed to be
+ privatized by reference. */
+ if (GFC_DECL_GET_SCALAR_POINTER (decl)
+ || GFC_DECL_GET_SCALAR_ALLOCATABLE (decl)
+ || GFC_DECL_CRAY_POINTEE (decl)
+ || GFC_DECL_ASSOCIATE_VAR_P (decl)
+ || VOID_TYPE_P (TREE_TYPE (TREE_TYPE (decl))))
+ return false;
+
+ if (!DECL_ARTIFICIAL (decl)
+ && TREE_CODE (TREE_TYPE (type)) != FUNCTION_TYPE)
+ return true;
+
+ /* Some arrays are expanded as DECL_ARTIFICIAL pointers
+ by the frontend. */
+ if (DECL_LANG_SPECIFIC (decl)
+ && GFC_DECL_SAVED_DESCRIPTOR (decl))
+ return true;
+ }
+
+ return false;
+}
+
+/* OMP_CLAUSE_DEFAULT_UNSPECIFIED unless OpenMP sharing attribute
+ of DECL is predetermined. */
+
+enum omp_clause_default_kind
+gfc_omp_predetermined_sharing (tree decl)
+{
+ /* Associate names preserve the association established during ASSOCIATE.
+ As they are implemented either as pointers to the selector or array
+ descriptor and shouldn't really change in the ASSOCIATE region,
+ this decl can be either shared or firstprivate. If it is a pointer,
+ use firstprivate, as it is cheaper that way, otherwise make it shared. */
+ if (GFC_DECL_ASSOCIATE_VAR_P (decl))
+ {
+ if (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE)
+ return OMP_CLAUSE_DEFAULT_FIRSTPRIVATE;
+ else
+ return OMP_CLAUSE_DEFAULT_SHARED;
+ }
+
+ if (DECL_ARTIFICIAL (decl)
+ && ! GFC_DECL_RESULT (decl)
+ && ! (DECL_LANG_SPECIFIC (decl)
+ && GFC_DECL_SAVED_DESCRIPTOR (decl)))
+ return OMP_CLAUSE_DEFAULT_SHARED;
+
+ /* Cray pointees shouldn't be listed in any clauses and should be
+ gimplified to dereference of the corresponding Cray pointer.
+ Make them all private, so that they are emitted in the debug
+ information. */
+ if (GFC_DECL_CRAY_POINTEE (decl))
+ return OMP_CLAUSE_DEFAULT_PRIVATE;
+
+ /* Assumed-size arrays are predetermined shared. */
+ if (TREE_CODE (decl) == PARM_DECL
+ && GFC_ARRAY_TYPE_P (TREE_TYPE (decl))
+ && GFC_TYPE_ARRAY_AKIND (TREE_TYPE (decl)) == GFC_ARRAY_UNKNOWN
+ && GFC_TYPE_ARRAY_UBOUND (TREE_TYPE (decl),
+ GFC_TYPE_ARRAY_RANK (TREE_TYPE (decl)) - 1)
+ == NULL)
+ return OMP_CLAUSE_DEFAULT_SHARED;
+
+ /* Dummy procedures aren't considered variables by OpenMP, thus are
+ disallowed in OpenMP clauses. They are represented as PARM_DECLs
+ in the middle-end, so return OMP_CLAUSE_DEFAULT_FIRSTPRIVATE here
+ to avoid complaining about their uses with default(none). */
+ if (TREE_CODE (decl) == PARM_DECL
+ && TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (decl))) == FUNCTION_TYPE)
+ return OMP_CLAUSE_DEFAULT_FIRSTPRIVATE;
+
+ /* COMMON and EQUIVALENCE decls are shared. They
+ are only referenced through DECL_VALUE_EXPR of the variables
+ contained in them. If those are privatized, they will not be
+ gimplified to the COMMON or EQUIVALENCE decls. */
+ if (GFC_DECL_COMMON_OR_EQUIV (decl) && ! DECL_HAS_VALUE_EXPR_P (decl))
+ return OMP_CLAUSE_DEFAULT_SHARED;
+
+ if (GFC_DECL_RESULT (decl) && ! DECL_HAS_VALUE_EXPR_P (decl))
+ return OMP_CLAUSE_DEFAULT_SHARED;
+
+ /* These are either array or derived parameters, or vtables.
+ In the former cases, the OpenMP standard doesn't consider them to be
+ variables at all (they can't be redefined), but they can nevertheless appear
+ in parallel/task regions, so for default(none) purposes treat them as shared.
+ For vtables the same handling is likely desirable. */
+ if (VAR_P (decl) && TREE_READONLY (decl)
+ && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
+ return OMP_CLAUSE_DEFAULT_SHARED;
+
+ return OMP_CLAUSE_DEFAULT_UNSPECIFIED;
+}
+
+
+/* OMP_CLAUSE_DEFAULTMAP_CATEGORY_UNSPECIFIED unless OpenMP mapping attribute
+ of DECL is predetermined. */
+
+enum omp_clause_defaultmap_kind
+gfc_omp_predetermined_mapping (tree decl)
+{
+ if (DECL_ARTIFICIAL (decl)
+ && ! GFC_DECL_RESULT (decl)
+ && ! (DECL_LANG_SPECIFIC (decl)
+ && GFC_DECL_SAVED_DESCRIPTOR (decl)))
+ return OMP_CLAUSE_DEFAULTMAP_TO;
+
+ /* These are either array or derived parameters, or vtables. */
+ if (VAR_P (decl) && TREE_READONLY (decl)
+ && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
+ return OMP_CLAUSE_DEFAULTMAP_TO;
+
+ return OMP_CLAUSE_DEFAULTMAP_CATEGORY_UNSPECIFIED;
+}
+
+
+/* Return decl that should be used when reporting DEFAULT(NONE)
+ diagnostics. */
+
+tree
+gfc_omp_report_decl (tree decl)
+{
+ if (DECL_ARTIFICIAL (decl)
+ && DECL_LANG_SPECIFIC (decl)
+ && GFC_DECL_SAVED_DESCRIPTOR (decl))
+ return GFC_DECL_SAVED_DESCRIPTOR (decl);
+
+ return decl;
+}
+
+/* Return true if TYPE has any allocatable components. */
+
+static bool
+gfc_has_alloc_comps (tree type, tree decl)
+{
+ tree field, ftype;
+
+ if (POINTER_TYPE_P (type))
+ {
+ if (GFC_DECL_GET_SCALAR_ALLOCATABLE (decl))
+ type = TREE_TYPE (type);
+ else if (GFC_DECL_GET_SCALAR_POINTER (decl))
+ return false;
+ }
+
+ if (GFC_DESCRIPTOR_TYPE_P (type)
+ && (GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_POINTER
+ || GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_POINTER_CONT))
+ return false;
+
+ if (GFC_DESCRIPTOR_TYPE_P (type) || GFC_ARRAY_TYPE_P (type))
+ type = gfc_get_element_type (type);
+
+ if (TREE_CODE (type) != RECORD_TYPE)
+ return false;
+
+ for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
+ {
+ ftype = TREE_TYPE (field);
+ if (GFC_DECL_GET_SCALAR_ALLOCATABLE (field))
+ return true;
+ if (GFC_DESCRIPTOR_TYPE_P (ftype)
+ && GFC_TYPE_ARRAY_AKIND (ftype) == GFC_ARRAY_ALLOCATABLE)
+ return true;
+ if (gfc_has_alloc_comps (ftype, field))
+ return true;
+ }
+ return false;
+}
+
+/* Return true if TYPE is polymorphic but not with pointer attribute. */
+
+static bool
+gfc_is_polymorphic_nonptr (tree type)
+{
+ if (POINTER_TYPE_P (type))
+ type = TREE_TYPE (type);
+ return GFC_CLASS_TYPE_P (type);
+}
+
+/* Return true if TYPE is unlimited polymorphic but without the pointer
+ attribute; 'unlimited' means that intrinsic types are also handled and that
+ the _len field is used. */
+
+static bool
+gfc_is_unlimited_polymorphic_nonptr (tree type)
+{
+ if (POINTER_TYPE_P (type))
+ type = TREE_TYPE (type);
+ if (!GFC_CLASS_TYPE_P (type))
+ return false;
+
+ tree field = TYPE_FIELDS (type); /* _data */
+ gcc_assert (field);
+ field = DECL_CHAIN (field); /* _vptr */
+ gcc_assert (field);
+ field = DECL_CHAIN (field);
+ if (!field)
+ return false;
+ gcc_assert (strcmp ("_len", IDENTIFIER_POINTER (DECL_NAME (field))) == 0);
+ return true;
+}
+
+/* Return true if the DECL is for an allocatable array or scalar. */
+
+bool
+gfc_omp_allocatable_p (tree decl)
+{
+ if (!DECL_P (decl))
+ return false;
+
+ if (GFC_DECL_GET_SCALAR_ALLOCATABLE (decl))
+ return true;
+
+ tree type = TREE_TYPE (decl);
+ if (gfc_omp_privatize_by_reference (decl))
+ type = TREE_TYPE (type);
+
+ if (GFC_DESCRIPTOR_TYPE_P (type)
+ && GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_ALLOCATABLE)
+ return true;
+
+ return false;
+}
+
+
+/* Return true if DECL in private clause needs
+ OMP_CLAUSE_PRIVATE_OUTER_REF on the private clause. */
+bool
+gfc_omp_private_outer_ref (tree decl)
+{
+ tree type = TREE_TYPE (decl);
+
+ if (gfc_omp_privatize_by_reference (decl))
+ type = TREE_TYPE (type);
+
+ if (GFC_DESCRIPTOR_TYPE_P (type)
+ && GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_ALLOCATABLE)
+ return true;
+
+ if (GFC_DECL_GET_SCALAR_ALLOCATABLE (decl))
+ return true;
+
+ if (gfc_has_alloc_comps (type, decl))
+ return true;
+
+ return false;
+}
+
+/* Callback for gfc_omp_unshare_expr. */
+
+static tree
+gfc_omp_unshare_expr_r (tree *tp, int *walk_subtrees, void *)
+{
+ tree t = *tp;
+ enum tree_code code = TREE_CODE (t);
+
+ /* Stop at types, decls, constants like copy_tree_r. */
+ if (TREE_CODE_CLASS (code) == tcc_type
+ || TREE_CODE_CLASS (code) == tcc_declaration
+ || TREE_CODE_CLASS (code) == tcc_constant
+ || code == BLOCK)
+ *walk_subtrees = 0;
+ else if (handled_component_p (t)
+ || TREE_CODE (t) == MEM_REF)
+ {
+ *tp = unshare_expr (t);
+ *walk_subtrees = 0;
+ }
+
+ return NULL_TREE;
+}
+
+/* Unshare in EXPR anything that the FE (which normally doesn't care
+ much about tree sharing, because everything is unshared during
+ gimplification) could have created that would cause tree-sharing
+ problems at omp-low.c time. */
+
+static tree
+gfc_omp_unshare_expr (tree expr)
+{
+ walk_tree (&expr, gfc_omp_unshare_expr_r, NULL, NULL);
+ return expr;
+}
+
+enum walk_alloc_comps
+{
+ WALK_ALLOC_COMPS_DTOR,
+ WALK_ALLOC_COMPS_DEFAULT_CTOR,
+ WALK_ALLOC_COMPS_COPY_CTOR
+};
+
+/* Handle allocatable components in OpenMP clauses. */
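+
+/* For instance, given
+ type t
+ integer, allocatable :: a(:)
+ end type t
+ a privatized variable of type 't' needs its 'a' component separately
+ allocated, copied or freed; the walker below emits that per-component
+ code for each of the WALK_ALLOC_COMPS_* cases above. */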
+
+static tree
+gfc_walk_alloc_comps (tree decl, tree dest, tree var,
+ enum walk_alloc_comps kind)
+{
+ stmtblock_t block, tmpblock;
+ tree type = TREE_TYPE (decl), then_b, tem, field;
+ gfc_init_block (&block);
+
+ if (GFC_ARRAY_TYPE_P (type) || GFC_DESCRIPTOR_TYPE_P (type))
+ {
+ if (GFC_DESCRIPTOR_TYPE_P (type))
+ {
+ gfc_init_block (&tmpblock);
+ tem = gfc_full_array_size (&tmpblock, decl,
+ GFC_TYPE_ARRAY_RANK (type));
+ then_b = gfc_finish_block (&tmpblock);
+ gfc_add_expr_to_block (&block, gfc_omp_unshare_expr (then_b));
+ tem = gfc_omp_unshare_expr (tem);
+ tem = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, tem,
+ gfc_index_one_node);
+ }
+ else
+ {
+ bool compute_nelts = false;
+ if (!TYPE_DOMAIN (type)
+ || TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == NULL_TREE
+ || TYPE_MIN_VALUE (TYPE_DOMAIN (type)) == error_mark_node
+ || TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == error_mark_node)
+ compute_nelts = true;
+ else if (VAR_P (TYPE_MAX_VALUE (TYPE_DOMAIN (type))))
+ {
+ tree a = DECL_ATTRIBUTES (TYPE_MAX_VALUE (TYPE_DOMAIN (type)));
+ if (lookup_attribute ("omp dummy var", a))
+ compute_nelts = true;
+ }
+ if (compute_nelts)
+ {
+ tem = fold_build2 (EXACT_DIV_EXPR, sizetype,
+ TYPE_SIZE_UNIT (type),
+ TYPE_SIZE_UNIT (TREE_TYPE (type)));
+ tem = size_binop (MINUS_EXPR, tem, size_one_node);
+ }
+ else
+ tem = array_type_nelts (type);
+ tem = fold_convert (gfc_array_index_type, tem);
+ }
+
+ tree nelems = gfc_evaluate_now (tem, &block);
+ tree index = gfc_create_var (gfc_array_index_type, "S");
+
+ gfc_init_block (&tmpblock);
+ tem = gfc_conv_array_data (decl);
+ tree declvar = build_fold_indirect_ref_loc (input_location, tem);
+ tree declvref = gfc_build_array_ref (declvar, index, NULL);
+ tree destvar, destvref = NULL_TREE;
+ if (dest)
+ {
+ tem = gfc_conv_array_data (dest);
+ destvar = build_fold_indirect_ref_loc (input_location, tem);
+ destvref = gfc_build_array_ref (destvar, index, NULL);
+ }
+ gfc_add_expr_to_block (&tmpblock,
+ gfc_walk_alloc_comps (declvref, destvref,
+ var, kind));
+
+ gfc_loopinfo loop;
+ gfc_init_loopinfo (&loop);
+ loop.dimen = 1;
+ loop.from[0] = gfc_index_zero_node;
+ loop.loopvar[0] = index;
+ loop.to[0] = nelems;
+ gfc_trans_scalarizing_loops (&loop, &tmpblock);
+ gfc_add_block_to_block (&block, &loop.pre);
+ return gfc_finish_block (&block);
+ }
+ else if (GFC_DECL_GET_SCALAR_ALLOCATABLE (var))
+ {
+ decl = build_fold_indirect_ref_loc (input_location, decl);
+ if (dest)
+ dest = build_fold_indirect_ref_loc (input_location, dest);
+ type = TREE_TYPE (decl);
+ }
+
+ gcc_assert (TREE_CODE (type) == RECORD_TYPE);
+ for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
+ {
+ tree ftype = TREE_TYPE (field);
+ tree declf, destf = NULL_TREE;
+ bool has_alloc_comps = gfc_has_alloc_comps (ftype, field);
+ if ((!GFC_DESCRIPTOR_TYPE_P (ftype)
+ || GFC_TYPE_ARRAY_AKIND (ftype) != GFC_ARRAY_ALLOCATABLE)
+ && !GFC_DECL_GET_SCALAR_ALLOCATABLE (field)
+ && !has_alloc_comps)
+ continue;
+ declf = fold_build3_loc (input_location, COMPONENT_REF, ftype,
+ decl, field, NULL_TREE);
+ if (dest)
+ destf = fold_build3_loc (input_location, COMPONENT_REF, ftype,
+ dest, field, NULL_TREE);
+
+ tem = NULL_TREE;
+ switch (kind)
+ {
+ case WALK_ALLOC_COMPS_DTOR:
+ break;
+ case WALK_ALLOC_COMPS_DEFAULT_CTOR:
+ if (GFC_DESCRIPTOR_TYPE_P (ftype)
+ && GFC_TYPE_ARRAY_AKIND (ftype) == GFC_ARRAY_ALLOCATABLE)
+ {
+ gfc_add_modify (&block, unshare_expr (destf),
+ unshare_expr (declf));
+ tem = gfc_duplicate_allocatable_nocopy
+ (destf, declf, ftype,
+ GFC_TYPE_ARRAY_RANK (ftype));
+ }
+ else if (GFC_DECL_GET_SCALAR_ALLOCATABLE (field))
+ tem = gfc_duplicate_allocatable_nocopy (destf, declf, ftype, 0);
+ break;
+ case WALK_ALLOC_COMPS_COPY_CTOR:
+ if (GFC_DESCRIPTOR_TYPE_P (ftype)
+ && GFC_TYPE_ARRAY_AKIND (ftype) == GFC_ARRAY_ALLOCATABLE)
+ tem = gfc_duplicate_allocatable (destf, declf, ftype,
+ GFC_TYPE_ARRAY_RANK (ftype),
+ NULL_TREE);
+ else if (GFC_DECL_GET_SCALAR_ALLOCATABLE (field))
+ tem = gfc_duplicate_allocatable (destf, declf, ftype, 0,
+ NULL_TREE);
+ break;
+ }
+ if (tem)
+ gfc_add_expr_to_block (&block, gfc_omp_unshare_expr (tem));
+ if (has_alloc_comps)
+ {
+ gfc_init_block (&tmpblock);
+ gfc_add_expr_to_block (&tmpblock,
+ gfc_walk_alloc_comps (declf, destf,
+ field, kind));
+ then_b = gfc_finish_block (&tmpblock);
+ if (GFC_DESCRIPTOR_TYPE_P (ftype)
+ && GFC_TYPE_ARRAY_AKIND (ftype) == GFC_ARRAY_ALLOCATABLE)
+ tem = gfc_conv_descriptor_data_get (unshare_expr (declf));
+ else if (GFC_DECL_GET_SCALAR_ALLOCATABLE (field))
+ tem = unshare_expr (declf);
+ else
+ tem = NULL_TREE;
+ if (tem)
+ {
+ tem = fold_convert (pvoid_type_node, tem);
+ tem = fold_build2_loc (input_location, NE_EXPR,
+ logical_type_node, tem,
+ null_pointer_node);
+ then_b = build3_loc (input_location, COND_EXPR, void_type_node,
+ tem, then_b,
+ build_empty_stmt (input_location));
+ }
+ gfc_add_expr_to_block (&block, then_b);
+ }
+ if (kind == WALK_ALLOC_COMPS_DTOR)
+ {
+ if (GFC_DESCRIPTOR_TYPE_P (ftype)
+ && GFC_TYPE_ARRAY_AKIND (ftype) == GFC_ARRAY_ALLOCATABLE)
+ {
+ tem = gfc_conv_descriptor_data_get (unshare_expr (declf));
+ tem = gfc_deallocate_with_status (tem, NULL_TREE, NULL_TREE,
+ NULL_TREE, NULL_TREE, true,
+ NULL,
+ GFC_CAF_COARRAY_NOCOARRAY);
+ gfc_add_expr_to_block (&block, gfc_omp_unshare_expr (tem));
+ }
+ else if (GFC_DECL_GET_SCALAR_ALLOCATABLE (field))
+ {
+ tem = gfc_call_free (unshare_expr (declf));
+ gfc_add_expr_to_block (&block, gfc_omp_unshare_expr (tem));
+ }
+ }
+ }
+
+ return gfc_finish_block (&block);
+}
+
+/* Return code to initialize DECL with its default constructor, or
+ NULL if there's nothing to do. */
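+
+/* For instance, for 'integer, allocatable :: a(:)' in a PRIVATE clause the
+ private copy must mirror the outer variable: allocated with the same shape
+ when the outer one is allocated, unallocated otherwise. For reduction
+ clauses the private copy is always allocated. */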
+
+tree
+gfc_omp_clause_default_ctor (tree clause, tree decl, tree outer)
+{
+ tree type = TREE_TYPE (decl), size, ptr, cond, then_b, else_b;
+ stmtblock_t block, cond_block;
+
+ switch (OMP_CLAUSE_CODE (clause))
+ {
+ case OMP_CLAUSE__LOOPTEMP_:
+ case OMP_CLAUSE__REDUCTEMP_:
+ case OMP_CLAUSE__CONDTEMP_:
+ case OMP_CLAUSE__SCANTEMP_:
+ return NULL;
+ case OMP_CLAUSE_PRIVATE:
+ case OMP_CLAUSE_LASTPRIVATE:
+ case OMP_CLAUSE_LINEAR:
+ case OMP_CLAUSE_REDUCTION:
+ case OMP_CLAUSE_IN_REDUCTION:
+ case OMP_CLAUSE_TASK_REDUCTION:
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ if ((! GFC_DESCRIPTOR_TYPE_P (type)
+ || GFC_TYPE_ARRAY_AKIND (type) != GFC_ARRAY_ALLOCATABLE)
+ && (!GFC_DECL_GET_SCALAR_ALLOCATABLE (OMP_CLAUSE_DECL (clause))
+ || !POINTER_TYPE_P (type)))
+ {
+ if (gfc_has_alloc_comps (type, OMP_CLAUSE_DECL (clause)))
+ {
+ gcc_assert (outer);
+ gfc_start_block (&block);
+ tree tem = gfc_walk_alloc_comps (outer, decl,
+ OMP_CLAUSE_DECL (clause),
+ WALK_ALLOC_COMPS_DEFAULT_CTOR);
+ gfc_add_expr_to_block (&block, tem);
+ return gfc_finish_block (&block);
+ }
+ return NULL_TREE;
+ }
+
+ gcc_assert (outer != NULL_TREE);
+
+ /* Allocatable arrays and scalars in PRIVATE clauses need to be set to the
+ "not currently allocated" allocation status if the outer
+ array is "not currently allocated"; otherwise they should be allocated. */
+ gfc_start_block (&block);
+
+ gfc_init_block (&cond_block);
+
+ if (GFC_DESCRIPTOR_TYPE_P (type))
+ {
+ gfc_add_modify (&cond_block, decl, outer);
+ tree rank = gfc_rank_cst[GFC_TYPE_ARRAY_RANK (type) - 1];
+ size = gfc_conv_descriptor_ubound_get (decl, rank);
+ size = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
+ size,
+ gfc_conv_descriptor_lbound_get (decl, rank));
+ size = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ size, gfc_index_one_node);
+ if (GFC_TYPE_ARRAY_RANK (type) > 1)
+ size = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type, size,
+ gfc_conv_descriptor_stride_get (decl, rank));
+ tree esize = fold_convert (gfc_array_index_type,
+ TYPE_SIZE_UNIT (gfc_get_element_type (type)));
+ size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ size, esize);
+ size = unshare_expr (size);
+ size = gfc_evaluate_now (fold_convert (size_type_node, size),
+ &cond_block);
+ }
+ else
+ size = fold_convert (size_type_node, TYPE_SIZE_UNIT (TREE_TYPE (type)));
+ ptr = gfc_create_var (pvoid_type_node, NULL);
+ gfc_allocate_using_malloc (&cond_block, ptr, size, NULL_TREE);
+ if (GFC_DESCRIPTOR_TYPE_P (type))
+ gfc_conv_descriptor_data_set (&cond_block, unshare_expr (decl), ptr);
+ else
+ gfc_add_modify (&cond_block, unshare_expr (decl),
+ fold_convert (TREE_TYPE (decl), ptr));
+ if (gfc_has_alloc_comps (type, OMP_CLAUSE_DECL (clause)))
+ {
+ tree tem = gfc_walk_alloc_comps (outer, decl,
+ OMP_CLAUSE_DECL (clause),
+ WALK_ALLOC_COMPS_DEFAULT_CTOR);
+ gfc_add_expr_to_block (&cond_block, tem);
+ }
+ then_b = gfc_finish_block (&cond_block);
+
+ /* Reduction clause requires allocated ALLOCATABLE. */
+ if (OMP_CLAUSE_CODE (clause) != OMP_CLAUSE_REDUCTION
+ && OMP_CLAUSE_CODE (clause) != OMP_CLAUSE_IN_REDUCTION
+ && OMP_CLAUSE_CODE (clause) != OMP_CLAUSE_TASK_REDUCTION)
+ {
+ gfc_init_block (&cond_block);
+ if (GFC_DESCRIPTOR_TYPE_P (type))
+ gfc_conv_descriptor_data_set (&cond_block, unshare_expr (decl),
+ null_pointer_node);
+ else
+ gfc_add_modify (&cond_block, unshare_expr (decl),
+ build_zero_cst (TREE_TYPE (decl)));
+ else_b = gfc_finish_block (&cond_block);
+
+ tree tem = fold_convert (pvoid_type_node,
+ GFC_DESCRIPTOR_TYPE_P (type)
+ ? gfc_conv_descriptor_data_get (outer) : outer);
+ tem = unshare_expr (tem);
+ cond = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
+ tem, null_pointer_node);
+ gfc_add_expr_to_block (&block,
+ build3_loc (input_location, COND_EXPR,
+ void_type_node, cond, then_b,
+ else_b));
+ /* Avoid -W*uninitialized warnings. */
+ if (DECL_P (decl))
+ suppress_warning (decl, OPT_Wuninitialized);
+ }
+ else
+ gfc_add_expr_to_block (&block, then_b);
+
+ return gfc_finish_block (&block);
+}
+
+/* Build and return code for a copy constructor from SRC to DEST. */
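+
+/* For instance, for 'integer, allocatable :: a(:)' in a FIRSTPRIVATE clause
+ the private copy is allocated only when SRC is allocated and its payload is
+ then copied with memcpy; nonpointer polymorphic (CLASS) scalars are instead
+ copied via the _vptr->_copy procedure. */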
+
+tree
+gfc_omp_clause_copy_ctor (tree clause, tree dest, tree src)
+{
+ tree type = TREE_TYPE (dest), ptr, size, call;
+ tree decl_type = TREE_TYPE (OMP_CLAUSE_DECL (clause));
+ tree cond, then_b, else_b;
+ stmtblock_t block, cond_block;
+
+ gcc_assert (OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_FIRSTPRIVATE
+ || OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_LINEAR);
+
+ if (DECL_ARTIFICIAL (OMP_CLAUSE_DECL (clause))
+ && DECL_LANG_SPECIFIC (OMP_CLAUSE_DECL (clause))
+ && GFC_DECL_SAVED_DESCRIPTOR (OMP_CLAUSE_DECL (clause)))
+ decl_type
+ = TREE_TYPE (GFC_DECL_SAVED_DESCRIPTOR (OMP_CLAUSE_DECL (clause)));
+
+ if (gfc_is_polymorphic_nonptr (decl_type))
+ {
+ if (POINTER_TYPE_P (decl_type))
+ decl_type = TREE_TYPE (decl_type);
+ decl_type = TREE_TYPE (TYPE_FIELDS (decl_type));
+ if (GFC_DESCRIPTOR_TYPE_P (decl_type) || GFC_ARRAY_TYPE_P (decl_type))
+ fatal_error (input_location,
+ "Sorry, polymorphic arrays not yet supported for "
+ "firstprivate");
+ tree src_len;
+ tree nelems = build_int_cst (size_type_node, 1); /* Scalar. */
+ tree src_data = gfc_class_data_get (unshare_expr (src));
+ tree dest_data = gfc_class_data_get (unshare_expr (dest));
+ bool unlimited = gfc_is_unlimited_polymorphic_nonptr (type);
+
+ gfc_start_block (&block);
+ gfc_add_modify (&block, gfc_class_vptr_get (dest),
+ gfc_class_vptr_get (src));
+ gfc_init_block (&cond_block);
+
+ if (unlimited)
+ {
+ src_len = gfc_class_len_get (src);
+ gfc_add_modify (&cond_block, gfc_class_len_get (unshare_expr (dest)), src_len);
+ }
+
+ /* Use: size = class._vtab._size * (class._len > 0 ? class._len : 1). */
+ size = fold_convert (size_type_node, gfc_class_vtab_size_get (src));
+ if (unlimited)
+ {
+ cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
+ unshare_expr (src_len),
+ build_zero_cst (TREE_TYPE (src_len)));
+ cond = build3_loc (input_location, COND_EXPR, size_type_node, cond,
+ fold_convert (size_type_node,
+ unshare_expr (src_len)),
+ build_int_cst (size_type_node, 1));
+ size = fold_build2_loc (input_location, MULT_EXPR, size_type_node,
+ size, cond);
+ }
+
+ /* Malloc memory + call class->_vptr->_copy. */
+ call = builtin_decl_explicit (BUILT_IN_MALLOC);
+ call = build_call_expr_loc (input_location, call, 1, size);
+ gfc_add_modify (&cond_block, dest_data,
+ fold_convert (TREE_TYPE (dest_data), call));
+ gfc_add_expr_to_block (&cond_block,
+ gfc_copy_class_to_class (src, dest, nelems,
+ unlimited));
+
+ gcc_assert (TREE_CODE (dest_data) == COMPONENT_REF);
+ if (!GFC_DECL_GET_SCALAR_ALLOCATABLE (TREE_OPERAND (dest_data, 1)))
+ {
+ gfc_add_block_to_block (&block, &cond_block);
+ }
+ else
+ {
+ /* Create: if (class._data != 0) <cond_block> else class._data = NULL; */
+ cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ src_data, null_pointer_node);
+ gfc_add_expr_to_block (&block, build3_loc (input_location, COND_EXPR,
+ void_type_node, cond,
+ gfc_finish_block (&cond_block),
+ fold_build2_loc (input_location, MODIFY_EXPR, void_type_node,
+ unshare_expr (dest_data), null_pointer_node)));
+ }
+ return gfc_finish_block (&block);
+ }
+
+ if ((! GFC_DESCRIPTOR_TYPE_P (type)
+ || GFC_TYPE_ARRAY_AKIND (type) != GFC_ARRAY_ALLOCATABLE)
+ && (!GFC_DECL_GET_SCALAR_ALLOCATABLE (OMP_CLAUSE_DECL (clause))
+ || !POINTER_TYPE_P (type)))
+ {
+ if (gfc_has_alloc_comps (type, OMP_CLAUSE_DECL (clause)))
+ {
+ gfc_start_block (&block);
+ gfc_add_modify (&block, dest, src);
+ tree tem = gfc_walk_alloc_comps (src, dest, OMP_CLAUSE_DECL (clause),
+ WALK_ALLOC_COMPS_COPY_CTOR);
+ gfc_add_expr_to_block (&block, tem);
+ return gfc_finish_block (&block);
+ }
+ else
+ return build2_v (MODIFY_EXPR, dest, src);
+ }
+
+ /* Allocatable arrays in FIRSTPRIVATE clauses need to be allocated
+ and copied from SRC. */
+ gfc_start_block (&block);
+
+ gfc_init_block (&cond_block);
+
+ gfc_add_modify (&cond_block, dest, fold_convert (TREE_TYPE (dest), src));
+ if (GFC_DESCRIPTOR_TYPE_P (type))
+ {
+ tree rank = gfc_rank_cst[GFC_TYPE_ARRAY_RANK (type) - 1];
+ size = gfc_conv_descriptor_ubound_get (dest, rank);
+ size = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
+ size,
+ gfc_conv_descriptor_lbound_get (dest, rank));
+ size = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ size, gfc_index_one_node);
+ if (GFC_TYPE_ARRAY_RANK (type) > 1)
+ size = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type, size,
+ gfc_conv_descriptor_stride_get (dest, rank));
+ tree esize = fold_convert (gfc_array_index_type,
+ TYPE_SIZE_UNIT (gfc_get_element_type (type)));
+ size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ size, esize);
+ size = unshare_expr (size);
+ size = gfc_evaluate_now (fold_convert (size_type_node, size),
+ &cond_block);
+ }
+ else
+ size = fold_convert (size_type_node, TYPE_SIZE_UNIT (TREE_TYPE (type)));
+ ptr = gfc_create_var (pvoid_type_node, NULL);
+ gfc_allocate_using_malloc (&cond_block, ptr, size, NULL_TREE);
+ if (GFC_DESCRIPTOR_TYPE_P (type))
+ gfc_conv_descriptor_data_set (&cond_block, unshare_expr (dest), ptr);
+ else
+ gfc_add_modify (&cond_block, unshare_expr (dest),
+ fold_convert (TREE_TYPE (dest), ptr));
+
+ tree srcptr = GFC_DESCRIPTOR_TYPE_P (type)
+ ? gfc_conv_descriptor_data_get (src) : src;
+ srcptr = unshare_expr (srcptr);
+ srcptr = fold_convert (pvoid_type_node, srcptr);
+ call = build_call_expr_loc (input_location,
+ builtin_decl_explicit (BUILT_IN_MEMCPY), 3, ptr,
+ srcptr, size);
+ gfc_add_expr_to_block (&cond_block, fold_convert (void_type_node, call));
+ if (gfc_has_alloc_comps (type, OMP_CLAUSE_DECL (clause)))
+ {
+ tree tem = gfc_walk_alloc_comps (src, dest,
+ OMP_CLAUSE_DECL (clause),
+ WALK_ALLOC_COMPS_COPY_CTOR);
+ gfc_add_expr_to_block (&cond_block, tem);
+ }
+ then_b = gfc_finish_block (&cond_block);
+
+ gfc_init_block (&cond_block);
+ if (GFC_DESCRIPTOR_TYPE_P (type))
+ gfc_conv_descriptor_data_set (&cond_block, unshare_expr (dest),
+ null_pointer_node);
+ else
+ gfc_add_modify (&cond_block, unshare_expr (dest),
+ build_zero_cst (TREE_TYPE (dest)));
+ else_b = gfc_finish_block (&cond_block);
+
+ cond = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
+ unshare_expr (srcptr), null_pointer_node);
+ gfc_add_expr_to_block (&block,
+ build3_loc (input_location, COND_EXPR,
+ void_type_node, cond, then_b, else_b));
+ /* Avoid -W*uninitialized warnings. */
+ if (DECL_P (dest))
+ suppress_warning (dest, OPT_Wuninitialized);
+
+ return gfc_finish_block (&block);
+}
+
+/* Similarly, except use an intrinsic or pointer assignment operator
+ instead. */
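+
+/* This is used for clauses that assign back into an existing variable
+ (e.g. LASTPRIVATE or COPYIN): allocatable components of DEST are
+ deallocated first, and the array payload is (re)allocated only when DEST
+ is unallocated or its shape differs from SRC, before copying the data. */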
+
+tree
+gfc_omp_clause_assign_op (tree clause, tree dest, tree src)
+{
+ tree type = TREE_TYPE (dest), ptr, size, call, nonalloc;
+ tree cond, then_b, else_b;
+ stmtblock_t block, cond_block, cond_block2, inner_block;
+
+ if ((! GFC_DESCRIPTOR_TYPE_P (type)
+ || GFC_TYPE_ARRAY_AKIND (type) != GFC_ARRAY_ALLOCATABLE)
+ && (!GFC_DECL_GET_SCALAR_ALLOCATABLE (OMP_CLAUSE_DECL (clause))
+ || !POINTER_TYPE_P (type)))
+ {
+ if (gfc_has_alloc_comps (type, OMP_CLAUSE_DECL (clause)))
+ {
+ gfc_start_block (&block);
+ /* First dealloc any allocatable components in DEST. */
+ tree tem = gfc_walk_alloc_comps (dest, NULL_TREE,
+ OMP_CLAUSE_DECL (clause),
+ WALK_ALLOC_COMPS_DTOR);
+ gfc_add_expr_to_block (&block, tem);
+ /* Then copy over toplevel data. */
+ gfc_add_modify (&block, dest, src);
+ /* Finally allocate any allocatable components and copy. */
+ tem = gfc_walk_alloc_comps (src, dest, OMP_CLAUSE_DECL (clause),
+ WALK_ALLOC_COMPS_COPY_CTOR);
+ gfc_add_expr_to_block (&block, tem);
+ return gfc_finish_block (&block);
+ }
+ else
+ return build2_v (MODIFY_EXPR, dest, src);
+ }
+
+ gfc_start_block (&block);
+
+ if (gfc_has_alloc_comps (type, OMP_CLAUSE_DECL (clause)))
+ {
+ then_b = gfc_walk_alloc_comps (dest, NULL_TREE, OMP_CLAUSE_DECL (clause),
+ WALK_ALLOC_COMPS_DTOR);
+ tree tem = fold_convert (pvoid_type_node,
+ GFC_DESCRIPTOR_TYPE_P (type)
+ ? gfc_conv_descriptor_data_get (dest) : dest);
+ tem = unshare_expr (tem);
+ cond = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
+ tem, null_pointer_node);
+ tem = build3_loc (input_location, COND_EXPR, void_type_node, cond,
+ then_b, build_empty_stmt (input_location));
+ gfc_add_expr_to_block (&block, tem);
+ }
+
+ gfc_init_block (&cond_block);
+
+ if (GFC_DESCRIPTOR_TYPE_P (type))
+ {
+ tree rank = gfc_rank_cst[GFC_TYPE_ARRAY_RANK (type) - 1];
+ size = gfc_conv_descriptor_ubound_get (src, rank);
+ size = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
+ size,
+ gfc_conv_descriptor_lbound_get (src, rank));
+ size = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ size, gfc_index_one_node);
+ if (GFC_TYPE_ARRAY_RANK (type) > 1)
+ size = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type, size,
+ gfc_conv_descriptor_stride_get (src, rank));
+ tree esize = fold_convert (gfc_array_index_type,
+ TYPE_SIZE_UNIT (gfc_get_element_type (type)));
+ size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ size, esize);
+ size = unshare_expr (size);
+ size = gfc_evaluate_now (fold_convert (size_type_node, size),
+ &cond_block);
+ }
+ else
+ size = fold_convert (size_type_node, TYPE_SIZE_UNIT (TREE_TYPE (type)));
+ ptr = gfc_create_var (pvoid_type_node, NULL);
+
+ tree destptr = GFC_DESCRIPTOR_TYPE_P (type)
+ ? gfc_conv_descriptor_data_get (dest) : dest;
+ destptr = unshare_expr (destptr);
+ destptr = fold_convert (pvoid_type_node, destptr);
+ gfc_add_modify (&cond_block, ptr, destptr);
+
+ nonalloc = fold_build2_loc (input_location, EQ_EXPR, logical_type_node,
+ destptr, null_pointer_node);
+ cond = nonalloc;
+ if (GFC_DESCRIPTOR_TYPE_P (type))
+ {
+ int i;
+ for (i = 0; i < GFC_TYPE_ARRAY_RANK (type); i++)
+ {
+ tree rank = gfc_rank_cst[i];
+ tree tem = gfc_conv_descriptor_ubound_get (src, rank);
+ tem = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, tem,
+ gfc_conv_descriptor_lbound_get (src, rank));
+ tem = fold_build2_loc (input_location, PLUS_EXPR,
+ gfc_array_index_type, tem,
+ gfc_conv_descriptor_lbound_get (dest, rank));
+ tem = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
+ tem, gfc_conv_descriptor_ubound_get (dest,
+ rank));
+ cond = fold_build2_loc (input_location, TRUTH_ORIF_EXPR,
+ logical_type_node, cond, tem);
+ }
+ }
+
+ gfc_init_block (&cond_block2);
+
+ if (GFC_DESCRIPTOR_TYPE_P (type))
+ {
+ gfc_init_block (&inner_block);
+ gfc_allocate_using_malloc (&inner_block, ptr, size, NULL_TREE);
+ then_b = gfc_finish_block (&inner_block);
+
+ gfc_init_block (&inner_block);
+ gfc_add_modify (&inner_block, ptr,
+ gfc_call_realloc (&inner_block, ptr, size));
+ else_b = gfc_finish_block (&inner_block);
+
+ gfc_add_expr_to_block (&cond_block2,
+ build3_loc (input_location, COND_EXPR,
+ void_type_node,
+ unshare_expr (nonalloc),
+ then_b, else_b));
+ gfc_add_modify (&cond_block2, dest, src);
+ gfc_conv_descriptor_data_set (&cond_block2, unshare_expr (dest), ptr);
+ }
+ else
+ {
+ gfc_allocate_using_malloc (&cond_block2, ptr, size, NULL_TREE);
+ gfc_add_modify (&cond_block2, unshare_expr (dest),
+ fold_convert (type, ptr));
+ }
+ then_b = gfc_finish_block (&cond_block2);
+ else_b = build_empty_stmt (input_location);
+
+ gfc_add_expr_to_block (&cond_block,
+ build3_loc (input_location, COND_EXPR,
+ void_type_node, unshare_expr (cond),
+ then_b, else_b));
+
+ tree srcptr = GFC_DESCRIPTOR_TYPE_P (type)
+ ? gfc_conv_descriptor_data_get (src) : src;
+ srcptr = unshare_expr (srcptr);
+ srcptr = fold_convert (pvoid_type_node, srcptr);
+ call = build_call_expr_loc (input_location,
+ builtin_decl_explicit (BUILT_IN_MEMCPY), 3, ptr,
+ srcptr, size);
+ gfc_add_expr_to_block (&cond_block, fold_convert (void_type_node, call));
+ if (gfc_has_alloc_comps (type, OMP_CLAUSE_DECL (clause)))
+ {
+ tree tem = gfc_walk_alloc_comps (src, dest,
+ OMP_CLAUSE_DECL (clause),
+ WALK_ALLOC_COMPS_COPY_CTOR);
+ gfc_add_expr_to_block (&cond_block, tem);
+ }
+ then_b = gfc_finish_block (&cond_block);
+
+ if (OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_COPYIN)
+ {
+ gfc_init_block (&cond_block);
+ if (GFC_DESCRIPTOR_TYPE_P (type))
+ {
+ tree tmp = gfc_conv_descriptor_data_get (unshare_expr (dest));
+ tmp = gfc_deallocate_with_status (tmp, NULL_TREE, NULL_TREE,
+ NULL_TREE, NULL_TREE, true, NULL,
+ GFC_CAF_COARRAY_NOCOARRAY);
+ gfc_add_expr_to_block (&cond_block, tmp);
+ }
+ else
+ {
+ destptr = gfc_evaluate_now (destptr, &cond_block);
+ gfc_add_expr_to_block (&cond_block, gfc_call_free (destptr));
+ gfc_add_modify (&cond_block, unshare_expr (dest),
+ build_zero_cst (TREE_TYPE (dest)));
+ }
+ else_b = gfc_finish_block (&cond_block);
+
+ cond = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
+ unshare_expr (srcptr), null_pointer_node);
+ gfc_add_expr_to_block (&block,
+ build3_loc (input_location, COND_EXPR,
+ void_type_node, cond,
+ then_b, else_b));
+ }
+ else
+ gfc_add_expr_to_block (&block, then_b);
+
+ return gfc_finish_block (&block);
+}
+
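+/* Build a scalarizing loop in BLOCK assigning DEST[i] = SRC[i] + ADD for
+ indices 0 .. NELEMS; helper for the LINEAR clause code below. */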
+static void
+gfc_omp_linear_clause_add_loop (stmtblock_t *block, tree dest, tree src,
+ tree add, tree nelems)
+{
+ stmtblock_t tmpblock;
+ tree desta, srca, index = gfc_create_var (gfc_array_index_type, "S");
+ nelems = gfc_evaluate_now (nelems, block);
+
+ gfc_init_block (&tmpblock);
+ if (TREE_CODE (TREE_TYPE (dest)) == ARRAY_TYPE)
+ {
+ desta = gfc_build_array_ref (dest, index, NULL);
+ srca = gfc_build_array_ref (src, index, NULL);
+ }
+ else
+ {
+ gcc_assert (POINTER_TYPE_P (TREE_TYPE (dest)));
+ tree idx = fold_build2 (MULT_EXPR, sizetype,
+ fold_convert (sizetype, index),
+ TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (dest))));
+ desta = build_fold_indirect_ref (fold_build2 (POINTER_PLUS_EXPR,
+ TREE_TYPE (dest), dest,
+ idx));
+ srca = build_fold_indirect_ref (fold_build2 (POINTER_PLUS_EXPR,
+ TREE_TYPE (src), src,
+ idx));
+ }
+ gfc_add_modify (&tmpblock, desta,
+ fold_build2 (PLUS_EXPR, TREE_TYPE (desta),
+ srca, add));
+
+ gfc_loopinfo loop;
+ gfc_init_loopinfo (&loop);
+ loop.dimen = 1;
+ loop.from[0] = gfc_index_zero_node;
+ loop.loopvar[0] = index;
+ loop.to[0] = nelems;
+ gfc_trans_scalarizing_loops (&loop, &tmpblock);
+ gfc_add_block_to_block (block, &loop.pre);
+}
+
+/* Build and return code for a constructor of DEST that initializes
+ it to SRC plus ADD (ADD is scalar integer). */
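+
+/* For instance, each element of the private copy of a LINEAR list item is
+ set to the corresponding element of the original variable plus the scalar
+ offset ADD; allocatable arrays are first allocated to match SRC. */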
+
+tree
+gfc_omp_clause_linear_ctor (tree clause, tree dest, tree src, tree add)
+{
+ tree type = TREE_TYPE (dest), ptr, size, nelems = NULL_TREE;
+ stmtblock_t block;
+
+ gcc_assert (OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_LINEAR);
+
+ gfc_start_block (&block);
+ add = gfc_evaluate_now (add, &block);
+
+ if ((! GFC_DESCRIPTOR_TYPE_P (type)
+ || GFC_TYPE_ARRAY_AKIND (type) != GFC_ARRAY_ALLOCATABLE)
+ && (!GFC_DECL_GET_SCALAR_ALLOCATABLE (OMP_CLAUSE_DECL (clause))
+ || !POINTER_TYPE_P (type)))
+ {
+ bool compute_nelts = false;
+ gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
+ if (!TYPE_DOMAIN (type)
+ || TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == NULL_TREE
+ || TYPE_MIN_VALUE (TYPE_DOMAIN (type)) == error_mark_node
+ || TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == error_mark_node)
+ compute_nelts = true;
+ else if (VAR_P (TYPE_MAX_VALUE (TYPE_DOMAIN (type))))
+ {
+ tree a = DECL_ATTRIBUTES (TYPE_MAX_VALUE (TYPE_DOMAIN (type)));
+ if (lookup_attribute ("omp dummy var", a))
+ compute_nelts = true;
+ }
+ if (compute_nelts)
+ {
+ nelems = fold_build2 (EXACT_DIV_EXPR, sizetype,
+ TYPE_SIZE_UNIT (type),
+ TYPE_SIZE_UNIT (TREE_TYPE (type)));
+ nelems = size_binop (MINUS_EXPR, nelems, size_one_node);
+ }
+ else
+ nelems = array_type_nelts (type);
+ nelems = fold_convert (gfc_array_index_type, nelems);
+
+ gfc_omp_linear_clause_add_loop (&block, dest, src, add, nelems);
+ return gfc_finish_block (&block);
+ }
+
+ /* Allocatable arrays in LINEAR clauses need to be allocated
+ and copied from SRC. */
+ gfc_add_modify (&block, dest, src);
+ if (GFC_DESCRIPTOR_TYPE_P (type))
+ {
+ tree rank = gfc_rank_cst[GFC_TYPE_ARRAY_RANK (type) - 1];
+ size = gfc_conv_descriptor_ubound_get (dest, rank);
+ size = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
+ size,
+ gfc_conv_descriptor_lbound_get (dest, rank));
+ size = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
+ size, gfc_index_one_node);
+ if (GFC_TYPE_ARRAY_RANK (type) > 1)
+ size = fold_build2_loc (input_location, MULT_EXPR,
+ gfc_array_index_type, size,
+ gfc_conv_descriptor_stride_get (dest, rank));
+ tree esize = fold_convert (gfc_array_index_type,
+ TYPE_SIZE_UNIT (gfc_get_element_type (type)));
+ nelems = gfc_evaluate_now (unshare_expr (size), &block);
+ size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
+ nelems, unshare_expr (esize));
+ size = gfc_evaluate_now (fold_convert (size_type_node, size),
+ &block);
+ nelems = fold_build2_loc (input_location, MINUS_EXPR,
+ gfc_array_index_type, nelems,
+ gfc_index_one_node);
+ }
+ else
+ size = fold_convert (size_type_node, TYPE_SIZE_UNIT (TREE_TYPE (type)));
+ ptr = gfc_create_var (pvoid_type_node, NULL);
+ gfc_allocate_using_malloc (&block, ptr, size, NULL_TREE);
+ if (GFC_DESCRIPTOR_TYPE_P (type))
+ {
+ gfc_conv_descriptor_data_set (&block, unshare_expr (dest), ptr);
+ tree etype = gfc_get_element_type (type);
+ ptr = fold_convert (build_pointer_type (etype), ptr);
+ tree srcptr = gfc_conv_descriptor_data_get (unshare_expr (src));
+ srcptr = fold_convert (build_pointer_type (etype), srcptr);
+ gfc_omp_linear_clause_add_loop (&block, ptr, srcptr, add, nelems);
+ }
+ else
+ {
+ gfc_add_modify (&block, unshare_expr (dest),
+ fold_convert (TREE_TYPE (dest), ptr));
+ ptr = fold_convert (TREE_TYPE (dest), ptr);
+ tree dstm = build_fold_indirect_ref (ptr);
+ tree srcm = build_fold_indirect_ref (unshare_expr (src));
+ gfc_add_modify (&block, dstm,
+ fold_build2 (PLUS_EXPR, TREE_TYPE (add), srcm, add));
+ }
+ return gfc_finish_block (&block);
+}
+
+/* Build and return code destructing DECL. Return NULL if nothing
+ to be done. */
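+
+/* For instance, at the end of the region the private copy of an
+ 'integer, allocatable :: a(:)' variable is deallocated if currently
+ allocated, after any allocatable components have been freed; nonpointer
+ polymorphic variables are finalized via the class _final procedure and
+ then freed. */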
+
+tree
+gfc_omp_clause_dtor (tree clause, tree decl)
+{
+ tree type = TREE_TYPE (decl), tem;
+ tree decl_type = TREE_TYPE (OMP_CLAUSE_DECL (clause));
+
+ if (DECL_ARTIFICIAL (OMP_CLAUSE_DECL (clause))
+ && DECL_LANG_SPECIFIC (OMP_CLAUSE_DECL (clause))
+ && GFC_DECL_SAVED_DESCRIPTOR (OMP_CLAUSE_DECL (clause)))
+ decl_type
+ = TREE_TYPE (GFC_DECL_SAVED_DESCRIPTOR (OMP_CLAUSE_DECL (clause)));
+ if (gfc_is_polymorphic_nonptr (decl_type))
+ {
+ if (POINTER_TYPE_P (decl_type))
+ decl_type = TREE_TYPE (decl_type);
+ decl_type = TREE_TYPE (TYPE_FIELDS (decl_type));
+ if (GFC_DESCRIPTOR_TYPE_P (decl_type) || GFC_ARRAY_TYPE_P (decl_type))
+ fatal_error (input_location,
+ "Sorry, polymorphic arrays not yet supported for "
+ "firstprivate");
+ stmtblock_t block, cond_block;
+ gfc_start_block (&block);
+ gfc_init_block (&cond_block);
+ tree final = gfc_class_vtab_final_get (decl);
+ tree size = fold_convert (size_type_node, gfc_class_vtab_size_get (decl));
+ gfc_se se;
+ gfc_init_se (&se, NULL);
+ symbol_attribute attr = {};
+ tree data = gfc_class_data_get (decl);
+ tree desc = gfc_conv_scalar_to_descriptor (&se, data, attr);
+
+ /* Call class->_vptr->_final + free. */
+ tree call = build_fold_indirect_ref (final);
+ call = build_call_expr_loc (input_location, call, 3,
+ gfc_build_addr_expr (NULL, desc),
+ size, boolean_false_node);
+ gfc_add_block_to_block (&cond_block, &se.pre);
+ gfc_add_expr_to_block (&cond_block, fold_convert (void_type_node, call));
+ gfc_add_block_to_block (&cond_block, &se.post);
+ /* Create: if (_vtab && _final) <cond_block> */
+ tree cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ gfc_class_vptr_get (decl),
+ null_pointer_node);
+ tree cond2 = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
+ final, null_pointer_node);
+ cond = fold_build2_loc (input_location, TRUTH_ANDIF_EXPR,
+ boolean_type_node, cond, cond2);
+ gfc_add_expr_to_block (&block, build3_loc (input_location, COND_EXPR,
+ void_type_node, cond,
+ gfc_finish_block (&cond_block), NULL_TREE));
+ call = builtin_decl_explicit (BUILT_IN_FREE);
+ call = build_call_expr_loc (input_location, call, 1, data);
+ gfc_add_expr_to_block (&block, fold_convert (void_type_node, call));
+ return gfc_finish_block (&block);
+ }
+
+ if ((! GFC_DESCRIPTOR_TYPE_P (type)
+ || GFC_TYPE_ARRAY_AKIND (type) != GFC_ARRAY_ALLOCATABLE)
+ && (!GFC_DECL_GET_SCALAR_ALLOCATABLE (OMP_CLAUSE_DECL (clause))
+ || !POINTER_TYPE_P (type)))
+ {
+ if (gfc_has_alloc_comps (type, OMP_CLAUSE_DECL (clause)))
+ return gfc_walk_alloc_comps (decl, NULL_TREE,
+ OMP_CLAUSE_DECL (clause),
+ WALK_ALLOC_COMPS_DTOR);
+ return NULL_TREE;
+ }
+
+ if (GFC_DESCRIPTOR_TYPE_P (type))
+ {
+ /* Allocatable arrays in FIRSTPRIVATE/LASTPRIVATE etc. clauses need
+ to be deallocated if they were allocated. */
+ tem = gfc_conv_descriptor_data_get (decl);
+ tem = gfc_deallocate_with_status (tem, NULL_TREE, NULL_TREE, NULL_TREE,
+ NULL_TREE, true, NULL,
+ GFC_CAF_COARRAY_NOCOARRAY);
+ }
+ else
+ tem = gfc_call_free (decl);
+ tem = gfc_omp_unshare_expr (tem);
+
+ if (gfc_has_alloc_comps (type, OMP_CLAUSE_DECL (clause)))
+ {
+ stmtblock_t block;
+ tree then_b;
+
+ gfc_init_block (&block);
+ gfc_add_expr_to_block (&block,
+ gfc_walk_alloc_comps (decl, NULL_TREE,
+ OMP_CLAUSE_DECL (clause),
+ WALK_ALLOC_COMPS_DTOR));
+ gfc_add_expr_to_block (&block, tem);
+ then_b = gfc_finish_block (&block);
+
+ tem = fold_convert (pvoid_type_node,
+ GFC_DESCRIPTOR_TYPE_P (type)
+ ? gfc_conv_descriptor_data_get (decl) : decl);
+ tem = unshare_expr (tem);
+ tree cond = fold_build2_loc (input_location, NE_EXPR, logical_type_node,
+ tem, null_pointer_node);
+ tem = build3_loc (input_location, COND_EXPR, void_type_node, cond,
+ then_b, build_empty_stmt (input_location));
+ }
+ return tem;
+}
+
+/* Build a conditional expression in BLOCK. If COND_VAL evaluates to
+ true, the block THEN_B is executed; otherwise, ELSE_VAL (when non-NULL)
+ is assigned to VAL. */
+
+static void
+gfc_build_cond_assign (stmtblock_t *block, tree val, tree cond_val,
+ tree then_b, tree else_val)
+{
+ stmtblock_t cond_block;
+ tree else_b = NULL_TREE;
+ tree val_ty = TREE_TYPE (val);
+
+ if (else_val)
+ {
+ gfc_init_block (&cond_block);
+ gfc_add_modify (&cond_block, val, fold_convert (val_ty, else_val));
+ else_b = gfc_finish_block (&cond_block);
+ }
+ gfc_add_expr_to_block (block,
+ build3_loc (input_location, COND_EXPR, void_type_node,
+ cond_val, then_b, else_b));
+}
+
+/* Build a conditional expression in BLOCK, returning a temporary
+ variable containing the result. If COND_VAL evaluates to true,
+ THEN_VAL is assigned to the variable, otherwise ELSE_VAL is. */
+
+static tree
+gfc_build_cond_assign_expr (stmtblock_t *block, tree cond_val,
+ tree then_val, tree else_val)
+{
+ tree val;
+ tree val_ty = TREE_TYPE (then_val);
+ stmtblock_t cond_block;
+
+ val = create_tmp_var (val_ty);
+
+ gfc_init_block (&cond_block);
+ gfc_add_modify (&cond_block, val, then_val);
+ tree then_b = gfc_finish_block (&cond_block);
+
+ gfc_build_cond_assign (block, val, cond_val, then_b, else_val);
+
+ return val;
+}
+
+void
+gfc_omp_finish_clause (tree c, gimple_seq *pre_p, bool openacc)
+{
+ if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP)
+ return;
+
+ tree decl = OMP_CLAUSE_DECL (c);
+
+ /* Assumed-size arrays can't be mapped implicitly, they have to be
+ mapped explicitly using array sections. */
+ if (TREE_CODE (decl) == PARM_DECL
+ && GFC_ARRAY_TYPE_P (TREE_TYPE (decl))
+ && GFC_TYPE_ARRAY_AKIND (TREE_TYPE (decl)) == GFC_ARRAY_UNKNOWN
+ && GFC_TYPE_ARRAY_UBOUND (TREE_TYPE (decl),
+ GFC_TYPE_ARRAY_RANK (TREE_TYPE (decl)) - 1)
+ == NULL)
+ {
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "implicit mapping of assumed size array %qD", decl);
+ return;
+ }
+
+ tree c2 = NULL_TREE, c3 = NULL_TREE, c4 = NULL_TREE;
+ tree present = gfc_omp_check_optional_argument (decl, true);
+ if (POINTER_TYPE_P (TREE_TYPE (decl)))
+ {
+ if (!gfc_omp_privatize_by_reference (decl)
+ && !GFC_DECL_GET_SCALAR_POINTER (decl)
+ && !GFC_DECL_GET_SCALAR_ALLOCATABLE (decl)
+ && !GFC_DECL_CRAY_POINTEE (decl)
+ && !GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (TREE_TYPE (decl))))
+ return;
+ tree orig_decl = decl;
+
+ c4 = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_MAP);
+ OMP_CLAUSE_SET_MAP_KIND (c4, GOMP_MAP_POINTER);
+ OMP_CLAUSE_DECL (c4) = decl;
+ OMP_CLAUSE_SIZE (c4) = size_int (0);
+ decl = build_fold_indirect_ref (decl);
+ if (present
+ && (GFC_DECL_GET_SCALAR_POINTER (orig_decl)
+ || GFC_DECL_GET_SCALAR_ALLOCATABLE (orig_decl)))
+ {
+ c2 = build_omp_clause (input_location, OMP_CLAUSE_MAP);
+ OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_POINTER);
+ OMP_CLAUSE_DECL (c2) = decl;
+ OMP_CLAUSE_SIZE (c2) = size_int (0);
+
+ stmtblock_t block;
+ gfc_start_block (&block);
+ tree ptr = decl;
+ ptr = gfc_build_cond_assign_expr (&block, present, decl,
+ null_pointer_node);
+ gimplify_and_add (gfc_finish_block (&block), pre_p);
+ ptr = build_fold_indirect_ref (ptr);
+ OMP_CLAUSE_DECL (c) = ptr;
+ OMP_CLAUSE_SIZE (c) = TYPE_SIZE_UNIT (TREE_TYPE (ptr));
+ }
+ else
+ {
+ OMP_CLAUSE_DECL (c) = decl;
+ OMP_CLAUSE_SIZE (c) = NULL_TREE;
+ }
+ if (TREE_CODE (TREE_TYPE (orig_decl)) == REFERENCE_TYPE
+ && (GFC_DECL_GET_SCALAR_POINTER (orig_decl)
+ || GFC_DECL_GET_SCALAR_ALLOCATABLE (orig_decl)))
+ {
+ c3 = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_MAP);
+ OMP_CLAUSE_SET_MAP_KIND (c3, GOMP_MAP_POINTER);
+ OMP_CLAUSE_DECL (c3) = unshare_expr (decl);
+ OMP_CLAUSE_SIZE (c3) = size_int (0);
+ decl = build_fold_indirect_ref (decl);
+ OMP_CLAUSE_DECL (c) = decl;
+ }
+ }
+ if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (decl)))
+ {
+ stmtblock_t block;
+ gfc_start_block (&block);
+ tree type = TREE_TYPE (decl);
+ tree ptr = gfc_conv_descriptor_data_get (decl);
+
+ /* OpenMP: automatically map pointer targets with the pointer;
+ hence, always update the descriptor/pointer itself.
+ NOTE: This also remaps the pointer for allocatable arrays with
+ 'target' attribute which also don't have the 'restrict' qualifier. */
+ bool always_modifier = false;
+
+ if (!openacc
+ && !(TYPE_QUALS (TREE_TYPE (ptr)) & TYPE_QUAL_RESTRICT))
+ always_modifier = true;
+
+ if (present)
+ ptr = gfc_build_cond_assign_expr (&block, present, ptr,
+ null_pointer_node);
+ gcc_assert (POINTER_TYPE_P (TREE_TYPE (ptr)));
+ ptr = build_fold_indirect_ref (ptr);
+ OMP_CLAUSE_DECL (c) = ptr;
+ c2 = build_omp_clause (input_location, OMP_CLAUSE_MAP);
+ OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_TO_PSET);
+ if (present)
+ {
+ ptr = create_tmp_var (TREE_TYPE (TREE_OPERAND (decl, 0)));
+ gfc_add_modify (&block, ptr, TREE_OPERAND (decl, 0));
+
+ OMP_CLAUSE_DECL (c2) = build_fold_indirect_ref (ptr);
+ }
+ else
+ OMP_CLAUSE_DECL (c2) = decl;
+ OMP_CLAUSE_SIZE (c2) = TYPE_SIZE_UNIT (type);
+ c3 = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_MAP);
+ OMP_CLAUSE_SET_MAP_KIND (c3, always_modifier ? GOMP_MAP_ALWAYS_POINTER
+ : GOMP_MAP_POINTER);
+ if (present)
+ {
+ ptr = gfc_conv_descriptor_data_get (decl);
+ ptr = gfc_build_addr_expr (NULL, ptr);
+ ptr = gfc_build_cond_assign_expr (&block, present,
+ ptr, null_pointer_node);
+ ptr = build_fold_indirect_ref (ptr);
+ OMP_CLAUSE_DECL (c3) = ptr;
+ }
+ else
+ OMP_CLAUSE_DECL (c3) = gfc_conv_descriptor_data_get (decl);
+ OMP_CLAUSE_SIZE (c3) = size_int (0);
+ tree size = create_tmp_var (gfc_array_index_type);
+ tree elemsz = TYPE_SIZE_UNIT (gfc_get_element_type (type));
+ elemsz = fold_convert (gfc_array_index_type, elemsz);
+ if (GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_POINTER
+ || GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_POINTER_CONT)
+ {
+ stmtblock_t cond_block;
+ tree tem, then_b, else_b, zero, cond;
+
+ gfc_init_block (&cond_block);
+ tem = gfc_full_array_size (&cond_block, decl,
+ GFC_TYPE_ARRAY_RANK (type));
+ gfc_add_modify (&cond_block, size, tem);
+ gfc_add_modify (&cond_block, size,
+ fold_build2 (MULT_EXPR, gfc_array_index_type,
+ size, elemsz));
+ then_b = gfc_finish_block (&cond_block);
+ gfc_init_block (&cond_block);
+ zero = build_int_cst (gfc_array_index_type, 0);
+ gfc_add_modify (&cond_block, size, zero);
+ else_b = gfc_finish_block (&cond_block);
+ tem = gfc_conv_descriptor_data_get (decl);
+ tem = fold_convert (pvoid_type_node, tem);
+ cond = fold_build2_loc (input_location, NE_EXPR,
+ boolean_type_node, tem, null_pointer_node);
+ if (present)
+ {
+ cond = fold_build2_loc (input_location, TRUTH_ANDIF_EXPR,
+ boolean_type_node, present, cond);
+ }
+ gfc_add_expr_to_block (&block, build3_loc (input_location, COND_EXPR,
+ void_type_node, cond,
+ then_b, else_b));
+ }
+ else if (present)
+ {
+ stmtblock_t cond_block;
+ tree then_b;
+
+ gfc_init_block (&cond_block);
+ gfc_add_modify (&cond_block, size,
+ gfc_full_array_size (&cond_block, decl,
+ GFC_TYPE_ARRAY_RANK (type)));
+ gfc_add_modify (&cond_block, size,
+ fold_build2 (MULT_EXPR, gfc_array_index_type,
+ size, elemsz));
+ then_b = gfc_finish_block (&cond_block);
+
+ gfc_build_cond_assign (&block, size, present, then_b,
+ build_int_cst (gfc_array_index_type, 0));
+ }
+ else
+ {
+ gfc_add_modify (&block, size,
+ gfc_full_array_size (&block, decl,
+ GFC_TYPE_ARRAY_RANK (type)));
+ gfc_add_modify (&block, size,
+ fold_build2 (MULT_EXPR, gfc_array_index_type,
+ size, elemsz));
+ }
+ OMP_CLAUSE_SIZE (c) = size;
+ tree stmt = gfc_finish_block (&block);
+ gimplify_and_add (stmt, pre_p);
+ }
+ tree last = c;
+ if (OMP_CLAUSE_SIZE (c) == NULL_TREE)
+ OMP_CLAUSE_SIZE (c)
+ = DECL_P (decl) ? DECL_SIZE_UNIT (decl)
+ : TYPE_SIZE_UNIT (TREE_TYPE (decl));
+ if (gimplify_expr (&OMP_CLAUSE_SIZE (c), pre_p,
+ NULL, is_gimple_val, fb_rvalue) == GS_ERROR)
+ OMP_CLAUSE_SIZE (c) = size_int (0);
+ if (c2)
+ {
+ OMP_CLAUSE_CHAIN (c2) = OMP_CLAUSE_CHAIN (last);
+ OMP_CLAUSE_CHAIN (last) = c2;
+ last = c2;
+ }
+ if (c3)
+ {
+ OMP_CLAUSE_CHAIN (c3) = OMP_CLAUSE_CHAIN (last);
+ OMP_CLAUSE_CHAIN (last) = c3;
+ last = c3;
+ }
+ if (c4)
+ {
+ OMP_CLAUSE_CHAIN (c4) = OMP_CLAUSE_CHAIN (last);
+ OMP_CLAUSE_CHAIN (last) = c4;
+ }
+}
+
+
+/* Return true if DECL is a scalar variable (for the purpose of
+ implicit firstprivatization/mapping). Allocatables and pointers
+ are permitted only if 'ptr_alloc_ok' is true. */
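+
+/* For instance, 'integer :: i' and 'real :: x' qualify, while character,
+ array and CLASS variables do not; 'integer, allocatable :: i' qualifies
+ only when 'ptr_alloc_ok' is true. */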
+
+bool
+gfc_omp_scalar_p (tree decl, bool ptr_alloc_ok)
+{
+ tree type = TREE_TYPE (decl);
+ if (TREE_CODE (type) == REFERENCE_TYPE)
+ type = TREE_TYPE (type);
+ if (TREE_CODE (type) == POINTER_TYPE)
+ {
+ if (GFC_DECL_GET_SCALAR_ALLOCATABLE (decl)
+ || GFC_DECL_GET_SCALAR_POINTER (decl))
+ {
+ if (!ptr_alloc_ok)
+ return false;
+ type = TREE_TYPE (type);
+ }
+ if (GFC_ARRAY_TYPE_P (type)
+ || GFC_CLASS_TYPE_P (type))
+ return false;
+ }
+ if ((TREE_CODE (type) == ARRAY_TYPE || TREE_CODE (type) == INTEGER_TYPE)
+ && TYPE_STRING_FLAG (type))
+ return false;
+ if (INTEGRAL_TYPE_P (type)
+ || SCALAR_FLOAT_TYPE_P (type)
+ || COMPLEX_FLOAT_TYPE_P (type))
+ return true;
+ return false;
+}
+
+
+/* Return true if DECL is a scalar with the TARGET attribute but without the
+ allocatable (or pointer) attribute (for the purpose of implicit mapping). */
+
+bool
+gfc_omp_scalar_target_p (tree decl)
+{
+ return (DECL_P (decl) && GFC_DECL_GET_SCALAR_TARGET (decl)
+ && gfc_omp_scalar_p (decl, false));
+}
+
+
+/* Return true if DECL's DECL_VALUE_EXPR (if any) should be
+ disregarded in OpenMP construct, because it is going to be
+ remapped during OpenMP lowering. SHARED is true if DECL
+ is going to be shared, false if it is going to be privatized. */
+
+bool
+gfc_omp_disregard_value_expr (tree decl, bool shared)
+{
+ if (GFC_DECL_COMMON_OR_EQUIV (decl)
+ && DECL_HAS_VALUE_EXPR_P (decl))
+ {
+ tree value = DECL_VALUE_EXPR (decl);
+
+ if (TREE_CODE (value) == COMPONENT_REF
+ && VAR_P (TREE_OPERAND (value, 0))
+ && GFC_DECL_COMMON_OR_EQUIV (TREE_OPERAND (value, 0)))
+ {
+ /* If variable in COMMON or EQUIVALENCE is privatized, return
+ true, as just that variable is supposed to be privatized,
+ not the whole COMMON or whole EQUIVALENCE.
+ For shared variables in COMMON or EQUIVALENCE, let them be
+ gimplified to DECL_VALUE_EXPR, so that for multiple shared vars
+ from the same COMMON or EQUIVALENCE just one sharing of the
+ whole COMMON or EQUIVALENCE is enough. */
+ return ! shared;
+ }
+ }
+
+ if (GFC_DECL_RESULT (decl) && DECL_HAS_VALUE_EXPR_P (decl))
+ return ! shared;
+
+ return false;
+}
+
+/* Return true if DECL that is shared iff SHARED is true should
+ be put into OMP_CLAUSE_PRIVATE with OMP_CLAUSE_PRIVATE_DEBUG
+ flag set. */
+
+bool
+gfc_omp_private_debug_clause (tree decl, bool shared)
+{
+ if (GFC_DECL_CRAY_POINTEE (decl))
+ return true;
+
+ if (GFC_DECL_COMMON_OR_EQUIV (decl)
+ && DECL_HAS_VALUE_EXPR_P (decl))
+ {
+ tree value = DECL_VALUE_EXPR (decl);
+
+ if (TREE_CODE (value) == COMPONENT_REF
+ && VAR_P (TREE_OPERAND (value, 0))
+ && GFC_DECL_COMMON_OR_EQUIV (TREE_OPERAND (value, 0)))
+ return shared;
+ }
+
+ return false;
+}
+
+/* Register language specific type size variables as potentially OpenMP
+ firstprivate variables. */
+
+void
+gfc_omp_firstprivatize_type_sizes (struct gimplify_omp_ctx *ctx, tree type)
+{
+ if (GFC_ARRAY_TYPE_P (type) || GFC_DESCRIPTOR_TYPE_P (type))
+ {
+ int r;
+
+ gcc_assert (TYPE_LANG_SPECIFIC (type) != NULL);
+ for (r = 0; r < GFC_TYPE_ARRAY_RANK (type); r++)
+ {
+ omp_firstprivatize_variable (ctx, GFC_TYPE_ARRAY_LBOUND (type, r));
+ omp_firstprivatize_variable (ctx, GFC_TYPE_ARRAY_UBOUND (type, r));
+ omp_firstprivatize_variable (ctx, GFC_TYPE_ARRAY_STRIDE (type, r));
+ }
+ omp_firstprivatize_variable (ctx, GFC_TYPE_ARRAY_SIZE (type));
+ omp_firstprivatize_variable (ctx, GFC_TYPE_ARRAY_OFFSET (type));
+ }
+}
+
+
+static inline tree
+gfc_trans_add_clause (tree node, tree tail)
+{
+ OMP_CLAUSE_CHAIN (node) = tail;
+ return node;
+}
+
+static tree
+gfc_trans_omp_variable (gfc_symbol *sym, bool declare_simd)
+{
+ if (declare_simd)
+ {
+ int cnt = 0;
+ gfc_symbol *proc_sym;
+ gfc_formal_arglist *f;
+
+ gcc_assert (sym->attr.dummy);
+ proc_sym = sym->ns->proc_name;
+ if (proc_sym->attr.entry_master)
+ ++cnt;
+ if (gfc_return_by_reference (proc_sym))
+ {
+ ++cnt;
+ if (proc_sym->ts.type == BT_CHARACTER)
+ ++cnt;
+ }
+ for (f = gfc_sym_get_dummy_args (proc_sym); f; f = f->next)
+ if (f->sym == sym)
+ break;
+ else if (f->sym)
+ ++cnt;
+ gcc_assert (f);
+ return build_int_cst (integer_type_node, cnt);
+ }
+
+ tree t = gfc_get_symbol_decl (sym);
+ tree parent_decl;
+ int parent_flag;
+ bool return_value;
+ bool alternate_entry;
+ bool entry_master;
+
+ return_value = sym->attr.function && sym->result == sym;
+ alternate_entry = sym->attr.function && sym->attr.entry
+ && sym->result == sym;
+ entry_master = sym->attr.result
+ && sym->ns->proc_name->attr.entry_master
+ && !gfc_return_by_reference (sym->ns->proc_name);
+ parent_decl = current_function_decl
+ ? DECL_CONTEXT (current_function_decl) : NULL_TREE;
+
+ if ((t == parent_decl && return_value)
+ || (sym->ns && sym->ns->proc_name
+ && sym->ns->proc_name->backend_decl == parent_decl
+ && (alternate_entry || entry_master)))
+ parent_flag = 1;
+ else
+ parent_flag = 0;
+
+ /* Special case for assigning the return value of a function.
+ Self recursive functions must have an explicit return value. */
+ if (return_value && (t == current_function_decl || parent_flag))
+ t = gfc_get_fake_result_decl (sym, parent_flag);
+
+ /* Similarly for alternate entry points. */
+ else if (alternate_entry
+ && (sym->ns->proc_name->backend_decl == current_function_decl
+ || parent_flag))
+ {
+ gfc_entry_list *el = NULL;
+
+ for (el = sym->ns->entries; el; el = el->next)
+ if (sym == el->sym)
+ {
+ t = gfc_get_fake_result_decl (sym, parent_flag);
+ break;
+ }
+ }
+
+ else if (entry_master
+ && (sym->ns->proc_name->backend_decl == current_function_decl
+ || parent_flag))
+ t = gfc_get_fake_result_decl (sym, parent_flag);
+
+ return t;
+}
+
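+/* Translate the symbols in NAMELIST into OpenMP clauses of kind CODE and
+   prepend them to LIST.  */
+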
+static tree
+gfc_trans_omp_variable_list (enum omp_clause_code code,
+ gfc_omp_namelist *namelist, tree list,
+ bool declare_simd)
+{
+ for (; namelist != NULL; namelist = namelist->next)
+ if (namelist->sym->attr.referenced || declare_simd)
+ {
+ tree t = gfc_trans_omp_variable (namelist->sym, declare_simd);
+ if (t != error_mark_node)
+ {
+ tree node = build_omp_clause (input_location, code);
+ OMP_CLAUSE_DECL (node) = t;
+ list = gfc_trans_add_clause (node, list);
+
+ if (code == OMP_CLAUSE_LASTPRIVATE
+ && namelist->u.lastprivate_conditional)
+ OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (node) = 1;
+ }
+ }
+ return list;
+}
+
+struct omp_udr_find_orig_data
+{
+ gfc_omp_udr *omp_udr;
+ bool omp_orig_seen;
+};
+
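+/* gfc_code_walker callback: record in DATA whether the walked expression
+   references the OMP_ORIG symbol of the user-defined reduction.  */
+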
+static int
+omp_udr_find_orig (gfc_expr **e, int *walk_subtrees ATTRIBUTE_UNUSED,
+ void *data)
+{
+ struct omp_udr_find_orig_data *cd = (struct omp_udr_find_orig_data *) data;
+ if ((*e)->expr_type == EXPR_VARIABLE
+ && (*e)->symtree->n.sym == cd->omp_udr->omp_orig)
+ cd->omp_orig_seen = true;
+
+ return 0;
+}
+
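+/* Fill in OMP_CLAUSE_REDUCTION_INIT, OMP_CLAUSE_REDUCTION_MERGE and the
+   reduction placeholder of clause C for an array reduction or a
+   user-defined reduction (UDR) on namelist item N.  Fake symbols and
+   symtrees are set up so that the initializer and the combiner can be
+   resolved and translated like ordinary assignments or calls.  */
+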
+static void
+gfc_trans_omp_array_reduction_or_udr (tree c, gfc_omp_namelist *n, locus where)
+{
+ gfc_symbol *sym = n->sym;
+ gfc_symtree *root1 = NULL, *root2 = NULL, *root3 = NULL, *root4 = NULL;
+ gfc_symtree *symtree1, *symtree2, *symtree3, *symtree4 = NULL;
+ gfc_symbol init_val_sym, outer_sym, intrinsic_sym;
+ gfc_symbol omp_var_copy[4];
+ gfc_expr *e1, *e2, *e3, *e4;
+ gfc_ref *ref;
+ tree decl, backend_decl, stmt, type, outer_decl;
+ locus old_loc = gfc_current_locus;
+ const char *iname;
+ bool t;
+ gfc_omp_udr *udr = n->u2.udr ? n->u2.udr->udr : NULL;
+
+ decl = OMP_CLAUSE_DECL (c);
+ gfc_current_locus = where;
+ type = TREE_TYPE (decl);
+ outer_decl = create_tmp_var_raw (type);
+ if (TREE_CODE (decl) == PARM_DECL
+ && TREE_CODE (type) == REFERENCE_TYPE
+ && GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (type))
+ && GFC_TYPE_ARRAY_AKIND (TREE_TYPE (type)) == GFC_ARRAY_ALLOCATABLE)
+ {
+ decl = build_fold_indirect_ref (decl);
+ type = TREE_TYPE (type);
+ }
+
+ /* Create a fake symbol for init value. */
+ memset (&init_val_sym, 0, sizeof (init_val_sym));
+ init_val_sym.ns = sym->ns;
+ init_val_sym.name = sym->name;
+ init_val_sym.ts = sym->ts;
+ init_val_sym.attr.referenced = 1;
+ init_val_sym.declared_at = where;
+ init_val_sym.attr.flavor = FL_VARIABLE;
+ if (OMP_CLAUSE_REDUCTION_CODE (c) != ERROR_MARK)
+ backend_decl = omp_reduction_init (c, gfc_sym_type (&init_val_sym));
+ else if (udr->initializer_ns)
+ backend_decl = NULL;
+ else
+ switch (sym->ts.type)
+ {
+ case BT_LOGICAL:
+ case BT_INTEGER:
+ case BT_REAL:
+ case BT_COMPLEX:
+ backend_decl = build_zero_cst (gfc_sym_type (&init_val_sym));
+ break;
+ default:
+ backend_decl = NULL_TREE;
+ break;
+ }
+ init_val_sym.backend_decl = backend_decl;
+
+ /* Create a fake symbol for the outer array reference. */
+ outer_sym = *sym;
+ if (sym->as)
+ outer_sym.as = gfc_copy_array_spec (sym->as);
+ outer_sym.attr.dummy = 0;
+ outer_sym.attr.result = 0;
+ outer_sym.attr.flavor = FL_VARIABLE;
+ outer_sym.backend_decl = outer_decl;
+ if (decl != OMP_CLAUSE_DECL (c))
+ outer_sym.backend_decl = build_fold_indirect_ref (outer_decl);
+
+ /* Create fake symtrees for it. */
+ symtree1 = gfc_new_symtree (&root1, sym->name);
+ symtree1->n.sym = sym;
+ gcc_assert (symtree1 == root1);
+
+ symtree2 = gfc_new_symtree (&root2, sym->name);
+ symtree2->n.sym = &init_val_sym;
+ gcc_assert (symtree2 == root2);
+
+ symtree3 = gfc_new_symtree (&root3, sym->name);
+ symtree3->n.sym = &outer_sym;
+ gcc_assert (symtree3 == root3);
+
+ memset (omp_var_copy, 0, sizeof omp_var_copy);
+ if (udr)
+ {
+ omp_var_copy[0] = *udr->omp_out;
+ omp_var_copy[1] = *udr->omp_in;
+ *udr->omp_out = outer_sym;
+ *udr->omp_in = *sym;
+ if (udr->initializer_ns)
+ {
+ omp_var_copy[2] = *udr->omp_priv;
+ omp_var_copy[3] = *udr->omp_orig;
+ *udr->omp_priv = *sym;
+ *udr->omp_orig = outer_sym;
+ }
+ }
+
+ /* Create expressions. */
+ e1 = gfc_get_expr ();
+ e1->expr_type = EXPR_VARIABLE;
+ e1->where = where;
+ e1->symtree = symtree1;
+ e1->ts = sym->ts;
+ if (sym->attr.dimension)
+ {
+ e1->ref = ref = gfc_get_ref ();
+ ref->type = REF_ARRAY;
+ ref->u.ar.where = where;
+ ref->u.ar.as = sym->as;
+ ref->u.ar.type = AR_FULL;
+ ref->u.ar.dimen = 0;
+ }
+ t = gfc_resolve_expr (e1);
+ gcc_assert (t);
+
+ e2 = NULL;
+ if (backend_decl != NULL_TREE)
+ {
+ e2 = gfc_get_expr ();
+ e2->expr_type = EXPR_VARIABLE;
+ e2->where = where;
+ e2->symtree = symtree2;
+ e2->ts = sym->ts;
+ t = gfc_resolve_expr (e2);
+ gcc_assert (t);
+ }
+ else if (udr->initializer_ns == NULL)
+ {
+ gcc_assert (sym->ts.type == BT_DERIVED);
+ e2 = gfc_default_initializer (&sym->ts);
+ gcc_assert (e2);
+ t = gfc_resolve_expr (e2);
+ gcc_assert (t);
+ }
+ else if (n->u2.udr->initializer->op == EXEC_ASSIGN)
+ {
+ e2 = gfc_copy_expr (n->u2.udr->initializer->expr2);
+ t = gfc_resolve_expr (e2);
+ gcc_assert (t);
+ }
+ if (udr && udr->initializer_ns)
+ {
+ struct omp_udr_find_orig_data cd;
+ cd.omp_udr = udr;
+ cd.omp_orig_seen = false;
+ gfc_code_walker (&n->u2.udr->initializer,
+ gfc_dummy_code_callback, omp_udr_find_orig, &cd);
+ if (cd.omp_orig_seen)
+ OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c) = 1;
+ }
+
+ e3 = gfc_copy_expr (e1);
+ e3->symtree = symtree3;
+ t = gfc_resolve_expr (e3);
+ gcc_assert (t);
+
+ iname = NULL;
+ e4 = NULL;
+ switch (OMP_CLAUSE_REDUCTION_CODE (c))
+ {
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ e4 = gfc_add (e3, e1);
+ break;
+ case MULT_EXPR:
+ e4 = gfc_multiply (e3, e1);
+ break;
+ case TRUTH_ANDIF_EXPR:
+ e4 = gfc_and (e3, e1);
+ break;
+ case TRUTH_ORIF_EXPR:
+ e4 = gfc_or (e3, e1);
+ break;
+ case EQ_EXPR:
+ e4 = gfc_eqv (e3, e1);
+ break;
+ case NE_EXPR:
+ e4 = gfc_neqv (e3, e1);
+ break;
+ case MIN_EXPR:
+ iname = "min";
+ break;
+ case MAX_EXPR:
+ iname = "max";
+ break;
+ case BIT_AND_EXPR:
+ iname = "iand";
+ break;
+ case BIT_IOR_EXPR:
+ iname = "ior";
+ break;
+ case BIT_XOR_EXPR:
+ iname = "ieor";
+ break;
+ case ERROR_MARK:
+ if (n->u2.udr->combiner->op == EXEC_ASSIGN)
+ {
+ gfc_free_expr (e3);
+ e3 = gfc_copy_expr (n->u2.udr->combiner->expr1);
+ e4 = gfc_copy_expr (n->u2.udr->combiner->expr2);
+ t = gfc_resolve_expr (e3);
+ gcc_assert (t);
+ t = gfc_resolve_expr (e4);
+ gcc_assert (t);
+ }
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ if (iname != NULL)
+ {
+ memset (&intrinsic_sym, 0, sizeof (intrinsic_sym));
+ intrinsic_sym.ns = sym->ns;
+ intrinsic_sym.name = iname;
+ intrinsic_sym.ts = sym->ts;
+ intrinsic_sym.attr.referenced = 1;
+ intrinsic_sym.attr.intrinsic = 1;
+ intrinsic_sym.attr.function = 1;
+ intrinsic_sym.attr.implicit_type = 1;
+ intrinsic_sym.result = &intrinsic_sym;
+ intrinsic_sym.declared_at = where;
+
+ symtree4 = gfc_new_symtree (&root4, iname);
+ symtree4->n.sym = &intrinsic_sym;
+ gcc_assert (symtree4 == root4);
+
+ e4 = gfc_get_expr ();
+ e4->expr_type = EXPR_FUNCTION;
+ e4->where = where;
+ e4->symtree = symtree4;
+ e4->value.function.actual = gfc_get_actual_arglist ();
+ e4->value.function.actual->expr = e3;
+ e4->value.function.actual->next = gfc_get_actual_arglist ();
+ e4->value.function.actual->next->expr = e1;
+ }
+ if (OMP_CLAUSE_REDUCTION_CODE (c) != ERROR_MARK)
+ {
+ /* e1 and e3 have been stored as arguments of e4, avoid sharing. */
+ e1 = gfc_copy_expr (e1);
+ e3 = gfc_copy_expr (e3);
+ t = gfc_resolve_expr (e4);
+ gcc_assert (t);
+ }
+
+ /* Create the init statement list. */
+ pushlevel ();
+ if (e2)
+ stmt = gfc_trans_assignment (e1, e2, false, false);
+ else
+ stmt = gfc_trans_call (n->u2.udr->initializer, false,
+ NULL_TREE, NULL_TREE, false);
+ if (TREE_CODE (stmt) != BIND_EXPR)
+ stmt = build3_v (BIND_EXPR, NULL, stmt, poplevel (1, 0));
+ else
+ poplevel (0, 0);
+ OMP_CLAUSE_REDUCTION_INIT (c) = stmt;
+
+ /* Create the merge statement list. */
+ pushlevel ();
+ if (e4)
+ stmt = gfc_trans_assignment (e3, e4, false, true);
+ else
+ stmt = gfc_trans_call (n->u2.udr->combiner, false,
+ NULL_TREE, NULL_TREE, false);
+ if (TREE_CODE (stmt) != BIND_EXPR)
+ stmt = build3_v (BIND_EXPR, NULL, stmt, poplevel (1, 0));
+ else
+ poplevel (0, 0);
+ OMP_CLAUSE_REDUCTION_MERGE (c) = stmt;
+
+ /* And stick the placeholder VAR_DECL into the clause as well. */
+ OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = outer_decl;
+
+ gfc_current_locus = old_loc;
+
+ gfc_free_expr (e1);
+ if (e2)
+ gfc_free_expr (e2);
+ gfc_free_expr (e3);
+ if (e4)
+ gfc_free_expr (e4);
+ free (symtree1);
+ free (symtree2);
+ free (symtree3);
+ free (symtree4);
+ if (outer_sym.as)
+ gfc_free_array_spec (outer_sym.as);
+
+ if (udr)
+ {
+ *udr->omp_out = omp_var_copy[0];
+ *udr->omp_in = omp_var_copy[1];
+ if (udr->initializer_ns)
+ {
+ *udr->omp_priv = omp_var_copy[2];
+ *udr->omp_orig = omp_var_copy[3];
+ }
+ }
+}
+
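+/* Translate the reduction namelist NAMELIST of list kind KIND into
+   OMP_CLAUSE_{,IN_,TASK_}REDUCTION clauses prepended to LIST.
+   MARK_ADDRESSABLE is set for OpenACC async reductions, whose arguments
+   must be made addressable.  */
+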
+static tree
+gfc_trans_omp_reduction_list (int kind, gfc_omp_namelist *namelist, tree list,
+ locus where, bool mark_addressable)
+{
+ omp_clause_code clause = OMP_CLAUSE_REDUCTION;
+ switch (kind)
+ {
+ case OMP_LIST_REDUCTION:
+ case OMP_LIST_REDUCTION_INSCAN:
+ case OMP_LIST_REDUCTION_TASK:
+ break;
+ case OMP_LIST_IN_REDUCTION:
+ clause = OMP_CLAUSE_IN_REDUCTION;
+ break;
+ case OMP_LIST_TASK_REDUCTION:
+ clause = OMP_CLAUSE_TASK_REDUCTION;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ for (; namelist != NULL; namelist = namelist->next)
+ if (namelist->sym->attr.referenced)
+ {
+ tree t = gfc_trans_omp_variable (namelist->sym, false);
+ if (t != error_mark_node)
+ {
+ tree node = build_omp_clause (gfc_get_location (&namelist->where),
+ clause);
+ OMP_CLAUSE_DECL (node) = t;
+ if (mark_addressable)
+ TREE_ADDRESSABLE (t) = 1;
+ if (kind == OMP_LIST_REDUCTION_INSCAN)
+ OMP_CLAUSE_REDUCTION_INSCAN (node) = 1;
+ if (kind == OMP_LIST_REDUCTION_TASK)
+ OMP_CLAUSE_REDUCTION_TASK (node) = 1;
+ switch (namelist->u.reduction_op)
+ {
+ case OMP_REDUCTION_PLUS:
+ OMP_CLAUSE_REDUCTION_CODE (node) = PLUS_EXPR;
+ break;
+ case OMP_REDUCTION_MINUS:
+ OMP_CLAUSE_REDUCTION_CODE (node) = MINUS_EXPR;
+ break;
+ case OMP_REDUCTION_TIMES:
+ OMP_CLAUSE_REDUCTION_CODE (node) = MULT_EXPR;
+ break;
+ case OMP_REDUCTION_AND:
+ OMP_CLAUSE_REDUCTION_CODE (node) = TRUTH_ANDIF_EXPR;
+ break;
+ case OMP_REDUCTION_OR:
+ OMP_CLAUSE_REDUCTION_CODE (node) = TRUTH_ORIF_EXPR;
+ break;
+ case OMP_REDUCTION_EQV:
+ OMP_CLAUSE_REDUCTION_CODE (node) = EQ_EXPR;
+ break;
+ case OMP_REDUCTION_NEQV:
+ OMP_CLAUSE_REDUCTION_CODE (node) = NE_EXPR;
+ break;
+ case OMP_REDUCTION_MAX:
+ OMP_CLAUSE_REDUCTION_CODE (node) = MAX_EXPR;
+ break;
+ case OMP_REDUCTION_MIN:
+ OMP_CLAUSE_REDUCTION_CODE (node) = MIN_EXPR;
+ break;
+ case OMP_REDUCTION_IAND:
+ OMP_CLAUSE_REDUCTION_CODE (node) = BIT_AND_EXPR;
+ break;
+ case OMP_REDUCTION_IOR:
+ OMP_CLAUSE_REDUCTION_CODE (node) = BIT_IOR_EXPR;
+ break;
+ case OMP_REDUCTION_IEOR:
+ OMP_CLAUSE_REDUCTION_CODE (node) = BIT_XOR_EXPR;
+ break;
+ case OMP_REDUCTION_USER:
+ OMP_CLAUSE_REDUCTION_CODE (node) = ERROR_MARK;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ if (namelist->sym->attr.dimension
+ || namelist->u.reduction_op == OMP_REDUCTION_USER
+ || namelist->sym->attr.allocatable)
+ gfc_trans_omp_array_reduction_or_udr (node, namelist, where);
+ list = gfc_trans_add_clause (node, list);
+ }
+ }
+ return list;
+}
+
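+/* Convert the scalar expression EXPR to a tree, emitting any needed setup
+   code into BLOCK and returning the evaluated value.  */
+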
+static inline tree
+gfc_convert_expr_to_tree (stmtblock_t *block, gfc_expr *expr)
+{
+ gfc_se se;
+ tree result;
+
+ gfc_init_se (&se, NULL );
+ gfc_conv_expr (&se, expr);
+ gfc_add_block_to_block (block, &se.pre);
+ result = gfc_evaluate_now (se.expr, block);
+ gfc_add_block_to_block (block, &se.post);
+
+ return result;
+}
+
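+/* Step sizes of the loops of the enclosing doacross ('ordered(n)') loop
+   nest; used below to divide the addends of depend(sink:) items.  */
+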
+static vec<tree, va_heap, vl_embed> *doacross_steps;
+
+
+/* Translate an array section or array element for a map clause: fill in
+   OMP_CLAUSE_DECL and OMP_CLAUSE_SIZE of NODE with the section's data and
+   size, and set NODE2, NODE3 and NODE4 to auxiliary map clauses (descriptor,
+   data pointer, pointer attachment or deferred string length) as needed.  */
+
+static void
+gfc_trans_omp_array_section (stmtblock_t *block, gfc_omp_namelist *n,
+ tree decl, bool element, gomp_map_kind ptr_kind,
+ tree &node, tree &node2, tree &node3, tree &node4)
+{
+ gfc_se se;
+ tree ptr, ptr2;
+ tree elemsz = NULL_TREE;
+
+ gfc_init_se (&se, NULL);
+
+ if (element)
+ {
+ gfc_conv_expr_reference (&se, n->expr);
+ gfc_add_block_to_block (block, &se.pre);
+ ptr = se.expr;
+ OMP_CLAUSE_SIZE (node) = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (ptr)));
+ elemsz = OMP_CLAUSE_SIZE (node);
+ }
+ else
+ {
+ gfc_conv_expr_descriptor (&se, n->expr);
+ ptr = gfc_conv_array_data (se.expr);
+ tree type = TREE_TYPE (se.expr);
+ gfc_add_block_to_block (block, &se.pre);
+ OMP_CLAUSE_SIZE (node) = gfc_full_array_size (block, se.expr,
+ GFC_TYPE_ARRAY_RANK (type));
+ elemsz = TYPE_SIZE_UNIT (gfc_get_element_type (type));
+ elemsz = fold_convert (gfc_array_index_type, elemsz);
+ OMP_CLAUSE_SIZE (node) = fold_build2 (MULT_EXPR, gfc_array_index_type,
+ OMP_CLAUSE_SIZE (node), elemsz);
+ }
+ gcc_assert (se.post.head == NULL_TREE);
+ gcc_assert (POINTER_TYPE_P (TREE_TYPE (ptr)));
+ OMP_CLAUSE_DECL (node) = build_fold_indirect_ref (ptr);
+ ptr = fold_convert (ptrdiff_type_node, ptr);
+
+ if (POINTER_TYPE_P (TREE_TYPE (decl))
+ && GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (TREE_TYPE (decl)))
+ && ptr_kind == GOMP_MAP_POINTER)
+ {
+ node4 = build_omp_clause (input_location,
+ OMP_CLAUSE_MAP);
+ OMP_CLAUSE_SET_MAP_KIND (node4, GOMP_MAP_POINTER);
+ OMP_CLAUSE_DECL (node4) = decl;
+ OMP_CLAUSE_SIZE (node4) = size_int (0);
+ decl = build_fold_indirect_ref (decl);
+ }
+ else if (ptr_kind == GOMP_MAP_ALWAYS_POINTER
+ && n->expr->ts.type == BT_CHARACTER
+ && n->expr->ts.deferred)
+ {
+ gomp_map_kind map_kind;
+ if (GOMP_MAP_COPY_TO_P (OMP_CLAUSE_MAP_KIND (node)))
+ map_kind = GOMP_MAP_TO;
+ else if (OMP_CLAUSE_MAP_KIND (node) == GOMP_MAP_RELEASE
+ || OMP_CLAUSE_MAP_KIND (node) == GOMP_MAP_DELETE)
+ map_kind = OMP_CLAUSE_MAP_KIND (node);
+ else
+ map_kind = GOMP_MAP_ALLOC;
+ gcc_assert (se.string_length);
+ node4 = build_omp_clause (input_location, OMP_CLAUSE_MAP);
+ OMP_CLAUSE_SET_MAP_KIND (node4, map_kind);
+ OMP_CLAUSE_DECL (node4) = se.string_length;
+ OMP_CLAUSE_SIZE (node4) = TYPE_SIZE_UNIT (gfc_charlen_type_node);
+ }
+ if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (decl)))
+ {
+ tree desc_node;
+ tree type = TREE_TYPE (decl);
+ ptr2 = gfc_conv_descriptor_data_get (decl);
+ desc_node = build_omp_clause (input_location, OMP_CLAUSE_MAP);
+ OMP_CLAUSE_DECL (desc_node) = decl;
+ OMP_CLAUSE_SIZE (desc_node) = TYPE_SIZE_UNIT (type);
+ if (ptr_kind == GOMP_MAP_ALWAYS_POINTER)
+ {
+ OMP_CLAUSE_SET_MAP_KIND (desc_node, GOMP_MAP_TO);
+ node2 = node;
+ node = desc_node; /* Needs to come first. */
+ }
+ else
+ {
+ OMP_CLAUSE_SET_MAP_KIND (desc_node, GOMP_MAP_TO_PSET);
+ node2 = desc_node;
+ }
+ node3 = build_omp_clause (input_location,
+ OMP_CLAUSE_MAP);
+ OMP_CLAUSE_SET_MAP_KIND (node3, ptr_kind);
+ OMP_CLAUSE_DECL (node3)
+ = gfc_conv_descriptor_data_get (decl);
+	     /* This purposely does not include GOMP_MAP_ALWAYS_POINTER.  The
+		extra cast prevents gimplify.c from recognising it as being part
+		of the struct and from adding an 'alloc:' for the 'desc.data'
+		pointer, which would break as the 'desc' (the descriptor) is
+		also mapped (see node4 above).  */
+ if (ptr_kind == GOMP_MAP_ATTACH_DETACH)
+ STRIP_NOPS (OMP_CLAUSE_DECL (node3));
+ }
+ else
+ {
+ if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
+ {
+ tree offset;
+ ptr2 = build_fold_addr_expr (decl);
+ offset = fold_build2 (MINUS_EXPR, ptrdiff_type_node, ptr,
+ fold_convert (ptrdiff_type_node, ptr2));
+ offset = build2 (TRUNC_DIV_EXPR, ptrdiff_type_node,
+ offset, fold_convert (ptrdiff_type_node, elemsz));
+ offset = build4_loc (input_location, ARRAY_REF,
+ TREE_TYPE (TREE_TYPE (decl)),
+ decl, offset, NULL_TREE, NULL_TREE);
+ OMP_CLAUSE_DECL (node) = offset;
+
+ if (ptr_kind == GOMP_MAP_ALWAYS_POINTER)
+ return;
+ }
+ else
+ {
+ gcc_assert (POINTER_TYPE_P (TREE_TYPE (decl)));
+ ptr2 = decl;
+ }
+ node3 = build_omp_clause (input_location,
+ OMP_CLAUSE_MAP);
+ OMP_CLAUSE_SET_MAP_KIND (node3, ptr_kind);
+ OMP_CLAUSE_DECL (node3) = decl;
+ }
+ ptr2 = fold_convert (ptrdiff_type_node, ptr2);
+ OMP_CLAUSE_SIZE (node3) = fold_build2 (MINUS_EXPR, ptrdiff_type_node,
+ ptr, ptr2);
+}
+
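+/* Build the tree representation of the OpenMP iterators of a depend or
+   affinity clause: for each iterator variable in NS create a six-element
+   TREE_VEC holding the variable decl, begin, end, step and original step
+   (element 5, the enclosing BLOCK, is filled in by the caller).  Evaluation
+   code goes into ITER_BLOCK and the iterator decls are chained into the
+   BLOCK_VARS of BLOCK.  */
+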
+static tree
+handle_iterator (gfc_namespace *ns, stmtblock_t *iter_block, tree block)
+{
+ tree list = NULL_TREE;
+ for (gfc_symbol *sym = ns->proc_name; sym; sym = sym->tlink)
+ {
+ gfc_constructor *c;
+ gfc_se se;
+
+ tree last = make_tree_vec (6);
+ tree iter_var = gfc_get_symbol_decl (sym);
+ tree type = TREE_TYPE (iter_var);
+ TREE_VEC_ELT (last, 0) = iter_var;
+ DECL_CHAIN (iter_var) = BLOCK_VARS (block);
+ BLOCK_VARS (block) = iter_var;
+
+ /* begin */
+ c = gfc_constructor_first (sym->value->value.constructor);
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, c->expr);
+ gfc_add_block_to_block (iter_block, &se.pre);
+ gfc_add_block_to_block (iter_block, &se.post);
+ TREE_VEC_ELT (last, 1) = fold_convert (type,
+ gfc_evaluate_now (se.expr,
+ iter_block));
+ /* end */
+ c = gfc_constructor_next (c);
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, c->expr);
+ gfc_add_block_to_block (iter_block, &se.pre);
+ gfc_add_block_to_block (iter_block, &se.post);
+ TREE_VEC_ELT (last, 2) = fold_convert (type,
+ gfc_evaluate_now (se.expr,
+ iter_block));
+ /* step */
+ c = gfc_constructor_next (c);
+ tree step;
+ if (c)
+ {
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, c->expr);
+ gfc_add_block_to_block (iter_block, &se.pre);
+ gfc_add_block_to_block (iter_block, &se.post);
+ step = fold_convert (type,
+ gfc_evaluate_now (se.expr,
+ iter_block));
+ }
+ else
+ step = build_int_cst (type, 1);
+ TREE_VEC_ELT (last, 3) = step;
+ /* orig_step */
+ TREE_VEC_ELT (last, 4) = save_expr (step);
+ TREE_CHAIN (last) = list;
+ list = last;
+ }
+ return list;
+}
+
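+/* Translate the frontend clause representation CLAUSES of a construct at
+   WHERE into a chain of GENERIC OMP_CLAUSE_* nodes; code needed to evaluate
+   clause expressions is appended to BLOCK.  DECLARE_SIMD and OPENACC select
+   the slightly different handling required for 'declare simd' directives
+   and for OpenACC constructs.  */
+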
+static tree
+gfc_trans_omp_clauses (stmtblock_t *block, gfc_omp_clauses *clauses,
+ locus where, bool declare_simd = false,
+ bool openacc = false)
+{
+ tree omp_clauses = NULL_TREE, prev_clauses, chunk_size, c;
+ tree iterator = NULL_TREE;
+ tree tree_block = NULL_TREE;
+ stmtblock_t iter_block;
+ int list, ifc;
+ enum omp_clause_code clause_code;
+ gfc_omp_namelist *prev = NULL;
+ gfc_se se;
+
+ if (clauses == NULL)
+ return NULL_TREE;
+
+ for (list = 0; list < OMP_LIST_NUM; list++)
+ {
+ gfc_omp_namelist *n = clauses->lists[list];
+
+ if (n == NULL)
+ continue;
+ switch (list)
+ {
+ case OMP_LIST_REDUCTION:
+ case OMP_LIST_REDUCTION_INSCAN:
+ case OMP_LIST_REDUCTION_TASK:
+ case OMP_LIST_IN_REDUCTION:
+ case OMP_LIST_TASK_REDUCTION:
+ /* An OpenACC async clause indicates the need to set reduction
+ arguments addressable, to allow asynchronous copy-out. */
+ omp_clauses = gfc_trans_omp_reduction_list (list, n, omp_clauses,
+ where, clauses->async);
+ break;
+ case OMP_LIST_PRIVATE:
+ clause_code = OMP_CLAUSE_PRIVATE;
+ goto add_clause;
+ case OMP_LIST_SHARED:
+ clause_code = OMP_CLAUSE_SHARED;
+ goto add_clause;
+ case OMP_LIST_FIRSTPRIVATE:
+ clause_code = OMP_CLAUSE_FIRSTPRIVATE;
+ goto add_clause;
+ case OMP_LIST_LASTPRIVATE:
+ clause_code = OMP_CLAUSE_LASTPRIVATE;
+ goto add_clause;
+ case OMP_LIST_COPYIN:
+ clause_code = OMP_CLAUSE_COPYIN;
+ goto add_clause;
+ case OMP_LIST_COPYPRIVATE:
+ clause_code = OMP_CLAUSE_COPYPRIVATE;
+ goto add_clause;
+ case OMP_LIST_UNIFORM:
+ clause_code = OMP_CLAUSE_UNIFORM;
+ goto add_clause;
+ case OMP_LIST_USE_DEVICE:
+ case OMP_LIST_USE_DEVICE_PTR:
+ clause_code = OMP_CLAUSE_USE_DEVICE_PTR;
+ goto add_clause;
+ case OMP_LIST_USE_DEVICE_ADDR:
+ clause_code = OMP_CLAUSE_USE_DEVICE_ADDR;
+ goto add_clause;
+ case OMP_LIST_IS_DEVICE_PTR:
+ clause_code = OMP_CLAUSE_IS_DEVICE_PTR;
+ goto add_clause;
+ case OMP_LIST_NONTEMPORAL:
+ clause_code = OMP_CLAUSE_NONTEMPORAL;
+ goto add_clause;
+ case OMP_LIST_SCAN_IN:
+ clause_code = OMP_CLAUSE_INCLUSIVE;
+ goto add_clause;
+ case OMP_LIST_SCAN_EX:
+ clause_code = OMP_CLAUSE_EXCLUSIVE;
+ goto add_clause;
+
+ add_clause:
+ omp_clauses
+ = gfc_trans_omp_variable_list (clause_code, n, omp_clauses,
+ declare_simd);
+ break;
+ case OMP_LIST_ALIGNED:
+ for (; n != NULL; n = n->next)
+ if (n->sym->attr.referenced || declare_simd)
+ {
+ tree t = gfc_trans_omp_variable (n->sym, declare_simd);
+ if (t != error_mark_node)
+ {
+ tree node = build_omp_clause (input_location,
+ OMP_CLAUSE_ALIGNED);
+ OMP_CLAUSE_DECL (node) = t;
+ if (n->expr)
+ {
+ tree alignment_var;
+
+ if (declare_simd)
+ alignment_var = gfc_conv_constant_to_tree (n->expr);
+ else
+ {
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, n->expr);
+ gfc_add_block_to_block (block, &se.pre);
+ alignment_var = gfc_evaluate_now (se.expr, block);
+ gfc_add_block_to_block (block, &se.post);
+ }
+ OMP_CLAUSE_ALIGNED_ALIGNMENT (node) = alignment_var;
+ }
+ omp_clauses = gfc_trans_add_clause (node, omp_clauses);
+ }
+ }
+ break;
+ case OMP_LIST_ALLOCATE:
+ for (; n != NULL; n = n->next)
+ if (n->sym->attr.referenced)
+ {
+ tree t = gfc_trans_omp_variable (n->sym, false);
+ if (t != error_mark_node)
+ {
+ tree node = build_omp_clause (input_location,
+ OMP_CLAUSE_ALLOCATE);
+ OMP_CLAUSE_DECL (node) = t;
+ if (n->expr)
+ {
+ tree allocator_;
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, n->expr);
+ allocator_ = gfc_evaluate_now (se.expr, block);
+ OMP_CLAUSE_ALLOCATE_ALLOCATOR (node) = allocator_;
+ }
+ omp_clauses = gfc_trans_add_clause (node, omp_clauses);
+ }
+ }
+ break;
+ case OMP_LIST_LINEAR:
+ {
+ gfc_expr *last_step_expr = NULL;
+ tree last_step = NULL_TREE;
+ bool last_step_parm = false;
+
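+	    /* A step expression attached to a list item applies to that item
+	       and to all following items until another step expression is
+	       seen.  */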
+ for (; n != NULL; n = n->next)
+ {
+ if (n->expr)
+ {
+ last_step_expr = n->expr;
+ last_step = NULL_TREE;
+ last_step_parm = false;
+ }
+ if (n->sym->attr.referenced || declare_simd)
+ {
+ tree t = gfc_trans_omp_variable (n->sym, declare_simd);
+ if (t != error_mark_node)
+ {
+ tree node = build_omp_clause (input_location,
+ OMP_CLAUSE_LINEAR);
+ OMP_CLAUSE_DECL (node) = t;
+ omp_clause_linear_kind kind;
+ switch (n->u.linear_op)
+ {
+ case OMP_LINEAR_DEFAULT:
+ kind = OMP_CLAUSE_LINEAR_DEFAULT;
+ break;
+ case OMP_LINEAR_REF:
+ kind = OMP_CLAUSE_LINEAR_REF;
+ break;
+ case OMP_LINEAR_VAL:
+ kind = OMP_CLAUSE_LINEAR_VAL;
+ break;
+ case OMP_LINEAR_UVAL:
+ kind = OMP_CLAUSE_LINEAR_UVAL;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ OMP_CLAUSE_LINEAR_KIND (node) = kind;
+ if (last_step_expr && last_step == NULL_TREE)
+ {
+ if (!declare_simd)
+ {
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, last_step_expr);
+ gfc_add_block_to_block (block, &se.pre);
+ last_step = gfc_evaluate_now (se.expr, block);
+ gfc_add_block_to_block (block, &se.post);
+ }
+ else if (last_step_expr->expr_type == EXPR_VARIABLE)
+ {
+ gfc_symbol *s = last_step_expr->symtree->n.sym;
+ last_step = gfc_trans_omp_variable (s, true);
+ last_step_parm = true;
+ }
+ else
+ last_step
+ = gfc_conv_constant_to_tree (last_step_expr);
+ }
+ if (last_step_parm)
+ {
+ OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (node) = 1;
+ OMP_CLAUSE_LINEAR_STEP (node) = last_step;
+ }
+ else
+ {
+ if (kind == OMP_CLAUSE_LINEAR_REF)
+ {
+ tree type;
+ if (n->sym->attr.flavor == FL_PROCEDURE)
+ {
+ type = gfc_get_function_type (n->sym);
+ type = build_pointer_type (type);
+ }
+ else
+ type = gfc_sym_type (n->sym);
+ if (POINTER_TYPE_P (type))
+ type = TREE_TYPE (type);
+			    /* Otherwise it is not yet clear what exactly
+			       should be done.  */
+ tree t = fold_convert (sizetype, last_step);
+ t = size_binop (MULT_EXPR, t,
+ TYPE_SIZE_UNIT (type));
+ OMP_CLAUSE_LINEAR_STEP (node) = t;
+ }
+ else
+ {
+ tree type
+ = gfc_typenode_for_spec (&n->sym->ts);
+ OMP_CLAUSE_LINEAR_STEP (node)
+ = fold_convert (type, last_step);
+ }
+ }
+ if (n->sym->attr.dimension || n->sym->attr.allocatable)
+ OMP_CLAUSE_LINEAR_ARRAY (node) = 1;
+ omp_clauses = gfc_trans_add_clause (node, omp_clauses);
+ }
+ }
+ }
+ }
+ break;
+ case OMP_LIST_AFFINITY:
+ case OMP_LIST_DEPEND:
+ iterator = NULL_TREE;
+ prev = NULL;
+ prev_clauses = omp_clauses;
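+	  /* List items governed by the same ITERATOR modifier share one
+	     namespace in u2.ns; once all clauses for that namespace have been
+	     built, their OMP_CLAUSE_DECLs are wrapped in a TREE_LIST keyed by
+	     the TREE_VEC produced by handle_iterator.  */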
+ for (; n != NULL; n = n->next)
+ {
+ if (iterator && prev->u2.ns != n->u2.ns)
+ {
+ BLOCK_SUBBLOCKS (tree_block) = gfc_finish_block (&iter_block);
+ TREE_VEC_ELT (iterator, 5) = tree_block;
+ for (tree c = omp_clauses; c != prev_clauses;
+ c = OMP_CLAUSE_CHAIN (c))
+ OMP_CLAUSE_DECL (c) = build_tree_list (iterator,
+ OMP_CLAUSE_DECL (c));
+ prev_clauses = omp_clauses;
+ iterator = NULL_TREE;
+ }
+ if (n->u2.ns && (!prev || prev->u2.ns != n->u2.ns))
+ {
+ gfc_init_block (&iter_block);
+ tree_block = make_node (BLOCK);
+ TREE_USED (tree_block) = 1;
+ BLOCK_VARS (tree_block) = NULL_TREE;
+ iterator = handle_iterator (n->u2.ns, block,
+ tree_block);
+ }
+ if (!iterator)
+ gfc_init_block (&iter_block);
+ prev = n;
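+	  /* depend (sink : x1 [+/- d1], ...): collect the whole sink chain
+	     into a single OMP_CLAUSE_DEPEND clause whose decl is a TREE_LIST
+	     of (addend, variable) pairs; where the doacross loop step is
+	     known, the addend is divided by it.  */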
+ if (list == OMP_LIST_DEPEND
+ && n->u.depend_op == OMP_DEPEND_SINK_FIRST)
+ {
+ tree vec = NULL_TREE;
+ unsigned int i;
+ for (i = 0; ; i++)
+ {
+ tree addend = integer_zero_node, t;
+ bool neg = false;
+ if (n->expr)
+ {
+ addend = gfc_conv_constant_to_tree (n->expr);
+ if (TREE_CODE (addend) == INTEGER_CST
+ && tree_int_cst_sgn (addend) == -1)
+ {
+ neg = true;
+ addend = const_unop (NEGATE_EXPR,
+ TREE_TYPE (addend), addend);
+ }
+ }
+ t = gfc_trans_omp_variable (n->sym, false);
+ if (t != error_mark_node)
+ {
+ if (i < vec_safe_length (doacross_steps)
+ && !integer_zerop (addend)
+ && (*doacross_steps)[i])
+ {
+ tree step = (*doacross_steps)[i];
+ addend = fold_convert (TREE_TYPE (step), addend);
+ addend = build2 (TRUNC_DIV_EXPR,
+ TREE_TYPE (step), addend, step);
+ }
+ vec = tree_cons (addend, t, vec);
+ if (neg)
+ OMP_CLAUSE_DEPEND_SINK_NEGATIVE (vec) = 1;
+ }
+ if (n->next == NULL
+ || n->next->u.depend_op != OMP_DEPEND_SINK)
+ break;
+ n = n->next;
+ }
+ if (vec == NULL_TREE)
+ continue;
+
+ tree node = build_omp_clause (input_location,
+ OMP_CLAUSE_DEPEND);
+ OMP_CLAUSE_DEPEND_KIND (node) = OMP_CLAUSE_DEPEND_SINK;
+ OMP_CLAUSE_DECL (node) = nreverse (vec);
+ omp_clauses = gfc_trans_add_clause (node, omp_clauses);
+ continue;
+ }
+
+ if (!n->sym->attr.referenced)
+ continue;
+
+ tree node = build_omp_clause (input_location,
+ list == OMP_LIST_DEPEND
+ ? OMP_CLAUSE_DEPEND
+ : OMP_CLAUSE_AFFINITY);
+ if (n->expr == NULL || n->expr->ref->u.ar.type == AR_FULL)
+ {
+ tree decl = gfc_trans_omp_variable (n->sym, false);
+ if (gfc_omp_privatize_by_reference (decl))
+ decl = build_fold_indirect_ref (decl);
+ if (n->u.depend_op == OMP_DEPEND_DEPOBJ
+ && POINTER_TYPE_P (TREE_TYPE (decl)))
+ decl = build_fold_indirect_ref (decl);
+ if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (decl)))
+ {
+ decl = gfc_conv_descriptor_data_get (decl);
+ gcc_assert (POINTER_TYPE_P (TREE_TYPE (decl)));
+ decl = build_fold_indirect_ref (decl);
+ }
+ else if (DECL_P (decl))
+ TREE_ADDRESSABLE (decl) = 1;
+ OMP_CLAUSE_DECL (node) = decl;
+ }
+ else
+ {
+ tree ptr;
+ gfc_init_se (&se, NULL);
+ if (n->expr->ref->u.ar.type == AR_ELEMENT)
+ {
+ gfc_conv_expr_reference (&se, n->expr);
+ ptr = se.expr;
+ }
+ else
+ {
+ gfc_conv_expr_descriptor (&se, n->expr);
+ ptr = gfc_conv_array_data (se.expr);
+ }
+ gfc_add_block_to_block (&iter_block, &se.pre);
+ gfc_add_block_to_block (&iter_block, &se.post);
+ gcc_assert (POINTER_TYPE_P (TREE_TYPE (ptr)));
+ OMP_CLAUSE_DECL (node) = build_fold_indirect_ref (ptr);
+ }
+ if (list == OMP_LIST_DEPEND)
+ switch (n->u.depend_op)
+ {
+ case OMP_DEPEND_IN:
+ OMP_CLAUSE_DEPEND_KIND (node) = OMP_CLAUSE_DEPEND_IN;
+ break;
+ case OMP_DEPEND_OUT:
+ OMP_CLAUSE_DEPEND_KIND (node) = OMP_CLAUSE_DEPEND_OUT;
+ break;
+ case OMP_DEPEND_INOUT:
+ OMP_CLAUSE_DEPEND_KIND (node) = OMP_CLAUSE_DEPEND_INOUT;
+ break;
+ case OMP_DEPEND_MUTEXINOUTSET:
+ OMP_CLAUSE_DEPEND_KIND (node)
+ = OMP_CLAUSE_DEPEND_MUTEXINOUTSET;
+ break;
+ case OMP_DEPEND_DEPOBJ:
+ OMP_CLAUSE_DEPEND_KIND (node) = OMP_CLAUSE_DEPEND_DEPOBJ;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ if (!iterator)
+ gfc_add_block_to_block (block, &iter_block);
+ omp_clauses = gfc_trans_add_clause (node, omp_clauses);
+ }
+ if (iterator)
+ {
+ BLOCK_SUBBLOCKS (tree_block) = gfc_finish_block (&iter_block);
+ TREE_VEC_ELT (iterator, 5) = tree_block;
+ for (tree c = omp_clauses; c != prev_clauses;
+ c = OMP_CLAUSE_CHAIN (c))
+ OMP_CLAUSE_DECL (c) = build_tree_list (iterator,
+ OMP_CLAUSE_DECL (c));
+ }
+ break;
+ case OMP_LIST_MAP:
+ for (; n != NULL; n = n->next)
+ {
+ if (!n->sym->attr.referenced)
+ continue;
+
+ bool always_modifier = false;
+ tree node = build_omp_clause (input_location, OMP_CLAUSE_MAP);
+ tree node2 = NULL_TREE;
+ tree node3 = NULL_TREE;
+ tree node4 = NULL_TREE;
+
+ /* OpenMP: automatically map pointer targets with the pointer;
+ hence, always update the descriptor/pointer itself. */
+ if (!openacc
+ && ((n->expr == NULL && n->sym->attr.pointer)
+ || (n->expr && gfc_expr_attr (n->expr).pointer)))
+ always_modifier = true;
+
+ switch (n->u.map_op)
+ {
+ case OMP_MAP_ALLOC:
+ OMP_CLAUSE_SET_MAP_KIND (node, GOMP_MAP_ALLOC);
+ break;
+ case OMP_MAP_IF_PRESENT:
+ OMP_CLAUSE_SET_MAP_KIND (node, GOMP_MAP_IF_PRESENT);
+ break;
+ case OMP_MAP_ATTACH:
+ OMP_CLAUSE_SET_MAP_KIND (node, GOMP_MAP_ATTACH);
+ break;
+ case OMP_MAP_TO:
+ OMP_CLAUSE_SET_MAP_KIND (node, GOMP_MAP_TO);
+ break;
+ case OMP_MAP_FROM:
+ OMP_CLAUSE_SET_MAP_KIND (node, GOMP_MAP_FROM);
+ break;
+ case OMP_MAP_TOFROM:
+ OMP_CLAUSE_SET_MAP_KIND (node, GOMP_MAP_TOFROM);
+ break;
+ case OMP_MAP_ALWAYS_TO:
+ always_modifier = true;
+ OMP_CLAUSE_SET_MAP_KIND (node, GOMP_MAP_ALWAYS_TO);
+ break;
+ case OMP_MAP_ALWAYS_FROM:
+ always_modifier = true;
+ OMP_CLAUSE_SET_MAP_KIND (node, GOMP_MAP_ALWAYS_FROM);
+ break;
+ case OMP_MAP_ALWAYS_TOFROM:
+ always_modifier = true;
+ OMP_CLAUSE_SET_MAP_KIND (node, GOMP_MAP_ALWAYS_TOFROM);
+ break;
+ case OMP_MAP_RELEASE:
+ OMP_CLAUSE_SET_MAP_KIND (node, GOMP_MAP_RELEASE);
+ break;
+ case OMP_MAP_DELETE:
+ OMP_CLAUSE_SET_MAP_KIND (node, GOMP_MAP_DELETE);
+ break;
+ case OMP_MAP_DETACH:
+ OMP_CLAUSE_SET_MAP_KIND (node, GOMP_MAP_DETACH);
+ break;
+ case OMP_MAP_FORCE_ALLOC:
+ OMP_CLAUSE_SET_MAP_KIND (node, GOMP_MAP_FORCE_ALLOC);
+ break;
+ case OMP_MAP_FORCE_TO:
+ OMP_CLAUSE_SET_MAP_KIND (node, GOMP_MAP_FORCE_TO);
+ break;
+ case OMP_MAP_FORCE_FROM:
+ OMP_CLAUSE_SET_MAP_KIND (node, GOMP_MAP_FORCE_FROM);
+ break;
+ case OMP_MAP_FORCE_TOFROM:
+ OMP_CLAUSE_SET_MAP_KIND (node, GOMP_MAP_FORCE_TOFROM);
+ break;
+ case OMP_MAP_FORCE_PRESENT:
+ OMP_CLAUSE_SET_MAP_KIND (node, GOMP_MAP_FORCE_PRESENT);
+ break;
+ case OMP_MAP_FORCE_DEVICEPTR:
+ OMP_CLAUSE_SET_MAP_KIND (node, GOMP_MAP_FORCE_DEVICEPTR);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ tree decl = gfc_trans_omp_variable (n->sym, false);
+ if (DECL_P (decl))
+ TREE_ADDRESSABLE (decl) = 1;
+
+ gfc_ref *lastref = NULL;
+
+ if (n->expr)
+ for (gfc_ref *ref = n->expr->ref; ref; ref = ref->next)
+ if (ref->type == REF_COMPONENT || ref->type == REF_ARRAY)
+ lastref = ref;
+
+ bool allocatable = false, pointer = false;
+
+ if (lastref && lastref->type == REF_COMPONENT)
+ {
+ gfc_component *c = lastref->u.c.component;
+
+ if (c->ts.type == BT_CLASS)
+ {
+ pointer = CLASS_DATA (c)->attr.class_pointer;
+ allocatable = CLASS_DATA (c)->attr.allocatable;
+ }
+ else
+ {
+ pointer = c->attr.pointer;
+ allocatable = c->attr.allocatable;
+ }
+ }
+
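+	      /* Four shapes are handled below: the whole variable (no
+		 reference or a full array reference), an array element or
+		 section of a plain variable, a component reference ending in
+		 a scalar component, and other component/array references,
+		 which are translated by walking the reference chain.  */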
+ if (n->expr == NULL
+ || (n->expr->ref->type == REF_ARRAY
+ && n->expr->ref->u.ar.type == AR_FULL))
+ {
+ tree present = gfc_omp_check_optional_argument (decl, true);
+ if (openacc && n->sym->ts.type == BT_CLASS)
+ {
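+		      /* Polymorphic (CLASS) variable in OpenACC: node maps
+			 the class data using the dynamic size from the vtab,
+			 node2 maps the class container itself as a PSET and
+			 node3 attaches/detaches the data pointer.  */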
+ tree type = TREE_TYPE (decl);
+ if (n->sym->attr.optional)
+ sorry ("optional class parameter");
+ if (POINTER_TYPE_P (type))
+ {
+ node4 = build_omp_clause (input_location,
+ OMP_CLAUSE_MAP);
+ OMP_CLAUSE_SET_MAP_KIND (node4, GOMP_MAP_POINTER);
+ OMP_CLAUSE_DECL (node4) = decl;
+ OMP_CLAUSE_SIZE (node4) = size_int (0);
+ decl = build_fold_indirect_ref (decl);
+ }
+ tree ptr = gfc_class_data_get (decl);
+ ptr = build_fold_indirect_ref (ptr);
+ OMP_CLAUSE_DECL (node) = ptr;
+ OMP_CLAUSE_SIZE (node) = gfc_class_vtab_size_get (decl);
+ node2 = build_omp_clause (input_location, OMP_CLAUSE_MAP);
+ OMP_CLAUSE_SET_MAP_KIND (node2, GOMP_MAP_TO_PSET);
+ OMP_CLAUSE_DECL (node2) = decl;
+ OMP_CLAUSE_SIZE (node2) = TYPE_SIZE_UNIT (type);
+ node3 = build_omp_clause (input_location, OMP_CLAUSE_MAP);
+ OMP_CLAUSE_SET_MAP_KIND (node3, GOMP_MAP_ATTACH_DETACH);
+ OMP_CLAUSE_DECL (node3) = gfc_class_data_get (decl);
+ OMP_CLAUSE_SIZE (node3) = size_int (0);
+ goto finalize_map_clause;
+ }
+ else if (POINTER_TYPE_P (TREE_TYPE (decl))
+ && (gfc_omp_privatize_by_reference (decl)
+ || GFC_DECL_GET_SCALAR_POINTER (decl)
+ || GFC_DECL_GET_SCALAR_ALLOCATABLE (decl)
+ || GFC_DECL_CRAY_POINTEE (decl)
+ || GFC_DESCRIPTOR_TYPE_P
+ (TREE_TYPE (TREE_TYPE (decl)))
+ || n->sym->ts.type == BT_DERIVED))
+ {
+ tree orig_decl = decl;
+
+ /* For nonallocatable, nonpointer arrays, a temporary
+ variable is generated, but this one is only defined if
+ the variable is present; hence, we now set it to NULL
+ to avoid accessing undefined variables. We cannot use
+ a temporary variable here as otherwise the replacement
+ of the variables in omp-low.c will not work. */
+ if (present && GFC_ARRAY_TYPE_P (TREE_TYPE (decl)))
+ {
+ tree tmp = fold_build2_loc (input_location,
+ MODIFY_EXPR,
+ void_type_node, decl,
+ null_pointer_node);
+ tree cond = fold_build1_loc (input_location,
+ TRUTH_NOT_EXPR,
+ boolean_type_node,
+ present);
+ gfc_add_expr_to_block (block,
+ build3_loc (input_location,
+ COND_EXPR,
+ void_type_node,
+ cond, tmp,
+ NULL_TREE));
+ }
+ node4 = build_omp_clause (input_location,
+ OMP_CLAUSE_MAP);
+ OMP_CLAUSE_SET_MAP_KIND (node4, GOMP_MAP_POINTER);
+ OMP_CLAUSE_DECL (node4) = decl;
+ OMP_CLAUSE_SIZE (node4) = size_int (0);
+ decl = build_fold_indirect_ref (decl);
+ if ((TREE_CODE (TREE_TYPE (orig_decl)) == REFERENCE_TYPE
+ || gfc_omp_is_optional_argument (orig_decl))
+ && (GFC_DECL_GET_SCALAR_POINTER (orig_decl)
+ || GFC_DECL_GET_SCALAR_ALLOCATABLE (orig_decl)))
+ {
+ node3 = build_omp_clause (input_location,
+ OMP_CLAUSE_MAP);
+ OMP_CLAUSE_SET_MAP_KIND (node3, GOMP_MAP_POINTER);
+ OMP_CLAUSE_DECL (node3) = decl;
+ OMP_CLAUSE_SIZE (node3) = size_int (0);
+ decl = build_fold_indirect_ref (decl);
+ }
+ }
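+		  /* Array with a descriptor: node maps the data (number of
+		     elements times the element size), node2 maps the
+		     descriptor itself as a PSET, and node3 remaps the
+		     descriptor's data pointer, either as a (possibly
+		     'always') pointer or as an attach/detach operation.  */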
+ if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (decl)))
+ {
+ tree type = TREE_TYPE (decl);
+ tree ptr = gfc_conv_descriptor_data_get (decl);
+ if (present)
+ ptr = gfc_build_cond_assign_expr (block, present, ptr,
+ null_pointer_node);
+ gcc_assert (POINTER_TYPE_P (TREE_TYPE (ptr)));
+ ptr = build_fold_indirect_ref (ptr);
+ OMP_CLAUSE_DECL (node) = ptr;
+ node2 = build_omp_clause (input_location,
+ OMP_CLAUSE_MAP);
+ OMP_CLAUSE_SET_MAP_KIND (node2, GOMP_MAP_TO_PSET);
+ OMP_CLAUSE_DECL (node2) = decl;
+ OMP_CLAUSE_SIZE (node2) = TYPE_SIZE_UNIT (type);
+ node3 = build_omp_clause (input_location,
+ OMP_CLAUSE_MAP);
+ if (present)
+ {
+ ptr = gfc_conv_descriptor_data_get (decl);
+ ptr = gfc_build_addr_expr (NULL, ptr);
+ ptr = gfc_build_cond_assign_expr (block, present, ptr,
+ null_pointer_node);
+ ptr = build_fold_indirect_ref (ptr);
+ OMP_CLAUSE_DECL (node3) = ptr;
+ }
+ else
+ OMP_CLAUSE_DECL (node3)
+ = gfc_conv_descriptor_data_get (decl);
+ OMP_CLAUSE_SIZE (node3) = size_int (0);
+ if (n->u.map_op == OMP_MAP_ATTACH)
+ {
+ /* Standalone attach clauses used with arrays with
+ descriptors must copy the descriptor to the target,
+ else they won't have anything to perform the
+ attachment onto (see OpenACC 2.6, "2.6.3. Data
+ Structures with Pointers"). */
+ OMP_CLAUSE_SET_MAP_KIND (node3, GOMP_MAP_ATTACH);
+ /* We don't want to map PTR at all in this case, so
+ delete its node and shuffle the others down. */
+ node = node2;
+ node2 = node3;
+ node3 = NULL;
+ goto finalize_map_clause;
+ }
+ else if (n->u.map_op == OMP_MAP_DETACH)
+ {
+ OMP_CLAUSE_SET_MAP_KIND (node3, GOMP_MAP_DETACH);
+ /* Similarly to above, we don't want to unmap PTR
+ here. */
+ node = node2;
+ node2 = node3;
+ node3 = NULL;
+ goto finalize_map_clause;
+ }
+ else
+ OMP_CLAUSE_SET_MAP_KIND (node3,
+ always_modifier
+ ? GOMP_MAP_ALWAYS_POINTER
+ : GOMP_MAP_POINTER);
+
+ /* We have to check for n->sym->attr.dimension because
+ of scalar coarrays. */
+ if (n->sym->attr.pointer && n->sym->attr.dimension)
+ {
+ stmtblock_t cond_block;
+ tree size
+ = gfc_create_var (gfc_array_index_type, NULL);
+ tree tem, then_b, else_b, zero, cond;
+
+ gfc_init_block (&cond_block);
+ tem
+ = gfc_full_array_size (&cond_block, decl,
+ GFC_TYPE_ARRAY_RANK (type));
+ gfc_add_modify (&cond_block, size, tem);
+ then_b = gfc_finish_block (&cond_block);
+ gfc_init_block (&cond_block);
+ zero = build_int_cst (gfc_array_index_type, 0);
+ gfc_add_modify (&cond_block, size, zero);
+ else_b = gfc_finish_block (&cond_block);
+ tem = gfc_conv_descriptor_data_get (decl);
+ tem = fold_convert (pvoid_type_node, tem);
+ cond = fold_build2_loc (input_location, NE_EXPR,
+ boolean_type_node,
+ tem, null_pointer_node);
+ if (present)
+ cond = fold_build2_loc (input_location,
+ TRUTH_ANDIF_EXPR,
+ boolean_type_node,
+ present, cond);
+ gfc_add_expr_to_block (block,
+ build3_loc (input_location,
+ COND_EXPR,
+ void_type_node,
+ cond, then_b,
+ else_b));
+ OMP_CLAUSE_SIZE (node) = size;
+ }
+ else if (n->sym->attr.dimension)
+ {
+ stmtblock_t cond_block;
+ gfc_init_block (&cond_block);
+ tree size = gfc_full_array_size (&cond_block, decl,
+ GFC_TYPE_ARRAY_RANK (type));
+ if (present)
+ {
+ tree var = gfc_create_var (gfc_array_index_type,
+ NULL);
+ gfc_add_modify (&cond_block, var, size);
+ tree cond_body = gfc_finish_block (&cond_block);
+ tree cond = build3_loc (input_location, COND_EXPR,
+ void_type_node, present,
+ cond_body, NULL_TREE);
+ gfc_add_expr_to_block (block, cond);
+ OMP_CLAUSE_SIZE (node) = var;
+ }
+ else
+ {
+ gfc_add_block_to_block (block, &cond_block);
+ OMP_CLAUSE_SIZE (node) = size;
+ }
+ }
+ if (n->sym->attr.dimension)
+ {
+ tree elemsz
+ = TYPE_SIZE_UNIT (gfc_get_element_type (type));
+ elemsz = fold_convert (gfc_array_index_type, elemsz);
+ OMP_CLAUSE_SIZE (node)
+ = fold_build2 (MULT_EXPR, gfc_array_index_type,
+ OMP_CLAUSE_SIZE (node), elemsz);
+ }
+ }
+ else if (present
+ && TREE_CODE (decl) == INDIRECT_REF
+ && (TREE_CODE (TREE_OPERAND (decl, 0))
+ == INDIRECT_REF))
+ {
+		      /* A single INDIRECT_REF is handled by the middle end.  */
+ gcc_assert (!POINTER_TYPE_P (TREE_TYPE (decl)));
+ decl = TREE_OPERAND (decl, 0);
+ decl = gfc_build_cond_assign_expr (block, present, decl,
+ null_pointer_node);
+ OMP_CLAUSE_DECL (node) = build_fold_indirect_ref (decl);
+ }
+ else
+ OMP_CLAUSE_DECL (node) = decl;
+ }
+ else if (n->expr
+ && n->expr->expr_type == EXPR_VARIABLE
+ && n->expr->ref->type == REF_ARRAY
+ && !n->expr->ref->next)
+ {
+ /* An array element or array section which is not part of a
+ derived type, etc. */
+ bool element = n->expr->ref->u.ar.type == AR_ELEMENT;
+ gfc_trans_omp_array_section (block, n, decl, element,
+ GOMP_MAP_POINTER, node, node2,
+ node3, node4);
+ }
+ else if (n->expr
+ && n->expr->expr_type == EXPR_VARIABLE
+ && (n->expr->ref->type == REF_COMPONENT
+ || n->expr->ref->type == REF_ARRAY)
+ && lastref
+ && lastref->type == REF_COMPONENT
+ && lastref->u.c.component->ts.type != BT_CLASS
+ && lastref->u.c.component->ts.type != BT_DERIVED
+ && !lastref->u.c.component->attr.dimension)
+ {
+ /* Derived type access with last component being a scalar. */
+ gfc_init_se (&se, NULL);
+
+ gfc_conv_expr (&se, n->expr);
+ gfc_add_block_to_block (block, &se.pre);
+ /* For BT_CHARACTER a pointer is returned. */
+ OMP_CLAUSE_DECL (node)
+ = POINTER_TYPE_P (TREE_TYPE (se.expr))
+ ? build_fold_indirect_ref (se.expr) : se.expr;
+ gfc_add_block_to_block (block, &se.post);
+ if (pointer || allocatable)
+ {
+ node2 = build_omp_clause (input_location,
+ OMP_CLAUSE_MAP);
+ gomp_map_kind kind
+ = (openacc ? GOMP_MAP_ATTACH_DETACH
+ : GOMP_MAP_ALWAYS_POINTER);
+ OMP_CLAUSE_SET_MAP_KIND (node2, kind);
+ OMP_CLAUSE_DECL (node2)
+ = POINTER_TYPE_P (TREE_TYPE (se.expr))
+ ? se.expr
+ : gfc_build_addr_expr (NULL, se.expr);
+ OMP_CLAUSE_SIZE (node2) = size_int (0);
+ if (!openacc
+ && n->expr->ts.type == BT_CHARACTER
+ && n->expr->ts.deferred)
+ {
+ gcc_assert (se.string_length);
+ tree tmp
+ = gfc_get_char_type (n->expr->ts.kind);
+ OMP_CLAUSE_SIZE (node)
+ = fold_build2 (MULT_EXPR, size_type_node,
+ fold_convert (size_type_node,
+ se.string_length),
+ TYPE_SIZE_UNIT (tmp));
+ node3 = build_omp_clause (input_location,
+ OMP_CLAUSE_MAP);
+ OMP_CLAUSE_SET_MAP_KIND (node3, GOMP_MAP_TO);
+ OMP_CLAUSE_DECL (node3) = se.string_length;
+ OMP_CLAUSE_SIZE (node3)
+ = TYPE_SIZE_UNIT (gfc_charlen_type_node);
+ }
+ }
+ }
+ else if (n->expr
+ && n->expr->expr_type == EXPR_VARIABLE
+ && (n->expr->ref->type == REF_COMPONENT
+ || n->expr->ref->type == REF_ARRAY))
+ {
+ gfc_init_se (&se, NULL);
+ se.expr = gfc_maybe_dereference_var (n->sym, decl);
+
+ for (gfc_ref *ref = n->expr->ref; ref; ref = ref->next)
+ {
+ if (ref->type == REF_COMPONENT)
+ {
+ if (ref->u.c.sym->attr.extension)
+ conv_parent_component_references (&se, ref);
+
+ gfc_conv_component_ref (&se, ref);
+ }
+ else if (ref->type == REF_ARRAY)
+ {
+ if (ref->u.ar.type == AR_ELEMENT && ref->next)
+ gfc_conv_array_ref (&se, &ref->u.ar, n->expr,
+ &n->expr->where);
+ else
+ gcc_assert (!ref->next);
+ }
+ else
+ sorry ("unhandled expression type");
+ }
+
+ tree inner = se.expr;
+
+ /* Last component is a derived type or class pointer. */
+ if (lastref->type == REF_COMPONENT
+ && (lastref->u.c.component->ts.type == BT_DERIVED
+ || lastref->u.c.component->ts.type == BT_CLASS))
+ {
+ if (pointer || (openacc && allocatable))
+ {
+ tree data, size;
+
+ if (lastref->u.c.component->ts.type == BT_CLASS)
+ {
+ data = gfc_class_data_get (inner);
+ gcc_assert (POINTER_TYPE_P (TREE_TYPE (data)));
+ data = build_fold_indirect_ref (data);
+ size = gfc_class_vtab_size_get (inner);
+ }
+ else /* BT_DERIVED. */
+ {
+ data = inner;
+ size = TYPE_SIZE_UNIT (TREE_TYPE (inner));
+ }
+
+ OMP_CLAUSE_DECL (node) = data;
+ OMP_CLAUSE_SIZE (node) = size;
+ node2 = build_omp_clause (input_location,
+ OMP_CLAUSE_MAP);
+ OMP_CLAUSE_SET_MAP_KIND (node2,
+ openacc
+ ? GOMP_MAP_ATTACH_DETACH
+ : GOMP_MAP_ALWAYS_POINTER);
+ OMP_CLAUSE_DECL (node2) = build_fold_addr_expr (data);
+ OMP_CLAUSE_SIZE (node2) = size_int (0);
+ }
+ else
+ {
+ OMP_CLAUSE_DECL (node) = inner;
+ OMP_CLAUSE_SIZE (node)
+ = TYPE_SIZE_UNIT (TREE_TYPE (inner));
+ }
+ }
+ else if (lastref->type == REF_ARRAY
+ && lastref->u.ar.type == AR_FULL)
+ {
+ /* Just pass the (auto-dereferenced) decl through for
+ bare attach and detach clauses. */
+ if (n->u.map_op == OMP_MAP_ATTACH
+ || n->u.map_op == OMP_MAP_DETACH)
+ {
+ OMP_CLAUSE_DECL (node) = inner;
+ OMP_CLAUSE_SIZE (node) = size_zero_node;
+ goto finalize_map_clause;
+ }
+
+ if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (inner)))
+ {
+ gomp_map_kind map_kind;
+ tree desc_node;
+ tree type = TREE_TYPE (inner);
+ tree ptr = gfc_conv_descriptor_data_get (inner);
+ ptr = build_fold_indirect_ref (ptr);
+ OMP_CLAUSE_DECL (node) = ptr;
+ int rank = GFC_TYPE_ARRAY_RANK (type);
+ OMP_CLAUSE_SIZE (node)
+ = gfc_full_array_size (block, inner, rank);
+ tree elemsz
+ = TYPE_SIZE_UNIT (gfc_get_element_type (type));
+ if (GOMP_MAP_COPY_TO_P (OMP_CLAUSE_MAP_KIND (node)))
+ map_kind = GOMP_MAP_TO;
+ else if (n->u.map_op == OMP_MAP_RELEASE
+ || n->u.map_op == OMP_MAP_DELETE)
+ map_kind = OMP_CLAUSE_MAP_KIND (node);
+ else
+ map_kind = GOMP_MAP_ALLOC;
+ if (!openacc
+ && n->expr->ts.type == BT_CHARACTER
+ && n->expr->ts.deferred)
+ {
+ gcc_assert (se.string_length);
+ tree len = fold_convert (size_type_node,
+ se.string_length);
+ elemsz = gfc_get_char_type (n->expr->ts.kind);
+ elemsz = TYPE_SIZE_UNIT (elemsz);
+ elemsz = fold_build2 (MULT_EXPR, size_type_node,
+ len, elemsz);
+ node4 = build_omp_clause (input_location,
+ OMP_CLAUSE_MAP);
+ OMP_CLAUSE_SET_MAP_KIND (node4, map_kind);
+ OMP_CLAUSE_DECL (node4) = se.string_length;
+ OMP_CLAUSE_SIZE (node4)
+ = TYPE_SIZE_UNIT (gfc_charlen_type_node);
+ }
+ elemsz = fold_convert (gfc_array_index_type, elemsz);
+ OMP_CLAUSE_SIZE (node)
+ = fold_build2 (MULT_EXPR, gfc_array_index_type,
+ OMP_CLAUSE_SIZE (node), elemsz);
+ desc_node = build_omp_clause (input_location,
+ OMP_CLAUSE_MAP);
+ if (openacc)
+ OMP_CLAUSE_SET_MAP_KIND (desc_node,
+ GOMP_MAP_TO_PSET);
+ else
+ OMP_CLAUSE_SET_MAP_KIND (desc_node, map_kind);
+ OMP_CLAUSE_DECL (desc_node) = inner;
+ OMP_CLAUSE_SIZE (desc_node) = TYPE_SIZE_UNIT (type);
+ if (openacc)
+ node2 = desc_node;
+ else
+ {
+ node2 = node;
+ node = desc_node; /* Put first. */
+ }
+ node3 = build_omp_clause (input_location,
+ OMP_CLAUSE_MAP);
+ OMP_CLAUSE_SET_MAP_KIND (node3,
+ openacc
+ ? GOMP_MAP_ATTACH_DETACH
+ : GOMP_MAP_ALWAYS_POINTER);
+ OMP_CLAUSE_DECL (node3)
+ = gfc_conv_descriptor_data_get (inner);
+			  /* As in gfc_trans_omp_array_section (see the details
+			     there), we add/keep the cast for OpenMP to prevent
+			     an 'alloc:' from being added for node3 ('desc.data'),
+			     as that pointer is already covered by the mapping of
+			     the whole descriptor.
+			     TODO: Remove once the ME handles this properly.  */
+ if (!openacc)
+ OMP_CLAUSE_DECL (node3)
+			      = fold_convert (TREE_TYPE (TREE_OPERAND (ptr, 0)),
+ OMP_CLAUSE_DECL (node3));
+ else
+ STRIP_NOPS (OMP_CLAUSE_DECL (node3));
+ OMP_CLAUSE_SIZE (node3) = size_int (0);
+ }
+ else
+ OMP_CLAUSE_DECL (node) = inner;
+ }
+ else if (lastref->type == REF_ARRAY)
+ {
+ /* An array element or section. */
+ bool element = lastref->u.ar.type == AR_ELEMENT;
+ gomp_map_kind kind = (openacc ? GOMP_MAP_ATTACH_DETACH
+ : GOMP_MAP_ALWAYS_POINTER);
+ gfc_trans_omp_array_section (block, n, inner, element,
+ kind, node, node2, node3,
+ node4);
+ }
+ else
+ gcc_unreachable ();
+ }
+ else
+ sorry ("unhandled expression");
+
+ finalize_map_clause:
+
+ omp_clauses = gfc_trans_add_clause (node, omp_clauses);
+ if (node2)
+ omp_clauses = gfc_trans_add_clause (node2, omp_clauses);
+ if (node3)
+ omp_clauses = gfc_trans_add_clause (node3, omp_clauses);
+ if (node4)
+ omp_clauses = gfc_trans_add_clause (node4, omp_clauses);
+ }
+ break;
+ case OMP_LIST_TO:
+ case OMP_LIST_FROM:
+ case OMP_LIST_CACHE:
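+	  /* to/from motion clauses ('target update') and the OpenACC cache
+	     directive: arrays with descriptors are transferred through their
+	     data pointer together with the full array size in bytes; array
+	     elements and sections use the section's base address and size.  */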
+ for (; n != NULL; n = n->next)
+ {
+ if (!n->sym->attr.referenced)
+ continue;
+
+ switch (list)
+ {
+ case OMP_LIST_TO:
+ clause_code = OMP_CLAUSE_TO;
+ break;
+ case OMP_LIST_FROM:
+ clause_code = OMP_CLAUSE_FROM;
+ break;
+ case OMP_LIST_CACHE:
+ clause_code = OMP_CLAUSE__CACHE_;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ tree node = build_omp_clause (input_location, clause_code);
+ if (n->expr == NULL || n->expr->ref->u.ar.type == AR_FULL)
+ {
+ tree decl = gfc_trans_omp_variable (n->sym, false);
+ if (gfc_omp_privatize_by_reference (decl))
+ {
+ if (gfc_omp_is_allocatable_or_ptr (decl))
+ decl = build_fold_indirect_ref (decl);
+ decl = build_fold_indirect_ref (decl);
+ }
+ else if (DECL_P (decl))
+ TREE_ADDRESSABLE (decl) = 1;
+ if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (decl)))
+ {
+ tree type = TREE_TYPE (decl);
+ tree ptr = gfc_conv_descriptor_data_get (decl);
+ gcc_assert (POINTER_TYPE_P (TREE_TYPE (ptr)));
+ ptr = build_fold_indirect_ref (ptr);
+ OMP_CLAUSE_DECL (node) = ptr;
+ OMP_CLAUSE_SIZE (node)
+ = gfc_full_array_size (block, decl,
+ GFC_TYPE_ARRAY_RANK (type));
+ tree elemsz
+ = TYPE_SIZE_UNIT (gfc_get_element_type (type));
+ elemsz = fold_convert (gfc_array_index_type, elemsz);
+ OMP_CLAUSE_SIZE (node)
+ = fold_build2 (MULT_EXPR, gfc_array_index_type,
+ OMP_CLAUSE_SIZE (node), elemsz);
+ }
+ else
+ {
+ OMP_CLAUSE_DECL (node) = decl;
+ if (gfc_omp_is_allocatable_or_ptr (decl))
+ OMP_CLAUSE_SIZE (node)
+ = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)));
+ }
+ }
+ else
+ {
+ tree ptr;
+ gfc_init_se (&se, NULL);
+ if (n->expr->ref->u.ar.type == AR_ELEMENT)
+ {
+ gfc_conv_expr_reference (&se, n->expr);
+ ptr = se.expr;
+ gfc_add_block_to_block (block, &se.pre);
+ OMP_CLAUSE_SIZE (node)
+ = TYPE_SIZE_UNIT (TREE_TYPE (ptr));
+ }
+ else
+ {
+ gfc_conv_expr_descriptor (&se, n->expr);
+ ptr = gfc_conv_array_data (se.expr);
+ tree type = TREE_TYPE (se.expr);
+ gfc_add_block_to_block (block, &se.pre);
+ OMP_CLAUSE_SIZE (node)
+ = gfc_full_array_size (block, se.expr,
+ GFC_TYPE_ARRAY_RANK (type));
+ tree elemsz
+ = TYPE_SIZE_UNIT (gfc_get_element_type (type));
+ elemsz = fold_convert (gfc_array_index_type, elemsz);
+ OMP_CLAUSE_SIZE (node)
+ = fold_build2 (MULT_EXPR, gfc_array_index_type,
+ OMP_CLAUSE_SIZE (node), elemsz);
+ }
+ gfc_add_block_to_block (block, &se.post);
+ gcc_assert (POINTER_TYPE_P (TREE_TYPE (ptr)));
+ OMP_CLAUSE_DECL (node) = build_fold_indirect_ref (ptr);
+ }
+ omp_clauses = gfc_trans_add_clause (node, omp_clauses);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (clauses->if_expr)
+ {
+ tree if_var;
+
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, clauses->if_expr);
+ gfc_add_block_to_block (block, &se.pre);
+ if_var = gfc_evaluate_now (se.expr, block);
+ gfc_add_block_to_block (block, &se.post);
+
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_IF);
+ OMP_CLAUSE_IF_MODIFIER (c) = ERROR_MARK;
+ OMP_CLAUSE_IF_EXPR (c) = if_var;
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+ for (ifc = 0; ifc < OMP_IF_LAST; ifc++)
+ if (clauses->if_exprs[ifc])
+ {
+ tree if_var;
+
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, clauses->if_exprs[ifc]);
+ gfc_add_block_to_block (block, &se.pre);
+ if_var = gfc_evaluate_now (se.expr, block);
+ gfc_add_block_to_block (block, &se.post);
+
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_IF);
+ switch (ifc)
+ {
+ case OMP_IF_CANCEL:
+ OMP_CLAUSE_IF_MODIFIER (c) = VOID_CST;
+ break;
+ case OMP_IF_PARALLEL:
+ OMP_CLAUSE_IF_MODIFIER (c) = OMP_PARALLEL;
+ break;
+ case OMP_IF_SIMD:
+ OMP_CLAUSE_IF_MODIFIER (c) = OMP_SIMD;
+ break;
+ case OMP_IF_TASK:
+ OMP_CLAUSE_IF_MODIFIER (c) = OMP_TASK;
+ break;
+ case OMP_IF_TASKLOOP:
+ OMP_CLAUSE_IF_MODIFIER (c) = OMP_TASKLOOP;
+ break;
+ case OMP_IF_TARGET:
+ OMP_CLAUSE_IF_MODIFIER (c) = OMP_TARGET;
+ break;
+ case OMP_IF_TARGET_DATA:
+ OMP_CLAUSE_IF_MODIFIER (c) = OMP_TARGET_DATA;
+ break;
+ case OMP_IF_TARGET_UPDATE:
+ OMP_CLAUSE_IF_MODIFIER (c) = OMP_TARGET_UPDATE;
+ break;
+ case OMP_IF_TARGET_ENTER_DATA:
+ OMP_CLAUSE_IF_MODIFIER (c) = OMP_TARGET_ENTER_DATA;
+ break;
+ case OMP_IF_TARGET_EXIT_DATA:
+ OMP_CLAUSE_IF_MODIFIER (c) = OMP_TARGET_EXIT_DATA;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ OMP_CLAUSE_IF_EXPR (c) = if_var;
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ if (clauses->final_expr)
+ {
+ tree final_var;
+
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, clauses->final_expr);
+ gfc_add_block_to_block (block, &se.pre);
+ final_var = gfc_evaluate_now (se.expr, block);
+ gfc_add_block_to_block (block, &se.post);
+
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_FINAL);
+ OMP_CLAUSE_FINAL_EXPR (c) = final_var;
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ if (clauses->num_threads)
+ {
+ tree num_threads;
+
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, clauses->num_threads);
+ gfc_add_block_to_block (block, &se.pre);
+ num_threads = gfc_evaluate_now (se.expr, block);
+ gfc_add_block_to_block (block, &se.post);
+
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_NUM_THREADS);
+ OMP_CLAUSE_NUM_THREADS_EXPR (c) = num_threads;
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ chunk_size = NULL_TREE;
+ if (clauses->chunk_size)
+ {
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, clauses->chunk_size);
+ gfc_add_block_to_block (block, &se.pre);
+ chunk_size = gfc_evaluate_now (se.expr, block);
+ gfc_add_block_to_block (block, &se.post);
+ }
+
+ if (clauses->sched_kind != OMP_SCHED_NONE)
+ {
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_SCHEDULE);
+ OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = chunk_size;
+ switch (clauses->sched_kind)
+ {
+ case OMP_SCHED_STATIC:
+ OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_STATIC;
+ break;
+ case OMP_SCHED_DYNAMIC:
+ OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_DYNAMIC;
+ break;
+ case OMP_SCHED_GUIDED:
+ OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_GUIDED;
+ break;
+ case OMP_SCHED_RUNTIME:
+ OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_RUNTIME;
+ break;
+ case OMP_SCHED_AUTO:
+ OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_AUTO;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ if (clauses->sched_monotonic)
+ OMP_CLAUSE_SCHEDULE_KIND (c)
+ = (omp_clause_schedule_kind) (OMP_CLAUSE_SCHEDULE_KIND (c)
+ | OMP_CLAUSE_SCHEDULE_MONOTONIC);
+ else if (clauses->sched_nonmonotonic)
+ OMP_CLAUSE_SCHEDULE_KIND (c)
+ = (omp_clause_schedule_kind) (OMP_CLAUSE_SCHEDULE_KIND (c)
+ | OMP_CLAUSE_SCHEDULE_NONMONOTONIC);
+ if (clauses->sched_simd)
+ OMP_CLAUSE_SCHEDULE_SIMD (c) = 1;
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ if (clauses->default_sharing != OMP_DEFAULT_UNKNOWN)
+ {
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_DEFAULT);
+ switch (clauses->default_sharing)
+ {
+ case OMP_DEFAULT_NONE:
+ OMP_CLAUSE_DEFAULT_KIND (c) = OMP_CLAUSE_DEFAULT_NONE;
+ break;
+ case OMP_DEFAULT_SHARED:
+ OMP_CLAUSE_DEFAULT_KIND (c) = OMP_CLAUSE_DEFAULT_SHARED;
+ break;
+ case OMP_DEFAULT_PRIVATE:
+ OMP_CLAUSE_DEFAULT_KIND (c) = OMP_CLAUSE_DEFAULT_PRIVATE;
+ break;
+ case OMP_DEFAULT_FIRSTPRIVATE:
+ OMP_CLAUSE_DEFAULT_KIND (c) = OMP_CLAUSE_DEFAULT_FIRSTPRIVATE;
+ break;
+ case OMP_DEFAULT_PRESENT:
+ OMP_CLAUSE_DEFAULT_KIND (c) = OMP_CLAUSE_DEFAULT_PRESENT;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ if (clauses->nowait)
+ {
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_NOWAIT);
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ if (clauses->ordered)
+ {
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_ORDERED);
+ OMP_CLAUSE_ORDERED_EXPR (c)
+ = clauses->orderedc ? build_int_cst (integer_type_node,
+ clauses->orderedc) : NULL_TREE;
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ if (clauses->order_concurrent)
+ {
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_ORDER);
+ OMP_CLAUSE_ORDER_UNCONSTRAINED (c) = clauses->order_unconstrained;
+ OMP_CLAUSE_ORDER_REPRODUCIBLE (c) = clauses->order_reproducible;
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ if (clauses->untied)
+ {
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_UNTIED);
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ if (clauses->mergeable)
+ {
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_MERGEABLE);
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ if (clauses->collapse)
+ {
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_COLLAPSE);
+ OMP_CLAUSE_COLLAPSE_EXPR (c)
+ = build_int_cst (integer_type_node, clauses->collapse);
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ if (clauses->inbranch)
+ {
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_INBRANCH);
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ if (clauses->notinbranch)
+ {
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_NOTINBRANCH);
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ switch (clauses->cancel)
+ {
+ case OMP_CANCEL_UNKNOWN:
+ break;
+ case OMP_CANCEL_PARALLEL:
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_PARALLEL);
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ break;
+ case OMP_CANCEL_SECTIONS:
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_SECTIONS);
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ break;
+ case OMP_CANCEL_DO:
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_FOR);
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ break;
+ case OMP_CANCEL_TASKGROUP:
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_TASKGROUP);
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ break;
+ }
+
+ if (clauses->proc_bind != OMP_PROC_BIND_UNKNOWN)
+ {
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_PROC_BIND);
+ switch (clauses->proc_bind)
+ {
+ case OMP_PROC_BIND_PRIMARY:
+ OMP_CLAUSE_PROC_BIND_KIND (c) = OMP_CLAUSE_PROC_BIND_PRIMARY;
+ break;
+ case OMP_PROC_BIND_MASTER:
+ OMP_CLAUSE_PROC_BIND_KIND (c) = OMP_CLAUSE_PROC_BIND_MASTER;
+ break;
+ case OMP_PROC_BIND_SPREAD:
+ OMP_CLAUSE_PROC_BIND_KIND (c) = OMP_CLAUSE_PROC_BIND_SPREAD;
+ break;
+ case OMP_PROC_BIND_CLOSE:
+ OMP_CLAUSE_PROC_BIND_KIND (c) = OMP_CLAUSE_PROC_BIND_CLOSE;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ if (clauses->safelen_expr)
+ {
+ tree safelen_var;
+
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, clauses->safelen_expr);
+ gfc_add_block_to_block (block, &se.pre);
+ safelen_var = gfc_evaluate_now (se.expr, block);
+ gfc_add_block_to_block (block, &se.post);
+
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_SAFELEN);
+ OMP_CLAUSE_SAFELEN_EXPR (c) = safelen_var;
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ if (clauses->simdlen_expr)
+ {
+ if (declare_simd)
+ {
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_SIMDLEN);
+ OMP_CLAUSE_SIMDLEN_EXPR (c)
+ = gfc_conv_constant_to_tree (clauses->simdlen_expr);
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+ else
+ {
+ tree simdlen_var;
+
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, clauses->simdlen_expr);
+ gfc_add_block_to_block (block, &se.pre);
+ simdlen_var = gfc_evaluate_now (se.expr, block);
+ gfc_add_block_to_block (block, &se.post);
+
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_SIMDLEN);
+ OMP_CLAUSE_SIMDLEN_EXPR (c) = simdlen_var;
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+ }
+
+ if (clauses->num_teams_upper)
+ {
+ tree num_teams_lower = NULL_TREE, num_teams_upper;
+
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, clauses->num_teams_upper);
+ gfc_add_block_to_block (block, &se.pre);
+ num_teams_upper = gfc_evaluate_now (se.expr, block);
+ gfc_add_block_to_block (block, &se.post);
+
+ if (clauses->num_teams_lower)
+ {
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, clauses->num_teams_lower);
+ gfc_add_block_to_block (block, &se.pre);
+ num_teams_lower = gfc_evaluate_now (se.expr, block);
+ gfc_add_block_to_block (block, &se.post);
+ }
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_NUM_TEAMS);
+ OMP_CLAUSE_NUM_TEAMS_LOWER_EXPR (c) = num_teams_lower;
+ OMP_CLAUSE_NUM_TEAMS_UPPER_EXPR (c) = num_teams_upper;
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ if (clauses->device)
+ {
+ tree device;
+
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, clauses->device);
+ gfc_add_block_to_block (block, &se.pre);
+ device = gfc_evaluate_now (se.expr, block);
+ gfc_add_block_to_block (block, &se.post);
+
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_DEVICE);
+ OMP_CLAUSE_DEVICE_ID (c) = device;
+
+ if (clauses->ancestor)
+ OMP_CLAUSE_DEVICE_ANCESTOR (c) = 1;
+
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ if (clauses->thread_limit)
+ {
+ tree thread_limit;
+
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, clauses->thread_limit);
+ gfc_add_block_to_block (block, &se.pre);
+ thread_limit = gfc_evaluate_now (se.expr, block);
+ gfc_add_block_to_block (block, &se.post);
+
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_THREAD_LIMIT);
+ OMP_CLAUSE_THREAD_LIMIT_EXPR (c) = thread_limit;
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ chunk_size = NULL_TREE;
+ if (clauses->dist_chunk_size)
+ {
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, clauses->dist_chunk_size);
+ gfc_add_block_to_block (block, &se.pre);
+ chunk_size = gfc_evaluate_now (se.expr, block);
+ gfc_add_block_to_block (block, &se.post);
+ }
+
+ if (clauses->dist_sched_kind != OMP_SCHED_NONE)
+ {
+ c = build_omp_clause (gfc_get_location (&where),
+ OMP_CLAUSE_DIST_SCHEDULE);
+ OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (c) = chunk_size;
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ if (clauses->grainsize)
+ {
+ tree grainsize;
+
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, clauses->grainsize);
+ gfc_add_block_to_block (block, &se.pre);
+ grainsize = gfc_evaluate_now (se.expr, block);
+ gfc_add_block_to_block (block, &se.post);
+
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_GRAINSIZE);
+ OMP_CLAUSE_GRAINSIZE_EXPR (c) = grainsize;
+ if (clauses->grainsize_strict)
+ OMP_CLAUSE_GRAINSIZE_STRICT (c) = 1;
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ if (clauses->num_tasks)
+ {
+ tree num_tasks;
+
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, clauses->num_tasks);
+ gfc_add_block_to_block (block, &se.pre);
+ num_tasks = gfc_evaluate_now (se.expr, block);
+ gfc_add_block_to_block (block, &se.post);
+
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_NUM_TASKS);
+ OMP_CLAUSE_NUM_TASKS_EXPR (c) = num_tasks;
+ if (clauses->num_tasks_strict)
+ OMP_CLAUSE_NUM_TASKS_STRICT (c) = 1;
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ if (clauses->priority)
+ {
+ tree priority;
+
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, clauses->priority);
+ gfc_add_block_to_block (block, &se.pre);
+ priority = gfc_evaluate_now (se.expr, block);
+ gfc_add_block_to_block (block, &se.post);
+
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_PRIORITY);
+ OMP_CLAUSE_PRIORITY_EXPR (c) = priority;
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ if (clauses->detach)
+ {
+ tree detach;
+
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, clauses->detach);
+ gfc_add_block_to_block (block, &se.pre);
+ detach = se.expr;
+ gfc_add_block_to_block (block, &se.post);
+
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_DETACH);
+ TREE_ADDRESSABLE (detach) = 1;
+ OMP_CLAUSE_DECL (c) = detach;
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ if (clauses->filter)
+ {
+ tree filter;
+
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, clauses->filter);
+ gfc_add_block_to_block (block, &se.pre);
+ filter = gfc_evaluate_now (se.expr, block);
+ gfc_add_block_to_block (block, &se.post);
+
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_FILTER);
+ OMP_CLAUSE_FILTER_EXPR (c) = filter;
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ if (clauses->hint)
+ {
+ tree hint;
+
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, clauses->hint);
+ gfc_add_block_to_block (block, &se.pre);
+ hint = gfc_evaluate_now (se.expr, block);
+ gfc_add_block_to_block (block, &se.post);
+
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_HINT);
+ OMP_CLAUSE_HINT_EXPR (c) = hint;
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ if (clauses->simd)
+ {
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_SIMD);
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+ if (clauses->threads)
+ {
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_THREADS);
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+ if (clauses->nogroup)
+ {
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_NOGROUP);
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ for (int i = 0; i < OMP_DEFAULTMAP_CAT_NUM; i++)
+ {
+ if (clauses->defaultmap[i] == OMP_DEFAULTMAP_UNSET)
+ continue;
+ enum omp_clause_defaultmap_kind behavior, category;
+ switch ((gfc_omp_defaultmap_category) i)
+ {
+ case OMP_DEFAULTMAP_CAT_UNCATEGORIZED:
+ category = OMP_CLAUSE_DEFAULTMAP_CATEGORY_UNSPECIFIED;
+ break;
+ case OMP_DEFAULTMAP_CAT_SCALAR:
+ category = OMP_CLAUSE_DEFAULTMAP_CATEGORY_SCALAR;
+ break;
+ case OMP_DEFAULTMAP_CAT_AGGREGATE:
+ category = OMP_CLAUSE_DEFAULTMAP_CATEGORY_AGGREGATE;
+ break;
+ case OMP_DEFAULTMAP_CAT_ALLOCATABLE:
+ category = OMP_CLAUSE_DEFAULTMAP_CATEGORY_ALLOCATABLE;
+ break;
+ case OMP_DEFAULTMAP_CAT_POINTER:
+ category = OMP_CLAUSE_DEFAULTMAP_CATEGORY_POINTER;
+ break;
+ default: gcc_unreachable ();
+ }
+ switch (clauses->defaultmap[i])
+ {
+ case OMP_DEFAULTMAP_ALLOC:
+ behavior = OMP_CLAUSE_DEFAULTMAP_ALLOC;
+ break;
+ case OMP_DEFAULTMAP_TO: behavior = OMP_CLAUSE_DEFAULTMAP_TO; break;
+ case OMP_DEFAULTMAP_FROM: behavior = OMP_CLAUSE_DEFAULTMAP_FROM; break;
+ case OMP_DEFAULTMAP_TOFROM:
+ behavior = OMP_CLAUSE_DEFAULTMAP_TOFROM;
+ break;
+ case OMP_DEFAULTMAP_FIRSTPRIVATE:
+ behavior = OMP_CLAUSE_DEFAULTMAP_FIRSTPRIVATE;
+ break;
+ case OMP_DEFAULTMAP_NONE: behavior = OMP_CLAUSE_DEFAULTMAP_NONE; break;
+ case OMP_DEFAULTMAP_DEFAULT:
+ behavior = OMP_CLAUSE_DEFAULTMAP_DEFAULT;
+ break;
+ default: gcc_unreachable ();
+ }
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_DEFAULTMAP);
+ OMP_CLAUSE_DEFAULTMAP_SET_KIND (c, behavior, category);
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ if (clauses->depend_source)
+ {
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_DEPEND);
+ OMP_CLAUSE_DEPEND_KIND (c) = OMP_CLAUSE_DEPEND_SOURCE;
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+
+ if (clauses->async)
+ {
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_ASYNC);
+ if (clauses->async_expr)
+ OMP_CLAUSE_ASYNC_EXPR (c)
+ = gfc_convert_expr_to_tree (block, clauses->async_expr);
+ else
+ OMP_CLAUSE_ASYNC_EXPR (c) = NULL;
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+ if (clauses->seq)
+ {
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_SEQ);
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+ if (clauses->par_auto)
+ {
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_AUTO);
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+ if (clauses->if_present)
+ {
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_IF_PRESENT);
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+ if (clauses->finalize)
+ {
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_FINALIZE);
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+ if (clauses->independent)
+ {
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_INDEPENDENT);
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+ if (clauses->wait_list)
+ {
+ gfc_expr_list *el;
+
+ for (el = clauses->wait_list; el; el = el->next)
+ {
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_WAIT);
+ OMP_CLAUSE_DECL (c) = gfc_convert_expr_to_tree (block, el->expr);
+ OMP_CLAUSE_CHAIN (c) = omp_clauses;
+ omp_clauses = c;
+ }
+ }
+ if (clauses->num_gangs_expr)
+ {
+ tree num_gangs_var
+ = gfc_convert_expr_to_tree (block, clauses->num_gangs_expr);
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_NUM_GANGS);
+ OMP_CLAUSE_NUM_GANGS_EXPR (c) = num_gangs_var;
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+ if (clauses->num_workers_expr)
+ {
+ tree num_workers_var
+ = gfc_convert_expr_to_tree (block, clauses->num_workers_expr);
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_NUM_WORKERS);
+ OMP_CLAUSE_NUM_WORKERS_EXPR (c) = num_workers_var;
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+ if (clauses->vector_length_expr)
+ {
+ tree vector_length_var
+ = gfc_convert_expr_to_tree (block, clauses->vector_length_expr);
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_VECTOR_LENGTH);
+ OMP_CLAUSE_VECTOR_LENGTH_EXPR (c) = vector_length_var;
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ }
+ if (clauses->tile_list)
+ {
+ vec<tree, va_gc> *tvec;
+ gfc_expr_list *el;
+
+ vec_alloc (tvec, 4);
+
+ for (el = clauses->tile_list; el; el = el->next)
+ vec_safe_push (tvec, gfc_convert_expr_to_tree (block, el->expr));
+
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_TILE);
+ OMP_CLAUSE_TILE_LIST (c) = build_tree_list_vec (tvec);
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ tvec->truncate (0);
+ }
+ if (clauses->vector)
+ {
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_VECTOR);
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+
+ if (clauses->vector_expr)
+ {
+ tree vector_var
+ = gfc_convert_expr_to_tree (block, clauses->vector_expr);
+ OMP_CLAUSE_VECTOR_EXPR (c) = vector_var;
+
+ /* TODO: We're not capturing location information for individual
+ clauses. However, if we have an expression attached to the
+ clause, that one provides better location information. */
+ OMP_CLAUSE_LOCATION (c)
+ = gfc_get_location (&clauses->vector_expr->where);
+ }
+ }
+ if (clauses->worker)
+ {
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_WORKER);
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+
+ if (clauses->worker_expr)
+ {
+ tree worker_var
+ = gfc_convert_expr_to_tree (block, clauses->worker_expr);
+ OMP_CLAUSE_WORKER_EXPR (c) = worker_var;
+
+ /* TODO: We're not capturing location information for individual
+ clauses. However, if we have an expression attached to the
+ clause, that one provides better location information. */
+ OMP_CLAUSE_LOCATION (c)
+ = gfc_get_location (&clauses->worker_expr->where);
+ }
+ }
+ if (clauses->gang)
+ {
+ tree arg;
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_GANG);
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+
+ if (clauses->gang_num_expr)
+ {
+ arg = gfc_convert_expr_to_tree (block, clauses->gang_num_expr);
+ OMP_CLAUSE_GANG_EXPR (c) = arg;
+
+ /* TODO: We're not capturing location information for individual
+ clauses. However, if we have an expression attached to the
+ clause, that one provides better location information. */
+ OMP_CLAUSE_LOCATION (c)
+ = gfc_get_location (&clauses->gang_num_expr->where);
+ }
+
+ if (clauses->gang_static)
+ {
+ arg = clauses->gang_static_expr
+ ? gfc_convert_expr_to_tree (block, clauses->gang_static_expr)
+ : integer_minus_one_node;
+ OMP_CLAUSE_GANG_STATIC_EXPR (c) = arg;
+ }
+ }
+ if (clauses->bind != OMP_BIND_UNSET)
+ {
+ c = build_omp_clause (gfc_get_location (&where), OMP_CLAUSE_BIND);
+ omp_clauses = gfc_trans_add_clause (c, omp_clauses);
+ switch (clauses->bind)
+ {
+ case OMP_BIND_TEAMS:
+ OMP_CLAUSE_BIND_KIND (c) = OMP_CLAUSE_BIND_TEAMS;
+ break;
+ case OMP_BIND_PARALLEL:
+ OMP_CLAUSE_BIND_KIND (c) = OMP_CLAUSE_BIND_PARALLEL;
+ break;
+ case OMP_BIND_THREAD:
+ OMP_CLAUSE_BIND_KIND (c) = OMP_CLAUSE_BIND_THREAD;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+ /* OpenACC 'nohost' clauses cannot appear here. */
+ gcc_checking_assert (!clauses->nohost);
+
+ return nreverse (omp_clauses);
+}
+
+/* Like gfc_trans_code, but force creation of a BIND_EXPR around it. */
+
+static tree
+gfc_trans_omp_code (gfc_code *code, bool force_empty)
+{
+ tree stmt;
+
+ pushlevel ();
+ stmt = gfc_trans_code (code);
+ if (TREE_CODE (stmt) != BIND_EXPR)
+ {
+ if (!IS_EMPTY_STMT (stmt) || force_empty)
+ {
+ tree block = poplevel (1, 0);
+ stmt = build3_v (BIND_EXPR, NULL, stmt, block);
+ }
+ else
+ poplevel (0, 0);
+ }
+ else
+ poplevel (0, 0);
+ return stmt;
+}
+
+/* Translate OpenACC 'parallel', 'kernels', 'serial', 'data', 'host_data'
+ construct. */
+
+static tree
+gfc_trans_oacc_construct (gfc_code *code)
+{
+ stmtblock_t block;
+ tree stmt, oacc_clauses;
+ enum tree_code construct_code;
+
+ switch (code->op)
+ {
+ case EXEC_OACC_PARALLEL:
+ construct_code = OACC_PARALLEL;
+ break;
+ case EXEC_OACC_KERNELS:
+ construct_code = OACC_KERNELS;
+ break;
+ case EXEC_OACC_SERIAL:
+ construct_code = OACC_SERIAL;
+ break;
+ case EXEC_OACC_DATA:
+ construct_code = OACC_DATA;
+ break;
+ case EXEC_OACC_HOST_DATA:
+ construct_code = OACC_HOST_DATA;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ gfc_start_block (&block);
+ oacc_clauses = gfc_trans_omp_clauses (&block, code->ext.omp_clauses,
+ code->loc, false, true);
+ stmt = gfc_trans_omp_code (code->block->next, true);
+ stmt = build2_loc (gfc_get_location (&code->loc), construct_code,
+ void_type_node, stmt, oacc_clauses);
+ gfc_add_expr_to_block (&block, stmt);
+ return gfc_finish_block (&block);
+}
+
+/* Translate OpenACC 'update', 'enter data', 'exit data' and 'cache'
+   executable directives.  */
+static tree
+gfc_trans_oacc_executable_directive (gfc_code *code)
+{
+ stmtblock_t block;
+ tree stmt, oacc_clauses;
+ enum tree_code construct_code;
+
+ switch (code->op)
+ {
+ case EXEC_OACC_UPDATE:
+ construct_code = OACC_UPDATE;
+ break;
+ case EXEC_OACC_ENTER_DATA:
+ construct_code = OACC_ENTER_DATA;
+ break;
+ case EXEC_OACC_EXIT_DATA:
+ construct_code = OACC_EXIT_DATA;
+ break;
+ case EXEC_OACC_CACHE:
+ construct_code = OACC_CACHE;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ gfc_start_block (&block);
+ oacc_clauses = gfc_trans_omp_clauses (&block, code->ext.omp_clauses,
+ code->loc, false, true);
+ stmt = build1_loc (input_location, construct_code, void_type_node,
+ oacc_clauses);
+ gfc_add_expr_to_block (&block, stmt);
+ return gfc_finish_block (&block);
+}
+
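+/* Translate the OpenACC 'wait' directive into a call to the GOACC_wait
+   builtin.  */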
+static tree
+gfc_trans_oacc_wait_directive (gfc_code *code)
+{
+ stmtblock_t block;
+ tree stmt, t;
+ vec<tree, va_gc> *args;
+ int nparms = 0;
+ gfc_expr_list *el;
+ gfc_omp_clauses *clauses = code->ext.omp_clauses;
+ location_t loc = input_location;
+
+ for (el = clauses->wait_list; el; el = el->next)
+ nparms++;
+
+ vec_alloc (args, nparms + 2);
+ stmt = builtin_decl_explicit (BUILT_IN_GOACC_WAIT);
+
+ gfc_start_block (&block);
+
+ if (clauses->async_expr)
+ t = gfc_convert_expr_to_tree (&block, clauses->async_expr);
+ else
+ t = build_int_cst (integer_type_node, -2);
+
+ args->quick_push (t);
+ args->quick_push (build_int_cst (integer_type_node, nparms));
+
+ for (el = clauses->wait_list; el; el = el->next)
+ args->quick_push (gfc_convert_expr_to_tree (&block, el->expr));
+
+ stmt = build_call_expr_loc_vec (loc, stmt, args);
+ gfc_add_expr_to_block (&block, stmt);
+
+ vec_free (args);
+
+ return gfc_finish_block (&block);
+}
+
+static tree gfc_trans_omp_sections (gfc_code *, gfc_omp_clauses *);
+static tree gfc_trans_omp_workshare (gfc_code *, gfc_omp_clauses *);
+
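+/* Translate the OpenMP ATOMIC construct: the read, write, update, capture
+   and compare forms, producing OMP_ATOMIC* trees.  */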
+static tree
+gfc_trans_omp_atomic (gfc_code *code)
+{
+ gfc_code *atomic_code = code->block;
+ gfc_se lse;
+ gfc_se rse;
+ gfc_se vse;
+ gfc_expr *expr1, *expr2, *e, *capture_expr1 = NULL, *capture_expr2 = NULL;
+ gfc_symbol *var;
+ stmtblock_t block;
+ tree lhsaddr, type, rhs, x, compare = NULL_TREE, comp_tgt = NULL_TREE;
+ enum tree_code op = ERROR_MARK;
+ enum tree_code aop = OMP_ATOMIC;
+ bool var_on_left = false, else_branch = false;
+ enum omp_memory_order mo, fail_mo;
+ switch (atomic_code->ext.omp_clauses->memorder)
+ {
+ case OMP_MEMORDER_UNSET: mo = OMP_MEMORY_ORDER_UNSPECIFIED; break;
+ case OMP_MEMORDER_ACQ_REL: mo = OMP_MEMORY_ORDER_ACQ_REL; break;
+ case OMP_MEMORDER_ACQUIRE: mo = OMP_MEMORY_ORDER_ACQUIRE; break;
+ case OMP_MEMORDER_RELAXED: mo = OMP_MEMORY_ORDER_RELAXED; break;
+ case OMP_MEMORDER_RELEASE: mo = OMP_MEMORY_ORDER_RELEASE; break;
+ case OMP_MEMORDER_SEQ_CST: mo = OMP_MEMORY_ORDER_SEQ_CST; break;
+ default: gcc_unreachable ();
+ }
+ switch (atomic_code->ext.omp_clauses->fail)
+ {
+ case OMP_MEMORDER_UNSET: fail_mo = OMP_FAIL_MEMORY_ORDER_UNSPECIFIED; break;
+ case OMP_MEMORDER_ACQUIRE: fail_mo = OMP_FAIL_MEMORY_ORDER_ACQUIRE; break;
+ case OMP_MEMORDER_RELAXED: fail_mo = OMP_FAIL_MEMORY_ORDER_RELAXED; break;
+ case OMP_MEMORDER_SEQ_CST: fail_mo = OMP_FAIL_MEMORY_ORDER_SEQ_CST; break;
+ default: gcc_unreachable ();
+ }
+ mo = (omp_memory_order) (mo | fail_mo);
+
+ code = code->block->next;
+ if (atomic_code->ext.omp_clauses->compare)
+ {
+ gfc_expr *comp_expr;
+ if (code->op == EXEC_IF)
+ {
+ comp_expr = code->block->expr1;
+ gcc_assert (code->block->next->op == EXEC_ASSIGN);
+ expr1 = code->block->next->expr1;
+ expr2 = code->block->next->expr2;
+ if (code->block->block)
+ {
+ gcc_assert (atomic_code->ext.omp_clauses->capture
+ && code->block->block->next->op == EXEC_ASSIGN);
+ else_branch = true;
+ aop = OMP_ATOMIC_CAPTURE_OLD;
+ capture_expr1 = code->block->block->next->expr1;
+ capture_expr2 = code->block->block->next->expr2;
+ }
+ else if (atomic_code->ext.omp_clauses->capture)
+ {
+ gcc_assert (code->next->op == EXEC_ASSIGN);
+ aop = OMP_ATOMIC_CAPTURE_NEW;
+ capture_expr1 = code->next->expr1;
+ capture_expr2 = code->next->expr2;
+ }
+ }
+ else
+ {
+ gcc_assert (atomic_code->ext.omp_clauses->capture
+ && code->op == EXEC_ASSIGN
+ && code->next->op == EXEC_IF);
+ aop = OMP_ATOMIC_CAPTURE_OLD;
+ capture_expr1 = code->expr1;
+ capture_expr2 = code->expr2;
+ expr1 = code->next->block->next->expr1;
+ expr2 = code->next->block->next->expr2;
+ comp_expr = code->next->block->expr1;
+ }
+ gfc_init_se (&lse, NULL);
+ gfc_conv_expr (&lse, comp_expr->value.op.op2);
+ gfc_add_block_to_block (&block, &lse.pre);
+ compare = lse.expr;
+ var = expr1->symtree->n.sym;
+ }
+ else
+ {
+ gcc_assert (code->op == EXEC_ASSIGN);
+ expr1 = code->expr1;
+ expr2 = code->expr2;
+ if (atomic_code->ext.omp_clauses->capture
+ && (expr2->expr_type == EXPR_VARIABLE
+ || (expr2->expr_type == EXPR_FUNCTION
+ && expr2->value.function.isym
+ && expr2->value.function.isym->id == GFC_ISYM_CONVERSION
+ && (expr2->value.function.actual->expr->expr_type
+ == EXPR_VARIABLE))))
+ {
+ capture_expr1 = expr1;
+ capture_expr2 = expr2;
+ expr1 = code->next->expr1;
+ expr2 = code->next->expr2;
+ aop = OMP_ATOMIC_CAPTURE_OLD;
+ }
+ else if (atomic_code->ext.omp_clauses->capture)
+ {
+ aop = OMP_ATOMIC_CAPTURE_NEW;
+ capture_expr1 = code->next->expr1;
+ capture_expr2 = code->next->expr2;
+ }
+ var = expr1->symtree->n.sym;
+ }
+
+ gfc_init_se (&lse, NULL);
+ gfc_init_se (&rse, NULL);
+ gfc_init_se (&vse, NULL);
+ gfc_start_block (&block);
+
+ if (((atomic_code->ext.omp_clauses->atomic_op & GFC_OMP_ATOMIC_MASK)
+ != GFC_OMP_ATOMIC_WRITE)
+ && expr2->expr_type == EXPR_FUNCTION
+ && expr2->value.function.isym
+ && expr2->value.function.isym->id == GFC_ISYM_CONVERSION)
+ expr2 = expr2->value.function.actual->expr;
+
+ if ((atomic_code->ext.omp_clauses->atomic_op & GFC_OMP_ATOMIC_MASK)
+ == GFC_OMP_ATOMIC_READ)
+ {
+ gfc_conv_expr (&vse, expr1);
+ gfc_add_block_to_block (&block, &vse.pre);
+
+ gfc_conv_expr (&lse, expr2);
+ gfc_add_block_to_block (&block, &lse.pre);
+ type = TREE_TYPE (lse.expr);
+ lhsaddr = gfc_build_addr_expr (NULL, lse.expr);
+
+ x = build1 (OMP_ATOMIC_READ, type, lhsaddr);
+ OMP_ATOMIC_MEMORY_ORDER (x) = mo;
+ x = convert (TREE_TYPE (vse.expr), x);
+ gfc_add_modify (&block, vse.expr, x);
+
+ gfc_add_block_to_block (&block, &lse.pre);
+ gfc_add_block_to_block (&block, &rse.pre);
+
+ return gfc_finish_block (&block);
+ }
+
+ if (capture_expr2
+ && capture_expr2->expr_type == EXPR_FUNCTION
+ && capture_expr2->value.function.isym
+ && capture_expr2->value.function.isym->id == GFC_ISYM_CONVERSION)
+ capture_expr2 = capture_expr2->value.function.actual->expr;
+ gcc_assert (!capture_expr2 || capture_expr2->expr_type == EXPR_VARIABLE);
+
+ if (aop == OMP_ATOMIC_CAPTURE_OLD)
+ {
+ gfc_conv_expr (&vse, capture_expr1);
+ gfc_add_block_to_block (&block, &vse.pre);
+ gfc_conv_expr (&lse, capture_expr2);
+ gfc_add_block_to_block (&block, &lse.pre);
+ gfc_init_se (&lse, NULL);
+ }
+
+ gfc_conv_expr (&lse, expr1);
+ gfc_add_block_to_block (&block, &lse.pre);
+ type = TREE_TYPE (lse.expr);
+ lhsaddr = gfc_build_addr_expr (NULL, lse.expr);
+
+ if (((atomic_code->ext.omp_clauses->atomic_op & GFC_OMP_ATOMIC_MASK)
+ == GFC_OMP_ATOMIC_WRITE)
+ || (atomic_code->ext.omp_clauses->atomic_op & GFC_OMP_ATOMIC_SWAP)
+ || compare)
+ {
+ gfc_conv_expr (&rse, expr2);
+ gfc_add_block_to_block (&block, &rse.pre);
+ }
+ else if (expr2->expr_type == EXPR_OP)
+ {
+ gfc_expr *e;
+ switch (expr2->value.op.op)
+ {
+ case INTRINSIC_PLUS:
+ op = PLUS_EXPR;
+ break;
+ case INTRINSIC_TIMES:
+ op = MULT_EXPR;
+ break;
+ case INTRINSIC_MINUS:
+ op = MINUS_EXPR;
+ break;
+ case INTRINSIC_DIVIDE:
+ if (expr2->ts.type == BT_INTEGER)
+ op = TRUNC_DIV_EXPR;
+ else
+ op = RDIV_EXPR;
+ break;
+ case INTRINSIC_AND:
+ op = TRUTH_ANDIF_EXPR;
+ break;
+ case INTRINSIC_OR:
+ op = TRUTH_ORIF_EXPR;
+ break;
+ case INTRINSIC_EQV:
+ op = EQ_EXPR;
+ break;
+ case INTRINSIC_NEQV:
+ op = NE_EXPR;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ e = expr2->value.op.op1;
+ if (e->expr_type == EXPR_FUNCTION
+ && e->value.function.isym
+ && e->value.function.isym->id == GFC_ISYM_CONVERSION)
+ e = e->value.function.actual->expr;
+ if (e->expr_type == EXPR_VARIABLE
+ && e->symtree != NULL
+ && e->symtree->n.sym == var)
+ {
+ expr2 = expr2->value.op.op2;
+ var_on_left = true;
+ }
+ else
+ {
+ e = expr2->value.op.op2;
+ if (e->expr_type == EXPR_FUNCTION
+ && e->value.function.isym
+ && e->value.function.isym->id == GFC_ISYM_CONVERSION)
+ e = e->value.function.actual->expr;
+ gcc_assert (e->expr_type == EXPR_VARIABLE
+ && e->symtree != NULL
+ && e->symtree->n.sym == var);
+ expr2 = expr2->value.op.op1;
+ var_on_left = false;
+ }
+ gfc_conv_expr (&rse, expr2);
+ gfc_add_block_to_block (&block, &rse.pre);
+ }
+ else
+ {
+ gcc_assert (expr2->expr_type == EXPR_FUNCTION);
+ switch (expr2->value.function.isym->id)
+ {
+ case GFC_ISYM_MIN:
+ op = MIN_EXPR;
+ break;
+ case GFC_ISYM_MAX:
+ op = MAX_EXPR;
+ break;
+ case GFC_ISYM_IAND:
+ op = BIT_AND_EXPR;
+ break;
+ case GFC_ISYM_IOR:
+ op = BIT_IOR_EXPR;
+ break;
+ case GFC_ISYM_IEOR:
+ op = BIT_XOR_EXPR;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ e = expr2->value.function.actual->expr;
+ if (e->expr_type == EXPR_FUNCTION
+ && e->value.function.isym
+ && e->value.function.isym->id == GFC_ISYM_CONVERSION)
+ e = e->value.function.actual->expr;
+ gcc_assert (e->expr_type == EXPR_VARIABLE
+ && e->symtree != NULL
+ && e->symtree->n.sym == var);
+
+ gfc_conv_expr (&rse, expr2->value.function.actual->next->expr);
+ gfc_add_block_to_block (&block, &rse.pre);
+ if (expr2->value.function.actual->next->next != NULL)
+ {
+ tree accum = gfc_create_var (TREE_TYPE (rse.expr), NULL);
+ gfc_actual_arglist *arg;
+
+ gfc_add_modify (&block, accum, rse.expr);
+ for (arg = expr2->value.function.actual->next->next; arg;
+ arg = arg->next)
+ {
+ gfc_init_block (&rse.pre);
+ gfc_conv_expr (&rse, arg->expr);
+ gfc_add_block_to_block (&block, &rse.pre);
+ x = fold_build2_loc (input_location, op, TREE_TYPE (accum),
+ accum, rse.expr);
+ gfc_add_modify (&block, accum, x);
+ }
+
+ rse.expr = accum;
+ }
+
+ expr2 = expr2->value.function.actual->next->expr;
+ }
+
+ lhsaddr = save_expr (lhsaddr);
+ if (TREE_CODE (lhsaddr) != SAVE_EXPR
+ && (TREE_CODE (lhsaddr) != ADDR_EXPR
+ || !VAR_P (TREE_OPERAND (lhsaddr, 0))))
+ {
+      /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
+	 it even after unsharing the function body.  */
+ tree var = create_tmp_var_raw (TREE_TYPE (lhsaddr));
+ DECL_CONTEXT (var) = current_function_decl;
+ lhsaddr = build4 (TARGET_EXPR, TREE_TYPE (lhsaddr), var, lhsaddr,
+ NULL_TREE, NULL_TREE);
+ }
+
+ if (compare)
+ {
+ tree var = create_tmp_var_raw (TREE_TYPE (lhsaddr));
+ DECL_CONTEXT (var) = current_function_decl;
+ lhsaddr = build4 (TARGET_EXPR, TREE_TYPE (lhsaddr), var, lhsaddr, NULL,
+ NULL);
+ lse.expr = build_fold_indirect_ref_loc (input_location, lhsaddr);
+ compare = convert (TREE_TYPE (lse.expr), compare);
+ compare = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+ lse.expr, compare);
+ }
+
+ if (expr2->expr_type == EXPR_VARIABLE || compare)
+ rhs = rse.expr;
+ else
+ rhs = gfc_evaluate_now (rse.expr, &block);
+
+ if (((atomic_code->ext.omp_clauses->atomic_op & GFC_OMP_ATOMIC_MASK)
+ == GFC_OMP_ATOMIC_WRITE)
+ || (atomic_code->ext.omp_clauses->atomic_op & GFC_OMP_ATOMIC_SWAP)
+ || compare)
+ x = rhs;
+ else
+ {
+ x = convert (TREE_TYPE (rhs),
+ build_fold_indirect_ref_loc (input_location, lhsaddr));
+ if (var_on_left)
+ x = fold_build2_loc (input_location, op, TREE_TYPE (rhs), x, rhs);
+ else
+ x = fold_build2_loc (input_location, op, TREE_TYPE (rhs), rhs, x);
+ }
+
+ if (TREE_CODE (TREE_TYPE (rhs)) == COMPLEX_TYPE
+ && TREE_CODE (type) != COMPLEX_TYPE)
+ x = fold_build1_loc (input_location, REALPART_EXPR,
+ TREE_TYPE (TREE_TYPE (rhs)), x);
+
+ gfc_add_block_to_block (&block, &lse.pre);
+ gfc_add_block_to_block (&block, &rse.pre);
+
+ if (aop == OMP_ATOMIC_CAPTURE_NEW)
+ {
+ gfc_conv_expr (&vse, capture_expr1);
+ gfc_add_block_to_block (&block, &vse.pre);
+ gfc_add_block_to_block (&block, &lse.pre);
+ }
+
+ if (compare && else_branch)
+ {
+ tree var2 = create_tmp_var_raw (boolean_type_node);
+ DECL_CONTEXT (var2) = current_function_decl;
+ comp_tgt = build4 (TARGET_EXPR, boolean_type_node, var2,
+ boolean_false_node, NULL, NULL);
+ compare = fold_build2_loc (input_location, MODIFY_EXPR, TREE_TYPE (var2),
+ var2, compare);
+ TREE_OPERAND (compare, 0) = comp_tgt;
+ compare = omit_one_operand_loc (input_location, boolean_type_node,
+ compare, comp_tgt);
+ }
+
+ if (compare)
+ x = build3_loc (input_location, COND_EXPR, type, compare,
+ convert (type, x), lse.expr);
+
+ if (aop == OMP_ATOMIC)
+ {
+ x = build2_v (OMP_ATOMIC, lhsaddr, convert (type, x));
+ OMP_ATOMIC_MEMORY_ORDER (x) = mo;
+ OMP_ATOMIC_WEAK (x) = atomic_code->ext.omp_clauses->weak;
+ gfc_add_expr_to_block (&block, x);
+ }
+ else
+ {
+ x = build2 (aop, type, lhsaddr, convert (type, x));
+ OMP_ATOMIC_MEMORY_ORDER (x) = mo;
+ OMP_ATOMIC_WEAK (x) = atomic_code->ext.omp_clauses->weak;
+ if (compare && else_branch)
+ {
+ tree vtmp = create_tmp_var_raw (TREE_TYPE (x));
+ DECL_CONTEXT (vtmp) = current_function_decl;
+ x = fold_build2_loc (input_location, MODIFY_EXPR,
+ TREE_TYPE (vtmp), vtmp, x);
+ vtmp = build4 (TARGET_EXPR, TREE_TYPE (vtmp), vtmp,
+ build_zero_cst (TREE_TYPE (vtmp)), NULL, NULL);
+ TREE_OPERAND (x, 0) = vtmp;
+ tree x2 = convert (TREE_TYPE (vse.expr), vtmp);
+ x2 = fold_build2_loc (input_location, MODIFY_EXPR,
+ TREE_TYPE (vse.expr), vse.expr, x2);
+ x2 = build3_loc (input_location, COND_EXPR, void_type_node, comp_tgt,
+ void_node, x2);
+ x = omit_one_operand_loc (input_location, TREE_TYPE (x2), x2, x);
+ gfc_add_expr_to_block (&block, x);
+ }
+ else
+ {
+ x = convert (TREE_TYPE (vse.expr), x);
+ gfc_add_modify (&block, vse.expr, x);
+ }
+ }
+
+ return gfc_finish_block (&block);
+}
+
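+/* Translate the OpenMP BARRIER directive into a GOMP_barrier call.  */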
+static tree
+gfc_trans_omp_barrier (void)
+{
+ tree decl = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER);
+ return build_call_expr_loc (input_location, decl, 0);
+}
+
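+/* Translate the OpenMP CANCEL directive into a GOMP_cancel call, evaluating
+   the optional IF clause.  */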
+static tree
+gfc_trans_omp_cancel (gfc_code *code)
+{
+ int mask = 0;
+ tree ifc = boolean_true_node;
+ stmtblock_t block;
+ switch (code->ext.omp_clauses->cancel)
+ {
+ case OMP_CANCEL_PARALLEL: mask = 1; break;
+ case OMP_CANCEL_DO: mask = 2; break;
+ case OMP_CANCEL_SECTIONS: mask = 4; break;
+ case OMP_CANCEL_TASKGROUP: mask = 8; break;
+ default: gcc_unreachable ();
+ }
+ gfc_start_block (&block);
+ if (code->ext.omp_clauses->if_expr
+ || code->ext.omp_clauses->if_exprs[OMP_IF_CANCEL])
+ {
+ gfc_se se;
+ tree if_var;
+
+ gcc_assert ((code->ext.omp_clauses->if_expr == NULL)
+ ^ (code->ext.omp_clauses->if_exprs[OMP_IF_CANCEL] == NULL));
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, code->ext.omp_clauses->if_expr != NULL
+ ? code->ext.omp_clauses->if_expr
+ : code->ext.omp_clauses->if_exprs[OMP_IF_CANCEL]);
+ gfc_add_block_to_block (&block, &se.pre);
+ if_var = gfc_evaluate_now (se.expr, &block);
+ gfc_add_block_to_block (&block, &se.post);
+ tree type = TREE_TYPE (if_var);
+ ifc = fold_build2_loc (input_location, NE_EXPR,
+ boolean_type_node, if_var,
+ build_zero_cst (type));
+ }
+ tree decl = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL);
+ tree c_bool_type = TREE_TYPE (TREE_TYPE (decl));
+ ifc = fold_convert (c_bool_type, ifc);
+ gfc_add_expr_to_block (&block,
+ build_call_expr_loc (input_location, decl, 2,
+ build_int_cst (integer_type_node,
+ mask), ifc));
+ return gfc_finish_block (&block);
+}
+
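+/* Translate the OpenMP CANCELLATION POINT directive into a
+   GOMP_cancellation_point call.  */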
+static tree
+gfc_trans_omp_cancellation_point (gfc_code *code)
+{
+ int mask = 0;
+ switch (code->ext.omp_clauses->cancel)
+ {
+ case OMP_CANCEL_PARALLEL: mask = 1; break;
+ case OMP_CANCEL_DO: mask = 2; break;
+ case OMP_CANCEL_SECTIONS: mask = 4; break;
+ case OMP_CANCEL_TASKGROUP: mask = 8; break;
+ default: gcc_unreachable ();
+ }
+ tree decl = builtin_decl_explicit (BUILT_IN_GOMP_CANCELLATION_POINT);
+ return build_call_expr_loc (input_location, decl, 1,
+ build_int_cst (integer_type_node, mask));
+}
+
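+/* Translate the OpenMP CRITICAL construct, with its optional name and
+   clauses.  */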
+static tree
+gfc_trans_omp_critical (gfc_code *code)
+{
+ stmtblock_t block;
+ tree stmt, name = NULL_TREE;
+ if (code->ext.omp_clauses->critical_name != NULL)
+ name = get_identifier (code->ext.omp_clauses->critical_name);
+ gfc_start_block (&block);
+ stmt = make_node (OMP_CRITICAL);
+ TREE_TYPE (stmt) = void_type_node;
+ OMP_CRITICAL_BODY (stmt) = gfc_trans_code (code->block->next);
+ OMP_CRITICAL_NAME (stmt) = name;
+ OMP_CRITICAL_CLAUSES (stmt) = gfc_trans_omp_clauses (&block,
+ code->ext.omp_clauses,
+ code->loc);
+ gfc_add_expr_to_block (&block, stmt);
+ return gfc_finish_block (&block);
+}
+
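+/* A DO loop iteration variable and the value to assign to it at the top of
+   the loop body.  */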
+typedef struct dovar_init_d {
+ tree var;
+ tree init;
+} dovar_init;
+
+
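+/* Translate a loop-associated construct (OpenMP DO, SIMD, DISTRIBUTE, LOOP,
+   TASKLOOP, or OpenACC LOOP) into an OMP_FOR-family tree, handling collapsed
+   and tiled loop nests.  */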
+static tree
+gfc_trans_omp_do (gfc_code *code, gfc_exec_op op, stmtblock_t *pblock,
+ gfc_omp_clauses *do_clauses, tree par_clauses)
+{
+ gfc_se se;
+ tree dovar, stmt, from, to, step, type, init, cond, incr, orig_decls;
+ tree count = NULL_TREE, cycle_label, tmp, omp_clauses;
+ stmtblock_t block;
+ stmtblock_t body;
+ gfc_omp_clauses *clauses = code->ext.omp_clauses;
+ int i, collapse = clauses->collapse;
+ vec<dovar_init> inits = vNULL;
+ dovar_init *di;
+ unsigned ix;
+ vec<tree, va_heap, vl_embed> *saved_doacross_steps = doacross_steps;
+ gfc_expr_list *tile = do_clauses ? do_clauses->tile_list : clauses->tile_list;
+
+ /* Both collapsed and tiled loops are lowered the same way. In
+ OpenACC, those clauses are not compatible, so prioritize the tile
+ clause, if present. */
+ if (tile)
+ {
+ collapse = 0;
+ for (gfc_expr_list *el = tile; el; el = el->next)
+ collapse++;
+ }
+
+ doacross_steps = NULL;
+ if (clauses->orderedc)
+ collapse = clauses->orderedc;
+ if (collapse <= 0)
+ collapse = 1;
+
+ code = code->block->next;
+ gcc_assert (code->op == EXEC_DO);
+
+ init = make_tree_vec (collapse);
+ cond = make_tree_vec (collapse);
+ incr = make_tree_vec (collapse);
+ orig_decls = clauses->orderedc ? make_tree_vec (collapse) : NULL_TREE;
+
+ if (pblock == NULL)
+ {
+ gfc_start_block (&block);
+ pblock = &block;
+ }
+
+  /* The simd schedule modifier is only useful for composite 'do simd' and
+     other constructs that include it, where gfc_trans_omp_do is called only
+     for the simd construct and the DO's clauses are translated elsewhere.  */
+ do_clauses->sched_simd = false;
+
+ omp_clauses = gfc_trans_omp_clauses (pblock, do_clauses, code->loc);
+
+ for (i = 0; i < collapse; i++)
+ {
+ int simple = 0;
+ int dovar_found = 0;
+ tree dovar_decl;
+
+ if (clauses)
+ {
+ gfc_omp_namelist *n = NULL;
+ if (op == EXEC_OMP_SIMD && collapse == 1)
+ for (n = clauses->lists[OMP_LIST_LINEAR];
+ n != NULL; n = n->next)
+ if (code->ext.iterator->var->symtree->n.sym == n->sym)
+ {
+ dovar_found = 3;
+ break;
+ }
+ if (n == NULL && op != EXEC_OMP_DISTRIBUTE)
+ for (n = clauses->lists[OMP_LIST_LASTPRIVATE];
+ n != NULL; n = n->next)
+ if (code->ext.iterator->var->symtree->n.sym == n->sym)
+ {
+ dovar_found = 2;
+ break;
+ }
+ if (n == NULL)
+ for (n = clauses->lists[OMP_LIST_PRIVATE]; n != NULL; n = n->next)
+ if (code->ext.iterator->var->symtree->n.sym == n->sym)
+ {
+ dovar_found = 1;
+ break;
+ }
+ }
+
+ /* Evaluate all the expressions in the iterator. */
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr_lhs (&se, code->ext.iterator->var);
+ gfc_add_block_to_block (pblock, &se.pre);
+ dovar = se.expr;
+ type = TREE_TYPE (dovar);
+ gcc_assert (TREE_CODE (type) == INTEGER_TYPE);
+
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr_val (&se, code->ext.iterator->start);
+ gfc_add_block_to_block (pblock, &se.pre);
+ from = gfc_evaluate_now (se.expr, pblock);
+
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr_val (&se, code->ext.iterator->end);
+ gfc_add_block_to_block (pblock, &se.pre);
+ to = gfc_evaluate_now (se.expr, pblock);
+
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr_val (&se, code->ext.iterator->step);
+ gfc_add_block_to_block (pblock, &se.pre);
+ step = gfc_evaluate_now (se.expr, pblock);
+ dovar_decl = dovar;
+
+ /* Special case simple loops. */
+ if (VAR_P (dovar))
+ {
+ if (integer_onep (step))
+ simple = 1;
+ else if (tree_int_cst_equal (step, integer_minus_one_node))
+ simple = -1;
+ }
+ else
+ dovar_decl
+ = gfc_trans_omp_variable (code->ext.iterator->var->symtree->n.sym,
+ false);
+
+ /* Loop body. */
+ if (simple)
+ {
+ TREE_VEC_ELT (init, i) = build2_v (MODIFY_EXPR, dovar, from);
+ /* The condition should not be folded. */
+ TREE_VEC_ELT (cond, i) = build2_loc (input_location, simple > 0
+ ? LE_EXPR : GE_EXPR,
+ logical_type_node, dovar, to);
+ TREE_VEC_ELT (incr, i) = fold_build2_loc (input_location, PLUS_EXPR,
+ type, dovar, step);
+ TREE_VEC_ELT (incr, i) = fold_build2_loc (input_location,
+ MODIFY_EXPR,
+ type, dovar,
+ TREE_VEC_ELT (incr, i));
+ }
+ else
+ {
+ /* STEP is not 1 or -1. Use:
+ for (count = 0; count < (to + step - from) / step; count++)
+ {
+ dovar = from + count * step;
+ body;
+ cycle_label:;
+ } */
+ tmp = fold_build2_loc (input_location, MINUS_EXPR, type, step, from);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, type, to, tmp);
+ tmp = fold_build2_loc (input_location, TRUNC_DIV_EXPR, type, tmp,
+ step);
+ tmp = gfc_evaluate_now (tmp, pblock);
+ count = gfc_create_var (type, "count");
+ TREE_VEC_ELT (init, i) = build2_v (MODIFY_EXPR, count,
+ build_int_cst (type, 0));
+ /* The condition should not be folded. */
+ TREE_VEC_ELT (cond, i) = build2_loc (input_location, LT_EXPR,
+ logical_type_node,
+ count, tmp);
+ TREE_VEC_ELT (incr, i) = fold_build2_loc (input_location, PLUS_EXPR,
+ type, count,
+ build_int_cst (type, 1));
+ TREE_VEC_ELT (incr, i) = fold_build2_loc (input_location,
+ MODIFY_EXPR, type, count,
+ TREE_VEC_ELT (incr, i));
+
+ /* Initialize DOVAR. */
+ tmp = fold_build2_loc (input_location, MULT_EXPR, type, count, step);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, type, from, tmp);
+ dovar_init e = {dovar, tmp};
+ inits.safe_push (e);
+ if (clauses->orderedc)
+ {
+ if (doacross_steps == NULL)
+ vec_safe_grow_cleared (doacross_steps, clauses->orderedc, true);
+ (*doacross_steps)[i] = step;
+ }
+ }
+ if (orig_decls)
+ TREE_VEC_ELT (orig_decls, i) = dovar_decl;
+
+ if (dovar_found == 3
+ && op == EXEC_OMP_SIMD
+ && collapse == 1
+ && !simple)
+ {
+ for (tmp = omp_clauses; tmp; tmp = OMP_CLAUSE_CHAIN (tmp))
+ if (OMP_CLAUSE_CODE (tmp) == OMP_CLAUSE_LINEAR
+ && OMP_CLAUSE_DECL (tmp) == dovar)
+ {
+ OMP_CLAUSE_LINEAR_NO_COPYIN (tmp) = 1;
+ break;
+ }
+ }
+ if (!dovar_found && op == EXEC_OMP_SIMD)
+ {
+ if (collapse == 1)
+ {
+ tmp = build_omp_clause (input_location, OMP_CLAUSE_LINEAR);
+ OMP_CLAUSE_LINEAR_STEP (tmp) = step;
+ OMP_CLAUSE_LINEAR_NO_COPYIN (tmp) = 1;
+ OMP_CLAUSE_DECL (tmp) = dovar_decl;
+ omp_clauses = gfc_trans_add_clause (tmp, omp_clauses);
+ }
+ if (!simple)
+ dovar_found = 3;
+ }
+ else if (!dovar_found && !simple)
+ {
+ tmp = build_omp_clause (input_location, OMP_CLAUSE_PRIVATE);
+ OMP_CLAUSE_DECL (tmp) = dovar_decl;
+ omp_clauses = gfc_trans_add_clause (tmp, omp_clauses);
+ }
+ if (dovar_found > 1)
+ {
+ tree c = NULL;
+
+ tmp = NULL;
+ if (!simple)
+ {
+		  /* If dovar is lastprivate, but a different counter is
+		     used, dovar += step needs to be added to
+		     OMP_CLAUSE_LASTPRIVATE_STMT; otherwise the copied dovar
+		     would have the value on entry to the last iteration
+		     rather than the value after the iterator increment.  */
+ if (clauses->orderedc)
+ {
+ if (clauses->collapse <= 1 || i >= clauses->collapse)
+ tmp = count;
+ else
+ tmp = fold_build2_loc (input_location, PLUS_EXPR,
+ type, count, build_one_cst (type));
+ tmp = fold_build2_loc (input_location, MULT_EXPR, type,
+ tmp, step);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, type,
+ from, tmp);
+ }
+ else
+ {
+ tmp = gfc_evaluate_now (step, pblock);
+ tmp = fold_build2_loc (input_location, PLUS_EXPR, type,
+ dovar, tmp);
+ }
+ tmp = fold_build2_loc (input_location, MODIFY_EXPR, type,
+ dovar, tmp);
+ for (c = omp_clauses; c ; c = OMP_CLAUSE_CHAIN (c))
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
+ && OMP_CLAUSE_DECL (c) == dovar_decl)
+ {
+ OMP_CLAUSE_LASTPRIVATE_STMT (c) = tmp;
+ break;
+ }
+ else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
+ && OMP_CLAUSE_DECL (c) == dovar_decl)
+ {
+ OMP_CLAUSE_LINEAR_STMT (c) = tmp;
+ break;
+ }
+ }
+ if (c == NULL && op == EXEC_OMP_DO && par_clauses != NULL)
+ {
+ for (c = par_clauses; c ; c = OMP_CLAUSE_CHAIN (c))
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
+ && OMP_CLAUSE_DECL (c) == dovar_decl)
+ {
+ tree l = build_omp_clause (input_location,
+ OMP_CLAUSE_LASTPRIVATE);
+ if (OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c))
+ OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (l) = 1;
+ OMP_CLAUSE_DECL (l) = dovar_decl;
+ OMP_CLAUSE_CHAIN (l) = omp_clauses;
+ OMP_CLAUSE_LASTPRIVATE_STMT (l) = tmp;
+ omp_clauses = l;
+ OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_SHARED);
+ break;
+ }
+ }
+ gcc_assert (simple || c != NULL);
+ }
+ if (!simple)
+ {
+ if (op != EXEC_OMP_SIMD || dovar_found == 1)
+ tmp = build_omp_clause (input_location, OMP_CLAUSE_PRIVATE);
+ else if (collapse == 1)
+ {
+ tmp = build_omp_clause (input_location, OMP_CLAUSE_LINEAR);
+ OMP_CLAUSE_LINEAR_STEP (tmp) = build_int_cst (type, 1);
+ OMP_CLAUSE_LINEAR_NO_COPYIN (tmp) = 1;
+ OMP_CLAUSE_LINEAR_NO_COPYOUT (tmp) = 1;
+ }
+ else
+ tmp = build_omp_clause (input_location, OMP_CLAUSE_LASTPRIVATE);
+ OMP_CLAUSE_DECL (tmp) = count;
+ omp_clauses = gfc_trans_add_clause (tmp, omp_clauses);
+ }
+
+ if (i + 1 < collapse)
+ code = code->block->next;
+ }
+
+ if (pblock != &block)
+ {
+ pushlevel ();
+ gfc_start_block (&block);
+ }
+
+ gfc_start_block (&body);
+
+ FOR_EACH_VEC_ELT (inits, ix, di)
+ gfc_add_modify (&body, di->var, di->init);
+ inits.release ();
+
+  /* The CYCLE statement is implemented with a goto.  An EXIT statement must
+     not be present for this loop.  */
+ cycle_label = gfc_build_label_decl (NULL_TREE);
+
+ /* Put these labels where they can be found later. */
+
+ code->cycle_label = cycle_label;
+ code->exit_label = NULL_TREE;
+
+ /* Main loop body. */
+ if (clauses->lists[OMP_LIST_REDUCTION_INSCAN])
+ {
+ gcc_assert (code->block->next->next->op == EXEC_OMP_SCAN);
+ gcc_assert (code->block->next->next->next->next == NULL);
+ locus *cloc = &code->block->next->next->loc;
+ location_t loc = gfc_get_location (cloc);
+
+ gfc_code code2 = *code->block->next;
+ code2.next = NULL;
+ tmp = gfc_trans_code (&code2);
+ tmp = build2 (OMP_SCAN, void_type_node, tmp, NULL_TREE);
+ SET_EXPR_LOCATION (tmp, loc);
+ gfc_add_expr_to_block (&body, tmp);
+ input_location = loc;
+ tree c = gfc_trans_omp_clauses (&body,
+ code->block->next->next->ext.omp_clauses,
+ *cloc);
+ code2 = *code->block->next->next->next;
+ code2.next = NULL;
+ tmp = gfc_trans_code (&code2);
+ tmp = build2 (OMP_SCAN, void_type_node, tmp, c);
+ SET_EXPR_LOCATION (tmp, loc);
+ }
+ else
+ tmp = gfc_trans_omp_code (code->block->next, true);
+ gfc_add_expr_to_block (&body, tmp);
+
+ /* Label for cycle statements (if needed). */
+ if (TREE_USED (cycle_label))
+ {
+ tmp = build1_v (LABEL_EXPR, cycle_label);
+ gfc_add_expr_to_block (&body, tmp);
+ }
+
+ /* End of loop body. */
+ switch (op)
+ {
+ case EXEC_OMP_SIMD: stmt = make_node (OMP_SIMD); break;
+ case EXEC_OMP_DO: stmt = make_node (OMP_FOR); break;
+ case EXEC_OMP_DISTRIBUTE: stmt = make_node (OMP_DISTRIBUTE); break;
+ case EXEC_OMP_LOOP: stmt = make_node (OMP_LOOP); break;
+ case EXEC_OMP_TASKLOOP: stmt = make_node (OMP_TASKLOOP); break;
+ case EXEC_OACC_LOOP: stmt = make_node (OACC_LOOP); break;
+ default: gcc_unreachable ();
+ }
+
+ TREE_TYPE (stmt) = void_type_node;
+ OMP_FOR_BODY (stmt) = gfc_finish_block (&body);
+ OMP_FOR_CLAUSES (stmt) = omp_clauses;
+ OMP_FOR_INIT (stmt) = init;
+ OMP_FOR_COND (stmt) = cond;
+ OMP_FOR_INCR (stmt) = incr;
+ if (orig_decls)
+ OMP_FOR_ORIG_DECLS (stmt) = orig_decls;
+ gfc_add_expr_to_block (&block, stmt);
+
+ vec_free (doacross_steps);
+ doacross_steps = saved_doacross_steps;
+
+ return gfc_finish_block (&block);
+}
+
+/* Translate combined OpenACC 'parallel loop', 'kernels loop', 'serial loop'
+ construct. */
+
+static tree
+gfc_trans_oacc_combined_directive (gfc_code *code)
+{
+ stmtblock_t block, *pblock = NULL;
+ gfc_omp_clauses construct_clauses, loop_clauses;
+ tree stmt, oacc_clauses = NULL_TREE;
+ enum tree_code construct_code;
+ location_t loc = input_location;
+
+ switch (code->op)
+ {
+ case EXEC_OACC_PARALLEL_LOOP:
+ construct_code = OACC_PARALLEL;
+ break;
+ case EXEC_OACC_KERNELS_LOOP:
+ construct_code = OACC_KERNELS;
+ break;
+ case EXEC_OACC_SERIAL_LOOP:
+ construct_code = OACC_SERIAL;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ gfc_start_block (&block);
+
+ memset (&loop_clauses, 0, sizeof (loop_clauses));
+ if (code->ext.omp_clauses != NULL)
+ {
+ memcpy (&construct_clauses, code->ext.omp_clauses,
+ sizeof (construct_clauses));
+ loop_clauses.collapse = construct_clauses.collapse;
+ loop_clauses.gang = construct_clauses.gang;
+ loop_clauses.gang_static = construct_clauses.gang_static;
+ loop_clauses.gang_num_expr = construct_clauses.gang_num_expr;
+ loop_clauses.gang_static_expr = construct_clauses.gang_static_expr;
+ loop_clauses.vector = construct_clauses.vector;
+ loop_clauses.vector_expr = construct_clauses.vector_expr;
+ loop_clauses.worker = construct_clauses.worker;
+ loop_clauses.worker_expr = construct_clauses.worker_expr;
+ loop_clauses.seq = construct_clauses.seq;
+ loop_clauses.par_auto = construct_clauses.par_auto;
+ loop_clauses.independent = construct_clauses.independent;
+ loop_clauses.tile_list = construct_clauses.tile_list;
+ loop_clauses.lists[OMP_LIST_PRIVATE]
+ = construct_clauses.lists[OMP_LIST_PRIVATE];
+ loop_clauses.lists[OMP_LIST_REDUCTION]
+ = construct_clauses.lists[OMP_LIST_REDUCTION];
+ construct_clauses.gang = false;
+ construct_clauses.gang_static = false;
+ construct_clauses.gang_num_expr = NULL;
+ construct_clauses.gang_static_expr = NULL;
+ construct_clauses.vector = false;
+ construct_clauses.vector_expr = NULL;
+ construct_clauses.worker = false;
+ construct_clauses.worker_expr = NULL;
+ construct_clauses.seq = false;
+ construct_clauses.par_auto = false;
+      construct_clauses.independent = false;
+ construct_clauses.tile_list = NULL;
+ construct_clauses.lists[OMP_LIST_PRIVATE] = NULL;
+ if (construct_code == OACC_KERNELS)
+ construct_clauses.lists[OMP_LIST_REDUCTION] = NULL;
+ oacc_clauses = gfc_trans_omp_clauses (&block, &construct_clauses,
+ code->loc, false, true);
+ }
+ if (!loop_clauses.seq)
+ pblock = &block;
+ else
+ pushlevel ();
+ stmt = gfc_trans_omp_do (code, EXEC_OACC_LOOP, pblock, &loop_clauses, NULL);
+ protected_set_expr_location (stmt, loc);
+ if (TREE_CODE (stmt) != BIND_EXPR)
+ stmt = build3_v (BIND_EXPR, NULL, stmt, poplevel (1, 0));
+ else
+ poplevel (0, 0);
+ stmt = build2_loc (loc, construct_code, void_type_node, stmt, oacc_clauses);
+ gfc_add_expr_to_block (&block, stmt);
+ return gfc_finish_block (&block);
+}
+
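+/* Translate the OpenMP DEPOBJ directive: store the dependence address and
+   the depend kind into the depend object.  */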
+static tree
+gfc_trans_omp_depobj (gfc_code *code)
+{
+ stmtblock_t block;
+ gfc_se se;
+ gfc_init_se (&se, NULL);
+ gfc_init_block (&block);
+ gfc_conv_expr (&se, code->ext.omp_clauses->depobj);
+ gcc_assert (se.pre.head == NULL && se.post.head == NULL);
+ tree depobj = se.expr;
+ location_t loc = EXPR_LOCATION (depobj);
+ if (!POINTER_TYPE_P (TREE_TYPE (depobj)))
+ depobj = gfc_build_addr_expr (NULL, depobj);
+ depobj = fold_convert (build_pointer_type_for_mode (ptr_type_node,
+ TYPE_MODE (ptr_type_node),
+ true), depobj);
+ gfc_omp_namelist *n = code->ext.omp_clauses->lists[OMP_LIST_DEPEND];
+ if (n)
+ {
+ tree var;
+ if (n->expr)
+ var = gfc_convert_expr_to_tree (&block, n->expr);
+ else
+ var = gfc_get_symbol_decl (n->sym);
+ if (!POINTER_TYPE_P (TREE_TYPE (var)))
+ var = gfc_build_addr_expr (NULL, var);
+ depobj = save_expr (depobj);
+ tree r = build_fold_indirect_ref_loc (loc, depobj);
+ gfc_add_expr_to_block (&block,
+ build2 (MODIFY_EXPR, void_type_node, r, var));
+ }
+
+  /* Exactly one of the DEPEND list, 'destroy' and 'update' may be set.  */
+ gcc_assert (((int)(n != NULL) + (int)(code->ext.omp_clauses->destroy)
+ + (int)(code->ext.omp_clauses->depobj_update != OMP_DEPEND_UNSET))
+ == 1);
+ int k = -1; /* omp_clauses->destroy */
+ if (!code->ext.omp_clauses->destroy)
+ switch (code->ext.omp_clauses->depobj_update != OMP_DEPEND_UNSET
+ ? code->ext.omp_clauses->depobj_update : n->u.depend_op)
+ {
+ case OMP_DEPEND_IN: k = GOMP_DEPEND_IN; break;
+ case OMP_DEPEND_OUT: k = GOMP_DEPEND_OUT; break;
+ case OMP_DEPEND_INOUT: k = GOMP_DEPEND_INOUT; break;
+ case OMP_DEPEND_MUTEXINOUTSET: k = GOMP_DEPEND_MUTEXINOUTSET; break;
+ default: gcc_unreachable ();
+ }
+ tree t = build_int_cst (ptr_type_node, k);
+ depobj = build2_loc (loc, POINTER_PLUS_EXPR, TREE_TYPE (depobj), depobj,
+ TYPE_SIZE_UNIT (ptr_type_node));
+ depobj = build_fold_indirect_ref_loc (loc, depobj);
+ gfc_add_expr_to_block (&block, build2 (MODIFY_EXPR, void_type_node, depobj, t));
+
+ return gfc_finish_block (&block);
+}
+
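+/* Translate the OpenMP ERROR directive into a GOMP_warning or GOMP_error
+   call, depending on the severity clause.  */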
+static tree
+gfc_trans_omp_error (gfc_code *code)
+{
+ stmtblock_t block;
+ gfc_se se;
+ tree len, message;
+ bool fatal = code->ext.omp_clauses->severity == OMP_SEVERITY_FATAL;
+ tree fndecl = builtin_decl_explicit (fatal ? BUILT_IN_GOMP_ERROR
+ : BUILT_IN_GOMP_WARNING);
+ gfc_start_block (&block);
+  gfc_init_se (&se, NULL);
+ if (!code->ext.omp_clauses->message)
+ {
+ message = null_pointer_node;
+ len = build_int_cst (size_type_node, 0);
+ }
+ else
+ {
+ gfc_conv_expr (&se, code->ext.omp_clauses->message);
+ message = se.expr;
+ if (!POINTER_TYPE_P (TREE_TYPE (message)))
+ /* To ensure an ARRAY_TYPE is not passed as such. */
+ message = gfc_build_addr_expr (NULL, message);
+ len = se.string_length;
+ }
+ gfc_add_block_to_block (&block, &se.pre);
+ gfc_add_expr_to_block (&block, build_call_expr_loc (input_location, fndecl,
+ 2, message, len));
+ gfc_add_block_to_block (&block, &se.post);
+ return gfc_finish_block (&block);
+}
+
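+/* Translate the OpenMP FLUSH directive into a memory fence honoring the
+   requested memory order.  */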
+static tree
+gfc_trans_omp_flush (gfc_code *code)
+{
+ tree call;
+ if (!code->ext.omp_clauses
+ || code->ext.omp_clauses->memorder == OMP_MEMORDER_UNSET
+ || code->ext.omp_clauses->memorder == OMP_MEMORDER_SEQ_CST)
+ {
+ call = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE);
+ call = build_call_expr_loc (input_location, call, 0);
+ }
+ else
+ {
+ enum memmodel mo = MEMMODEL_LAST;
+ switch (code->ext.omp_clauses->memorder)
+ {
+ case OMP_MEMORDER_ACQ_REL: mo = MEMMODEL_ACQ_REL; break;
+ case OMP_MEMORDER_RELEASE: mo = MEMMODEL_RELEASE; break;
+ case OMP_MEMORDER_ACQUIRE: mo = MEMMODEL_ACQUIRE; break;
+ default: gcc_unreachable (); break;
+ }
+ call = builtin_decl_explicit (BUILT_IN_ATOMIC_THREAD_FENCE);
+ call = build_call_expr_loc (input_location, call, 1,
+ build_int_cst (integer_type_node, mo));
+ }
+ return call;
+}
+
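+/* Translate the OpenMP MASTER construct.  */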
+static tree
+gfc_trans_omp_master (gfc_code *code)
+{
+ tree stmt = gfc_trans_code (code->block->next);
+ if (IS_EMPTY_STMT (stmt))
+ return stmt;
+ return build1_v (OMP_MASTER, stmt);
+}
+
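+/* Translate the OpenMP MASKED construct; CLAUSES overrides the clauses from
+   CODE when the construct is part of a combined construct.  */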
+static tree
+gfc_trans_omp_masked (gfc_code *code, gfc_omp_clauses *clauses)
+{
+ stmtblock_t block;
+ tree body = gfc_trans_code (code->block->next);
+ if (IS_EMPTY_STMT (body))
+ return body;
+ if (!clauses)
+ clauses = code->ext.omp_clauses;
+ gfc_start_block (&block);
+ tree omp_clauses = gfc_trans_omp_clauses (&block, clauses, code->loc);
+ tree stmt = make_node (OMP_MASKED);
+ TREE_TYPE (stmt) = void_type_node;
+ OMP_MASKED_BODY (stmt) = body;
+ OMP_MASKED_CLAUSES (stmt) = omp_clauses;
+ gfc_add_expr_to_block (&block, stmt);
+ return gfc_finish_block (&block);
+}
+
+
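+/* Translate the OpenMP ORDERED construct.  */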
+static tree
+gfc_trans_omp_ordered (gfc_code *code)
+{
+ if (!flag_openmp)
+ {
+ if (!code->ext.omp_clauses->simd)
+ return gfc_trans_code (code->block ? code->block->next : NULL);
+ code->ext.omp_clauses->threads = 0;
+ }
+ tree omp_clauses = gfc_trans_omp_clauses (NULL, code->ext.omp_clauses,
+ code->loc);
+ return build2_loc (input_location, OMP_ORDERED, void_type_node,
+ code->block ? gfc_trans_code (code->block->next)
+ : NULL_TREE, omp_clauses);
+}
+
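+/* Translate the OpenMP PARALLEL construct.  */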
+static tree
+gfc_trans_omp_parallel (gfc_code *code)
+{
+ stmtblock_t block;
+ tree stmt, omp_clauses;
+
+ gfc_start_block (&block);
+ omp_clauses = gfc_trans_omp_clauses (&block, code->ext.omp_clauses,
+ code->loc);
+ pushlevel ();
+ stmt = gfc_trans_omp_code (code->block->next, true);
+ stmt = build3_v (BIND_EXPR, NULL, stmt, poplevel (1, 0));
+ stmt = build2_loc (input_location, OMP_PARALLEL, void_type_node, stmt,
+ omp_clauses);
+ gfc_add_expr_to_block (&block, stmt);
+ return gfc_finish_block (&block);
+}
+
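+/* Constituent constructs a combined or composite construct may be split
+   into, and the corresponding bit masks.  */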
+enum
+{
+ GFC_OMP_SPLIT_SIMD,
+ GFC_OMP_SPLIT_DO,
+ GFC_OMP_SPLIT_PARALLEL,
+ GFC_OMP_SPLIT_DISTRIBUTE,
+ GFC_OMP_SPLIT_TEAMS,
+ GFC_OMP_SPLIT_TARGET,
+ GFC_OMP_SPLIT_TASKLOOP,
+ GFC_OMP_SPLIT_MASKED,
+ GFC_OMP_SPLIT_NUM
+};
+
+enum
+{
+ GFC_OMP_MASK_SIMD = (1 << GFC_OMP_SPLIT_SIMD),
+ GFC_OMP_MASK_DO = (1 << GFC_OMP_SPLIT_DO),
+ GFC_OMP_MASK_PARALLEL = (1 << GFC_OMP_SPLIT_PARALLEL),
+ GFC_OMP_MASK_DISTRIBUTE = (1 << GFC_OMP_SPLIT_DISTRIBUTE),
+ GFC_OMP_MASK_TEAMS = (1 << GFC_OMP_SPLIT_TEAMS),
+ GFC_OMP_MASK_TARGET = (1 << GFC_OMP_SPLIT_TARGET),
+ GFC_OMP_MASK_TASKLOOP = (1 << GFC_OMP_SPLIT_TASKLOOP),
+ GFC_OMP_MASK_MASKED = (1 << GFC_OMP_SPLIT_MASKED)
+};
+
+/* If a var is in lastprivate/firstprivate/reduction but not in a
+ data mapping/sharing clause, add it to 'map(tofrom:' if is_target
+ and to 'shared' otherwise. */
+static void
+gfc_add_clause_implicitly (gfc_omp_clauses *clauses_out,
+ gfc_omp_clauses *clauses_in,
+ bool is_target, bool is_parallel_do)
+{
+ int clauselist_to_add = is_target ? OMP_LIST_MAP : OMP_LIST_SHARED;
+ gfc_omp_namelist *tail = NULL;
+ for (int i = 0; i < 5; ++i)
+ {
+ gfc_omp_namelist *n;
+ switch (i)
+ {
+ case 0: n = clauses_in->lists[OMP_LIST_FIRSTPRIVATE]; break;
+ case 1: n = clauses_in->lists[OMP_LIST_LASTPRIVATE]; break;
+ case 2: n = clauses_in->lists[OMP_LIST_REDUCTION]; break;
+ case 3: n = clauses_in->lists[OMP_LIST_REDUCTION_INSCAN]; break;
+ case 4: n = clauses_in->lists[OMP_LIST_REDUCTION_TASK]; break;
+ default: gcc_unreachable ();
+ }
+ for (; n != NULL; n = n->next)
+ {
+ gfc_omp_namelist *n2, **n_firstp = NULL, **n_lastp = NULL;
+ for (int j = 0; j < 6; ++j)
+ {
+ gfc_omp_namelist **n2ref = NULL, *prev2 = NULL;
+ switch (j)
+ {
+ case 0:
+ n2ref = &clauses_out->lists[clauselist_to_add];
+ break;
+ case 1:
+ n2ref = &clauses_out->lists[OMP_LIST_FIRSTPRIVATE];
+ break;
+ case 2:
+ if (is_target)
+ n2ref = &clauses_in->lists[OMP_LIST_LASTPRIVATE];
+ else
+ n2ref = &clauses_out->lists[OMP_LIST_LASTPRIVATE];
+ break;
+ case 3: n2ref = &clauses_out->lists[OMP_LIST_REDUCTION]; break;
+ case 4:
+ n2ref = &clauses_out->lists[OMP_LIST_REDUCTION_INSCAN];
+ break;
+ case 5:
+ n2ref = &clauses_out->lists[OMP_LIST_REDUCTION_TASK];
+ break;
+ default: gcc_unreachable ();
+ }
+ for (n2 = *n2ref; n2 != NULL; prev2 = n2, n2 = n2->next)
+ if (n2->sym == n->sym)
+ break;
+ if (n2)
+ {
+ if (j == 0 /* clauselist_to_add */)
+ break; /* Already present. */
+ if (j == 1 /* OMP_LIST_FIRSTPRIVATE */)
+ {
+ n_firstp = prev2 ? &prev2->next : n2ref;
+ continue;
+ }
+ if (j == 2 /* OMP_LIST_LASTPRIVATE */)
+ {
+ n_lastp = prev2 ? &prev2->next : n2ref;
+ continue;
+ }
+ break;
+ }
+ }
+ if (n_firstp && n_lastp)
+ {
+	    /* For parallel do, GCC puts firstprivate/lastprivate
+	       on the parallel.  */
+ if (is_parallel_do)
+ continue;
+ *n_firstp = (*n_firstp)->next;
+ if (!is_target)
+ *n_lastp = (*n_lastp)->next;
+ }
+ else if (is_target && n_lastp)
+ ;
+ else if (n2 || n_firstp || n_lastp)
+ continue;
+ if (clauses_out->lists[clauselist_to_add]
+ && (clauses_out->lists[clauselist_to_add]
+ == clauses_in->lists[clauselist_to_add]))
+ {
+ gfc_omp_namelist *p = NULL;
+ for (n2 = clauses_in->lists[clauselist_to_add]; n2; n2 = n2->next)
+ {
+ if (p)
+ {
+ p->next = gfc_get_omp_namelist ();
+ p = p->next;
+ }
+ else
+ {
+ p = gfc_get_omp_namelist ();
+ clauses_out->lists[clauselist_to_add] = p;
+ }
+ *p = *n2;
+ }
+ }
+ if (!tail)
+ {
+ tail = clauses_out->lists[clauselist_to_add];
+ for (; tail && tail->next; tail = tail->next)
+ ;
+ }
+ n2 = gfc_get_omp_namelist ();
+ n2->where = n->where;
+ n2->sym = n->sym;
+ if (is_target)
+ n2->u.map_op = OMP_MAP_TOFROM;
+ if (tail)
+ {
+ tail->next = n2;
+ tail = n2;
+ }
+ else
+ clauses_out->lists[clauselist_to_add] = n2;
+ }
+ }
+}
+
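+/* Free the clause namelists allocated by gfc_split_omp_clauses.  */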
+static void
+gfc_free_split_omp_clauses (gfc_code *code, gfc_omp_clauses *clausesa)
+{
+ for (int i = 0; i < GFC_OMP_SPLIT_NUM; ++i)
+ for (int j = 0; j < OMP_LIST_NUM; ++j)
+ if (clausesa[i].lists[j] && clausesa[i].lists[j] != code->ext.omp_clauses->lists[j])
+ for (gfc_omp_namelist *n = clausesa[i].lists[j]; n;)
+ {
+ gfc_omp_namelist *p = n;
+ n = n->next;
+ free (p);
+ }
+}
+
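+/* Distribute the clauses of the combined or composite construct CODE among
+   its constituent constructs in CLAUSESA.  */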
+static void
+gfc_split_omp_clauses (gfc_code *code,
+ gfc_omp_clauses clausesa[GFC_OMP_SPLIT_NUM])
+{
+ int mask = 0, innermost = 0;
+ bool is_loop = false;
+ memset (clausesa, 0, GFC_OMP_SPLIT_NUM * sizeof (gfc_omp_clauses));
+ switch (code->op)
+ {
+ case EXEC_OMP_DISTRIBUTE:
+ innermost = GFC_OMP_SPLIT_DISTRIBUTE;
+ break;
+ case EXEC_OMP_DISTRIBUTE_PARALLEL_DO:
+ mask = GFC_OMP_MASK_DISTRIBUTE | GFC_OMP_MASK_PARALLEL | GFC_OMP_MASK_DO;
+ innermost = GFC_OMP_SPLIT_DO;
+ break;
+ case EXEC_OMP_DISTRIBUTE_PARALLEL_DO_SIMD:
+ mask = GFC_OMP_MASK_DISTRIBUTE | GFC_OMP_MASK_PARALLEL
+ | GFC_OMP_MASK_DO | GFC_OMP_MASK_SIMD;
+ innermost = GFC_OMP_SPLIT_SIMD;
+ break;
+ case EXEC_OMP_DISTRIBUTE_SIMD:
+ mask = GFC_OMP_MASK_DISTRIBUTE | GFC_OMP_MASK_SIMD;
+ innermost = GFC_OMP_SPLIT_SIMD;
+ break;
+ case EXEC_OMP_DO:
+ case EXEC_OMP_LOOP:
+ innermost = GFC_OMP_SPLIT_DO;
+ break;
+ case EXEC_OMP_DO_SIMD:
+ mask = GFC_OMP_MASK_DO | GFC_OMP_MASK_SIMD;
+ innermost = GFC_OMP_SPLIT_SIMD;
+ break;
+ case EXEC_OMP_PARALLEL:
+ innermost = GFC_OMP_SPLIT_PARALLEL;
+ break;
+ case EXEC_OMP_PARALLEL_DO:
+ case EXEC_OMP_PARALLEL_LOOP:
+ mask = GFC_OMP_MASK_PARALLEL | GFC_OMP_MASK_DO;
+ innermost = GFC_OMP_SPLIT_DO;
+ break;
+ case EXEC_OMP_PARALLEL_DO_SIMD:
+ mask = GFC_OMP_MASK_PARALLEL | GFC_OMP_MASK_DO | GFC_OMP_MASK_SIMD;
+ innermost = GFC_OMP_SPLIT_SIMD;
+ break;
+ case EXEC_OMP_PARALLEL_MASKED:
+ mask = GFC_OMP_MASK_PARALLEL | GFC_OMP_MASK_MASKED;
+ innermost = GFC_OMP_SPLIT_MASKED;
+ break;
+ case EXEC_OMP_PARALLEL_MASKED_TASKLOOP:
+ mask = (GFC_OMP_MASK_PARALLEL | GFC_OMP_MASK_MASKED
+ | GFC_OMP_MASK_TASKLOOP | GFC_OMP_MASK_SIMD);
+ innermost = GFC_OMP_SPLIT_TASKLOOP;
+ break;
+ case EXEC_OMP_PARALLEL_MASTER_TASKLOOP:
+ mask = GFC_OMP_MASK_PARALLEL | GFC_OMP_MASK_TASKLOOP | GFC_OMP_MASK_SIMD;
+ innermost = GFC_OMP_SPLIT_TASKLOOP;
+ break;
+ case EXEC_OMP_PARALLEL_MASKED_TASKLOOP_SIMD:
+ mask = (GFC_OMP_MASK_PARALLEL | GFC_OMP_MASK_MASKED
+ | GFC_OMP_MASK_TASKLOOP | GFC_OMP_MASK_SIMD);
+ innermost = GFC_OMP_SPLIT_SIMD;
+ break;
+ case EXEC_OMP_PARALLEL_MASTER_TASKLOOP_SIMD:
+ mask = GFC_OMP_MASK_PARALLEL | GFC_OMP_MASK_TASKLOOP | GFC_OMP_MASK_SIMD;
+ innermost = GFC_OMP_SPLIT_SIMD;
+ break;
+ case EXEC_OMP_SIMD:
+ innermost = GFC_OMP_SPLIT_SIMD;
+ break;
+ case EXEC_OMP_TARGET:
+ innermost = GFC_OMP_SPLIT_TARGET;
+ break;
+ case EXEC_OMP_TARGET_PARALLEL:
+ mask = GFC_OMP_MASK_TARGET | GFC_OMP_MASK_PARALLEL;
+ innermost = GFC_OMP_SPLIT_PARALLEL;
+ break;
+ case EXEC_OMP_TARGET_PARALLEL_DO:
+ case EXEC_OMP_TARGET_PARALLEL_LOOP:
+ mask = GFC_OMP_MASK_TARGET | GFC_OMP_MASK_PARALLEL | GFC_OMP_MASK_DO;
+ innermost = GFC_OMP_SPLIT_DO;
+ break;
+ case EXEC_OMP_TARGET_PARALLEL_DO_SIMD:
+ mask = GFC_OMP_MASK_TARGET | GFC_OMP_MASK_PARALLEL | GFC_OMP_MASK_DO
+ | GFC_OMP_MASK_SIMD;
+ innermost = GFC_OMP_SPLIT_SIMD;
+ break;
+ case EXEC_OMP_TARGET_SIMD:
+ mask = GFC_OMP_MASK_TARGET | GFC_OMP_MASK_SIMD;
+ innermost = GFC_OMP_SPLIT_SIMD;
+ break;
+ case EXEC_OMP_TARGET_TEAMS:
+ mask = GFC_OMP_MASK_TARGET | GFC_OMP_MASK_TEAMS;
+ innermost = GFC_OMP_SPLIT_TEAMS;
+ break;
+ case EXEC_OMP_TARGET_TEAMS_DISTRIBUTE:
+ mask = GFC_OMP_MASK_TARGET | GFC_OMP_MASK_TEAMS
+ | GFC_OMP_MASK_DISTRIBUTE;
+ innermost = GFC_OMP_SPLIT_DISTRIBUTE;
+ break;
+ case EXEC_OMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_DO:
+ mask = GFC_OMP_MASK_TARGET | GFC_OMP_MASK_TEAMS | GFC_OMP_MASK_DISTRIBUTE
+ | GFC_OMP_MASK_PARALLEL | GFC_OMP_MASK_DO;
+ innermost = GFC_OMP_SPLIT_DO;
+ break;
+ case EXEC_OMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_DO_SIMD:
+ mask = GFC_OMP_MASK_TARGET | GFC_OMP_MASK_TEAMS | GFC_OMP_MASK_DISTRIBUTE
+ | GFC_OMP_MASK_PARALLEL | GFC_OMP_MASK_DO | GFC_OMP_MASK_SIMD;
+ innermost = GFC_OMP_SPLIT_SIMD;
+ break;
+ case EXEC_OMP_TARGET_TEAMS_DISTRIBUTE_SIMD:
+ mask = GFC_OMP_MASK_TARGET | GFC_OMP_MASK_TEAMS
+ | GFC_OMP_MASK_DISTRIBUTE | GFC_OMP_MASK_SIMD;
+ innermost = GFC_OMP_SPLIT_SIMD;
+ break;
+ case EXEC_OMP_TARGET_TEAMS_LOOP:
+ mask = GFC_OMP_MASK_TARGET | GFC_OMP_MASK_TEAMS | GFC_OMP_MASK_DO;
+ innermost = GFC_OMP_SPLIT_DO;
+ break;
+ case EXEC_OMP_MASKED_TASKLOOP:
+ mask = GFC_OMP_MASK_MASKED | GFC_OMP_MASK_TASKLOOP;
+ innermost = GFC_OMP_SPLIT_TASKLOOP;
+ break;
+ case EXEC_OMP_MASTER_TASKLOOP:
+ case EXEC_OMP_TASKLOOP:
+ innermost = GFC_OMP_SPLIT_TASKLOOP;
+ break;
+ case EXEC_OMP_MASKED_TASKLOOP_SIMD:
+ mask = GFC_OMP_MASK_MASKED | GFC_OMP_MASK_TASKLOOP | GFC_OMP_MASK_SIMD;
+ innermost = GFC_OMP_SPLIT_SIMD;
+ break;
+ case EXEC_OMP_MASTER_TASKLOOP_SIMD:
+ case EXEC_OMP_TASKLOOP_SIMD:
+ mask = GFC_OMP_MASK_TASKLOOP | GFC_OMP_MASK_SIMD;
+ innermost = GFC_OMP_SPLIT_SIMD;
+ break;
+ case EXEC_OMP_TEAMS:
+ innermost = GFC_OMP_SPLIT_TEAMS;
+ break;
+ case EXEC_OMP_TEAMS_DISTRIBUTE:
+ mask = GFC_OMP_MASK_TEAMS | GFC_OMP_MASK_DISTRIBUTE;
+ innermost = GFC_OMP_SPLIT_DISTRIBUTE;
+ break;
+ case EXEC_OMP_TEAMS_DISTRIBUTE_PARALLEL_DO:
+ mask = GFC_OMP_MASK_TEAMS | GFC_OMP_MASK_DISTRIBUTE
+ | GFC_OMP_MASK_PARALLEL | GFC_OMP_MASK_DO;
+ innermost = GFC_OMP_SPLIT_DO;
+ break;
+ case EXEC_OMP_TEAMS_DISTRIBUTE_PARALLEL_DO_SIMD:
+ mask = GFC_OMP_MASK_TEAMS | GFC_OMP_MASK_DISTRIBUTE
+ | GFC_OMP_MASK_PARALLEL | GFC_OMP_MASK_DO | GFC_OMP_MASK_SIMD;
+ innermost = GFC_OMP_SPLIT_SIMD;
+ break;
+ case EXEC_OMP_TEAMS_DISTRIBUTE_SIMD:
+ mask = GFC_OMP_MASK_TEAMS | GFC_OMP_MASK_DISTRIBUTE | GFC_OMP_MASK_SIMD;
+ innermost = GFC_OMP_SPLIT_SIMD;
+ break;
+ case EXEC_OMP_TEAMS_LOOP:
+ mask = GFC_OMP_MASK_TEAMS | GFC_OMP_MASK_DO;
+ innermost = GFC_OMP_SPLIT_DO;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ if (mask == 0)
+ {
+ clausesa[innermost] = *code->ext.omp_clauses;
+ return;
+ }
+ /* Loops are similar to DO but still a bit different. */
+ switch (code->op)
+ {
+ case EXEC_OMP_LOOP:
+ case EXEC_OMP_PARALLEL_LOOP:
+ case EXEC_OMP_TEAMS_LOOP:
+ case EXEC_OMP_TARGET_PARALLEL_LOOP:
+ case EXEC_OMP_TARGET_TEAMS_LOOP:
+ is_loop = true;
+ default:
+ break;
+ }
+ if (code->ext.omp_clauses != NULL)
+ {
+ if (mask & GFC_OMP_MASK_TARGET)
+ {
+ /* First the clauses that are unique to some constructs. */
+ clausesa[GFC_OMP_SPLIT_TARGET].lists[OMP_LIST_MAP]
+ = code->ext.omp_clauses->lists[OMP_LIST_MAP];
+ clausesa[GFC_OMP_SPLIT_TARGET].lists[OMP_LIST_IS_DEVICE_PTR]
+ = code->ext.omp_clauses->lists[OMP_LIST_IS_DEVICE_PTR];
+ clausesa[GFC_OMP_SPLIT_TARGET].device
+ = code->ext.omp_clauses->device;
+ clausesa[GFC_OMP_SPLIT_TARGET].thread_limit
+ = code->ext.omp_clauses->thread_limit;
+ for (int i = 0; i < OMP_DEFAULTMAP_CAT_NUM; i++)
+ clausesa[GFC_OMP_SPLIT_TARGET].defaultmap[i]
+ = code->ext.omp_clauses->defaultmap[i];
+ clausesa[GFC_OMP_SPLIT_TARGET].if_exprs[OMP_IF_TARGET]
+ = code->ext.omp_clauses->if_exprs[OMP_IF_TARGET];
+ /* And this is copied to all. */
+ clausesa[GFC_OMP_SPLIT_TARGET].if_expr
+ = code->ext.omp_clauses->if_expr;
+ clausesa[GFC_OMP_SPLIT_TARGET].nowait
+ = code->ext.omp_clauses->nowait;
+ }
+ if (mask & GFC_OMP_MASK_TEAMS)
+ {
+ /* First the clauses that are unique to some constructs. */
+ clausesa[GFC_OMP_SPLIT_TEAMS].num_teams_lower
+ = code->ext.omp_clauses->num_teams_lower;
+ clausesa[GFC_OMP_SPLIT_TEAMS].num_teams_upper
+ = code->ext.omp_clauses->num_teams_upper;
+ clausesa[GFC_OMP_SPLIT_TEAMS].thread_limit
+ = code->ext.omp_clauses->thread_limit;
+ /* Shared and default clauses are allowed on parallel, teams
+ and taskloop. */
+ clausesa[GFC_OMP_SPLIT_TEAMS].lists[OMP_LIST_SHARED]
+ = code->ext.omp_clauses->lists[OMP_LIST_SHARED];
+ clausesa[GFC_OMP_SPLIT_TEAMS].default_sharing
+ = code->ext.omp_clauses->default_sharing;
+ }
+ if (mask & GFC_OMP_MASK_DISTRIBUTE)
+ {
+ /* First the clauses that are unique to some constructs. */
+ clausesa[GFC_OMP_SPLIT_DISTRIBUTE].dist_sched_kind
+ = code->ext.omp_clauses->dist_sched_kind;
+ clausesa[GFC_OMP_SPLIT_DISTRIBUTE].dist_chunk_size
+ = code->ext.omp_clauses->dist_chunk_size;
+ /* Duplicate collapse. */
+ clausesa[GFC_OMP_SPLIT_DISTRIBUTE].collapse
+ = code->ext.omp_clauses->collapse;
+ clausesa[GFC_OMP_SPLIT_DISTRIBUTE].order_concurrent
+ = code->ext.omp_clauses->order_concurrent;
+ clausesa[GFC_OMP_SPLIT_DISTRIBUTE].order_unconstrained
+ = code->ext.omp_clauses->order_unconstrained;
+ clausesa[GFC_OMP_SPLIT_DISTRIBUTE].order_reproducible
+ = code->ext.omp_clauses->order_reproducible;
+ }
+ if (mask & GFC_OMP_MASK_PARALLEL)
+ {
+ /* First the clauses that are unique to some constructs. */
+ clausesa[GFC_OMP_SPLIT_PARALLEL].lists[OMP_LIST_COPYIN]
+ = code->ext.omp_clauses->lists[OMP_LIST_COPYIN];
+ clausesa[GFC_OMP_SPLIT_PARALLEL].num_threads
+ = code->ext.omp_clauses->num_threads;
+ clausesa[GFC_OMP_SPLIT_PARALLEL].proc_bind
+ = code->ext.omp_clauses->proc_bind;
+ /* Shared and default clauses are allowed on parallel, teams
+ and taskloop. */
+ clausesa[GFC_OMP_SPLIT_PARALLEL].lists[OMP_LIST_SHARED]
+ = code->ext.omp_clauses->lists[OMP_LIST_SHARED];
+ clausesa[GFC_OMP_SPLIT_PARALLEL].default_sharing
+ = code->ext.omp_clauses->default_sharing;
+ clausesa[GFC_OMP_SPLIT_PARALLEL].if_exprs[OMP_IF_PARALLEL]
+ = code->ext.omp_clauses->if_exprs[OMP_IF_PARALLEL];
+ /* And this is copied to all. */
+ clausesa[GFC_OMP_SPLIT_PARALLEL].if_expr
+ = code->ext.omp_clauses->if_expr;
+ }
+ if (mask & GFC_OMP_MASK_MASKED)
+ clausesa[GFC_OMP_SPLIT_MASKED].filter = code->ext.omp_clauses->filter;
+ if ((mask & GFC_OMP_MASK_DO) && !is_loop)
+ {
+ /* First the clauses that are unique to some constructs. */
+ clausesa[GFC_OMP_SPLIT_DO].ordered
+ = code->ext.omp_clauses->ordered;
+ clausesa[GFC_OMP_SPLIT_DO].orderedc
+ = code->ext.omp_clauses->orderedc;
+ clausesa[GFC_OMP_SPLIT_DO].sched_kind
+ = code->ext.omp_clauses->sched_kind;
+ if (innermost == GFC_OMP_SPLIT_SIMD)
+ clausesa[GFC_OMP_SPLIT_DO].sched_simd
+ = code->ext.omp_clauses->sched_simd;
+ clausesa[GFC_OMP_SPLIT_DO].sched_monotonic
+ = code->ext.omp_clauses->sched_monotonic;
+ clausesa[GFC_OMP_SPLIT_DO].sched_nonmonotonic
+ = code->ext.omp_clauses->sched_nonmonotonic;
+ clausesa[GFC_OMP_SPLIT_DO].chunk_size
+ = code->ext.omp_clauses->chunk_size;
+ clausesa[GFC_OMP_SPLIT_DO].nowait
+ = code->ext.omp_clauses->nowait;
+ }
+ if (mask & GFC_OMP_MASK_DO)
+ {
+ clausesa[GFC_OMP_SPLIT_DO].bind
+ = code->ext.omp_clauses->bind;
+ /* Duplicate collapse. */
+ clausesa[GFC_OMP_SPLIT_DO].collapse
+ = code->ext.omp_clauses->collapse;
+ clausesa[GFC_OMP_SPLIT_DO].order_concurrent
+ = code->ext.omp_clauses->order_concurrent;
+ clausesa[GFC_OMP_SPLIT_DO].order_unconstrained
+ = code->ext.omp_clauses->order_unconstrained;
+ clausesa[GFC_OMP_SPLIT_DO].order_reproducible
+ = code->ext.omp_clauses->order_reproducible;
+ }
+ if (mask & GFC_OMP_MASK_SIMD)
+ {
+ clausesa[GFC_OMP_SPLIT_SIMD].safelen_expr
+ = code->ext.omp_clauses->safelen_expr;
+ clausesa[GFC_OMP_SPLIT_SIMD].simdlen_expr
+ = code->ext.omp_clauses->simdlen_expr;
+ clausesa[GFC_OMP_SPLIT_SIMD].lists[OMP_LIST_ALIGNED]
+ = code->ext.omp_clauses->lists[OMP_LIST_ALIGNED];
+ /* Duplicate collapse. */
+ clausesa[GFC_OMP_SPLIT_SIMD].collapse
+ = code->ext.omp_clauses->collapse;
+ clausesa[GFC_OMP_SPLIT_SIMD].if_exprs[OMP_IF_SIMD]
+ = code->ext.omp_clauses->if_exprs[OMP_IF_SIMD];
+ clausesa[GFC_OMP_SPLIT_SIMD].order_concurrent
+ = code->ext.omp_clauses->order_concurrent;
+ clausesa[GFC_OMP_SPLIT_SIMD].order_unconstrained
+ = code->ext.omp_clauses->order_unconstrained;
+ clausesa[GFC_OMP_SPLIT_SIMD].order_reproducible
+ = code->ext.omp_clauses->order_reproducible;
+ /* And this is copied to all. */
+ clausesa[GFC_OMP_SPLIT_SIMD].if_expr
+ = code->ext.omp_clauses->if_expr;
+ }
+ if (mask & GFC_OMP_MASK_TASKLOOP)
+ {
+ /* First the clauses that are unique to some constructs. */
+ clausesa[GFC_OMP_SPLIT_TASKLOOP].nogroup
+ = code->ext.omp_clauses->nogroup;
+ clausesa[GFC_OMP_SPLIT_TASKLOOP].grainsize
+ = code->ext.omp_clauses->grainsize;
+ clausesa[GFC_OMP_SPLIT_TASKLOOP].grainsize_strict
+ = code->ext.omp_clauses->grainsize_strict;
+ clausesa[GFC_OMP_SPLIT_TASKLOOP].num_tasks
+ = code->ext.omp_clauses->num_tasks;
+ clausesa[GFC_OMP_SPLIT_TASKLOOP].num_tasks_strict
+ = code->ext.omp_clauses->num_tasks_strict;
+ clausesa[GFC_OMP_SPLIT_TASKLOOP].priority
+ = code->ext.omp_clauses->priority;
+ clausesa[GFC_OMP_SPLIT_TASKLOOP].final_expr
+ = code->ext.omp_clauses->final_expr;
+ clausesa[GFC_OMP_SPLIT_TASKLOOP].untied
+ = code->ext.omp_clauses->untied;
+ clausesa[GFC_OMP_SPLIT_TASKLOOP].mergeable
+ = code->ext.omp_clauses->mergeable;
+ clausesa[GFC_OMP_SPLIT_TASKLOOP].if_exprs[OMP_IF_TASKLOOP]
+ = code->ext.omp_clauses->if_exprs[OMP_IF_TASKLOOP];
+ /* And this is copied to all. */
+ clausesa[GFC_OMP_SPLIT_TASKLOOP].if_expr
+ = code->ext.omp_clauses->if_expr;
+ /* Shared and default clauses are allowed on parallel, teams
+ and taskloop. */
+ clausesa[GFC_OMP_SPLIT_TASKLOOP].lists[OMP_LIST_SHARED]
+ = code->ext.omp_clauses->lists[OMP_LIST_SHARED];
+ clausesa[GFC_OMP_SPLIT_TASKLOOP].default_sharing
+ = code->ext.omp_clauses->default_sharing;
+ /* Duplicate collapse. */
+ clausesa[GFC_OMP_SPLIT_TASKLOOP].collapse
+ = code->ext.omp_clauses->collapse;
+ }
+ /* The private clause is supported on all constructs except master/masked,
+ so it is enough to put it on the innermost construct other than master/masked.
+ For !$omp parallel do put it on parallel though,
+ as that's what we did for OpenMP 3.1. */
+ clausesa[((innermost == GFC_OMP_SPLIT_DO && !is_loop)
+ || code->op == EXEC_OMP_PARALLEL_MASTER
+ || code->op == EXEC_OMP_PARALLEL_MASKED)
+ ? (int) GFC_OMP_SPLIT_PARALLEL
+ : innermost].lists[OMP_LIST_PRIVATE]
+ = code->ext.omp_clauses->lists[OMP_LIST_PRIVATE];
+ /* Firstprivate clause is supported on all constructs but
+ simd and masked/master. Put it on the outermost of those and duplicate
+ on parallel and teams. */
+ if (mask & GFC_OMP_MASK_TARGET)
+ clausesa[GFC_OMP_SPLIT_TARGET].lists[OMP_LIST_FIRSTPRIVATE]
+ = code->ext.omp_clauses->lists[OMP_LIST_FIRSTPRIVATE];
+ if (mask & GFC_OMP_MASK_TEAMS)
+ clausesa[GFC_OMP_SPLIT_TEAMS].lists[OMP_LIST_FIRSTPRIVATE]
+ = code->ext.omp_clauses->lists[OMP_LIST_FIRSTPRIVATE];
+ else if (mask & GFC_OMP_MASK_DISTRIBUTE)
+ clausesa[GFC_OMP_SPLIT_DISTRIBUTE].lists[OMP_LIST_FIRSTPRIVATE]
+ = code->ext.omp_clauses->lists[OMP_LIST_FIRSTPRIVATE];
+ if (mask & GFC_OMP_MASK_TASKLOOP)
+ clausesa[GFC_OMP_SPLIT_TASKLOOP].lists[OMP_LIST_FIRSTPRIVATE]
+ = code->ext.omp_clauses->lists[OMP_LIST_FIRSTPRIVATE];
+ if ((mask & GFC_OMP_MASK_PARALLEL)
+ && !(mask & GFC_OMP_MASK_TASKLOOP))
+ clausesa[GFC_OMP_SPLIT_PARALLEL].lists[OMP_LIST_FIRSTPRIVATE]
+ = code->ext.omp_clauses->lists[OMP_LIST_FIRSTPRIVATE];
+ else if ((mask & GFC_OMP_MASK_DO) && !is_loop)
+ clausesa[GFC_OMP_SPLIT_DO].lists[OMP_LIST_FIRSTPRIVATE]
+ = code->ext.omp_clauses->lists[OMP_LIST_FIRSTPRIVATE];
+ /* Lastprivate is allowed on distribute, do, simd, taskloop and loop.
+ In parallel do{, simd} we actually want to put it on
+ parallel rather than do. */
+ if (mask & GFC_OMP_MASK_DISTRIBUTE)
+ clausesa[GFC_OMP_SPLIT_DISTRIBUTE].lists[OMP_LIST_LASTPRIVATE]
+ = code->ext.omp_clauses->lists[OMP_LIST_LASTPRIVATE];
+ if (mask & GFC_OMP_MASK_TASKLOOP)
+ clausesa[GFC_OMP_SPLIT_TASKLOOP].lists[OMP_LIST_LASTPRIVATE]
+ = code->ext.omp_clauses->lists[OMP_LIST_LASTPRIVATE];
+ if ((mask & GFC_OMP_MASK_PARALLEL) && !is_loop
+ && !(mask & GFC_OMP_MASK_TASKLOOP))
+ clausesa[GFC_OMP_SPLIT_PARALLEL].lists[OMP_LIST_LASTPRIVATE]
+ = code->ext.omp_clauses->lists[OMP_LIST_LASTPRIVATE];
+ else if (mask & GFC_OMP_MASK_DO)
+ clausesa[GFC_OMP_SPLIT_DO].lists[OMP_LIST_LASTPRIVATE]
+ = code->ext.omp_clauses->lists[OMP_LIST_LASTPRIVATE];
+ if (mask & GFC_OMP_MASK_SIMD)
+ clausesa[GFC_OMP_SPLIT_SIMD].lists[OMP_LIST_LASTPRIVATE]
+ = code->ext.omp_clauses->lists[OMP_LIST_LASTPRIVATE];
+ /* Reduction is allowed on simd, do, parallel, teams, taskloop, and loop.
+ Duplicate it on all of them, but
+ - omit on do if parallel is present;
+ - omit on teams and parallel if loop is present;
+ additionally, inscan applies to do/simd only. */
+ for (int i = OMP_LIST_REDUCTION; i <= OMP_LIST_REDUCTION_TASK; i++)
+ {
+ if (mask & GFC_OMP_MASK_TASKLOOP
+ && i != OMP_LIST_REDUCTION_INSCAN)
+ clausesa[GFC_OMP_SPLIT_TASKLOOP].lists[i]
+ = code->ext.omp_clauses->lists[i];
+ if (mask & GFC_OMP_MASK_TEAMS
+ && i != OMP_LIST_REDUCTION_INSCAN
+ && !is_loop)
+ clausesa[GFC_OMP_SPLIT_TEAMS].lists[i]
+ = code->ext.omp_clauses->lists[i];
+ if (mask & GFC_OMP_MASK_PARALLEL
+ && i != OMP_LIST_REDUCTION_INSCAN
+ && !(mask & GFC_OMP_MASK_TASKLOOP)
+ && !is_loop)
+ clausesa[GFC_OMP_SPLIT_PARALLEL].lists[i]
+ = code->ext.omp_clauses->lists[i];
+ else if (mask & GFC_OMP_MASK_DO)
+ clausesa[GFC_OMP_SPLIT_DO].lists[i]
+ = code->ext.omp_clauses->lists[i];
+ if (mask & GFC_OMP_MASK_SIMD)
+ clausesa[GFC_OMP_SPLIT_SIMD].lists[i]
+ = code->ext.omp_clauses->lists[i];
+ }
+ if (mask & GFC_OMP_MASK_TARGET)
+ clausesa[GFC_OMP_SPLIT_TARGET].lists[OMP_LIST_IN_REDUCTION]
+ = code->ext.omp_clauses->lists[OMP_LIST_IN_REDUCTION];
+ if (mask & GFC_OMP_MASK_TASKLOOP)
+ clausesa[GFC_OMP_SPLIT_TASKLOOP].lists[OMP_LIST_IN_REDUCTION]
+ = code->ext.omp_clauses->lists[OMP_LIST_IN_REDUCTION];
+ /* Linear clause is supported on do and simd,
+ put it on the innermost one. */
+ clausesa[innermost].lists[OMP_LIST_LINEAR]
+ = code->ext.omp_clauses->lists[OMP_LIST_LINEAR];
+ }
+ /* Propagate firstprivate/lastprivate/reduction vars to
+ shared (parallel, teams) and map-tofrom (target). */
+ if (mask & GFC_OMP_MASK_TARGET)
+ gfc_add_clause_implicitly (&clausesa[GFC_OMP_SPLIT_TARGET],
+ code->ext.omp_clauses, true, false);
+ if ((mask & GFC_OMP_MASK_PARALLEL) && innermost != GFC_OMP_SPLIT_PARALLEL)
+ gfc_add_clause_implicitly (&clausesa[GFC_OMP_SPLIT_PARALLEL],
+ code->ext.omp_clauses, false,
+ mask & GFC_OMP_MASK_DO);
+ if ((mask & GFC_OMP_MASK_TEAMS) && innermost != GFC_OMP_SPLIT_TEAMS)
+ gfc_add_clause_implicitly (&clausesa[GFC_OMP_SPLIT_TEAMS],
+ code->ext.omp_clauses, false, false);
+ if (((mask & (GFC_OMP_MASK_PARALLEL | GFC_OMP_MASK_DO))
+ == (GFC_OMP_MASK_PARALLEL | GFC_OMP_MASK_DO))
+ && !is_loop)
+ clausesa[GFC_OMP_SPLIT_DO].nowait = true;
+
+ /* Distribute the allocate clause to do, parallel, distribute, teams, target
+ and taskloop. The code below iterates over the variables in the
+ allocate list and checks whether each variable also appears in a
+ privatization clause on one of those constructs. If it does, we add it
+ to the list of 'allocate'd variables for that construct. If a
+ variable is found in none of them, we issue an error. */
+
+ if (code->ext.omp_clauses->lists[OMP_LIST_ALLOCATE])
+ {
+ gfc_omp_namelist *alloc_nl, *priv_nl;
+ gfc_omp_namelist *tails[GFC_OMP_SPLIT_NUM];
+ for (alloc_nl = code->ext.omp_clauses->lists[OMP_LIST_ALLOCATE];
+ alloc_nl; alloc_nl = alloc_nl->next)
+ {
+ bool found = false;
+ for (int i = GFC_OMP_SPLIT_DO; i <= GFC_OMP_SPLIT_TASKLOOP; i++)
+ {
+ gfc_omp_namelist *p;
+ int list;
+ for (list = 0; list < OMP_LIST_NUM; list++)
+ {
+ switch (list)
+ {
+ case OMP_LIST_PRIVATE:
+ case OMP_LIST_FIRSTPRIVATE:
+ case OMP_LIST_LASTPRIVATE:
+ case OMP_LIST_REDUCTION:
+ case OMP_LIST_REDUCTION_INSCAN:
+ case OMP_LIST_REDUCTION_TASK:
+ case OMP_LIST_IN_REDUCTION:
+ case OMP_LIST_TASK_REDUCTION:
+ case OMP_LIST_LINEAR:
+ for (priv_nl = clausesa[i].lists[list]; priv_nl;
+ priv_nl = priv_nl->next)
+ if (alloc_nl->sym == priv_nl->sym)
+ {
+ found = true;
+ p = gfc_get_omp_namelist ();
+ p->sym = alloc_nl->sym;
+ p->expr = alloc_nl->expr;
+ p->where = alloc_nl->where;
+ if (clausesa[i].lists[OMP_LIST_ALLOCATE] == NULL)
+ {
+ clausesa[i].lists[OMP_LIST_ALLOCATE] = p;
+ tails[i] = p;
+ }
+ else
+ {
+ tails[i]->next = p;
+ tails[i] = tails[i]->next;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ if (!found)
+ gfc_error ("%qs specified in 'allocate' clause at %L but not "
+ "in an explicit privatization clause",
+ alloc_nl->sym->name, &alloc_nl->where);
+ }
+ }
+}
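gfc_split_omp_clauses first classifies the combined construct into a bit mask of constituent constructs plus an "innermost" index, then copies each clause onto the constituents that accept it. The sketch below only mirrors the "mask bit = 1 << split index" relationship; the enumerator names and their order are hypothetical stand-ins, not the front end's real GFC_OMP_MASK_* / GFC_OMP_SPLIT_* values:

#include <cstdio>

enum split_index { SPLIT_DO, SPLIT_SIMD, SPLIT_PARALLEL, SPLIT_DISTRIBUTE,
                   SPLIT_TEAMS, SPLIT_TARGET, SPLIT_TASKLOOP, SPLIT_MASKED,
                   SPLIT_NUM };
enum mask_bit { MASK_DO = 1 << SPLIT_DO, MASK_SIMD = 1 << SPLIT_SIMD,
                MASK_PARALLEL = 1 << SPLIT_PARALLEL,
                MASK_DISTRIBUTE = 1 << SPLIT_DISTRIBUTE,
                MASK_TEAMS = 1 << SPLIT_TEAMS, MASK_TARGET = 1 << SPLIT_TARGET,
                MASK_TASKLOOP = 1 << SPLIT_TASKLOOP,
                MASK_MASKED = 1 << SPLIT_MASKED };

int
main ()
{
  // A combined "target teams distribute parallel do simd" directive sets one
  // mask bit per constituent construct; the innermost index picks the
  // constituent that receives the loop-local clauses.
  int mask = MASK_TARGET | MASK_TEAMS | MASK_DISTRIBUTE | MASK_PARALLEL
             | MASK_DO | MASK_SIMD;
  int innermost = SPLIT_SIMD;
  static const char *names[SPLIT_NUM]
    = { "do", "simd", "parallel", "distribute", "teams", "target",
        "taskloop", "masked" };
  for (int i = 0; i < SPLIT_NUM; ++i)
    if (mask & (1 << i))
      printf ("%s%s\n", names[i], i == innermost ? " (innermost)" : "");
}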
+
+static tree
+gfc_trans_omp_do_simd (gfc_code *code, stmtblock_t *pblock,
+ gfc_omp_clauses *clausesa, tree omp_clauses)
+{
+ stmtblock_t block;
+ gfc_omp_clauses clausesa_buf[GFC_OMP_SPLIT_NUM];
+ tree stmt, body, omp_do_clauses = NULL_TREE;
+ bool free_clausesa = false;
+
+ if (pblock == NULL)
+ gfc_start_block (&block);
+ else
+ gfc_init_block (&block);
+
+ if (clausesa == NULL)
+ {
+ clausesa = clausesa_buf;
+ gfc_split_omp_clauses (code, clausesa);
+ free_clausesa = true;
+ }
+ if (flag_openmp)
+ omp_do_clauses
+ = gfc_trans_omp_clauses (&block, &clausesa[GFC_OMP_SPLIT_DO], code->loc);
+ body = gfc_trans_omp_do (code, EXEC_OMP_SIMD, pblock ? pblock : &block,
+ &clausesa[GFC_OMP_SPLIT_SIMD], omp_clauses);
+ if (pblock == NULL)
+ {
+ if (TREE_CODE (body) != BIND_EXPR)
+ body = build3_v (BIND_EXPR, NULL, body, poplevel (1, 0));
+ else
+ poplevel (0, 0);
+ }
+ else if (TREE_CODE (body) != BIND_EXPR)
+ body = build3_v (BIND_EXPR, NULL, body, NULL_TREE);
+ if (flag_openmp)
+ {
+ stmt = make_node (OMP_FOR);
+ TREE_TYPE (stmt) = void_type_node;
+ OMP_FOR_BODY (stmt) = body;
+ OMP_FOR_CLAUSES (stmt) = omp_do_clauses;
+ }
+ else
+ stmt = body;
+ gfc_add_expr_to_block (&block, stmt);
+ if (free_clausesa)
+ gfc_free_split_omp_clauses (code, clausesa);
+ return gfc_finish_block (&block);
+}
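gfc_trans_omp_do_simd builds the composite inside out: the SIMD loop is always generated, and the enclosing worksharing "do" node is only created when full OpenMP is enabled (with -fopenmp-simd alone, only the SIMD loop survives). A hedged sketch of that shape, with placeholder node names rather than GCC trees:

#include <iostream>
#include <memory>
#include <string>

struct node
{
  std::string kind;
  std::unique_ptr<node> body;
};

static std::unique_ptr<node>
build_do_simd (bool flag_openmp_enabled)
{
  auto simd = std::make_unique<node> ();
  simd->kind = "simd";
  if (!flag_openmp_enabled)
    return simd;                    // -fopenmp-simd: keep only the simd loop
  auto omp_do = std::make_unique<node> ();
  omp_do->kind = "do";
  omp_do->body = std::move (simd);  // -fopenmp: wrap it in the worksharing do
  return omp_do;
}

int
main ()
{
  for (bool f : { false, true })
    {
      auto n = build_do_simd (f);
      std::cout << n->kind;
      if (n->body)
        std::cout << " -> " << n->body->kind;
      std::cout << '\n';
    }
}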
+
+static tree
+gfc_trans_omp_parallel_do (gfc_code *code, bool is_loop, stmtblock_t *pblock,
+ gfc_omp_clauses *clausesa)
+{
+ stmtblock_t block, *new_pblock = pblock;
+ gfc_omp_clauses clausesa_buf[GFC_OMP_SPLIT_NUM];
+ tree stmt, omp_clauses = NULL_TREE;
+ bool free_clausesa = false;
+
+ if (pblock == NULL)
+ gfc_start_block (&block);
+ else
+ gfc_init_block (&block);
+
+ if (clausesa == NULL)
+ {
+ clausesa = clausesa_buf;
+ gfc_split_omp_clauses (code, clausesa);
+ free_clausesa = true;
+ }
+ omp_clauses
+ = gfc_trans_omp_clauses (&block, &clausesa[GFC_OMP_SPLIT_PARALLEL],
+ code->loc);
+ if (pblock == NULL)
+ {
+ if (!clausesa[GFC_OMP_SPLIT_DO].ordered
+ && clausesa[GFC_OMP_SPLIT_DO].sched_kind != OMP_SCHED_STATIC)
+ new_pblock = &block;
+ else
+ pushlevel ();
+ }
+ stmt = gfc_trans_omp_do (code, is_loop ? EXEC_OMP_LOOP : EXEC_OMP_DO,
+ new_pblock, &clausesa[GFC_OMP_SPLIT_DO],
+ omp_clauses);
+ if (pblock == NULL)
+ {
+ if (TREE_CODE (stmt) != BIND_EXPR)
+ stmt = build3_v (BIND_EXPR, NULL, stmt, poplevel (1, 0));
+ else
+ poplevel (0, 0);
+ }
+ else if (TREE_CODE (stmt) != BIND_EXPR)
+ stmt = build3_v (BIND_EXPR, NULL, stmt, NULL_TREE);
+ stmt = build2_loc (gfc_get_location (&code->loc), OMP_PARALLEL,
+ void_type_node, stmt, omp_clauses);
+ OMP_PARALLEL_COMBINED (stmt) = 1;
+ gfc_add_expr_to_block (&block, stmt);
+ if (free_clausesa)
+ gfc_free_split_omp_clauses (code, clausesa);
+ return gfc_finish_block (&block);
+}
+
+static tree
+gfc_trans_omp_parallel_do_simd (gfc_code *code, stmtblock_t *pblock,
+ gfc_omp_clauses *clausesa)
+{
+ stmtblock_t block;
+ gfc_omp_clauses clausesa_buf[GFC_OMP_SPLIT_NUM];
+ tree stmt, omp_clauses = NULL_TREE;
+ bool free_clausesa = false;
+
+ if (pblock == NULL)
+ gfc_start_block (&block);
+ else
+ gfc_init_block (&block);
+
+ if (clausesa == NULL)
+ {
+ clausesa = clausesa_buf;
+ gfc_split_omp_clauses (code, clausesa);
+ free_clausesa = true;
+ }
+ if (flag_openmp)
+ omp_clauses
+ = gfc_trans_omp_clauses (&block, &clausesa[GFC_OMP_SPLIT_PARALLEL],
+ code->loc);
+ if (pblock == NULL)
+ pushlevel ();
+ stmt = gfc_trans_omp_do_simd (code, pblock, clausesa, omp_clauses);
+ if (pblock == NULL)
+ {
+ if (TREE_CODE (stmt) != BIND_EXPR)
+ stmt = build3_v (BIND_EXPR, NULL, stmt, poplevel (1, 0));
+ else
+ poplevel (0, 0);
+ }
+ else if (TREE_CODE (stmt) != BIND_EXPR)
+ stmt = build3_v (BIND_EXPR, NULL, stmt, NULL_TREE);
+ if (flag_openmp)
+ {
+ stmt = build2_loc (gfc_get_location (&code->loc), OMP_PARALLEL,
+ void_type_node, stmt, omp_clauses);
+ OMP_PARALLEL_COMBINED (stmt) = 1;
+ }
+ gfc_add_expr_to_block (&block, stmt);
+ if (free_clausesa)
+ gfc_free_split_omp_clauses (code, clausesa);
+ return gfc_finish_block (&block);
+}
+
+static tree
+gfc_trans_omp_parallel_sections (gfc_code *code)
+{
+ stmtblock_t block;
+ gfc_omp_clauses section_clauses;
+ tree stmt, omp_clauses;
+
+ memset (&section_clauses, 0, sizeof (section_clauses));
+ section_clauses.nowait = true;
+
+ gfc_start_block (&block);
+ omp_clauses = gfc_trans_omp_clauses (&block, code->ext.omp_clauses,
+ code->loc);
+ pushlevel ();
+ stmt = gfc_trans_omp_sections (code, &section_clauses);
+ if (TREE_CODE (stmt) != BIND_EXPR)
+ stmt = build3_v (BIND_EXPR, NULL, stmt, poplevel (1, 0));
+ else
+ poplevel (0, 0);
+ stmt = build2_loc (gfc_get_location (&code->loc), OMP_PARALLEL,
+ void_type_node, stmt, omp_clauses);
+ OMP_PARALLEL_COMBINED (stmt) = 1;
+ gfc_add_expr_to_block (&block, stmt);
+ return gfc_finish_block (&block);
+}
+
+static tree
+gfc_trans_omp_parallel_workshare (gfc_code *code)
+{
+ stmtblock_t block;
+ gfc_omp_clauses workshare_clauses;
+ tree stmt, omp_clauses;
+
+ memset (&workshare_clauses, 0, sizeof (workshare_clauses));
+ workshare_clauses.nowait = true;
+
+ gfc_start_block (&block);
+ omp_clauses = gfc_trans_omp_clauses (&block, code->ext.omp_clauses,
+ code->loc);
+ pushlevel ();
+ stmt = gfc_trans_omp_workshare (code, &workshare_clauses);
+ stmt = build3_v (BIND_EXPR, NULL, stmt, poplevel (1, 0));
+ stmt = build2_loc (gfc_get_location (&code->loc), OMP_PARALLEL,
+ void_type_node, stmt, omp_clauses);
+ OMP_PARALLEL_COMBINED (stmt) = 1;
+ gfc_add_expr_to_block (&block, stmt);
+ return gfc_finish_block (&block);
+}
+
+static tree
+gfc_trans_omp_scope (gfc_code *code)
+{
+ stmtblock_t block;
+ tree body = gfc_trans_code (code->block->next);
+ if (IS_EMPTY_STMT (body))
+ return body;
+ gfc_start_block (&block);
+ tree omp_clauses = gfc_trans_omp_clauses (&block, code->ext.omp_clauses,
+ code->loc);
+ tree stmt = make_node (OMP_SCOPE);
+ TREE_TYPE (stmt) = void_type_node;
+ OMP_SCOPE_BODY (stmt) = body;
+ OMP_SCOPE_CLAUSES (stmt) = omp_clauses;
+ gfc_add_expr_to_block (&block, stmt);
+ return gfc_finish_block (&block);
+}
+
+static tree
+gfc_trans_omp_sections (gfc_code *code, gfc_omp_clauses *clauses)
+{
+ stmtblock_t block, body;
+ tree omp_clauses, stmt;
+ bool has_lastprivate = clauses->lists[OMP_LIST_LASTPRIVATE] != NULL;
+ location_t loc = gfc_get_location (&code->loc);
+
+ gfc_start_block (&block);
+
+ omp_clauses = gfc_trans_omp_clauses (&block, clauses, code->loc);
+
+ gfc_init_block (&body);
+ for (code = code->block; code; code = code->block)
+ {
+ /* Last section is special because of lastprivate, so even if it
+ is empty, chain it in. */
+ stmt = gfc_trans_omp_code (code->next,
+ has_lastprivate && code->block == NULL);
+ if (! IS_EMPTY_STMT (stmt))
+ {
+ stmt = build1_v (OMP_SECTION, stmt);
+ gfc_add_expr_to_block (&body, stmt);
+ }
+ }
+ stmt = gfc_finish_block (&body);
+
+ stmt = build2_loc (loc, OMP_SECTIONS, void_type_node, stmt, omp_clauses);
+ gfc_add_expr_to_block (&block, stmt);
+
+ return gfc_finish_block (&block);
+}
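As the comment in gfc_trans_omp_sections notes, the last section is chained in even when empty so a lastprivate assignment has somewhere to land. A simplified illustration of that rule, using stand-in types:

#include <iostream>
#include <string>
#include <vector>

// Keep non-empty sections; additionally keep the final section, even if
// empty, whenever a lastprivate clause is present.
static std::vector<std::string>
build_sections (const std::vector<std::string> &bodies, bool has_lastprivate)
{
  std::vector<std::string> out;
  for (size_t i = 0; i < bodies.size (); ++i)
    {
      bool last = (i + 1 == bodies.size ());
      if (!bodies[i].empty () || (last && has_lastprivate))
        out.push_back ("SECTION:" + bodies[i]);
    }
  return out;
}

int
main ()
{
  for (const std::string &s : build_sections ({ "a=1", "", "" }, true))
    std::cout << s << '\n';   // keeps the empty last section
}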
+
+static tree
+gfc_trans_omp_single (gfc_code *code, gfc_omp_clauses *clauses)
+{
+ tree omp_clauses = gfc_trans_omp_clauses (NULL, clauses, code->loc);
+ tree stmt = gfc_trans_omp_code (code->block->next, true);
+ stmt = build2_loc (gfc_get_location (&code->loc), OMP_SINGLE, void_type_node,
+ stmt, omp_clauses);
+ return stmt;
+}
+
+static tree
+gfc_trans_omp_task (gfc_code *code)
+{
+ stmtblock_t block;
+ tree stmt, omp_clauses;
+
+ gfc_start_block (&block);
+ omp_clauses = gfc_trans_omp_clauses (&block, code->ext.omp_clauses,
+ code->loc);
+ pushlevel ();
+ stmt = gfc_trans_omp_code (code->block->next, true);
+ stmt = build3_v (BIND_EXPR, NULL, stmt, poplevel (1, 0));
+ stmt = build2_loc (gfc_get_location (&code->loc), OMP_TASK, void_type_node,
+ stmt, omp_clauses);
+ gfc_add_expr_to_block (&block, stmt);
+ return gfc_finish_block (&block);
+}
+
+static tree
+gfc_trans_omp_taskgroup (gfc_code *code)
+{
+ stmtblock_t block;
+ gfc_start_block (&block);
+ tree body = gfc_trans_code (code->block->next);
+ tree stmt = make_node (OMP_TASKGROUP);
+ TREE_TYPE (stmt) = void_type_node;
+ OMP_TASKGROUP_BODY (stmt) = body;
+ OMP_TASKGROUP_CLAUSES (stmt) = gfc_trans_omp_clauses (&block,
+ code->ext.omp_clauses,
+ code->loc);
+ gfc_add_expr_to_block (&block, stmt);
+ return gfc_finish_block (&block);
+}
+
+static tree
+gfc_trans_omp_taskwait (gfc_code *code)
+{
+ if (!code->ext.omp_clauses)
+ {
+ tree decl = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT);
+ return build_call_expr_loc (input_location, decl, 0);
+ }
+ stmtblock_t block;
+ gfc_start_block (&block);
+ tree stmt = make_node (OMP_TASK);
+ TREE_TYPE (stmt) = void_type_node;
+ OMP_TASK_BODY (stmt) = NULL_TREE;
+ OMP_TASK_CLAUSES (stmt) = gfc_trans_omp_clauses (&block,
+ code->ext.omp_clauses,
+ code->loc);
+ gfc_add_expr_to_block (&block, stmt);
+ return gfc_finish_block (&block);
+}
+
+static tree
+gfc_trans_omp_taskyield (void)
+{
+ tree decl = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD);
+ return build_call_expr_loc (input_location, decl, 0);
+}
+
+static tree
+gfc_trans_omp_distribute (gfc_code *code, gfc_omp_clauses *clausesa)
+{
+ stmtblock_t block;
+ gfc_omp_clauses clausesa_buf[GFC_OMP_SPLIT_NUM];
+ tree stmt, omp_clauses = NULL_TREE;
+ bool free_clausesa = false;
+
+ gfc_start_block (&block);
+ if (clausesa == NULL)
+ {
+ clausesa = clausesa_buf;
+ gfc_split_omp_clauses (code, clausesa);
+ free_clausesa = true;
+ }
+ if (flag_openmp)
+ omp_clauses
+ = gfc_trans_omp_clauses (&block, &clausesa[GFC_OMP_SPLIT_DISTRIBUTE],
+ code->loc);
+ switch (code->op)
+ {
+ case EXEC_OMP_DISTRIBUTE:
+ case EXEC_OMP_TARGET_TEAMS_DISTRIBUTE:
+ case EXEC_OMP_TEAMS_DISTRIBUTE:
+ /* This is handled in gfc_trans_omp_do. */
+ gcc_unreachable ();
+ break;
+ case EXEC_OMP_DISTRIBUTE_PARALLEL_DO:
+ case EXEC_OMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_DO:
+ case EXEC_OMP_TEAMS_DISTRIBUTE_PARALLEL_DO:
+ stmt = gfc_trans_omp_parallel_do (code, false, &block, clausesa);
+ if (TREE_CODE (stmt) != BIND_EXPR)
+ stmt = build3_v (BIND_EXPR, NULL, stmt, poplevel (1, 0));
+ else
+ poplevel (0, 0);
+ break;
+ case EXEC_OMP_DISTRIBUTE_PARALLEL_DO_SIMD:
+ case EXEC_OMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_DO_SIMD:
+ case EXEC_OMP_TEAMS_DISTRIBUTE_PARALLEL_DO_SIMD:
+ stmt = gfc_trans_omp_parallel_do_simd (code, &block, clausesa);
+ if (TREE_CODE (stmt) != BIND_EXPR)
+ stmt = build3_v (BIND_EXPR, NULL, stmt, poplevel (1, 0));
+ else
+ poplevel (0, 0);
+ break;
+ case EXEC_OMP_DISTRIBUTE_SIMD:
+ case EXEC_OMP_TARGET_TEAMS_DISTRIBUTE_SIMD:
+ case EXEC_OMP_TEAMS_DISTRIBUTE_SIMD:
+ stmt = gfc_trans_omp_do (code, EXEC_OMP_SIMD, &block,
+ &clausesa[GFC_OMP_SPLIT_SIMD], NULL_TREE);
+ if (TREE_CODE (stmt) != BIND_EXPR)
+ stmt = build3_v (BIND_EXPR, NULL, stmt, poplevel (1, 0));
+ else
+ poplevel (0, 0);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ if (flag_openmp)
+ {
+ tree distribute = make_node (OMP_DISTRIBUTE);
+ TREE_TYPE (distribute) = void_type_node;
+ OMP_FOR_BODY (distribute) = stmt;
+ OMP_FOR_CLAUSES (distribute) = omp_clauses;
+ stmt = distribute;
+ }
+ gfc_add_expr_to_block (&block, stmt);
+ if (free_clausesa)
+ gfc_free_split_omp_clauses (code, clausesa);
+ return gfc_finish_block (&block);
+}
+
+static tree
+gfc_trans_omp_teams (gfc_code *code, gfc_omp_clauses *clausesa,
+ tree omp_clauses)
+{
+ stmtblock_t block;
+ gfc_omp_clauses clausesa_buf[GFC_OMP_SPLIT_NUM];
+ tree stmt;
+ bool combined = true, free_clausesa = false;
+
+ gfc_start_block (&block);
+ if (clausesa == NULL)
+ {
+ clausesa = clausesa_buf;
+ gfc_split_omp_clauses (code, clausesa);
+ free_clausesa = true;
+ }
+ if (flag_openmp)
+ {
+ omp_clauses
+ = chainon (omp_clauses,
+ gfc_trans_omp_clauses (&block,
+ &clausesa[GFC_OMP_SPLIT_TEAMS],
+ code->loc));
+ pushlevel ();
+ }
+ switch (code->op)
+ {
+ case EXEC_OMP_TARGET_TEAMS:
+ case EXEC_OMP_TEAMS:
+ stmt = gfc_trans_omp_code (code->block->next, true);
+ combined = false;
+ break;
+ case EXEC_OMP_TARGET_TEAMS_DISTRIBUTE:
+ case EXEC_OMP_TEAMS_DISTRIBUTE:
+ stmt = gfc_trans_omp_do (code, EXEC_OMP_DISTRIBUTE, NULL,
+ &clausesa[GFC_OMP_SPLIT_DISTRIBUTE],
+ NULL);
+ break;
+ case EXEC_OMP_TARGET_TEAMS_LOOP:
+ case EXEC_OMP_TEAMS_LOOP:
+ stmt = gfc_trans_omp_do (code, EXEC_OMP_LOOP, NULL,
+ &clausesa[GFC_OMP_SPLIT_DO],
+ NULL);
+ break;
+ default:
+ stmt = gfc_trans_omp_distribute (code, clausesa);
+ break;
+ }
+ if (flag_openmp)
+ {
+ stmt = build3_v (BIND_EXPR, NULL, stmt, poplevel (1, 0));
+ stmt = build2_loc (gfc_get_location (&code->loc), OMP_TEAMS,
+ void_type_node, stmt, omp_clauses);
+ if (combined)
+ OMP_TEAMS_COMBINED (stmt) = 1;
+ }
+ gfc_add_expr_to_block (&block, stmt);
+ if (free_clausesa)
+ gfc_free_split_omp_clauses (code, clausesa);
+ return gfc_finish_block (&block);
+}
+
+static tree
+gfc_trans_omp_target (gfc_code *code)
+{
+ stmtblock_t block;
+ gfc_omp_clauses clausesa[GFC_OMP_SPLIT_NUM];
+ tree stmt, omp_clauses = NULL_TREE;
+
+ gfc_start_block (&block);
+ gfc_split_omp_clauses (code, clausesa);
+ if (flag_openmp)
+ omp_clauses
+ = gfc_trans_omp_clauses (&block, &clausesa[GFC_OMP_SPLIT_TARGET],
+ code->loc);
+ switch (code->op)
+ {
+ case EXEC_OMP_TARGET:
+ pushlevel ();
+ stmt = gfc_trans_omp_code (code->block->next, true);
+ stmt = build3_v (BIND_EXPR, NULL, stmt, poplevel (1, 0));
+ break;
+ case EXEC_OMP_TARGET_PARALLEL:
+ {
+ stmtblock_t iblock;
+
+ pushlevel ();
+ gfc_start_block (&iblock);
+ tree inner_clauses
+ = gfc_trans_omp_clauses (&iblock, &clausesa[GFC_OMP_SPLIT_PARALLEL],
+ code->loc);
+ stmt = gfc_trans_omp_code (code->block->next, true);
+ stmt = build2_loc (input_location, OMP_PARALLEL, void_type_node, stmt,
+ inner_clauses);
+ gfc_add_expr_to_block (&iblock, stmt);
+ stmt = gfc_finish_block (&iblock);
+ if (TREE_CODE (stmt) != BIND_EXPR)
+ stmt = build3_v (BIND_EXPR, NULL, stmt, poplevel (1, 0));
+ else
+ poplevel (0, 0);
+ }
+ break;
+ case EXEC_OMP_TARGET_PARALLEL_DO:
+ case EXEC_OMP_TARGET_PARALLEL_LOOP:
+ stmt = gfc_trans_omp_parallel_do (code,
+ (code->op
+ == EXEC_OMP_TARGET_PARALLEL_LOOP),
+ &block, clausesa);
+ if (TREE_CODE (stmt) != BIND_EXPR)
+ stmt = build3_v (BIND_EXPR, NULL, stmt, poplevel (1, 0));
+ else
+ poplevel (0, 0);
+ break;
+ case EXEC_OMP_TARGET_PARALLEL_DO_SIMD:
+ stmt = gfc_trans_omp_parallel_do_simd (code, &block, clausesa);
+ if (TREE_CODE (stmt) != BIND_EXPR)
+ stmt = build3_v (BIND_EXPR, NULL, stmt, poplevel (1, 0));
+ else
+ poplevel (0, 0);
+ break;
+ case EXEC_OMP_TARGET_SIMD:
+ stmt = gfc_trans_omp_do (code, EXEC_OMP_SIMD, &block,
+ &clausesa[GFC_OMP_SPLIT_SIMD], NULL_TREE);
+ if (TREE_CODE (stmt) != BIND_EXPR)
+ stmt = build3_v (BIND_EXPR, NULL, stmt, poplevel (1, 0));
+ else
+ poplevel (0, 0);
+ break;
+ default:
+ if (flag_openmp
+ && (clausesa[GFC_OMP_SPLIT_TEAMS].num_teams_upper
+ || clausesa[GFC_OMP_SPLIT_TEAMS].thread_limit))
+ {
+ gfc_omp_clauses clausesb;
+ tree teams_clauses;
+ /* For combined !$omp target teams, the num_teams and
+ thread_limit clauses are evaluated before entering the
+ target construct. */
+ memset (&clausesb, '\0', sizeof (clausesb));
+ clausesb.num_teams_lower
+ = clausesa[GFC_OMP_SPLIT_TEAMS].num_teams_lower;
+ clausesb.num_teams_upper
+ = clausesa[GFC_OMP_SPLIT_TEAMS].num_teams_upper;
+ clausesb.thread_limit = clausesa[GFC_OMP_SPLIT_TEAMS].thread_limit;
+ clausesa[GFC_OMP_SPLIT_TEAMS].num_teams_lower = NULL;
+ clausesa[GFC_OMP_SPLIT_TEAMS].num_teams_upper = NULL;
+ clausesa[GFC_OMP_SPLIT_TEAMS].thread_limit = NULL;
+ teams_clauses
+ = gfc_trans_omp_clauses (&block, &clausesb, code->loc);
+ pushlevel ();
+ stmt = gfc_trans_omp_teams (code, clausesa, teams_clauses);
+ }
+ else
+ {
+ pushlevel ();
+ stmt = gfc_trans_omp_teams (code, clausesa, NULL_TREE);
+ }
+ if (TREE_CODE (stmt) != BIND_EXPR)
+ stmt = build3_v (BIND_EXPR, NULL, stmt, poplevel (1, 0));
+ else
+ poplevel (0, 0);
+ break;
+ }
+ if (flag_openmp)
+ {
+ stmt = build2_loc (gfc_get_location (&code->loc), OMP_TARGET,
+ void_type_node, stmt, omp_clauses);
+ if (code->op != EXEC_OMP_TARGET)
+ OMP_TARGET_COMBINED (stmt) = 1;
+ cfun->has_omp_target = true;
+ }
+ gfc_add_expr_to_block (&block, stmt);
+ gfc_free_split_omp_clauses (code, clausesa);
+ return gfc_finish_block (&block);
+}
+
+static tree
+gfc_trans_omp_taskloop (gfc_code *code, gfc_exec_op op)
+{
+ stmtblock_t block;
+ gfc_omp_clauses clausesa[GFC_OMP_SPLIT_NUM];
+ tree stmt, omp_clauses = NULL_TREE;
+
+ gfc_start_block (&block);
+ gfc_split_omp_clauses (code, clausesa);
+ if (flag_openmp)
+ omp_clauses
+ = gfc_trans_omp_clauses (&block, &clausesa[GFC_OMP_SPLIT_TASKLOOP],
+ code->loc);
+ switch (op)
+ {
+ case EXEC_OMP_TASKLOOP:
+ /* This is handled in gfc_trans_omp_do. */
+ gcc_unreachable ();
+ break;
+ case EXEC_OMP_TASKLOOP_SIMD:
+ stmt = gfc_trans_omp_do (code, EXEC_OMP_SIMD, &block,
+ &clausesa[GFC_OMP_SPLIT_SIMD], NULL_TREE);
+ if (TREE_CODE (stmt) != BIND_EXPR)
+ stmt = build3_v (BIND_EXPR, NULL, stmt, poplevel (1, 0));
+ else
+ poplevel (0, 0);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ if (flag_openmp)
+ {
+ tree taskloop = make_node (OMP_TASKLOOP);
+ TREE_TYPE (taskloop) = void_type_node;
+ OMP_FOR_BODY (taskloop) = stmt;
+ OMP_FOR_CLAUSES (taskloop) = omp_clauses;
+ stmt = taskloop;
+ }
+ gfc_add_expr_to_block (&block, stmt);
+ gfc_free_split_omp_clauses (code, clausesa);
+ return gfc_finish_block (&block);
+}
+
+static tree
+gfc_trans_omp_master_masked_taskloop (gfc_code *code, gfc_exec_op op)
+{
+ gfc_omp_clauses clausesa[GFC_OMP_SPLIT_NUM];
+ stmtblock_t block;
+ tree stmt;
+
+ if (op != EXEC_OMP_MASTER_TASKLOOP_SIMD
+ && code->op != EXEC_OMP_MASTER_TASKLOOP)
+ gfc_split_omp_clauses (code, clausesa);
+
+ pushlevel ();
+ if (op == EXEC_OMP_MASKED_TASKLOOP_SIMD
+ || op == EXEC_OMP_MASTER_TASKLOOP_SIMD)
+ stmt = gfc_trans_omp_taskloop (code, EXEC_OMP_TASKLOOP_SIMD);
+ else
+ {
+ gcc_assert (op == EXEC_OMP_MASKED_TASKLOOP
+ || op == EXEC_OMP_MASTER_TASKLOOP);
+ stmt = gfc_trans_omp_do (code, EXEC_OMP_TASKLOOP, NULL,
+ code->op != EXEC_OMP_MASTER_TASKLOOP
+ ? &clausesa[GFC_OMP_SPLIT_TASKLOOP]
+ : code->ext.omp_clauses, NULL);
+ }
+ if (TREE_CODE (stmt) != BIND_EXPR)
+ stmt = build3_v (BIND_EXPR, NULL, stmt, poplevel (1, 0));
+ else
+ poplevel (0, 0);
+ gfc_start_block (&block);
+ if (op == EXEC_OMP_MASKED_TASKLOOP || op == EXEC_OMP_MASKED_TASKLOOP_SIMD)
+ {
+ tree clauses = gfc_trans_omp_clauses (&block,
+ &clausesa[GFC_OMP_SPLIT_MASKED],
+ code->loc);
+ tree msk = make_node (OMP_MASKED);
+ TREE_TYPE (msk) = void_type_node;
+ OMP_MASKED_BODY (msk) = stmt;
+ OMP_MASKED_CLAUSES (msk) = clauses;
+ OMP_MASKED_COMBINED (msk) = 1;
+ gfc_add_expr_to_block (&block, msk);
+ }
+ else
+ {
+ gcc_assert (op == EXEC_OMP_MASTER_TASKLOOP
+ || op == EXEC_OMP_MASTER_TASKLOOP_SIMD);
+ stmt = build1_v (OMP_MASTER, stmt);
+ gfc_add_expr_to_block (&block, stmt);
+ }
+ if (op != EXEC_OMP_MASTER_TASKLOOP_SIMD
+ && code->op != EXEC_OMP_MASTER_TASKLOOP)
+ gfc_free_split_omp_clauses (code, clausesa);
+ return gfc_finish_block (&block);
+}
+
+static tree
+gfc_trans_omp_parallel_master_masked (gfc_code *code)
+{
+ stmtblock_t block;
+ tree stmt, omp_clauses;
+ gfc_omp_clauses clausesa[GFC_OMP_SPLIT_NUM];
+ bool parallel_combined = false;
+
+ if (code->op != EXEC_OMP_PARALLEL_MASTER)
+ gfc_split_omp_clauses (code, clausesa);
+
+ gfc_start_block (&block);
+ omp_clauses = gfc_trans_omp_clauses (&block,
+ code->op == EXEC_OMP_PARALLEL_MASTER
+ ? code->ext.omp_clauses
+ : &clausesa[GFC_OMP_SPLIT_PARALLEL],
+ code->loc);
+ pushlevel ();
+ if (code->op == EXEC_OMP_PARALLEL_MASTER)
+ stmt = gfc_trans_omp_master (code);
+ else if (code->op == EXEC_OMP_PARALLEL_MASKED)
+ stmt = gfc_trans_omp_masked (code, &clausesa[GFC_OMP_SPLIT_MASKED]);
+ else
+ {
+ gfc_exec_op op;
+ switch (code->op)
+ {
+ case EXEC_OMP_PARALLEL_MASKED_TASKLOOP:
+ op = EXEC_OMP_MASKED_TASKLOOP;
+ break;
+ case EXEC_OMP_PARALLEL_MASKED_TASKLOOP_SIMD:
+ op = EXEC_OMP_MASKED_TASKLOOP_SIMD;
+ break;
+ case EXEC_OMP_PARALLEL_MASTER_TASKLOOP:
+ op = EXEC_OMP_MASTER_TASKLOOP;
+ break;
+ case EXEC_OMP_PARALLEL_MASTER_TASKLOOP_SIMD:
+ op = EXEC_OMP_MASTER_TASKLOOP_SIMD;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ stmt = gfc_trans_omp_master_masked_taskloop (code, op);
+ parallel_combined = true;
+ }
+ if (TREE_CODE (stmt) != BIND_EXPR)
+ stmt = build3_v (BIND_EXPR, NULL, stmt, poplevel (1, 0));
+ else
+ poplevel (0, 0);
+ stmt = build2_loc (gfc_get_location (&code->loc), OMP_PARALLEL,
+ void_type_node, stmt, omp_clauses);
+ /* masked has just the filter clause, but during gimplification
+ it isn't represented by its own omp context, so for
+ !$omp parallel masked don't set OMP_PARALLEL_COMBINED,
+ so that
+ !$omp parallel masked
+ !$omp taskloop simd lastprivate (x)
+ isn't confused with
+ !$omp parallel masked taskloop simd lastprivate (x). */
+ if (parallel_combined)
+ OMP_PARALLEL_COMBINED (stmt) = 1;
+ gfc_add_expr_to_block (&block, stmt);
+ if (code->op != EXEC_OMP_PARALLEL_MASTER)
+ gfc_free_split_omp_clauses (code, clausesa);
+ return gfc_finish_block (&block);
+}
+
+static tree
+gfc_trans_omp_target_data (gfc_code *code)
+{
+ stmtblock_t block;
+ tree stmt, omp_clauses;
+
+ gfc_start_block (&block);
+ omp_clauses = gfc_trans_omp_clauses (&block, code->ext.omp_clauses,
+ code->loc);
+ stmt = gfc_trans_omp_code (code->block->next, true);
+ stmt = build2_loc (gfc_get_location (&code->loc), OMP_TARGET_DATA,
+ void_type_node, stmt, omp_clauses);
+ gfc_add_expr_to_block (&block, stmt);
+ return gfc_finish_block (&block);
+}
+
+static tree
+gfc_trans_omp_target_enter_data (gfc_code *code)
+{
+ stmtblock_t block;
+ tree stmt, omp_clauses;
+
+ gfc_start_block (&block);
+ omp_clauses = gfc_trans_omp_clauses (&block, code->ext.omp_clauses,
+ code->loc);
+ stmt = build1_loc (input_location, OMP_TARGET_ENTER_DATA, void_type_node,
+ omp_clauses);
+ gfc_add_expr_to_block (&block, stmt);
+ return gfc_finish_block (&block);
+}
+
+static tree
+gfc_trans_omp_target_exit_data (gfc_code *code)
+{
+ stmtblock_t block;
+ tree stmt, omp_clauses;
+
+ gfc_start_block (&block);
+ omp_clauses = gfc_trans_omp_clauses (&block, code->ext.omp_clauses,
+ code->loc);
+ stmt = build1_loc (input_location, OMP_TARGET_EXIT_DATA, void_type_node,
+ omp_clauses);
+ gfc_add_expr_to_block (&block, stmt);
+ return gfc_finish_block (&block);
+}
+
+static tree
+gfc_trans_omp_target_update (gfc_code *code)
+{
+ stmtblock_t block;
+ tree stmt, omp_clauses;
+
+ gfc_start_block (&block);
+ omp_clauses = gfc_trans_omp_clauses (&block, code->ext.omp_clauses,
+ code->loc);
+ stmt = build1_loc (input_location, OMP_TARGET_UPDATE, void_type_node,
+ omp_clauses);
+ gfc_add_expr_to_block (&block, stmt);
+ return gfc_finish_block (&block);
+}
+
+static tree
+gfc_trans_omp_workshare (gfc_code *code, gfc_omp_clauses *clauses)
+{
+ tree res, tmp, stmt;
+ stmtblock_t block, *pblock = NULL;
+ stmtblock_t singleblock;
+ int saved_ompws_flags;
+ bool singleblock_in_progress = false;
+ /* True if previous gfc_code in workshare construct is not workshared. */
+ bool prev_singleunit;
+ location_t loc = gfc_get_location (&code->loc);
+
+ code = code->block->next;
+
+ pushlevel ();
+
+ gfc_start_block (&block);
+ pblock = &block;
+
+ ompws_flags = OMPWS_WORKSHARE_FLAG;
+ prev_singleunit = false;
+
+ /* Translate statements one by one to trees until we reach
+ the end of the workshare construct. Adjacent gfc_codes that
+ are a single unit of work are clustered and encapsulated in a
+ single OMP_SINGLE construct. */
+ for (; code; code = code->next)
+ {
+ if (code->here != 0)
+ {
+ res = gfc_trans_label_here (code);
+ gfc_add_expr_to_block (pblock, res);
+ }
+
+ /* No dependence analysis is done, so the generated worksharing
+ constructs get a wait (barrier); only for the last gfc_code can the
+ construct's own nowait clause take effect. */
+ if (code->next == NULL && clauses->nowait)
+ ompws_flags |= OMPWS_NOWAIT;
+
+ /* By default, every gfc_code is a single unit of work. */
+ ompws_flags |= OMPWS_CURR_SINGLEUNIT;
+ ompws_flags &= ~(OMPWS_SCALARIZER_WS | OMPWS_SCALARIZER_BODY);
+
+ switch (code->op)
+ {
+ case EXEC_NOP:
+ res = NULL_TREE;
+ break;
+
+ case EXEC_ASSIGN:
+ res = gfc_trans_assign (code);
+ break;
+
+ case EXEC_POINTER_ASSIGN:
+ res = gfc_trans_pointer_assign (code);
+ break;
+
+ case EXEC_INIT_ASSIGN:
+ res = gfc_trans_init_assign (code);
+ break;
+
+ case EXEC_FORALL:
+ res = gfc_trans_forall (code);
+ break;
+
+ case EXEC_WHERE:
+ res = gfc_trans_where (code);
+ break;
+
+ case EXEC_OMP_ATOMIC:
+ res = gfc_trans_omp_directive (code);
+ break;
+
+ case EXEC_OMP_PARALLEL:
+ case EXEC_OMP_PARALLEL_DO:
+ case EXEC_OMP_PARALLEL_MASTER:
+ case EXEC_OMP_PARALLEL_SECTIONS:
+ case EXEC_OMP_PARALLEL_WORKSHARE:
+ case EXEC_OMP_CRITICAL:
+ saved_ompws_flags = ompws_flags;
+ ompws_flags = 0;
+ res = gfc_trans_omp_directive (code);
+ ompws_flags = saved_ompws_flags;
+ break;
+
+ case EXEC_BLOCK:
+ res = gfc_trans_block_construct (code);
+ break;
+
+ default:
+ gfc_internal_error ("gfc_trans_omp_workshare(): Bad statement code");
+ }
+
+ gfc_set_backend_locus (&code->loc);
+
+ if (res != NULL_TREE && ! IS_EMPTY_STMT (res))
+ {
+ if (prev_singleunit)
+ {
+ if (ompws_flags & OMPWS_CURR_SINGLEUNIT)
+ /* Add current gfc_code to single block. */
+ gfc_add_expr_to_block (&singleblock, res);
+ else
+ {
+ /* Finish single block and add it to pblock. */
+ tmp = gfc_finish_block (&singleblock);
+ tmp = build2_loc (loc, OMP_SINGLE,
+ void_type_node, tmp, NULL_TREE);
+ gfc_add_expr_to_block (pblock, tmp);
+ /* Add current gfc_code to pblock. */
+ gfc_add_expr_to_block (pblock, res);
+ singleblock_in_progress = false;
+ }
+ }
+ else
+ {
+ if (ompws_flags & OMPWS_CURR_SINGLEUNIT)
+ {
+ /* Start single block. */
+ gfc_init_block (&singleblock);
+ gfc_add_expr_to_block (&singleblock, res);
+ singleblock_in_progress = true;
+ loc = gfc_get_location (&code->loc);
+ }
+ else
+ /* Add the new statement to the block. */
+ gfc_add_expr_to_block (pblock, res);
+ }
+ prev_singleunit = (ompws_flags & OMPWS_CURR_SINGLEUNIT) != 0;
+ }
+ }
+
+ /* Finish remaining SINGLE block, if we were in the middle of one. */
+ if (singleblock_in_progress)
+ {
+ /* Finish single block and add it to pblock. */
+ tmp = gfc_finish_block (&singleblock);
+ tmp = build2_loc (loc, OMP_SINGLE, void_type_node, tmp,
+ clauses->nowait
+ ? build_omp_clause (input_location, OMP_CLAUSE_NOWAIT)
+ : NULL_TREE);
+ gfc_add_expr_to_block (pblock, tmp);
+ }
+
+ stmt = gfc_finish_block (pblock);
+ if (TREE_CODE (stmt) != BIND_EXPR)
+ {
+ if (!IS_EMPTY_STMT (stmt))
+ {
+ tree bindblock = poplevel (1, 0);
+ stmt = build3_v (BIND_EXPR, NULL, stmt, bindblock);
+ }
+ else
+ poplevel (0, 0);
+ }
+ else
+ poplevel (0, 0);
+
+ if (IS_EMPTY_STMT (stmt) && !clauses->nowait)
+ stmt = gfc_trans_omp_barrier ();
+
+ ompws_flags = 0;
+ return stmt;
+}
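The workshare translation above clusters adjacent statements that are not themselves workshared ("single units") and wraps each run in one OMP_SINGLE, while workshared statements are emitted directly. A compact sketch of that clustering loop over a hypothetical statement list:

#include <iostream>
#include <string>
#include <vector>

struct stmt { std::string text; bool single_unit; };

int
main ()
{
  std::vector<stmt> code = { { "x = 1", true }, { "y = 2", true },
                             { "a(:) = b(:)", false }, { "z = 3", true } };
  bool in_single = false;
  for (const stmt &s : code)
    {
      if (s.single_unit && !in_single)
        {
          std::cout << "OMP SINGLE {\n";   // start a new single block
          in_single = true;
        }
      else if (!s.single_unit && in_single)
        {
          std::cout << "}\n";              // finish the pending single block
          in_single = false;
        }
      std::cout << "  " << s.text << '\n';
    }
  if (in_single)
    std::cout << "}\n";                    // close a trailing single block
}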
+
+tree
+gfc_trans_oacc_declare (gfc_code *code)
+{
+ stmtblock_t block;
+ tree stmt, oacc_clauses;
+ enum tree_code construct_code;
+
+ construct_code = OACC_DATA;
+
+ gfc_start_block (&block);
+
+ oacc_clauses = gfc_trans_omp_clauses (&block, code->ext.oacc_declare->clauses,
+ code->loc, false, true);
+ stmt = gfc_trans_omp_code (code->block->next, true);
+ stmt = build2_loc (input_location, construct_code, void_type_node, stmt,
+ oacc_clauses);
+ gfc_add_expr_to_block (&block, stmt);
+
+ return gfc_finish_block (&block);
+}
+
+tree
+gfc_trans_oacc_directive (gfc_code *code)
+{
+ switch (code->op)
+ {
+ case EXEC_OACC_PARALLEL_LOOP:
+ case EXEC_OACC_KERNELS_LOOP:
+ case EXEC_OACC_SERIAL_LOOP:
+ return gfc_trans_oacc_combined_directive (code);
+ case EXEC_OACC_PARALLEL:
+ case EXEC_OACC_KERNELS:
+ case EXEC_OACC_SERIAL:
+ case EXEC_OACC_DATA:
+ case EXEC_OACC_HOST_DATA:
+ return gfc_trans_oacc_construct (code);
+ case EXEC_OACC_LOOP:
+ return gfc_trans_omp_do (code, code->op, NULL, code->ext.omp_clauses,
+ NULL);
+ case EXEC_OACC_UPDATE:
+ case EXEC_OACC_CACHE:
+ case EXEC_OACC_ENTER_DATA:
+ case EXEC_OACC_EXIT_DATA:
+ return gfc_trans_oacc_executable_directive (code);
+ case EXEC_OACC_WAIT:
+ return gfc_trans_oacc_wait_directive (code);
+ case EXEC_OACC_ATOMIC:
+ return gfc_trans_omp_atomic (code);
+ case EXEC_OACC_DECLARE:
+ return gfc_trans_oacc_declare (code);
+ default:
+ gcc_unreachable ();
+ }
+}
+
+tree
+gfc_trans_omp_directive (gfc_code *code)
+{
+ switch (code->op)
+ {
+ case EXEC_OMP_ATOMIC:
+ return gfc_trans_omp_atomic (code);
+ case EXEC_OMP_BARRIER:
+ return gfc_trans_omp_barrier ();
+ case EXEC_OMP_CANCEL:
+ return gfc_trans_omp_cancel (code);
+ case EXEC_OMP_CANCELLATION_POINT:
+ return gfc_trans_omp_cancellation_point (code);
+ case EXEC_OMP_CRITICAL:
+ return gfc_trans_omp_critical (code);
+ case EXEC_OMP_DEPOBJ:
+ return gfc_trans_omp_depobj (code);
+ case EXEC_OMP_DISTRIBUTE:
+ case EXEC_OMP_DO:
+ case EXEC_OMP_LOOP:
+ case EXEC_OMP_SIMD:
+ case EXEC_OMP_TASKLOOP:
+ return gfc_trans_omp_do (code, code->op, NULL, code->ext.omp_clauses,
+ NULL);
+ case EXEC_OMP_DISTRIBUTE_PARALLEL_DO:
+ case EXEC_OMP_DISTRIBUTE_PARALLEL_DO_SIMD:
+ case EXEC_OMP_DISTRIBUTE_SIMD:
+ return gfc_trans_omp_distribute (code, NULL);
+ case EXEC_OMP_DO_SIMD:
+ return gfc_trans_omp_do_simd (code, NULL, NULL, NULL_TREE);
+ case EXEC_OMP_ERROR:
+ return gfc_trans_omp_error (code);
+ case EXEC_OMP_FLUSH:
+ return gfc_trans_omp_flush (code);
+ case EXEC_OMP_MASKED:
+ return gfc_trans_omp_masked (code, NULL);
+ case EXEC_OMP_MASTER:
+ return gfc_trans_omp_master (code);
+ case EXEC_OMP_MASKED_TASKLOOP:
+ case EXEC_OMP_MASKED_TASKLOOP_SIMD:
+ case EXEC_OMP_MASTER_TASKLOOP:
+ case EXEC_OMP_MASTER_TASKLOOP_SIMD:
+ return gfc_trans_omp_master_masked_taskloop (code, code->op);
+ case EXEC_OMP_ORDERED:
+ return gfc_trans_omp_ordered (code);
+ case EXEC_OMP_PARALLEL:
+ return gfc_trans_omp_parallel (code);
+ case EXEC_OMP_PARALLEL_DO:
+ return gfc_trans_omp_parallel_do (code, false, NULL, NULL);
+ case EXEC_OMP_PARALLEL_LOOP:
+ return gfc_trans_omp_parallel_do (code, true, NULL, NULL);
+ case EXEC_OMP_PARALLEL_DO_SIMD:
+ return gfc_trans_omp_parallel_do_simd (code, NULL, NULL);
+ case EXEC_OMP_PARALLEL_MASKED:
+ case EXEC_OMP_PARALLEL_MASKED_TASKLOOP:
+ case EXEC_OMP_PARALLEL_MASKED_TASKLOOP_SIMD:
+ case EXEC_OMP_PARALLEL_MASTER:
+ case EXEC_OMP_PARALLEL_MASTER_TASKLOOP:
+ case EXEC_OMP_PARALLEL_MASTER_TASKLOOP_SIMD:
+ return gfc_trans_omp_parallel_master_masked (code);
+ case EXEC_OMP_PARALLEL_SECTIONS:
+ return gfc_trans_omp_parallel_sections (code);
+ case EXEC_OMP_PARALLEL_WORKSHARE:
+ return gfc_trans_omp_parallel_workshare (code);
+ case EXEC_OMP_SCOPE:
+ return gfc_trans_omp_scope (code);
+ case EXEC_OMP_SECTIONS:
+ return gfc_trans_omp_sections (code, code->ext.omp_clauses);
+ case EXEC_OMP_SINGLE:
+ return gfc_trans_omp_single (code, code->ext.omp_clauses);
+ case EXEC_OMP_TARGET:
+ case EXEC_OMP_TARGET_PARALLEL:
+ case EXEC_OMP_TARGET_PARALLEL_DO:
+ case EXEC_OMP_TARGET_PARALLEL_DO_SIMD:
+ case EXEC_OMP_TARGET_PARALLEL_LOOP:
+ case EXEC_OMP_TARGET_SIMD:
+ case EXEC_OMP_TARGET_TEAMS:
+ case EXEC_OMP_TARGET_TEAMS_DISTRIBUTE:
+ case EXEC_OMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_DO:
+ case EXEC_OMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_DO_SIMD:
+ case EXEC_OMP_TARGET_TEAMS_DISTRIBUTE_SIMD:
+ case EXEC_OMP_TARGET_TEAMS_LOOP:
+ return gfc_trans_omp_target (code);
+ case EXEC_OMP_TARGET_DATA:
+ return gfc_trans_omp_target_data (code);
+ case EXEC_OMP_TARGET_ENTER_DATA:
+ return gfc_trans_omp_target_enter_data (code);
+ case EXEC_OMP_TARGET_EXIT_DATA:
+ return gfc_trans_omp_target_exit_data (code);
+ case EXEC_OMP_TARGET_UPDATE:
+ return gfc_trans_omp_target_update (code);
+ case EXEC_OMP_TASK:
+ return gfc_trans_omp_task (code);
+ case EXEC_OMP_TASKGROUP:
+ return gfc_trans_omp_taskgroup (code);
+ case EXEC_OMP_TASKLOOP_SIMD:
+ return gfc_trans_omp_taskloop (code, code->op);
+ case EXEC_OMP_TASKWAIT:
+ return gfc_trans_omp_taskwait (code);
+ case EXEC_OMP_TASKYIELD:
+ return gfc_trans_omp_taskyield ();
+ case EXEC_OMP_TEAMS:
+ case EXEC_OMP_TEAMS_DISTRIBUTE:
+ case EXEC_OMP_TEAMS_DISTRIBUTE_PARALLEL_DO:
+ case EXEC_OMP_TEAMS_DISTRIBUTE_PARALLEL_DO_SIMD:
+ case EXEC_OMP_TEAMS_DISTRIBUTE_SIMD:
+ case EXEC_OMP_TEAMS_LOOP:
+ return gfc_trans_omp_teams (code, NULL, NULL_TREE);
+ case EXEC_OMP_WORKSHARE:
+ return gfc_trans_omp_workshare (code, code->ext.omp_clauses);
+ default:
+ gcc_unreachable ();
+ }
+}
+
+void
+gfc_trans_omp_declare_simd (gfc_namespace *ns)
+{
+ if (ns->entries)
+ return;
+
+ gfc_omp_declare_simd *ods;
+ for (ods = ns->omp_declare_simd; ods; ods = ods->next)
+ {
+ tree c = gfc_trans_omp_clauses (NULL, ods->clauses, ods->where, true);
+ tree fndecl = ns->proc_name->backend_decl;
+ if (c != NULL_TREE)
+ c = tree_cons (NULL_TREE, c, NULL_TREE);
+ c = build_tree_list (get_identifier ("omp declare simd"), c);
+ TREE_CHAIN (c) = DECL_ATTRIBUTES (fndecl);
+ DECL_ATTRIBUTES (fndecl) = c;
+ }
+}
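Each "declare simd" directive above becomes one attribute entry pushed onto the front of the function declaration's attribute chain (the tree_cons/DECL_ATTRIBUTES pattern). A rough model of that chaining, with a standard container standing in for GCC's tree lists:

#include <forward_list>
#include <iostream>
#include <string>
#include <utility>

using attr_list = std::forward_list<std::pair<std::string, std::string>>;

// Prepend a ("omp declare simd", clauses) entry, mirroring how the directive
// is recorded on the function declaration.
static void
add_declare_simd (attr_list &decl_attributes, const std::string &clauses)
{
  decl_attributes.push_front ({ "omp declare simd", clauses });
}

int
main ()
{
  attr_list fndecl_attrs = { { "noinline", "" } };
  add_declare_simd (fndecl_attrs, "simdlen(8) linear(i)");
  for (const auto &a : fndecl_attrs)
    std::cout << a.first << " [" << a.second << "]\n";
}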
+
+void
+gfc_trans_omp_declare_variant (gfc_namespace *ns)
+{
+ tree base_fn_decl = ns->proc_name->backend_decl;
+ gfc_namespace *search_ns = ns;
+ gfc_omp_declare_variant *next;
+
+ for (gfc_omp_declare_variant *odv = search_ns->omp_declare_variant;
+ search_ns; odv = next)
+ {
+ /* Look in the parent namespace if there are no more directives in the
+ current namespace. */
+ if (!odv)
+ {
+ search_ns = search_ns->parent;
+ if (search_ns)
+ next = search_ns->omp_declare_variant;
+ continue;
+ }
+
+ next = odv->next;
+
+ if (odv->error_p)
+ continue;
+
+ /* Check directive the first time it is encountered. */
+ bool error_found = true;
+
+ if (odv->checked_p)
+ error_found = false;
+ if (odv->base_proc_symtree == NULL)
+ {
+ if (!search_ns->proc_name->attr.function
+ && !search_ns->proc_name->attr.subroutine)
+ gfc_error ("The base name for 'declare variant' must be "
+ "specified at %L ", &odv->where);
+ else
+ error_found = false;
+ }
+ else
+ {
+ if (!search_ns->contained
+ && strcmp (odv->base_proc_symtree->name,
+ ns->proc_name->name))
+ gfc_error ("The base name at %L does not match the name of the "
+ "current procedure", &odv->where);
+ else if (odv->base_proc_symtree->n.sym->attr.entry)
+ gfc_error ("The base name at %L must not be an entry name",
+ &odv->where);
+ else if (odv->base_proc_symtree->n.sym->attr.generic)
+ gfc_error ("The base name at %L must not be a generic name",
+ &odv->where);
+ else if (odv->base_proc_symtree->n.sym->attr.proc_pointer)
+ gfc_error ("The base name at %L must not be a procedure pointer",
+ &odv->where);
+ else if (odv->base_proc_symtree->n.sym->attr.implicit_type)
+ gfc_error ("The base procedure at %L must have an explicit "
+ "interface", &odv->where);
+ else
+ error_found = false;
+ }
+
+ odv->checked_p = true;
+ if (error_found)
+ {
+ odv->error_p = true;
+ continue;
+ }
+
+ /* Ignore directives that do not apply to the current procedure. */
+ if ((odv->base_proc_symtree == NULL && search_ns != ns)
+ || (odv->base_proc_symtree != NULL
+ && strcmp (odv->base_proc_symtree->name, ns->proc_name->name)))
+ continue;
+
+ tree set_selectors = NULL_TREE;
+ gfc_omp_set_selector *oss;
+
+ for (oss = odv->set_selectors; oss; oss = oss->next)
+ {
+ tree selectors = NULL_TREE;
+ gfc_omp_selector *os;
+ for (os = oss->trait_selectors; os; os = os->next)
+ {
+ tree properties = NULL_TREE;
+ gfc_omp_trait_property *otp;
+
+ for (otp = os->properties; otp; otp = otp->next)
+ {
+ switch (otp->property_kind)
+ {
+ case CTX_PROPERTY_USER:
+ case CTX_PROPERTY_EXPR:
+ {
+ gfc_se se;
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, otp->expr);
+ properties = tree_cons (NULL_TREE, se.expr,
+ properties);
+ }
+ break;
+ case CTX_PROPERTY_ID:
+ properties = tree_cons (get_identifier (otp->name),
+ NULL_TREE, properties);
+ break;
+ case CTX_PROPERTY_NAME_LIST:
+ {
+ tree prop = NULL_TREE, value = NULL_TREE;
+ if (otp->is_name)
+ prop = get_identifier (otp->name);
+ else
+ value = gfc_conv_constant_to_tree (otp->expr);
+
+ properties = tree_cons (prop, value, properties);
+ }
+ break;
+ case CTX_PROPERTY_SIMD:
+ properties = gfc_trans_omp_clauses (NULL, otp->clauses,
+ odv->where, true);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ if (os->score)
+ {
+ gfc_se se;
+ gfc_init_se (&se, NULL);
+ gfc_conv_expr (&se, os->score);
+ properties = tree_cons (get_identifier (" score"),
+ se.expr, properties);
+ }
+
+ selectors = tree_cons (get_identifier (os->trait_selector_name),
+ properties, selectors);
+ }
+
+ set_selectors
+ = tree_cons (get_identifier (oss->trait_set_selector_name),
+ selectors, set_selectors);
+ }
+
+ const char *variant_proc_name = odv->variant_proc_symtree->name;
+ gfc_symbol *variant_proc_sym = odv->variant_proc_symtree->n.sym;
+ if (variant_proc_sym == NULL || variant_proc_sym->attr.implicit_type)
+ {
+ gfc_symtree *proc_st;
+ gfc_find_sym_tree (variant_proc_name, gfc_current_ns, 1, &proc_st);
+ variant_proc_sym = proc_st->n.sym;
+ }
+ if (variant_proc_sym == NULL)
+ {
+ gfc_error ("Cannot find symbol %qs", variant_proc_name);
+ continue;
+ }
+ set_selectors = omp_check_context_selector
+ (gfc_get_location (&odv->where), set_selectors);
+ if (set_selectors != error_mark_node)
+ {
+ if (!variant_proc_sym->attr.implicit_type
+ && !variant_proc_sym->attr.subroutine
+ && !variant_proc_sym->attr.function)
+ {
+ gfc_error ("variant %qs at %L is not a function or subroutine",
+ variant_proc_name, &odv->where);
+ variant_proc_sym = NULL;
+ }
+ else if (omp_get_context_selector (set_selectors, "construct",
+ "simd") == NULL_TREE)
+ {
+ char err[256];
+ if (!gfc_compare_interfaces (ns->proc_name, variant_proc_sym,
+ variant_proc_sym->name, 0, 1,
+ err, sizeof (err), NULL, NULL))
+ {
+ gfc_error ("variant %qs and base %qs at %L have "
+ "incompatible types: %s",
+ variant_proc_name, ns->proc_name->name,
+ &odv->where, err);
+ variant_proc_sym = NULL;
+ }
+ }
+ if (variant_proc_sym != NULL)
+ {
+ gfc_set_sym_referenced (variant_proc_sym);
+ tree construct = omp_get_context_selector (set_selectors,
+ "construct", NULL);
+ omp_mark_declare_variant (gfc_get_location (&odv->where),
+ gfc_get_symbol_decl (variant_proc_sym),
+ construct);
+ if (omp_context_selector_matches (set_selectors))
+ {
+ tree id = get_identifier ("omp declare variant base");
+ tree variant = gfc_get_symbol_decl (variant_proc_sym);
+ DECL_ATTRIBUTES (base_fn_decl)
+ = tree_cons (id, build_tree_list (variant, set_selectors),
+ DECL_ATTRIBUTES (base_fn_decl));
+ }
+ }
+ }
+ }
+}
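gfc_trans_omp_declare_variant assembles a nested structure of trait-set selectors, each holding trait selectors, each holding properties, which is then checked against the compilation context. The sketch below models that nesting and a trivial match; all names are hypothetical and the real matching is done by omp_check_context_selector / omp_context_selector_matches:

#include <iostream>
#include <set>
#include <string>
#include <vector>

struct trait_selector { std::string name; std::vector<std::string> properties; };
struct trait_set { std::string name; std::vector<trait_selector> selectors; };

// Trivial stand-in check: the context "matches" if it provides every named
// trait selector in every trait set.
static bool
matches (const std::vector<trait_set> &ctx_selector,
         const std::set<std::string> &provided_traits)
{
  for (const trait_set &ts : ctx_selector)
    for (const trait_selector &s : ts.selectors)
      if (provided_traits.count (s.name) == 0)
        return false;
  return true;
}

int
main ()
{
  std::vector<trait_set> selector
    = { { "construct", { { "simd", { "simdlen(8)" } } } },
        { "device", { { "kind", { "gpu" } } } } };
  std::set<std::string> provided = { "simd", "kind" };
  std::cout << (matches (selector, provided) ? "variant applies\n"
                                             : "variant skipped\n");
}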