diff options
author | Martin Liska <mliska@suse.cz> | 2022-01-14 16:56:44 +0100 |
---|---|---|
committer | Martin Liska <mliska@suse.cz> | 2022-01-17 22:12:04 +0100 |
commit | 5c69acb32329d49e58c26fa41ae74229a52b9106 (patch) | |
tree | ddb05f9d73afb6f998457d2ac4b720e3b3b60483 /gcc/tree-ssa-ccp.c | |
parent | 490e23032baaece71f2ec09fa1805064b150fbc2 (diff) | |
download | gcc-5c69acb32329d49e58c26fa41ae74229a52b9106.zip gcc-5c69acb32329d49e58c26fa41ae74229a52b9106.tar.gz gcc-5c69acb32329d49e58c26fa41ae74229a52b9106.tar.bz2 |
Rename .c files to .cc files.
gcc/ada/ChangeLog:
* adadecode.c: Moved to...
* adadecode.cc: ...here.
* affinity.c: Moved to...
* affinity.cc: ...here.
* argv-lynxos178-raven-cert.c: Moved to...
* argv-lynxos178-raven-cert.cc: ...here.
* argv.c: Moved to...
* argv.cc: ...here.
* aux-io.c: Moved to...
* aux-io.cc: ...here.
* cio.c: Moved to...
* cio.cc: ...here.
* cstreams.c: Moved to...
* cstreams.cc: ...here.
* env.c: Moved to...
* env.cc: ...here.
* exit.c: Moved to...
* exit.cc: ...here.
* expect.c: Moved to...
* expect.cc: ...here.
* final.c: Moved to...
* final.cc: ...here.
* gcc-interface/cuintp.c: Moved to...
* gcc-interface/cuintp.cc: ...here.
* gcc-interface/decl.c: Moved to...
* gcc-interface/decl.cc: ...here.
* gcc-interface/misc.c: Moved to...
* gcc-interface/misc.cc: ...here.
* gcc-interface/targtyps.c: Moved to...
* gcc-interface/targtyps.cc: ...here.
* gcc-interface/trans.c: Moved to...
* gcc-interface/trans.cc: ...here.
* gcc-interface/utils.c: Moved to...
* gcc-interface/utils.cc: ...here.
* gcc-interface/utils2.c: Moved to...
* gcc-interface/utils2.cc: ...here.
* init.c: Moved to...
* init.cc: ...here.
* initialize.c: Moved to...
* initialize.cc: ...here.
* libgnarl/thread.c: Moved to...
* libgnarl/thread.cc: ...here.
* link.c: Moved to...
* link.cc: ...here.
* locales.c: Moved to...
* locales.cc: ...here.
* mkdir.c: Moved to...
* mkdir.cc: ...here.
* raise.c: Moved to...
* raise.cc: ...here.
* rtfinal.c: Moved to...
* rtfinal.cc: ...here.
* rtinit.c: Moved to...
* rtinit.cc: ...here.
* seh_init.c: Moved to...
* seh_init.cc: ...here.
* sigtramp-armdroid.c: Moved to...
* sigtramp-armdroid.cc: ...here.
* sigtramp-ios.c: Moved to...
* sigtramp-ios.cc: ...here.
* sigtramp-qnx.c: Moved to...
* sigtramp-qnx.cc: ...here.
* sigtramp-vxworks.c: Moved to...
* sigtramp-vxworks.cc: ...here.
* socket.c: Moved to...
* socket.cc: ...here.
* tracebak.c: Moved to...
* tracebak.cc: ...here.
* version.c: Moved to...
* version.cc: ...here.
* vx_stack_info.c: Moved to...
* vx_stack_info.cc: ...here.
gcc/ChangeLog:
* adjust-alignment.c: Moved to...
* adjust-alignment.cc: ...here.
* alias.c: Moved to...
* alias.cc: ...here.
* alloc-pool.c: Moved to...
* alloc-pool.cc: ...here.
* asan.c: Moved to...
* asan.cc: ...here.
* attribs.c: Moved to...
* attribs.cc: ...here.
* auto-inc-dec.c: Moved to...
* auto-inc-dec.cc: ...here.
* auto-profile.c: Moved to...
* auto-profile.cc: ...here.
* bb-reorder.c: Moved to...
* bb-reorder.cc: ...here.
* bitmap.c: Moved to...
* bitmap.cc: ...here.
* btfout.c: Moved to...
* btfout.cc: ...here.
* builtins.c: Moved to...
* builtins.cc: ...here.
* caller-save.c: Moved to...
* caller-save.cc: ...here.
* calls.c: Moved to...
* calls.cc: ...here.
* ccmp.c: Moved to...
* ccmp.cc: ...here.
* cfg.c: Moved to...
* cfg.cc: ...here.
* cfganal.c: Moved to...
* cfganal.cc: ...here.
* cfgbuild.c: Moved to...
* cfgbuild.cc: ...here.
* cfgcleanup.c: Moved to...
* cfgcleanup.cc: ...here.
* cfgexpand.c: Moved to...
* cfgexpand.cc: ...here.
* cfghooks.c: Moved to...
* cfghooks.cc: ...here.
* cfgloop.c: Moved to...
* cfgloop.cc: ...here.
* cfgloopanal.c: Moved to...
* cfgloopanal.cc: ...here.
* cfgloopmanip.c: Moved to...
* cfgloopmanip.cc: ...here.
* cfgrtl.c: Moved to...
* cfgrtl.cc: ...here.
* cgraph.c: Moved to...
* cgraph.cc: ...here.
* cgraphbuild.c: Moved to...
* cgraphbuild.cc: ...here.
* cgraphclones.c: Moved to...
* cgraphclones.cc: ...here.
* cgraphunit.c: Moved to...
* cgraphunit.cc: ...here.
* collect-utils.c: Moved to...
* collect-utils.cc: ...here.
* collect2-aix.c: Moved to...
* collect2-aix.cc: ...here.
* collect2.c: Moved to...
* collect2.cc: ...here.
* combine-stack-adj.c: Moved to...
* combine-stack-adj.cc: ...here.
* combine.c: Moved to...
* combine.cc: ...here.
* common/common-targhooks.c: Moved to...
* common/common-targhooks.cc: ...here.
* common/config/aarch64/aarch64-common.c: Moved to...
* common/config/aarch64/aarch64-common.cc: ...here.
* common/config/alpha/alpha-common.c: Moved to...
* common/config/alpha/alpha-common.cc: ...here.
* common/config/arc/arc-common.c: Moved to...
* common/config/arc/arc-common.cc: ...here.
* common/config/arm/arm-common.c: Moved to...
* common/config/arm/arm-common.cc: ...here.
* common/config/avr/avr-common.c: Moved to...
* common/config/avr/avr-common.cc: ...here.
* common/config/bfin/bfin-common.c: Moved to...
* common/config/bfin/bfin-common.cc: ...here.
* common/config/bpf/bpf-common.c: Moved to...
* common/config/bpf/bpf-common.cc: ...here.
* common/config/c6x/c6x-common.c: Moved to...
* common/config/c6x/c6x-common.cc: ...here.
* common/config/cr16/cr16-common.c: Moved to...
* common/config/cr16/cr16-common.cc: ...here.
* common/config/cris/cris-common.c: Moved to...
* common/config/cris/cris-common.cc: ...here.
* common/config/csky/csky-common.c: Moved to...
* common/config/csky/csky-common.cc: ...here.
* common/config/default-common.c: Moved to...
* common/config/default-common.cc: ...here.
* common/config/epiphany/epiphany-common.c: Moved to...
* common/config/epiphany/epiphany-common.cc: ...here.
* common/config/fr30/fr30-common.c: Moved to...
* common/config/fr30/fr30-common.cc: ...here.
* common/config/frv/frv-common.c: Moved to...
* common/config/frv/frv-common.cc: ...here.
* common/config/gcn/gcn-common.c: Moved to...
* common/config/gcn/gcn-common.cc: ...here.
* common/config/h8300/h8300-common.c: Moved to...
* common/config/h8300/h8300-common.cc: ...here.
* common/config/i386/i386-common.c: Moved to...
* common/config/i386/i386-common.cc: ...here.
* common/config/ia64/ia64-common.c: Moved to...
* common/config/ia64/ia64-common.cc: ...here.
* common/config/iq2000/iq2000-common.c: Moved to...
* common/config/iq2000/iq2000-common.cc: ...here.
* common/config/lm32/lm32-common.c: Moved to...
* common/config/lm32/lm32-common.cc: ...here.
* common/config/m32r/m32r-common.c: Moved to...
* common/config/m32r/m32r-common.cc: ...here.
* common/config/m68k/m68k-common.c: Moved to...
* common/config/m68k/m68k-common.cc: ...here.
* common/config/mcore/mcore-common.c: Moved to...
* common/config/mcore/mcore-common.cc: ...here.
* common/config/microblaze/microblaze-common.c: Moved to...
* common/config/microblaze/microblaze-common.cc: ...here.
* common/config/mips/mips-common.c: Moved to...
* common/config/mips/mips-common.cc: ...here.
* common/config/mmix/mmix-common.c: Moved to...
* common/config/mmix/mmix-common.cc: ...here.
* common/config/mn10300/mn10300-common.c: Moved to...
* common/config/mn10300/mn10300-common.cc: ...here.
* common/config/msp430/msp430-common.c: Moved to...
* common/config/msp430/msp430-common.cc: ...here.
* common/config/nds32/nds32-common.c: Moved to...
* common/config/nds32/nds32-common.cc: ...here.
* common/config/nios2/nios2-common.c: Moved to...
* common/config/nios2/nios2-common.cc: ...here.
* common/config/nvptx/nvptx-common.c: Moved to...
* common/config/nvptx/nvptx-common.cc: ...here.
* common/config/or1k/or1k-common.c: Moved to...
* common/config/or1k/or1k-common.cc: ...here.
* common/config/pa/pa-common.c: Moved to...
* common/config/pa/pa-common.cc: ...here.
* common/config/pdp11/pdp11-common.c: Moved to...
* common/config/pdp11/pdp11-common.cc: ...here.
* common/config/pru/pru-common.c: Moved to...
* common/config/pru/pru-common.cc: ...here.
* common/config/riscv/riscv-common.c: Moved to...
* common/config/riscv/riscv-common.cc: ...here.
* common/config/rs6000/rs6000-common.c: Moved to...
* common/config/rs6000/rs6000-common.cc: ...here.
* common/config/rx/rx-common.c: Moved to...
* common/config/rx/rx-common.cc: ...here.
* common/config/s390/s390-common.c: Moved to...
* common/config/s390/s390-common.cc: ...here.
* common/config/sh/sh-common.c: Moved to...
* common/config/sh/sh-common.cc: ...here.
* common/config/sparc/sparc-common.c: Moved to...
* common/config/sparc/sparc-common.cc: ...here.
* common/config/tilegx/tilegx-common.c: Moved to...
* common/config/tilegx/tilegx-common.cc: ...here.
* common/config/tilepro/tilepro-common.c: Moved to...
* common/config/tilepro/tilepro-common.cc: ...here.
* common/config/v850/v850-common.c: Moved to...
* common/config/v850/v850-common.cc: ...here.
* common/config/vax/vax-common.c: Moved to...
* common/config/vax/vax-common.cc: ...here.
* common/config/visium/visium-common.c: Moved to...
* common/config/visium/visium-common.cc: ...here.
* common/config/xstormy16/xstormy16-common.c: Moved to...
* common/config/xstormy16/xstormy16-common.cc: ...here.
* common/config/xtensa/xtensa-common.c: Moved to...
* common/config/xtensa/xtensa-common.cc: ...here.
* compare-elim.c: Moved to...
* compare-elim.cc: ...here.
* config/aarch64/aarch64-bti-insert.c: Moved to...
* config/aarch64/aarch64-bti-insert.cc: ...here.
* config/aarch64/aarch64-builtins.c: Moved to...
* config/aarch64/aarch64-builtins.cc: ...here.
* config/aarch64/aarch64-c.c: Moved to...
* config/aarch64/aarch64-c.cc: ...here.
* config/aarch64/aarch64-d.c: Moved to...
* config/aarch64/aarch64-d.cc: ...here.
* config/aarch64/aarch64.c: Moved to...
* config/aarch64/aarch64.cc: ...here.
* config/aarch64/cortex-a57-fma-steering.c: Moved to...
* config/aarch64/cortex-a57-fma-steering.cc: ...here.
* config/aarch64/driver-aarch64.c: Moved to...
* config/aarch64/driver-aarch64.cc: ...here.
* config/aarch64/falkor-tag-collision-avoidance.c: Moved to...
* config/aarch64/falkor-tag-collision-avoidance.cc: ...here.
* config/aarch64/host-aarch64-darwin.c: Moved to...
* config/aarch64/host-aarch64-darwin.cc: ...here.
* config/alpha/alpha.c: Moved to...
* config/alpha/alpha.cc: ...here.
* config/alpha/driver-alpha.c: Moved to...
* config/alpha/driver-alpha.cc: ...here.
* config/arc/arc-c.c: Moved to...
* config/arc/arc-c.cc: ...here.
* config/arc/arc.c: Moved to...
* config/arc/arc.cc: ...here.
* config/arc/driver-arc.c: Moved to...
* config/arc/driver-arc.cc: ...here.
* config/arm/aarch-common.c: Moved to...
* config/arm/aarch-common.cc: ...here.
* config/arm/arm-builtins.c: Moved to...
* config/arm/arm-builtins.cc: ...here.
* config/arm/arm-c.c: Moved to...
* config/arm/arm-c.cc: ...here.
* config/arm/arm-d.c: Moved to...
* config/arm/arm-d.cc: ...here.
* config/arm/arm.c: Moved to...
* config/arm/arm.cc: ...here.
* config/arm/driver-arm.c: Moved to...
* config/arm/driver-arm.cc: ...here.
* config/avr/avr-c.c: Moved to...
* config/avr/avr-c.cc: ...here.
* config/avr/avr-devices.c: Moved to...
* config/avr/avr-devices.cc: ...here.
* config/avr/avr-log.c: Moved to...
* config/avr/avr-log.cc: ...here.
* config/avr/avr.c: Moved to...
* config/avr/avr.cc: ...here.
* config/avr/driver-avr.c: Moved to...
* config/avr/driver-avr.cc: ...here.
* config/avr/gen-avr-mmcu-specs.c: Moved to...
* config/avr/gen-avr-mmcu-specs.cc: ...here.
* config/avr/gen-avr-mmcu-texi.c: Moved to...
* config/avr/gen-avr-mmcu-texi.cc: ...here.
* config/bfin/bfin.c: Moved to...
* config/bfin/bfin.cc: ...here.
* config/bpf/bpf.c: Moved to...
* config/bpf/bpf.cc: ...here.
* config/bpf/coreout.c: Moved to...
* config/bpf/coreout.cc: ...here.
* config/c6x/c6x.c: Moved to...
* config/c6x/c6x.cc: ...here.
* config/cr16/cr16.c: Moved to...
* config/cr16/cr16.cc: ...here.
* config/cris/cris.c: Moved to...
* config/cris/cris.cc: ...here.
* config/csky/csky.c: Moved to...
* config/csky/csky.cc: ...here.
* config/darwin-c.c: Moved to...
* config/darwin-c.cc: ...here.
* config/darwin-d.c: Moved to...
* config/darwin-d.cc: ...here.
* config/darwin-driver.c: Moved to...
* config/darwin-driver.cc: ...here.
* config/darwin-f.c: Moved to...
* config/darwin-f.cc: ...here.
* config/darwin.c: Moved to...
* config/darwin.cc: ...here.
* config/default-c.c: Moved to...
* config/default-c.cc: ...here.
* config/default-d.c: Moved to...
* config/default-d.cc: ...here.
* config/dragonfly-d.c: Moved to...
* config/dragonfly-d.cc: ...here.
* config/epiphany/epiphany.c: Moved to...
* config/epiphany/epiphany.cc: ...here.
* config/epiphany/mode-switch-use.c: Moved to...
* config/epiphany/mode-switch-use.cc: ...here.
* config/epiphany/resolve-sw-modes.c: Moved to...
* config/epiphany/resolve-sw-modes.cc: ...here.
* config/fr30/fr30.c: Moved to...
* config/fr30/fr30.cc: ...here.
* config/freebsd-d.c: Moved to...
* config/freebsd-d.cc: ...here.
* config/frv/frv.c: Moved to...
* config/frv/frv.cc: ...here.
* config/ft32/ft32.c: Moved to...
* config/ft32/ft32.cc: ...here.
* config/gcn/driver-gcn.c: Moved to...
* config/gcn/driver-gcn.cc: ...here.
* config/gcn/gcn-run.c: Moved to...
* config/gcn/gcn-run.cc: ...here.
* config/gcn/gcn-tree.c: Moved to...
* config/gcn/gcn-tree.cc: ...here.
* config/gcn/gcn.c: Moved to...
* config/gcn/gcn.cc: ...here.
* config/gcn/mkoffload.c: Moved to...
* config/gcn/mkoffload.cc: ...here.
* config/glibc-c.c: Moved to...
* config/glibc-c.cc: ...here.
* config/glibc-d.c: Moved to...
* config/glibc-d.cc: ...here.
* config/h8300/h8300.c: Moved to...
* config/h8300/h8300.cc: ...here.
* config/host-darwin.c: Moved to...
* config/host-darwin.cc: ...here.
* config/host-hpux.c: Moved to...
* config/host-hpux.cc: ...here.
* config/host-linux.c: Moved to...
* config/host-linux.cc: ...here.
* config/host-netbsd.c: Moved to...
* config/host-netbsd.cc: ...here.
* config/host-openbsd.c: Moved to...
* config/host-openbsd.cc: ...here.
* config/host-solaris.c: Moved to...
* config/host-solaris.cc: ...here.
* config/i386/djgpp.c: Moved to...
* config/i386/djgpp.cc: ...here.
* config/i386/driver-i386.c: Moved to...
* config/i386/driver-i386.cc: ...here.
* config/i386/driver-mingw32.c: Moved to...
* config/i386/driver-mingw32.cc: ...here.
* config/i386/gnu-property.c: Moved to...
* config/i386/gnu-property.cc: ...here.
* config/i386/host-cygwin.c: Moved to...
* config/i386/host-cygwin.cc: ...here.
* config/i386/host-i386-darwin.c: Moved to...
* config/i386/host-i386-darwin.cc: ...here.
* config/i386/host-mingw32.c: Moved to...
* config/i386/host-mingw32.cc: ...here.
* config/i386/i386-builtins.c: Moved to...
* config/i386/i386-builtins.cc: ...here.
* config/i386/i386-c.c: Moved to...
* config/i386/i386-c.cc: ...here.
* config/i386/i386-d.c: Moved to...
* config/i386/i386-d.cc: ...here.
* config/i386/i386-expand.c: Moved to...
* config/i386/i386-expand.cc: ...here.
* config/i386/i386-features.c: Moved to...
* config/i386/i386-features.cc: ...here.
* config/i386/i386-options.c: Moved to...
* config/i386/i386-options.cc: ...here.
* config/i386/i386.c: Moved to...
* config/i386/i386.cc: ...here.
* config/i386/intelmic-mkoffload.c: Moved to...
* config/i386/intelmic-mkoffload.cc: ...here.
* config/i386/msformat-c.c: Moved to...
* config/i386/msformat-c.cc: ...here.
* config/i386/winnt-cxx.c: Moved to...
* config/i386/winnt-cxx.cc: ...here.
* config/i386/winnt-d.c: Moved to...
* config/i386/winnt-d.cc: ...here.
* config/i386/winnt-stubs.c: Moved to...
* config/i386/winnt-stubs.cc: ...here.
* config/i386/winnt.c: Moved to...
* config/i386/winnt.cc: ...here.
* config/i386/x86-tune-sched-atom.c: Moved to...
* config/i386/x86-tune-sched-atom.cc: ...here.
* config/i386/x86-tune-sched-bd.c: Moved to...
* config/i386/x86-tune-sched-bd.cc: ...here.
* config/i386/x86-tune-sched-core.c: Moved to...
* config/i386/x86-tune-sched-core.cc: ...here.
* config/i386/x86-tune-sched.c: Moved to...
* config/i386/x86-tune-sched.cc: ...here.
* config/ia64/ia64-c.c: Moved to...
* config/ia64/ia64-c.cc: ...here.
* config/ia64/ia64.c: Moved to...
* config/ia64/ia64.cc: ...here.
* config/iq2000/iq2000.c: Moved to...
* config/iq2000/iq2000.cc: ...here.
* config/linux.c: Moved to...
* config/linux.cc: ...here.
* config/lm32/lm32.c: Moved to...
* config/lm32/lm32.cc: ...here.
* config/m32c/m32c-pragma.c: Moved to...
* config/m32c/m32c-pragma.cc: ...here.
* config/m32c/m32c.c: Moved to...
* config/m32c/m32c.cc: ...here.
* config/m32r/m32r.c: Moved to...
* config/m32r/m32r.cc: ...here.
* config/m68k/m68k.c: Moved to...
* config/m68k/m68k.cc: ...here.
* config/mcore/mcore.c: Moved to...
* config/mcore/mcore.cc: ...here.
* config/microblaze/microblaze-c.c: Moved to...
* config/microblaze/microblaze-c.cc: ...here.
* config/microblaze/microblaze.c: Moved to...
* config/microblaze/microblaze.cc: ...here.
* config/mips/driver-native.c: Moved to...
* config/mips/driver-native.cc: ...here.
* config/mips/frame-header-opt.c: Moved to...
* config/mips/frame-header-opt.cc: ...here.
* config/mips/mips-d.c: Moved to...
* config/mips/mips-d.cc: ...here.
* config/mips/mips.c: Moved to...
* config/mips/mips.cc: ...here.
* config/mmix/mmix.c: Moved to...
* config/mmix/mmix.cc: ...here.
* config/mn10300/mn10300.c: Moved to...
* config/mn10300/mn10300.cc: ...here.
* config/moxie/moxie.c: Moved to...
* config/moxie/moxie.cc: ...here.
* config/msp430/driver-msp430.c: Moved to...
* config/msp430/driver-msp430.cc: ...here.
* config/msp430/msp430-c.c: Moved to...
* config/msp430/msp430-c.cc: ...here.
* config/msp430/msp430-devices.c: Moved to...
* config/msp430/msp430-devices.cc: ...here.
* config/msp430/msp430.c: Moved to...
* config/msp430/msp430.cc: ...here.
* config/nds32/nds32-cost.c: Moved to...
* config/nds32/nds32-cost.cc: ...here.
* config/nds32/nds32-fp-as-gp.c: Moved to...
* config/nds32/nds32-fp-as-gp.cc: ...here.
* config/nds32/nds32-intrinsic.c: Moved to...
* config/nds32/nds32-intrinsic.cc: ...here.
* config/nds32/nds32-isr.c: Moved to...
* config/nds32/nds32-isr.cc: ...here.
* config/nds32/nds32-md-auxiliary.c: Moved to...
* config/nds32/nds32-md-auxiliary.cc: ...here.
* config/nds32/nds32-memory-manipulation.c: Moved to...
* config/nds32/nds32-memory-manipulation.cc: ...here.
* config/nds32/nds32-pipelines-auxiliary.c: Moved to...
* config/nds32/nds32-pipelines-auxiliary.cc: ...here.
* config/nds32/nds32-predicates.c: Moved to...
* config/nds32/nds32-predicates.cc: ...here.
* config/nds32/nds32-relax-opt.c: Moved to...
* config/nds32/nds32-relax-opt.cc: ...here.
* config/nds32/nds32-utils.c: Moved to...
* config/nds32/nds32-utils.cc: ...here.
* config/nds32/nds32.c: Moved to...
* config/nds32/nds32.cc: ...here.
* config/netbsd-d.c: Moved to...
* config/netbsd-d.cc: ...here.
* config/netbsd.c: Moved to...
* config/netbsd.cc: ...here.
* config/nios2/nios2.c: Moved to...
* config/nios2/nios2.cc: ...here.
* config/nvptx/mkoffload.c: Moved to...
* config/nvptx/mkoffload.cc: ...here.
* config/nvptx/nvptx-c.c: Moved to...
* config/nvptx/nvptx-c.cc: ...here.
* config/nvptx/nvptx.c: Moved to...
* config/nvptx/nvptx.cc: ...here.
* config/openbsd-d.c: Moved to...
* config/openbsd-d.cc: ...here.
* config/or1k/or1k.c: Moved to...
* config/or1k/or1k.cc: ...here.
* config/pa/pa-d.c: Moved to...
* config/pa/pa-d.cc: ...here.
* config/pa/pa.c: Moved to...
* config/pa/pa.cc: ...here.
* config/pdp11/pdp11.c: Moved to...
* config/pdp11/pdp11.cc: ...here.
* config/pru/pru-passes.c: Moved to...
* config/pru/pru-passes.cc: ...here.
* config/pru/pru-pragma.c: Moved to...
* config/pru/pru-pragma.cc: ...here.
* config/pru/pru.c: Moved to...
* config/pru/pru.cc: ...here.
* config/riscv/riscv-builtins.c: Moved to...
* config/riscv/riscv-builtins.cc: ...here.
* config/riscv/riscv-c.c: Moved to...
* config/riscv/riscv-c.cc: ...here.
* config/riscv/riscv-d.c: Moved to...
* config/riscv/riscv-d.cc: ...here.
* config/riscv/riscv-shorten-memrefs.c: Moved to...
* config/riscv/riscv-shorten-memrefs.cc: ...here.
* config/riscv/riscv-sr.c: Moved to...
* config/riscv/riscv-sr.cc: ...here.
* config/riscv/riscv.c: Moved to...
* config/riscv/riscv.cc: ...here.
* config/rl78/rl78-c.c: Moved to...
* config/rl78/rl78-c.cc: ...here.
* config/rl78/rl78.c: Moved to...
* config/rl78/rl78.cc: ...here.
* config/rs6000/driver-rs6000.c: Moved to...
* config/rs6000/driver-rs6000.cc: ...here.
* config/rs6000/host-darwin.c: Moved to...
* config/rs6000/host-darwin.cc: ...here.
* config/rs6000/host-ppc64-darwin.c: Moved to...
* config/rs6000/host-ppc64-darwin.cc: ...here.
* config/rs6000/rbtree.c: Moved to...
* config/rs6000/rbtree.cc: ...here.
* config/rs6000/rs6000-c.c: Moved to...
* config/rs6000/rs6000-c.cc: ...here.
* config/rs6000/rs6000-call.c: Moved to...
* config/rs6000/rs6000-call.cc: ...here.
* config/rs6000/rs6000-d.c: Moved to...
* config/rs6000/rs6000-d.cc: ...here.
* config/rs6000/rs6000-gen-builtins.c: Moved to...
* config/rs6000/rs6000-gen-builtins.cc: ...here.
* config/rs6000/rs6000-linux.c: Moved to...
* config/rs6000/rs6000-linux.cc: ...here.
* config/rs6000/rs6000-logue.c: Moved to...
* config/rs6000/rs6000-logue.cc: ...here.
* config/rs6000/rs6000-p8swap.c: Moved to...
* config/rs6000/rs6000-p8swap.cc: ...here.
* config/rs6000/rs6000-pcrel-opt.c: Moved to...
* config/rs6000/rs6000-pcrel-opt.cc: ...here.
* config/rs6000/rs6000-string.c: Moved to...
* config/rs6000/rs6000-string.cc: ...here.
* config/rs6000/rs6000.c: Moved to...
* config/rs6000/rs6000.cc: ...here.
* config/rx/rx.c: Moved to...
* config/rx/rx.cc: ...here.
* config/s390/driver-native.c: Moved to...
* config/s390/driver-native.cc: ...here.
* config/s390/s390-c.c: Moved to...
* config/s390/s390-c.cc: ...here.
* config/s390/s390-d.c: Moved to...
* config/s390/s390-d.cc: ...here.
* config/s390/s390.c: Moved to...
* config/s390/s390.cc: ...here.
* config/sh/divtab-sh4-300.c: Moved to...
* config/sh/divtab-sh4-300.cc: ...here.
* config/sh/divtab-sh4.c: Moved to...
* config/sh/divtab-sh4.cc: ...here.
* config/sh/divtab.c: Moved to...
* config/sh/divtab.cc: ...here.
* config/sh/sh-c.c: Moved to...
* config/sh/sh-c.cc: ...here.
* config/sh/sh.c: Moved to...
* config/sh/sh.cc: ...here.
* config/sol2-c.c: Moved to...
* config/sol2-c.cc: ...here.
* config/sol2-cxx.c: Moved to...
* config/sol2-cxx.cc: ...here.
* config/sol2-d.c: Moved to...
* config/sol2-d.cc: ...here.
* config/sol2-stubs.c: Moved to...
* config/sol2-stubs.cc: ...here.
* config/sol2.c: Moved to...
* config/sol2.cc: ...here.
* config/sparc/driver-sparc.c: Moved to...
* config/sparc/driver-sparc.cc: ...here.
* config/sparc/sparc-c.c: Moved to...
* config/sparc/sparc-c.cc: ...here.
* config/sparc/sparc-d.c: Moved to...
* config/sparc/sparc-d.cc: ...here.
* config/sparc/sparc.c: Moved to...
* config/sparc/sparc.cc: ...here.
* config/stormy16/stormy16.c: Moved to...
* config/stormy16/stormy16.cc: ...here.
* config/tilegx/mul-tables.c: Moved to...
* config/tilegx/mul-tables.cc: ...here.
* config/tilegx/tilegx-c.c: Moved to...
* config/tilegx/tilegx-c.cc: ...here.
* config/tilegx/tilegx.c: Moved to...
* config/tilegx/tilegx.cc: ...here.
* config/tilepro/mul-tables.c: Moved to...
* config/tilepro/mul-tables.cc: ...here.
* config/tilepro/tilepro-c.c: Moved to...
* config/tilepro/tilepro-c.cc: ...here.
* config/tilepro/tilepro.c: Moved to...
* config/tilepro/tilepro.cc: ...here.
* config/v850/v850-c.c: Moved to...
* config/v850/v850-c.cc: ...here.
* config/v850/v850.c: Moved to...
* config/v850/v850.cc: ...here.
* config/vax/vax.c: Moved to...
* config/vax/vax.cc: ...here.
* config/visium/visium.c: Moved to...
* config/visium/visium.cc: ...here.
* config/vms/vms-c.c: Moved to...
* config/vms/vms-c.cc: ...here.
* config/vms/vms-f.c: Moved to...
* config/vms/vms-f.cc: ...here.
* config/vms/vms.c: Moved to...
* config/vms/vms.cc: ...here.
* config/vxworks-c.c: Moved to...
* config/vxworks-c.cc: ...here.
* config/vxworks.c: Moved to...
* config/vxworks.cc: ...here.
* config/winnt-c.c: Moved to...
* config/winnt-c.cc: ...here.
* config/xtensa/xtensa.c: Moved to...
* config/xtensa/xtensa.cc: ...here.
* context.c: Moved to...
* context.cc: ...here.
* convert.c: Moved to...
* convert.cc: ...here.
* coverage.c: Moved to...
* coverage.cc: ...here.
* cppbuiltin.c: Moved to...
* cppbuiltin.cc: ...here.
* cppdefault.c: Moved to...
* cppdefault.cc: ...here.
* cprop.c: Moved to...
* cprop.cc: ...here.
* cse.c: Moved to...
* cse.cc: ...here.
* cselib.c: Moved to...
* cselib.cc: ...here.
* ctfc.c: Moved to...
* ctfc.cc: ...here.
* ctfout.c: Moved to...
* ctfout.cc: ...here.
* data-streamer-in.c: Moved to...
* data-streamer-in.cc: ...here.
* data-streamer-out.c: Moved to...
* data-streamer-out.cc: ...here.
* data-streamer.c: Moved to...
* data-streamer.cc: ...here.
* dbgcnt.c: Moved to...
* dbgcnt.cc: ...here.
* dbxout.c: Moved to...
* dbxout.cc: ...here.
* dce.c: Moved to...
* dce.cc: ...here.
* ddg.c: Moved to...
* ddg.cc: ...here.
* debug.c: Moved to...
* debug.cc: ...here.
* df-core.c: Moved to...
* df-core.cc: ...here.
* df-problems.c: Moved to...
* df-problems.cc: ...here.
* df-scan.c: Moved to...
* df-scan.cc: ...here.
* dfp.c: Moved to...
* dfp.cc: ...here.
* diagnostic-color.c: Moved to...
* diagnostic-color.cc: ...here.
* diagnostic-show-locus.c: Moved to...
* diagnostic-show-locus.cc: ...here.
* diagnostic-spec.c: Moved to...
* diagnostic-spec.cc: ...here.
* diagnostic.c: Moved to...
* diagnostic.cc: ...here.
* dojump.c: Moved to...
* dojump.cc: ...here.
* dominance.c: Moved to...
* dominance.cc: ...here.
* domwalk.c: Moved to...
* domwalk.cc: ...here.
* double-int.c: Moved to...
* double-int.cc: ...here.
* dse.c: Moved to...
* dse.cc: ...here.
* dumpfile.c: Moved to...
* dumpfile.cc: ...here.
* dwarf2asm.c: Moved to...
* dwarf2asm.cc: ...here.
* dwarf2cfi.c: Moved to...
* dwarf2cfi.cc: ...here.
* dwarf2ctf.c: Moved to...
* dwarf2ctf.cc: ...here.
* dwarf2out.c: Moved to...
* dwarf2out.cc: ...here.
* early-remat.c: Moved to...
* early-remat.cc: ...here.
* edit-context.c: Moved to...
* edit-context.cc: ...here.
* emit-rtl.c: Moved to...
* emit-rtl.cc: ...here.
* errors.c: Moved to...
* errors.cc: ...here.
* et-forest.c: Moved to...
* et-forest.cc: ...here.
* except.c: Moved to...
* except.cc: ...here.
* explow.c: Moved to...
* explow.cc: ...here.
* expmed.c: Moved to...
* expmed.cc: ...here.
* expr.c: Moved to...
* expr.cc: ...here.
* fibonacci_heap.c: Moved to...
* fibonacci_heap.cc: ...here.
* file-find.c: Moved to...
* file-find.cc: ...here.
* file-prefix-map.c: Moved to...
* file-prefix-map.cc: ...here.
* final.c: Moved to...
* final.cc: ...here.
* fixed-value.c: Moved to...
* fixed-value.cc: ...here.
* fold-const-call.c: Moved to...
* fold-const-call.cc: ...here.
* fold-const.c: Moved to...
* fold-const.cc: ...here.
* fp-test.c: Moved to...
* fp-test.cc: ...here.
* function-tests.c: Moved to...
* function-tests.cc: ...here.
* function.c: Moved to...
* function.cc: ...here.
* fwprop.c: Moved to...
* fwprop.cc: ...here.
* gcc-ar.c: Moved to...
* gcc-ar.cc: ...here.
* gcc-main.c: Moved to...
* gcc-main.cc: ...here.
* gcc-rich-location.c: Moved to...
* gcc-rich-location.cc: ...here.
* gcc.c: Moved to...
* gcc.cc: ...here.
* gcov-dump.c: Moved to...
* gcov-dump.cc: ...here.
* gcov-io.c: Moved to...
* gcov-io.cc: ...here.
* gcov-tool.c: Moved to...
* gcov-tool.cc: ...here.
* gcov.c: Moved to...
* gcov.cc: ...here.
* gcse-common.c: Moved to...
* gcse-common.cc: ...here.
* gcse.c: Moved to...
* gcse.cc: ...here.
* genattr-common.c: Moved to...
* genattr-common.cc: ...here.
* genattr.c: Moved to...
* genattr.cc: ...here.
* genattrtab.c: Moved to...
* genattrtab.cc: ...here.
* genautomata.c: Moved to...
* genautomata.cc: ...here.
* gencfn-macros.c: Moved to...
* gencfn-macros.cc: ...here.
* gencheck.c: Moved to...
* gencheck.cc: ...here.
* genchecksum.c: Moved to...
* genchecksum.cc: ...here.
* gencodes.c: Moved to...
* gencodes.cc: ...here.
* genconditions.c: Moved to...
* genconditions.cc: ...here.
* genconfig.c: Moved to...
* genconfig.cc: ...here.
* genconstants.c: Moved to...
* genconstants.cc: ...here.
* genemit.c: Moved to...
* genemit.cc: ...here.
* genenums.c: Moved to...
* genenums.cc: ...here.
* generic-match-head.c: Moved to...
* generic-match-head.cc: ...here.
* genextract.c: Moved to...
* genextract.cc: ...here.
* genflags.c: Moved to...
* genflags.cc: ...here.
* gengenrtl.c: Moved to...
* gengenrtl.cc: ...here.
* gengtype-parse.c: Moved to...
* gengtype-parse.cc: ...here.
* gengtype-state.c: Moved to...
* gengtype-state.cc: ...here.
* gengtype.c: Moved to...
* gengtype.cc: ...here.
* genhooks.c: Moved to...
* genhooks.cc: ...here.
* genmatch.c: Moved to...
* genmatch.cc: ...here.
* genmddeps.c: Moved to...
* genmddeps.cc: ...here.
* genmddump.c: Moved to...
* genmddump.cc: ...here.
* genmodes.c: Moved to...
* genmodes.cc: ...here.
* genopinit.c: Moved to...
* genopinit.cc: ...here.
* genoutput.c: Moved to...
* genoutput.cc: ...here.
* genpeep.c: Moved to...
* genpeep.cc: ...here.
* genpreds.c: Moved to...
* genpreds.cc: ...here.
* genrecog.c: Moved to...
* genrecog.cc: ...here.
* gensupport.c: Moved to...
* gensupport.cc: ...here.
* gentarget-def.c: Moved to...
* gentarget-def.cc: ...here.
* genversion.c: Moved to...
* genversion.cc: ...here.
* ggc-common.c: Moved to...
* ggc-common.cc: ...here.
* ggc-none.c: Moved to...
* ggc-none.cc: ...here.
* ggc-page.c: Moved to...
* ggc-page.cc: ...here.
* ggc-tests.c: Moved to...
* ggc-tests.cc: ...here.
* gimple-builder.c: Moved to...
* gimple-builder.cc: ...here.
* gimple-expr.c: Moved to...
* gimple-expr.cc: ...here.
* gimple-fold.c: Moved to...
* gimple-fold.cc: ...here.
* gimple-iterator.c: Moved to...
* gimple-iterator.cc: ...here.
* gimple-laddress.c: Moved to...
* gimple-laddress.cc: ...here.
* gimple-loop-jam.c: Moved to...
* gimple-loop-jam.cc: ...here.
* gimple-low.c: Moved to...
* gimple-low.cc: ...here.
* gimple-match-head.c: Moved to...
* gimple-match-head.cc: ...here.
* gimple-pretty-print.c: Moved to...
* gimple-pretty-print.cc: ...here.
* gimple-ssa-backprop.c: Moved to...
* gimple-ssa-backprop.cc: ...here.
* gimple-ssa-evrp-analyze.c: Moved to...
* gimple-ssa-evrp-analyze.cc: ...here.
* gimple-ssa-evrp.c: Moved to...
* gimple-ssa-evrp.cc: ...here.
* gimple-ssa-isolate-paths.c: Moved to...
* gimple-ssa-isolate-paths.cc: ...here.
* gimple-ssa-nonnull-compare.c: Moved to...
* gimple-ssa-nonnull-compare.cc: ...here.
* gimple-ssa-split-paths.c: Moved to...
* gimple-ssa-split-paths.cc: ...here.
* gimple-ssa-sprintf.c: Moved to...
* gimple-ssa-sprintf.cc: ...here.
* gimple-ssa-store-merging.c: Moved to...
* gimple-ssa-store-merging.cc: ...here.
* gimple-ssa-strength-reduction.c: Moved to...
* gimple-ssa-strength-reduction.cc: ...here.
* gimple-ssa-warn-alloca.c: Moved to...
* gimple-ssa-warn-alloca.cc: ...here.
* gimple-ssa-warn-restrict.c: Moved to...
* gimple-ssa-warn-restrict.cc: ...here.
* gimple-streamer-in.c: Moved to...
* gimple-streamer-in.cc: ...here.
* gimple-streamer-out.c: Moved to...
* gimple-streamer-out.cc: ...here.
* gimple-walk.c: Moved to...
* gimple-walk.cc: ...here.
* gimple-warn-recursion.c: Moved to...
* gimple-warn-recursion.cc: ...here.
* gimple.c: Moved to...
* gimple.cc: ...here.
* gimplify-me.c: Moved to...
* gimplify-me.cc: ...here.
* gimplify.c: Moved to...
* gimplify.cc: ...here.
* godump.c: Moved to...
* godump.cc: ...here.
* graph.c: Moved to...
* graph.cc: ...here.
* graphds.c: Moved to...
* graphds.cc: ...here.
* graphite-dependences.c: Moved to...
* graphite-dependences.cc: ...here.
* graphite-isl-ast-to-gimple.c: Moved to...
* graphite-isl-ast-to-gimple.cc: ...here.
* graphite-optimize-isl.c: Moved to...
* graphite-optimize-isl.cc: ...here.
* graphite-poly.c: Moved to...
* graphite-poly.cc: ...here.
* graphite-scop-detection.c: Moved to...
* graphite-scop-detection.cc: ...here.
* graphite-sese-to-poly.c: Moved to...
* graphite-sese-to-poly.cc: ...here.
* graphite.c: Moved to...
* graphite.cc: ...here.
* haifa-sched.c: Moved to...
* haifa-sched.cc: ...here.
* hash-map-tests.c: Moved to...
* hash-map-tests.cc: ...here.
* hash-set-tests.c: Moved to...
* hash-set-tests.cc: ...here.
* hash-table.c: Moved to...
* hash-table.cc: ...here.
* hooks.c: Moved to...
* hooks.cc: ...here.
* host-default.c: Moved to...
* host-default.cc: ...here.
* hw-doloop.c: Moved to...
* hw-doloop.cc: ...here.
* hwint.c: Moved to...
* hwint.cc: ...here.
* ifcvt.c: Moved to...
* ifcvt.cc: ...here.
* inchash.c: Moved to...
* inchash.cc: ...here.
* incpath.c: Moved to...
* incpath.cc: ...here.
* init-regs.c: Moved to...
* init-regs.cc: ...here.
* input.c: Moved to...
* input.cc: ...here.
* internal-fn.c: Moved to...
* internal-fn.cc: ...here.
* intl.c: Moved to...
* intl.cc: ...here.
* ipa-comdats.c: Moved to...
* ipa-comdats.cc: ...here.
* ipa-cp.c: Moved to...
* ipa-cp.cc: ...here.
* ipa-devirt.c: Moved to...
* ipa-devirt.cc: ...here.
* ipa-fnsummary.c: Moved to...
* ipa-fnsummary.cc: ...here.
* ipa-icf-gimple.c: Moved to...
* ipa-icf-gimple.cc: ...here.
* ipa-icf.c: Moved to...
* ipa-icf.cc: ...here.
* ipa-inline-analysis.c: Moved to...
* ipa-inline-analysis.cc: ...here.
* ipa-inline-transform.c: Moved to...
* ipa-inline-transform.cc: ...here.
* ipa-inline.c: Moved to...
* ipa-inline.cc: ...here.
* ipa-modref-tree.c: Moved to...
* ipa-modref-tree.cc: ...here.
* ipa-modref.c: Moved to...
* ipa-modref.cc: ...here.
* ipa-param-manipulation.c: Moved to...
* ipa-param-manipulation.cc: ...here.
* ipa-polymorphic-call.c: Moved to...
* ipa-polymorphic-call.cc: ...here.
* ipa-predicate.c: Moved to...
* ipa-predicate.cc: ...here.
* ipa-profile.c: Moved to...
* ipa-profile.cc: ...here.
* ipa-prop.c: Moved to...
* ipa-prop.cc: ...here.
* ipa-pure-const.c: Moved to...
* ipa-pure-const.cc: ...here.
* ipa-ref.c: Moved to...
* ipa-ref.cc: ...here.
* ipa-reference.c: Moved to...
* ipa-reference.cc: ...here.
* ipa-split.c: Moved to...
* ipa-split.cc: ...here.
* ipa-sra.c: Moved to...
* ipa-sra.cc: ...here.
* ipa-utils.c: Moved to...
* ipa-utils.cc: ...here.
* ipa-visibility.c: Moved to...
* ipa-visibility.cc: ...here.
* ipa.c: Moved to...
* ipa.cc: ...here.
* ira-build.c: Moved to...
* ira-build.cc: ...here.
* ira-color.c: Moved to...
* ira-color.cc: ...here.
* ira-conflicts.c: Moved to...
* ira-conflicts.cc: ...here.
* ira-costs.c: Moved to...
* ira-costs.cc: ...here.
* ira-emit.c: Moved to...
* ira-emit.cc: ...here.
* ira-lives.c: Moved to...
* ira-lives.cc: ...here.
* ira.c: Moved to...
* ira.cc: ...here.
* jump.c: Moved to...
* jump.cc: ...here.
* langhooks.c: Moved to...
* langhooks.cc: ...here.
* lcm.c: Moved to...
* lcm.cc: ...here.
* lists.c: Moved to...
* lists.cc: ...here.
* loop-doloop.c: Moved to...
* loop-doloop.cc: ...here.
* loop-init.c: Moved to...
* loop-init.cc: ...here.
* loop-invariant.c: Moved to...
* loop-invariant.cc: ...here.
* loop-iv.c: Moved to...
* loop-iv.cc: ...here.
* loop-unroll.c: Moved to...
* loop-unroll.cc: ...here.
* lower-subreg.c: Moved to...
* lower-subreg.cc: ...here.
* lra-assigns.c: Moved to...
* lra-assigns.cc: ...here.
* lra-coalesce.c: Moved to...
* lra-coalesce.cc: ...here.
* lra-constraints.c: Moved to...
* lra-constraints.cc: ...here.
* lra-eliminations.c: Moved to...
* lra-eliminations.cc: ...here.
* lra-lives.c: Moved to...
* lra-lives.cc: ...here.
* lra-remat.c: Moved to...
* lra-remat.cc: ...here.
* lra-spills.c: Moved to...
* lra-spills.cc: ...here.
* lra.c: Moved to...
* lra.cc: ...here.
* lto-cgraph.c: Moved to...
* lto-cgraph.cc: ...here.
* lto-compress.c: Moved to...
* lto-compress.cc: ...here.
* lto-opts.c: Moved to...
* lto-opts.cc: ...here.
* lto-section-in.c: Moved to...
* lto-section-in.cc: ...here.
* lto-section-out.c: Moved to...
* lto-section-out.cc: ...here.
* lto-streamer-in.c: Moved to...
* lto-streamer-in.cc: ...here.
* lto-streamer-out.c: Moved to...
* lto-streamer-out.cc: ...here.
* lto-streamer.c: Moved to...
* lto-streamer.cc: ...here.
* lto-wrapper.c: Moved to...
* lto-wrapper.cc: ...here.
* main.c: Moved to...
* main.cc: ...here.
* mcf.c: Moved to...
* mcf.cc: ...here.
* mode-switching.c: Moved to...
* mode-switching.cc: ...here.
* modulo-sched.c: Moved to...
* modulo-sched.cc: ...here.
* multiple_target.c: Moved to...
* multiple_target.cc: ...here.
* omp-expand.c: Moved to...
* omp-expand.cc: ...here.
* omp-general.c: Moved to...
* omp-general.cc: ...here.
* omp-low.c: Moved to...
* omp-low.cc: ...here.
* omp-offload.c: Moved to...
* omp-offload.cc: ...here.
* omp-simd-clone.c: Moved to...
* omp-simd-clone.cc: ...here.
* opt-suggestions.c: Moved to...
* opt-suggestions.cc: ...here.
* optabs-libfuncs.c: Moved to...
* optabs-libfuncs.cc: ...here.
* optabs-query.c: Moved to...
* optabs-query.cc: ...here.
* optabs-tree.c: Moved to...
* optabs-tree.cc: ...here.
* optabs.c: Moved to...
* optabs.cc: ...here.
* opts-common.c: Moved to...
* opts-common.cc: ...here.
* opts-global.c: Moved to...
* opts-global.cc: ...here.
* opts.c: Moved to...
* opts.cc: ...here.
* passes.c: Moved to...
* passes.cc: ...here.
* plugin.c: Moved to...
* plugin.cc: ...here.
* postreload-gcse.c: Moved to...
* postreload-gcse.cc: ...here.
* postreload.c: Moved to...
* postreload.cc: ...here.
* predict.c: Moved to...
* predict.cc: ...here.
* prefix.c: Moved to...
* prefix.cc: ...here.
* pretty-print.c: Moved to...
* pretty-print.cc: ...here.
* print-rtl-function.c: Moved to...
* print-rtl-function.cc: ...here.
* print-rtl.c: Moved to...
* print-rtl.cc: ...here.
* print-tree.c: Moved to...
* print-tree.cc: ...here.
* profile-count.c: Moved to...
* profile-count.cc: ...here.
* profile.c: Moved to...
* profile.cc: ...here.
* read-md.c: Moved to...
* read-md.cc: ...here.
* read-rtl-function.c: Moved to...
* read-rtl-function.cc: ...here.
* read-rtl.c: Moved to...
* read-rtl.cc: ...here.
* real.c: Moved to...
* real.cc: ...here.
* realmpfr.c: Moved to...
* realmpfr.cc: ...here.
* recog.c: Moved to...
* recog.cc: ...here.
* ree.c: Moved to...
* ree.cc: ...here.
* reg-stack.c: Moved to...
* reg-stack.cc: ...here.
* regcprop.c: Moved to...
* regcprop.cc: ...here.
* reginfo.c: Moved to...
* reginfo.cc: ...here.
* regrename.c: Moved to...
* regrename.cc: ...here.
* regstat.c: Moved to...
* regstat.cc: ...here.
* reload.c: Moved to...
* reload.cc: ...here.
* reload1.c: Moved to...
* reload1.cc: ...here.
* reorg.c: Moved to...
* reorg.cc: ...here.
* resource.c: Moved to...
* resource.cc: ...here.
* rtl-error.c: Moved to...
* rtl-error.cc: ...here.
* rtl-tests.c: Moved to...
* rtl-tests.cc: ...here.
* rtl.c: Moved to...
* rtl.cc: ...here.
* rtlanal.c: Moved to...
* rtlanal.cc: ...here.
* rtlhash.c: Moved to...
* rtlhash.cc: ...here.
* rtlhooks.c: Moved to...
* rtlhooks.cc: ...here.
* rtx-vector-builder.c: Moved to...
* rtx-vector-builder.cc: ...here.
* run-rtl-passes.c: Moved to...
* run-rtl-passes.cc: ...here.
* sancov.c: Moved to...
* sancov.cc: ...here.
* sanopt.c: Moved to...
* sanopt.cc: ...here.
* sbitmap.c: Moved to...
* sbitmap.cc: ...here.
* sched-deps.c: Moved to...
* sched-deps.cc: ...here.
* sched-ebb.c: Moved to...
* sched-ebb.cc: ...here.
* sched-rgn.c: Moved to...
* sched-rgn.cc: ...here.
* sel-sched-dump.c: Moved to...
* sel-sched-dump.cc: ...here.
* sel-sched-ir.c: Moved to...
* sel-sched-ir.cc: ...here.
* sel-sched.c: Moved to...
* sel-sched.cc: ...here.
* selftest-diagnostic.c: Moved to...
* selftest-diagnostic.cc: ...here.
* selftest-rtl.c: Moved to...
* selftest-rtl.cc: ...here.
* selftest-run-tests.c: Moved to...
* selftest-run-tests.cc: ...here.
* selftest.c: Moved to...
* selftest.cc: ...here.
* sese.c: Moved to...
* sese.cc: ...here.
* shrink-wrap.c: Moved to...
* shrink-wrap.cc: ...here.
* simplify-rtx.c: Moved to...
* simplify-rtx.cc: ...here.
* sparseset.c: Moved to...
* sparseset.cc: ...here.
* spellcheck-tree.c: Moved to...
* spellcheck-tree.cc: ...here.
* spellcheck.c: Moved to...
* spellcheck.cc: ...here.
* sreal.c: Moved to...
* sreal.cc: ...here.
* stack-ptr-mod.c: Moved to...
* stack-ptr-mod.cc: ...here.
* statistics.c: Moved to...
* statistics.cc: ...here.
* stmt.c: Moved to...
* stmt.cc: ...here.
* stor-layout.c: Moved to...
* stor-layout.cc: ...here.
* store-motion.c: Moved to...
* store-motion.cc: ...here.
* streamer-hooks.c: Moved to...
* streamer-hooks.cc: ...here.
* stringpool.c: Moved to...
* stringpool.cc: ...here.
* substring-locations.c: Moved to...
* substring-locations.cc: ...here.
* symtab.c: Moved to...
* symtab.cc: ...here.
* target-globals.c: Moved to...
* target-globals.cc: ...here.
* targhooks.c: Moved to...
* targhooks.cc: ...here.
* timevar.c: Moved to...
* timevar.cc: ...here.
* toplev.c: Moved to...
* toplev.cc: ...here.
* tracer.c: Moved to...
* tracer.cc: ...here.
* trans-mem.c: Moved to...
* trans-mem.cc: ...here.
* tree-affine.c: Moved to...
* tree-affine.cc: ...here.
* tree-call-cdce.c: Moved to...
* tree-call-cdce.cc: ...here.
* tree-cfg.c: Moved to...
* tree-cfg.cc: ...here.
* tree-cfgcleanup.c: Moved to...
* tree-cfgcleanup.cc: ...here.
* tree-chrec.c: Moved to...
* tree-chrec.cc: ...here.
* tree-complex.c: Moved to...
* tree-complex.cc: ...here.
* tree-data-ref.c: Moved to...
* tree-data-ref.cc: ...here.
* tree-dfa.c: Moved to...
* tree-dfa.cc: ...here.
* tree-diagnostic.c: Moved to...
* tree-diagnostic.cc: ...here.
* tree-dump.c: Moved to...
* tree-dump.cc: ...here.
* tree-eh.c: Moved to...
* tree-eh.cc: ...here.
* tree-emutls.c: Moved to...
* tree-emutls.cc: ...here.
* tree-if-conv.c: Moved to...
* tree-if-conv.cc: ...here.
* tree-inline.c: Moved to...
* tree-inline.cc: ...here.
* tree-into-ssa.c: Moved to...
* tree-into-ssa.cc: ...here.
* tree-iterator.c: Moved to...
* tree-iterator.cc: ...here.
* tree-loop-distribution.c: Moved to...
* tree-loop-distribution.cc: ...here.
* tree-nested.c: Moved to...
* tree-nested.cc: ...here.
* tree-nrv.c: Moved to...
* tree-nrv.cc: ...here.
* tree-object-size.c: Moved to...
* tree-object-size.cc: ...here.
* tree-outof-ssa.c: Moved to...
* tree-outof-ssa.cc: ...here.
* tree-parloops.c: Moved to...
* tree-parloops.cc: ...here.
* tree-phinodes.c: Moved to...
* tree-phinodes.cc: ...here.
* tree-predcom.c: Moved to...
* tree-predcom.cc: ...here.
* tree-pretty-print.c: Moved to...
* tree-pretty-print.cc: ...here.
* tree-profile.c: Moved to...
* tree-profile.cc: ...here.
* tree-scalar-evolution.c: Moved to...
* tree-scalar-evolution.cc: ...here.
* tree-sra.c: Moved to...
* tree-sra.cc: ...here.
* tree-ssa-address.c: Moved to...
* tree-ssa-address.cc: ...here.
* tree-ssa-alias.c: Moved to...
* tree-ssa-alias.cc: ...here.
* tree-ssa-ccp.c: Moved to...
* tree-ssa-ccp.cc: ...here.
* tree-ssa-coalesce.c: Moved to...
* tree-ssa-coalesce.cc: ...here.
* tree-ssa-copy.c: Moved to...
* tree-ssa-copy.cc: ...here.
* tree-ssa-dce.c: Moved to...
* tree-ssa-dce.cc: ...here.
* tree-ssa-dom.c: Moved to...
* tree-ssa-dom.cc: ...here.
* tree-ssa-dse.c: Moved to...
* tree-ssa-dse.cc: ...here.
* tree-ssa-forwprop.c: Moved to...
* tree-ssa-forwprop.cc: ...here.
* tree-ssa-ifcombine.c: Moved to...
* tree-ssa-ifcombine.cc: ...here.
* tree-ssa-live.c: Moved to...
* tree-ssa-live.cc: ...here.
* tree-ssa-loop-ch.c: Moved to...
* tree-ssa-loop-ch.cc: ...here.
* tree-ssa-loop-im.c: Moved to...
* tree-ssa-loop-im.cc: ...here.
* tree-ssa-loop-ivcanon.c: Moved to...
* tree-ssa-loop-ivcanon.cc: ...here.
* tree-ssa-loop-ivopts.c: Moved to...
* tree-ssa-loop-ivopts.cc: ...here.
* tree-ssa-loop-manip.c: Moved to...
* tree-ssa-loop-manip.cc: ...here.
* tree-ssa-loop-niter.c: Moved to...
* tree-ssa-loop-niter.cc: ...here.
* tree-ssa-loop-prefetch.c: Moved to...
* tree-ssa-loop-prefetch.cc: ...here.
* tree-ssa-loop-split.c: Moved to...
* tree-ssa-loop-split.cc: ...here.
* tree-ssa-loop-unswitch.c: Moved to...
* tree-ssa-loop-unswitch.cc: ...here.
* tree-ssa-loop.c: Moved to...
* tree-ssa-loop.cc: ...here.
* tree-ssa-math-opts.c: Moved to...
* tree-ssa-math-opts.cc: ...here.
* tree-ssa-operands.c: Moved to...
* tree-ssa-operands.cc: ...here.
* tree-ssa-phiopt.c: Moved to...
* tree-ssa-phiopt.cc: ...here.
* tree-ssa-phiprop.c: Moved to...
* tree-ssa-phiprop.cc: ...here.
* tree-ssa-pre.c: Moved to...
* tree-ssa-pre.cc: ...here.
* tree-ssa-propagate.c: Moved to...
* tree-ssa-propagate.cc: ...here.
* tree-ssa-reassoc.c: Moved to...
* tree-ssa-reassoc.cc: ...here.
* tree-ssa-sccvn.c: Moved to...
* tree-ssa-sccvn.cc: ...here.
* tree-ssa-scopedtables.c: Moved to...
* tree-ssa-scopedtables.cc: ...here.
* tree-ssa-sink.c: Moved to...
* tree-ssa-sink.cc: ...here.
* tree-ssa-strlen.c: Moved to...
* tree-ssa-strlen.cc: ...here.
* tree-ssa-structalias.c: Moved to...
* tree-ssa-structalias.cc: ...here.
* tree-ssa-tail-merge.c: Moved to...
* tree-ssa-tail-merge.cc: ...here.
* tree-ssa-ter.c: Moved to...
* tree-ssa-ter.cc: ...here.
* tree-ssa-threadbackward.c: Moved to...
* tree-ssa-threadbackward.cc: ...here.
* tree-ssa-threadedge.c: Moved to...
* tree-ssa-threadedge.cc: ...here.
* tree-ssa-threadupdate.c: Moved to...
* tree-ssa-threadupdate.cc: ...here.
* tree-ssa-uncprop.c: Moved to...
* tree-ssa-uncprop.cc: ...here.
* tree-ssa-uninit.c: Moved to...
* tree-ssa-uninit.cc: ...here.
* tree-ssa.c: Moved to...
* tree-ssa.cc: ...here.
* tree-ssanames.c: Moved to...
* tree-ssanames.cc: ...here.
* tree-stdarg.c: Moved to...
* tree-stdarg.cc: ...here.
* tree-streamer-in.c: Moved to...
* tree-streamer-in.cc: ...here.
* tree-streamer-out.c: Moved to...
* tree-streamer-out.cc: ...here.
* tree-streamer.c: Moved to...
* tree-streamer.cc: ...here.
* tree-switch-conversion.c: Moved to...
* tree-switch-conversion.cc: ...here.
* tree-tailcall.c: Moved to...
* tree-tailcall.cc: ...here.
* tree-vect-data-refs.c: Moved to...
* tree-vect-data-refs.cc: ...here.
* tree-vect-generic.c: Moved to...
* tree-vect-generic.cc: ...here.
* tree-vect-loop-manip.c: Moved to...
* tree-vect-loop-manip.cc: ...here.
* tree-vect-loop.c: Moved to...
* tree-vect-loop.cc: ...here.
* tree-vect-patterns.c: Moved to...
* tree-vect-patterns.cc: ...here.
* tree-vect-slp-patterns.c: Moved to...
* tree-vect-slp-patterns.cc: ...here.
* tree-vect-slp.c: Moved to...
* tree-vect-slp.cc: ...here.
* tree-vect-stmts.c: Moved to...
* tree-vect-stmts.cc: ...here.
* tree-vector-builder.c: Moved to...
* tree-vector-builder.cc: ...here.
* tree-vectorizer.c: Moved to...
* tree-vectorizer.cc: ...here.
* tree-vrp.c: Moved to...
* tree-vrp.cc: ...here.
* tree.c: Moved to...
* tree.cc: ...here.
* tsan.c: Moved to...
* tsan.cc: ...here.
* typed-splay-tree.c: Moved to...
* typed-splay-tree.cc: ...here.
* ubsan.c: Moved to...
* ubsan.cc: ...here.
* valtrack.c: Moved to...
* valtrack.cc: ...here.
* value-prof.c: Moved to...
* value-prof.cc: ...here.
* var-tracking.c: Moved to...
* var-tracking.cc: ...here.
* varasm.c: Moved to...
* varasm.cc: ...here.
* varpool.c: Moved to...
* varpool.cc: ...here.
* vec-perm-indices.c: Moved to...
* vec-perm-indices.cc: ...here.
* vec.c: Moved to...
* vec.cc: ...here.
* vmsdbgout.c: Moved to...
* vmsdbgout.cc: ...here.
* vr-values.c: Moved to...
* vr-values.cc: ...here.
* vtable-verify.c: Moved to...
* vtable-verify.cc: ...here.
* web.c: Moved to...
* web.cc: ...here.
* xcoffout.c: Moved to...
* xcoffout.cc: ...here.
gcc/c-family/ChangeLog:
* c-ada-spec.c: Moved to...
* c-ada-spec.cc: ...here.
* c-attribs.c: Moved to...
* c-attribs.cc: ...here.
* c-common.c: Moved to...
* c-common.cc: ...here.
* c-cppbuiltin.c: Moved to...
* c-cppbuiltin.cc: ...here.
* c-dump.c: Moved to...
* c-dump.cc: ...here.
* c-format.c: Moved to...
* c-format.cc: ...here.
* c-gimplify.c: Moved to...
* c-gimplify.cc: ...here.
* c-indentation.c: Moved to...
* c-indentation.cc: ...here.
* c-lex.c: Moved to...
* c-lex.cc: ...here.
* c-omp.c: Moved to...
* c-omp.cc: ...here.
* c-opts.c: Moved to...
* c-opts.cc: ...here.
* c-pch.c: Moved to...
* c-pch.cc: ...here.
* c-ppoutput.c: Moved to...
* c-ppoutput.cc: ...here.
* c-pragma.c: Moved to...
* c-pragma.cc: ...here.
* c-pretty-print.c: Moved to...
* c-pretty-print.cc: ...here.
* c-semantics.c: Moved to...
* c-semantics.cc: ...here.
* c-ubsan.c: Moved to...
* c-ubsan.cc: ...here.
* c-warn.c: Moved to...
* c-warn.cc: ...here.
* cppspec.c: Moved to...
* cppspec.cc: ...here.
* stub-objc.c: Moved to...
* stub-objc.cc: ...here.
gcc/c/ChangeLog:
* c-aux-info.c: Moved to...
* c-aux-info.cc: ...here.
* c-convert.c: Moved to...
* c-convert.cc: ...here.
* c-decl.c: Moved to...
* c-decl.cc: ...here.
* c-errors.c: Moved to...
* c-errors.cc: ...here.
* c-fold.c: Moved to...
* c-fold.cc: ...here.
* c-lang.c: Moved to...
* c-lang.cc: ...here.
* c-objc-common.c: Moved to...
* c-objc-common.cc: ...here.
* c-parser.c: Moved to...
* c-parser.cc: ...here.
* c-typeck.c: Moved to...
* c-typeck.cc: ...here.
* gccspec.c: Moved to...
* gccspec.cc: ...here.
* gimple-parser.c: Moved to...
* gimple-parser.cc: ...here.
gcc/cp/ChangeLog:
* call.c: Moved to...
* call.cc: ...here.
* class.c: Moved to...
* class.cc: ...here.
* constexpr.c: Moved to...
* constexpr.cc: ...here.
* cp-gimplify.c: Moved to...
* cp-gimplify.cc: ...here.
* cp-lang.c: Moved to...
* cp-lang.cc: ...here.
* cp-objcp-common.c: Moved to...
* cp-objcp-common.cc: ...here.
* cp-ubsan.c: Moved to...
* cp-ubsan.cc: ...here.
* cvt.c: Moved to...
* cvt.cc: ...here.
* cxx-pretty-print.c: Moved to...
* cxx-pretty-print.cc: ...here.
* decl.c: Moved to...
* decl.cc: ...here.
* decl2.c: Moved to...
* decl2.cc: ...here.
* dump.c: Moved to...
* dump.cc: ...here.
* error.c: Moved to...
* error.cc: ...here.
* except.c: Moved to...
* except.cc: ...here.
* expr.c: Moved to...
* expr.cc: ...here.
* friend.c: Moved to...
* friend.cc: ...here.
* g++spec.c: Moved to...
* g++spec.cc: ...here.
* init.c: Moved to...
* init.cc: ...here.
* lambda.c: Moved to...
* lambda.cc: ...here.
* lex.c: Moved to...
* lex.cc: ...here.
* mangle.c: Moved to...
* mangle.cc: ...here.
* method.c: Moved to...
* method.cc: ...here.
* name-lookup.c: Moved to...
* name-lookup.cc: ...here.
* optimize.c: Moved to...
* optimize.cc: ...here.
* parser.c: Moved to...
* parser.cc: ...here.
* pt.c: Moved to...
* pt.cc: ...here.
* ptree.c: Moved to...
* ptree.cc: ...here.
* rtti.c: Moved to...
* rtti.cc: ...here.
* search.c: Moved to...
* search.cc: ...here.
* semantics.c: Moved to...
* semantics.cc: ...here.
* tree.c: Moved to...
* tree.cc: ...here.
* typeck.c: Moved to...
* typeck.cc: ...here.
* typeck2.c: Moved to...
* typeck2.cc: ...here.
* vtable-class-hierarchy.c: Moved to...
* vtable-class-hierarchy.cc: ...here.
gcc/fortran/ChangeLog:
* arith.c: Moved to...
* arith.cc: ...here.
* array.c: Moved to...
* array.cc: ...here.
* bbt.c: Moved to...
* bbt.cc: ...here.
* check.c: Moved to...
* check.cc: ...here.
* class.c: Moved to...
* class.cc: ...here.
* constructor.c: Moved to...
* constructor.cc: ...here.
* convert.c: Moved to...
* convert.cc: ...here.
* cpp.c: Moved to...
* cpp.cc: ...here.
* data.c: Moved to...
* data.cc: ...here.
* decl.c: Moved to...
* decl.cc: ...here.
* dependency.c: Moved to...
* dependency.cc: ...here.
* dump-parse-tree.c: Moved to...
* dump-parse-tree.cc: ...here.
* error.c: Moved to...
* error.cc: ...here.
* expr.c: Moved to...
* expr.cc: ...here.
* f95-lang.c: Moved to...
* f95-lang.cc: ...here.
* frontend-passes.c: Moved to...
* frontend-passes.cc: ...here.
* gfortranspec.c: Moved to...
* gfortranspec.cc: ...here.
* interface.c: Moved to...
* interface.cc: ...here.
* intrinsic.c: Moved to...
* intrinsic.cc: ...here.
* io.c: Moved to...
* io.cc: ...here.
* iresolve.c: Moved to...
* iresolve.cc: ...here.
* match.c: Moved to...
* match.cc: ...here.
* matchexp.c: Moved to...
* matchexp.cc: ...here.
* misc.c: Moved to...
* misc.cc: ...here.
* module.c: Moved to...
* module.cc: ...here.
* openmp.c: Moved to...
* openmp.cc: ...here.
* options.c: Moved to...
* options.cc: ...here.
* parse.c: Moved to...
* parse.cc: ...here.
* primary.c: Moved to...
* primary.cc: ...here.
* resolve.c: Moved to...
* resolve.cc: ...here.
* scanner.c: Moved to...
* scanner.cc: ...here.
* simplify.c: Moved to...
* simplify.cc: ...here.
* st.c: Moved to...
* st.cc: ...here.
* symbol.c: Moved to...
* symbol.cc: ...here.
* target-memory.c: Moved to...
* target-memory.cc: ...here.
* trans-array.c: Moved to...
* trans-array.cc: ...here.
* trans-common.c: Moved to...
* trans-common.cc: ...here.
* trans-const.c: Moved to...
* trans-const.cc: ...here.
* trans-decl.c: Moved to...
* trans-decl.cc: ...here.
* trans-expr.c: Moved to...
* trans-expr.cc: ...here.
* trans-intrinsic.c: Moved to...
* trans-intrinsic.cc: ...here.
* trans-io.c: Moved to...
* trans-io.cc: ...here.
* trans-openmp.c: Moved to...
* trans-openmp.cc: ...here.
* trans-stmt.c: Moved to...
* trans-stmt.cc: ...here.
* trans-types.c: Moved to...
* trans-types.cc: ...here.
* trans.c: Moved to...
* trans.cc: ...here.
gcc/go/ChangeLog:
* go-backend.c: Moved to...
* go-backend.cc: ...here.
* go-lang.c: Moved to...
* go-lang.cc: ...here.
* gospec.c: Moved to...
* gospec.cc: ...here.
gcc/jit/ChangeLog:
* dummy-frontend.c: Moved to...
* dummy-frontend.cc: ...here.
* jit-builtins.c: Moved to...
* jit-builtins.cc: ...here.
* jit-logging.c: Moved to...
* jit-logging.cc: ...here.
* jit-playback.c: Moved to...
* jit-playback.cc: ...here.
* jit-recording.c: Moved to...
* jit-recording.cc: ...here.
* jit-result.c: Moved to...
* jit-result.cc: ...here.
* jit-spec.c: Moved to...
* jit-spec.cc: ...here.
* jit-tempdir.c: Moved to...
* jit-tempdir.cc: ...here.
* jit-w32.c: Moved to...
* jit-w32.cc: ...here.
* libgccjit.c: Moved to...
* libgccjit.cc: ...here.
gcc/lto/ChangeLog:
* common.c: Moved to...
* common.cc: ...here.
* lto-common.c: Moved to...
* lto-common.cc: ...here.
* lto-dump.c: Moved to...
* lto-dump.cc: ...here.
* lto-lang.c: Moved to...
* lto-lang.cc: ...here.
* lto-object.c: Moved to...
* lto-object.cc: ...here.
* lto-partition.c: Moved to...
* lto-partition.cc: ...here.
* lto-symtab.c: Moved to...
* lto-symtab.cc: ...here.
* lto.c: Moved to...
* lto.cc: ...here.
gcc/objc/ChangeLog:
* objc-act.c: Moved to...
* objc-act.cc: ...here.
* objc-encoding.c: Moved to...
* objc-encoding.cc: ...here.
* objc-gnu-runtime-abi-01.c: Moved to...
* objc-gnu-runtime-abi-01.cc: ...here.
* objc-lang.c: Moved to...
* objc-lang.cc: ...here.
* objc-map.c: Moved to...
* objc-map.cc: ...here.
* objc-next-runtime-abi-01.c: Moved to...
* objc-next-runtime-abi-01.cc: ...here.
* objc-next-runtime-abi-02.c: Moved to...
* objc-next-runtime-abi-02.cc: ...here.
* objc-runtime-shared-support.c: Moved to...
* objc-runtime-shared-support.cc: ...here.
gcc/objcp/ChangeLog:
* objcp-decl.c: Moved to...
* objcp-decl.cc: ...here.
* objcp-lang.c: Moved to...
* objcp-lang.cc: ...here.
libcpp/ChangeLog:
* charset.c: Moved to...
* charset.cc: ...here.
* directives.c: Moved to...
* directives.cc: ...here.
* errors.c: Moved to...
* errors.cc: ...here.
* expr.c: Moved to...
* expr.cc: ...here.
* files.c: Moved to...
* files.cc: ...here.
* identifiers.c: Moved to...
* identifiers.cc: ...here.
* init.c: Moved to...
* init.cc: ...here.
* lex.c: Moved to...
* lex.cc: ...here.
* line-map.c: Moved to...
* line-map.cc: ...here.
* macro.c: Moved to...
* macro.cc: ...here.
* makeucnid.c: Moved to...
* makeucnid.cc: ...here.
* mkdeps.c: Moved to...
* mkdeps.cc: ...here.
* pch.c: Moved to...
* pch.cc: ...here.
* symtab.c: Moved to...
* symtab.cc: ...here.
* traditional.c: Moved to...
* traditional.cc: ...here.
Diffstat (limited to 'gcc/tree-ssa-ccp.c')
-rw-r--r-- | gcc/tree-ssa-ccp.c | 4640 |
1 files changed, 0 insertions, 4640 deletions
diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c deleted file mode 100644 index 93fa536..0000000 --- a/gcc/tree-ssa-ccp.c +++ /dev/null @@ -1,4640 +0,0 @@ -/* Conditional constant propagation pass for the GNU compiler. - Copyright (C) 2000-2022 Free Software Foundation, Inc. - Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org> - Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com> - -This file is part of GCC. - -GCC is free software; you can redistribute it and/or modify it -under the terms of the GNU General Public License as published by the -Free Software Foundation; either version 3, or (at your option) any -later version. - -GCC is distributed in the hope that it will be useful, but WITHOUT -ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -for more details. - -You should have received a copy of the GNU General Public License -along with GCC; see the file COPYING3. If not see -<http://www.gnu.org/licenses/>. */ - -/* Conditional constant propagation (CCP) is based on the SSA - propagation engine (tree-ssa-propagate.c). Constant assignments of - the form VAR = CST are propagated from the assignments into uses of - VAR, which in turn may generate new constants. The simulation uses - a four level lattice to keep track of constant values associated - with SSA names. Given an SSA name V_i, it may take one of the - following values: - - UNINITIALIZED -> the initial state of the value. This value - is replaced with a correct initial value - the first time the value is used, so the - rest of the pass does not need to care about - it. Using this value simplifies initialization - of the pass, and prevents us from needlessly - scanning statements that are never reached. - - UNDEFINED -> V_i is a local variable whose definition - has not been processed yet. Therefore we - don't yet know if its value is a constant - or not. 
- - CONSTANT -> V_i has been found to hold a constant - value C. - - VARYING -> V_i cannot take a constant value, or if it - does, it is not possible to determine it - at compile time. - - The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node: - - 1- In ccp_visit_stmt, we are interested in assignments whose RHS - evaluates into a constant and conditional jumps whose predicate - evaluates into a boolean true or false. When an assignment of - the form V_i = CONST is found, V_i's lattice value is set to - CONSTANT and CONST is associated with it. This causes the - propagation engine to add all the SSA edges coming out the - assignment into the worklists, so that statements that use V_i - can be visited. - - If the statement is a conditional with a constant predicate, we - mark the outgoing edges as executable or not executable - depending on the predicate's value. This is then used when - visiting PHI nodes to know when a PHI argument can be ignored. - - - 2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the - same constant C, then the LHS of the PHI is set to C. This - evaluation is known as the "meet operation". Since one of the - goals of this evaluation is to optimistically return constant - values as often as possible, it uses two main short cuts: - - - If an argument is flowing in through a non-executable edge, it - is ignored. This is useful in cases like this: - - if (PRED) - a_9 = 3; - else - a_10 = 100; - a_11 = PHI (a_9, a_10) - - If PRED is known to always evaluate to false, then we can - assume that a_11 will always take its value from a_10, meaning - that instead of consider it VARYING (a_9 and a_10 have - different values), we can consider it CONSTANT 100. - - - If an argument has an UNDEFINED value, then it does not affect - the outcome of the meet operation. 
If a variable V_i has an - UNDEFINED value, it means that either its defining statement - hasn't been visited yet or V_i has no defining statement, in - which case the original symbol 'V' is being used - uninitialized. Since 'V' is a local variable, the compiler - may assume any initial value for it. - - - After propagation, every variable V_i that ends up with a lattice - value of CONSTANT will have the associated constant value in the - array CONST_VAL[i].VALUE. That is fed into substitute_and_fold for - final substitution and folding. - - This algorithm uses wide-ints at the max precision of the target. - This means that, with one uninteresting exception, variables with - UNSIGNED types never go to VARYING because the bits above the - precision of the type of the variable are always zero. The - uninteresting case is a variable of UNSIGNED type that has the - maximum precision of the target. Such variables can go to VARYING, - but this causes no loss of infomation since these variables will - never be extended. - - References: - - Constant propagation with conditional branches, - Wegman and Zadeck, ACM TOPLAS 13(2):181-210. - - Building an Optimizing Compiler, - Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9. 
- - Advanced Compiler Design and Implementation, - Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6 */ - -#include "config.h" -#include "system.h" -#include "coretypes.h" -#include "backend.h" -#include "target.h" -#include "tree.h" -#include "gimple.h" -#include "tree-pass.h" -#include "ssa.h" -#include "gimple-pretty-print.h" -#include "fold-const.h" -#include "gimple-fold.h" -#include "tree-eh.h" -#include "gimplify.h" -#include "gimple-iterator.h" -#include "tree-cfg.h" -#include "tree-ssa-propagate.h" -#include "dbgcnt.h" -#include "builtins.h" -#include "cfgloop.h" -#include "stor-layout.h" -#include "optabs-query.h" -#include "tree-ssa-ccp.h" -#include "tree-dfa.h" -#include "diagnostic-core.h" -#include "stringpool.h" -#include "attribs.h" -#include "tree-vector-builder.h" -#include "cgraph.h" -#include "alloc-pool.h" -#include "symbol-summary.h" -#include "ipa-utils.h" -#include "ipa-prop.h" -#include "internal-fn.h" - -/* Possible lattice values. */ -typedef enum -{ - UNINITIALIZED, - UNDEFINED, - CONSTANT, - VARYING -} ccp_lattice_t; - -class ccp_prop_value_t { -public: - /* Lattice value. */ - ccp_lattice_t lattice_val; - - /* Propagated value. */ - tree value; - - /* Mask that applies to the propagated value during CCP. For X - with a CONSTANT lattice value X & ~mask == value & ~mask. The - zero bits in the mask cover constant values. The ones mean no - information. */ - widest_int mask; -}; - -class ccp_propagate : public ssa_propagation_engine -{ - public: - enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) FINAL OVERRIDE; - enum ssa_prop_result visit_phi (gphi *) FINAL OVERRIDE; -}; - -/* Array of propagated constant values. After propagation, - CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I). If - the constant is held in an SSA name representing a memory store - (i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual - memory reference used to store (i.e., the LHS of the assignment - doing the store). 
*/ -static ccp_prop_value_t *const_val; -static unsigned n_const_val; - -static void canonicalize_value (ccp_prop_value_t *); -static void ccp_lattice_meet (ccp_prop_value_t *, ccp_prop_value_t *); - -/* Dump constant propagation value VAL to file OUTF prefixed by PREFIX. */ - -static void -dump_lattice_value (FILE *outf, const char *prefix, ccp_prop_value_t val) -{ - switch (val.lattice_val) - { - case UNINITIALIZED: - fprintf (outf, "%sUNINITIALIZED", prefix); - break; - case UNDEFINED: - fprintf (outf, "%sUNDEFINED", prefix); - break; - case VARYING: - fprintf (outf, "%sVARYING", prefix); - break; - case CONSTANT: - if (TREE_CODE (val.value) != INTEGER_CST - || val.mask == 0) - { - fprintf (outf, "%sCONSTANT ", prefix); - print_generic_expr (outf, val.value, dump_flags); - } - else - { - widest_int cval = wi::bit_and_not (wi::to_widest (val.value), - val.mask); - fprintf (outf, "%sCONSTANT ", prefix); - print_hex (cval, outf); - fprintf (outf, " ("); - print_hex (val.mask, outf); - fprintf (outf, ")"); - } - break; - default: - gcc_unreachable (); - } -} - - -/* Print lattice value VAL to stderr. */ - -void debug_lattice_value (ccp_prop_value_t val); - -DEBUG_FUNCTION void -debug_lattice_value (ccp_prop_value_t val) -{ - dump_lattice_value (stderr, "", val); - fprintf (stderr, "\n"); -} - -/* Extend NONZERO_BITS to a full mask, based on sgn. */ - -static widest_int -extend_mask (const wide_int &nonzero_bits, signop sgn) -{ - return widest_int::from (nonzero_bits, sgn); -} - -/* Compute a default value for variable VAR and store it in the - CONST_VAL array. The following rules are used to get default - values: - - 1- Global and static variables that are declared constant are - considered CONSTANT. - - 2- Any other value is considered UNDEFINED. This is useful when - considering PHI nodes. PHI arguments that are undefined do not - change the constant value of the PHI node, which allows for more - constants to be propagated. 
- - 3- Variables defined by statements other than assignments and PHI - nodes are considered VARYING. - - 4- Initial values of variables that are not GIMPLE registers are - considered VARYING. */ - -static ccp_prop_value_t -get_default_value (tree var) -{ - ccp_prop_value_t val = { UNINITIALIZED, NULL_TREE, 0 }; - gimple *stmt; - - stmt = SSA_NAME_DEF_STMT (var); - - if (gimple_nop_p (stmt)) - { - /* Variables defined by an empty statement are those used - before being initialized. If VAR is a local variable, we - can assume initially that it is UNDEFINED, otherwise we must - consider it VARYING. */ - if (!virtual_operand_p (var) - && SSA_NAME_VAR (var) - && TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL) - val.lattice_val = UNDEFINED; - else - { - val.lattice_val = VARYING; - val.mask = -1; - if (flag_tree_bit_ccp) - { - wide_int nonzero_bits = get_nonzero_bits (var); - tree value; - widest_int mask; - - if (SSA_NAME_VAR (var) - && TREE_CODE (SSA_NAME_VAR (var)) == PARM_DECL - && ipcp_get_parm_bits (SSA_NAME_VAR (var), &value, &mask)) - { - val.lattice_val = CONSTANT; - val.value = value; - widest_int ipa_value = wi::to_widest (value); - /* Unknown bits from IPA CP must be equal to zero. */ - gcc_assert (wi::bit_and (ipa_value, mask) == 0); - val.mask = mask; - if (nonzero_bits != -1) - val.mask &= extend_mask (nonzero_bits, - TYPE_SIGN (TREE_TYPE (var))); - } - else if (nonzero_bits != -1) - { - val.lattice_val = CONSTANT; - val.value = build_zero_cst (TREE_TYPE (var)); - val.mask = extend_mask (nonzero_bits, - TYPE_SIGN (TREE_TYPE (var))); - } - } - } - } - else if (is_gimple_assign (stmt)) - { - tree cst; - if (gimple_assign_single_p (stmt) - && DECL_P (gimple_assign_rhs1 (stmt)) - && (cst = get_symbol_constant_value (gimple_assign_rhs1 (stmt)))) - { - val.lattice_val = CONSTANT; - val.value = cst; - } - else - { - /* Any other variable defined by an assignment is considered - UNDEFINED. 
*/ - val.lattice_val = UNDEFINED; - } - } - else if ((is_gimple_call (stmt) - && gimple_call_lhs (stmt) != NULL_TREE) - || gimple_code (stmt) == GIMPLE_PHI) - { - /* A variable defined by a call or a PHI node is considered - UNDEFINED. */ - val.lattice_val = UNDEFINED; - } - else - { - /* Otherwise, VAR will never take on a constant value. */ - val.lattice_val = VARYING; - val.mask = -1; - } - - return val; -} - - -/* Get the constant value associated with variable VAR. */ - -static inline ccp_prop_value_t * -get_value (tree var) -{ - ccp_prop_value_t *val; - - if (const_val == NULL - || SSA_NAME_VERSION (var) >= n_const_val) - return NULL; - - val = &const_val[SSA_NAME_VERSION (var)]; - if (val->lattice_val == UNINITIALIZED) - *val = get_default_value (var); - - canonicalize_value (val); - - return val; -} - -/* Return the constant tree value associated with VAR. */ - -static inline tree -get_constant_value (tree var) -{ - ccp_prop_value_t *val; - if (TREE_CODE (var) != SSA_NAME) - { - if (is_gimple_min_invariant (var)) - return var; - return NULL_TREE; - } - val = get_value (var); - if (val - && val->lattice_val == CONSTANT - && (TREE_CODE (val->value) != INTEGER_CST - || val->mask == 0)) - return val->value; - return NULL_TREE; -} - -/* Sets the value associated with VAR to VARYING. */ - -static inline void -set_value_varying (tree var) -{ - ccp_prop_value_t *val = &const_val[SSA_NAME_VERSION (var)]; - - val->lattice_val = VARYING; - val->value = NULL_TREE; - val->mask = -1; -} - -/* For integer constants, make sure to drop TREE_OVERFLOW. */ - -static void -canonicalize_value (ccp_prop_value_t *val) -{ - if (val->lattice_val != CONSTANT) - return; - - if (TREE_OVERFLOW_P (val->value)) - val->value = drop_tree_overflow (val->value); -} - -/* Return whether the lattice transition is valid. */ - -static bool -valid_lattice_transition (ccp_prop_value_t old_val, ccp_prop_value_t new_val) -{ - /* Lattice transitions must always be monotonically increasing in - value. 
*/ - if (old_val.lattice_val < new_val.lattice_val) - return true; - - if (old_val.lattice_val != new_val.lattice_val) - return false; - - if (!old_val.value && !new_val.value) - return true; - - /* Now both lattice values are CONSTANT. */ - - /* Allow arbitrary copy changes as we might look through PHI <a_1, ...> - when only a single copy edge is executable. */ - if (TREE_CODE (old_val.value) == SSA_NAME - && TREE_CODE (new_val.value) == SSA_NAME) - return true; - - /* Allow transitioning from a constant to a copy. */ - if (is_gimple_min_invariant (old_val.value) - && TREE_CODE (new_val.value) == SSA_NAME) - return true; - - /* Allow transitioning from PHI <&x, not executable> == &x - to PHI <&x, &y> == common alignment. */ - if (TREE_CODE (old_val.value) != INTEGER_CST - && TREE_CODE (new_val.value) == INTEGER_CST) - return true; - - /* Bit-lattices have to agree in the still valid bits. */ - if (TREE_CODE (old_val.value) == INTEGER_CST - && TREE_CODE (new_val.value) == INTEGER_CST) - return (wi::bit_and_not (wi::to_widest (old_val.value), new_val.mask) - == wi::bit_and_not (wi::to_widest (new_val.value), new_val.mask)); - - /* Otherwise constant values have to agree. */ - if (operand_equal_p (old_val.value, new_val.value, 0)) - return true; - - /* At least the kinds and types should agree now. */ - if (TREE_CODE (old_val.value) != TREE_CODE (new_val.value) - || !types_compatible_p (TREE_TYPE (old_val.value), - TREE_TYPE (new_val.value))) - return false; - - /* For floats and !HONOR_NANS allow transitions from (partial) NaN - to non-NaN. 
*/ - tree type = TREE_TYPE (new_val.value); - if (SCALAR_FLOAT_TYPE_P (type) - && !HONOR_NANS (type)) - { - if (REAL_VALUE_ISNAN (TREE_REAL_CST (old_val.value))) - return true; - } - else if (VECTOR_FLOAT_TYPE_P (type) - && !HONOR_NANS (type)) - { - unsigned int count - = tree_vector_builder::binary_encoded_nelts (old_val.value, - new_val.value); - for (unsigned int i = 0; i < count; ++i) - if (!REAL_VALUE_ISNAN - (TREE_REAL_CST (VECTOR_CST_ENCODED_ELT (old_val.value, i))) - && !operand_equal_p (VECTOR_CST_ENCODED_ELT (old_val.value, i), - VECTOR_CST_ENCODED_ELT (new_val.value, i), 0)) - return false; - return true; - } - else if (COMPLEX_FLOAT_TYPE_P (type) - && !HONOR_NANS (type)) - { - if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_REALPART (old_val.value))) - && !operand_equal_p (TREE_REALPART (old_val.value), - TREE_REALPART (new_val.value), 0)) - return false; - if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_IMAGPART (old_val.value))) - && !operand_equal_p (TREE_IMAGPART (old_val.value), - TREE_IMAGPART (new_val.value), 0)) - return false; - return true; - } - return false; -} - -/* Set the value for variable VAR to NEW_VAL. Return true if the new - value is different from VAR's previous value. */ - -static bool -set_lattice_value (tree var, ccp_prop_value_t *new_val) -{ - /* We can deal with old UNINITIALIZED values just fine here. */ - ccp_prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)]; - - canonicalize_value (new_val); - - /* We have to be careful to not go up the bitwise lattice - represented by the mask. Instead of dropping to VARYING - use the meet operator to retain a conservative value. - Missed optimizations like PR65851 makes this necessary. - It also ensures we converge to a stable lattice solution. 
*/ - if (old_val->lattice_val != UNINITIALIZED) - ccp_lattice_meet (new_val, old_val); - - gcc_checking_assert (valid_lattice_transition (*old_val, *new_val)); - - /* If *OLD_VAL and NEW_VAL are the same, return false to inform the - caller that this was a non-transition. */ - if (old_val->lattice_val != new_val->lattice_val - || (new_val->lattice_val == CONSTANT - && (TREE_CODE (new_val->value) != TREE_CODE (old_val->value) - || (TREE_CODE (new_val->value) == INTEGER_CST - && (new_val->mask != old_val->mask - || (wi::bit_and_not (wi::to_widest (old_val->value), - new_val->mask) - != wi::bit_and_not (wi::to_widest (new_val->value), - new_val->mask)))) - || (TREE_CODE (new_val->value) != INTEGER_CST - && !operand_equal_p (new_val->value, old_val->value, 0))))) - { - /* ??? We would like to delay creation of INTEGER_CSTs from - partially constants here. */ - - if (dump_file && (dump_flags & TDF_DETAILS)) - { - dump_lattice_value (dump_file, "Lattice value changed to ", *new_val); - fprintf (dump_file, ". Adding SSA edges to worklist.\n"); - } - - *old_val = *new_val; - - gcc_assert (new_val->lattice_val != UNINITIALIZED); - return true; - } - - return false; -} - -static ccp_prop_value_t get_value_for_expr (tree, bool); -static ccp_prop_value_t bit_value_binop (enum tree_code, tree, tree, tree); -void bit_value_binop (enum tree_code, signop, int, widest_int *, widest_int *, - signop, int, const widest_int &, const widest_int &, - signop, int, const widest_int &, const widest_int &); - -/* Return a widest_int that can be used for bitwise simplifications - from VAL. */ - -static widest_int -value_to_wide_int (ccp_prop_value_t val) -{ - if (val.value - && TREE_CODE (val.value) == INTEGER_CST) - return wi::to_widest (val.value); - - return 0; -} - -/* Return the value for the address expression EXPR based on alignment - information. 
*/ - -static ccp_prop_value_t -get_value_from_alignment (tree expr) -{ - tree type = TREE_TYPE (expr); - ccp_prop_value_t val; - unsigned HOST_WIDE_INT bitpos; - unsigned int align; - - gcc_assert (TREE_CODE (expr) == ADDR_EXPR); - - get_pointer_alignment_1 (expr, &align, &bitpos); - val.mask = wi::bit_and_not - (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type) - ? wi::mask <widest_int> (TYPE_PRECISION (type), false) - : -1, - align / BITS_PER_UNIT - 1); - val.lattice_val - = wi::sext (val.mask, TYPE_PRECISION (type)) == -1 ? VARYING : CONSTANT; - if (val.lattice_val == CONSTANT) - val.value = build_int_cstu (type, bitpos / BITS_PER_UNIT); - else - val.value = NULL_TREE; - - return val; -} - -/* Return the value for the tree operand EXPR. If FOR_BITS_P is true - return constant bits extracted from alignment information for - invariant addresses. */ - -static ccp_prop_value_t -get_value_for_expr (tree expr, bool for_bits_p) -{ - ccp_prop_value_t val; - - if (TREE_CODE (expr) == SSA_NAME) - { - ccp_prop_value_t *val_ = get_value (expr); - if (val_) - val = *val_; - else - { - val.lattice_val = VARYING; - val.value = NULL_TREE; - val.mask = -1; - } - if (for_bits_p - && val.lattice_val == CONSTANT) - { - if (TREE_CODE (val.value) == ADDR_EXPR) - val = get_value_from_alignment (val.value); - else if (TREE_CODE (val.value) != INTEGER_CST) - { - val.lattice_val = VARYING; - val.value = NULL_TREE; - val.mask = -1; - } - } - /* Fall back to a copy value. 
*/ - if (!for_bits_p - && val.lattice_val == VARYING - && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr)) - { - val.lattice_val = CONSTANT; - val.value = expr; - val.mask = -1; - } - } - else if (is_gimple_min_invariant (expr) - && (!for_bits_p || TREE_CODE (expr) == INTEGER_CST)) - { - val.lattice_val = CONSTANT; - val.value = expr; - val.mask = 0; - canonicalize_value (&val); - } - else if (TREE_CODE (expr) == ADDR_EXPR) - val = get_value_from_alignment (expr); - else - { - val.lattice_val = VARYING; - val.mask = -1; - val.value = NULL_TREE; - } - - if (val.lattice_val == VARYING - && TYPE_UNSIGNED (TREE_TYPE (expr))) - val.mask = wi::zext (val.mask, TYPE_PRECISION (TREE_TYPE (expr))); - - return val; -} - -/* Return the likely CCP lattice value for STMT. - - If STMT has no operands, then return CONSTANT. - - Else if undefinedness of operands of STMT cause its value to be - undefined, then return UNDEFINED. - - Else if any operands of STMT are constants, then return CONSTANT. - - Else return VARYING. */ - -static ccp_lattice_t -likely_value (gimple *stmt) -{ - bool has_constant_operand, has_undefined_operand, all_undefined_operands; - bool has_nsa_operand; - tree use; - ssa_op_iter iter; - unsigned i; - - enum gimple_code code = gimple_code (stmt); - - /* This function appears to be called only for assignments, calls, - conditionals, and switches, due to the logic in visit_stmt. */ - gcc_assert (code == GIMPLE_ASSIGN - || code == GIMPLE_CALL - || code == GIMPLE_COND - || code == GIMPLE_SWITCH); - - /* If the statement has volatile operands, it won't fold to a - constant value. */ - if (gimple_has_volatile_ops (stmt)) - return VARYING; - - /* Arrive here for more complex cases. 
*/ - has_constant_operand = false; - has_undefined_operand = false; - all_undefined_operands = true; - has_nsa_operand = false; - FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE) - { - ccp_prop_value_t *val = get_value (use); - - if (val && val->lattice_val == UNDEFINED) - has_undefined_operand = true; - else - all_undefined_operands = false; - - if (val && val->lattice_val == CONSTANT) - has_constant_operand = true; - - if (SSA_NAME_IS_DEFAULT_DEF (use) - || !prop_simulate_again_p (SSA_NAME_DEF_STMT (use))) - has_nsa_operand = true; - } - - /* There may be constants in regular rhs operands. For calls we - have to ignore lhs, fndecl and static chain, otherwise only - the lhs. */ - for (i = (is_gimple_call (stmt) ? 2 : 0) + gimple_has_lhs (stmt); - i < gimple_num_ops (stmt); ++i) - { - tree op = gimple_op (stmt, i); - if (!op || TREE_CODE (op) == SSA_NAME) - continue; - if (is_gimple_min_invariant (op)) - has_constant_operand = true; - } - - if (has_constant_operand) - all_undefined_operands = false; - - if (has_undefined_operand - && code == GIMPLE_CALL - && gimple_call_internal_p (stmt)) - switch (gimple_call_internal_fn (stmt)) - { - /* These 3 builtins use the first argument just as a magic - way how to find out a decl uid. */ - case IFN_GOMP_SIMD_LANE: - case IFN_GOMP_SIMD_VF: - case IFN_GOMP_SIMD_LAST_LANE: - has_undefined_operand = false; - break; - default: - break; - } - - /* If the operation combines operands like COMPLEX_EXPR make sure to - not mark the result UNDEFINED if only one part of the result is - undefined. */ - if (has_undefined_operand && all_undefined_operands) - return UNDEFINED; - else if (code == GIMPLE_ASSIGN && has_undefined_operand) - { - switch (gimple_assign_rhs_code (stmt)) - { - /* Unary operators are handled with all_undefined_operands. */ - case PLUS_EXPR: - case MINUS_EXPR: - case POINTER_PLUS_EXPR: - case BIT_XOR_EXPR: - /* Not MIN_EXPR, MAX_EXPR. One VARYING operand may be selected. 
- Not bitwise operators, one VARYING operand may specify the - result completely. - Not logical operators for the same reason, apart from XOR. - Not COMPLEX_EXPR as one VARYING operand makes the result partly - not UNDEFINED. Not *DIV_EXPR, comparisons and shifts because - the undefined operand may be promoted. */ - return UNDEFINED; - - case ADDR_EXPR: - /* If any part of an address is UNDEFINED, like the index - of an ARRAY_EXPR, then treat the result as UNDEFINED. */ - return UNDEFINED; - - default: - ; - } - } - /* If there was an UNDEFINED operand but the result may be not UNDEFINED - fall back to CONSTANT. During iteration UNDEFINED may still drop - to CONSTANT. */ - if (has_undefined_operand) - return CONSTANT; - - /* We do not consider virtual operands here -- load from read-only - memory may have only VARYING virtual operands, but still be - constant. Also we can combine the stmt with definitions from - operands whose definitions are not simulated again. */ - if (has_constant_operand - || has_nsa_operand - || gimple_references_memory_p (stmt)) - return CONSTANT; - - return VARYING; -} - -/* Returns true if STMT cannot be constant. */ - -static bool -surely_varying_stmt_p (gimple *stmt) -{ - /* If the statement has operands that we cannot handle, it cannot be - constant. */ - if (gimple_has_volatile_ops (stmt)) - return true; - - /* If it is a call and does not return a value or is not a - builtin and not an indirect call or a call to function with - assume_aligned/alloc_align attribute, it is varying. */ - if (is_gimple_call (stmt)) - { - tree fndecl, fntype = gimple_call_fntype (stmt); - if (!gimple_call_lhs (stmt) - || ((fndecl = gimple_call_fndecl (stmt)) != NULL_TREE - && !fndecl_built_in_p (fndecl) - && !lookup_attribute ("assume_aligned", - TYPE_ATTRIBUTES (fntype)) - && !lookup_attribute ("alloc_align", - TYPE_ATTRIBUTES (fntype)))) - return true; - } - - /* Any other store operation is not interesting. 
*/ - else if (gimple_vdef (stmt)) - return true; - - /* Anything other than assignments and conditional jumps are not - interesting for CCP. */ - if (gimple_code (stmt) != GIMPLE_ASSIGN - && gimple_code (stmt) != GIMPLE_COND - && gimple_code (stmt) != GIMPLE_SWITCH - && gimple_code (stmt) != GIMPLE_CALL) - return true; - - return false; -} - -/* Initialize local data structures for CCP. */ - -static void -ccp_initialize (void) -{ - basic_block bb; - - n_const_val = num_ssa_names; - const_val = XCNEWVEC (ccp_prop_value_t, n_const_val); - - /* Initialize simulation flags for PHI nodes and statements. */ - FOR_EACH_BB_FN (bb, cfun) - { - gimple_stmt_iterator i; - - for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i)) - { - gimple *stmt = gsi_stmt (i); - bool is_varying; - - /* If the statement is a control insn, then we do not - want to avoid simulating the statement once. Failure - to do so means that those edges will never get added. */ - if (stmt_ends_bb_p (stmt)) - is_varying = false; - else - is_varying = surely_varying_stmt_p (stmt); - - if (is_varying) - { - tree def; - ssa_op_iter iter; - - /* If the statement will not produce a constant, mark - all its outputs VARYING. */ - FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS) - set_value_varying (def); - } - prop_set_simulate_again (stmt, !is_varying); - } - } - - /* Now process PHI nodes. We never clear the simulate_again flag on - phi nodes, since we do not know which edges are executable yet, - except for phi nodes for virtual operands when we do not do store ccp. */ - FOR_EACH_BB_FN (bb, cfun) - { - gphi_iterator i; - - for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i)) - { - gphi *phi = i.phi (); - - if (virtual_operand_p (gimple_phi_result (phi))) - prop_set_simulate_again (phi, false); - else - prop_set_simulate_again (phi, true); - } - } -} - -/* Debug count support. 
Reset the values of ssa names - VARYING when the total number ssa names analyzed is - beyond the debug count specified. */ - -static void -do_dbg_cnt (void) -{ - unsigned i; - for (i = 0; i < num_ssa_names; i++) - { - if (!dbg_cnt (ccp)) - { - const_val[i].lattice_val = VARYING; - const_val[i].mask = -1; - const_val[i].value = NULL_TREE; - } - } -} - - -/* We want to provide our own GET_VALUE and FOLD_STMT virtual methods. */ -class ccp_folder : public substitute_and_fold_engine -{ - public: - tree value_of_expr (tree, gimple *) FINAL OVERRIDE; - bool fold_stmt (gimple_stmt_iterator *) FINAL OVERRIDE; -}; - -/* This method just wraps GET_CONSTANT_VALUE for now. Over time - naked calls to GET_CONSTANT_VALUE should be eliminated in favor - of calling member functions. */ - -tree -ccp_folder::value_of_expr (tree op, gimple *) -{ - return get_constant_value (op); -} - -/* Do final substitution of propagated values, cleanup the flowgraph and - free allocated storage. If NONZERO_P, record nonzero bits. - - Return TRUE when something was optimized. */ - -static bool -ccp_finalize (bool nonzero_p) -{ - bool something_changed; - unsigned i; - tree name; - - do_dbg_cnt (); - - /* Derive alignment and misalignment information from partially - constant pointers in the lattice or nonzero bits from partially - constant integers. */ - FOR_EACH_SSA_NAME (i, name, cfun) - { - ccp_prop_value_t *val; - unsigned int tem, align; - - if (!POINTER_TYPE_P (TREE_TYPE (name)) - && (!INTEGRAL_TYPE_P (TREE_TYPE (name)) - /* Don't record nonzero bits before IPA to avoid - using too much memory. */ - || !nonzero_p)) - continue; - - val = get_value (name); - if (val->lattice_val != CONSTANT - || TREE_CODE (val->value) != INTEGER_CST - || val->mask == 0) - continue; - - if (POINTER_TYPE_P (TREE_TYPE (name))) - { - /* Trailing mask bits specify the alignment, trailing value - bits the misalignment. 
*/ - tem = val->mask.to_uhwi (); - align = least_bit_hwi (tem); - if (align > 1) - set_ptr_info_alignment (get_ptr_info (name), align, - (TREE_INT_CST_LOW (val->value) - & (align - 1))); - } - else - { - unsigned int precision = TYPE_PRECISION (TREE_TYPE (val->value)); - wide_int nonzero_bits - = (wide_int::from (val->mask, precision, UNSIGNED) - | wi::to_wide (val->value)); - nonzero_bits &= get_nonzero_bits (name); - set_nonzero_bits (name, nonzero_bits); - } - } - - /* Perform substitutions based on the known constant values. */ - class ccp_folder ccp_folder; - something_changed = ccp_folder.substitute_and_fold (); - - free (const_val); - const_val = NULL; - return something_changed; -} - - -/* Compute the meet operator between *VAL1 and *VAL2. Store the result - in VAL1. - - any M UNDEFINED = any - any M VARYING = VARYING - Ci M Cj = Ci if (i == j) - Ci M Cj = VARYING if (i != j) - */ - -static void -ccp_lattice_meet (ccp_prop_value_t *val1, ccp_prop_value_t *val2) -{ - if (val1->lattice_val == UNDEFINED - /* For UNDEFINED M SSA we can't always SSA because its definition - may not dominate the PHI node. Doing optimistic copy propagation - also causes a lot of gcc.dg/uninit-pred*.c FAILs. */ - && (val2->lattice_val != CONSTANT - || TREE_CODE (val2->value) != SSA_NAME)) - { - /* UNDEFINED M any = any */ - *val1 = *val2; - } - else if (val2->lattice_val == UNDEFINED - /* See above. */ - && (val1->lattice_val != CONSTANT - || TREE_CODE (val1->value) != SSA_NAME)) - { - /* any M UNDEFINED = any - Nothing to do. VAL1 already contains the value we want. */ - ; - } - else if (val1->lattice_val == VARYING - || val2->lattice_val == VARYING) - { - /* any M VARYING = VARYING. 
*/ - val1->lattice_val = VARYING; - val1->mask = -1; - val1->value = NULL_TREE; - } - else if (val1->lattice_val == CONSTANT - && val2->lattice_val == CONSTANT - && TREE_CODE (val1->value) == INTEGER_CST - && TREE_CODE (val2->value) == INTEGER_CST) - { - /* Ci M Cj = Ci if (i == j) - Ci M Cj = VARYING if (i != j) - - For INTEGER_CSTs mask unequal bits. If no equal bits remain, - drop to varying. */ - val1->mask = (val1->mask | val2->mask - | (wi::to_widest (val1->value) - ^ wi::to_widest (val2->value))); - if (wi::sext (val1->mask, TYPE_PRECISION (TREE_TYPE (val1->value))) == -1) - { - val1->lattice_val = VARYING; - val1->value = NULL_TREE; - } - } - else if (val1->lattice_val == CONSTANT - && val2->lattice_val == CONSTANT - && operand_equal_p (val1->value, val2->value, 0)) - { - /* Ci M Cj = Ci if (i == j) - Ci M Cj = VARYING if (i != j) - - VAL1 already contains the value we want for equivalent values. */ - } - else if (val1->lattice_val == CONSTANT - && val2->lattice_val == CONSTANT - && (TREE_CODE (val1->value) == ADDR_EXPR - || TREE_CODE (val2->value) == ADDR_EXPR)) - { - /* When not equal addresses are involved try meeting for - alignment. */ - ccp_prop_value_t tem = *val2; - if (TREE_CODE (val1->value) == ADDR_EXPR) - *val1 = get_value_for_expr (val1->value, true); - if (TREE_CODE (val2->value) == ADDR_EXPR) - tem = get_value_for_expr (val2->value, true); - ccp_lattice_meet (val1, &tem); - } - else - { - /* Any other combination is VARYING. */ - val1->lattice_val = VARYING; - val1->mask = -1; - val1->value = NULL_TREE; - } -} - - -/* Loop through the PHI_NODE's parameters for BLOCK and compare their - lattice values to determine PHI_NODE's lattice value. The value of a - PHI node is determined calling ccp_lattice_meet with all the arguments - of the PHI node that are incoming via executable edges. 
*/ - -enum ssa_prop_result -ccp_propagate::visit_phi (gphi *phi) -{ - unsigned i; - ccp_prop_value_t new_val; - - if (dump_file && (dump_flags & TDF_DETAILS)) - { - fprintf (dump_file, "\nVisiting PHI node: "); - print_gimple_stmt (dump_file, phi, 0, dump_flags); - } - - new_val.lattice_val = UNDEFINED; - new_val.value = NULL_TREE; - new_val.mask = 0; - - bool first = true; - bool non_exec_edge = false; - for (i = 0; i < gimple_phi_num_args (phi); i++) - { - /* Compute the meet operator over all the PHI arguments flowing - through executable edges. */ - edge e = gimple_phi_arg_edge (phi, i); - - if (dump_file && (dump_flags & TDF_DETAILS)) - { - fprintf (dump_file, - "\tArgument #%d (%d -> %d %sexecutable)\n", - i, e->src->index, e->dest->index, - (e->flags & EDGE_EXECUTABLE) ? "" : "not "); - } - - /* If the incoming edge is executable, Compute the meet operator for - the existing value of the PHI node and the current PHI argument. */ - if (e->flags & EDGE_EXECUTABLE) - { - tree arg = gimple_phi_arg (phi, i)->def; - ccp_prop_value_t arg_val = get_value_for_expr (arg, false); - - if (first) - { - new_val = arg_val; - first = false; - } - else - ccp_lattice_meet (&new_val, &arg_val); - - if (dump_file && (dump_flags & TDF_DETAILS)) - { - fprintf (dump_file, "\t"); - print_generic_expr (dump_file, arg, dump_flags); - dump_lattice_value (dump_file, "\tValue: ", arg_val); - fprintf (dump_file, "\n"); - } - - if (new_val.lattice_val == VARYING) - break; - } - else - non_exec_edge = true; - } - - /* In case there were non-executable edges and the value is a copy - make sure its definition dominates the PHI node. */ - if (non_exec_edge - && new_val.lattice_val == CONSTANT - && TREE_CODE (new_val.value) == SSA_NAME - && ! SSA_NAME_IS_DEFAULT_DEF (new_val.value) - && ! 
dominated_by_p (CDI_DOMINATORS, gimple_bb (phi), - gimple_bb (SSA_NAME_DEF_STMT (new_val.value)))) - { - new_val.lattice_val = VARYING; - new_val.value = NULL_TREE; - new_val.mask = -1; - } - - if (dump_file && (dump_flags & TDF_DETAILS)) - { - dump_lattice_value (dump_file, "\n PHI node value: ", new_val); - fprintf (dump_file, "\n\n"); - } - - /* Make the transition to the new value. */ - if (set_lattice_value (gimple_phi_result (phi), &new_val)) - { - if (new_val.lattice_val == VARYING) - return SSA_PROP_VARYING; - else - return SSA_PROP_INTERESTING; - } - else - return SSA_PROP_NOT_INTERESTING; -} - -/* Return the constant value for OP or OP otherwise. */ - -static tree -valueize_op (tree op) -{ - if (TREE_CODE (op) == SSA_NAME) - { - tree tem = get_constant_value (op); - if (tem) - return tem; - } - return op; -} - -/* Return the constant value for OP, but signal to not follow SSA - edges if the definition may be simulated again. */ - -static tree -valueize_op_1 (tree op) -{ - if (TREE_CODE (op) == SSA_NAME) - { - /* If the definition may be simulated again we cannot follow - this SSA edge as the SSA propagator does not necessarily - re-visit the use. */ - gimple *def_stmt = SSA_NAME_DEF_STMT (op); - if (!gimple_nop_p (def_stmt) - && prop_simulate_again_p (def_stmt)) - return NULL_TREE; - tree tem = get_constant_value (op); - if (tem) - return tem; - } - return op; -} - -/* CCP specific front-end to the non-destructive constant folding - routines. - - Attempt to simplify the RHS of STMT knowing that one or more - operands are constants. - - If simplification is possible, return the simplified RHS, - otherwise return the original RHS or NULL_TREE. */ - -static tree -ccp_fold (gimple *stmt) -{ - location_t loc = gimple_location (stmt); - switch (gimple_code (stmt)) - { - case GIMPLE_COND: - { - /* Handle comparison operators that can appear in GIMPLE form. 
*/ - tree op0 = valueize_op (gimple_cond_lhs (stmt)); - tree op1 = valueize_op (gimple_cond_rhs (stmt)); - enum tree_code code = gimple_cond_code (stmt); - return fold_binary_loc (loc, code, boolean_type_node, op0, op1); - } - - case GIMPLE_SWITCH: - { - /* Return the constant switch index. */ - return valueize_op (gimple_switch_index (as_a <gswitch *> (stmt))); - } - - case GIMPLE_ASSIGN: - case GIMPLE_CALL: - return gimple_fold_stmt_to_constant_1 (stmt, - valueize_op, valueize_op_1); - - default: - gcc_unreachable (); - } -} - -/* Determine the minimum and maximum values, *MIN and *MAX respectively, - represented by the mask pair VAL and MASK with signedness SGN and - precision PRECISION. */ - -void -value_mask_to_min_max (widest_int *min, widest_int *max, - const widest_int &val, const widest_int &mask, - signop sgn, int precision) -{ - *min = wi::bit_and_not (val, mask); - *max = val | mask; - if (sgn == SIGNED && wi::neg_p (mask)) - { - widest_int sign_bit = wi::lshift (1, precision - 1); - *min ^= sign_bit; - *max ^= sign_bit; - /* MAX is zero extended, and MIN is sign extended. */ - *min = wi::ext (*min, precision, sgn); - *max = wi::ext (*max, precision, sgn); - } -} - -/* Apply the operation CODE in type TYPE to the value, mask pair - RVAL and RMASK representing a value of type RTYPE and set - the value, mask pair *VAL and *MASK to the result. */ - -void -bit_value_unop (enum tree_code code, signop type_sgn, int type_precision, - widest_int *val, widest_int *mask, - signop rtype_sgn, int rtype_precision, - const widest_int &rval, const widest_int &rmask) -{ - switch (code) - { - case BIT_NOT_EXPR: - *mask = rmask; - *val = ~rval; - break; - - case NEGATE_EXPR: - { - widest_int temv, temm; - /* Return ~rval + 1. 
*/ - bit_value_unop (BIT_NOT_EXPR, type_sgn, type_precision, &temv, &temm, - type_sgn, type_precision, rval, rmask); - bit_value_binop (PLUS_EXPR, type_sgn, type_precision, val, mask, - type_sgn, type_precision, temv, temm, - type_sgn, type_precision, 1, 0); - break; - } - - CASE_CONVERT: - { - /* First extend mask and value according to the original type. */ - *mask = wi::ext (rmask, rtype_precision, rtype_sgn); - *val = wi::ext (rval, rtype_precision, rtype_sgn); - - /* Then extend mask and value according to the target type. */ - *mask = wi::ext (*mask, type_precision, type_sgn); - *val = wi::ext (*val, type_precision, type_sgn); - break; - } - - case ABS_EXPR: - case ABSU_EXPR: - if (wi::sext (rmask, rtype_precision) == -1) - *mask = -1; - else if (wi::neg_p (rmask)) - { - /* Result is either rval or -rval. */ - widest_int temv, temm; - bit_value_unop (NEGATE_EXPR, rtype_sgn, rtype_precision, &temv, - &temm, type_sgn, type_precision, rval, rmask); - temm |= (rmask | (rval ^ temv)); - /* Extend the result. */ - *mask = wi::ext (temm, type_precision, type_sgn); - *val = wi::ext (temv, type_precision, type_sgn); - } - else if (wi::neg_p (rval)) - { - bit_value_unop (NEGATE_EXPR, type_sgn, type_precision, val, mask, - type_sgn, type_precision, rval, rmask); - } - else - { - *mask = rmask; - *val = rval; - } - break; - - default: - *mask = -1; - break; - } -} - -/* Determine the mask pair *VAL and *MASK from multiplying the - argument mask pair RVAL, RMASK by the unsigned constant C. */ -void -bit_value_mult_const (signop sgn, int width, - widest_int *val, widest_int *mask, - const widest_int &rval, const widest_int &rmask, - widest_int c) -{ - widest_int sum_mask = 0; - - /* Ensure rval_lo only contains known bits. */ - widest_int rval_lo = wi::bit_and_not (rval, rmask); - - if (rval_lo != 0) - { - /* General case (some bits of multiplicand are known set). */ - widest_int sum_val = 0; - while (c != 0) - { - /* Determine the lowest bit set in the multiplier. 
*/ - int bitpos = wi::ctz (c); - widest_int term_mask = rmask << bitpos; - widest_int term_val = rval_lo << bitpos; - - /* sum += term. */ - widest_int lo = sum_val + term_val; - widest_int hi = (sum_val | sum_mask) + (term_val | term_mask); - sum_mask |= term_mask | (lo ^ hi); - sum_val = lo; - - /* Clear this bit in the multiplier. */ - c ^= wi::lshift (1, bitpos); - } - /* Correctly extend the result value. */ - *val = wi::ext (sum_val, width, sgn); - } - else - { - /* Special case (no bits of multiplicand are known set). */ - while (c != 0) - { - /* Determine the lowest bit set in the multiplier. */ - int bitpos = wi::ctz (c); - widest_int term_mask = rmask << bitpos; - - /* sum += term. */ - widest_int hi = sum_mask + term_mask; - sum_mask |= term_mask | hi; - - /* Clear this bit in the multiplier. */ - c ^= wi::lshift (1, bitpos); - } - *val = 0; - } - - /* Correctly extend the result mask. */ - *mask = wi::ext (sum_mask, width, sgn); -} - -/* Fill up to MAX values in the BITS array with values representing - each of the non-zero bits in the value X. Returns the number of - bits in X (capped at the maximum value MAX). For example, an X - value 11, places 1, 2 and 8 in BITS and returns the value 3. */ - -unsigned int -get_individual_bits (widest_int *bits, widest_int x, unsigned int max) -{ - unsigned int count = 0; - while (count < max && x != 0) - { - int bitpos = wi::ctz (x); - bits[count] = wi::lshift (1, bitpos); - x ^= bits[count]; - count++; - } - return count; -} - -/* Array of 2^N - 1 values representing the bits flipped between - consecutive Gray codes. This is used to efficiently enumerate - all permutations on N bits using XOR. 
*/ -static const unsigned char gray_code_bit_flips[63] = { - 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, - 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, - 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, - 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 -}; - -/* Apply the operation CODE in type TYPE to the value, mask pairs - R1VAL, R1MASK and R2VAL, R2MASK representing a values of type R1TYPE - and R2TYPE and set the value, mask pair *VAL and *MASK to the result. */ - -void -bit_value_binop (enum tree_code code, signop sgn, int width, - widest_int *val, widest_int *mask, - signop r1type_sgn, int r1type_precision, - const widest_int &r1val, const widest_int &r1mask, - signop r2type_sgn, int r2type_precision ATTRIBUTE_UNUSED, - const widest_int &r2val, const widest_int &r2mask) -{ - bool swap_p = false; - - /* Assume we'll get a constant result. Use an initial non varying - value, we fall back to varying in the end if necessary. */ - *mask = -1; - /* Ensure that VAL is initialized (to any value). */ - *val = 0; - - switch (code) - { - case BIT_AND_EXPR: - /* The mask is constant where there is a known not - set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */ - *mask = (r1mask | r2mask) & (r1val | r1mask) & (r2val | r2mask); - *val = r1val & r2val; - break; - - case BIT_IOR_EXPR: - /* The mask is constant where there is a known - set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)). 
*/ - *mask = wi::bit_and_not (r1mask | r2mask, - wi::bit_and_not (r1val, r1mask) - | wi::bit_and_not (r2val, r2mask)); - *val = r1val | r2val; - break; - - case BIT_XOR_EXPR: - /* m1 | m2 */ - *mask = r1mask | r2mask; - *val = r1val ^ r2val; - break; - - case LROTATE_EXPR: - case RROTATE_EXPR: - if (r2mask == 0) - { - widest_int shift = r2val; - if (shift == 0) - { - *mask = r1mask; - *val = r1val; - } - else - { - if (wi::neg_p (shift, r2type_sgn)) - { - shift = -shift; - if (code == RROTATE_EXPR) - code = LROTATE_EXPR; - else - code = RROTATE_EXPR; - } - if (code == RROTATE_EXPR) - { - *mask = wi::rrotate (r1mask, shift, width); - *val = wi::rrotate (r1val, shift, width); - } - else - { - *mask = wi::lrotate (r1mask, shift, width); - *val = wi::lrotate (r1val, shift, width); - } - } - } - else if (wi::ltu_p (r2val | r2mask, width) - && wi::popcount (r2mask) <= 4) - { - widest_int bits[4]; - widest_int res_val, res_mask; - widest_int tmp_val, tmp_mask; - widest_int shift = wi::bit_and_not (r2val, r2mask); - unsigned int bit_count = get_individual_bits (bits, r2mask, 4); - unsigned int count = (1 << bit_count) - 1; - - /* Initialize result to rotate by smallest value of shift. */ - if (code == RROTATE_EXPR) - { - res_mask = wi::rrotate (r1mask, shift, width); - res_val = wi::rrotate (r1val, shift, width); - } - else - { - res_mask = wi::lrotate (r1mask, shift, width); - res_val = wi::lrotate (r1val, shift, width); - } - - /* Iterate through the remaining values of shift. */ - for (unsigned int i=0; i<count; i++) - { - shift ^= bits[gray_code_bit_flips[i]]; - if (code == RROTATE_EXPR) - { - tmp_mask = wi::rrotate (r1mask, shift, width); - tmp_val = wi::rrotate (r1val, shift, width); - } - else - { - tmp_mask = wi::lrotate (r1mask, shift, width); - tmp_val = wi::lrotate (r1val, shift, width); - } - /* Accumulate the result. 
*/ - res_mask |= tmp_mask | (res_val ^ tmp_val); - } - *val = wi::bit_and_not (res_val, res_mask); - *mask = res_mask; - } - break; - - case LSHIFT_EXPR: - case RSHIFT_EXPR: - /* ??? We can handle partially known shift counts if we know - its sign. That way we can tell that (x << (y | 8)) & 255 - is zero. */ - if (r2mask == 0) - { - widest_int shift = r2val; - if (shift == 0) - { - *mask = r1mask; - *val = r1val; - } - else - { - if (wi::neg_p (shift, r2type_sgn)) - break; - if (code == RSHIFT_EXPR) - { - *mask = wi::rshift (wi::ext (r1mask, width, sgn), shift, sgn); - *val = wi::rshift (wi::ext (r1val, width, sgn), shift, sgn); - } - else - { - *mask = wi::ext (r1mask << shift, width, sgn); - *val = wi::ext (r1val << shift, width, sgn); - } - } - } - else if (wi::ltu_p (r2val | r2mask, width)) - { - if (wi::popcount (r2mask) <= 4) - { - widest_int bits[4]; - widest_int arg_val, arg_mask; - widest_int res_val, res_mask; - widest_int tmp_val, tmp_mask; - widest_int shift = wi::bit_and_not (r2val, r2mask); - unsigned int bit_count = get_individual_bits (bits, r2mask, 4); - unsigned int count = (1 << bit_count) - 1; - - /* Initialize result to shift by smallest value of shift. */ - if (code == RSHIFT_EXPR) - { - arg_mask = wi::ext (r1mask, width, sgn); - arg_val = wi::ext (r1val, width, sgn); - res_mask = wi::rshift (arg_mask, shift, sgn); - res_val = wi::rshift (arg_val, shift, sgn); - } - else - { - arg_mask = r1mask; - arg_val = r1val; - res_mask = arg_mask << shift; - res_val = arg_val << shift; - } - - /* Iterate through the remaining values of shift. */ - for (unsigned int i=0; i<count; i++) - { - shift ^= bits[gray_code_bit_flips[i]]; - if (code == RSHIFT_EXPR) - { - tmp_mask = wi::rshift (arg_mask, shift, sgn); - tmp_val = wi::rshift (arg_val, shift, sgn); - } - else - { - tmp_mask = arg_mask << shift; - tmp_val = arg_val << shift; - } - /* Accumulate the result. 
*/ - res_mask |= tmp_mask | (res_val ^ tmp_val); - } - res_mask = wi::ext (res_mask, width, sgn); - res_val = wi::ext (res_val, width, sgn); - *val = wi::bit_and_not (res_val, res_mask); - *mask = res_mask; - } - else if ((r1val | r1mask) == 0) - { - /* Handle shifts of zero to avoid undefined wi::ctz below. */ - *mask = 0; - *val = 0; - } - else if (code == LSHIFT_EXPR) - { - widest_int tmp = wi::mask <widest_int> (width, false); - tmp <<= wi::ctz (r1val | r1mask); - tmp <<= wi::bit_and_not (r2val, r2mask); - *mask = wi::ext (tmp, width, sgn); - *val = 0; - } - else if (!wi::neg_p (r1val | r1mask, sgn)) - { - /* Logical right shift, or zero sign bit. */ - widest_int arg = r1val | r1mask; - int lzcount = wi::clz (arg); - if (lzcount) - lzcount -= wi::get_precision (arg) - width; - widest_int tmp = wi::mask <widest_int> (width, false); - tmp = wi::lrshift (tmp, lzcount); - tmp = wi::lrshift (tmp, wi::bit_and_not (r2val, r2mask)); - *mask = wi::ext (tmp, width, sgn); - *val = 0; - } - else if (!wi::neg_p (r1mask)) - { - /* Arithmetic right shift with set sign bit. */ - widest_int arg = wi::bit_and_not (r1val, r1mask); - int sbcount = wi::clrsb (arg); - sbcount -= wi::get_precision (arg) - width; - widest_int tmp = wi::mask <widest_int> (width, false); - tmp = wi::lrshift (tmp, sbcount); - tmp = wi::lrshift (tmp, wi::bit_and_not (r2val, r2mask)); - *mask = wi::sext (tmp, width); - tmp = wi::bit_not (tmp); - *val = wi::sext (tmp, width); - } - } - break; - - case PLUS_EXPR: - case POINTER_PLUS_EXPR: - { - /* Do the addition with unknown bits set to zero, to give carry-ins of - zero wherever possible. */ - widest_int lo = (wi::bit_and_not (r1val, r1mask) - + wi::bit_and_not (r2val, r2mask)); - lo = wi::ext (lo, width, sgn); - /* Do the addition with unknown bits set to one, to give carry-ins of - one wherever possible. 
*/ - widest_int hi = (r1val | r1mask) + (r2val | r2mask); - hi = wi::ext (hi, width, sgn); - /* Each bit in the result is known if (a) the corresponding bits in - both inputs are known, and (b) the carry-in to that bit position - is known. We can check condition (b) by seeing if we got the same - result with minimised carries as with maximised carries. */ - *mask = r1mask | r2mask | (lo ^ hi); - *mask = wi::ext (*mask, width, sgn); - /* It shouldn't matter whether we choose lo or hi here. */ - *val = lo; - break; - } - - case MINUS_EXPR: - case POINTER_DIFF_EXPR: - { - /* Subtraction is derived from the addition algorithm above. */ - widest_int lo = wi::bit_and_not (r1val, r1mask) - (r2val | r2mask); - lo = wi::ext (lo, width, sgn); - widest_int hi = (r1val | r1mask) - wi::bit_and_not (r2val, r2mask); - hi = wi::ext (hi, width, sgn); - *mask = r1mask | r2mask | (lo ^ hi); - *mask = wi::ext (*mask, width, sgn); - *val = lo; - break; - } - - case MULT_EXPR: - if (r2mask == 0 - && !wi::neg_p (r2val, sgn) - && (flag_expensive_optimizations || wi::popcount (r2val) < 8)) - bit_value_mult_const (sgn, width, val, mask, r1val, r1mask, r2val); - else if (r1mask == 0 - && !wi::neg_p (r1val, sgn) - && (flag_expensive_optimizations || wi::popcount (r1val) < 8)) - bit_value_mult_const (sgn, width, val, mask, r2val, r2mask, r1val); - else - { - /* Just track trailing zeros in both operands and transfer - them to the other. */ - int r1tz = wi::ctz (r1val | r1mask); - int r2tz = wi::ctz (r2val | r2mask); - if (r1tz + r2tz >= width) - { - *mask = 0; - *val = 0; - } - else if (r1tz + r2tz > 0) - { - *mask = wi::ext (wi::mask <widest_int> (r1tz + r2tz, true), - width, sgn); - *val = 0; - } - } - break; - - case EQ_EXPR: - case NE_EXPR: - { - widest_int m = r1mask | r2mask; - if (wi::bit_and_not (r1val, m) != wi::bit_and_not (r2val, m)) - { - *mask = 0; - *val = ((code == EQ_EXPR) ? 0 : 1); - } - else - { - /* We know the result of a comparison is always one or zero. 
*/ - *mask = 1; - *val = 0; - } - break; - } - - case GE_EXPR: - case GT_EXPR: - swap_p = true; - code = swap_tree_comparison (code); - /* Fall through. */ - case LT_EXPR: - case LE_EXPR: - { - widest_int min1, max1, min2, max2; - int minmax, maxmin; - - const widest_int &o1val = swap_p ? r2val : r1val; - const widest_int &o1mask = swap_p ? r2mask : r1mask; - const widest_int &o2val = swap_p ? r1val : r2val; - const widest_int &o2mask = swap_p ? r1mask : r2mask; - - value_mask_to_min_max (&min1, &max1, o1val, o1mask, - r1type_sgn, r1type_precision); - value_mask_to_min_max (&min2, &max2, o2val, o2mask, - r1type_sgn, r1type_precision); - - /* For comparisons the signedness is in the comparison operands. */ - /* Do a cross comparison of the max/min pairs. */ - maxmin = wi::cmp (max1, min2, r1type_sgn); - minmax = wi::cmp (min1, max2, r1type_sgn); - if (maxmin < (code == LE_EXPR ? 1: 0)) /* o1 < or <= o2. */ - { - *mask = 0; - *val = 1; - } - else if (minmax > (code == LT_EXPR ? -1 : 0)) /* o1 >= or > o2. */ - { - *mask = 0; - *val = 0; - } - else if (maxmin == minmax) /* o1 and o2 are equal. */ - { - /* This probably should never happen as we'd have - folded the thing during fully constant value folding. */ - *mask = 0; - *val = (code == LE_EXPR ? 1 : 0); - } - else - { - /* We know the result of a comparison is always one or zero. */ - *mask = 1; - *val = 0; - } - break; - } - - case MIN_EXPR: - case MAX_EXPR: - { - widest_int min1, max1, min2, max2; - - value_mask_to_min_max (&min1, &max1, r1val, r1mask, sgn, width); - value_mask_to_min_max (&min2, &max2, r2val, r2mask, sgn, width); - - if (wi::cmp (max1, min2, sgn) <= 0) /* r1 is less than r2. */ - { - if (code == MIN_EXPR) - { - *mask = r1mask; - *val = r1val; - } - else - { - *mask = r2mask; - *val = r2val; - } - } - else if (wi::cmp (min1, max2, sgn) >= 0) /* r2 is less than r1. 
*/ - { - if (code == MIN_EXPR) - { - *mask = r2mask; - *val = r2val; - } - else - { - *mask = r1mask; - *val = r1val; - } - } - else - { - /* The result is either r1 or r2. */ - *mask = r1mask | r2mask | (r1val ^ r2val); - *val = r1val; - } - break; - } - - case TRUNC_MOD_EXPR: - { - widest_int r1max = r1val | r1mask; - widest_int r2max = r2val | r2mask; - if (sgn == UNSIGNED - || (!wi::neg_p (r1max) && !wi::neg_p (r2max))) - { - /* Confirm R2 has some bits set, to avoid division by zero. */ - widest_int r2min = wi::bit_and_not (r2val, r2mask); - if (r2min != 0) - { - /* R1 % R2 is R1 if R1 is always less than R2. */ - if (wi::ltu_p (r1max, r2min)) - { - *mask = r1mask; - *val = r1val; - } - else - { - /* R1 % R2 is always less than the maximum of R2. */ - unsigned int lzcount = wi::clz (r2max); - unsigned int bits = wi::get_precision (r2max) - lzcount; - if (r2max == wi::lshift (1, bits)) - bits--; - *mask = wi::mask <widest_int> (bits, false); - *val = 0; - } - } - } - } - break; - - case TRUNC_DIV_EXPR: - { - widest_int r1max = r1val | r1mask; - widest_int r2max = r2val | r2mask; - if (sgn == UNSIGNED - || (!wi::neg_p (r1max) && !wi::neg_p (r2max))) - { - /* Confirm R2 has some bits set, to avoid division by zero. */ - widest_int r2min = wi::bit_and_not (r2val, r2mask); - if (r2min != 0) - { - /* R1 / R2 is zero if R1 is always less than R2. */ - if (wi::ltu_p (r1max, r2min)) - { - *mask = 0; - *val = 0; - } - else - { - widest_int upper = wi::udiv_trunc (r1max, r2min); - unsigned int lzcount = wi::clz (upper); - unsigned int bits = wi::get_precision (upper) - lzcount; - *mask = wi::mask <widest_int> (bits, false); - *val = 0; - } - } - } - } - break; - - default:; - } -} - -/* Return the propagation value when applying the operation CODE to - the value RHS yielding type TYPE. 
*/ - -static ccp_prop_value_t -bit_value_unop (enum tree_code code, tree type, tree rhs) -{ - ccp_prop_value_t rval = get_value_for_expr (rhs, true); - widest_int value, mask; - ccp_prop_value_t val; - - if (rval.lattice_val == UNDEFINED) - return rval; - - gcc_assert ((rval.lattice_val == CONSTANT - && TREE_CODE (rval.value) == INTEGER_CST) - || wi::sext (rval.mask, TYPE_PRECISION (TREE_TYPE (rhs))) == -1); - bit_value_unop (code, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask, - TYPE_SIGN (TREE_TYPE (rhs)), TYPE_PRECISION (TREE_TYPE (rhs)), - value_to_wide_int (rval), rval.mask); - if (wi::sext (mask, TYPE_PRECISION (type)) != -1) - { - val.lattice_val = CONSTANT; - val.mask = mask; - /* ??? Delay building trees here. */ - val.value = wide_int_to_tree (type, value); - } - else - { - val.lattice_val = VARYING; - val.value = NULL_TREE; - val.mask = -1; - } - return val; -} - -/* Return the propagation value when applying the operation CODE to - the values RHS1 and RHS2 yielding type TYPE. 
*/ - -static ccp_prop_value_t -bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2) -{ - ccp_prop_value_t r1val = get_value_for_expr (rhs1, true); - ccp_prop_value_t r2val = get_value_for_expr (rhs2, true); - widest_int value, mask; - ccp_prop_value_t val; - - if (r1val.lattice_val == UNDEFINED - || r2val.lattice_val == UNDEFINED) - { - val.lattice_val = VARYING; - val.value = NULL_TREE; - val.mask = -1; - return val; - } - - gcc_assert ((r1val.lattice_val == CONSTANT - && TREE_CODE (r1val.value) == INTEGER_CST) - || wi::sext (r1val.mask, - TYPE_PRECISION (TREE_TYPE (rhs1))) == -1); - gcc_assert ((r2val.lattice_val == CONSTANT - && TREE_CODE (r2val.value) == INTEGER_CST) - || wi::sext (r2val.mask, - TYPE_PRECISION (TREE_TYPE (rhs2))) == -1); - bit_value_binop (code, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask, - TYPE_SIGN (TREE_TYPE (rhs1)), TYPE_PRECISION (TREE_TYPE (rhs1)), - value_to_wide_int (r1val), r1val.mask, - TYPE_SIGN (TREE_TYPE (rhs2)), TYPE_PRECISION (TREE_TYPE (rhs2)), - value_to_wide_int (r2val), r2val.mask); - - /* (x * x) & 2 == 0. */ - if (code == MULT_EXPR && rhs1 == rhs2 && TYPE_PRECISION (type) > 1) - { - widest_int m = 2; - if (wi::sext (mask, TYPE_PRECISION (type)) != -1) - value = wi::bit_and_not (value, m); - else - value = 0; - mask = wi::bit_and_not (mask, m); - } - - if (wi::sext (mask, TYPE_PRECISION (type)) != -1) - { - val.lattice_val = CONSTANT; - val.mask = mask; - /* ??? Delay building trees here. */ - val.value = wide_int_to_tree (type, value); - } - else - { - val.lattice_val = VARYING; - val.value = NULL_TREE; - val.mask = -1; - } - return val; -} - -/* Return the propagation value for __builtin_assume_aligned - and functions with assume_aligned or alloc_aligned attribute. - For __builtin_assume_aligned, ATTR is NULL_TREE, - for assume_aligned attribute ATTR is non-NULL and ALLOC_ALIGNED - is false, for alloc_aligned attribute ATTR is non-NULL and - ALLOC_ALIGNED is true. 
*/ - -static ccp_prop_value_t -bit_value_assume_aligned (gimple *stmt, tree attr, ccp_prop_value_t ptrval, - bool alloc_aligned) -{ - tree align, misalign = NULL_TREE, type; - unsigned HOST_WIDE_INT aligni, misaligni = 0; - ccp_prop_value_t alignval; - widest_int value, mask; - ccp_prop_value_t val; - - if (attr == NULL_TREE) - { - tree ptr = gimple_call_arg (stmt, 0); - type = TREE_TYPE (ptr); - ptrval = get_value_for_expr (ptr, true); - } - else - { - tree lhs = gimple_call_lhs (stmt); - type = TREE_TYPE (lhs); - } - - if (ptrval.lattice_val == UNDEFINED) - return ptrval; - gcc_assert ((ptrval.lattice_val == CONSTANT - && TREE_CODE (ptrval.value) == INTEGER_CST) - || wi::sext (ptrval.mask, TYPE_PRECISION (type)) == -1); - if (attr == NULL_TREE) - { - /* Get aligni and misaligni from __builtin_assume_aligned. */ - align = gimple_call_arg (stmt, 1); - if (!tree_fits_uhwi_p (align)) - return ptrval; - aligni = tree_to_uhwi (align); - if (gimple_call_num_args (stmt) > 2) - { - misalign = gimple_call_arg (stmt, 2); - if (!tree_fits_uhwi_p (misalign)) - return ptrval; - misaligni = tree_to_uhwi (misalign); - } - } - else - { - /* Get aligni and misaligni from assume_aligned or - alloc_align attributes. 
*/ - if (TREE_VALUE (attr) == NULL_TREE) - return ptrval; - attr = TREE_VALUE (attr); - align = TREE_VALUE (attr); - if (!tree_fits_uhwi_p (align)) - return ptrval; - aligni = tree_to_uhwi (align); - if (alloc_aligned) - { - if (aligni == 0 || aligni > gimple_call_num_args (stmt)) - return ptrval; - align = gimple_call_arg (stmt, aligni - 1); - if (!tree_fits_uhwi_p (align)) - return ptrval; - aligni = tree_to_uhwi (align); - } - else if (TREE_CHAIN (attr) && TREE_VALUE (TREE_CHAIN (attr))) - { - misalign = TREE_VALUE (TREE_CHAIN (attr)); - if (!tree_fits_uhwi_p (misalign)) - return ptrval; - misaligni = tree_to_uhwi (misalign); - } - } - if (aligni <= 1 || (aligni & (aligni - 1)) != 0 || misaligni >= aligni) - return ptrval; - - align = build_int_cst_type (type, -aligni); - alignval = get_value_for_expr (align, true); - bit_value_binop (BIT_AND_EXPR, TYPE_SIGN (type), TYPE_PRECISION (type), &value, &mask, - TYPE_SIGN (type), TYPE_PRECISION (type), value_to_wide_int (ptrval), ptrval.mask, - TYPE_SIGN (type), TYPE_PRECISION (type), value_to_wide_int (alignval), alignval.mask); - - if (wi::sext (mask, TYPE_PRECISION (type)) != -1) - { - val.lattice_val = CONSTANT; - val.mask = mask; - gcc_assert ((mask.to_uhwi () & (aligni - 1)) == 0); - gcc_assert ((value.to_uhwi () & (aligni - 1)) == 0); - value |= misaligni; - /* ??? Delay building trees here. */ - val.value = wide_int_to_tree (type, value); - } - else - { - val.lattice_val = VARYING; - val.value = NULL_TREE; - val.mask = -1; - } - return val; -} - -/* Evaluate statement STMT. - Valid only for assignments, calls, conditionals, and switches. 
*/ - -static ccp_prop_value_t -evaluate_stmt (gimple *stmt) -{ - ccp_prop_value_t val; - tree simplified = NULL_TREE; - ccp_lattice_t likelyvalue = likely_value (stmt); - bool is_constant = false; - unsigned int align; - bool ignore_return_flags = false; - - if (dump_file && (dump_flags & TDF_DETAILS)) - { - fprintf (dump_file, "which is likely "); - switch (likelyvalue) - { - case CONSTANT: - fprintf (dump_file, "CONSTANT"); - break; - case UNDEFINED: - fprintf (dump_file, "UNDEFINED"); - break; - case VARYING: - fprintf (dump_file, "VARYING"); - break; - default:; - } - fprintf (dump_file, "\n"); - } - - /* If the statement is likely to have a CONSTANT result, then try - to fold the statement to determine the constant value. */ - /* FIXME. This is the only place that we call ccp_fold. - Since likely_value never returns CONSTANT for calls, we will - not attempt to fold them, including builtins that may profit. */ - if (likelyvalue == CONSTANT) - { - fold_defer_overflow_warnings (); - simplified = ccp_fold (stmt); - if (simplified - && TREE_CODE (simplified) == SSA_NAME) - { - /* We may not use values of something that may be simulated again, - see valueize_op_1. */ - if (SSA_NAME_IS_DEFAULT_DEF (simplified) - || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (simplified))) - { - ccp_prop_value_t *val = get_value (simplified); - if (val && val->lattice_val != VARYING) - { - fold_undefer_overflow_warnings (true, stmt, 0); - return *val; - } - } - else - /* We may also not place a non-valueized copy in the lattice - as that might become stale if we never re-visit this stmt. */ - simplified = NULL_TREE; - } - is_constant = simplified && is_gimple_min_invariant (simplified); - fold_undefer_overflow_warnings (is_constant, stmt, 0); - if (is_constant) - { - /* The statement produced a constant value. 
*/ - val.lattice_val = CONSTANT; - val.value = simplified; - val.mask = 0; - return val; - } - } - /* If the statement is likely to have a VARYING result, then do not - bother folding the statement. */ - else if (likelyvalue == VARYING) - { - enum gimple_code code = gimple_code (stmt); - if (code == GIMPLE_ASSIGN) - { - enum tree_code subcode = gimple_assign_rhs_code (stmt); - - /* Other cases cannot satisfy is_gimple_min_invariant - without folding. */ - if (get_gimple_rhs_class (subcode) == GIMPLE_SINGLE_RHS) - simplified = gimple_assign_rhs1 (stmt); - } - else if (code == GIMPLE_SWITCH) - simplified = gimple_switch_index (as_a <gswitch *> (stmt)); - else - /* These cannot satisfy is_gimple_min_invariant without folding. */ - gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND); - is_constant = simplified && is_gimple_min_invariant (simplified); - if (is_constant) - { - /* The statement produced a constant value. */ - val.lattice_val = CONSTANT; - val.value = simplified; - val.mask = 0; - } - } - /* If the statement result is likely UNDEFINED, make it so. */ - else if (likelyvalue == UNDEFINED) - { - val.lattice_val = UNDEFINED; - val.value = NULL_TREE; - val.mask = 0; - return val; - } - - /* Resort to simplification for bitwise tracking. 
*/ - if (flag_tree_bit_ccp - && (likelyvalue == CONSTANT || is_gimple_call (stmt) - || (gimple_assign_single_p (stmt) - && gimple_assign_rhs_code (stmt) == ADDR_EXPR)) - && !is_constant) - { - enum gimple_code code = gimple_code (stmt); - val.lattice_val = VARYING; - val.value = NULL_TREE; - val.mask = -1; - if (code == GIMPLE_ASSIGN) - { - enum tree_code subcode = gimple_assign_rhs_code (stmt); - tree rhs1 = gimple_assign_rhs1 (stmt); - tree lhs = gimple_assign_lhs (stmt); - if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs)) - || POINTER_TYPE_P (TREE_TYPE (lhs))) - && (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)) - || POINTER_TYPE_P (TREE_TYPE (rhs1)))) - switch (get_gimple_rhs_class (subcode)) - { - case GIMPLE_SINGLE_RHS: - val = get_value_for_expr (rhs1, true); - break; - - case GIMPLE_UNARY_RHS: - val = bit_value_unop (subcode, TREE_TYPE (lhs), rhs1); - break; - - case GIMPLE_BINARY_RHS: - val = bit_value_binop (subcode, TREE_TYPE (lhs), rhs1, - gimple_assign_rhs2 (stmt)); - break; - - default:; - } - } - else if (code == GIMPLE_COND) - { - enum tree_code code = gimple_cond_code (stmt); - tree rhs1 = gimple_cond_lhs (stmt); - tree rhs2 = gimple_cond_rhs (stmt); - if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)) - || POINTER_TYPE_P (TREE_TYPE (rhs1))) - val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2); - } - else if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL)) - { - tree fndecl = gimple_call_fndecl (stmt); - switch (DECL_FUNCTION_CODE (fndecl)) - { - case BUILT_IN_MALLOC: - case BUILT_IN_REALLOC: - case BUILT_IN_CALLOC: - case BUILT_IN_STRDUP: - case BUILT_IN_STRNDUP: - val.lattice_val = CONSTANT; - val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0); - val.mask = ~((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT - / BITS_PER_UNIT - 1); - break; - - CASE_BUILT_IN_ALLOCA: - align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA - ? 
BIGGEST_ALIGNMENT - : TREE_INT_CST_LOW (gimple_call_arg (stmt, 1))); - val.lattice_val = CONSTANT; - val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0); - val.mask = ~((HOST_WIDE_INT) align / BITS_PER_UNIT - 1); - break; - - case BUILT_IN_ASSUME_ALIGNED: - val = bit_value_assume_aligned (stmt, NULL_TREE, val, false); - ignore_return_flags = true; - break; - - case BUILT_IN_ALIGNED_ALLOC: - case BUILT_IN_GOMP_ALLOC: - { - tree align = get_constant_value (gimple_call_arg (stmt, 0)); - if (align - && tree_fits_uhwi_p (align)) - { - unsigned HOST_WIDE_INT aligni = tree_to_uhwi (align); - if (aligni > 1 - /* align must be power-of-two */ - && (aligni & (aligni - 1)) == 0) - { - val.lattice_val = CONSTANT; - val.value = build_int_cst (ptr_type_node, 0); - val.mask = -aligni; - } - } - break; - } - - case BUILT_IN_BSWAP16: - case BUILT_IN_BSWAP32: - case BUILT_IN_BSWAP64: - case BUILT_IN_BSWAP128: - val = get_value_for_expr (gimple_call_arg (stmt, 0), true); - if (val.lattice_val == UNDEFINED) - break; - else if (val.lattice_val == CONSTANT - && val.value - && TREE_CODE (val.value) == INTEGER_CST) - { - tree type = TREE_TYPE (gimple_call_lhs (stmt)); - int prec = TYPE_PRECISION (type); - wide_int wval = wi::to_wide (val.value); - val.value - = wide_int_to_tree (type, - wide_int::from (wval, prec, - UNSIGNED).bswap ()); - val.mask - = widest_int::from (wide_int::from (val.mask, prec, - UNSIGNED).bswap (), - UNSIGNED); - if (wi::sext (val.mask, prec) != -1) - break; - } - val.lattice_val = VARYING; - val.value = NULL_TREE; - val.mask = -1; - break; - - default:; - } - } - if (is_gimple_call (stmt) && gimple_call_lhs (stmt)) - { - tree fntype = gimple_call_fntype (stmt); - if (fntype) - { - tree attrs = lookup_attribute ("assume_aligned", - TYPE_ATTRIBUTES (fntype)); - if (attrs) - val = bit_value_assume_aligned (stmt, attrs, val, false); - attrs = lookup_attribute ("alloc_align", - TYPE_ATTRIBUTES (fntype)); - if (attrs) - val = bit_value_assume_aligned (stmt, 
attrs, val, true); - } - int flags = ignore_return_flags - ? 0 : gimple_call_return_flags (as_a <gcall *> (stmt)); - if (flags & ERF_RETURNS_ARG - && (flags & ERF_RETURN_ARG_MASK) < gimple_call_num_args (stmt)) - { - val = get_value_for_expr - (gimple_call_arg (stmt, - flags & ERF_RETURN_ARG_MASK), true); - } - } - is_constant = (val.lattice_val == CONSTANT); - } - - if (flag_tree_bit_ccp - && ((is_constant && TREE_CODE (val.value) == INTEGER_CST) - || !is_constant) - && gimple_get_lhs (stmt) - && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME) - { - tree lhs = gimple_get_lhs (stmt); - wide_int nonzero_bits = get_nonzero_bits (lhs); - if (nonzero_bits != -1) - { - if (!is_constant) - { - val.lattice_val = CONSTANT; - val.value = build_zero_cst (TREE_TYPE (lhs)); - val.mask = extend_mask (nonzero_bits, TYPE_SIGN (TREE_TYPE (lhs))); - is_constant = true; - } - else - { - if (wi::bit_and_not (wi::to_wide (val.value), nonzero_bits) != 0) - val.value = wide_int_to_tree (TREE_TYPE (lhs), - nonzero_bits - & wi::to_wide (val.value)); - if (nonzero_bits == 0) - val.mask = 0; - else - val.mask = val.mask & extend_mask (nonzero_bits, - TYPE_SIGN (TREE_TYPE (lhs))); - } - } - } - - /* The statement produced a nonconstant value. */ - if (!is_constant) - { - /* The statement produced a copy. */ - if (simplified && TREE_CODE (simplified) == SSA_NAME - && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (simplified)) - { - val.lattice_val = CONSTANT; - val.value = simplified; - val.mask = -1; - } - /* The statement is VARYING. */ - else - { - val.lattice_val = VARYING; - val.value = NULL_TREE; - val.mask = -1; - } - } - - return val; -} - -typedef hash_table<nofree_ptr_hash<gimple> > gimple_htab; - -/* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before - each matching BUILT_IN_STACK_RESTORE. Mark visited phis in VISITED. 
*/ - -static void -insert_clobber_before_stack_restore (tree saved_val, tree var, - gimple_htab **visited) -{ - gimple *stmt; - gassign *clobber_stmt; - tree clobber; - imm_use_iterator iter; - gimple_stmt_iterator i; - gimple **slot; - - FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val) - if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE)) - { - clobber = build_clobber (TREE_TYPE (var)); - clobber_stmt = gimple_build_assign (var, clobber); - - i = gsi_for_stmt (stmt); - gsi_insert_before (&i, clobber_stmt, GSI_SAME_STMT); - } - else if (gimple_code (stmt) == GIMPLE_PHI) - { - if (!*visited) - *visited = new gimple_htab (10); - - slot = (*visited)->find_slot (stmt, INSERT); - if (*slot != NULL) - continue; - - *slot = stmt; - insert_clobber_before_stack_restore (gimple_phi_result (stmt), var, - visited); - } - else if (gimple_assign_ssa_name_copy_p (stmt)) - insert_clobber_before_stack_restore (gimple_assign_lhs (stmt), var, - visited); -} - -/* Advance the iterator to the previous non-debug gimple statement in the same - or dominating basic block. */ - -static inline void -gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i) -{ - basic_block dom; - - gsi_prev_nondebug (i); - while (gsi_end_p (*i)) - { - dom = get_immediate_dominator (CDI_DOMINATORS, gsi_bb (*i)); - if (dom == NULL || dom == ENTRY_BLOCK_PTR_FOR_FN (cfun)) - return; - - *i = gsi_last_bb (dom); - } -} - -/* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert - a clobber of VAR before each matching BUILT_IN_STACK_RESTORE. - - It is possible that BUILT_IN_STACK_SAVE cannot be found in a dominator when - a previous pass (such as DOM) duplicated it along multiple paths to a BB. - In that case the function gives up without inserting the clobbers. 
*/ - -static void -insert_clobbers_for_var (gimple_stmt_iterator i, tree var) -{ - gimple *stmt; - tree saved_val; - gimple_htab *visited = NULL; - - for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (&i)) - { - stmt = gsi_stmt (i); - - if (!gimple_call_builtin_p (stmt, BUILT_IN_STACK_SAVE)) - continue; - - saved_val = gimple_call_lhs (stmt); - if (saved_val == NULL_TREE) - continue; - - insert_clobber_before_stack_restore (saved_val, var, &visited); - break; - } - - delete visited; -} - -/* Detects a __builtin_alloca_with_align with constant size argument. Declares - fixed-size array and returns the address, if found, otherwise returns - NULL_TREE. */ - -static tree -fold_builtin_alloca_with_align (gimple *stmt) -{ - unsigned HOST_WIDE_INT size, threshold, n_elem; - tree lhs, arg, block, var, elem_type, array_type; - - /* Get lhs. */ - lhs = gimple_call_lhs (stmt); - if (lhs == NULL_TREE) - return NULL_TREE; - - /* Detect constant argument. */ - arg = get_constant_value (gimple_call_arg (stmt, 0)); - if (arg == NULL_TREE - || TREE_CODE (arg) != INTEGER_CST - || !tree_fits_uhwi_p (arg)) - return NULL_TREE; - - size = tree_to_uhwi (arg); - - /* Heuristic: don't fold large allocas. */ - threshold = (unsigned HOST_WIDE_INT)param_large_stack_frame; - /* In case the alloca is located at function entry, it has the same lifetime - as a declared array, so we allow a larger size. */ - block = gimple_block (stmt); - if (!(cfun->after_inlining - && block - && TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL)) - threshold /= 10; - if (size > threshold) - return NULL_TREE; - - /* We have to be able to move points-to info. We used to assert - that we can but IPA PTA might end up with two UIDs here - as it might need to handle more than one instance being - live at the same time. Instead of trying to detect this case - (using the first UID would be OK) just give up for now. 
*/ - struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs); - unsigned uid = 0; - if (pi != NULL - && !pi->pt.anything - && !pt_solution_singleton_or_null_p (&pi->pt, &uid)) - return NULL_TREE; - - /* Declare array. */ - elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1); - n_elem = size * 8 / BITS_PER_UNIT; - array_type = build_array_type_nelts (elem_type, n_elem); - - if (tree ssa_name = SSA_NAME_IDENTIFIER (lhs)) - { - /* Give the temporary a name derived from the name of the VLA - declaration so it can be referenced in diagnostics. */ - const char *name = IDENTIFIER_POINTER (ssa_name); - var = create_tmp_var (array_type, name); - } - else - var = create_tmp_var (array_type); - - if (gimple *lhsdef = SSA_NAME_DEF_STMT (lhs)) - { - /* Set the temporary's location to that of the VLA declaration - so it can be pointed to in diagnostics. */ - location_t loc = gimple_location (lhsdef); - DECL_SOURCE_LOCATION (var) = loc; - } - - SET_DECL_ALIGN (var, TREE_INT_CST_LOW (gimple_call_arg (stmt, 1))); - if (uid != 0) - SET_DECL_PT_UID (var, uid); - - /* Fold alloca to the address of the array. */ - return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var)); -} - -/* Fold the stmt at *GSI with CCP specific information that propagating - and regular folding does not catch. */ - -bool -ccp_folder::fold_stmt (gimple_stmt_iterator *gsi) -{ - gimple *stmt = gsi_stmt (*gsi); - - switch (gimple_code (stmt)) - { - case GIMPLE_COND: - { - gcond *cond_stmt = as_a <gcond *> (stmt); - ccp_prop_value_t val; - /* Statement evaluation will handle type mismatches in constants - more gracefully than the final propagation. This allows us to - fold more conditionals here. 
/* Fold the stmt at *GSI with CCP specific information that propagating
   and regular folding does not catch.  Returns true if the statement
   was changed, false otherwise.  */

bool
ccp_folder::fold_stmt (gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
	gcond *cond_stmt = as_a <gcond *> (stmt);
	ccp_prop_value_t val;
	/* Statement evaluation will handle type mismatches in constants
	   more gracefully than the final propagation.  This allows us to
	   fold more conditionals here.  */
	val = evaluate_stmt (stmt);
	/* Only a fully-known constant (no unknown mask bits) lets us
	   pick a branch.  */
	if (val.lattice_val != CONSTANT
	    || val.mask != 0)
	  return false;

	if (dump_file)
	  {
	    fprintf (dump_file, "Folding predicate ");
	    print_gimple_expr (dump_file, stmt, 0);
	    fprintf (dump_file, " to ");
	    print_generic_expr (dump_file, val.value);
	    fprintf (dump_file, "\n");
	  }

	if (integer_zerop (val.value))
	  gimple_cond_make_false (cond_stmt);
	else
	  gimple_cond_make_true (cond_stmt);

	return true;
      }

    case GIMPLE_CALL:
      {
	tree lhs = gimple_call_lhs (stmt);
	int flags = gimple_call_flags (stmt);
	tree val;
	tree argt;
	bool changed = false;
	unsigned i;

	/* If the call was folded into a constant make sure it goes
	   away even if we cannot propagate into all uses because of
	   type issues.  */
	if (lhs
	    && TREE_CODE (lhs) == SSA_NAME
	    && (val = get_constant_value (lhs))
	    /* Don't optimize away calls that have side-effects.  */
	    && (flags & (ECF_CONST|ECF_PURE)) != 0
	    && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
	  {
	    tree new_rhs = unshare_expr (val);
	    if (!useless_type_conversion_p (TREE_TYPE (lhs),
					    TREE_TYPE (new_rhs)))
	      new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
	    gimplify_and_update_call_from_tree (gsi, new_rhs);
	    return true;
	  }

	/* Internal calls provide no argument types, so the extra laxity
	   for normal calls does not apply.  */
	if (gimple_call_internal_p (stmt))
	  return false;

	/* The heuristic of fold_builtin_alloca_with_align differs before and
	   after inlining, so we don't require the arg to be changed into a
	   constant for folding, but just to be constant.  */
	if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN)
	    || gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN_AND_MAX))
	  {
	    tree new_rhs = fold_builtin_alloca_with_align (stmt);
	    if (new_rhs)
	      {
		gimplify_and_update_call_from_tree (gsi, new_rhs);
		/* new_rhs is ADDR_EXPR of the replacement array; dig out
		   the array decl so stack-save regions covering it get
		   explicit clobbers.  */
		tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0),0);
		insert_clobbers_for_var (*gsi, var);
		return true;
	      }
	  }

	/* If there's no extra info from an assume_aligned call,
	   drop it so it doesn't act as otherwise useless dataflow
	   barrier.  */
	if (gimple_call_builtin_p (stmt, BUILT_IN_ASSUME_ALIGNED))
	  {
	    tree ptr = gimple_call_arg (stmt, 0);
	    ccp_prop_value_t ptrval = get_value_for_expr (ptr, true);
	    if (ptrval.lattice_val == CONSTANT
		&& TREE_CODE (ptrval.value) == INTEGER_CST
		&& ptrval.mask != 0)
	      {
		ccp_prop_value_t val
		  = bit_value_assume_aligned (stmt, NULL_TREE, ptrval, false);
		unsigned int ptralign = least_bit_hwi (ptrval.mask.to_uhwi ());
		unsigned int align = least_bit_hwi (val.mask.to_uhwi ());
		/* The call adds no information when the known alignment
		   and the low bits below it are unchanged.  */
		if (ptralign == align
		    && ((TREE_INT_CST_LOW (ptrval.value) & (align - 1))
			== (TREE_INT_CST_LOW (val.value) & (align - 1))))
		  {
		    replace_call_with_value (gsi, ptr);
		    return true;
		  }
	      }
	  }

	/* Propagate into the call arguments.  Compared to replace_uses_in
	   this can use the argument slot types for type verification
	   instead of the current argument type.  We also can safely
	   drop qualifiers here as we are dealing with constants anyway.  */
	argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
	for (i = 0; i < gimple_call_num_args (stmt) && argt;
	     ++i, argt = TREE_CHAIN (argt))
	  {
	    tree arg = gimple_call_arg (stmt, i);
	    if (TREE_CODE (arg) == SSA_NAME
		&& (val = get_constant_value (arg))
		&& useless_type_conversion_p
		     (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
		      TYPE_MAIN_VARIANT (TREE_TYPE (val))))
	      {
		gimple_call_set_arg (stmt, i, unshare_expr (val));
		changed = true;
	      }
	  }

	return changed;
      }

    case GIMPLE_ASSIGN:
      {
	tree lhs = gimple_assign_lhs (stmt);
	tree val;

	/* If we have a load that turned out to be constant replace it
	   as we cannot propagate into all uses in all cases.  */
	if (gimple_assign_single_p (stmt)
	    && TREE_CODE (lhs) == SSA_NAME
	    && (val = get_constant_value (lhs)))
	  {
	    tree rhs = unshare_expr (val);
	    if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
	      rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
	    gimple_assign_set_rhs_from_tree (gsi, rhs);
	    return true;
	  }

	return false;
      }

    default:
      return false;
    }
}
*/ - if (set_lattice_value (lhs, &val)) - { - *output_p = lhs; - if (val.lattice_val == VARYING) - retval = SSA_PROP_VARYING; - else - retval = SSA_PROP_INTERESTING; - } - } - - return retval; -} - - -/* Visit the conditional statement STMT. Return SSA_PROP_INTERESTING - if it can determine which edge will be taken. Otherwise, return - SSA_PROP_VARYING. */ - -static enum ssa_prop_result -visit_cond_stmt (gimple *stmt, edge *taken_edge_p) -{ - ccp_prop_value_t val; - basic_block block; - - block = gimple_bb (stmt); - val = evaluate_stmt (stmt); - if (val.lattice_val != CONSTANT - || val.mask != 0) - return SSA_PROP_VARYING; - - /* Find which edge out of the conditional block will be taken and add it - to the worklist. If no single edge can be determined statically, - return SSA_PROP_VARYING to feed all the outgoing edges to the - propagation engine. */ - *taken_edge_p = find_taken_edge (block, val.value); - if (*taken_edge_p) - return SSA_PROP_INTERESTING; - else - return SSA_PROP_VARYING; -} - - -/* Evaluate statement STMT. If the statement produces an output value and - its evaluation changes the lattice value of its output, return - SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the - output value. - - If STMT is a conditional branch and we can determine its truth - value, set *TAKEN_EDGE_P accordingly. If STMT produces a varying - value, return SSA_PROP_VARYING. */ - -enum ssa_prop_result -ccp_propagate::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p) -{ - tree def; - ssa_op_iter iter; - - if (dump_file && (dump_flags & TDF_DETAILS)) - { - fprintf (dump_file, "\nVisiting statement:\n"); - print_gimple_stmt (dump_file, stmt, 0, dump_flags); - } - - switch (gimple_code (stmt)) - { - case GIMPLE_ASSIGN: - /* If the statement is an assignment that produces a single - output value, evaluate its RHS to see if the lattice value of - its output has changed. 
*/ - return visit_assignment (stmt, output_p); - - case GIMPLE_CALL: - /* A value-returning call also performs an assignment. */ - if (gimple_call_lhs (stmt) != NULL_TREE) - return visit_assignment (stmt, output_p); - break; - - case GIMPLE_COND: - case GIMPLE_SWITCH: - /* If STMT is a conditional branch, see if we can determine - which branch will be taken. */ - /* FIXME. It appears that we should be able to optimize - computed GOTOs here as well. */ - return visit_cond_stmt (stmt, taken_edge_p); - - default: - break; - } - - /* Any other kind of statement is not interesting for constant - propagation and, therefore, not worth simulating. */ - if (dump_file && (dump_flags & TDF_DETAILS)) - fprintf (dump_file, "No interesting values produced. Marked VARYING.\n"); - - /* Definitions made by statements other than assignments to - SSA_NAMEs represent unknown modifications to their outputs. - Mark them VARYING. */ - FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS) - set_value_varying (def); - - return SSA_PROP_VARYING; -} - - -/* Main entry point for SSA Conditional Constant Propagation. If NONZERO_P, - record nonzero bits. */ - -static unsigned int -do_ssa_ccp (bool nonzero_p) -{ - unsigned int todo = 0; - calculate_dominance_info (CDI_DOMINATORS); - - ccp_initialize (); - class ccp_propagate ccp_propagate; - ccp_propagate.ssa_propagate (); - if (ccp_finalize (nonzero_p || flag_ipa_bit_cp)) - { - todo = (TODO_cleanup_cfg | TODO_update_ssa); - - /* ccp_finalize does not preserve loop-closed ssa. 
/* Main entry point for SSA Conditional Constant Propagation.  If NONZERO_P,
   record nonzero bits.  Returns the TODO_* flags for the pass manager:
   TODO_cleanup_cfg | TODO_update_ssa when the function changed, zero
   otherwise.  */

static unsigned int
do_ssa_ccp (bool nonzero_p)
{
  unsigned int todo = 0;
  calculate_dominance_info (CDI_DOMINATORS);

  ccp_initialize ();
  class ccp_propagate ccp_propagate;
  ccp_propagate.ssa_propagate ();
  if (ccp_finalize (nonzero_p || flag_ipa_bit_cp))
    {
      todo = (TODO_cleanup_cfg | TODO_update_ssa);

      /* ccp_finalize does not preserve loop-closed ssa.  */
      loops_state_clear (LOOP_CLOSED_SSA);
    }

  free_dominance_info (CDI_DOMINATORS);
  return todo;
}


namespace {

/* Pass descriptor for the "ccp" GIMPLE pass.  Fields are positional
   (see pass_data in tree-pass.h); do not reorder.  */
const pass_data pass_data_ccp =
{
  GIMPLE_PASS, /* type */
  "ccp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_CCP, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_address_taken, /* todo_flags_finish */
};

/* Pass wrapper around do_ssa_ccp.  The single pass parameter selects
   whether this instance records nonzero bits (set via set_pass_param
   from the pass-instantiation site).  */
class pass_ccp : public gimple_opt_pass
{
public:
  pass_ccp (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_ccp, ctxt), nonzero_p (false)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_ccp (m_ctxt); }
  void set_pass_param (unsigned int n, bool param)
    {
      gcc_assert (n == 0);
      nonzero_p = param;
    }
  virtual bool gate (function *) { return flag_tree_ccp != 0; }
  virtual unsigned int execute (function *) { return do_ssa_ccp (nonzero_p); }

 private:
  /* Determines whether the pass instance records nonzero bits.  */
  bool nonzero_p;
}; // class pass_ccp

} // anon namespace

/* Factory used by the pass manager to instantiate the CCP pass.  */

gimple_opt_pass *
make_pass_ccp (gcc::context *ctxt)
{
  return new pass_ccp (ctxt);
}
*/ - -static tree -optimize_stack_restore (gimple_stmt_iterator i) -{ - tree callee; - gimple *stmt; - - basic_block bb = gsi_bb (i); - gimple *call = gsi_stmt (i); - - if (gimple_code (call) != GIMPLE_CALL - || gimple_call_num_args (call) != 1 - || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME - || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0)))) - return NULL_TREE; - - for (gsi_next (&i); !gsi_end_p (i); gsi_next (&i)) - { - stmt = gsi_stmt (i); - if (gimple_code (stmt) == GIMPLE_ASM) - return NULL_TREE; - if (gimple_code (stmt) != GIMPLE_CALL) - continue; - - callee = gimple_call_fndecl (stmt); - if (!callee - || !fndecl_built_in_p (callee, BUILT_IN_NORMAL) - /* All regular builtins are ok, just obviously not alloca. */ - || ALLOCA_FUNCTION_CODE_P (DECL_FUNCTION_CODE (callee))) - return NULL_TREE; - - if (fndecl_built_in_p (callee, BUILT_IN_STACK_RESTORE)) - goto second_stack_restore; - } - - if (!gsi_end_p (i)) - return NULL_TREE; - - /* Allow one successor of the exit block, or zero successors. */ - switch (EDGE_COUNT (bb->succs)) - { - case 0: - break; - case 1: - if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)) - return NULL_TREE; - break; - default: - return NULL_TREE; - } - second_stack_restore: - - /* If there's exactly one use, then zap the call to __builtin_stack_save. - If there are multiple uses, then the last one should remove the call. - In any case, whether the call to __builtin_stack_save can be removed - or not is irrelevant to removing the call to __builtin_stack_restore. 
*/ - if (has_single_use (gimple_call_arg (call, 0))) - { - gimple *stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0)); - if (is_gimple_call (stack_save)) - { - callee = gimple_call_fndecl (stack_save); - if (callee && fndecl_built_in_p (callee, BUILT_IN_STACK_SAVE)) - { - gimple_stmt_iterator stack_save_gsi; - tree rhs; - - stack_save_gsi = gsi_for_stmt (stack_save); - rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0); - replace_call_with_value (&stack_save_gsi, rhs); - } - } - } - - /* No effect, so the statement will be deleted. */ - return integer_zero_node; -} - -/* If va_list type is a simple pointer and nothing special is needed, - optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0), - __builtin_va_end (&ap) out as NOP and __builtin_va_copy into a simple - pointer assignment. */ - -static tree -optimize_stdarg_builtin (gimple *call) -{ - tree callee, lhs, rhs, cfun_va_list; - bool va_list_simple_ptr; - location_t loc = gimple_location (call); - - callee = gimple_call_fndecl (call); - - cfun_va_list = targetm.fn_abi_va_list (callee); - va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list) - && (TREE_TYPE (cfun_va_list) == void_type_node - || TREE_TYPE (cfun_va_list) == char_type_node); - - switch (DECL_FUNCTION_CODE (callee)) - { - case BUILT_IN_VA_START: - if (!va_list_simple_ptr - || targetm.expand_builtin_va_start != NULL - || !builtin_decl_explicit_p (BUILT_IN_NEXT_ARG)) - return NULL_TREE; - - if (gimple_call_num_args (call) != 2) - return NULL_TREE; - - lhs = gimple_call_arg (call, 0); - if (!POINTER_TYPE_P (TREE_TYPE (lhs)) - || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs))) - != TYPE_MAIN_VARIANT (cfun_va_list)) - return NULL_TREE; - - lhs = build_fold_indirect_ref_loc (loc, lhs); - rhs = build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_NEXT_ARG), - 1, integer_zero_node); - rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs); - return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs); - - case 
BUILT_IN_VA_COPY: - if (!va_list_simple_ptr) - return NULL_TREE; - - if (gimple_call_num_args (call) != 2) - return NULL_TREE; - - lhs = gimple_call_arg (call, 0); - if (!POINTER_TYPE_P (TREE_TYPE (lhs)) - || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs))) - != TYPE_MAIN_VARIANT (cfun_va_list)) - return NULL_TREE; - - lhs = build_fold_indirect_ref_loc (loc, lhs); - rhs = gimple_call_arg (call, 1); - if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs)) - != TYPE_MAIN_VARIANT (cfun_va_list)) - return NULL_TREE; - - rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs); - return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs); - - case BUILT_IN_VA_END: - /* No effect, so the statement will be deleted. */ - return integer_zero_node; - - default: - gcc_unreachable (); - } -} - -/* Attemp to make the block of __builtin_unreachable I unreachable by changing - the incoming jumps. Return true if at least one jump was changed. */ - -static bool -optimize_unreachable (gimple_stmt_iterator i) -{ - basic_block bb = gsi_bb (i); - gimple_stmt_iterator gsi; - gimple *stmt; - edge_iterator ei; - edge e; - bool ret; - - if (flag_sanitize & SANITIZE_UNREACHABLE) - return false; - - for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) - { - stmt = gsi_stmt (gsi); - - if (is_gimple_debug (stmt)) - continue; - - if (glabel *label_stmt = dyn_cast <glabel *> (stmt)) - { - /* Verify we do not need to preserve the label. */ - if (FORCED_LABEL (gimple_label_label (label_stmt))) - return false; - - continue; - } - - /* Only handle the case that __builtin_unreachable is the first statement - in the block. We rely on DCE to remove stmts without side-effects - before __builtin_unreachable. 
/* Attempt to make the block of __builtin_unreachable I unreachable by changing
   the incoming jumps.  Return true if at least one jump was changed.  */

static bool
optimize_unreachable (gimple_stmt_iterator i)
{
  basic_block bb = gsi_bb (i);
  gimple_stmt_iterator gsi;
  gimple *stmt;
  edge_iterator ei;
  edge e;
  bool ret;

  /* With -fsanitize=unreachable the builtin must stay reachable so the
     runtime check can fire.  */
  if (flag_sanitize & SANITIZE_UNREACHABLE)
    return false;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      stmt = gsi_stmt (gsi);

      if (is_gimple_debug (stmt))
	continue;

      if (glabel *label_stmt = dyn_cast <glabel *> (stmt))
	{
	  /* Verify we do not need to preserve the label.  */
	  if (FORCED_LABEL (gimple_label_label (label_stmt)))
	    return false;

	  continue;
	}

      /* Only handle the case that __builtin_unreachable is the first statement
	 in the block.  We rely on DCE to remove stmts without side-effects
	 before __builtin_unreachable.  */
      if (gsi_stmt (gsi) != gsi_stmt (i))
	return false;
    }

  /* Redirect each conditional predecessor away from this block by
     forcing the branch condition to the opposite constant.  */
  ret = false;
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      gsi = gsi_last_bb (e->src);
      if (gsi_end_p (gsi))
	continue;

      stmt = gsi_stmt (gsi);
      if (gcond *cond_stmt = dyn_cast <gcond *> (stmt))
	{
	  if (e->flags & EDGE_TRUE_VALUE)
	    gimple_cond_make_false (cond_stmt);
	  else if (e->flags & EDGE_FALSE_VALUE)
	    gimple_cond_make_true (cond_stmt);
	  else
	    gcc_unreachable ();
	  update_stmt (cond_stmt);
	}
      else
	{
	  /* Todo: handle other cases.  Note that unreachable switch case
	     statements have already been removed.  */
	  continue;
	}

      ret = true;
    }

  return ret;
}
/* Convert
     _1 = __atomic_fetch_or_* (ptr_6, 1, _3);
     _7 = ~_1;
     _5 = (_Bool) _7;
   to
     _1 = __atomic_fetch_or_* (ptr_6, 1, _3);
     _8 = _1 & 1;
     _5 = _8 == 0;
   and convert
     _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);
     _7 = ~_1;
     _4 = (_Bool) _7;
   to
     _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);
     _8 = _1 & 1;
     _4 = (_Bool) _8;

   USE_STMT is the gimple statement which uses the return value of
   __atomic_fetch_or_*.  LHS is the return value of __atomic_fetch_or_*.
   MASK is the mask passed to __atomic_fetch_or_*.

   Returns the newly built BIT_AND assignment on success, nullptr if
   the pattern does not match (in which case the IL is unchanged).  */

static gimple *
convert_atomic_bit_not (enum internal_fn fn, gimple *use_stmt,
			tree lhs, tree mask)
{
  tree and_mask;
  if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
    {
      /* MASK must be ~1.  */
      if (!operand_equal_p (build_int_cst (TREE_TYPE (lhs),
					   ~HOST_WIDE_INT_1), mask, 0))
	return nullptr;
      and_mask = build_int_cst (TREE_TYPE (lhs), 1);
    }
  else
    {
      /* MASK must be 1.  */
      if (!operand_equal_p (build_int_cst (TREE_TYPE (lhs), 1), mask, 0))
	return nullptr;
      and_mask = mask;
    }

  tree use_lhs = gimple_assign_lhs (use_stmt);

  use_operand_p use_p;
  gimple *use_not_stmt;

  /* The BIT_NOT result must feed a single conversion to _Bool.  */
  if (!single_imm_use (use_lhs, &use_p, &use_not_stmt)
      || !is_gimple_assign (use_not_stmt))
    return nullptr;

  if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (use_not_stmt)))
    return nullptr;

  tree use_not_lhs = gimple_assign_lhs (use_not_stmt);
  if (TREE_CODE (TREE_TYPE (use_not_lhs)) != BOOLEAN_TYPE)
    return nullptr;

  /* Replace the BIT_NOT stmt with _8 = _1 & and_mask, and the
     conversion with _5 = _8 == 0.  The insert/remove order matters:
     the new stmts are placed relative to USE_NOT_STMT before it is
     removed.  */
  gimple_stmt_iterator gsi;
  gsi = gsi_for_stmt (use_stmt);
  gsi_remove (&gsi, true);
  tree var = make_ssa_name (TREE_TYPE (lhs));
  use_stmt = gimple_build_assign (var, BIT_AND_EXPR, lhs, and_mask);
  gsi = gsi_for_stmt (use_not_stmt);
  gsi_insert_before (&gsi, use_stmt, GSI_NEW_STMT);
  lhs = gimple_assign_lhs (use_not_stmt);
  gimple *g = gimple_build_assign (lhs, EQ_EXPR, var,
				   build_zero_cst (TREE_TYPE (mask)));
  gsi_insert_after (&gsi, g, GSI_NEW_STMT);
  gsi = gsi_for_stmt (use_not_stmt);
  gsi_remove (&gsi, true);
  return use_stmt;
}

/* match.pd function to match atomic_bit_test_and pattern which
   has nop_convert:
     _1 = __atomic_fetch_or_4 (&v, 1, 0);
     _2 = (int) _1;
     _5 = _2 & 1;
 */
extern bool gimple_nop_atomic_bit_test_and_p (tree, tree *,
					      tree (*) (tree));
extern bool gimple_nop_convert (tree, tree*, tree (*) (tree));
/* Optimize
     mask_2 = 1 << cnt_1;
     _4 = __atomic_fetch_or_* (ptr_6, mask_2, _3);
     _5 = _4 & mask_2;
   to
     _4 = .ATOMIC_BIT_TEST_AND_SET (ptr_6, cnt_1, 0, _3);
     _5 = _4;
   If _5 is only used in _5 != 0 or _5 == 0 comparisons, 1
   is passed instead of 0, and the builtin just returns a zero
   or 1 value instead of the actual bit.
   Similarly for __sync_fetch_and_or_* (without the ", _3" part
   in there), and/or if mask_2 is a power of 2 constant.
   Similarly for xor instead of or, use ATOMIC_BIT_TEST_AND_COMPLEMENT
   in that case.  And similarly for and instead of or, except that
   the second argument to the builtin needs to be one's complement
   of the mask instead of mask.

   GSIP points at the __atomic/__sync call; FN is the internal function
   to emit; HAS_MODEL_ARG says whether the call carries a memory-model
   argument; AFTER distinguishes the *_fetch_* (value after the
   operation) from the fetch_* (value before) forms.  Returns true and
   rewrites the IL if the pattern matched, false (IL unchanged, except
   possibly via convert_atomic_bit_not) otherwise.  */

static bool
optimize_atomic_bit_test_and (gimple_stmt_iterator *gsip,
			      enum internal_fn fn, bool has_model_arg,
			      bool after)
{
  gimple *call = gsi_stmt (*gsip);
  tree lhs = gimple_call_lhs (call);
  use_operand_p use_p;
  gimple *use_stmt;
  tree mask;
  optab optab;

  /* The call result must have exactly one (assignment) use and the call
     must be a normal builtin with a vdef.  */
  if (!flag_inline_atomics
      || optimize_debug
      || !gimple_call_builtin_p (call, BUILT_IN_NORMAL)
      || !lhs
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
      || !single_imm_use (lhs, &use_p, &use_stmt)
      || !is_gimple_assign (use_stmt)
      || !gimple_vdef (call))
    return false;

  switch (fn)
    {
    case IFN_ATOMIC_BIT_TEST_AND_SET:
      optab = atomic_bit_test_and_set_optab;
      break;
    case IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT:
      optab = atomic_bit_test_and_complement_optab;
      break;
    case IFN_ATOMIC_BIT_TEST_AND_RESET:
      optab = atomic_bit_test_and_reset_optab;
      break;
    default:
      return false;
    }

  /* BIT becomes the bit number argument of the internal call; it is
     either derived below from the mask or taken from a 1 << cnt def.  */
  tree bit = nullptr;

  mask = gimple_call_arg (call, 1);
  tree_code rhs_code = gimple_assign_rhs_code (use_stmt);
  if (rhs_code != BIT_AND_EXPR)
    {
      /* Handle the forms where the call result is consumed through a
	 conversion or a BIT_NOT before (or instead of) the mask AND.  */
      if (rhs_code != NOP_EXPR && rhs_code != BIT_NOT_EXPR)
	return false;

      tree use_lhs = gimple_assign_lhs (use_stmt);
      if (TREE_CODE (use_lhs) == SSA_NAME
	  && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs))
	return false;

      tree use_rhs = gimple_assign_rhs1 (use_stmt);
      if (lhs != use_rhs)
	return false;

      if (optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs)))
	  == CODE_FOR_nothing)
	return false;

      gimple *g;
      gimple_stmt_iterator gsi;
      tree var;
      int ibit = -1;

      if (rhs_code == BIT_NOT_EXPR)
	{
	  g = convert_atomic_bit_not (fn, use_stmt, lhs, mask);
	  if (!g)
	    return false;
	  use_stmt = g;
	  ibit = 0;
	}
      else if (TREE_CODE (TREE_TYPE (use_lhs)) == BOOLEAN_TYPE)
	{
	  tree and_mask;
	  if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
	    {
	      /* MASK must be ~1.  */
	      if (!operand_equal_p (build_int_cst (TREE_TYPE (lhs),
						   ~HOST_WIDE_INT_1),
				    mask, 0))
		return false;

	      /* Convert
		   _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);
		   _4 = (_Bool) _1;
		 to
		   _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);
		   _5 = _1 & 1;
		   _4 = (_Bool) _5;
	       */
	      and_mask = build_int_cst (TREE_TYPE (lhs), 1);
	    }
	  else
	    {
	      and_mask = build_int_cst (TREE_TYPE (lhs), 1);
	      if (!operand_equal_p (and_mask, mask, 0))
		return false;

	      /* Convert
		   _1 = __atomic_fetch_or_* (ptr_6, 1, _3);
		   _4 = (_Bool) _1;
		 to
		   _1 = __atomic_fetch_or_* (ptr_6, 1, _3);
		   _5 = _1 & 1;
		   _4 = (_Bool) _5;
	       */
	    }
	  var = make_ssa_name (TREE_TYPE (use_rhs));
	  replace_uses_by (use_rhs, var);
	  g = gimple_build_assign (var, BIT_AND_EXPR, use_rhs,
				   and_mask);
	  gsi = gsi_for_stmt (use_stmt);
	  gsi_insert_before (&gsi, g, GSI_NEW_STMT);
	  use_stmt = g;
	  ibit = 0;
	}
      else if (TYPE_PRECISION (TREE_TYPE (use_lhs))
	       <= TYPE_PRECISION (TREE_TYPE (use_rhs)))
	{
	  gimple *use_nop_stmt;
	  if (!single_imm_use (use_lhs, &use_p, &use_nop_stmt)
	      || !is_gimple_assign (use_nop_stmt))
	    return false;
	  tree use_nop_lhs = gimple_assign_lhs (use_nop_stmt);
	  rhs_code = gimple_assign_rhs_code (use_nop_stmt);
	  if (rhs_code != BIT_AND_EXPR)
	    {
	      if (TREE_CODE (use_nop_lhs) == SSA_NAME
		  && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_nop_lhs))
		return false;
	      if (rhs_code == BIT_NOT_EXPR)
		{
		  g = convert_atomic_bit_not (fn, use_nop_stmt, lhs,
					      mask);
		  if (!g)
		    return false;
		  /* Convert
		       _1 = __atomic_fetch_or_4 (ptr_6, 1, _3);
		       _2 = (int) _1;
		       _7 = ~_2;
		       _5 = (_Bool) _7;
		     to
		       _1 = __atomic_fetch_or_4 (ptr_6, ~1, _3);
		       _8 = _1 & 1;
		       _5 = _8 == 0;
		     and convert
		       _1 = __atomic_fetch_and_4 (ptr_6, ~1, _3);
		       _2 = (int) _1;
		       _7 = ~_2;
		       _5 = (_Bool) _7;
		     to
		       _1 = __atomic_fetch_and_4 (ptr_6, 1, _3);
		       _8 = _1 & 1;
		       _5 = _8 == 0;
		   */
		  gsi = gsi_for_stmt (use_stmt);
		  gsi_remove (&gsi, true);
		  use_stmt = g;
		  ibit = 0;
		}
	      else
		{
		  if (TREE_CODE (TREE_TYPE (use_nop_lhs)) != BOOLEAN_TYPE)
		    return false;
		  if (rhs_code != GE_EXPR && rhs_code != LT_EXPR)
		    return false;
		  tree cmp_rhs1 = gimple_assign_rhs1 (use_nop_stmt);
		  if (use_lhs != cmp_rhs1)
		    return false;
		  tree cmp_rhs2 = gimple_assign_rhs2 (use_nop_stmt);
		  if (!integer_zerop (cmp_rhs2))
		    return false;

		  tree and_mask;

		  /* A signed < 0 / >= 0 test inspects the sign bit, i.e.
		     the highest bit of the USE_RHS type.  */
		  unsigned HOST_WIDE_INT bytes
		    = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (use_rhs)));
		  ibit = bytes * BITS_PER_UNIT - 1;
		  unsigned HOST_WIDE_INT highest
		    = HOST_WIDE_INT_1U << ibit;

		  if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
		    {
		      /* Get the signed maximum of the USE_RHS type.  */
		      and_mask = build_int_cst (TREE_TYPE (use_rhs),
						highest - 1);
		      if (!operand_equal_p (and_mask, mask, 0))
			return false;

		      /* Convert
			   _1 = __atomic_fetch_and_4 (ptr_6, 0x7fffffff, _3);
			   _5 = (signed int) _1;
			   _4 = _5 < 0 or _5 >= 0;
			 to
			   _1 = __atomic_fetch_and_4 (ptr_6, 0x7fffffff, _3);
			   _6 = _1 & 0x80000000;
			   _4 = _6 != 0 or _6 == 0;
		       */
		      and_mask = build_int_cst (TREE_TYPE (use_rhs),
						highest);
		    }
		  else
		    {
		      /* Get the signed minimum of the USE_RHS type.  */
		      and_mask = build_int_cst (TREE_TYPE (use_rhs),
						highest);
		      if (!operand_equal_p (and_mask, mask, 0))
			return false;

		      /* Convert
			   _1 = __atomic_fetch_or_4 (ptr_6, 0x80000000, _3);
			   _5 = (signed int) _1;
			   _4 = _5 < 0 or _5 >= 0;
			 to
			   _1 = __atomic_fetch_or_4 (ptr_6, 0x80000000, _3);
			   _6 = _1 & 0x80000000;
			   _4 = _6 != 0 or _6 == 0;
		       */
		    }
		  var = make_ssa_name (TREE_TYPE (use_rhs));
		  gsi = gsi_for_stmt (use_stmt);
		  gsi_remove (&gsi, true);
		  g = gimple_build_assign (var, BIT_AND_EXPR, use_rhs,
					   and_mask);
		  gsi = gsi_for_stmt (use_nop_stmt);
		  gsi_insert_before (&gsi, g, GSI_NEW_STMT);
		  use_stmt = g;
		  g = gimple_build_assign (use_nop_lhs,
					   (rhs_code == GE_EXPR
					    ? EQ_EXPR : NE_EXPR),
					   var,
					   build_zero_cst (TREE_TYPE (use_rhs)));
		  gsi_insert_after (&gsi, g, GSI_NEW_STMT);
		  gsi = gsi_for_stmt (use_nop_stmt);
		  gsi_remove (&gsi, true);
		}
	    }
	  else
	    {
	      tree match_op[3];
	      gimple *g;
	      if (!gimple_nop_atomic_bit_test_and_p (use_nop_lhs,
						     &match_op[0], NULL)
		  || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (match_op[2])
		  || !single_imm_use (match_op[2], &use_p, &g)
		  || !is_gimple_assign (g))
		return false;
	      mask = match_op[0];
	      if (TREE_CODE (match_op[1]) == INTEGER_CST)
		{
		  ibit = tree_log2 (match_op[1]);
		  gcc_assert (ibit >= 0);
		}
	      else
		{
		  g = SSA_NAME_DEF_STMT (match_op[1]);
		  gcc_assert (is_gimple_assign (g));
		  bit = gimple_assign_rhs2 (g);
		}
	      /* Convert
		   _1 = __atomic_fetch_or_4 (ptr_6, mask, _3);
		   _2 = (int) _1;
		   _5 = _2 & mask;
		 to
		   _1 = __atomic_fetch_or_4 (ptr_6, mask, _3);
		   _6 = _1 & mask;
		   _5 = (int) _6;
		 and convert
		   _1 = ~mask_7;
		   _2 = (unsigned int) _1;
		   _3 = __atomic_fetch_and_4 (ptr_6, _2, 0);
		   _4 = (int) _3;
		   _5 = _4 & mask_7;
		 to
		   _1 = __atomic_fetch_and_* (ptr_6, ~mask_7, _3);
		   _12 = _3 & mask_7;
		   _5 = (int) _12;

		 and Convert
		   _1 = __atomic_fetch_and_4 (ptr_6, ~mask, _3);
		   _2 = (short int) _1;
		   _5 = _2 & mask;
		 to
		   _1 = __atomic_fetch_and_4 (ptr_6, ~mask, _3);
		   _8 = _1 & mask;
		   _5 = (short int) _8;
	       */
	      gimple_seq stmts = NULL;
	      match_op[1] = gimple_convert (&stmts,
					    TREE_TYPE (use_rhs),
					    match_op[1]);
	      var = gimple_build (&stmts, BIT_AND_EXPR,
				  TREE_TYPE (use_rhs), use_rhs, match_op[1]);
	      gsi = gsi_for_stmt (use_stmt);
	      gsi_remove (&gsi, true);
	      release_defs (use_stmt);
	      use_stmt = gimple_seq_last_stmt (stmts);
	      gsi = gsi_for_stmt (use_nop_stmt);
	      gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
	      gimple_assign_set_rhs_with_ops (&gsi, CONVERT_EXPR, var);
	      update_stmt (use_nop_stmt);
	    }
	}
      else
	return false;

      if (!bit)
	{
	  if (ibit < 0)
	    gcc_unreachable ();
	  bit = build_int_cst (TREE_TYPE (lhs), ibit);
	}
    }
  else if (optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs)))
	   == CODE_FOR_nothing)
    return false;

  tree use_lhs = gimple_assign_lhs (use_stmt);
  if (!use_lhs)
    return false;

  if (!bit)
    {
      /* Derive the bit number from the mask: either a power-of-two
	 constant or an SSA name defined by 1 << cnt (possibly through
	 a nop conversion, and inverted for the AND/reset form).  */
      if (TREE_CODE (mask) == INTEGER_CST)
	{
	  if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
	    mask = const_unop (BIT_NOT_EXPR, TREE_TYPE (mask), mask);
	  mask = fold_convert (TREE_TYPE (lhs), mask);
	  int ibit = tree_log2 (mask);
	  if (ibit < 0)
	    return false;
	  bit = build_int_cst (TREE_TYPE (lhs), ibit);
	}
      else if (TREE_CODE (mask) == SSA_NAME)
	{
	  gimple *g = SSA_NAME_DEF_STMT (mask);
	  tree match_op;
	  if (gimple_nop_convert (mask, &match_op, NULL))
	    {
	      mask = match_op;
	      if (TREE_CODE (mask) != SSA_NAME)
		return false;
	      g = SSA_NAME_DEF_STMT (mask);
	    }
	  if (!is_gimple_assign (g))
	    return false;

	  if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
	    {
	      if (gimple_assign_rhs_code (g) != BIT_NOT_EXPR)
		return false;
	      mask = gimple_assign_rhs1 (g);
	      if (TREE_CODE (mask) != SSA_NAME)
		return false;
	      g = SSA_NAME_DEF_STMT (mask);
	    }

	  if (!is_gimple_assign (g)
	      || gimple_assign_rhs_code (g) != LSHIFT_EXPR
	      || !integer_onep (gimple_assign_rhs1 (g)))
	    return false;
	  bit = gimple_assign_rhs2 (g);
	}
      else
	return false;

      /* The AND in USE_STMT must mask with the same value the call
	 or'ed/and'ed in (modulo nop conversions).  */
      tree cmp_mask;
      if (gimple_assign_rhs1 (use_stmt) == lhs)
	cmp_mask = gimple_assign_rhs2 (use_stmt);
      else
	cmp_mask = gimple_assign_rhs1 (use_stmt);

      tree match_op;
      if (gimple_nop_convert (cmp_mask, &match_op, NULL))
	cmp_mask = match_op;

      if (!operand_equal_p (cmp_mask, mask, 0))
	return false;
    }

  /* Decide whether the internal call may return just 0/1 (USE_BOOL):
     allowed only when every use of the masked value is a comparison
     against zero.  */
  bool use_bool = true;
  bool has_debug_uses = false;
  imm_use_iterator iter;
  gimple *g;

  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs))
    use_bool = false;
  FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
    {
      enum tree_code code = ERROR_MARK;
      tree op0 = NULL_TREE, op1 = NULL_TREE;
      if (is_gimple_debug (g))
	{
	  has_debug_uses = true;
	  continue;
	}
      else if (is_gimple_assign (g))
	switch (gimple_assign_rhs_code (g))
	  {
	  case COND_EXPR:
	    op1 = gimple_assign_rhs1 (g);
	    code = TREE_CODE (op1);
	    if (TREE_CODE_CLASS (code) != tcc_comparison)
	      break;
	    op0 = TREE_OPERAND (op1, 0);
	    op1 = TREE_OPERAND (op1, 1);
	    break;
	  case EQ_EXPR:
	  case NE_EXPR:
	    code = gimple_assign_rhs_code (g);
	    op0 = gimple_assign_rhs1 (g);
	    op1 = gimple_assign_rhs2 (g);
	    break;
	  default:
	    break;
	  }
      else if (gimple_code (g) == GIMPLE_COND)
	{
	  code = gimple_cond_code (g);
	  op0 = gimple_cond_lhs (g);
	  op1 = gimple_cond_rhs (g);
	}

      if ((code == EQ_EXPR || code == NE_EXPR)
	  && op0 == use_lhs
	  && integer_zerop (op1))
	{
	  use_operand_p use_p;
	  int n = 0;
	  FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
	    n++;
	  if (n == 1)
	    continue;
	}

      use_bool = false;
      break;
    }

  /* Emit the internal call right after the original builtin call.  */
  tree new_lhs = make_ssa_name (TREE_TYPE (lhs));
  tree flag = build_int_cst (TREE_TYPE (lhs), use_bool);
  if (has_model_arg)
    g = gimple_build_call_internal (fn, 4, gimple_call_arg (call, 0),
				    bit, flag, gimple_call_arg (call, 2));
  else
    g = gimple_build_call_internal (fn, 3, gimple_call_arg (call, 0),
				    bit, flag);
  gimple_call_set_lhs (g, new_lhs);
  gimple_set_location (g, gimple_location (call));
  gimple_move_vops (g, call);
  bool throws = stmt_can_throw_internal (cfun, call);
  gimple_call_set_nothrow (as_a <gcall *> (g),
			   gimple_call_nothrow_p (as_a <gcall *> (call)));
  gimple_stmt_iterator gsi = *gsip;
  gsi_insert_after (&gsi, g, GSI_NEW_STMT);
  edge e = NULL;
  if (throws)
    {
      maybe_clean_or_replace_eh_stmt (call, g);
      if (after || (use_bool && has_debug_uses))
	e = find_fallthru_edge (gsi_bb (gsi)->succs);
    }
  if (after)
    {
      /* The internal function returns the value of the specified bit
	 before the atomic operation.  If we are interested in the value
	 of the specified bit after the atomic operation (makes only sense
	 for xor, otherwise the bit content is compile time known),
	 we need to invert the bit.  */
      tree mask_convert = mask;
      gimple_seq stmts = NULL;
      if (!use_bool)
	mask_convert = gimple_convert (&stmts, TREE_TYPE (lhs), mask);
      new_lhs = gimple_build (&stmts, BIT_XOR_EXPR, TREE_TYPE (lhs), new_lhs,
			      use_bool ? build_int_cst (TREE_TYPE (lhs), 1)
				       : mask_convert);
      if (throws)
	{
	  gsi_insert_seq_on_edge_immediate (e, stmts);
	  gsi = gsi_for_stmt (gimple_seq_last (stmts));
	}
      else
	gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT);
    }
  if (use_bool && has_debug_uses)
    {
      /* Keep debug info alive: bind a debug expr to new_lhs << bit,
	 which reconstructs the original masked value.  */
      tree temp = NULL_TREE;
      if (!throws || after || single_pred_p (e->dest))
	{
	  temp = build_debug_expr_decl (TREE_TYPE (lhs));
	  tree t = build2 (LSHIFT_EXPR, TREE_TYPE (lhs), new_lhs, bit);
	  g = gimple_build_debug_bind (temp, t, g);
	  if (throws && !after)
	    {
	      gsi = gsi_after_labels (e->dest);
	      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
	    }
	  else
	    gsi_insert_after (&gsi, g, GSI_NEW_STMT);
	}
      FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
	if (is_gimple_debug (g))
	  {
	    use_operand_p use_p;
	    if (temp == NULL_TREE)
	      gimple_debug_bind_reset_value (g);
	    else
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, temp);
	    update_stmt (g);
	  }
    }
  SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_lhs)
    = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs);
  replace_uses_by (use_lhs, new_lhs);
  gsi = gsi_for_stmt (use_stmt);
  gsi_remove (&gsi, true);
  release_defs (use_stmt);
  gsi_remove (gsip, true);
  release_ssa_name (lhs);
  return true;
}

/* Optimize
     _4 = __atomic_add_fetch_* (ptr_6, arg_2, _3);
     _5 = _4 == 0;
   to
     _4 = .ATOMIC_ADD_FETCH_CMP_0 (EQ_EXPR, ptr_6, arg_2, _3);
     _5 = _4;
   Similarly for __sync_add_and_fetch_* (without the ", _3" part
   in there).
*/ - -static bool -optimize_atomic_op_fetch_cmp_0 (gimple_stmt_iterator *gsip, - enum internal_fn fn, bool has_model_arg) -{ - gimple *call = gsi_stmt (*gsip); - tree lhs = gimple_call_lhs (call); - use_operand_p use_p; - gimple *use_stmt; - - if (!flag_inline_atomics - || optimize_debug - || !gimple_call_builtin_p (call, BUILT_IN_NORMAL) - || !lhs - || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs) - || !single_imm_use (lhs, &use_p, &use_stmt) - || !gimple_vdef (call)) - return false; - - optab optab; - switch (fn) - { - case IFN_ATOMIC_ADD_FETCH_CMP_0: - optab = atomic_add_fetch_cmp_0_optab; - break; - case IFN_ATOMIC_SUB_FETCH_CMP_0: - optab = atomic_sub_fetch_cmp_0_optab; - break; - case IFN_ATOMIC_AND_FETCH_CMP_0: - optab = atomic_and_fetch_cmp_0_optab; - break; - case IFN_ATOMIC_OR_FETCH_CMP_0: - optab = atomic_or_fetch_cmp_0_optab; - break; - case IFN_ATOMIC_XOR_FETCH_CMP_0: - optab = atomic_xor_fetch_cmp_0_optab; - break; - default: - return false; - } - - if (optab_handler (optab, TYPE_MODE (TREE_TYPE (lhs))) - == CODE_FOR_nothing) - return false; - - tree use_lhs = lhs; - if (gimple_assign_cast_p (use_stmt)) - { - use_lhs = gimple_assign_lhs (use_stmt); - if (!tree_nop_conversion_p (TREE_TYPE (use_lhs), TREE_TYPE (lhs)) - || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs)) - && !POINTER_TYPE_P (TREE_TYPE (use_lhs))) - || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs) - || !single_imm_use (use_lhs, &use_p, &use_stmt)) - return false; - } - enum tree_code code = ERROR_MARK; - tree op0 = NULL_TREE, op1 = NULL_TREE; - if (is_gimple_assign (use_stmt)) - switch (gimple_assign_rhs_code (use_stmt)) - { - case COND_EXPR: - op1 = gimple_assign_rhs1 (use_stmt); - code = TREE_CODE (op1); - if (TREE_CODE_CLASS (code) == tcc_comparison) - { - op0 = TREE_OPERAND (op1, 0); - op1 = TREE_OPERAND (op1, 1); - } - break; - default: - code = gimple_assign_rhs_code (use_stmt); - if (TREE_CODE_CLASS (code) == tcc_comparison) - { - op0 = gimple_assign_rhs1 (use_stmt); - op1 = gimple_assign_rhs2 
(use_stmt); - } - break; - } - else if (gimple_code (use_stmt) == GIMPLE_COND) - { - code = gimple_cond_code (use_stmt); - op0 = gimple_cond_lhs (use_stmt); - op1 = gimple_cond_rhs (use_stmt); - } - - switch (code) - { - case LT_EXPR: - case LE_EXPR: - case GT_EXPR: - case GE_EXPR: - if (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs)) - || TREE_CODE (TREE_TYPE (use_lhs)) == BOOLEAN_TYPE - || TYPE_UNSIGNED (TREE_TYPE (use_lhs))) - return false; - /* FALLTHRU */ - case EQ_EXPR: - case NE_EXPR: - if (op0 == use_lhs && integer_zerop (op1)) - break; - return false; - default: - return false; - } - - int encoded; - switch (code) - { - /* Use special encoding of the operation. We want to also - encode the mode in the first argument and for neither EQ_EXPR - etc. nor EQ etc. we can rely it will fit into QImode. */ - case EQ_EXPR: encoded = ATOMIC_OP_FETCH_CMP_0_EQ; break; - case NE_EXPR: encoded = ATOMIC_OP_FETCH_CMP_0_NE; break; - case LT_EXPR: encoded = ATOMIC_OP_FETCH_CMP_0_LT; break; - case LE_EXPR: encoded = ATOMIC_OP_FETCH_CMP_0_LE; break; - case GT_EXPR: encoded = ATOMIC_OP_FETCH_CMP_0_GT; break; - case GE_EXPR: encoded = ATOMIC_OP_FETCH_CMP_0_GE; break; - default: gcc_unreachable (); - } - - tree new_lhs = make_ssa_name (boolean_type_node); - gimple *g; - tree flag = build_int_cst (TREE_TYPE (lhs), encoded); - if (has_model_arg) - g = gimple_build_call_internal (fn, 4, flag, - gimple_call_arg (call, 0), - gimple_call_arg (call, 1), - gimple_call_arg (call, 2)); - else - g = gimple_build_call_internal (fn, 3, flag, - gimple_call_arg (call, 0), - gimple_call_arg (call, 1)); - gimple_call_set_lhs (g, new_lhs); - gimple_set_location (g, gimple_location (call)); - gimple_move_vops (g, call); - bool throws = stmt_can_throw_internal (cfun, call); - gimple_call_set_nothrow (as_a <gcall *> (g), - gimple_call_nothrow_p (as_a <gcall *> (call))); - gimple_stmt_iterator gsi = *gsip; - gsi_insert_after (&gsi, g, GSI_SAME_STMT); - if (throws) - maybe_clean_or_replace_eh_stmt (call, g); - 
if (is_gimple_assign (use_stmt)) - switch (gimple_assign_rhs_code (use_stmt)) - { - case COND_EXPR: - gimple_assign_set_rhs1 (use_stmt, new_lhs); - break; - default: - gsi = gsi_for_stmt (use_stmt); - if (tree ulhs = gimple_assign_lhs (use_stmt)) - if (useless_type_conversion_p (TREE_TYPE (ulhs), - boolean_type_node)) - { - gimple_assign_set_rhs_with_ops (&gsi, SSA_NAME, new_lhs); - break; - } - gimple_assign_set_rhs_with_ops (&gsi, NOP_EXPR, new_lhs); - break; - } - else if (gimple_code (use_stmt) == GIMPLE_COND) - { - gcond *use_cond = as_a <gcond *> (use_stmt); - gimple_cond_set_code (use_cond, NE_EXPR); - gimple_cond_set_lhs (use_cond, new_lhs); - gimple_cond_set_rhs (use_cond, boolean_false_node); - } - - update_stmt (use_stmt); - if (use_lhs != lhs) - { - gsi = gsi_for_stmt (SSA_NAME_DEF_STMT (use_lhs)); - gsi_remove (&gsi, true); - release_ssa_name (use_lhs); - } - gsi_remove (gsip, true); - release_ssa_name (lhs); - return true; -} - -/* Optimize - a = {}; - b = a; - into - a = {}; - b = {}; - Similarly for memset (&a, ..., sizeof (a)); instead of a = {}; - and/or memcpy (&b, &a, sizeof (a)); instead of b = a; */ - -static void -optimize_memcpy (gimple_stmt_iterator *gsip, tree dest, tree src, tree len) -{ - gimple *stmt = gsi_stmt (*gsip); - if (gimple_has_volatile_ops (stmt)) - return; - - tree vuse = gimple_vuse (stmt); - if (vuse == NULL) - return; - - gimple *defstmt = SSA_NAME_DEF_STMT (vuse); - tree src2 = NULL_TREE, len2 = NULL_TREE; - poly_int64 offset, offset2; - tree val = integer_zero_node; - if (gimple_store_p (defstmt) - && gimple_assign_single_p (defstmt) - && TREE_CODE (gimple_assign_rhs1 (defstmt)) == CONSTRUCTOR - && !gimple_clobber_p (defstmt)) - src2 = gimple_assign_lhs (defstmt); - else if (gimple_call_builtin_p (defstmt, BUILT_IN_MEMSET) - && TREE_CODE (gimple_call_arg (defstmt, 0)) == ADDR_EXPR - && TREE_CODE (gimple_call_arg (defstmt, 1)) == INTEGER_CST) - { - src2 = TREE_OPERAND (gimple_call_arg (defstmt, 0), 0); - len2 = 
gimple_call_arg (defstmt, 2); - val = gimple_call_arg (defstmt, 1); - /* For non-0 val, we'd have to transform stmt from assignment - into memset (only if dest is addressable). */ - if (!integer_zerop (val) && is_gimple_assign (stmt)) - src2 = NULL_TREE; - } - - if (src2 == NULL_TREE) - return; - - if (len == NULL_TREE) - len = (TREE_CODE (src) == COMPONENT_REF - ? DECL_SIZE_UNIT (TREE_OPERAND (src, 1)) - : TYPE_SIZE_UNIT (TREE_TYPE (src))); - if (len2 == NULL_TREE) - len2 = (TREE_CODE (src2) == COMPONENT_REF - ? DECL_SIZE_UNIT (TREE_OPERAND (src2, 1)) - : TYPE_SIZE_UNIT (TREE_TYPE (src2))); - if (len == NULL_TREE - || !poly_int_tree_p (len) - || len2 == NULL_TREE - || !poly_int_tree_p (len2)) - return; - - src = get_addr_base_and_unit_offset (src, &offset); - src2 = get_addr_base_and_unit_offset (src2, &offset2); - if (src == NULL_TREE - || src2 == NULL_TREE - || maybe_lt (offset, offset2)) - return; - - if (!operand_equal_p (src, src2, 0)) - return; - - /* [ src + offset2, src + offset2 + len2 - 1 ] is set to val. - Make sure that - [ src + offset, src + offset + len - 1 ] is a subset of that. */ - if (maybe_gt (wi::to_poly_offset (len) + (offset - offset2), - wi::to_poly_offset (len2))) - return; - - if (dump_file && (dump_flags & TDF_DETAILS)) - { - fprintf (dump_file, "Simplified\n "); - print_gimple_stmt (dump_file, stmt, 0, dump_flags); - fprintf (dump_file, "after previous\n "); - print_gimple_stmt (dump_file, defstmt, 0, dump_flags); - } - - /* For simplicity, don't change the kind of the stmt, - turn dest = src; into dest = {}; and memcpy (&dest, &src, len); - into memset (&dest, val, len); - In theory we could change dest = src into memset if dest - is addressable (maybe beneficial if val is not 0), or - memcpy (&dest, &src, len) into dest = {} if len is the size - of dest, dest isn't volatile. 
*/ - if (is_gimple_assign (stmt)) - { - tree ctor = build_constructor (TREE_TYPE (dest), NULL); - gimple_assign_set_rhs_from_tree (gsip, ctor); - update_stmt (stmt); - } - else /* If stmt is memcpy, transform it into memset. */ - { - gcall *call = as_a <gcall *> (stmt); - tree fndecl = builtin_decl_implicit (BUILT_IN_MEMSET); - gimple_call_set_fndecl (call, fndecl); - gimple_call_set_fntype (call, TREE_TYPE (fndecl)); - gimple_call_set_arg (call, 1, val); - update_stmt (stmt); - } - - if (dump_file && (dump_flags & TDF_DETAILS)) - { - fprintf (dump_file, "into\n "); - print_gimple_stmt (dump_file, stmt, 0, dump_flags); - } -} - -/* A simple pass that attempts to fold all builtin functions. This pass - is run after we've propagated as many constants as we can. */ - -namespace { - -const pass_data pass_data_fold_builtins = -{ - GIMPLE_PASS, /* type */ - "fab", /* name */ - OPTGROUP_NONE, /* optinfo_flags */ - TV_NONE, /* tv_id */ - ( PROP_cfg | PROP_ssa ), /* properties_required */ - 0, /* properties_provided */ - 0, /* properties_destroyed */ - 0, /* todo_flags_start */ - TODO_update_ssa, /* todo_flags_finish */ -}; - -class pass_fold_builtins : public gimple_opt_pass -{ -public: - pass_fold_builtins (gcc::context *ctxt) - : gimple_opt_pass (pass_data_fold_builtins, ctxt) - {} - - /* opt_pass methods: */ - opt_pass * clone () { return new pass_fold_builtins (m_ctxt); } - virtual unsigned int execute (function *); - -}; // class pass_fold_builtins - -unsigned int -pass_fold_builtins::execute (function *fun) -{ - bool cfg_changed = false; - basic_block bb; - unsigned int todoflags = 0; - - FOR_EACH_BB_FN (bb, fun) - { - gimple_stmt_iterator i; - for (i = gsi_start_bb (bb); !gsi_end_p (i); ) - { - gimple *stmt, *old_stmt; - tree callee; - enum built_in_function fcode; - - stmt = gsi_stmt (i); - - if (gimple_code (stmt) != GIMPLE_CALL) - { - /* Remove all *ssaname_N ={v} {CLOBBER}; stmts, - after the last GIMPLE DSE they aren't needed and might - unnecessarily keep the 
SSA_NAMEs live. */ - if (gimple_clobber_p (stmt)) - { - tree lhs = gimple_assign_lhs (stmt); - if (TREE_CODE (lhs) == MEM_REF - && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME) - { - unlink_stmt_vdef (stmt); - gsi_remove (&i, true); - release_defs (stmt); - continue; - } - } - else if (gimple_assign_load_p (stmt) && gimple_store_p (stmt)) - optimize_memcpy (&i, gimple_assign_lhs (stmt), - gimple_assign_rhs1 (stmt), NULL_TREE); - gsi_next (&i); - continue; - } - - callee = gimple_call_fndecl (stmt); - if (!callee || !fndecl_built_in_p (callee, BUILT_IN_NORMAL)) - { - gsi_next (&i); - continue; - } - - fcode = DECL_FUNCTION_CODE (callee); - if (fold_stmt (&i)) - ; - else - { - tree result = NULL_TREE; - switch (DECL_FUNCTION_CODE (callee)) - { - case BUILT_IN_CONSTANT_P: - /* Resolve __builtin_constant_p. If it hasn't been - folded to integer_one_node by now, it's fairly - certain that the value simply isn't constant. */ - result = integer_zero_node; - break; - - case BUILT_IN_ASSUME_ALIGNED: - /* Remove __builtin_assume_aligned. 
*/ - result = gimple_call_arg (stmt, 0); - break; - - case BUILT_IN_STACK_RESTORE: - result = optimize_stack_restore (i); - if (result) - break; - gsi_next (&i); - continue; - - case BUILT_IN_UNREACHABLE: - if (optimize_unreachable (i)) - cfg_changed = true; - break; - - case BUILT_IN_ATOMIC_ADD_FETCH_1: - case BUILT_IN_ATOMIC_ADD_FETCH_2: - case BUILT_IN_ATOMIC_ADD_FETCH_4: - case BUILT_IN_ATOMIC_ADD_FETCH_8: - case BUILT_IN_ATOMIC_ADD_FETCH_16: - optimize_atomic_op_fetch_cmp_0 (&i, - IFN_ATOMIC_ADD_FETCH_CMP_0, - true); - break; - case BUILT_IN_SYNC_ADD_AND_FETCH_1: - case BUILT_IN_SYNC_ADD_AND_FETCH_2: - case BUILT_IN_SYNC_ADD_AND_FETCH_4: - case BUILT_IN_SYNC_ADD_AND_FETCH_8: - case BUILT_IN_SYNC_ADD_AND_FETCH_16: - optimize_atomic_op_fetch_cmp_0 (&i, - IFN_ATOMIC_ADD_FETCH_CMP_0, - false); - break; - - case BUILT_IN_ATOMIC_SUB_FETCH_1: - case BUILT_IN_ATOMIC_SUB_FETCH_2: - case BUILT_IN_ATOMIC_SUB_FETCH_4: - case BUILT_IN_ATOMIC_SUB_FETCH_8: - case BUILT_IN_ATOMIC_SUB_FETCH_16: - optimize_atomic_op_fetch_cmp_0 (&i, - IFN_ATOMIC_SUB_FETCH_CMP_0, - true); - break; - case BUILT_IN_SYNC_SUB_AND_FETCH_1: - case BUILT_IN_SYNC_SUB_AND_FETCH_2: - case BUILT_IN_SYNC_SUB_AND_FETCH_4: - case BUILT_IN_SYNC_SUB_AND_FETCH_8: - case BUILT_IN_SYNC_SUB_AND_FETCH_16: - optimize_atomic_op_fetch_cmp_0 (&i, - IFN_ATOMIC_SUB_FETCH_CMP_0, - false); - break; - - case BUILT_IN_ATOMIC_FETCH_OR_1: - case BUILT_IN_ATOMIC_FETCH_OR_2: - case BUILT_IN_ATOMIC_FETCH_OR_4: - case BUILT_IN_ATOMIC_FETCH_OR_8: - case BUILT_IN_ATOMIC_FETCH_OR_16: - optimize_atomic_bit_test_and (&i, - IFN_ATOMIC_BIT_TEST_AND_SET, - true, false); - break; - case BUILT_IN_SYNC_FETCH_AND_OR_1: - case BUILT_IN_SYNC_FETCH_AND_OR_2: - case BUILT_IN_SYNC_FETCH_AND_OR_4: - case BUILT_IN_SYNC_FETCH_AND_OR_8: - case BUILT_IN_SYNC_FETCH_AND_OR_16: - optimize_atomic_bit_test_and (&i, - IFN_ATOMIC_BIT_TEST_AND_SET, - false, false); - break; - - case BUILT_IN_ATOMIC_FETCH_XOR_1: - case BUILT_IN_ATOMIC_FETCH_XOR_2: - case 
BUILT_IN_ATOMIC_FETCH_XOR_4: - case BUILT_IN_ATOMIC_FETCH_XOR_8: - case BUILT_IN_ATOMIC_FETCH_XOR_16: - optimize_atomic_bit_test_and - (&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, false); - break; - case BUILT_IN_SYNC_FETCH_AND_XOR_1: - case BUILT_IN_SYNC_FETCH_AND_XOR_2: - case BUILT_IN_SYNC_FETCH_AND_XOR_4: - case BUILT_IN_SYNC_FETCH_AND_XOR_8: - case BUILT_IN_SYNC_FETCH_AND_XOR_16: - optimize_atomic_bit_test_and - (&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, false); - break; - - case BUILT_IN_ATOMIC_XOR_FETCH_1: - case BUILT_IN_ATOMIC_XOR_FETCH_2: - case BUILT_IN_ATOMIC_XOR_FETCH_4: - case BUILT_IN_ATOMIC_XOR_FETCH_8: - case BUILT_IN_ATOMIC_XOR_FETCH_16: - if (optimize_atomic_bit_test_and - (&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, true, true)) - break; - optimize_atomic_op_fetch_cmp_0 (&i, - IFN_ATOMIC_XOR_FETCH_CMP_0, - true); - break; - case BUILT_IN_SYNC_XOR_AND_FETCH_1: - case BUILT_IN_SYNC_XOR_AND_FETCH_2: - case BUILT_IN_SYNC_XOR_AND_FETCH_4: - case BUILT_IN_SYNC_XOR_AND_FETCH_8: - case BUILT_IN_SYNC_XOR_AND_FETCH_16: - if (optimize_atomic_bit_test_and - (&i, IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, false, true)) - break; - optimize_atomic_op_fetch_cmp_0 (&i, - IFN_ATOMIC_XOR_FETCH_CMP_0, - false); - break; - - case BUILT_IN_ATOMIC_FETCH_AND_1: - case BUILT_IN_ATOMIC_FETCH_AND_2: - case BUILT_IN_ATOMIC_FETCH_AND_4: - case BUILT_IN_ATOMIC_FETCH_AND_8: - case BUILT_IN_ATOMIC_FETCH_AND_16: - optimize_atomic_bit_test_and (&i, - IFN_ATOMIC_BIT_TEST_AND_RESET, - true, false); - break; - case BUILT_IN_SYNC_FETCH_AND_AND_1: - case BUILT_IN_SYNC_FETCH_AND_AND_2: - case BUILT_IN_SYNC_FETCH_AND_AND_4: - case BUILT_IN_SYNC_FETCH_AND_AND_8: - case BUILT_IN_SYNC_FETCH_AND_AND_16: - optimize_atomic_bit_test_and (&i, - IFN_ATOMIC_BIT_TEST_AND_RESET, - false, false); - break; - - case BUILT_IN_ATOMIC_AND_FETCH_1: - case BUILT_IN_ATOMIC_AND_FETCH_2: - case BUILT_IN_ATOMIC_AND_FETCH_4: - case BUILT_IN_ATOMIC_AND_FETCH_8: - case BUILT_IN_ATOMIC_AND_FETCH_16: - 
optimize_atomic_op_fetch_cmp_0 (&i, - IFN_ATOMIC_AND_FETCH_CMP_0, - true); - break; - case BUILT_IN_SYNC_AND_AND_FETCH_1: - case BUILT_IN_SYNC_AND_AND_FETCH_2: - case BUILT_IN_SYNC_AND_AND_FETCH_4: - case BUILT_IN_SYNC_AND_AND_FETCH_8: - case BUILT_IN_SYNC_AND_AND_FETCH_16: - optimize_atomic_op_fetch_cmp_0 (&i, - IFN_ATOMIC_AND_FETCH_CMP_0, - false); - break; - - case BUILT_IN_ATOMIC_OR_FETCH_1: - case BUILT_IN_ATOMIC_OR_FETCH_2: - case BUILT_IN_ATOMIC_OR_FETCH_4: - case BUILT_IN_ATOMIC_OR_FETCH_8: - case BUILT_IN_ATOMIC_OR_FETCH_16: - optimize_atomic_op_fetch_cmp_0 (&i, - IFN_ATOMIC_OR_FETCH_CMP_0, - true); - break; - case BUILT_IN_SYNC_OR_AND_FETCH_1: - case BUILT_IN_SYNC_OR_AND_FETCH_2: - case BUILT_IN_SYNC_OR_AND_FETCH_4: - case BUILT_IN_SYNC_OR_AND_FETCH_8: - case BUILT_IN_SYNC_OR_AND_FETCH_16: - optimize_atomic_op_fetch_cmp_0 (&i, - IFN_ATOMIC_OR_FETCH_CMP_0, - false); - break; - - case BUILT_IN_MEMCPY: - if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL) - && TREE_CODE (gimple_call_arg (stmt, 0)) == ADDR_EXPR - && TREE_CODE (gimple_call_arg (stmt, 1)) == ADDR_EXPR - && TREE_CODE (gimple_call_arg (stmt, 2)) == INTEGER_CST) - { - tree dest = TREE_OPERAND (gimple_call_arg (stmt, 0), 0); - tree src = TREE_OPERAND (gimple_call_arg (stmt, 1), 0); - tree len = gimple_call_arg (stmt, 2); - optimize_memcpy (&i, dest, src, len); - } - break; - - case BUILT_IN_VA_START: - case BUILT_IN_VA_END: - case BUILT_IN_VA_COPY: - /* These shouldn't be folded before pass_stdarg. 
*/ - result = optimize_stdarg_builtin (stmt); - break; - - default:; - } - - if (!result) - { - gsi_next (&i); - continue; - } - - gimplify_and_update_call_from_tree (&i, result); - } - - todoflags |= TODO_update_address_taken; - - if (dump_file && (dump_flags & TDF_DETAILS)) - { - fprintf (dump_file, "Simplified\n "); - print_gimple_stmt (dump_file, stmt, 0, dump_flags); - } - - old_stmt = stmt; - stmt = gsi_stmt (i); - update_stmt (stmt); - - if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt) - && gimple_purge_dead_eh_edges (bb)) - cfg_changed = true; - - if (dump_file && (dump_flags & TDF_DETAILS)) - { - fprintf (dump_file, "to\n "); - print_gimple_stmt (dump_file, stmt, 0, dump_flags); - fprintf (dump_file, "\n"); - } - - /* Retry the same statement if it changed into another - builtin, there might be new opportunities now. */ - if (gimple_code (stmt) != GIMPLE_CALL) - { - gsi_next (&i); - continue; - } - callee = gimple_call_fndecl (stmt); - if (!callee - || !fndecl_built_in_p (callee, fcode)) - gsi_next (&i); - } - } - - /* Delete unreachable blocks. */ - if (cfg_changed) - todoflags |= TODO_cleanup_cfg; - - return todoflags; -} - -} // anon namespace - -gimple_opt_pass * -make_pass_fold_builtins (gcc::context *ctxt) -{ - return new pass_fold_builtins (ctxt); -} - -/* A simple pass that emits some warnings post IPA. 
*/ - -namespace { - -const pass_data pass_data_post_ipa_warn = -{ - GIMPLE_PASS, /* type */ - "post_ipa_warn", /* name */ - OPTGROUP_NONE, /* optinfo_flags */ - TV_NONE, /* tv_id */ - ( PROP_cfg | PROP_ssa ), /* properties_required */ - 0, /* properties_provided */ - 0, /* properties_destroyed */ - 0, /* todo_flags_start */ - 0, /* todo_flags_finish */ -}; - -class pass_post_ipa_warn : public gimple_opt_pass -{ -public: - pass_post_ipa_warn (gcc::context *ctxt) - : gimple_opt_pass (pass_data_post_ipa_warn, ctxt) - {} - - /* opt_pass methods: */ - opt_pass * clone () { return new pass_post_ipa_warn (m_ctxt); } - virtual bool gate (function *) { return warn_nonnull != 0; } - virtual unsigned int execute (function *); - -}; // class pass_fold_builtins - -unsigned int -pass_post_ipa_warn::execute (function *fun) -{ - basic_block bb; - - FOR_EACH_BB_FN (bb, fun) - { - gimple_stmt_iterator gsi; - for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) - { - gimple *stmt = gsi_stmt (gsi); - if (!is_gimple_call (stmt) || warning_suppressed_p (stmt, OPT_Wnonnull)) - continue; - - tree fntype = gimple_call_fntype (stmt); - bitmap nonnullargs = get_nonnull_args (fntype); - if (!nonnullargs) - continue; - - tree fndecl = gimple_call_fndecl (stmt); - const bool closure = fndecl && DECL_LAMBDA_FUNCTION_P (fndecl); - - for (unsigned i = 0; i < gimple_call_num_args (stmt); i++) - { - tree arg = gimple_call_arg (stmt, i); - if (TREE_CODE (TREE_TYPE (arg)) != POINTER_TYPE) - continue; - if (!integer_zerop (arg)) - continue; - if (i == 0 && closure) - /* Avoid warning for the first argument to lambda functions. */ - continue; - if (!bitmap_empty_p (nonnullargs) - && !bitmap_bit_p (nonnullargs, i)) - continue; - - /* In C++ non-static member functions argument 0 refers - to the implicit this pointer. Use the same one-based - numbering for ordinary arguments. */ - unsigned argno = TREE_CODE (fntype) == METHOD_TYPE ? i : i + 1; - location_t loc = (EXPR_HAS_LOCATION (arg) - ? 
EXPR_LOCATION (arg) - : gimple_location (stmt)); - auto_diagnostic_group d; - if (argno == 0) - { - if (warning_at (loc, OPT_Wnonnull, - "%qs pointer is null", "this") - && fndecl) - inform (DECL_SOURCE_LOCATION (fndecl), - "in a call to non-static member function %qD", - fndecl); - continue; - } - - if (!warning_at (loc, OPT_Wnonnull, - "argument %u null where non-null " - "expected", argno)) - continue; - - tree fndecl = gimple_call_fndecl (stmt); - if (fndecl && DECL_IS_UNDECLARED_BUILTIN (fndecl)) - inform (loc, "in a call to built-in function %qD", - fndecl); - else if (fndecl) - inform (DECL_SOURCE_LOCATION (fndecl), - "in a call to function %qD declared %qs", - fndecl, "nonnull"); - } - BITMAP_FREE (nonnullargs); - } - } - return 0; -} - -} // anon namespace - -gimple_opt_pass * -make_pass_post_ipa_warn (gcc::context *ctxt) -{ - return new pass_post_ipa_warn (ctxt); -} |